Columns (name: dtype):
hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64 | qsc_code_num_chars_quality_signal: float64 | qsc_code_mean_word_length_quality_signal: float64 | qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64 | qsc_code_frac_chars_top_3grams_quality_signal: float64 | qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64 | qsc_code_frac_chars_dupe_6grams_quality_signal: float64 | qsc_code_frac_chars_dupe_7grams_quality_signal: float64 | qsc_code_frac_chars_dupe_8grams_quality_signal: float64 | qsc_code_frac_chars_dupe_9grams_quality_signal: float64 | qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64 | qsc_code_frac_chars_digital_quality_signal: float64 | qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64 | qsc_code_num_lines_quality_signal: float64 | qsc_code_num_chars_line_max_quality_signal: float64 | qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64 | qsc_code_frac_chars_comments_quality_signal: float64 | qsc_code_cate_xml_start_quality_signal: float64 | qsc_code_frac_lines_dupe_lines_quality_signal: float64 | qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64 | qsc_code_frac_chars_string_length_quality_signal: float64 | qsc_code_frac_chars_long_word_length_quality_signal: float64 | qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64 | qsc_code_frac_chars_hex_words_quality_signal: float64 | qsc_code_frac_lines_prompt_comments_quality_signal: float64 | qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64 | qsc_codepython_frac_lines_func_ratio_quality_signal: float64 | qsc_codepython_cate_var_zero_quality_signal: bool | qsc_codepython_frac_lines_pass_quality_signal: float64 | qsc_codepython_frac_lines_import_quality_signal: float64 | qsc_codepython_frac_lines_simplefunc_quality_signal: float64 | qsc_codepython_score_lines_no_logic_quality_signal: float64 | qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64 | qsc_code_num_chars: int64 | qsc_code_mean_word_length: int64 | qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64 | qsc_code_frac_chars_top_3grams: int64 | qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64 | qsc_code_frac_chars_dupe_6grams: int64 | qsc_code_frac_chars_dupe_7grams: int64 | qsc_code_frac_chars_dupe_8grams: int64 | qsc_code_frac_chars_dupe_9grams: int64 | qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64 | qsc_code_frac_chars_digital: int64 | qsc_code_frac_chars_whitespace: int64 | qsc_code_size_file_byte: int64 | qsc_code_num_lines: int64 | qsc_code_num_chars_line_max: int64 | qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64 | qsc_code_frac_chars_comments: int64 | qsc_code_cate_xml_start: int64 | qsc_code_frac_lines_dupe_lines: int64 | qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64 | qsc_code_frac_chars_string_length: int64 | qsc_code_frac_chars_long_word_length: int64 | qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64 | qsc_code_frac_chars_hex_words: int64 | qsc_code_frac_lines_prompt_comments: int64 | qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64 | qsc_codepython_frac_lines_func_ratio: int64 | qsc_codepython_cate_var_zero: int64 | qsc_codepython_frac_lines_pass: int64 | qsc_codepython_frac_lines_import: int64 | qsc_codepython_frac_lines_simplefunc: int64 | qsc_codepython_score_lines_no_logic: int64 | qsc_codepython_frac_lines_print: int64
effective: string | hits: int64
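The avg_line_length, max_line_length and alphanum_fraction columns are simple per-file text statistics. As a rough, hypothetical illustration only (the dataset's exact definitions, for example how trailing newlines are counted, are not given here, and basic_file_stats is not part of the dataset), values of this kind can be derived from a file's content like so:

def basic_file_stats(content: str) -> dict:
    """Approximate per-file statistics of the kind stored alongside each record."""
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines] or [0]  # guard against empty files
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        # fraction of all characters that are alphanumeric
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }

Because the stored columns may use slightly different definitions, treat this sketch purely as an orientation aid for reading the numeric fields, not as the generator of the stored values.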
hexsha: 1c6f742ff7bb6409fa5b1d806e2433034d2aa878 | size: 1,096 | ext: py | lang: Python
max_stars: path=distillation/build_student.py, repo=fengxiaoshuai/CNN_model_optimizer, head=4c48420989ffe31a4075d36a5133fee0d999466a, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_issues: path=distillation/build_student.py, repo=fengxiaoshuai/CNN_model_optimizer, head=4c48420989ffe31a4075d36a5133fee0d999466a, licenses=["Apache-2.0"], count=1, event_min=2021-01-05T10:41:24.000Z, event_max=2021-01-05T10:41:24.000Z
max_forks: path=distillation/build_student.py, repo=fengxiaoshuai/CNN_model_optimizer, head=4c48420989ffe31a4075d36a5133fee0d999466a, licenses=["Apache-2.0"], count=1, event_min=2020-08-07T02:56:20.000Z, event_max=2020-08-07T02:56:20.000Z
content:
import tensorflow as tf
import numpy as np
with tf.variable_scope("student"):
input_label = tf.placeholder(dtype=tf.float32, shape=[10, 10], name="label")
input_image = tf.placeholder(dtype=tf.float32, shape=[10, 224, 224, 3], name="input")
conv1 = tf.layers.conv2d(inputs=input_image, filters=64, kernel_size=[3, 3], padding='same')
conv2 = tf.layers.conv2d(conv1, filters=64, kernel_size=[3, 3], padding='same')
conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=[3, 3], padding='same')
shape = int(np.prod(conv3.get_shape()[1:]))
flat = tf.reshape(conv3, [-1, shape])
fc1 = tf.layers.dense(flat, units=100)
fc2 = tf.layers.dense(fc1, units=10, name="logit")
probability = tf.nn.softmax(fc2)
loss = tf.losses.softmax_cross_entropy(input_label, fc2)
print(input_label)
image = np.ones(shape=[10, 224, 224, 3])
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
saver.save(sess, "./student/student")
print(sess.run(probability, feed_dict={input_image: image}))
| 40.592593
| 96
| 0.681569
| 166
| 1,096
| 4.403614
| 0.39759
| 0.05472
| 0.057456
| 0.077975
| 0.253078
| 0.22435
| 0.22435
| 0.131327
| 0
| 0
| 0
| 0.064447
| 0.150547
| 1,096
| 26
| 97
| 42.153846
| 0.72073
| 0
| 0
| 0
| 0
| 0
| 0.046533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c70409e75cdadbb3949d0d1cde6a6029abd620b | size: 5,365 | ext: py | lang: Python
max_stars / max_issues / max_forks (all identical): path=code/statistical_tests.py, repo=ChamiLamelas/Math36B_FinalProject, head=0bdb5d17769553a4edb163534c21cc641860a07a, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import scipy.stats
import numpy as np
def f_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F-value and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
f_value : float
Sx^2 / Sy^2 as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
p_value : float
Let F be the F-distribution with nx, ny df. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. More extreme F = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 are to the right. More extreme F values for ox^2 < oy^2 are to the left.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
# compute P(F < f_value) with nx-1, ny-1 df
cdf = scipy.stats.f.cdf(f_value, nx-1, ny-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return f_value, p_value
def f1_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F1-value as defined in 'Fixing the F Test for Equal Variances' and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
p_value : float
Let F be the F-distribution with rx, ry df as specified in equation (1) of 'Fixing the F Test for Equal Variances'. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
xmean = np.mean(sample_x)
ymean = np.mean(sample_y)
# compute moment, variance below equation (1) of Shoemaker paper
fourth_moment = (np.sum((sample_x - xmean)**4) +
np.sum((sample_y - ymean)**4))/(nx + ny)
pooled_var = ((nx-1)*sample_var_x + (ny-1)*sample_var_y)/(nx + ny)
# see equation (1) of Shoemaker paper
rx = 2*nx / ((fourth_moment/pooled_var**2) - ((nx - 3)/(nx - 1)))
ry = 2*ny / ((fourth_moment/pooled_var**2) - ((ny - 3)/(ny - 1)))
# compute P(F < f_value) with rx-1, ry-1 df
cdf = scipy.stats.f.cdf(f_value, rx-1, ry-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return p_value
def count_five(sample_x, sample_y, center):
"""
Computes the extreme counts for samples x and y as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
Parameters
----------
sample_x : list
A random sample x1,...,xn.
sample_y : list
A random sample y1,...,ym.
center : str
Whether to use 'mean' or 'median' for centering.
Returns
-------
extreme_count_x : int
C_x computed with centering mu being sample mean if center = 'mean' and sample median if center = 'median' as defined in equation (1) of 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
extreme_count_y : int
C_y defined analogously to C_x above.
Raises
------
ValueError
If center is neither 'mean' or 'median'.
"""
if center not in {'mean', 'median'}:
raise ValueError('Invalid center %s' % (center))
if center == 'mean':
centering_x = np.mean(sample_x)
centering_y = np.mean(sample_y)
else:
centering_x = np.median(sample_x)
centering_y = np.median(sample_y)
# compute absolute deviations from centering for x, y samples
abs_dev_x = np.abs(np.array(sample_x) - centering_x)
abs_dev_y = np.abs(np.array(sample_y) - centering_y)
# count number of X deviations greater than max Y deviation and vice versa
# see equation (1) of Count Five paper
extreme_count_x = np.sum(np.where(abs_dev_x > np.max(abs_dev_y), 1, 0))
extreme_count_y = np.sum(np.where(abs_dev_y > np.max(abs_dev_x), 1, 0))
return extreme_count_x, extreme_count_y
| 41.269231
| 261
| 0.654054
| 906
| 5,365
| 3.729581
| 0.168874
| 0.030186
| 0.014797
| 0.017757
| 0.66055
| 0.625629
| 0.603729
| 0.575318
| 0.564664
| 0.529743
| 0
| 0.022249
| 0.237651
| 5,365
| 129
| 262
| 41.589147
| 0.803912
| 0.608947
| 0
| 0.292683
| 0
| 0
| 0.016631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.04878
| 0
| 0.195122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c720e3c45ed8efa4771cbbb3a3b55d0385c9d41 | size: 1,125 | ext: py | lang: Python
max_stars / max_issues / max_forks (all identical): path=finnhub_python/socket.py, repo=humdings/finnhub-python, head=ca98681e5a529598e9d17e3ebc2f6d49c64b54de, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
"""
Example usage of Finnhub socket API.
"""
from __future__ import print_function # Py2 compat
import websocket
from finnhub_python.utils import get_finnhub_api_key
def write_line(data, fname):
with open(fname, 'a+') as f:
f.write(data + '\n')
def on_message(ws, message):
write_line(message, tick_file)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
for symbol in SYMBOLS:
subscribe(ws, symbol)
def subscribe(ws, symbol):
template = '{"type":"subscribe","symbol":"X"}'
req = template.replace('X', symbol.upper())
ws.send(req)
tick_file = 'raw_ticks.txt'
token = get_finnhub_api_key()
SYMBOLS = [
"AAPL",
"SPY",
"VXX",
"BINANCE:ETHUSDT",
"BINANCE:BTCUSDT"
]
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("wss://ws.finnhub.io?token=" + token,
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.on_open = on_open
ws.run_forever()
| 20.089286
| 69
| 0.604444
| 144
| 1,125
| 4.458333
| 0.479167
| 0.031153
| 0.040498
| 0.049844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001202
| 0.260444
| 1,125
| 56
| 70
| 20.089286
| 0.770433
| 0.042667
| 0
| 0
| 0
| 0
| 0.129907
| 0.05514
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c722363623f21dde32f8eb4058f20a248ddb2fd | size: 2,570 | ext: py | lang: Python
max_stars: path=pycovjson/cli/convert.py, repo=RileyWilliams/pycovjson, head=741737f53ef18ef1476eccb5e626866843c152bd, licenses=["BSD-3-Clause"], count=10, event_min=2016-08-16T17:46:30.000Z, event_max=2021-04-06T22:03:58.000Z
max_issues: path=pycovjson/cli/convert.py, repo=RileyWilliams/pycovjson, head=741737f53ef18ef1476eccb5e626866843c152bd, licenses=["BSD-3-Clause"], count=46, event_min=2016-07-21T13:14:14.000Z, event_max=2020-07-02T09:16:29.000Z
max_forks: path=pycovjson/cli/convert.py, repo=RileyWilliams/pycovjson, head=741737f53ef18ef1476eccb5e626866843c152bd, licenses=["BSD-3-Clause"], count=6, event_min=2016-07-29T09:56:37.000Z, event_max=2020-08-23T18:20:47.000Z
content:
"""
Pycovjson - Command line interface
Author: rileywilliams
Version: 0.1.0
"""
import argparse
from pycovjson.write import Writer
from pycovjson.read_netcdf import NetCDFReader as Reader
def main():
"""
Command line interface for pycovjson - Converts Scientific Data Formats into CovJSON and saves to disk.
:argument -i: Input file path.
:argument -o: Output file name.
:argument -t: Use Tiling.
:argument -v: Which variable to populate coverage with.
:argument -s: [tile shape]: Tile shape.
:argument -n: Use interactive mode.
:argument -u: MongoDB URL
"""
parser = argparse.ArgumentParser(
description='Convert Scientific Data Formats into CovJSON.')
parser.add_argument('-i', '--input', dest='inputfile',
help='Name of input file', required=True)
parser.add_argument('-o', '--output', dest='outputfile',
help='Name and location of output file', default='coverage.covjson')
parser.add_argument('-t', '--tiled', action='store_true', help='Apply tiling')
parser.add_argument('-s', '--shape', nargs='+',
help='Tile shape, list', type=int)
parser.add_argument('-v', dest='variable',
help='Variable to populate coverage with', required=True)
parser.add_argument('-n', '--interactive', action='store_true', help='Enter interactive mode')
parser.add_argument('-u', '--endpoint_url', dest='endpoint_url', nargs=1,
help='MongoDB endpoint for CovJSON persistence')
args = parser.parse_args()
inputfile = args.inputfile
outputfile = args.outputfile
variable = args.variable
tiled = args.tiled
tile_shape = args.shape
interactive = args.interactive
endpoint_url = args.endpoint_url
if interactive:
axis = input('Which Axis?', Reader.get_axis(variable))
if tiled and len(tile_shape) == 0:
reader = Reader(inputfile)
shape_list = reader.get_shape(variable)
dims = reader.get_dimensions(variable)
print(list(zip(dims, shape_list)))
tile_shape = input(
'Enter the shape tile shape as a list of comma separated integers')
tile_shape = tile_shape.split(',')
tile_shape = list(map(int, tile_shape))
print(tile_shape)
if outputfile == None:
outputfile = outputfile.default
Writer(outputfile, inputfile, [variable],
tiled=tiled, tile_shape=tile_shape, endpoint_url=endpoint_url).write()
if __name__ == '__main__':
main()
| 36.714286
| 107
| 0.649027
| 306
| 2,570
| 5.320261
| 0.330065
| 0.077396
| 0.073096
| 0.03317
| 0.111794
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002536
| 0.232685
| 2,570
| 69
| 108
| 37.246377
| 0.823022
| 0.164591
| 0
| 0
| 0
| 0
| 0.2149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.068182
| 0
| 0.090909
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c72ce0a57a6b20d9f3b0b840d03685a73126b0e | size: 22,727 | ext: py | lang: Python
max_stars: path=duels/duels.py, repo=ridinginstyle00/redcogs, head=216869935f322f7e5927740da22fa36f728c48db, licenses=["MIT"], count=8, event_min=2016-08-23T16:56:17.000Z, event_max=2021-07-24T16:44:31.000Z
max_issues: path=duels/duels.py, repo=ridinginstyle00/redcogs, head=216869935f322f7e5927740da22fa36f728c48db, licenses=["MIT"], count=1, event_min=2018-04-25T14:20:06.000Z, event_max=2018-04-25T14:20:06.000Z
max_forks: path=duels/duels.py, repo=ridinginstyle00/redcogs, head=216869935f322f7e5927740da22fa36f728c48db, licenses=["MIT"], count=8, event_min=2016-07-26T21:36:44.000Z, event_max=2019-08-03T16:38:57.000Z
content:
import discord
from discord.ext import commands
from .utils import checks
from .utils.dataIO import dataIO
from __main__ import send_cmd_help
from __main__ import settings
from datetime import datetime
from random import choice
from random import sample
from copy import deepcopy
from collections import namedtuple, defaultdict
import os
import logging
import aiohttp
import asyncio
import time
from time import sleep
client = discord.Client()
class Duels:
def __init__(self, bot):
global globvar
self.bot = bot
self.duelist = dataIO.load_json("data/duels/duelist.json")
self.nuels = "duels"
self.counter = "Number:"
self.setter = "Max:"
self.wlt = dataIO.load_json("data/duels/account.json")
self.timer_board = dataIO.load_json("data/duels/timer.json")
@commands.group(name="duels", pass_context=True)
async def _duels(self, ctx):
"""Duel with another player!!"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@commands.command(name="tjoin", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def tjoin(self, ctx):
"""Add server to timer list"""
author = ctx.message.author
server = author.server
if server.id not in self.timer_board:
self.timer_board[server.id] = {"time": 0}
dataIO.save_json("data/duels/timer.json", self.timer_board)
await self.bot.say("**{}** has been added to the timer board!".format(server.name))
else:
await self.bot.say("**{}** has already been added to the timer_board!".format(server.name))
@commands.command(name="duel", pass_context=True, no_pm=True)
async def _duel(self, ctx, user: discord.Member=None, otheruser : discord.Member=None):
"""Duel another player"""
author = ctx.message.author
server = author.server
if not user or not otheruser:
await self.bot.reply("Please mention two users that you want to see a duel of!")
elif user.id == otheruser.id:
await self.bot.reply("Silly, you can't see a duel of someone against themselves!")
else:
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
self.timer_board[server.id]["time"] += 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
nick_player1 = user.name
nick_player2 = otheruser.name
action = self.duelist[self.nuels]
action_damage1, action_damage2, action_damage3, action_damage4 = self.action_damage()
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = sample(action,4)
hp_player1 = 100
hp_player2 = 100
player1_id = user.id
player2_id = otheruser.id
await self.bot.say("**{}** dueled **{}**!!\n\nPlease wait for the duel to start! Both players will begin with **{}** health!".format(user.mention, otheruser.mention, hp_player1))
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen1, nick_player2, action_damage1))
hp_player2 = hp_player2 - action_damage1
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage2))
hp_player1 = hp_player1 - action_damage2
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen3, nick_player2, action_damage3))
hp_player2 = hp_player2 - action_damage3
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage4))
hp_player1 = hp_player1 - action_damage4
if hp_player1 > hp_player2:
winning_player = nick_player1
losing_player = nick_player2
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
elif hp_player1 == hp_player2:
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **no one because it's a draw** with both players still having **{}** health!".format(remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": nick_player1, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player1))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player1))
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": nick_player2, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player2))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player2))
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
winning_player = nick_player2
losing_player = nick_player1
remaining_hp = hp_player2
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
self.timer_board[server.id]["time"] -= 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
else:
await self.bot.say("**A duel is already running!\nPlease wait for the current one to finish!**")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
@_duels.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def add (self, ctx, *, Duel : str):
"""Adds a duel to the list"""
if self.nuels not in self.duelist:
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
self.duelist[self.setter] = 100
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
dataIO.save_json("data/duels/duelist.json", self.duelist)
if Duel in self.duelist[self.nuels]:
await self.bot.say("Uh oh. It seems `{}` has already been added to the list.".format(Duel))
else:
if self.counter not in self.duelist:
self.duelist[self.counter] = 0
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
if self.duelist[self.counter] < self.duelist[self.setter]:
self.duelist[self.nuels].append(Duel)
self.duelist[self.counter] += 1
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("`{}` has been added to the duel list!".format(Duel))
else:
await self.bot.say("The maximum amount of duel actions has been added (**{}**). Please contact someone with the `Manage Server` permission to change this.".format(self.duelist[self.setter]))
@_duels.command(name="set", pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def _set(self, ctx, setter : int=None):
"""Sets the maximum amount of duels that can be added"""
if not setter:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
else:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
else:
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
if not setter:
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
@_duels.command(pass_context=True, no_pm=True)
async def join(self, ctx, user: discord.Member=None):
"""Join tournament"""
user = ctx.message.author
if user.id not in self.wlt:
self.wlt[user.id] = {"name": user.name, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has joined the tournament!".format(user.mention))
else:
await self.bot.say("{} has already joined the tournament".format(user.mention))
@_duels.command(name="stats", pass_context=True)
async def _stats(self, ctx, user : discord.Member=None):
"""Show rank and XP of users.
Defaults to yours."""
if not user:
user = ctx.message.author
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("{}, you are not yet in the tournament!".format(user.mention))
else:
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("This user has not joined the tournament")
@_duels.command(pass_context=True, no_pm=True)
async def show (self, ctx):
"""Shows list of available duels"""
if self.nuels not in self.duelist:
self.duelist[self.setter] = 100
self.duelist[self.counter] = 30
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say(" \n\n\n\n\nThe 30 duels are preset duels that are added automatically on first run. (Code looks like crap right now though :wink:)".format(ctx.prefix))
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
else:
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
@_duels.command(pass_context=True, no_pm=True)
async def remove (self, ctx, Duel : str):
"""removes a duel from the list"""
try:
x = self.duelist[self.nuels].remove(Duel)
if x is not ValueError:
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("{} has been successfully removed from the duel list!".format(Duel))
except ValueError:
await self.bot.say("I can't remove what hasn't been added to the list to begin with.")
@_duels.command(pass_context=True, no_pm=True)
async def reset (self, ctx):
"""For when you have waaay too many duels"""
if len(self.duelist[self.nuels]) > 0:
self.duelist[self.counter] = 0
self.duelist[self.nuels] = []
dataIO.save_json("data/duels/duelist.json", self.duelist)
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Duel list has been reset")
else:
await self.bot.say("I can't delete a list that's already empty!")
@_duels.command(pass_context=True)
async def timerreset(self, ctx):
"""Reset the duel timer, only use if the system hangs or breaks!"""
author = ctx.message.author
server = author.server
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
await self.bot.say("There isn't a timer right now (no duel running).")
else:
self.timer_board[server.id]["time"] = 0
await self.bot.say("Timer has been reset!")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
#This cog was made by Axaios and Ridinginstyle00. And any code taken from others we also credit them here, whether we know their name or not.
def duel_show (self):
ret = "```--------```"
for num, duels in enumerate(self.duelist[self.nuels]):
ret += str(num + 1) + ") `" + duels + "`\n"
ret += "```--------```"
return ret
def action_choose (self):
action = choice(sample(self.duelist[self.nuels],1))
return action
def multiple_action_choose (self):
action1 = self.action_choose()
action2 = self.action_choose()
action3 = self.action_choose()
action4 = self.action_choose()
return action1, action2, action3, action4
def action_damage (self):
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = self.multiple_action_choose()
action_damage1 = self.duelist[self.nuels].index(action_chosen1)
action_damage2 = self.duelist[self.nuels].index(action_chosen2)
action_damage3 = self.duelist[self.nuels].index(action_chosen3)
action_damage4 = self.duelist[self.nuels].index(action_chosen4)
return action_damage1, action_damage2, action_damage3, action_damage4
def check_joined(self, id):
if id in self.wlt:
return True
else:
return False
def get_wins(self, id):
if self.check_joined(id):
return self.wlt[id]["Wins"]
def get_losses(self, id):
if self.check_joined(id):
return self.wlt[id]["Losses"]
def get_ties(self, id):
if self.check_joined(id):
return self.wlt[id]["Ties"]
def display_time(self, seconds, granularity=2): # What would I ever do without stackoverflow?
intervals = ( # Source: http://stackoverflow.com/a/24542445
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def check_folders():
if not os.path.exists("data/duels"):
print("Creating data/duels folder...")
os.mkdir("data/duels")
def check_files():
fp = "data/duels/duelist.json"
if not dataIO.is_valid_json(fp):
print("Creating duelist.json...")
dataIO.save_json(fp, {})
acc = "data/duels/account.json"
if not dataIO.is_valid_json(acc):
print("creating account.json...")
dataIO.save_json(acc, {})
fp = "data/duels/timer.json"
if not dataIO.is_valid_json(fp):
print("Creating timer.json...")
dataIO.save_json(fp, {})
def setup(bot):
global logger
check_folders()
check_files()
n = Duels(bot)
logger = logging.getLogger("red.duels")
if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='data/duels/duels.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
bot.add_cog(n)
| 44.215953
| 195
| 0.6267
| 3,099
| 22,727
| 4.504356
| 0.132623
| 0.036106
| 0.060176
| 0.073071
| 0.704993
| 0.670607
| 0.639372
| 0.621391
| 0.597321
| 0.582492
| 0
| 0.015554
| 0.230563
| 22,727
| 514
| 196
| 44.215953
| 0.782696
| 0.018172
| 0
| 0.565789
| 0
| 0.008772
| 0.262099
| 0.039362
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028509
| false
| 0.024123
| 0.037281
| 0
| 0.089912
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c741e6bc69fc8671df5a15c26f40ce7a3bf09f3 | size: 2,839 | ext: py | lang: Python
max_stars: path=paranuara/citizens/models/citizens.py, repo=SPLAYER-HD/Paranuara, head=5a42f23d761e16e3b486ba04d9185551614f06a5, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=paranuara/citizens/models/citizens.py, repo=SPLAYER-HD/Paranuara, head=5a42f23d761e16e3b486ba04d9185551614f06a5, licenses=["MIT"], count=4, event_min=2021-06-08T20:53:43.000Z, event_max=2022-03-12T00:13:51.000Z
max_forks: path=paranuara/citizens/models/citizens.py, repo=SPLAYER-HD/RestServiceDjango, head=5a42f23d761e16e3b486ba04d9185551614f06a5, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
"""Citizens model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
# models
from paranuara.companies.models import Company
# PostgreSQL fields
from django.contrib.postgres.fields import JSONField
# Utilities
from paranuara.utils.models import ParanuaraModel
class Citizen(ParanuaraModel, AbstractUser):
"""Citizen model.
Extend from Django's Abstract User, change the username field
to email and add some extra fields.
"""
index = models.IntegerField(
unique=True,
default=-1
)
favorite_food = models.ManyToManyField(
'foods.Food',
related_name='favorite_food'
)
has_died = models.BooleanField(
'died',
default=False,
help_text=(
'Help easily distinguish citizens died or alive. '
)
)
balance = models.DecimalField(
max_digits=15,
decimal_places=2,
default=None
)
picture = models.ImageField(
'profile picture',
upload_to='paranuara/citizens/pictures/',
blank=True,
null=True
)
age = models.IntegerField(
default=-1
)
eyeColor = models.CharField(
max_length=50,
blank=False
)
gender = models.CharField(
max_length=6,
blank=True
)
email = models.EmailField(
'email address',
unique=True,
error_messages={
'unique': 'A user with that email already exists.'
}
)
phone_regex = RegexValidator(
regex=r'\+?1?\d{9,15}$',
message="Phone number must be entered in the format: +999999999. Up to 15 digits allowed."
)
phone = models.CharField(
validators=[phone_regex],
max_length=20,
blank=True
)
address = models.CharField(
max_length=100,
blank=True
)
company = models.ForeignKey(
Company,
related_name='employees_company',
on_delete=models.SET_NULL,
null=True
)
about = models.CharField(
max_length=1000,
blank=True,
null=True
)
greeting = models.CharField(
max_length=1000,
blank=True,
null=True
)
tags = JSONField(
default=None,
blank=True,
null=True
)
REQUIRED_FIELDS = ['has_died', 'eyeColor', 'index']
def get_relations(self):
return models.Relationship.objects.get(from_person=self)
class Relationship(models.Model):
"""Class to represent many to many relation between Ctizens"""
from_people = models.ForeignKey(Citizen, related_name='from_people', on_delete=models.CASCADE)
to_people = models.ForeignKey(Citizen, related_name='to_people', on_delete=models.CASCADE)
| 22.007752
| 98
| 0.62205
| 306
| 2,839
| 5.663399
| 0.441176
| 0.036353
| 0.051933
| 0.069244
| 0.129256
| 0.098096
| 0.051933
| 0.051933
| 0.051933
| 0
| 0
| 0.017699
| 0.283551
| 2,839
| 128
| 99
| 22.179688
| 0.834317
| 0.08031
| 0
| 0.197802
| 0
| 0
| 0.126793
| 0.010857
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010989
| false
| 0
| 0.065934
| 0.010989
| 0.318681
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c770de3012ff3f97ad6bf07fd17d96b765a28e4 | size: 2,442 | ext: py | lang: Python
max_stars: path=chess/rules.py, repo=DevStrikerTech/Chess-Engine, head=f0d2e0fc48b820325b1826e4379bf0520c8d3b52, licenses=["MIT"], count=18, event_min=2021-01-26T19:21:45.000Z, event_max=2021-01-27T00:32:49.000Z
max_issues: path=chess/rules.py, repo=KingCobra2018/Chess-Engine, head=f0d2e0fc48b820325b1826e4379bf0520c8d3b52, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=chess/rules.py, repo=KingCobra2018/Chess-Engine, head=f0d2e0fc48b820325b1826e4379bf0520c8d3b52, licenses=["MIT"], count=9, event_min=2021-01-26T19:51:20.000Z, event_max=2021-01-26T22:39:28.000Z
content:
import pygame
from chess.board import Board
from .variable_declaration import black_piece, white_piece, position_piece, board_square_size
class Rules:
def __init__(self, window):
self._init()
self.window = window
def update(self):
self.chess_board.draw_pieces(self.window)
self.draw_valid_moves(self.logical_moves)
pygame.display.update()
def _init(self):
self.current_piece = None
self.chess_board = Board()
self.turn_taken = black_piece
self.logical_moves = {}
def winner(self):
return self.chess_board.winner()
def reset(self):
self._init()
def select(self, board_row, board_column):
if self.current_piece:
result = self._move(board_row, board_column)
if not result:
self.current_piece = None
self.select(board_row, board_column)
piece = self.chess_board.get_pieces(board_row, board_column)
if piece != 0 and piece.piece_color == self.turn_taken:
self.current_piece = piece
self.logical_moves = self.chess_board.get_logical_moves(piece)
return True
return False
def _move(self, board_row, board_column):
piece = self.chess_board.get_pieces(board_row, board_column)
if self.current_piece and piece == 0 and (board_row, board_column) in self.logical_moves:
self.chess_board.move_pieces(self.current_piece, board_row, board_column)
skipped = self.logical_moves[(board_row, board_column)]
if skipped:
self.chess_board.remove(skipped)
self.change_turn()
else:
return False
return True
def draw_valid_moves(self, moves):
for move in moves:
row, col = move
pygame.draw.circle(self.window, position_piece,
(col * board_square_size + board_square_size // 2,
row * board_square_size + board_square_size // 2), 15)
def change_turn(self):
self.logical_moves = {}
if self.turn_taken == black_piece:
self.turn_taken = white_piece
else:
self.turn_taken = black_piece
def get_board(self):
return self.chess_board
def algorithm_move(self, chess_board):
self.chess_board = chess_board
self.change_turn()
| 29.421687
| 97
| 0.620393
| 303
| 2,442
| 4.69967
| 0.188119
| 0.091292
| 0.108146
| 0.120084
| 0.375
| 0.25632
| 0.176264
| 0.132725
| 0.095506
| 0.095506
| 0
| 0.003507
| 0.299345
| 2,442
| 82
| 98
| 29.780488
| 0.828755
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.183333
| false
| 0
| 0.05
| 0.033333
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c77f1e65b1460f3b0a09bd95f3c03183aa1bcf6 | size: 1,542 | ext: py | lang: Python
max_stars / max_issues / max_forks (all identical): path=kivygames/games/noughtsandcrosses/__init__.py, repo=jonathanjameswatson/kivygames, head=7636580956562af0814c973f94afede926cfa4b9, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import numpy as np
from kivygames.games import Game
import kivygames.games.noughtsandcrosses.c as c
class CellOccupiedError(Exception):
pass
class NoughtsAndCrosses(Game):
minPlayers = 2
maxPlayers = 2
hasAI = True
gridShape = (3, 3)
def __init__(self):
Game.__init__(self)
self.grid = np.zeros(self.gridShape, dtype="u1")
self.player = 1
def isEmpty(self, position):
return self.grid[position] == 0
async def turn(self):
await self.sendOutput("Player", self.player)
while True:
position = await self.getInput("Position", tuple, self.player)
if self.isEmpty(position):
break
await self.sendOutput("Error", "That space is already full.")
await self.sendOutput("Error", "")
self.grid[position] = self.player
await self.sendOutput("Grid", self.grid)
if c.hasPlayerWon(self.grid, self.player):
await self.sendOutput("End", f"Player {self.player} wins.")
return True
if np.count_nonzero(self.grid) == 9:
await self.sendOutput("End", f"It's a draw!")
return True
self.player = 3 - self.player
return False
def getAIInput(self, name):
if name == "Position":
return c.minimax(self.player, self.player, True, self.grid)[1]
async def game(self):
while True:
ended = await self.turn()
if ended:
break
await self.end()
| 26.135593
| 74
| 0.586252
| 184
| 1,542
| 4.86413
| 0.353261
| 0.111732
| 0.127374
| 0.053631
| 0.094972
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00932
| 0.30415
| 1,542
| 58
| 75
| 26.586207
| 0.82479
| 0
| 0
| 0.139535
| 0
| 0
| 0.070687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0.023256
| 0.069767
| 0.023256
| 0.395349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c7894b14ef779955e6bd0f109d8986f10e8fa84 | size: 1,206 | ext: py | lang: Python
max_stars / max_issues / max_forks (all identical): path=03-Decouvrez-POO/download_agents.py, repo=gruiick/openclassrooms-py, head=add4b28eab8b311dea7c1d3915a22061f54326a9, licenses=["BSD-2-Clause"], count=null, event_min=null, event_max=null
content:
#! /usr/bin/env python
import argparse
import json
import time
import urllib.error
import urllib.request
def main():
parser = argparse.ArgumentParser(description="Download agents from pplapi.com")
parser.add_argument("-c", "--count", type=int, default=10, help="Number of agents to download.")
parser.add_argument("-d", "--dest", help="Destination file. If absent, will print to stdout")
args = parser.parse_args()
agents = []
while len(agents) < args.count:
if agents:
# Wait one second between every request
time.sleep(1)
request_count = min(args.count - len(agents), 500)
try:
response = urllib.request.urlopen("http://pplapi.com/batch/{}/sample.json".format(request_count))
agents += json.loads(response.read().decode("utf8"))
except urllib.error.HTTPError:
print("Too may requests, sleeping 10s ({} agents)".format(len(agents)))
time.sleep(10)
result = json.dumps(agents, indent=2, sort_keys=True)
if args.dest:
with open(args.dest, 'w') as out_f:
out_f.write(result)
else:
print(result)
if __name__ == "__main__":
main()
| 30.923077
| 109
| 0.630182
| 154
| 1,206
| 4.831169
| 0.577922
| 0.03629
| 0.045699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.233831
| 1,206
| 38
| 110
| 31.736842
| 0.792208
| 0.048922
| 0
| 0
| 0
| 0
| 0.191266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.172414
| 0
| 0.206897
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c78aef6937bac0c47b2a7aeef06915d8ec4cebe | size: 3,681 | ext: py | lang: Python
max_stars / max_issues / max_forks (all identical): path=Commands/images.py, repo=Mariobob/Proton, head=7c5eab0251266ca1da83591d396b357bab692399, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import functools
import re
import asyncio
from io import BytesIO
from discord.ext import commands
import discord
from Utils import canvas
import random
class Images:
"""
Contains commands for manipulation of images.
"""
def __init__(self, bot):
self.bot = bot
self.imageClient = canvas.Client(bot)
@commands.command(name="illegal")
async def illegal(self, ctx, *, args=None):
"""Ask US President Donald Trump to make something illegal."""
if args is None:
await ctx.send("Please provide something to make it illegal.")
return
if len(args) > 10 or len(args) < 1:
await ctx.send("You can make only 1 to 10 lettered things illegal.")
return
elif not bool(re.match('^[a-zA-Z0-9]+$', args)):
await ctx.send("Oops! Only alphanumeric characters are allowed.")
return
payload = {"task": "gif", "word": args.upper()}
async with ctx.message.channel.typing():
message = await ctx.send(f"Convincing US President Donald Trump to make `{args}` illegal.")
async with self.bot.session.post("https://is-now-illegal.firebaseio.com/queue/tasks.json", json=payload) as resp:
pass
await asyncio.sleep(5)
url = f"https://storage.googleapis.com/is-now-illegal.appspot.com/gifs/{args.upper()}.gif"
async with self.bot.session.get(url) as resp:
image = await resp.read()
await ctx.send(file=discord.File(BytesIO(image), "illegal.gif"))
await message.delete()
@commands.command(name="beautiful")
async def beautiful(self, ctx, user: discord.Member = None):
"""This... this is beautiful!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.beautify, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="beautiful.png"))
@commands.command(name="delet")
async def delet(self, ctx, user: discord.Member = None):
"""Delet this garbage!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.deletify, avatar, f"{member.name}#{member.discriminator}")
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="delet.png"))
@commands.command(name="robot")
async def robot(self, ctx, *, args=None):
"""See a unique robot image from any text."""
if args is None:
args = ctx.author.name
randomInt = random.randrange(1, 3)
async with ctx.typing():
image = await self.imageClient.getRobotImage(args, randomInt)
file = discord.File(fp=image, filename=f"{args}.png")
await ctx.send(file=file)
@commands.command(name="thuglife")
async def thuglife(self, ctx, user: discord.Member = None):
"""Thug Life....."""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=512)
func = functools.partial(self.imageClient.thugLife, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="thuglife.png"))
def setup(bot):
bot.add_cog(Images(bot))
| 41.359551
| 125
| 0.620212
| 467
| 3,681
| 4.865096
| 0.308351
| 0.03169
| 0.047535
| 0.035211
| 0.388204
| 0.352553
| 0.265845
| 0.265845
| 0.265845
| 0.265845
| 0
| 0.007294
| 0.255094
| 3,681
| 89
| 126
| 41.359551
| 0.821298
| 0.012225
| 0
| 0.246377
| 0
| 0.014493
| 0.141984
| 0.010474
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0.014493
| 0.115942
| 0
| 0.202899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c7b885a3c4fad049ff2d1a6a859aa95838e0630 | size: 2,954 | ext: py | lang: Python
max_stars / max_issues / max_forks (all identical): path=encyclopaedia/labels.py, repo=tcyrus/renpy-encyclopaedia, head=900517b34ab7b870f6ee03057f898fb5eb61313c, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
from renpy import store
class Labels(store.object):
"""Controls how the labels that display Encyclopaedia data appear.
Attributes:
percentage_label (str): Placed next to the percentage unlocked number
page_label (str): Placed before the entry page displayed
page_separator_label (str): Placed in-between the
current page number and the total page number
sort_number_label (str): Label for Number Sorting
sort_alphabetical_label (str): Label for Alphabetical sorting
sort_reverse_alphabetical_label (str): Label for Reverse Alphabetical
sorting
sort_subject_label (str): Label for Subject sorting
sort_unread_label (str): Label for Unread sorting
unread_entry_label (str): Default for the tag next to unread entries
locked_entry_label (str): Default for a "Locked Entry" button
"""
def __init__(self, encyclopaedia):
self.encyclopaedia = encyclopaedia
self.percentage_label = '%'
self.page_label = 'Page'
self.page_separator_label = '/'
self.sort_number_label = "Number"
self.sort_alphabetical_label = "A to Z"
self.sort_reverse_alphabetical_label = "Z to A"
self.sort_subject_label = "Subject"
self.sort_unread_label = "Unread"
self.unread_entry_label = "New!"
self.locked_entry_label = "???"
@property
def percentage_unlocked(self):
"""Percentage representation of the amount of the encyclopaedia
that's unlocked. ie: '50%'.
Returns:
str
"""
percentage_unlocked = int(self.encyclopaedia.percentage_unlocked)
return "{}{}".format(percentage_unlocked, self.percentage_label)
@property
def entry_current_page(self):
"""The sub-page of an entry that is being viewed.
Returns:
str
"""
try:
total_pages = self.encyclopaedia.active.pages
except AttributeError:
raise AttributeError(
"Cannot display Entry's current page when no entry is open."
)
label = "{0} {1} {2} {3}".format(
self.page_label,
self.encyclopaedia.sub_current_position,
self.page_separator_label,
total_pages
)
return label
@property
def sorting_mode(self):
"""Label for the encyclopaedia's current sorting mode.
Returns:
str
"""
enc = self.encyclopaedia
sorting_strings = {
enc.SORT_NUMBER: self.sort_number_label,
enc.SORT_ALPHABETICAL: self.sort_alphabetical_label,
enc.SORT_REVERSE_ALPHABETICAL: self.sort_reverse_alphabetical_label, # NOQA: E501
enc.SORT_SUBJECT: self.sort_subject_label,
enc.SORT_UNREAD: self.sort_unread_label
}
return sorting_strings[enc.sorting_mode]
| 32.461538
| 94
| 0.635748
| 335
| 2,954
| 5.376119
| 0.262687
| 0.04442
| 0.036091
| 0.04442
| 0.092171
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004292
| 0.290115
| 2,954
| 90
| 95
| 32.822222
| 0.854554
| 0.350034
| 0
| 0.068182
| 0
| 0
| 0.068789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.022727
| 0
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
hexsha: 1c7bed607992f89cbbe011d8fbb3d755bb77d244 | size: 1,816 | ext: py | lang: Python
max_stars / max_issues / max_forks (all identical): path=ncservice/ncDeviceOps/threaded/get_configs.py, repo=cunningr/yanccm, head=2d8f891d704672f4d3a15472c7a13edf7832d53d, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import logging
from ncservice.ncDeviceOps.nc_device_ops import NcDeviceOps
from ncservice.ncDeviceOps.task_report import TaskReport
from ncservice.ncDeviceOps.threaded.base_thread_class import BaseThreadClass
logger = logging.getLogger('main.{}'.format(__name__))
extra = {'signature': '---SIGNATURE-NOT-SET---'}
class GetConfigs(BaseThreadClass):
def __init__(self, service):
super().__init__()
self.service = service
self.results = TaskReport(service)
def get_configs(self):
logger.debug('Requesting thread queue for _th_read_configs', extra=extra)
enclosure_queue = self.create_thread_queue(
self._th_read_configs
)
for device in self.service:
enclosure_queue.put(device)
enclosure_queue.join()
return self.results
def _th_read_configs(self, tid, queue):
while True:
target_device = queue.get()
device_name = target_device['device']
host = target_device['host']
port = target_device.get('ncport', 830)
session = NcDeviceOps(host, port=port, tid=tid)
current_config = session.nc_get_configs()
if current_config is not None:
self.results.set_device_config_data('original_running_configs', device_name, current_config)
self.results.set_device_config_data('current_running_configs', device_name, current_config)
self.results.set_service_result(device_name, 'SUCCESS')
            else:
                logger.error('TID-{}: Unable to retrieve config for device: {}'
                             .format(tid, device_name), extra=extra)
                # Close the NETCONF session on failure too, so it is not leaked.
                session.close_session()
                queue.task_done()
                continue
session.close_session()
queue.task_done()
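A hedged usage sketch: the shape of `service` (an iterable of dicts with 'device', 'host' and an optional 'ncport' key) is inferred from `_th_read_configs` above, and the device names and addresses are invented.

# Sketch only: replace the entries with real devices reachable over NETCONF.
service = [
    {'device': 'edge-router-1', 'host': '192.0.2.10', 'ncport': 830},
    {'device': 'edge-router-2', 'host': '192.0.2.11'},
]
report = GetConfigs(service).get_configs()  # TaskReport populated with running configs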
| 37.061224
| 108
| 0.643722
| 200
| 1,816
| 5.535
| 0.365
| 0.049684
| 0.065041
| 0.036134
| 0.133695
| 0.133695
| 0.092141
| 0.092141
| 0.092141
| 0
| 0
| 0.002246
| 0.264317
| 1,816
| 48
| 109
| 37.833333
| 0.826347
| 0
| 0
| 0.051282
| 0
| 0
| 0.110683
| 0.038546
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.102564
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c7fbcb14ea301bda84e83c0a6cddb4f13bae6fe
| 14,860
|
py
|
Python
|
Postprocessing/Hardt/Hardt.py
|
maliha93/Fairness-Analysis-Code
|
acf13c6e7993704fc627249fe4ada44d8b616264
|
[
"MIT"
] | null | null | null |
Postprocessing/Hardt/Hardt.py
|
maliha93/Fairness-Analysis-Code
|
acf13c6e7993704fc627249fe4ada44d8b616264
|
[
"MIT"
] | null | null | null |
Postprocessing/Hardt/Hardt.py
|
maliha93/Fairness-Analysis-Code
|
acf13c6e7993704fc627249fe4ada44d8b616264
|
[
"MIT"
] | null | null | null |
import cvxpy as cvx
import numpy as np
from collections import namedtuple
from metric import metric, cd
import pandas as pd
import sys
from helper import make_dataset
class Model(namedtuple('Model', 'pred label')):
def logits(self):
raw_logits = np.clip(np.log(self.pred / (1 - self.pred)), -100, 100)
return raw_logits
def num_samples(self):
return len(self.pred)
def base_rate(self):
"""
Percentage of samples belonging to the positive class
"""
return np.mean(self.label)
def accuracy(self):
return self.accuracies().mean()
def precision(self):
return (self.label[self.pred.round() == 1]).mean()
    def recall(self):
        # Fraction of actual positives that are predicted positive.
        return (self.pred[self.label == 1].round()).mean()
def tpr(self):
"""
True positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 1))
def fpr(self):
"""
False positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 0))
def tnr(self):
"""
True negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 0))
def fnr(self):
"""
False negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 1))
def fn_cost(self):
"""
Generalized false negative cost
"""
return 1 - self.pred[self.label == 1].mean()
def fp_cost(self):
"""
Generalized false positive cost
"""
return self.pred[self.label == 0].mean()
def accuracies(self):
return self.pred.round() == self.label
def eq_odds(self, othr, mix_rates=None):
has_mix_rates = not (mix_rates is None)
if not has_mix_rates:
mix_rates = self.eq_odds_optimal_mix_rates(othr)
sp2p, sn2p, op2p, on2p = tuple(mix_rates)
self_fair_pred = self.pred.copy()
self_pp_indices, = np.nonzero(self.pred.round())
self_pn_indices, = np.nonzero(1 - self.pred.round())
np.random.shuffle(self_pp_indices)
np.random.shuffle(self_pn_indices)
n2p_indices = self_pn_indices[:int(len(self_pn_indices) * sn2p)]
self_fair_pred[n2p_indices] = 1 - self_fair_pred[n2p_indices]
p2n_indices = self_pp_indices[:int(len(self_pp_indices) * (1 - sp2p))]
self_fair_pred[p2n_indices] = 1 - self_fair_pred[p2n_indices]
othr_fair_pred = othr.pred.copy()
othr_pp_indices, = np.nonzero(othr.pred.round())
othr_pn_indices, = np.nonzero(1 - othr.pred.round())
np.random.shuffle(othr_pp_indices)
np.random.shuffle(othr_pn_indices)
n2p_indices = othr_pn_indices[:int(len(othr_pn_indices) * on2p)]
othr_fair_pred[n2p_indices] = 1 - othr_fair_pred[n2p_indices]
p2n_indices = othr_pp_indices[:int(len(othr_pp_indices) * (1 - op2p))]
othr_fair_pred[p2n_indices] = 1 - othr_fair_pred[p2n_indices]
fair_self = Model(self_fair_pred, self.label)
fair_othr = Model(othr_fair_pred, othr.label)
if not has_mix_rates:
return fair_self, fair_othr, mix_rates
else:
return fair_self, fair_othr
def eq_odds_optimal_mix_rates(self, othr):
sbr = float(self.base_rate())
obr = float(othr.base_rate())
sp2p = cvx.Variable(1)
sp2n = cvx.Variable(1)
sn2p = cvx.Variable(1)
sn2n = cvx.Variable(1)
op2p = cvx.Variable(1)
op2n = cvx.Variable(1)
on2p = cvx.Variable(1)
on2n = cvx.Variable(1)
sfpr = self.fpr() * sp2p + self.tnr() * sn2p
sfnr = self.fnr() * sn2n + self.tpr() * sp2n
ofpr = othr.fpr() * op2p + othr.tnr() * on2p
ofnr = othr.fnr() * on2n + othr.tpr() * op2n
error = sfpr + sfnr + ofpr + ofnr
sflip = 1 - self.pred
sconst = self.pred
oflip = 1 - othr.pred
oconst = othr.pred
sm_tn = np.logical_and(self.pred.round() == 0, self.label == 0)
sm_fn = np.logical_and(self.pred.round() == 0, self.label == 1)
sm_tp = np.logical_and(self.pred.round() == 1, self.label == 1)
sm_fp = np.logical_and(self.pred.round() == 1, self.label == 0)
om_tn = np.logical_and(othr.pred.round() == 0, othr.label == 0)
om_fn = np.logical_and(othr.pred.round() == 0, othr.label == 1)
om_tp = np.logical_and(othr.pred.round() == 1, othr.label == 1)
om_fp = np.logical_and(othr.pred.round() == 1, othr.label == 0)
spn_given_p = (sn2p * (sflip * sm_fn).mean() + sn2n * (sconst * sm_fn).mean()) / sbr + \
(sp2p * (sconst * sm_tp).mean() + sp2n * (sflip * sm_tp).mean()) / sbr
spp_given_n = (sp2n * (sflip * sm_fp).mean() + sp2p * (sconst * sm_fp).mean()) / (1 - sbr) + \
(sn2p * (sflip * sm_tn).mean() + sn2n * (sconst * sm_tn).mean()) / (1 - sbr)
opn_given_p = (on2p * (oflip * om_fn).mean() + on2n * (oconst * om_fn).mean()) / obr + \
(op2p * (oconst * om_tp).mean() + op2n * (oflip * om_tp).mean()) / obr
opp_given_n = (op2n * (oflip * om_fp).mean() + op2p * (oconst * om_fp).mean()) / (1 - obr) + \
(on2p * (oflip * om_tn).mean() + on2n * (oconst * om_tn).mean()) / (1 - obr)
constraints = [
sp2p == 1 - sp2n,
sn2p == 1 - sn2n,
op2p == 1 - op2n,
on2p == 1 - on2n,
sp2p <= 1,
sp2p >= 0,
sn2p <= 1,
sn2p >= 0,
op2p <= 1,
op2p >= 0,
on2p <= 1,
on2p >= 0,
spp_given_n == opp_given_n,
spn_given_p == opn_given_p,
]
prob = cvx.Problem(cvx.Minimize(error), constraints)
prob.solve()
res = np.array([sp2p.value, sn2p.value, op2p.value, on2p.value])
return res
def __repr__(self):
return '\n'.join([
'Accuracy:\t%.3f' % self.accuracy(),
'F.P. cost:\t%.3f' % self.fp_cost(),
'F.N. cost:\t%.3f' % self.fn_cost(),
'T.P. rate:\t%.3f' % self.tpr(),
'T.N. rate:\t%.3f' % self.tnr(),
'Precision:\t%.3f' % self.precision(),
'Recall:\t\t%.3f' % self.recall(),
'Base rate:\t%.3f' % self.base_rate(),
'Avg. score:\t%.3f' % self.pred.mean(),
])
def Adult(f="data/adult_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/adult_test_repaired.csv", index=False)
np.savetxt("results_Hardt/adult_test_repaired_cd.csv", y_cd, delimiter=",")
def Compas(f="data/compas_post.csv", f1='', f2=''):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
# Randomly split the data into two sets - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv(f1+"results_Hardt/compas_test_repaired"+f2+".csv", index=False)
np.savetxt(f1+"results_Hardt/compas_test_repaired"+f2+"_cd.csv", y_cd, delimiter=",")
def German(f="data/german_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
# Randomly split the data into two sets - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/german_test_repaired.csv", index=False)
np.savetxt("results_Hardt/german_test_repaired_cd.csv", y_cd, delimiter=",")
def Hardt(dataset):
make_dataset(dataset)
if dataset == 'adult':
Adult()
elif dataset == 'compas':
Compas()
elif dataset == 'german':
German()
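A minimal driver sketch, assuming the expected input CSV (e.g. data/adult_post.csv) and the results_Hardt/ output directory already exist; 'compas' and 'german' are the other supported dataset keys.

if __name__ == '__main__':
    Hardt('adult')  # writes repaired predictions and CD results under results_Hardt/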
| 43.323615
| 113
| 0.611844
| 2,148
| 14,860
| 3.866387
| 0.086127
| 0.047682
| 0.06755
| 0.054184
| 0.694401
| 0.635882
| 0.626731
| 0.618543
| 0.610837
| 0.582902
| 0
| 0.028582
| 0.263055
| 14,860
| 343
| 114
| 43.323615
| 0.729796
| 0.054172
| 0
| 0.392562
| 0
| 0
| 0.057476
| 0.016134
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082645
| false
| 0
| 0.028926
| 0.024793
| 0.18595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c802e1801de4019c3b100aff72c042e2ff702ed
| 1,632
|
py
|
Python
|
tests/test_exceptions.py
|
nesnahnoj/py3-textract
|
61290fb44c964cf78ce64593fdf0076143dbcd91
|
[
"MIT"
] | 2
|
2015-03-03T12:40:17.000Z
|
2015-03-03T13:05:14.000Z
|
tests/test_exceptions.py
|
anderser/textract
|
8f7b32cadabcd13ad1eab1a56b9aa151901d0453
|
[
"MIT"
] | null | null | null |
tests/test_exceptions.py
|
anderser/textract
|
8f7b32cadabcd13ad1eab1a56b9aa151901d0453
|
[
"MIT"
] | null | null | null |
import unittest
import os
import subprocess
import base
class ExceptionTestCase(base.GenericUtilities, unittest.TestCase):
"""This class contains a bunch of tests to make sure that textract
fails in expected ways.
"""
def test_unsupported_extension_cli(self):
"""Make sure unsupported extension exits with non-zero status"""
filename = self.get_temp_filename(extension="extension")
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
os.remove(filename)
def test_unsupported_extension_python(self):
"""Make sure unsupported extension raises the correct error"""
filename = self.get_temp_filename(extension="extension")
import textract
from textract.exceptions import ExtensionNotSupported
with self.assertRaises(ExtensionNotSupported):
textract.process(filename)
os.remove(filename)
def test_missing_filename_cli(self):
"""Make sure missing files exits with non-zero status"""
filename = self.get_temp_filename()
os.remove(filename)
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
def test_missing_filename_python(self):
"""Make sure missing files raise the correct error"""
filename = self.get_temp_filename()
os.remove(filename)
import textract
from textract.exceptions import MissingFileError
with self.assertRaises(MissingFileError):
textract.process(filename)
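The test case above can be run with the standard unittest runner; this sketch assumes it is launched from the project root so that `base` and `textract` are importable.

if __name__ == '__main__':
    unittest.main()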
| 37.090909
| 72
| 0.692402
| 184
| 1,632
| 6.032609
| 0.347826
| 0.036036
| 0.043243
| 0.068468
| 0.594595
| 0.452252
| 0.376577
| 0.344144
| 0.23964
| 0.23964
| 0
| 0.003142
| 0.219975
| 1,632
| 43
| 73
| 37.953488
| 0.868814
| 0.185049
| 0
| 0.551724
| 0
| 0
| 0.066409
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.137931
| false
| 0
| 0.275862
| 0
| 0.448276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c81071b5834983f0325a721292427a8ce6ce5f8
| 1,998
|
py
|
Python
|
dloud_ads/circular_queue.py
|
dataloudlabs/dloud-ads
|
d0ad3f169c2384292db4097e00ba7858f37a8198
|
[
"MIT"
] | null | null | null |
dloud_ads/circular_queue.py
|
dataloudlabs/dloud-ads
|
d0ad3f169c2384292db4097e00ba7858f37a8198
|
[
"MIT"
] | null | null | null |
dloud_ads/circular_queue.py
|
dataloudlabs/dloud-ads
|
d0ad3f169c2384292db4097e00ba7858f37a8198
|
[
"MIT"
] | null | null | null |
"""Queue implementation using circularly linked list for storage."""
class CircularQueue:
"""Queue implementation using circularly linked list for storage."""
class _Node:
"""Lightweight, nonpublic class for storing a singly linked node."""
__slots__ = '_element', '_next'
def __init__(self, element, next_element):
self._element = element
self._next = next_element
def __init__(self):
"""Create an empty queue."""
self._tail = None
self._size = 0
def __len__(self):
"""Return the number of elements in the queue."""
return self._size
def is_empty(self):
"""Return True if the queue is empty."""
return self._size == 0
def first(self):
"""Return (but do not remove) the element at the front of the queue.
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
head = self._tail._next
return head._element
def dequeue(self):
"""Remove and return the first element of the queue (i.e., FIFO).
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
oldhead = self._tail._next
if self._size == 1:
self._tail = None
else:
self._tail._next = oldhead._next
self._size -= 1
return oldhead._element
def enqueue(self, element):
"""Add an element to the back of queue."""
newest = self._Node(element, None)
if self.is_empty():
newest._next = newest
else:
newest._next = self._tail._next
self._tail._next = newest
self._tail = newest
self._size += 1
def rotate(self):
"""Rotate front element to the back of the queue."""
if self._size > 0:
self._tail = self._tail._next
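A short, self-contained usage sketch of the queue above.

q = CircularQueue()
for item in ('a', 'b', 'c'):
    q.enqueue(item)
q.rotate()          # front element 'a' moves to the back
print(q.first())    # -> 'b'
print(q.dequeue())  # -> 'b'
print(len(q))       # -> 2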
| 29.382353
| 76
| 0.578579
| 243
| 1,998
| 4.526749
| 0.263374
| 0.072727
| 0.065455
| 0.032727
| 0.302727
| 0.254545
| 0.254545
| 0.254545
| 0.254545
| 0.147273
| 0
| 0.004438
| 0.323323
| 1,998
| 67
| 77
| 29.820896
| 0.809172
| 0.302803
| 0
| 0.225
| 0
| 0
| 0.031346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c850ddd900887b33d213aba43297d734592063b
| 31,713
|
py
|
Python
|
geofem/emg3d/meshes.py
|
iisadoramacedo/geofem-master
|
cc5cf4ae660480dd4dc3d805310f7207fb28230e
|
[
"MIT"
] | null | null | null |
geofem/emg3d/meshes.py
|
iisadoramacedo/geofem-master
|
cc5cf4ae660480dd4dc3d805310f7207fb28230e
|
[
"MIT"
] | 1
|
2020-10-29T11:42:21.000Z
|
2020-10-29T11:42:21.000Z
|
build/lib/geofem/emg3d/meshes.py
|
iisadoramacedo/geofem-master
|
cc5cf4ae660480dd4dc3d805310f7207fb28230e
|
[
"MIT"
] | 1
|
2020-07-09T18:15:10.000Z
|
2020-07-09T18:15:10.000Z
|
"""
:mod:`meshes` -- Discretization
===============================
Everything related to meshes appropriate for the multigrid solver.
"""
# Copyright 2018-2020 The emg3d Developers.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from copy import deepcopy
from scipy import optimize
__all__ = ['TensorMesh', 'get_hx_h0', 'get_cell_numbers', 'get_stretched_h',
'get_domain', 'get_hx']
class TensorMesh:
"""Rudimentary mesh for multigrid calculation.
The tensor-mesh :class:`discretize.TensorMesh` is a powerful tool,
including sophisticated mesh-generation possibilities in 1D, 2D, and 3D,
plotting routines, and much more. However, in the multigrid solver we have
to generate a mesh at each level, many times over and over again, and we
only need a very limited set of attributes. This tensor-mesh class provides
all required attributes. All attributes here are the same as their
counterparts in :class:`discretize.TensorMesh` (both in name and value).
.. warning::
This is a slimmed-down version of :class:`discretize.TensorMesh`, meant
principally for internal use by the multigrid modeller. It is highly
recommended to use :class:`discretize.TensorMesh` to create the input
meshes instead of this class. There are no input-checks carried out
here, and there is only one accepted input format for `h` and `x0`.
Parameters
----------
h : list of three ndarrays
Cell widths in [x, y, z] directions.
x0 : ndarray of dimension (3, )
Origin (x, y, z).
"""
def __init__(self, h, x0):
"""Initialize the mesh."""
self.x0 = x0
# Width of cells.
self.hx = h[0]
self.hy = h[1]
self.hz = h[2]
# Cell related properties.
self.nCx = int(self.hx.size)
self.nCy = int(self.hy.size)
self.nCz = int(self.hz.size)
self.vnC = np.array([self.hx.size, self.hy.size, self.hz.size])
self.nC = int(self.vnC.prod())
self.vectorCCx = np.r_[0, self.hx[:-1].cumsum()]+self.hx*0.5+self.x0[0]
self.vectorCCy = np.r_[0, self.hy[:-1].cumsum()]+self.hy*0.5+self.x0[1]
self.vectorCCz = np.r_[0, self.hz[:-1].cumsum()]+self.hz*0.5+self.x0[2]
# Node related properties.
self.nNx = self.nCx + 1
self.nNy = self.nCy + 1
self.nNz = self.nCz + 1
self.vnN = np.array([self.nNx, self.nNy, self.nNz], dtype=int)
self.nN = int(self.vnN.prod())
self.vectorNx = np.r_[0., self.hx.cumsum()] + self.x0[0]
self.vectorNy = np.r_[0., self.hy.cumsum()] + self.x0[1]
self.vectorNz = np.r_[0., self.hz.cumsum()] + self.x0[2]
# Edge related properties.
self.vnEx = np.array([self.nCx, self.nNy, self.nNz], dtype=int)
self.vnEy = np.array([self.nNx, self.nCy, self.nNz], dtype=int)
self.vnEz = np.array([self.nNx, self.nNy, self.nCz], dtype=int)
self.nEx = int(self.vnEx.prod())
self.nEy = int(self.vnEy.prod())
self.nEz = int(self.vnEz.prod())
self.vnE = np.array([self.nEx, self.nEy, self.nEz], dtype=int)
self.nE = int(self.vnE.sum())
def __repr__(self):
"""Simple representation."""
return (f"TensorMesh: {self.nCx} x {self.nCy} x {self.nCz} "
f"({self.nC:,})")
def copy(self):
"""Return a copy of the TensorMesh."""
return TensorMesh.from_dict(self.to_dict(True))
def to_dict(self, copy=False):
"""Store the necessary information of the TensorMesh in a dict."""
out = {'hx': self.hx, 'hy': self.hy, 'hz': self.hz, 'x0': self.x0,
'__class__': self.__class__.__name__}
if copy:
return deepcopy(out)
else:
return out
@classmethod
def from_dict(cls, inp):
"""Convert dictionary into :class:`TensorMesh` instance.
Parameters
----------
inp : dict
Dictionary as obtained from :func:`TensorMesh.to_dict`.
The dictionary needs the keys `hx`, `hy`, `hz`, and `x0`.
Returns
-------
obj : :class:`TensorMesh` instance
"""
try:
return cls(h=[inp['hx'], inp['hy'], inp['hz']], x0=inp['x0'])
except KeyError as e:
print(f"* ERROR :: Variable {e} missing in `inp`.")
raise
@property
def vol(self):
"""Construct cell volumes of the 3D model as 1D array."""
if getattr(self, '_vol', None) is None:
self._vol = (self.hx[None, None, :]*self.hy[None, :, None] *
self.hz[:, None, None]).ravel()
return self._vol
def get_hx_h0(freq, res, domain, fixed=0., possible_nx=None, min_width=None,
pps=3, alpha=None, max_domain=100000., raise_error=True, verb=1,
return_info=False):
r"""Return cell widths and origin for given parameters.
Returns cell widths for the provided frequency, resistivity, domain extent,
and other parameters using a flexible amount of cells. See input parameters
for more details. A maximum of three hard/fixed boundaries can be provided
(one of which is the grid center).
The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where
the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the
parameter `pps` stands for 'points-per-skindepth'. The minimum cell width
can be restricted with the parameter `min_width`.
The actual calculation domain adds a buffer zone around the (survey)
domain. The thickness of the buffer is six times the skin depth. The field
is basically zero after two wavelengths. A wavelength is
:math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6
gives therefore almost two wavelengths, as the field travels to the
boundary and back. The actual buffer thickness can be steered with the
`res` parameter.
One has to take into account that the air is very resistive, which has to
be considered not just in the vertical direction, but also in the
horizontal directions, as the airwave will bounce back from the sides
otherwise. In the marine case this issue reduces with increasing water
depth.
See Also
--------
get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed
domain.
Parameters
----------
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
res : float or list
Resistivity (Ohm m) to calculate the skin depth. The skin depth is
used to calculate the minimum cell width and the boundary thicknesses.
Up to three resistivities can be provided:
- float: Same resistivity for everything;
- [min_width, boundaries];
- [min_width, left boundary, right boundary].
domain : list
Contains the survey-domain limits [min, max]. The actual calculation
domain consists of this domain plus a buffer zone around it, which
depends on frequency and resistivity.
fixed : list, optional
Fixed boundaries, one, two, or maximum three values. The grid is
centered around the first value. Hence it is the center location with
the smallest cell. Two more fixed boundaries can be added, at most one
on each side of the first one.
Default is 0.
possible_nx : list, optional
List of possible numbers of cells. See :func:`get_cell_numbers`.
Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to
[16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384].
min_width : float, list or None, optional
Minimum cell width restriction:
- None : No restriction;
- float : Fixed to this value, ignoring skin depth and `pps`.
- list [min, max] : Lower and upper bounds.
Default is None.
pps : int, optional
Points per skindepth; minimum cell width is calculated via
`dmin = skindepth/pps`.
Default = 3.
alpha : list, optional
Maximum alpha and step size to find a good alpha. The first value is
the maximum alpha of the survey domain, the second value is the maximum
alpha for the buffer zone, and the third value is the step size.
Default = [1, 1.5, .01], hence no stretching within the survey domain
and a maximum stretching of 1.5 in the buffer zone; step size is 0.01.
max_domain : float, optional
Maximum calculation domain from fixed[0] (usually source position).
Default is 100,000.
raise_error : bool, optional
If True, an error is raised if no suitable grid is found. Otherwise it
just prints a message and returns None's.
Default is True.
verb : int, optional
Verbosity, 0 or 1.
Default = 1.
return_info : bool
If True, a dictionary is returned with some grid info (min and max
cell width and alpha).
Returns
-------
hx : ndarray
Cell widths of mesh.
x0 : float
Origin of the mesh.
info : dict
Dictionary with mesh info; only if ``return_info=True``.
Keys:
- `dmin`: Minimum cell width;
- `dmax`: Maximum cell width;
- `amin`: Minimum alpha;
- `amax`: Maximum alpha.
"""
# Get variables with default lists:
if alpha is None:
alpha = [1, 1.5, 0.01]
if possible_nx is None:
possible_nx = get_cell_numbers(500, 5, 3)
# Cast resistivity value(s).
res = np.array(res, ndmin=1)
if res.size == 1:
res_arr = np.array([res[0], res[0], res[0]])
elif res.size == 2:
res_arr = np.array([res[0], res[1], res[1]])
else:
res_arr = np.array([res[0], res[1], res[2]])
# Cast and check fixed.
fixed = np.array(fixed, ndmin=1)
if fixed.size > 2:
# Check length.
if fixed.size > 3:
print("\n* ERROR :: Maximum three fixed boundaries permitted.\n"
f" Provided: {fixed.size}.")
raise ValueError("Wrong input for fixed")
# Sort second and third, so it doesn't matter how it was provided.
fixed = np.array([fixed[0], max(fixed[1:]), min(fixed[1:])])
# Check side.
if np.sign(np.diff(fixed[:2])) == np.sign(np.diff(fixed[::2])):
print("\n* ERROR :: 2nd and 3rd fixed boundaries have to be "
"left and right of the first one.\n "
f"Provided: [{fixed[0]}, {fixed[1]}, {fixed[2]}]")
raise ValueError("Wrong input for fixed")
# Calculate skin depth.
skind = 503.3*np.sqrt(res_arr/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Minimum cell width.
dmin = skind[0]/pps
if min_width is not None: # Respect user input.
min_width = np.array(min_width, ndmin=1)
if min_width.size == 1:
dmin = min_width
else:
dmin = np.clip(dmin, *min_width)
# Survey domain; contains all sources and receivers.
domain = np.array(domain, dtype=float)
# Calculation domain; big enough to avoid boundary effects.
# To avoid boundary effects we want the signal to travel two wavelengths
# from the source to the boundary and back to the receiver.
# => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %.
# Two wavelengths we can safely assume it is zero.
#
# The air does not follow the concept of skin depth, as it is a wave rather
# than diffusion. For this is the factor `max_domain`, which restricts
# the domain in each direction to this value from the center.
# (a) Source to edges of domain.
dist_in_domain = abs(domain - fixed[0])
# (b) Two wavelengths.
two_lambda = skind[1:]*4*np.pi
# (c) Required buffer, additional to domain.
dist_buff = np.max([np.zeros(2), (two_lambda - dist_in_domain)/2], axis=0)
# (d) Add buffer to domain.
calc_domain = np.array([domain[0]-dist_buff[0], domain[1]+dist_buff[1]])
# (e) Restrict total domain to max_domain.
calc_domain[0] = max(calc_domain[0], fixed[0]-max_domain)
calc_domain[1] = min(calc_domain[1], fixed[0]+max_domain)
# Initiate flag if terminated.
finished = False
# Initiate alpha variables for survey and calculation domains.
sa, ca = 1.0, 1.0
# Loop over possible cell numbers from small to big.
for nx in np.unique(possible_nx):
# Loop over possible alphas for domain.
for sa in np.arange(1.0, alpha[0]+alpha[2]/2, alpha[2]):
# Get current stretched grid cell sizes.
thxl = dmin*sa**np.arange(nx) # Left of origin.
thxr = dmin*sa**np.arange(nx) # Right of origin.
# 0. Adjust stretching for fixed boundaries.
if fixed.size > 1: # Move mesh to first fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]+np.cumsum(thxr)]
ii = np.argmin(abs(t_nx-fixed[1]))
thxr *= abs(fixed[1]-fixed[0])/np.sum(thxr[:ii])
if fixed.size > 2: # Move mesh to second fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]-np.cumsum(thxl)]
ii = np.argmin(abs(t_nx-fixed[2]))
thxl *= abs(fixed[2]-fixed[0])/np.sum(thxl[:ii])
# 1. Fill from center to left domain.
nl = np.sum((fixed[0]-np.cumsum(thxl)) > domain[0])+1
# 2. Fill from center to right domain.
nr = np.sum((fixed[0]+np.cumsum(thxr)) < domain[1])+1
# 3. Get remaining number of cells and check termination criteria.
nsdc = nl+nr # Number of domain cells.
nx_remain = nx-nsdc
# Not good, try next.
if nx_remain <= 0:
continue
# Create the current hx-array.
hx = np.r_[thxl[:nl][::-1], thxr[:nr]]
hxo = np.r_[thxl[:nl][::-1], thxr[:nr]]
# Get actual domain:
asurv_domain = [fixed[0]-np.sum(thxl[:nl]),
fixed[0]+np.sum(thxr[:nr])]
x0 = float(fixed[0]-np.sum(thxl[:nl]))
# Get actual stretching (differs in case of fixed layers).
sa_adj = np.max([hx[1:]/hx[:-1], hx[:-1]/hx[1:]])
# Loop over possible alphas for calc_domain.
for ca in np.arange(sa, alpha[1]+alpha[2]/2, alpha[2]):
# 4. Fill to left calc_domain.
thxl = hx[0]*ca**np.arange(1, nx_remain+1)
nl = np.sum((asurv_domain[0]-np.cumsum(thxl)) >
calc_domain[0])+1
# 5. Fill to right calc_domain.
thxr = hx[-1]*ca**np.arange(1, nx_remain+1)
nr = np.sum((asurv_domain[1]+np.cumsum(thxr)) <
calc_domain[1])+1
# 6. Get remaining number of cells and check termination
# criteria.
ncdc = nl+nr # Number of calc_domain cells.
nx_remain2 = nx-nsdc-ncdc
if nx_remain2 < 0: # Not good, try next.
continue
# Create hx-array.
nl += int(np.floor(nx_remain2/2)) # If uneven, add one cell
nr += int(np.ceil(nx_remain2/2)) # more on the right.
hx = np.r_[thxl[:nl][::-1], hx, thxr[:nr]]
# Calculate origin.
x0 = float(asurv_domain[0]-np.sum(thxl[:nl]))
# Mark it as finished and break out of the loop.
finished = True
break
if finished:
break
if finished:
break
# Check finished and print info about found grid.
if not finished:
# Throw message if no solution was found.
print("\n* ERROR :: No suitable grid found; relax your criteria.\n")
if raise_error:
raise ArithmeticError("No grid found!")
else:
hx, x0 = None, None
elif verb > 0:
print(f" Skin depth ", end="")
if res.size == 1:
print(f" [m] : {skind[0]:.0f}")
elif res.size == 2:
print(f"(m/l-r) [m] : {skind[0]:.0f} / {skind[1]:.0f}")
else:
print(f"(m/l/r) [m] : {skind[0]:.0f} / {skind[1]:.0f} / "
f"{skind[2]:.0f}")
print(f" Survey domain [m] : {domain[0]:.0f} - "
f"{domain[1]:.0f}")
print(f" Calculation domain [m] : {calc_domain[0]:.0f} - "
f"{calc_domain[1]:.0f}")
print(f" Final extent [m] : {x0:.0f} - "
f"{x0+np.sum(hx):.0f}")
extstr = f" Min/max cell width [m] : {min(hx):.0f} / "
alstr = f" Alpha survey"
nrstr = " Number of cells "
if not np.isclose(sa, sa_adj):
sastr = f"{sa:.3f} ({sa_adj:.3f})"
else:
sastr = f"{sa:.3f}"
print(extstr+f"{max(hxo):.0f} / {max(hx):.0f}")
print(alstr+f"/calc : {sastr} / {ca:.3f}")
print(nrstr+f"(s/c/r) : {nx} ({nsdc}/{ncdc}/{nx_remain2})")
print()
if return_info:
if not fixed.size > 1:
sa_adj = sa
info = {'dmin': dmin,
'dmax': np.nanmax(hx),
'amin': np.nanmin([ca, sa, sa_adj]),
'amax': np.nanmax([ca, sa, sa_adj])}
return hx, x0, info
else:
return hx, x0
def get_cell_numbers(max_nr, max_prime=5, min_div=3):
r"""Returns 'good' cell numbers for the multigrid method.
'Good' cell numbers are numbers which can be divided by 2 as many times as
possible. At the end there will be a low prime number.
The function adds all numbers :math:`p 2^n \leq M` for :math:`p={2, 3, ...,
p_\text{max}}` and :math:`n={n_\text{min}, n_\text{min}+1, ..., \infty}`;
:math:`M, p_\text{max}, n_\text{min}` correspond to `max_nr`, `max_prime`,
and `min_div`, respectively.
Parameters
----------
max_nr : int
Maximum number of cells.
max_prime : int
Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good upper
        limits in order to avoid overly large coarsest grids in the multigrid method.
Default is 5.
min_div : int
Minimum times the number can be divided by two.
Default is 3.
Returns
-------
numbers : array
Array containing all possible cell numbers from lowest to highest.
"""
# Primes till 20.
primes = np.array([2, 3, 5, 7, 11, 13, 17, 19])
# Sanity check; 19 is already ridiculously high.
if max_prime > primes[-1]:
print(f"* ERROR :: Highest prime is {max_prime}, "
"please use a value < 20.")
raise ValueError("Highest prime too high")
# Restrict to max_prime.
primes = primes[primes <= max_prime]
# Get possible values.
# Currently restricted to prime*2**30 (for prime=2 => 1,073,741,824 cells).
numbers = primes[:, None]*2**np.arange(min_div, 30)
# Get unique values.
numbers = np.unique(numbers)
# Restrict to max_nr and return.
return numbers[numbers <= max_nr]
def get_stretched_h(min_width, domain, nx, x0=0, x1=None, resp_domain=False):
"""Return cell widths for a stretched grid within the domain.
Returns `nx` cell widths within `domain`, where the minimum cell width is
`min_width`. The cells are not stretched within `x0` and `x1`, and outside
uses a power-law stretching. The actual stretching factor and the number of
    cells left and right of `x0` and `x1` are found in a minimization process.
The domain is not completely respected. The starting point of the domain
is, but the endpoint of the domain might slightly shift (this is more
likely the case for small `nx`, for big `nx` the shift should be small).
The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want
the domain to be respected absolutely, set ``resp_domain=True``. However,
be aware that this will introduce one stretch-factor which is different
from the other stretch factors, to accommodate the restriction. This
one-off factor is between the left- and right-side of `x0`, or, if `x1` is
provided, just after `x1`.
See Also
--------
get_hx_x0 : Get `hx` and `x0` for a flexible number of `nx` with
given bounds.
Parameters
----------
min_width : float
Minimum cell width. If x1 is provided, the actual minimum cell width
might be smaller than min_width.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
Default is 0.
x1 : float
If provided, then no stretching is applied between `x0` and `x1`. The
non-stretched part starts at `x0` and stops at the first possible
location at or after `x1`. `x1` is restricted to `domain`. This will
        adjust `min_width` so that an integer number of cells fit within `x0` and `x1`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
# Cast to arrays
domain = np.array(domain, dtype=float)
x0 = np.array(x0, dtype=float)
x0 = np.clip(x0, *domain) # Restrict to model domain
min_width = np.array(min_width, dtype=float)
if x1 is not None:
x1 = np.array(x1, dtype=float)
x1 = np.clip(x1, *domain) # Restrict to model domain
# If x1 is provided (a part is not stretched)
if x1 is not None:
# Store original values
xlim_orig = domain.copy()
nx_orig = int(nx)
x0_orig = x0.copy()
h_min_orig = min_width.copy()
# Get number of non-stretched cells
n_nos = int(np.ceil((x1-x0)/min_width))
# Re-calculate min_width to fit with x0-x1-limits:
min_width = (x1-x0)/n_nos
# Subtract one cell, because the standard scheme provides one
# min_width-cell.
n_nos -= 1
# Reset x0, because the first min_width comes from normal scheme
x0 += min_width
# Reset xmax for normal scheme
domain[1] -= n_nos*min_width
# Reset nx for normal scheme
nx -= n_nos
# If there are not enough points reset to standard procedure. The limit
# of five is arbitrary. However, nx should be much bigger than five
# anyways, otherwise stretched grid doesn't make sense.
if nx <= 5:
print("Warning :: Not enough points for non-stretched part,"
"ignoring therefore `x1`.")
domain = xlim_orig
nx = nx_orig
x0 = x0_orig
x1 = None
min_width = h_min_orig
# Get stretching factor (a = 1+alpha).
if min_width == 0 or min_width > np.diff(domain)/nx:
# If min_width is bigger than the domain-extent divided by nx, no
# stretching is required at all.
alpha = 0
else:
# Wrap _get_dx into a minimization function to call with fsolve.
def find_alpha(alpha, min_width, args):
"""Find alpha such that min(hx) = min_width."""
return min(get_hx(alpha, *args))/min_width-1
# Search for best alpha, must be at least 0
args = (domain, nx, x0)
alpha = max(0, optimize.fsolve(find_alpha, 0.02, (min_width, args)))
# With alpha get actual cell spacing with `resp_domain` to respect the
# users decision.
hx = get_hx(alpha, domain, nx, x0, resp_domain)
# Add the non-stretched center if x1 is provided
if x1 is not None:
hx = np.r_[hx[: np.argmin(hx)], np.ones(n_nos)*min_width,
hx[np.argmin(hx):]]
# Print warning min_width could not be respected.
if abs(hx.min() - min_width) > 0.1:
print(f"Warning :: Minimum cell width ({np.round(hx.min(), 2)} m) is "
"below `min_width`, because `nx` is too big for `domain`.")
return hx
def get_domain(x0=0, freq=1, res=0.3, limits=None, min_width=None,
fact_min=0.2, fact_neg=5, fact_pos=None):
r"""Get domain extent and minimum cell width as a function of skin depth.
Returns the extent of the calculation domain and the minimum cell width as
a multiple of the skin depth, with possible user restrictions on minimum
calculation domain and range of possible minimum cell widths.
.. math::
\delta &= 503.3 \sqrt{\frac{\rho}{f}} , \\
x_\text{start} &= x_0-k_\text{neg}\delta , \\
x_\text{end} &= x_0+k_\text{pos}\delta , \\
h_\text{min} &= k_\text{min} \delta .
Parameters
----------
x0 : float
Center of the calculation domain. Normally the source location.
Default is 0.
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
Default is 1 Hz.
res : float, optional
Resistivity (Ohm m) to calculate skin depth.
Default is 0.3 Ohm m (sea water).
limits : None or list
[start, end] of model domain. This extent represents the minimum extent
of the domain. The domain is therefore only adjusted if it has to reach
outside of [start, end].
Default is None.
min_width : None, float, or list of two floats
Minimum cell width is calculated as a function of skin depth:
fact_min*sd. If `min_width` is a float, this is used. If a list of
two values [min, max] are provided, they are used to restrain
min_width. Default is None.
fact_min, fact_neg, fact_pos : floats
The skin depth is multiplied with these factors to estimate:
- Minimum cell width (`fact_min`, default 0.2)
- Domain-start (`fact_neg`, default 5), and
- Domain-end (`fact_pos`, defaults to `fact_neg`).
Returns
-------
h_min : float
Minimum cell width.
domain : list
Start- and end-points of calculation domain.
"""
# Set fact_pos to fact_neg if not provided.
if fact_pos is None:
fact_pos = fact_neg
# Calculate the skin depth.
skind = 503.3*np.sqrt(res/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Estimate minimum cell width.
h_min = fact_min*skind
if min_width is not None: # Respect user input.
if np.array(min_width).size == 1:
h_min = min_width
else:
h_min = np.clip(h_min, *min_width)
# Estimate calculation domain.
domain = [x0-fact_neg*skind, x0+fact_pos*skind]
if limits is not None: # Respect user input.
domain = [min(limits[0], domain[0]), max(limits[1], domain[1])]
return h_min, domain
def get_hx(alpha, domain, nx, x0, resp_domain=True):
r"""Return cell widths for given input.
Find the number of cells left and right of `x0`, `nl` and `nr`
respectively, for the provided alpha. For this, we solve
.. math:: \frac{x_\text{max}-x_0}{x_0-x_\text{min}} =
\frac{a^{nr}-1}{a^{nl}-1}
where :math:`a = 1+\alpha`.
Parameters
----------
alpha : float
Stretching factor `a` is given by ``a=1+alpha``.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
if alpha <= 0.: # If alpha <= 0: equal spacing (no stretching at all)
hx = np.ones(nx)*np.diff(np.squeeze(domain))/nx
else: # Get stretched hx
a = alpha+1
# Get hx depending if x0 is on the domain boundary or not.
if np.isclose(x0, domain[0]) or np.isclose(x0, domain[1]):
            # Get all a's
alr = np.diff(domain)*alpha/(a**nx-1)*a**np.arange(nx)
if x0 == domain[1]:
alr = alr[::-1]
# Calculate differences
hx = alr*np.diff(domain)/sum(alr)
else:
# Find number of elements left and right by solving:
# (xmax-x0)/(x0-xmin) = a**nr-1/(a**nl-1)
nr = np.arange(2, nx+1)
er = (domain[1]-x0)/(x0-domain[0]) - (a**nr[::-1]-1)/(a**nr-1)
nl = np.argmin(abs(np.floor(er)))+1
nr = nx-nl
# Get all a's
al = a**np.arange(nl-1, -1, -1)
ar = a**np.arange(1, nr+1)
# Calculate differences
if resp_domain:
# This version honours domain[0] and domain[1], but to achieve
# this it introduces one stretch-factor which is different from
# all the others between al to ar.
hx = np.r_[al*(x0-domain[0])/sum(al),
ar*(domain[1]-x0)/sum(ar)]
else:
# This version moves domain[1], but each stretch-factor is
# exactly the same.
fact = (x0-domain[0])/sum(al) # Take distance from al.
hx = np.r_[al, ar]*fact
# Note: this hx is equivalent as providing the following h
# to TensorMesh:
# h = [(min_width, nl-1, -a), (min_width, n_nos+1),
# (min_width, nr, a)]
return hx
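A small sketch of how the pieces above fit together: one call to `get_hx_h0` per axis, then a `TensorMesh`. The frequency, resistivities and domain extents are arbitrary example values, not recommendations, and the resulting cell counts depend on them.

hx, x0 = get_hx_h0(freq=1.0, res=[0.3, 1.0], domain=[-2000, 2000], verb=0)
hy, y0 = get_hx_h0(freq=1.0, res=[0.3, 1.0], domain=[-2000, 2000], verb=0)
hz, z0 = get_hx_h0(freq=1.0, res=[0.3, 1.0, 1e8], domain=[-3000, 0], verb=0)
grid = TensorMesh([hx, hy, hz], x0=np.array([x0, y0, z0]))
print(grid)  # e.g. "TensorMesh: 64 x 64 x 96 (393,216)"; sizes depend on the inputs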
| 35.833898
| 79
| 0.590578
| 4,601
| 31,713
| 4.010433
| 0.15627
| 0.021244
| 0.014741
| 0.002601
| 0.208649
| 0.168058
| 0.143562
| 0.127683
| 0.121504
| 0.104
| 0
| 0.023983
| 0.29789
| 31,713
| 884
| 80
| 35.874434
| 0.804725
| 0.542743
| 0
| 0.138408
| 0
| 0.00692
| 0.107544
| 0.002125
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041522
| false
| 0
| 0.010381
| 0
| 0.100346
| 0.062284
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c88139e81ccf155fe77c897a8674f07ab2d5797
| 1,461
|
py
|
Python
|
common-scrapers/common_src/scrapers/second_extinction.py
|
mrPaintMan/blog-scraper
|
9b1ff3d398bd23d799d86c9a62ec76a6950555cc
|
[
"MIT"
] | null | null | null |
common-scrapers/common_src/scrapers/second_extinction.py
|
mrPaintMan/blog-scraper
|
9b1ff3d398bd23d799d86c9a62ec76a6950555cc
|
[
"MIT"
] | null | null | null |
common-scrapers/common_src/scrapers/second_extinction.py
|
mrPaintMan/blog-scraper
|
9b1ff3d398bd23d799d86c9a62ec76a6950555cc
|
[
"MIT"
] | 1
|
2020-03-11T14:49:00.000Z
|
2020-03-11T14:49:00.000Z
|
from common_src.lib.model.post import Post
from common_src.lib.model.source import Source
from common_src.scrapers.abstract_scraper import make_soup, remove_dups, now
SOURCE_CODE = "second_extinction"
WEBSITE = "https://www.secondextinctiongame.com/news"
ALT_IMAGE = 'https://www.secondextinctiongame.com/static/242486b363d867dc483deb6d7038dde1/d8255/se_screenshot_5.jpg'
FILENAME = "../resources/data/second_extinction.txt"
def get_source():
name = "Second Extinction"
description = 'Second Extinction is a first person shooter game where earth has been invaded by mutated dinosaurs.'
profile_image = 'https://www.secondextinctiongame.com/static/logo-0d52f8575a251eff8ebd6e2d6bd6c51b.png'
return Source(SOURCE_CODE, name, description, profile_image, ALT_IMAGE, None)
def scrape():
soup = make_soup(WEBSITE)
base_site = "https://www.secondextinctiongame.com"
data = []
for post in soup.findAll("article", {"class": "cgYILD"}):
date = post.find("time").text.replace("-", "") + "0000"
title = post.find("h3").text.strip()
link = base_site + post.find("a").get("href")
alt_image = ALT_IMAGE
image = base_site + post.find("picture").find("img").get("src").replace(" ", "%20")
data.append(Post(None, date, title, link, image, alt_image, SOURCE_CODE, None))
if len(data) % 25 == 0:
print(now() + f"Processed {len(data)} posts")
return remove_dups(data)
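A hedged invocation sketch; it assumes network access and that the site's markup still matches the selectors used in scrape().

if __name__ == '__main__':
    posts = scrape()
    print(now() + f"Scraped {len(posts)} unique posts from {WEBSITE}")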
| 39.486486
| 119
| 0.699521
| 188
| 1,461
| 5.297872
| 0.5
| 0.040161
| 0.11245
| 0.124498
| 0.126506
| 0.084337
| 0
| 0
| 0
| 0
| 0
| 0.043265
| 0.161533
| 1,461
| 36
| 120
| 40.583333
| 0.769796
| 0
| 0
| 0
| 0
| 0
| 0.351814
| 0.026694
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.269231
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c88d1e1834d792edf9c14b13846bd1ee7d80360
| 3,860
|
py
|
Python
|
systems/ILSVRC12/AlexNet/alexnet.py
|
mdatres/quantlab
|
09fb24ede78f49768f829afe0fac2ac291b8fd4f
|
[
"Apache-2.0"
] | 7
|
2021-07-01T17:02:50.000Z
|
2022-03-29T10:54:41.000Z
|
systems/ILSVRC12/AlexNet/alexnet.py
|
mdatres/quantlab
|
09fb24ede78f49768f829afe0fac2ac291b8fd4f
|
[
"Apache-2.0"
] | null | null | null |
systems/ILSVRC12/AlexNet/alexnet.py
|
mdatres/quantlab
|
09fb24ede78f49768f829afe0fac2ac291b8fd4f
|
[
"Apache-2.0"
] | 2
|
2021-07-10T20:57:06.000Z
|
2022-01-02T10:10:25.000Z
|
#
# alexnet.py
#
# Author(s):
# Matteo Spallanzani <spmatteo@iis.ee.ethz.ch>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
class AlexNet(nn.Module):
def __init__(self, use_bn: bool, num_classes: int = 1000, seed : int = -1) -> None:
super(AlexNet, self).__init__()
self.features = self._make_features(use_bn)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = self._make_classifier(num_classes)
self._initialize_weights(seed)
def _make_features(self, use_bn: bool) -> nn.Sequential:
modules = []
# conv 1
modules += [nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(64)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 2
modules += [nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(192)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 3
modules += [nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(384)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 4
modules += [nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 5
modules += [nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
return nn.Sequential(*modules)
def _make_classifier(self, num_classes: int) -> nn.Sequential:
modules = []
# dropout
modules += [nn.Dropout()]
# linear 1
modules += [nn.Linear(256 * 6 * 6, 4096)]
modules += [nn.ReLU(inplace=True)]
# dropout
modules += [nn.Dropout()]
# linear 2
modules += [nn.Linear(4096, 4096)]
modules += [nn.ReLU(inplace=True)]
# linear 3
modules += [nn.Linear(4096, num_classes)]
return nn.Sequential(*modules)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self, seed: int = -1):
if seed >= 0:
torch.manual_seed(seed)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
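A quick smoke-test sketch for the network above, forwarding a dummy ImageNet-sized batch; the batch size of 2 is arbitrary.

if __name__ == '__main__':
    net = AlexNet(use_bn=True, num_classes=1000, seed=0)
    x = torch.randn(2, 3, 224, 224)
    print(net(x).shape)  # -> torch.Size([2, 1000])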
| 31.900826
| 91
| 0.591192
| 521
| 3,860
| 4.276392
| 0.285988
| 0.100987
| 0.040844
| 0.062837
| 0.386445
| 0.335278
| 0.310144
| 0.310144
| 0.310144
| 0.274237
| 0
| 0.048149
| 0.279016
| 3,860
| 120
| 92
| 32.166667
| 0.752425
| 0.189378
| 0
| 0.377049
| 0
| 0
| 0.003556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.032787
| 0
| 0.180328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c8a214cb9301b78671cb8aa70f1cebef2a6167b
| 448
|
py
|
Python
|
e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | 18
|
2020-06-05T18:17:40.000Z
|
2022-03-11T23:25:21.000Z
|
e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0011_customersetting'),
]
operations = [
migrations.AlterField(
model_name='customersetting',
name='bounce',
field=models.BooleanField(default=True, verbose_name='\u5f00\u542f\u9000\u4fe1'),
),
]
| 22.4
| 93
| 0.625
| 41
| 448
| 6.634146
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050746
| 0.252232
| 448
| 19
| 94
| 23.578947
| 0.761194
| 0.046875
| 0
| 0
| 0
| 0
| 0.162353
| 0.056471
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c8eddd2bd80bb485d60b7d54110b5642d861af4
| 16,525
|
py
|
Python
|
mainTrain.py
|
PolarizedLightFieldMicroscopy/LFMNet2
|
c9b064d7625e018ef54b8dd8a0e53801c4565397
|
[
"Apache-2.0"
] | null | null | null |
mainTrain.py
|
PolarizedLightFieldMicroscopy/LFMNet2
|
c9b064d7625e018ef54b8dd8a0e53801c4565397
|
[
"Apache-2.0"
] | null | null | null |
mainTrain.py
|
PolarizedLightFieldMicroscopy/LFMNet2
|
c9b064d7625e018ef54b8dd8a0e53801c4565397
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils import data
from torch import optim
import torchvision.models as models
from torch.autograd import Variable
import torchvision as tv
import random
import math
import time
from datetime import datetime
import os
import argparse
import subprocess
from util.LFUtil import *
import numpy as np
from networks.LFMNet import LFMNet
def main(args=None):
# # Arguments
# parser = argparse.ArgumentParser()
# # Number of epochs
# parser.add_argument('--epochs', type=int, default=1000)
# # Validate every n percentage of the data
# parser.add_argument('--valEvery', type=float, default=0.25)
# # Image indices to use for training and validation
# parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,5,1)))
# # List of GPUs to use: 0 1 2 for example
# parser.add_argument('--GPUs', nargs='+', type=int, default=None)
# # Batch size
# parser.add_argument('--batchSize', type=int, default=128)
# # Percentage of the data to use for validation, from 0 to 1
# parser.add_argument('--validationSplit', type=float, default=0.1)
# # Bias initialization value
# parser.add_argument('--biasVal', type=float, default=0.1)
# # Learning rate
# parser.add_argument('--learningRate', type=float, default=0.001)
# # Use bias flag
# parser.add_argument('--useBias', type=str2bool, default=True)
# # Use skip connections flag
# parser.add_argument('--useSkipCon', type=str2bool, default=False)
# # User selected random seed
# parser.add_argument('--randomSeed', type=int, default=None)
# # fov of input or neighborhood around lenslet to reconstruct
# parser.add_argument('--fovInput', type=int, default=9)
# # nT: number of lenslets to reconstruct simultaneously at training time
# parser.add_argument('--neighShape', type=int, default=3)
# # Flag to use shallow or large U-net
# parser.add_argument('--useShallowUnet', type=str2bool, default=True)
# # Lower threshold of GT stacks, to get rid of autofluorescence
# parser.add_argument('--ths', type=float, default=0.03)
# # Path to dataset
# parser.add_argument('--datasetPath', nargs='?', default="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5")
# # Path to directory where models and tensorboard logs are stored
# parser.add_argument('--outputPath', nargs='?', default="runs/")
# # Prefix for current output folder
# parser.add_argument('--outputPrefix', nargs='?', default="")
# # Path to model in case of continuing a training
# parser.add_argument('--checkpointPath', nargs='?', default=None)
# args = parser.parse_args()
nImgs = len(args.imagesToUse)
# Setup multithreading
num_workers = getThreads()
if num_workers!=0:
torch.set_num_threads(num_workers)
if not torch.cuda.is_available():
print("GPU initialization error")
exit(-1)
if torch.cuda.is_available():
print ("Cuda is available")
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
# Select GPUs to use
args.GPUs = list(range(torch.cuda.device_count())) if args.GPUs is None else args.GPUs
print('Using GPUs: ' + str(args.GPUs))
device_ids = args.GPUs
# Set common random seed
if args.randomSeed is not None:
np.random.seed(args.randomSeed)
torch.manual_seed(args.randomSeed)
# Load checkpoint if provided
if args.checkpointPath is not None:
checkpointPath = args.checkpointPath
checkpoint = torch.load(checkpointPath)
# overwrite args
args = checkpoint['args']
args.checkpointPath = checkpointPath
# set Device to use
device = torch.device("cuda:"+str(device_ids[0]) if torch.cuda.is_available() else "cpu")
# Create unique label
today = datetime.now()
# Get commit number
# label = subprocess.check_output(["git", "describe", "--always"]).strip()
#specific to MBL lab workstation
label = subprocess.check_output(["C:/Program Files/git/bin/git", "describe", "--always"]).strip()
comment = today.strftime('%Y_%m_%d__%H%M%S') + "_"+ str(args.useBias) +"B_"+str(args.biasVal)+"bias_" + str(nImgs) + \
"I_"+ str(args.batchSize)+"BS_"+str(args.useSkipCon)+"Sk_" + str(args.fovInput) + "FOV_" + str(args.neighShape) + "nT_" \
+ str(args.ths) + "ths_" + str(label.decode("utf-8") ) + "_commit__" + args.outputPrefix
# Create output folder
save_folder = args.outputPath + "/" + comment
# If asked to continue a training, save in the same folder
if args.checkpointPath is not None:
save_folder = os.path.split(args.checkpointPath)[0]
print(save_folder)
# Create summary writer to log stuff
writer = SummaryWriter(log_dir=save_folder)
writer.add_text('Description',comment,0)
writer.flush()
# Load dataset
all_data = Dataset(args.datasetPath, args.randomSeed, \
fov=args.fovInput, neighShape=args.neighShape, img_indices=args.imagesToUse, get_full_imgs=False, center_region=None)
# Split validation and testing
train_size = int((1 - args.validationSplit) * len(all_data))
test_size = len(all_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(all_data, [train_size, test_size])
# Create data loaders
train_dataset = data.DataLoader(train_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
test_dataset = data.DataLoader(test_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
validate_every = np.round(len(train_dataset)*args.valEvery)
# Get Dataset information
nDepths = all_data.get_n_depths()
volShape, LFshape = all_data.__shape__()
LFshape = LFshape[0:4]
lateralTile = int(math.sqrt(nDepths))
# Find normalization values
maxInputTrain, maxVolumeTrain = all_data.get_max()
maxInputTest, maxVolumeTest = all_data.get_max()
# Create network
net = LFMNet(nDepths, args.useBias, args.useSkipCon, LFshape, LFfov=args.fovInput, use_small_unet=args.useShallowUnet).to(device)
optimizer = optim.Adam(net.parameters(), lr=args.learningRate)
lossFunction = nn.L1Loss()
# Create SSIM criteria
ssim = SSIM()
ssim.eval()
# Init bias and weights if needed
if args.useBias:
def bias_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
if m.bias is not None:
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
net.apply(bias_init)
# Load network from checkpoint
if args.checkpointPath is not None:
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochStart = checkpoint['epoch']
epochs = args.epochs + epochStart
train_loss = checkpoint['loss']
# Start distributed data parallel, as it's faster than DataParallel
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '1234'+str(device_ids[0])
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)
# Move network to distributed data parallel
net = nn.parallel.DistributedDataParallel(net, device_ids=args.GPUs, output_device=args.GPUs[0]).to(device)
# timers
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
global_it_counter = 0
# define indices to grab for tensorboard visualization
indices_to_show = torch.randperm(test_size)[0:8]
# Init arrays to store losses
train_losses, test_losses = [], []
test_loss = 0
epochStart = 0 if args.checkpointPath is None else epochStart  # keep the resumed epoch when loading a checkpoint
# Start training
for epoch in range(epochStart, args.epochs):
net.train()
torch.set_grad_enabled(True)
torch.cuda.empty_cache()
train_loss = 0
print('Training')
global_it_counter = 0
for nBatch,(inputs,labels) in enumerate(train_dataset):
# compute current iteration
curr_it = epoch*len(train_dataset) + nBatch
# start timer
start.record()
print('ep: ' + str(epoch) + ' ' + str(nBatch+1) + '/' + str(len(train_dataset)) + ' currIt: ' + str(curr_it))
optimizer.zero_grad()
# load data to gpu and normalize from 0 to 1
inputGPU = inputs.float().to(device) / maxInputTrain  # normalize training inputs with the training maximum
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
if args.ths!=0:
outputsGT = imadjust(outputsGT, args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
# Predict
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
loss.backward()
train_loss += loss.item() / nDepths
optimizer.step()
global_it_counter += inputs.shape[0]
# Record training time
end.record()
torch.cuda.synchronize()
end_time = start.elapsed_time(end)
# Compute time per sample
elapsed_time = end_time/inputs.shape[0]
# Check if validation is required
if nBatch%validate_every==0:
print(comment)
# Write training images to tensorboard
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# Select some images in the batch for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:4]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
gt = outputsGT[0,:,:,:,:].sum(3).repeat(3,1,1)
gt /= gt.max()
# Write to tensorboard
writer.add_image('z_proj_train',gt,curr_it)
writer.add_image('images_train_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_train', gridPred, curr_it)
writer.add_image('outputRGB_train_GT', gridGT, curr_it)
writer.add_image('input_train', gridInput, curr_it)
writer.add_scalar('Loss/train', train_loss/global_it_counter, curr_it)
writer.add_scalar('times/train', elapsed_time, curr_it)
# Restart
train_loss = 0.0
global_it_counter = 0
print('Validating')
net.eval()
with torch.no_grad():
avg_psnr = 0
avg_ssim = 0
test_loss = 0
start.record()
for nBatch,(inputs,labels) in enumerate(test_dataset):
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
outputsGT = imadjust(outputsGT,args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
test_loss += loss.item() / nDepths
# Compute PSNR
lossMSE = nn.functional.mse_loss(outputsVol.to(device).detach(), outputsGT.to(device).detach())
avg_psnr += 10 * math.log10(1 / lossMSE.item())
# Compute ssim
avg_ssim += ssim(outputsVol[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device), outputsGT[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device)).sum()
end.record()
torch.cuda.synchronize()
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# process some for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:lastBatchSize]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
# Write to tensorboard
writer.add_image('images_val_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_test', gridPred, curr_it)
writer.add_image('outputRGB_test_GT', gridGT, curr_it)
writer.add_image('input_test', gridInput, curr_it)
writer.add_scalar('Loss/test', test_loss/len(test_dataset), curr_it)
writer.add_scalar('Loss/psnr_val', avg_psnr/len(test_dataset), curr_it)
writer.add_scalar('Loss/ssim_val', avg_ssim/len(test_dataset), curr_it)
writer.add_scalar('LearningRate', args.learningRate, curr_it)
writer.add_scalar('times/val', start.elapsed_time(end)/test_size, curr_it)
net.train()
if epoch%2==0:
torch.save({
'epoch': epoch,
'args' : args,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': train_loss,
'dataset_path': args.datasetPath},
save_folder + '/model_'+str(epoch))
print(f"Epoch {epoch + 1}/{args.epochs}.. "
f"Train loss: {train_loss / len(train_dataset):.7f}.. "
f"Test loss: {test_loss / len(test_dataset):.7f}.. ")
if __name__ == '__main__':
# NOTE: with the argparse block above commented out, main() needs a
# pre-built args namespace (see the sketch below); args=None will fail.
main()
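# --- Editor's illustrative sketch (not part of the original file) ---
# One way to call main() while the argparse block is disabled: build the
# namespace by hand with the attributes the function reads (names and
# defaults taken from the commented-out parser above).
#   import argparse
#   cfg = argparse.Namespace(epochs=1000, valEvery=0.25, imagesToUse=list(range(5)),
#                            GPUs=None, batchSize=128, validationSplit=0.1, biasVal=0.1,
#                            learningRate=0.001, useBias=True, useSkipCon=False,
#                            randomSeed=None, fovInput=9, neighShape=3, useShallowUnet=True,
#                            ths=0.03, datasetPath="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5",
#                            outputPath="runs/", outputPrefix="", checkpointPath=None)
#   main(cfg)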
| 47.34957
| 191
| 0.616884
| 1,938
| 16,525
| 5.104747
| 0.216202
| 0.017285
| 0.032649
| 0.021227
| 0.341555
| 0.312342
| 0.279288
| 0.251794
| 0.241585
| 0.223795
| 0
| 0.01405
| 0.26348
| 16,525
| 349
| 192
| 47.34957
| 0.798784
| 0.198245
| 0
| 0.253394
| 0
| 0.004525
| 0.064145
| 0.007533
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00905
| false
| 0
| 0.085973
| 0
| 0.095023
| 0.049774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c8fed7e472142a2a42ee1131ff8f6b28599bc16
| 1,295
|
py
|
Python
|
tools/utils.py
|
valsworthen/toxic-comment-classification
|
12ceb4d78410a14fba05e43f6f424cec52e6665d
|
[
"MIT"
] | 10
|
2018-03-26T05:46:39.000Z
|
2020-04-30T08:03:18.000Z
|
tools/utils.py
|
valsworthen/toxic_comment_classification
|
12ceb4d78410a14fba05e43f6f424cec52e6665d
|
[
"MIT"
] | null | null | null |
tools/utils.py
|
valsworthen/toxic_comment_classification
|
12ceb4d78410a14fba05e43f6f424cec52e6665d
|
[
"MIT"
] | null | null | null |
"""Utilities"""
import pandas as pd
import numpy as np
from attrdict import AttrDict
import yaml
def average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.zeros((num_samples, num_labels))
for preds_i in cv_predictions:
preds += preds_i
preds /= n_splits
return preds
def geom_average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.ones((num_samples, num_labels))
for preds_i in cv_predictions:
preds *= preds_i
preds = preds **(1/n_splits)
return preds
def create_submission(preds, filename):
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
subm = pd.read_csv('input/sample_submission.csv')
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(preds, columns = labels)], axis=1)
submission.to_csv(filename, index=False)
def format_time(sec):
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
return "{:.0f}h {:.0f}min {:.0f}s".format(h, m, s)
def read_yaml(filepath):
with open(filepath) as f:
config = yaml.load(f, Loader=yaml.SafeLoader)  # explicit Loader required by recent PyYAML
return AttrDict(config)
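# --- Editor's illustrative sketch (not part of the original file) ---
# Shows how the helpers above fit together, assuming `fold_preds` holds one
# prediction array per fold with shape (num_samples, num_labels).
if __name__ == '__main__':
    fold_preds = [np.random.rand(100, 6) for _ in range(5)]
    avg_preds = average_predictions(fold_preds, n_splits=5, num_samples=100, num_labels=6)
    geo_preds = geom_average_predictions(fold_preds, n_splits=5, num_samples=100, num_labels=6)
    print(avg_preds.shape, geo_preds.shape)   # (100, 6) (100, 6)
    print(format_time(3725))                  # 1h 2min 5s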
| 33.205128
| 93
| 0.67722
| 188
| 1,295
| 4.5
| 0.398936
| 0.061466
| 0.047281
| 0.073286
| 0.44208
| 0.392435
| 0.392435
| 0.392435
| 0.392435
| 0.392435
| 0
| 0.021926
| 0.189961
| 1,295
| 38
| 94
| 34.078947
| 0.784557
| 0.074903
| 0
| 0.133333
| 0
| 0
| 0.088832
| 0.022843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.133333
| 0
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c90552cf52e653e519bda73228f741afee1058c
| 3,148
|
py
|
Python
|
pyhelp/scripts/produce_meteo_maps.py
|
jnsebgosselin/help
|
f0194a96ba7e1474fe1864d79447ee20cee949ec
|
[
"MIT"
] | 12
|
2019-03-11T12:38:35.000Z
|
2021-06-26T03:40:18.000Z
|
pyhelp/scripts/produce_meteo_maps.py
|
jnsebgosselin/help
|
f0194a96ba7e1474fe1864d79447ee20cee949ec
|
[
"MIT"
] | 23
|
2018-11-22T15:16:12.000Z
|
2022-03-25T12:55:33.000Z
|
pyhelp/scripts/produce_meteo_maps.py
|
jnsebgosselin/help
|
f0194a96ba7e1474fe1864d79447ee20cee949ec
|
[
"MIT"
] | 2
|
2019-04-18T17:47:00.000Z
|
2021-08-31T04:45:30.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 10:54:25 2018
@author: jsgosselin
"""
# ---- Standard Library Imports
from itertools import product
import os.path as osp
import os
# ---- Third Party Imports
import netCDF4
from geopandas import GeoDataFrame
import pandas as pd
from shapely.geometry import Point, Polygon
import numpy as np
dirpath_netcdf = "D:/MeteoGrilleDaily"
# %% Get lat/lon from the netCDF
filename = osp.join(dirpath_netcdf, 'GCQ_v2_2000.nc')
netcdf_dset = netCDF4.Dataset(filename, 'r+')
lat = np.array(netcdf_dset['lat'])
lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
# %% Read the weather data from the InfoClimat grid
stack_precip = []
stack_tasmax = []
stack_tasmin = []
nyear = 0
for year in range(2000, 2015):
print("\rProcessing year %d" % year, end=' ')
filename = osp.join(dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
stack_precip.append(np.array(netcdf_dset['pr']))
stack_tasmax.append(np.array(netcdf_dset['tasmax']))
stack_tasmin.append(np.array(netcdf_dset['tasmin']))
netcdf_dset.close()
nyear += 1
print('')
daily_precip = np.vstack(stack_precip)
daily_tasmax = np.vstack(stack_tasmax)
daily_tasmin = np.vstack(stack_tasmin)
daily_tasavg = (daily_tasmax + daily_tasmin) / 2
yearly_avg_precip = np.sum(daily_precip, axis=0) / nyear
yearly_avg_tasavg = np.average(daily_tasavg, axis=0)
yearly_avg_tasmax = np.average(daily_tasmax, axis=0)
yearly_avg_tasmin = np.average(daily_tasmin, axis=0)
# %% Create a grid
Np = len(lat) * len(lon)
geometry = []
arr_yearly_avg_precip = np.zeros(Np)
arr_avg_yearly_tasavg = np.zeros(Np)
arr_avg_yearly_tasmax = np.zeros(Np)
arr_avg_yearly_tasmin = np.zeros(Np)
i = 0
dx = dy = 0.1/2
for j, k in product(range(len(lat)), range(len(lon))):
print("\rProcessing cell %d of %d" % (i, Np), end=' ')
point = Point((lon[k], lat[j]))
# polygon = Polygon([(lon[k]-dx, lat[j]-dy),
# (lon[k]-dx, lat[j]+dy),
# (lon[k]+dx, lat[j]+dy),
# (lon[k]+dx, lat[j]-dy)])
geometry.append(point)
arr_yearly_avg_precip[i] = yearly_avg_precip[j, k]
arr_avg_yearly_tasavg[i] = yearly_avg_tasavg[j, k]
arr_avg_yearly_tasmax[i] = yearly_avg_tasmax[j, k]
arr_avg_yearly_tasmin[i] = yearly_avg_tasmin[j, k]
i += 1
print("\rProcessing cell %d of %d" % (i, Np))
# %%
print('\rFormatting the data in a shapefile...', end=' ')
df = pd.DataFrame(data={'precip': arr_yearly_avg_precip,
'tasavg': arr_avg_yearly_tasavg,
'tasmax': arr_avg_yearly_tasmax,
'tasmin': arr_avg_yearly_tasmin})
crs = "+proj=longlat +ellps=GRS80 +datum=NAD83 +towgs84=0,0,0,0,0,0,0 +no_defs"
gdf = GeoDataFrame(df, crs=crs, geometry=geometry)
print('\rFormatting the data in a shapefile... done')
print('\rSaving to Shapefile...', end=' ')
path_shp_out = ("D:/MeteoGrilleDaily/grid_yearly_meteo/grid_yearly_meteo.shp")
# make sure the parent folder exists before writing the .shp file
if not osp.exists(osp.dirname(path_shp_out)):
os.makedirs(osp.dirname(path_shp_out))
gdf.to_file(path_shp_out)
print('\rSaving to Shapefile... done', end=' ')
| 29.420561
| 79
| 0.67249
| 481
| 3,148
| 4.191268
| 0.261954
| 0.049107
| 0.053571
| 0.042163
| 0.239583
| 0.184524
| 0.117063
| 0.050595
| 0.02381
| 0.02381
| 0
| 0.021211
| 0.176302
| 3,148
| 106
| 80
| 29.698113
| 0.756267
| 0.129288
| 0
| 0.058824
| 0
| 0.014706
| 0.159383
| 0.029747
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c90b62f02619f835bc7d89b23d75b9ecf0b6be0
| 1,803
|
py
|
Python
|
platform/core/tests/test_activitylogs/test_service.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/tests/test_activitylogs/test_service.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/tests/test_activitylogs/test_service.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
# pylint:disable=ungrouped-imports
import uuid
import pytest
import activitylogs
from db.models.activitylogs import ActivityLog
from events.registry.experiment import EXPERIMENT_DELETED_TRIGGERED
from events.registry.user import USER_ACTIVATED
from factories.factory_experiments import ExperimentFactory
from factories.factory_users import UserFactory
from tests.base.case import BaseTest
@pytest.mark.activitylogs_mark
class ActivityLogsTest(BaseTest):
def setUp(self):
super().setUp()
self.experiment = ExperimentFactory()
self.admin = UserFactory(is_staff=True, is_superuser=True)
self.user = UserFactory()
def test_record_creates_activities(self):
assert ActivityLog.objects.count() == 0
activitylogs.record(ref_id=uuid.uuid4(),
event_type=USER_ACTIVATED,
instance=self.user,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 1
activity = ActivityLog.objects.last()
assert activity.event_type == USER_ACTIVATED
assert activity.content_object == self.user
assert activity.actor == self.admin
activitylogs.record(ref_id=uuid.uuid4(),
event_type=EXPERIMENT_DELETED_TRIGGERED,
instance=self.experiment,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 2
activity = ActivityLog.objects.last()
assert activity.event_type == EXPERIMENT_DELETED_TRIGGERED
assert activity.content_object == self.experiment
assert activity.actor == self.admin
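# --- Editor's note (not part of the original file) ---
# These tests can be selected by their marker, e.g.:
#   pytest -m activitylogs_mark platform/core/tests/test_activitylogs/test_service.py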
| 36.795918
| 68
| 0.658902
| 187
| 1,803
| 6.197861
| 0.342246
| 0.054357
| 0.067299
| 0.075065
| 0.434858
| 0.288179
| 0.288179
| 0.288179
| 0.125971
| 0.125971
| 0
| 0.003776
| 0.265668
| 1,803
| 48
| 69
| 37.5625
| 0.871601
| 0.017748
| 0
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236842
| 1
| 0.052632
| false
| 0
| 0.236842
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c91c9a08ce7e29a5358fe242bc8b960fc941c8f
| 1,844
|
py
|
Python
|
software/hippietrap/gradient.py
|
mayhem/led-chandelier
|
899caa8d81e6aac6e954f78b4f5b4ab101bf5257
|
[
"MIT"
] | 2
|
2018-09-20T08:36:11.000Z
|
2019-08-25T20:06:11.000Z
|
software/hippietrap/gradient.py
|
mayhem/led-chandelier
|
899caa8d81e6aac6e954f78b4f5b4ab101bf5257
|
[
"MIT"
] | null | null | null |
software/hippietrap/gradient.py
|
mayhem/led-chandelier
|
899caa8d81e6aac6e954f78b4f5b4ab101bf5257
|
[
"MIT"
] | 1
|
2020-12-12T18:21:18.000Z
|
2020-12-12T18:21:18.000Z
|
from colorsys import hsv_to_rgb
from math import fabs, fmod
import os
from hippietrap.color import Color
class Gradient(object):
def __init__(self, palette, num_leds = 1):
# palettes are in the format [ (.345, (128, 0, 128)) ]
self._validate_palette(palette)
self.palette = palette
self.num_leds = num_leds
self.led_scale = 1.0
self.led_offset = 0.0
def _validate_palette(self, palette):
if len(palette) < 2:
raise ValueError("Palette must have at least two points.")
if palette[0][0] > 0.0:
raise ValueError("First point in palette must be less than or equal to 0.0")
if palette[-1][0] < 1.0:
raise ValueError("Last point in palette must be greater than or equal to 1.0")
def set_scale(self, scale):
self.led_scale = scale
def set_offset(self, offset):
self.led_offset = offset
def get_color(self, offset):
if offset < 0.0 or offset > 1.0:
raise IndexError("Invalid offset.")
for index in range(len(self.palette)):
# skip the first item
if index == 0:
continue
if self.palette[index][0] >= offset:
section_begin_offset = self.palette[index-1][0]
section_end_offset = self.palette[index][0]
percent = (offset - section_begin_offset) / (section_end_offset - section_begin_offset)
new_color = []
for color in range(3):
new_color.append(int(self.palette[index-1][1][color] +
((self.palette[index][1][color] - self.palette[index-1][1][color]) * percent)))
return Color(min(new_color[0], 255), min(new_color[1], 255), min(new_color[2], 255))
assert False
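# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal two-point palette fading from black to purple; get_color()
# linearly interpolates the RGB components between the surrounding points.
if __name__ == '__main__':
    g = Gradient([(0.0, (0, 0, 0)), (1.0, (128, 0, 128))])
    mid = g.get_color(0.5)   # component values (64, 0, 64)
    print(mid)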
| 29.741935
| 107
| 0.58026
| 248
| 1,844
| 4.173387
| 0.310484
| 0.10628
| 0.092754
| 0.0657
| 0.104348
| 0.0657
| 0
| 0
| 0
| 0
| 0
| 0.043375
| 0.312364
| 1,844
| 61
| 108
| 30.229508
| 0.772871
| 0.036876
| 0
| 0
| 0
| 0
| 0.094191
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.131579
| false
| 0
| 0.105263
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c962e345da89a5eb411a0b3f49cfb775dfe43b5
| 1,850
|
py
|
Python
|
src/http_pick/pickergui.py
|
thomaspcole/http-pick
|
c470869878483241672c2928fd85458ab30555c4
|
[
"MIT"
] | null | null | null |
src/http_pick/pickergui.py
|
thomaspcole/http-pick
|
c470869878483241672c2928fd85458ab30555c4
|
[
"MIT"
] | null | null | null |
src/http_pick/pickergui.py
|
thomaspcole/http-pick
|
c470869878483241672c2928fd85458ab30555c4
|
[
"MIT"
] | null | null | null |
from PyQt5.QtWidgets import (QMainWindow, QToolButton, QWidget, QHBoxLayout)
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
from math import floor
import sys
class MainWindow(QMainWindow):
def __init__(self, browsers, iconsize=72, displayappname=False, x=0, y=0, callback=lambda v: print(v)):
super().__init__()
self.setFocus()
self.centralwidget = QWidget()
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setCentralWidget(self.centralwidget)
self.lay = QHBoxLayout(self.centralwidget)
self.lay.setContentsMargins(0,0,0,0)
self.lay.setSpacing(0)
xOffset = floor((iconsize*len(browsers))/2)
yOffset = floor(iconsize*1.25)
self.move(x-xOffset,y-yOffset)
for b in browsers:
self.btn = QToolButton(self)
if '/' in b: #'Normal' launch path
path = b
appname = path.split('/')
elif '.' in b: #Flatpak ref
path = b
appname = path.split('.')
else: #fallback so path and appname are always defined
path = b
appname = [b]
self.btn.setIcon(QIcon.fromTheme(appname[-1]))
self.btn.setIconSize(QtCore.QSize(iconsize,iconsize))
self.btn.setStyleSheet("QToolButton {background-color: transparent; border: 0px; color: white;}")
if(displayappname):
self.btn.setToolButtonStyle(QtCore.Qt.ToolButtonStyle.ToolButtonTextUnderIcon)
self.btn.setText(appname[-1].capitalize())
self.btn.clicked.connect(lambda v, path=path : callback(path))
self.lay.addWidget(self.btn)
def on_focusChanged(self):
if(self.isActiveWindow() == False):
quit()
| 40.217391
| 148
| 0.591892
| 191
| 1,850
| 5.680628
| 0.455497
| 0.051613
| 0.03871
| 0.04424
| 0.03871
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014638
| 0.298378
| 1,850
| 46
| 149
| 40.217391
| 0.821263
| 0.016757
| 0
| 0.052632
| 0
| 0
| 0.041254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.131579
| 0
| 0.210526
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c9707dc1574081d46ce438a0fbd3d659ca252fc
| 7,985
|
py
|
Python
|
openverse_catalog/dags/providers/provider_api_scripts/science_museum.py
|
yavik-kapadia/openverse-catalog
|
853766f2176a96450f456a9fd6675e134c0866e1
|
[
"MIT"
] | 25
|
2021-05-06T20:53:45.000Z
|
2022-03-30T23:18:50.000Z
|
openverse_catalog/dags/providers/provider_api_scripts/science_museum.py
|
yavik-kapadia/openverse-catalog
|
853766f2176a96450f456a9fd6675e134c0866e1
|
[
"MIT"
] | 272
|
2021-05-17T05:53:00.000Z
|
2022-03-31T23:57:20.000Z
|
openverse_catalog/dags/providers/provider_api_scripts/science_museum.py
|
yavik-kapadia/openverse-catalog
|
853766f2176a96450f456a9fd6675e134c0866e1
|
[
"MIT"
] | 13
|
2021-06-12T07:09:06.000Z
|
2022-03-29T17:39:13.000Z
|
import logging
from common.licenses import get_license_info
from common.loader import provider_details as prov
from common.requester import DelayedRequester
from common.storage.image import ImageStore
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s: %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
LIMIT = 100
DELAY = 5.0
RETRIES = 3
PROVIDER = prov.SCIENCE_DEFAULT_PROVIDER
ENDPOINT = "https://collection.sciencemuseumgroup.org.uk/search/"
delay_request = DelayedRequester(delay=DELAY)
image_store = ImageStore(provider=PROVIDER)
HEADERS = {"Accept": "application/json"}
DEFAULT_QUERY_PARAMS = {
"has_image": 1,
"image_license": "CC",
"page[size]": LIMIT,
"page[number]": 0,
"date[from]": 0,
"date[to]": 1500,
}
YEAR_RANGE = [
(0, 1500),
(1500, 1750),
(1750, 1825),
(1825, 1850),
(1850, 1875),
(1875, 1900),
(1900, 1915),
(1915, 1940),
(1940, 1965),
(1965, 1990),
(1990, 2020),
]
# global variable to keep track of records pulled
RECORD_IDS = []
def main():
logger.info("Begin: Science Museum script")
for year_range in YEAR_RANGE:
logger.info(f"Running for years {year_range}")
from_year, to_year = year_range
image_count = _page_records(from_year=from_year, to_year=to_year)
logger.info(f"Images pulled till now {image_count}")
image_count = image_store.commit()
logger.info(f"Total images pulled {image_count}")
def _page_records(from_year, to_year):
image_count = 0
page_number = 0
condition = True
while condition:
query_param = _get_query_param(
page_number=page_number, from_year=from_year, to_year=to_year
)
batch_data = _get_batch_objects(query_param=query_param)
if type(batch_data) == list:
if len(batch_data) > 0:
image_count = _handle_object_data(batch_data)
page_number += 1
else:
condition = False
else:
condition = False
return image_count
def _get_query_param(
page_number=0, from_year=0, to_year=1500, default_query_param=None
):
if default_query_param is None:
default_query_param = DEFAULT_QUERY_PARAMS
query_param = default_query_param.copy()
query_param["page[number]"] = page_number
query_param["date[from]"] = from_year
query_param["date[to]"] = to_year
return query_param
def _get_batch_objects(
endpoint=ENDPOINT, headers=None, retries=RETRIES, query_param=None
):
if headers is None:
headers = HEADERS.copy()
data = None
for retry in range(retries):
response = delay_request.get(endpoint, query_param, headers=headers)
try:
response_json = response.json()
if "data" in response_json.keys():
data = response_json.get("data")
break
except Exception as e:
logger.error(f"Failed to due to {e}")
return data
def _handle_object_data(batch_data):
image_count = 0
for obj_ in batch_data:
id_ = obj_.get("id")
if id_ in RECORD_IDS:
continue
RECORD_IDS.append(id_)
foreign_landing_url = obj_.get("links", {}).get("self")
if foreign_landing_url is None:
continue
obj_attributes = obj_.get("attributes")
if obj_attributes is None:
continue
title = obj_attributes.get("summary_title")
creator = _get_creator_info(obj_attributes)
metadata = _get_metadata(obj_attributes)
multimedia = obj_attributes.get("multimedia")
if multimedia is None:
continue
for image_data in multimedia:
foreign_id = image_data.get("admin", {}).get("uid")
if foreign_id is None:
continue
processed = image_data.get("processed")
source = image_data.get("source")
image_url, height, width = _get_image_info(processed)
if image_url is None:
continue
license_version = _get_license_version(source)
if license_version is None:
continue
license_, version = license_version.lower().split(" ")
license_ = license_.replace("cc-", "")
license_info = get_license_info(license_=license_, license_version=version)
thumbnail_url = _get_thumbnail_url(processed)
image_count = image_store.add_item(
foreign_identifier=foreign_id,
foreign_landing_url=foreign_landing_url,
image_url=image_url,
height=height,
width=width,
license_info=license_info,
thumbnail_url=thumbnail_url,
creator=creator,
title=title,
meta_data=metadata,
)
return image_count
def _get_creator_info(obj_attr):
creator_info = None
life_cycle = obj_attr.get("lifecycle")
if life_cycle:
creation = life_cycle.get("creation")
if type(creation) == list:
maker = creation[0].get("maker")
if type(maker) == list:
creator_info = maker[0].get("summary_title")
return creator_info
def _get_image_info(processed):
if processed.get("large"):
image = processed.get("large").get("location")
measurements = processed.get("large").get("measurements")
elif processed.get("medium"):
image = processed.get("medium").get("location")
measurements = processed.get("medium").get("measurements")
else:
image = None
measurements = None
image = check_url(image)
height, width = _get_dimensions(measurements)
return image, height, width
def _get_thumbnail_url(processed):
if processed.get("large_thumbnail"):
image = processed.get("large_thumbnail").get("location")
elif processed.get("medium_thumbnail"):
image = processed.get("medium_thumbnail").get("location")
elif processed.get("small_thumbnail"):
image = processed.get("small_thumbnail").get("location")
else:
image = None
thumbnail_url = check_url(image)
return thumbnail_url
def check_url(image_url):
base_url = "https://coimages.sciencemuseumgroup.org.uk/images/"
if image_url:
if "http" in image_url:
checked_url = image_url
else:
checked_url = base_url + image_url
else:
checked_url = None
return checked_url
def _get_dimensions(measurements):
height_width = {}
if measurements:
dimensions = measurements.get("dimensions")
if dimensions:
for dim in dimensions:
height_width[dim.get("dimension")] = dim.get("value")
return height_width.get("height"), height_width.get("width")
def _get_license_version(source):
license_version = None
if source:
legal = source.get("legal")
if legal:
rights = legal.get("rights")
if type(rights) == list:
license_version = rights[0].get("usage_terms")
return license_version
def _get_metadata(obj_attr):
metadata = {}
identifier = obj_attr.get("identifier")
if type(identifier) == list:
metadata["accession number"] = identifier[0].get("value")
name = obj_attr.get("name")
if type(name) == list:
metadata["name"] = name[0].get("value")
category = obj_attr.get("categories")
if type(category) == list:
metadata["category"] = category[0].get("value")
creditline = obj_attr.get("legal")
if type(creditline) == dict:
metadata["creditline"] = creditline.get("credit_line")
description = obj_attr.get("description")
if type(description) == list:
metadata["description"] = description[0].get("value")
return metadata
if __name__ == "__main__":
main()
| 30.830116
| 87
| 0.628053
| 944
| 7,985
| 5.049788
| 0.202331
| 0.033564
| 0.012587
| 0.011747
| 0.119992
| 0.049927
| 0.011747
| 0.011747
| 0
| 0
| 0
| 0.019731
| 0.263745
| 7,985
| 258
| 88
| 30.949612
| 0.791121
| 0.005886
| 0
| 0.104072
| 0
| 0
| 0.114541
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054299
| false
| 0
| 0.022624
| 0
| 0.126697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c97af3344054a3843093ee257c735adccd419f3
| 1,089
|
py
|
Python
|
digitaltape.py
|
heerdyes/tapegame
|
d6e0c6f81fe9c7c85a54edbd037be318ff7ed391
|
[
"Artistic-2.0"
] | null | null | null |
digitaltape.py
|
heerdyes/tapegame
|
d6e0c6f81fe9c7c85a54edbd037be318ff7ed391
|
[
"Artistic-2.0"
] | null | null | null |
digitaltape.py
|
heerdyes/tapegame
|
d6e0c6f81fe9c7c85a54edbd037be318ff7ed391
|
[
"Artistic-2.0"
] | null | null | null |
# tape variables
TS_MAX=1000
# the digital tape model
class DTape:
def __init__(self,size,alphabet,noopidx=0):
if size>TS_MAX:
self.size=TS_MAX
else:
self.size=size
if len(alphabet)==0:
raise Exception('alphabet has zero symbols')
self.alphabet=alphabet
self.data=[self.alphabet[noopidx] for x in range(self.size)]
class DTapeMC:
def __init__(self,dtape,cmdmap,noopsym):
self.tape=dtape
self.thead=0
self.cmdmap=cmdmap
self.noopsym=noopsym
self.jmpctr=1
def process_cell(self):
if self.thead>=len(self.tape.data) or self.thead<0:
print('[TAPEBOUND_EXCEEDED] machine head @[%d] is beyond tape'%self.thead)
return
datum=self.tape.data[self.thead]
print('evaluating: %s'%datum)
if datum==self.noopsym:
print('noop')
else:
eval(self.cmdmap[datum])
self.thead+=self.jmpctr
class DTapeComputer:
def __init__(self,dtapemc,casetteimg):
self.tapemc=dtapemc
self.casetteimg=casetteimg
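# --- Editor's illustrative sketch (not part of the original file) ---
# Builds a small tape over a two-symbol alphabet and steps the head across it.
# The command map is a hypothetical symbol -> Python-expression mapping.
if __name__=='__main__':
    tape=DTape(8,['.','x'])
    tape.data[3]='x'
    mc=DTapeMC(tape,{'x':"print('hit symbol x')"},'.')
    for _ in range(tape.size):
        mc.process_cell()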
| 26.560976
| 86
| 0.602388
| 138
| 1,089
| 4.630435
| 0.405797
| 0.084507
| 0.051643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011613
| 0.288338
| 1,089
| 40
| 87
| 27.225
| 0.812903
| 0.033976
| 0
| 0.0625
| 0
| 0
| 0.092469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.25
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c980836374b3fb5fedf0a12599c8c546395b546
| 422
|
py
|
Python
|
webhook-cdk/lambda/vars.py
|
ncalteen/github-webhook-lambda-example
|
414daf1a70343abf207ff37dc4a9d65d6892197d
|
[
"MIT"
] | null | null | null |
webhook-cdk/lambda/vars.py
|
ncalteen/github-webhook-lambda-example
|
414daf1a70343abf207ff37dc4a9d65d6892197d
|
[
"MIT"
] | null | null | null |
webhook-cdk/lambda/vars.py
|
ncalteen/github-webhook-lambda-example
|
414daf1a70343abf207ff37dc4a9d65d6892197d
|
[
"MIT"
] | 1
|
2022-03-29T14:42:25.000Z
|
2022-03-29T14:42:25.000Z
|
import json
# Output must be returned in the format mentioned below:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
lambda_response = {
"isBase64Encoded": False,
"statusCode": 200,
"headers": {
"Content-Type": "application/json",
},
"body": json.dumps({
"Status": "OK"
})
}
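# --- Editor's illustrative sketch (not part of the original file) ---
# A hypothetical proxy-integration handler that returns the response above;
# the handler name and event shape are assumptions, not part of this module.
def lambda_handler(event, context):
    return lambda_response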
| 26.375
| 152
| 0.668246
| 49
| 422
| 5.734694
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014451
| 0.180095
| 422
| 15
| 153
| 28.133333
| 0.797688
| 0.483412
| 0
| 0
| 0
| 0
| 0.336449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c988d19204c6f421dff8e8f0c696fe6f0e5ec4f
| 3,737
|
py
|
Python
|
gym_unblockme/envs/unblockme_render.py
|
fedingo/gym-unblockme
|
a4dd20a7608122e09862d681259111e2634f3d4b
|
[
"MIT"
] | 3
|
2019-02-12T15:53:17.000Z
|
2019-07-03T12:00:32.000Z
|
gym_unblockme/envs/unblockme_render.py
|
fedingo/gym-unblockme
|
a4dd20a7608122e09862d681259111e2634f3d4b
|
[
"MIT"
] | null | null | null |
gym_unblockme/envs/unblockme_render.py
|
fedingo/gym-unblockme
|
a4dd20a7608122e09862d681259111e2634f3d4b
|
[
"MIT"
] | null | null | null |
import pygame
import time
import numpy as np
import sys
gray = (150, 150, 150)
white = (255, 255, 255)
black = (0, 0, 0, )
red_block = (255, 0, 0)
red_border = (76, 0, 19)
block_color = (255, 128, 0)
border_color = (165,42,42)
screen = None
SIDE = 50
BORDER = 5
MARGIN = 5
LINE = 1
h_switch = True
def __draw_horizontal_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_red_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, red_border, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, red_block, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_vertical_block(x,y):
global screen
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE, MARGIN + x*SIDE, SIDE, 2*SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + BORDER, MARGIN + x*SIDE + BORDER,
SIDE - 2*BORDER, 2*SIDE - 2*BORDER))
## Render function for the unblockme_class
def render_unblockme(game_object):
matrix = game_object.internal_state
k, h, _ = game_object.shape
global screen
if screen is None:
pygame.init()
screen = pygame.display.set_mode((2*MARGIN+k*SIDE, 2*MARGIN+h*SIDE))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit(0)
screen.fill(black)
# first we draw the background
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
#draw the exit on the outer border
if selected_block[0] == 0:
if y == 0:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
else:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE+MARGIN,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
# Draw the background with the grid pattern
pygame.draw.rect(screen, gray , pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, white, pygame.Rect(MARGIN + y*SIDE + LINE,MARGIN + x*SIDE + LINE,
SIDE - 2*LINE, SIDE - 2*LINE))
# then we draw the blocks in the grid
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,1:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
if selected_block[-1] == 1:
__draw_horizontal_block(x,y)
elif selected_block[-1] == 2:
if (x == 0 or not (matrix[x-1,y,1:] == cell).all() ) and \
(x != k-1 and (matrix[x+1,y,1:] == cell).all() ):
__draw_vertical_block(x,y)
elif selected_block[-1] == 0:
__draw_red_block(x,y)
pygame.display.update()
time.sleep(0.1)
if __name__ == "__main__":
from unblockme_class import *
matrix, goal = get_example()
game = unblock_me(matrix, goal)
render_unblockme(game)
| 36.637255
| 116
| 0.557399
| 515
| 3,737
| 3.906796
| 0.190291
| 0.049702
| 0.069583
| 0.099404
| 0.568091
| 0.511431
| 0.473161
| 0.423956
| 0.423956
| 0.384195
| 0
| 0.035108
| 0.321648
| 3,737
| 102
| 117
| 36.637255
| 0.75858
| 0.047899
| 0
| 0.2
| 0
| 0
| 0.002252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.0625
| 0
| 0.1125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c990cbd7a7616bc0cdb891dffbb562850c5ab57
| 21,364
|
py
|
Python
|
phy/cluster/tests/test_supervisor.py
|
mikailweston/phy
|
d774cb989152a4b7344ac9b70c79c204a5036763
|
[
"BSD-3-Clause"
] | null | null | null |
phy/cluster/tests/test_supervisor.py
|
mikailweston/phy
|
d774cb989152a4b7344ac9b70c79c204a5036763
|
[
"BSD-3-Clause"
] | null | null | null |
phy/cluster/tests/test_supervisor.py
|
mikailweston/phy
|
d774cb989152a4b7344ac9b70c79c204a5036763
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Test GUI component."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
#from contextlib import contextmanager
from pytest import yield_fixture, fixture, raises
import numpy as np
from numpy.testing import assert_array_equal as ae
from .. import supervisor as _supervisor
from ..supervisor import (Supervisor,
TaskLogger,
ClusterView,
SimilarityView,
ActionCreator,
)
from phy.gui import GUI
from phy.gui.widgets import Barrier
from phy.gui.qt import qInstallMessageHandler
from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready
from phy.utils.context import Context
from phylib.utils import connect, Bunch, emit
def handler(msg_type, msg_log_context, msg_string):
pass
qInstallMessageHandler(handler)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@yield_fixture
def gui(tempdir, qtbot):
# NOTE: mock patch show box exec_
_supervisor._show_box = lambda _: _
gui = GUI(position=(200, 100), size=(500, 500), config_dir=tempdir)
gui.set_default_actions()
gui.show()
qtbot.waitForWindowShown(gui)
yield gui
qtbot.wait(5)
gui.close()
del gui
qtbot.wait(5)
@fixture
def supervisor(qtbot, gui, cluster_ids, cluster_groups, cluster_labels,
similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
s = Supervisor(
spike_clusters,
cluster_groups=cluster_groups,
cluster_labels=cluster_labels,
similarity=similarity,
context=Context(tempdir),
sort=('id', 'desc'),
)
s.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=s.cluster_view)
connect(b('similarity_view'), event='ready', sender=s.similarity_view)
b.wait()
return s
#------------------------------------------------------------------------------
# Test tasks
#------------------------------------------------------------------------------
@fixture
def tl():
class MockClusterView(object):
_selected = [0]
def select(self, cl, callback=None, **kwargs):
self._selected = cl
callback({'selected': cl, 'next': cl[-1] + 1})
def next(self, callback=None):
callback({'selected': [self._selected[-1] + 1], 'next': self._selected[-1] + 2})
def previous(self, callback=None): # pragma: no cover
callback({'selected': [self._selected[-1] - 1], 'next': self._selected[-1]})
class MockSimilarityView(MockClusterView):
pass
class MockSupervisor(object):
def merge(self, cluster_ids, to, callback=None):
callback(Bunch(deleted=cluster_ids, added=[to]))
def split(self, old_cluster_ids, new_cluster_ids, callback=None):
callback(Bunch(deleted=old_cluster_ids, added=new_cluster_ids))
def move(self, which, group, callback=None):
callback(Bunch(metadata_changed=which, metadata_value=group))
def undo(self, callback=None):
callback(Bunch())
def redo(self, callback=None):
callback(Bunch())
out = TaskLogger(MockClusterView(), MockSimilarityView(), MockSupervisor())
return out
def test_task_1(tl):
assert tl.last_state(None) is None
def test_task_2(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.process()
assert tl.last_state() == ([0], 1, None, None)
def test_task_3(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
def test_task_merge(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000)
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
tl.enqueue(tl.supervisor, 'undo')
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
tl.enqueue(tl.supervisor, 'redo')
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
def test_task_split(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001])
tl.process()
assert tl.last_state() == ([1000, 1001], 1002, None, None)
def test_task_move_1(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.supervisor, 'move', [0], 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_best(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'best', 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_similar(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'similar', 'good')
tl.process()
assert tl.last_state() == ([0], 1, [101], 102)
def test_task_move_all(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'all', 'good')
tl.process()
assert tl.last_state() == ([1], 2, [101], 102)
#------------------------------------------------------------------------------
# Test cluster and similarity views
#------------------------------------------------------------------------------
@fixture
def data():
_data = [{"id": i,
"n_spikes": 100 - 10 * i,
"group": {2: 'noise', 3: 'noise', 5: 'mua', 8: 'good'}.get(i, None),
"is_masked": i in (2, 3, 5),
} for i in range(10)]
return _data
def test_cluster_view_1(qtbot, gui, data):
cv = ClusterView(gui, data=data)
_wait_until_table_ready(qtbot, cv)
cv.sort_by('n_spikes', 'asc')
cv.select([1])
qtbot.wait(10)
assert cv.state == {'current_sort': ('n_spikes', 'asc'), 'selected': [1]}
cv.set_state({'current_sort': ('id', 'desc'), 'selected': [2]})
assert cv.state == {'current_sort': ('id', 'desc'), 'selected': [2]}
def test_similarity_view_1(qtbot, gui, data):
sv = SimilarityView(gui, data=data)
_wait_until_table_ready(qtbot, sv)
@connect(sender=sv)
def on_request_similar_clusters(sender, cluster_id):
return [{'id': id} for id in (100 + cluster_id, 110 + cluster_id, 102 + cluster_id)]
sv.reset([5])
_assert(sv.get_ids, [105, 115, 107])
def test_cluster_view_extra_columns(qtbot, gui, data):
for cl in data:
cl['my_metrics'] = cl['id'] * 1000
cv = ClusterView(gui, data=data, columns=['id', 'n_spikes', 'my_metrics'])
_wait_until_table_ready(qtbot, cv)
#------------------------------------------------------------------------------
# Test ActionCreator
#------------------------------------------------------------------------------
def test_action_creator_1(qtbot, gui):
ac = ActionCreator()
ac.attach(gui)
gui.show()
#------------------------------------------------------------------------------
# Test GUI component
#------------------------------------------------------------------------------
def _select(supervisor, cluster_ids, similar=None):
supervisor.task_logger.enqueue(supervisor.cluster_view, 'select', cluster_ids)
if similar is not None:
supervisor.task_logger.enqueue(supervisor.similarity_view, 'select', similar)
supervisor.task_logger.process()
supervisor.block()
supervisor.task_logger.show_history()
assert supervisor.task_logger.last_state()[0] == cluster_ids
assert supervisor.task_logger.last_state()[2] == similar
def _assert_selected(supervisor, sel):
assert supervisor.selected == sel
def test_select(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
def test_supervisor_busy(qtbot, supervisor):
_select(supervisor, [30], [20])
o = object()
emit('is_busy', o, True)
assert supervisor._is_busy
# The action fails while the supervisor is busy.
with raises(RuntimeError):
emit('action', supervisor.action_creator, 'merge')
emit('is_busy', o, False)
assert not supervisor._is_busy
# The action succeeds because the supervisor is no longer busy.
emit('action', supervisor.action_creator, 'merge')
supervisor.block()
assert not supervisor._is_busy
def test_supervisor_cluster_metrics(
qtbot, gui, cluster_ids, cluster_groups, similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
def my_metrics(cluster_id):
return cluster_id ** 2
cluster_metrics = {'my_metrics': my_metrics}
mc = Supervisor(spike_clusters,
cluster_groups=cluster_groups,
cluster_metrics=cluster_metrics,
similarity=similarity,
context=Context(tempdir),
)
mc.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=mc.cluster_view)
connect(b('similarity_view'), event='ready', sender=mc.similarity_view)
b.wait()
assert 'my_metrics' in mc.columns
def test_supervisor_select_1(qtbot, supervisor):
# WARNING: always use actions in tests, because this doesn't call
# the supervisor method directly, but raises an event, enqueue the task,
# and call TaskLogger.process() which handles the cascade of callbacks.
supervisor.select_actions.select([0])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.task_logger.show_history()
def test_supervisor_color(qtbot, supervisor):
supervisor.view_actions.colormap_linear()
supervisor.view_actions.color_field_n_spikes()
supervisor.view_actions.toggle_categorical_colormap(False)
supervisor.view_actions.toggle_logarithmic_colormap(True)
def test_supervisor_select_2(qtbot, supervisor):
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [30])
def test_supervisor_select_order(qtbot, supervisor):
_select(supervisor, [1, 0])
_assert_selected(supervisor, [1, 0])
_select(supervisor, [0, 1])
_assert_selected(supervisor, [0, 1])
def test_supervisor_edge_cases(supervisor):
# Empty selection at first.
ae(supervisor.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])
_select(supervisor, [0])
supervisor.undo()
supervisor.block()
supervisor.redo()
supervisor.block()
# Merge.
supervisor.merge()
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([10])
supervisor.block()
_assert_selected(supervisor, [0])
# Split.
supervisor.split([])
supervisor.block()
_assert_selected(supervisor, [0])
# Move.
supervisor.move('ignored', [])
supervisor.block()
supervisor.save()
def test_supervisor_save(qtbot, gui, supervisor):
emit('request_save', gui)
def test_supervisor_skip(qtbot, gui, supervisor):
# yield [0, 1, 2, 10, 11, 20, 30]
# # i, g, N, i, g, N, N
expected = [30, 20, 11, 2, 1]
for clu in expected:
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [clu])
def test_supervisor_sort(qtbot, supervisor):
supervisor.sort('id', 'desc')
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
supervisor.select_actions.sort_by_n_spikes()
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
def test_supervisor_filter(qtbot, supervisor):
supervisor.filter('5 <= id && id <= 20')
qtbot.wait(50)
_cl = []
supervisor.cluster_view.get_ids(lambda cluster_ids: _cl.extend(cluster_ids))
qtbot.wait(50)
assert _cl == [20, 11, 10]
def test_supervisor_merge_1(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.actions.redo()
supervisor.block()
supervisor.task_logger.show_history()
_assert_selected(supervisor, [31])
assert supervisor.is_dirty()
def test_supervisor_merge_event(qtbot, supervisor):
_select(supervisor, [30], [20])
_l = []
@connect(sender=supervisor)
def on_select(sender, cluster_ids):
_l.append(cluster_ids)
supervisor.actions.merge()
supervisor.block()
# After a merge, there should be only one select event.
assert len(_l) == 1
def test_supervisor_merge_move(qtbot, supervisor):
"""Check that merge then move selects the next cluster in the original
cluster view, not the updated cluster view."""
_select(supervisor, [20, 11], [])
_assert_selected(supervisor, [20, 11])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [2])
def test_supervisor_split_0(qtbot, supervisor):
_select(supervisor, [1, 2])
_assert_selected(supervisor, [1, 2])
supervisor.actions.split([1, 2])
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [1, 2])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_1(supervisor):
supervisor.select_actions.select([1, 2])
supervisor.block()
@connect(sender=supervisor)
def on_request_split(sender):
return [1, 2]
supervisor.actions.split()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_2(gui, similarity):
spike_clusters = np.array([0, 0, 1])
supervisor = Supervisor(spike_clusters,
similarity=similarity,
)
supervisor.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=supervisor.cluster_view)
connect(b('similarity_view'), event='ready', sender=supervisor.similarity_view)
b.wait()
supervisor.actions.split([0])
supervisor.block()
_assert_selected(supervisor, [2, 3])
def test_supervisor_state(tempdir, qtbot, gui, supervisor):
supervisor.select(1)
cv = supervisor.cluster_view
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
assert supervisor.state.cluster_view.selected == [1]
cv.sort_by('id')
assert supervisor.state.cluster_view.current_sort == ('id', 'asc')
cv.set_state({'current_sort': ('n_spikes', 'desc')})
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
cv.sort_by('id', 'desc')
assert supervisor.all_cluster_ids == [30, 20, 11, 10, 2, 1, 0]
def test_supervisor_label(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
supervisor.label("my_field", 1.23, cluster_ids=30)
supervisor.block()
assert 'my_field' in supervisor.fields
assert supervisor.get_labels('my_field')[20] == 3.14
assert supervisor.get_labels('my_field')[30] == 1.23
def test_supervisor_label_cluster_1(supervisor):
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Same value for the old clusters.
l = supervisor.get_labels('my_field')
assert l[20] == l[30] == 3.14
up = supervisor.merge()
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_2(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
# One of the parents.
l = supervisor.get_labels('my_field')
assert l[20] == 3.14
assert l[30] is None
up = supervisor.merge([20, 30])
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_3(supervisor):
# Conflict: largest cluster wins.
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Create merged cluster from 20 and 30.
up = supervisor.merge()
new = up.added[0]
supervisor.block()
# It got the label of its parents.
assert supervisor.get_labels('my_field')[new] == 3.14
# Now, we label a smaller cluster.
supervisor.label("my_field", 2.718, cluster_ids=[10])
# We merge the large and small cluster together.
up = supervisor.merge(up.added + [10])
supervisor.block()
# The new cluster should have the value of the first, merged big cluster, i.e. 3.14.
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_move_1(supervisor):
_select(supervisor, [20])
_assert_selected(supervisor, [20])
assert not supervisor.move('', '')
supervisor.actions.move('noise', 'all')
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [11])
def test_supervisor_move_2(supervisor):
_select(supervisor, [20], [10])
_assert_selected(supervisor, [20, 10])
supervisor.actions.move('noise', 10)
supervisor.block()
_assert_selected(supervisor, [20, 2])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20, 10])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [20, 2])
def test_supervisor_move_3(qtbot, supervisor):
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move_best_to_noise()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.move_best_to_mua()
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.move_best_to_good()
supervisor.block()
_assert_selected(supervisor, [2])
assert supervisor.cluster_meta.get('group', 30) == 'noise'
assert supervisor.cluster_meta.get('group', 20) == 'mua'
assert supervisor.cluster_meta.get('group', 11) == 'good'
def test_supervisor_move_4(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_similar_to_noise()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.actions.move_similar_to_mua()
supervisor.block()
_assert_selected(supervisor, [30, 2])
supervisor.actions.move_similar_to_good()
supervisor.block()
_assert_selected(supervisor, [30, 1])
assert supervisor.cluster_meta.get('group', 20) == 'noise'
assert supervisor.cluster_meta.get('group', 11) == 'mua'
assert supervisor.cluster_meta.get('group', 2) == 'good'
def test_supervisor_move_5(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_all_to_noise()
supervisor.block()
_assert_selected(supervisor, [11, 2])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [11, 1])
supervisor.actions.move_all_to_mua()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.actions.move_all_to_good()
supervisor.block()
_assert_selected(supervisor, [])
assert supervisor.cluster_meta.get('group', 30) == 'noise'
assert supervisor.cluster_meta.get('group', 20) == 'noise'
assert supervisor.cluster_meta.get('group', 11) == 'mua'
assert supervisor.cluster_meta.get('group', 10) == 'mua'
assert supervisor.cluster_meta.get('group', 2) == 'good'
assert supervisor.cluster_meta.get('group', 1) == 'good'
def test_supervisor_reset(qtbot, supervisor):
supervisor.select_actions.select([10, 11])
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.select_actions.previous()
supervisor.block()
_assert_selected(supervisor, [30, 20])
def test_supervisor_nav(qtbot, supervisor):
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.select_actions.previous_best()
supervisor.block()
_assert_selected(supervisor, [30])
| 27.460154
| 92
| 0.637521
| 2,555
| 21,364
| 5.115851
| 0.113503
| 0.06656
| 0.097315
| 0.090965
| 0.583735
| 0.499273
| 0.44396
| 0.393772
| 0.310535
| 0.260806
| 0
| 0.032909
| 0.187839
| 21,364
| 777
| 93
| 27.495496
| 0.72042
| 0.096892
| 0
| 0.464213
| 0
| 0
| 0.047305
| 0
| 0
| 0
| 0
| 0
| 0.198364
| 1
| 0.126789
| false
| 0.00409
| 0.022495
| 0.006135
| 0.169734
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c9997692782ea5187e69a11b0059d2cc2e4c11c
| 2,733
|
py
|
Python
|
Source/CommandManager.py
|
SOBotics/Botpy
|
8e3eb48fcc2a46fd60f4d49832941fa1b71bc223
|
[
"WTFPL"
] | 5
|
2017-09-19T10:19:33.000Z
|
2020-10-11T13:29:43.000Z
|
Source/CommandManager.py
|
SOBotics/Botpy
|
8e3eb48fcc2a46fd60f4d49832941fa1b71bc223
|
[
"WTFPL"
] | 33
|
2018-05-14T09:05:06.000Z
|
2020-04-20T08:48:59.000Z
|
Source/CommandManager.py
|
SOBotics/Botpy
|
8e3eb48fcc2a46fd60f4d49832941fa1b71bc223
|
[
"WTFPL"
] | 1
|
2017-09-27T10:40:34.000Z
|
2017-09-27T10:40:34.000Z
|
#
# CommandManager.py
# Botpy
#
# Created by Ashish Ahuja on 4th September 2017.
#
#
import threading
import chatexchange as ce
class CommandManager:
def __init__(self, commands):
self.commands = commands
self.running_commands = []
def run_command(self, command):
if command.privileges() == 0:
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
if command.message.room.is_user_privileged(command.message.user.id, command.privileges()):
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
command.reply("You do not have sufficient privileges to run this command.")
def handle_command(self, message):
try:
message_content = message.content.split()
del message_content[0]
except AttributeError:
return
for command in self.commands:
command_usage = command.usage()
usage_index = -1
for usage in command_usage:
usage_index += 1
usage_components = usage.split()
args = []
match = True
last_index = min(len(usage_components), len(message_content))
for i in range(last_index):
content_component = message_content[i]
usage_component = usage_components[i]
if usage_component == '*':
args.append(content_component)
elif usage_component == '...':
# Everything else is arguments
temp_index = i
while temp_index < len(message_content):
args.append(message_content[temp_index])
temp_index += 1
elif content_component != usage_component:
match = False
min_count = len(usage_components) - 1 \
if usage_components[-1] == '...' else len(usage_components)
if len(message_content) < min_count:
match = False
if match:
self.run_command(command(self, message, args, usage_index))
return
def cleanup_finished_commands(self):
for command, command_thread in self.running_commands:
if not command_thread.is_alive():
self.running_commands.remove([command, command_thread])
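# --- Usage sketch (not part of the original Botpy source) ----------------------
# A minimal, hypothetical example of how CommandManager dispatches a chat
# message to a command class; EchoCommand and the fake message object below are
# stand-ins for real Botpy commands and chatexchange messages.
if __name__ == '__main__':
    from types import SimpleNamespace

    class EchoCommand:
        def __init__(self, manager, message, args, usage_index):
            self.message = message
            self.args = args

        @staticmethod
        def usage():
            # '...' means "treat everything after this word as arguments".
            return ["echo ..."]

        def privileges(self):
            return 0  # no special privileges required

        def run(self):
            print("echo:", " ".join(self.args))

    fake_message = SimpleNamespace(
        content="@bot echo hello world",
        user=SimpleNamespace(id=1),
        room=SimpleNamespace(is_user_privileged=lambda user_id, priv: True),
    )

    manager = CommandManager([EchoCommand])
    manager.handle_command(fake_message)   # starts a thread that prints "echo: hello world"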
| 35.038462
| 98
| 0.555799
| 267
| 2,733
| 5.47191
| 0.299625
| 0.080082
| 0.065024
| 0.03833
| 0.186174
| 0.154689
| 0.154689
| 0.154689
| 0.154689
| 0.154689
| 0
| 0.006932
| 0.36663
| 2,733
| 77
| 99
| 35.493506
| 0.837088
| 0.035858
| 0
| 0.214286
| 0
| 0
| 0.024752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.035714
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c9adb0e11b484554e5e9a324f68f256e624b588
| 13,217
|
py
|
Python
|
iotronic/wamp/agent.py
|
smartmeio/stack4things-openstack-iotronic
|
3e5782eb1fb33b7c3c8c9362e24d30241153c371
|
[
"Apache-2.0"
] | 1
|
2021-11-04T09:43:49.000Z
|
2021-11-04T09:43:49.000Z
|
iotronic/wamp/agent.py
|
smartmeio/stack4things-openstack-iotronic
|
3e5782eb1fb33b7c3c8c9362e24d30241153c371
|
[
"Apache-2.0"
] | null | null | null |
iotronic/wamp/agent.py
|
smartmeio/stack4things-openstack-iotronic
|
3e5782eb1fb33b7c3c8c9362e24d30241153c371
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import asyncio
import json
import subprocess
import time
import txaio
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common.i18n import _LI
from iotronic.common.i18n import _LW
from iotronic.db import api as dbapi
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging.rpc import dispatcher
import importlib
from threading import Thread
import ssl
import os
import signal
from autobahn.asyncio.component import Component
LOG = logging.getLogger(__name__)
service_opts = [
cfg.StrOpt('notification_level',
choices=[('debug', _('"debug" level')),
('info', _('"info" level')),
('warning', _('"warning" level')),
('error', _('"error" level')),
('critical', _('"critical" level'))],
help=_('Specifies the minimum level for which to send '
'notifications. If not set, no notifications will '
'be sent. The default is for this option to be unset.')),
]
wamp_opts = [
cfg.StrOpt('wamp_transport_url',
default='ws://localhost:8181/',
help=('URL of wamp broker')),
cfg.StrOpt('wamp_realm',
default='s4t',
help=('realm broker')),
cfg.BoolOpt('register_agent',
default=False,
help=('Flag for marking this agent as a registration agent')),
cfg.BoolOpt('skip_cert_verify',
default=False,
help=(
'Flag for skipping the verification of the server cert '
'(for the auto-signed ones)')),
cfg.IntOpt('autoPingInterval',
default=2,
help=('autoPingInterval parameter for wamp')),
cfg.IntOpt('autoPingTimeout',
default=2,
help=('autoPingTimeout parameter for wamp')),
cfg.BoolOpt('service_allow_list',
default=False,
help='Enable service allow list checks.'),
cfg.StrOpt('service_allow_list_path',
default="(/var/lib/wstun/allowlist)",
help='Path of allowlist.json file.'),
]
proxy_opts = [
cfg.StrOpt('proxy',
choices=[('nginx', _('nginx proxy')), ],
help=_('Proxy for webservices')),
]
CONF = cfg.CONF
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_opts(proxy_opts)
CONF.register_opts(wamp_opts, 'wamp')
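# A hypothetical iotronic.conf fragment for the options registered above
# (the '[wamp]' section name matches CONF.register_opts(wamp_opts, 'wamp');
# the broker URL is a placeholder):
#
#   [wamp]
#   wamp_transport_url = wss://broker.example.org:8181/
#   wamp_realm = s4t
#   register_agent = True
#   skip_cert_verify = True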
txaio.start_logging(level="info")
wamp_session_caller = None
AGENT_HOST = None
LOOP = None
connected = False
async def wamp_request(kwarg):
# for previous LR version (to be removed asap)
if 'req' in kwarg:
LOG.debug("calling: " + kwarg['wamp_rpc_call'] +
" with request id: " + kwarg['req']['uuid'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
kwarg['req'],
*kwarg['data'])
else:
LOG.debug("calling: " + kwarg['wamp_rpc_call'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
*kwarg['data'])
return d
# OSLO ENDPOINT
class WampEndpoint(object):
def s4t_invoke_wamp(self, ctx, **kwarg):
LOG.debug("CONDUCTOR sent me: " + kwarg['wamp_rpc_call'])
r = asyncio.run_coroutine_threadsafe(wamp_request(kwarg), LOOP)
return r.result()
def read_allowlist():
try:
with open(CONF.wamp.service_allow_list_path, "r") as allow_file:
allow_list_str = allow_file.read()
allow_list = json.loads(allow_list_str)
#LOG.debug(allow_list)
return allow_list
except Exception as err:
LOG.error(err)
class AgentEndpoint(object):
# used for testing
def echo(self, ctx, text):
LOG.debug("ECHO of " + text)
return text
def create_tap_interface(self, ctx, port_uuid, tcp_port):
time.sleep(12)
LOG.debug('Creating tap interface on the wamp agent host')
p = subprocess.Popen('socat -d -d TCP:localhost:' + tcp_port +
',reuseaddr,forever,interval=10 TUN,tun-type=tap,'
'tun-name=tap' + port_uuid[0:14] +
',up ', shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return 1
def addin_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
new_node={}
new_node['client']=device
new_node['port']=str(port)
if new_node in allow_list:
LOG.warning("This device already exposes this port!")
else:
allow_list.append(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
read_allowlist()
LOG.debug("Added device/service port in allow list.")
except Exception as err:
print(err)
def remove_from_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
new_node={}
new_node['client']=device
new_node['port']=str(port)
if new_node in allow_list:
allow_list.remove(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
LOG.debug("Removed device/service port from allow list.")
except Exception as err:
print(err)
class RPCServer(Thread):
def __init__(self):
# AMQP CONFIG
proxy = importlib.import_module("iotronic.wamp.proxies." + CONF.proxy)
endpoints = [
WampEndpoint(),
AgentEndpoint(),
proxy.ProxyManager()
]
Thread.__init__(self)
transport = oslo_messaging.get_transport(CONF)
target = oslo_messaging.Target(topic='s4t',
server=AGENT_HOST)
access_policy = dispatcher.DefaultRPCAccessPolicy
self.server = oslo_messaging.get_rpc_server(
transport, target,
endpoints, executor='threading',
access_policy=access_policy)
def run(self):
LOG.info("Starting AMQP server... ")
self.server.start()
def stop(self):
LOG.info("Stopping AMQP server... ")
self.server.stop()
LOG.info("AMQP server stopped. ")
class WampManager(object):
def __init__(self):
LOG.debug("wamp url: %s wamp realm: %s",
CONF.wamp.wamp_transport_url, CONF.wamp.wamp_realm)
self.loop = asyncio.get_event_loop()
global LOOP
LOOP = self.loop
wamp_transport = CONF.wamp.wamp_transport_url
wurl_list = wamp_transport.split(':')
is_wss = False
if wurl_list[0] == "wss":
is_wss = True
whost = wurl_list[1].replace('/', '')
wport = int(wurl_list[2].replace('/', ''))
if is_wss and CONF.wamp.skip_cert_verify:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
wamp_transport = [
{
"url": CONF.wamp.wamp_transport_url,
"serializers": ["json"],
"endpoint": {
"type": "tcp",
"host": whost,
"port": wport,
"tls": ctx
},
},
]
comp = Component(
transports=wamp_transport,
realm=CONF.wamp.wamp_realm
)
self.comp = comp
@comp.on_join
async def onJoin(session, details):
global connected
connected = True
global wamp_session_caller, AGENT_HOST
wamp_session_caller = session
import iotronic.wamp.functions as fun
session.subscribe(fun.board_on_leave,
'wamp.session.on_leave')
session.subscribe(fun.board_on_join,
'wamp.session.on_join')
try:
if CONF.wamp.register_agent:
session.register(fun.registration,
u'stack4things.register')
LOG.info("I have been set as registration agent")
session.register(fun.connection,
AGENT_HOST + u'.stack4things.connection')
session.register(fun.echo,
AGENT_HOST + u'.stack4things.echo')
session.register(fun.alive,
AGENT_HOST + u'.stack4things.alive')
session.register(fun.wamp_alive,
AGENT_HOST + u'.stack4things.wamp_alive')
session.register(fun.notify_result,
AGENT_HOST + u'.stack4things.notify_result')
LOG.debug("procedure registered")
except Exception as e:
LOG.error("could not register procedure: {0}".format(e))
LOG.info("WAMP session ready.")
session_l = await session.call(u'wamp.session.list')
session_l.remove(details.session)
fun.update_sessions(session_l, AGENT_HOST)
@comp.on_leave
async def onLeave(session, details):
LOG.warning('WAMP Session Left: ' + str(details))
@comp.on_disconnect
async def onDisconnect(session, was_clean):
LOG.warning('WAMP Transport Left: ' + str(was_clean))
global connected
connected = False
if not connected:
comp.start(self.loop)
def start(self):
LOG.info("Starting WAMP server...")
self.comp.start(self.loop)
self.loop.run_forever()
def stop(self):
LOG.info("Stopping WAMP server...")
# Canceling pending tasks and stopping the loop
asyncio.gather(*asyncio.Task.all_tasks()).cancel()
# Stopping the loop
self.loop.stop()
LOG.info("WAMP server stopped.")
class WampAgent(object):
def __init__(self, host):
signal.signal(signal.SIGINT, self.stop_handler)
logging.register_options(CONF)
CONF(project='iotronic')
logging.setup(CONF, "iotronic-wamp-agent")
if CONF.debug:
txaio.start_logging(level="debug")
# to be removed asap
self.host = host
self.dbapi = dbapi.get_instance()
try:
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url})
except exception.WampAgentAlreadyRegistered:
LOG.warn(_LW("A wampagent with hostname %(hostname)s "
"was previously registered. Updating registration"),
{'hostname': self.host})
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url},
update_existing=True)
self.wampagent = wpa
self.wampagent.ragent = CONF.wamp.register_agent
self.wampagent.save()
global AGENT_HOST
AGENT_HOST = self.host
self.r = RPCServer()
self.w = WampManager()
self.r.start()
self.w.start()
def del_host(self, deregister=True):
if deregister:
try:
self.dbapi.unregister_wampagent(self.host)
LOG.info(_LI('Successfully stopped wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
except exception.WampAgentNotFound:
pass
else:
LOG.info(_LI('Not deregistering wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
def stop_handler(self, signum, frame):
self.w.stop()
self.r.stop()
self.del_host()
os._exit(0)
| 31.544153
| 81
| 0.557464
| 1,433
| 13,217
| 4.980461
| 0.251221
| 0.026482
| 0.015693
| 0.011209
| 0.21914
| 0.171501
| 0.152305
| 0.143618
| 0.107188
| 0.107188
| 0
| 0.00505
| 0.340773
| 13,217
| 418
| 82
| 31.619617
| 0.814071
| 0.061133
| 0
| 0.191419
| 0
| 0
| 0.168497
| 0.017601
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049505
| false
| 0.0033
| 0.072607
| 0
| 0.155116
| 0.006601
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c9ae3c6c99371fef5bc7aaa5ea9deed848c23c0
| 1,406
|
py
|
Python
|
src/export_to_poseviz.py
|
anibali/metro-pose3d
|
dd0c8a82ae271ce69441d216d615428e5ab1d5d1
|
[
"MIT"
] | 52
|
2020-03-10T05:18:02.000Z
|
2021-12-23T04:03:38.000Z
|
src/export_to_poseviz.py
|
anibali/metro-pose3d
|
dd0c8a82ae271ce69441d216d615428e5ab1d5d1
|
[
"MIT"
] | 2
|
2020-03-30T08:08:06.000Z
|
2020-03-31T04:26:10.000Z
|
src/export_to_poseviz.py
|
anibali/metro-pose3d
|
dd0c8a82ae271ce69441d216d615428e5ab1d5d1
|
[
"MIT"
] | 7
|
2020-04-02T09:02:00.000Z
|
2020-12-12T07:11:07.000Z
|
#!/usr/bin/env python3
import argparse
import logging
import sys
import numpy as np
import util
def main():
flags = initialize()
logging.debug(f'Loading from {flags.in_path}')
a = np.load(flags.in_path, allow_pickle=True)
all_results_3d = {}
for image_path, coords3d_pred in zip(a['image_path'], a['coords3d_pred_world']):
image_path = image_path.decode('utf8')
all_results_3d.setdefault(
image_path, []).append(coords3d_pred.tolist())
logging.info(f'Writing to file {flags.out_path}')
util.dump_json(all_results_3d, flags.out_path)
def initialize():
parser = argparse.ArgumentParser()
parser.add_argument('--in-path', type=str, required=True)
parser.add_argument('--out-path', type=str, default=None)
parser.add_argument('--loglevel', type=str, default='info')
flags = parser.parse_args()
if flags.out_path is None:
flags.out_path = flags.in_path.replace('.npz', '.json')
loglevel = dict(error=40, warning=30, info=20, debug=10)[flags.loglevel]
simple_formatter = logging.Formatter('{asctime}-{levelname:^1.1} -- {message}', style='{')
print_handler = logging.StreamHandler(sys.stdout)
print_handler.setLevel(loglevel)
print_handler.setFormatter(simple_formatter)
logging.basicConfig(level=loglevel, handlers=[print_handler])
return flags
if __name__ == '__main__':
main()
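# Example invocation (paths are placeholders); if --out-path is omitted it
# defaults to the input path with '.npz' replaced by '.json':
#
#   python src/export_to_poseviz.py --in-path preds.npz --loglevel debug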
| 29.914894
| 94
| 0.69559
| 188
| 1,406
| 4.978723
| 0.473404
| 0.048077
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015306
| 0.163585
| 1,406
| 46
| 95
| 30.565217
| 0.780612
| 0.014936
| 0
| 0
| 0
| 0
| 0.132225
| 0.018786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.151515
| 0
| 0.242424
| 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1c9b5d5805105a181cbbe52dc9cadbd70001e7f9
| 1,606
|
py
|
Python
|
xcube/core/gen2/local/helpers.py
|
bcdev/xcube
|
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
|
[
"MIT"
] | 97
|
2018-06-26T13:02:55.000Z
|
2022-03-26T21:03:13.000Z
|
xcube/core/gen2/local/helpers.py
|
bcdev/xcube
|
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
|
[
"MIT"
] | 524
|
2018-11-09T12:00:08.000Z
|
2022-03-31T17:00:13.000Z
|
xcube/core/gen2/local/helpers.py
|
bcdev/xcube
|
9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3
|
[
"MIT"
] | 15
|
2019-07-09T08:46:03.000Z
|
2022-02-07T18:47:34.000Z
|
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import xarray as xr
def is_empty_cube(cube: xr.Dataset) -> bool:
return len(cube.data_vars) == 0
def strip_cube(cube: xr.Dataset) -> xr.Dataset:
drop_vars = [k for k, v in cube.data_vars.items()
if len(v.shape) < 3
or np.product(v.shape) == 0
or v.shape[-2] < 2
or v.shape[-1] < 2]
if drop_vars:
return cube.drop_vars(drop_vars)
return cube
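# --- Usage sketch (not part of the xcube module) --------------------------------
# A tiny, self-contained illustration of the helpers above: variables with fewer
# than three dimensions (or degenerate spatial shapes) are dropped by
# strip_cube(), 3-D data variables are kept. The variable names are made up.
if __name__ == "__main__":
    demo = xr.Dataset(
        {
            "chl": (("time", "lat", "lon"), np.zeros((2, 4, 4))),  # kept: 3-D, non-degenerate
            "flag": (("lat", "lon"), np.zeros((4, 4))),            # dropped: only 2-D
        }
    )
    print(is_empty_cube(demo))                 # False
    print(list(strip_cube(demo).data_vars))    # ['chl']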
| 41.179487
| 81
| 0.71731
| 250
| 1,606
| 4.572
| 0.504
| 0.07699
| 0.022747
| 0.029746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008723
| 0.214819
| 1,606
| 38
| 82
| 42.263158
| 0.8977
| 0.689913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0.076923
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98bbff443455dac48b3d58e95d525a9389d58413
| 7,896
|
py
|
Python
|
smarts/core/utils/traffic_history_service.py
|
c-h-a-r-l-i-e/SMARTS
|
6cb8ffda74e235c25d63b74313623b2e03e402f7
|
[
"MIT"
] | null | null | null |
smarts/core/utils/traffic_history_service.py
|
c-h-a-r-l-i-e/SMARTS
|
6cb8ffda74e235c25d63b74313623b2e03e402f7
|
[
"MIT"
] | null | null | null |
smarts/core/utils/traffic_history_service.py
|
c-h-a-r-l-i-e/SMARTS
|
6cb8ffda74e235c25d63b74313623b2e03e402f7
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import pickle
from dataclasses import dataclass
from multiprocessing import Pipe, Process, Queue
import ijson
import smarts.core.scenario as scenario
@dataclass
class RequestHistoryRange:
start_index: int
batch_count: int
class Traffic_history_service:
"""responsible for dynamically fetching traffic history json to reduce
memory use of traffic history data
"""
class QueueDone:
pass
def __init__(self, history_file_path):
self._history_file_path = history_file_path
self._all_timesteps = set()
self._current_traffic_history = {}
self._prev_batch_history = {}
# return if traffic history is not used
if history_file_path is None:
return
self._log = logging.getLogger(self.__class__.__name__)
send_data_conn, receive_data_conn = Pipe()
self._receive_data_conn = receive_data_conn
self._request_queue = Queue()
self._fetch_history_proc = Process(
target=self._fetch_history,
args=(
send_data_conn,
self._request_queue,
self._history_file_path,
),
)
self._fetch_history_proc.daemon = True
self._fetch_history_proc.start()
self._range_start = 0
self._batch_size = 300
# initialize
with open(self._history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
self._all_timesteps.add(t)
if (
self._range_start <= index
and index < self._range_start + self._batch_size
):
self._current_traffic_history[t] = vehicles_state
self._range_start += self._batch_size
# prepares the next batch
self._prepare_next_batch()
self._receive_data_conn.recv()
def teardown(self):
if self.is_in_use:
self._request_queue.put(Traffic_history_service.QueueDone())
self._request_queue.close()
self._request_queue = None
self._fetch_history_proc.join(timeout=3)
if self._fetch_history_proc.is_alive():
self._log.warning("fetch history process still alive after teardown")
self._fetch_history_proc = None
self._history_file_path = None
def __del__(self):
self.teardown()
@property
def is_in_use(self):
return self._history_file_path is not None
def _fetch_history(self, send_data_conn, request_queue, history_file_path):
"""prepare 1 batch ahead, when received request, immediately return the previously
prepared batch and prepares the next batch.
"""
return_batch = {}
while True:
historyRange = request_queue.get()
if type(historyRange) is Traffic_history_service.QueueDone:
break
assert isinstance(historyRange, RequestHistoryRange)
send_data_conn.send(return_batch)
return_batch = {}
with open(history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
if (
historyRange.start_index <= index
and index < historyRange.start_index + historyRange.batch_count
):
return_batch[t] = vehicles_state
send_data_conn.close()
@property
def all_timesteps(self):
return self._all_timesteps
@property
def history_file_path(self):
return self._history_file_path
@property
def traffic_history(self):
return {**self._current_traffic_history, **self._prev_batch_history}
def _prepare_next_batch(self):
self._request_queue.put(
RequestHistoryRange(
start_index=self._range_start,
batch_count=self._batch_size,
)
)
self._range_start += self._batch_size
def fetch_history_at_timestep(self, timestep: str):
if timestep not in self._all_timesteps:
return {}
elif timestep in self.traffic_history:
return self.traffic_history[timestep]
# ask child process to prepare the next batch:
self._prepare_next_batch()
self._prev_batch_history = self._current_traffic_history
# receives the previous batch child process prepared
self._current_traffic_history = self._receive_data_conn.recv()
if timestep in self._current_traffic_history:
return self._current_traffic_history[timestep]
# no history exists at requested timestamp
return {}
@staticmethod
def apply_map_location_offset(position, map_offset):
return [pos + map_offset[i] for i, pos in enumerate(position[:2])]
@staticmethod
def fetch_agent_missions(
history_file_path: str, scenario_root_path: str, mapLocationOffset
):
assert os.path.isdir(scenario_root_path)
history_mission_filepath = os.path.join(
scenario_root_path, "history_mission.pkl"
)
if not os.path.exists(history_mission_filepath):
history_mission = {}
else:
with open(history_mission_filepath, "rb") as f:
history_mission = pickle.load(f)
if history_file_path in history_mission:
return history_mission[history_file_path]
vehicle_missions = {}
with open(history_file_path, "rb") as f:
for t, vehicles_state in ijson.kvitems(f, "", use_float=True):
for vehicle_id in vehicles_state:
if vehicle_id in vehicle_missions:
continue
vehicle_missions[vehicle_id] = scenario.Mission(
start=scenario.Start(
Traffic_history_service.apply_map_location_offset(
vehicles_state[vehicle_id]["position"],
mapLocationOffset,
),
scenario.Heading(vehicles_state[vehicle_id]["heading"]),
),
goal=scenario.EndlessGoal(),
start_time=float(t),
)
history_mission[history_file_path] = vehicle_missions
# update cached history_mission_file
with open(history_mission_filepath, "wb") as f:
pickle.dump(history_mission, f)
return vehicle_missions
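# --- Smoke test (hypothetical, not part of SMARTS) -------------------------------
# Writes a tiny traffic-history JSON file and fetches one timestep through the
# batching machinery above. Assumes a fork-capable platform (e.g. Linux), since
# the class shares its Pipe/Queue with the child process by inheritance.
if __name__ == "__main__":
    import json
    import tempfile

    history = {str(t): {"veh-1": {"position": [0.0, 0.0], "heading": 0.0}} for t in range(5)}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(history, f)
        history_path = f.name

    service = Traffic_history_service(history_path)
    print(sorted(service.all_timesteps))           # ['0', '1', '2', '3', '4']
    print(service.fetch_history_at_timestep("0"))  # {'veh-1': {'position': [...], 'heading': 0.0}}
    service.teardown()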
| 37.245283
| 90
| 0.630699
| 909
| 7,896
| 5.179318
| 0.267327
| 0.050552
| 0.054163
| 0.02825
| 0.20497
| 0.129354
| 0.09452
| 0.075828
| 0.041419
| 0.033135
| 0
| 0.001997
| 0.302305
| 7,896
| 211
| 91
| 37.421801
| 0.852605
| 0.198075
| 0
| 0.198676
| 0
| 0
| 0.014673
| 0
| 0
| 0
| 0
| 0
| 0.013245
| 1
| 0.07947
| false
| 0.006623
| 0.046358
| 0.033113
| 0.238411
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98bd3099195cf49ba522ba023294ea3a974ffe7f
| 1,599
|
py
|
Python
|
calvin/runtime/south/plugins/media/defaultimpl/image.py
|
josrolgil/exjobbCalvin
|
976459eaa50246586360c049b9880d753623d574
|
[
"Apache-2.0"
] | 1
|
2016-05-10T22:36:31.000Z
|
2016-05-10T22:36:31.000Z
|
calvin/runtime/south/plugins/media/defaultimpl/image.py
|
josrolgil/exjobbCalvin
|
976459eaa50246586360c049b9880d753623d574
|
[
"Apache-2.0"
] | null | null | null |
calvin/runtime/south/plugins/media/defaultimpl/image.py
|
josrolgil/exjobbCalvin
|
976459eaa50246586360c049b9880d753623d574
|
[
"Apache-2.0"
] | null | null | null |
import pygame
from StringIO import StringIO
import cv2
import os
import numpy
class Image(object):
"""
Image object
"""
def __init__(self):
self.display = None
def show_image(self, image, width, height):
"""
Show image
"""
size = (width, height)
self.display = pygame.display.set_mode(size, 0)
self.snapshot = pygame.surface.Surface(size, 0, self.display)
img = pygame.image.load(StringIO(image))
self.display.blit(img, (0, 0))
pygame.display.flip()
def detect_face(self, image):
linux_prefix = "/usr/share/opencv"
mac_prefix = "/usr/local/share/OpenCV"
suffix = "/haarcascades/haarcascade_frontalface_default.xml"
linux_path = linux_prefix + suffix
mac_path = mac_prefix + suffix
if os.path.exists(linux_path) :
cpath = linux_path
elif os.path.exists(mac_path) :
cpath = mac_path
else :
raise Exception("No Haarcascade found")
classifier = cv2.CascadeClassifier(cpath)
jpg = numpy.fromstring(image, numpy.int8)
image = cv2.imdecode(jpg, 1)
faces = classifier.detectMultiScale(image)
if len(faces) > 0 :
for (x,y,w,h) in faces :
if w < 120 :
# Too small to be a nearby face
continue
return True
return False
def close(self):
"""
Close display
"""
if self.display is not None:
pygame.display.quit()
| 26.65
| 69
| 0.56035
| 182
| 1,599
| 4.818681
| 0.461538
| 0.062714
| 0.020525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012357
| 0.342089
| 1,599
| 59
| 70
| 27.101695
| 0.821293
| 0.042527
| 0
| 0
| 0
| 0
| 0.0742
| 0.049013
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.125
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98bd3fd17ab9f4b238b6d43814353c33f950c7b3
| 3,340
|
py
|
Python
|
durin/models.py
|
mlodic/django-rest-durin
|
b31a7257fb9765a4928c08bb1e68e80f48159229
|
[
"MIT"
] | null | null | null |
durin/models.py
|
mlodic/django-rest-durin
|
b31a7257fb9765a4928c08bb1e68e80f48159229
|
[
"MIT"
] | null | null | null |
durin/models.py
|
mlodic/django-rest-durin
|
b31a7257fb9765a4928c08bb1e68e80f48159229
|
[
"MIT"
] | null | null | null |
import binascii
from os import urandom
import humanize
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from durin.settings import durin_settings
from durin.signals import token_renewed
User = settings.AUTH_USER_MODEL
def _create_token_string() -> str:
return binascii.hexlify(
urandom(int(durin_settings.TOKEN_CHARACTER_LENGTH / 2))
).decode()
class Client(models.Model):
name = models.CharField(
max_length=64,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("A unique identification name for the client."),
)
token_ttl = models.DurationField(
null=False,
default=durin_settings.DEFAULT_TOKEN_TTL,
help_text=_(
"""
Token Time To Live (TTL) in timedelta. Format: <em>DAYS HH:MM:SS</em>.
"""
),
)
def __str__(self):
td = humanize.naturaldelta(self.token_ttl)
return "({0}, {1})".format(self.name, td)
class AuthTokenManager(models.Manager):
def create(self, user, client, delta_ttl=None):
token = _create_token_string()
if delta_ttl is not None:
expiry = timezone.now() + delta_ttl
else:
expiry = timezone.now() + client.token_ttl
instance = super(AuthTokenManager, self).create(
token=token, user=user, client=client, expiry=expiry
)
return instance
class AuthToken(models.Model):
class Meta:
constraints = [
models.UniqueConstraint(
fields=["user", "client"], name="unique token for user per client"
)
]
objects = AuthTokenManager()
token = models.CharField(
max_length=durin_settings.TOKEN_CHARACTER_LENGTH,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("Token is auto-generated on save."),
)
user = models.ForeignKey(
User,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
client = models.ForeignKey(
Client,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
created = models.DateTimeField(auto_now_add=True)
expiry = models.DateTimeField(null=False)
def renew_token(self, renewed_by):
new_expiry = timezone.now() + self.client.token_ttl
self.expiry = new_expiry
self.save(update_fields=("expiry",))
token_renewed.send(
sender=renewed_by,
username=self.user.get_username(),
token_id=self.pk,
expiry=new_expiry,
)
return new_expiry
@property
def expires_in(self) -> str:
if self.expiry:
td = self.expiry - self.created
return humanize.naturaldelta(td)
else:
return "N/A"
@property
def has_expired(self) -> bool:
return timezone.now() > self.expiry
def __repr__(self) -> str:
return "({0}, {1}/{2})".format(
self.token, self.user.get_username(), self.client.name
)
def __str__(self) -> str:
return self.token
| 26.299213
| 82
| 0.606886
| 382
| 3,340
| 5.112565
| 0.314136
| 0.02765
| 0.028674
| 0.038914
| 0.147465
| 0.113671
| 0.113671
| 0.113671
| 0.113671
| 0.113671
| 0
| 0.003381
| 0.291617
| 3,340
| 126
| 83
| 26.507937
| 0.822063
| 0
| 0
| 0.205882
| 0
| 0
| 0.055281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.088235
| 0.039216
| 0.372549
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98bf3939045052dd4fba91a19ad1fdf6be1101a5
| 640
|
py
|
Python
|
PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py
|
AngelLiang/PP4E
|
3a7f63b366e1e4700b4d2524884696999a87ba9d
|
[
"MIT"
] | null | null | null |
"set operations for multiple sequences"
def intersect(*args):
res = []
for x in args[0]: # scan the first list
for other in args[1:]: # for all other arguments
if x not in other: break # this item in each one?
else:
res.append(x) # add common items to the end
return res
def union(*args):
res = []
for seq in args: # for all sequence-arguments
for x in seq: # for all nodes in argument
if not x in res:
res.append(x) # add new items to result
return res
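# A small self-test (not in the original PP4E listing) showing both helpers;
# run the module directly to see the results.
if __name__ == '__main__':
    print(intersect("SPAM", "SCAM"))                 # ['S', 'A', 'M']
    print(union("SPAM", "SCAM"))                     # ['S', 'P', 'A', 'M', 'C']
    print(intersect([1, 2, 3], (1, 4), range(5)))    # [1]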
| 33.684211
| 68
| 0.504688
| 86
| 640
| 3.755814
| 0.488372
| 0.027864
| 0.06192
| 0.080495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005464
| 0.428125
| 640
| 18
| 69
| 35.555556
| 0.877049
| 0.328125
| 0
| 0.375
| 0
| 0
| 0.08026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98c0a5642acac614148ed6f1d7bcaa9979233d3b
| 8,950
|
py
|
Python
|
scripts/fast_queue.py
|
ourresearch/openalex-guts
|
f6c3e9992361e4bb1dbe76fbfb644c80f081319a
|
[
"MIT"
] | 48
|
2021-11-20T08:17:53.000Z
|
2022-03-19T13:57:15.000Z
|
scripts/fast_queue.py
|
ourresearch/openalex-guts
|
f6c3e9992361e4bb1dbe76fbfb644c80f081319a
|
[
"MIT"
] | null | null | null |
scripts/fast_queue.py
|
ourresearch/openalex-guts
|
f6c3e9992361e4bb1dbe76fbfb644c80f081319a
|
[
"MIT"
] | 2
|
2022-01-04T16:28:48.000Z
|
2022-02-05T21:25:01.000Z
|
import argparse
from time import sleep, time
from collections import defaultdict
from sqlalchemy import orm, text, insert, delete
from sqlalchemy.orm import selectinload
import models
from app import db
from app import logger
from scripts.queue import JsonWorks, JsonAuthors, JsonConcepts, JsonInstitutions, JsonVenues
from util import elapsed
def run(**kwargs):
entity_type = kwargs.get("entity")
method_name = kwargs.get("method")
if entity_type == "work" and method_name == "add_everything":
queue_table = "queue.work_add_everything"
elif method_name == "store":
queue_table = f"queue.{entity_type.lower()}_store"
else:
queue_table = f"queue.{method_name.lower()}"
if single_id := kwargs.get('id'):
if objects := get_objects(entity_type, [single_id]):
logger.info(f'found object {objects[0]}')
store_objects(objects)
db.session.commit()
else:
logger.warn(f'found no object with id {single_id}')
else:
objects_updated = 0
limit = kwargs.get('limit')
chunk = kwargs.get('chunk')
total_count = 0
while limit is None or objects_updated < limit:
loop_start = time()
if object_ids := fetch_queue_chunk_ids(queue_table, chunk):
objects = get_objects(entity_type, object_ids)
for obj in objects:
method_start_time = time()
total_count += 1
print(f"*** #{total_count} starting {obj}.{method_name}() method")
method_to_run = getattr(obj, method_name)
method_to_run()
print(f">>> finished {obj}.{method_name}(). took {elapsed(method_start_time, 4)} seconds")
# print(1/0)
logger.info('committing')
start_time = time()
if method_name == "store":
store_json_objects(objects)
else:
db.session.commit() # fail loudly for now
logger.info(f'commit took {elapsed(start_time, 4)}s')
finish_object_ids(queue_table, object_ids)
objects_updated += len(objects)
logger.info(f'processed chunk of {chunk} objects in {elapsed(loop_start, 2)} seconds')
else:
logger.info('nothing ready in the queue, waiting 5 seconds...')
sleep(5)
def store_json_objects(objects):
delete_dict_all_objects = defaultdict(list)
insert_dict_all_objects = defaultdict(list)
for count, obj in enumerate(objects):
obj.delete_dict = defaultdict(list)
for row in obj.insert_dicts:
for table_name, insert_dict in row.items():
insert_dict_all_objects[table_name] += [insert_dict]
obj.delete_dict[table_name] += [insert_dict["id"]]
for table_name, ids in obj.delete_dict.items():
delete_dict_all_objects[table_name] += ids
start_time = time()
for table_name, delete_ids in delete_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(delete(my_table).where(my_table.id.in_(delete_ids)))
db.session.commit()
print("delete done")
for table_name, all_insert_strings in insert_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(insert(my_table).values(all_insert_strings))
db.session.commit()
print("insert and commit took {} seconds".format(elapsed(start_time, 2)))
def fetch_queue_chunk_ids(queue_table, chunk_size):
text_query = f"""
with chunk as (
select id
from {queue_table}
where started is null
order by
finished asc nulls first,
rand
limit :chunk
for update skip locked
)
update {queue_table}
set started = now()
from chunk
where {queue_table}.id = chunk.id
returning chunk.id;
"""
logger.info(f'getting {chunk_size} ids from the queue')
start_time = time()
ids = [
row[0] for row in
db.engine.execute(text(text_query).bindparams(chunk=chunk_size).execution_options(autocommit=True)).all()
]
logger.info(f'got {len(ids)} ids from the queue in {elapsed(start_time, 4)}s')
logger.info(f'got these ids: {ids}')
return ids
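# For reference, a minimal queue-table shape that satisfies the SQL above
# (the column names come from the query itself; types and the index are assumptions):
#
#   create table queue.work_add_everything (
#       id       bigint primary key,
#       rand     double precision,
#       started  timestamp,
#       finished timestamp
#   );
#   create index on queue.work_add_everything (finished nulls first, rand) where started is null;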
def finish_object_ids(queue_table, object_ids):
# logger.info(f'finishing queue chunk')
start_time = time()
query_text = f'''
update {queue_table}
set finished = now(), started=null
where id = any(:ids)
'''
db.session.execute(text(query_text).bindparams(ids=object_ids))
db.session.commit()
# logger.info(f'finished saving finish_objects in {elapsed(start_time, 4)}s')
def get_objects(entity_type, object_ids):
logger.info(f'getting {len(object_ids)} objects')
start_time = time()
if entity_type == "work":
objects = db.session.query(models.Work).options(
selectinload(models.Work.records).selectinload(models.Record.journals).raiseload('*'),
selectinload(models.Work.records).raiseload('*'),
selectinload(models.Work.locations),
selectinload(models.Work.journal).raiseload('*'),
selectinload(models.Work.references).raiseload('*'),
selectinload(models.Work.references_unmatched).raiseload('*'),
selectinload(models.Work.mesh),
selectinload(models.Work.counts_by_year).raiseload('*'),
selectinload(models.Work.abstract),
selectinload(models.Work.extra_ids).raiseload('*'),
selectinload(models.Work.related_works).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).selectinload(models.Author.orcids).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).raiseload('*'),
selectinload(models.Work.concepts).selectinload(models.WorkConcept.concept).raiseload('*'),
selectinload(models.Work.concepts_full).raiseload('*'),
orm.Load(models.Work).raiseload('*')
).filter(models.Work.paper_id.in_(object_ids)).all()
elif entity_type == "author":
objects = db.session.query(models.Author).options(
selectinload(models.Author.counts_by_year_papers),
selectinload(models.Author.counts_by_year_citations),
selectinload(models.Author.alternative_names),
selectinload(models.Author.author_concepts),
selectinload(models.Author.orcids).selectinload(models.AuthorOrcid.orcid_data),
selectinload(models.Author.last_known_institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Author.last_known_institution).raiseload('*'),
orm.Load(models.Author).raiseload('*')
).filter(models.Author.author_id.in_(object_ids)).all()
elif entity_type == "venue":
objects = db.session.query(models.Venue).options(
selectinload(models.Venue.counts_by_year_papers),
selectinload(models.Venue.counts_by_year_citations),
orm.Load(models.Venue).raiseload('*')
).filter(models.Venue.journal_id.in_(object_ids)).all()
elif entity_type == "institution":
objects = db.session.query(models.Institution).filter(models.Institution.affiliation_id.in_(object_ids)).all()
elif entity_type == "concept":
objects = db.session.query(models.Concept).filter(models.Concept.field_of_study_id.in_(object_ids)).all()
logger.info(f'got {len(objects)} objects in {elapsed(start_time, 4)}s')
return objects
# python -m scripts.fast_queue --entity=work --method=add_everything --limit=3
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run fast queue.")
parser.add_argument('--entity', type=str, help="the entity type to run")
parser.add_argument('--method', type=str, help="the method to run")
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many objects to work on")
parser.add_argument(
'--chunk', "-ch", nargs="?", default=100, type=int, help="how many objects to take off the queue at once"
)
parsed_args = parser.parse_args()
run(**vars(parsed_args))
| 42.216981
| 149
| 0.639777
| 1,063
| 8,950
| 5.195673
| 0.20508
| 0.117328
| 0.067717
| 0.072968
| 0.324642
| 0.22868
| 0.168749
| 0.124932
| 0.084737
| 0.026435
| 0
| 0.002931
| 0.237654
| 8,950
| 211
| 150
| 42.417062
| 0.806537
| 0.024693
| 0
| 0.121387
| 0
| 0
| 0.182162
| 0.017769
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028902
| false
| 0
| 0.057803
| 0
| 0.098266
| 0.023121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98c1fbeb0d5441c90960a350fd079ea801185651
| 2,298
|
py
|
Python
|
scripts/collect_timelines1.py
|
tedhchen/twitter_timeline_tools
|
bc21e8c7c4e976409281e2697e1ec75044648eb8
|
[
"MIT"
] | null | null | null |
scripts/collect_timelines1.py
|
tedhchen/twitter_timeline_tools
|
bc21e8c7c4e976409281e2697e1ec75044648eb8
|
[
"MIT"
] | null | null | null |
scripts/collect_timelines1.py
|
tedhchen/twitter_timeline_tools
|
bc21e8c7c4e976409281e2697e1ec75044648eb8
|
[
"MIT"
] | null | null | null |
# Prep
import json, configparser, pickle, csv, logging, os
import pandas as pd
from tweepy import AppAuthHandler, API, Cursor
# Reading in configuration
params = configparser.ConfigParser()
params.read('config.ini')
# Functions
# Takes config file and returns authenticated api object
def twitter_auth(config):
auth = AppAuthHandler(params['keys']['key'], params['keys']['secret'])
api = API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)
return api
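# Example config.ini layout assumed by twitter_auth() above (placeholder values):
#
#   [keys]
#   key = YOUR_TWITTER_APP_KEY
#   secret = YOUR_TWITTER_APP_SECRET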
# Get relevant user ids
def get_ids(path, subset = None):
df = pd.read_csv(path, header = 0, dtype = {'user': 'object', 'subset': 'object'})
if subset != None:
df.user = df.user[df['subset'] == subset]
return list(df.user)
# Takes user ids and writes out a txt file with each user's status JSONs
def get_timelines(users, api, outfolder):
i = 0
for user in users:
timeline = []
try:
for status in Cursor(api.user_timeline, user_id = user, include_rts = True, exclude_replies = False, count = 200, tweet_mode = 'extended').items():
timeline.append(status)
timeline = [json.dumps(line._json) for line in timeline]
filename = 'timeline_' + user + '.txt'
with open(os.path.join(outfolder, filename), 'a', encoding = 'utf-8', newline = '') as outfile:
for line in timeline:
outfile.write(line + '\n')
except Exception as e:
logging.exception("Exception occurred when working with user id: " + user + '.')
i += 1
if i % 100 == 0:
print('Finished ' + str(i) + ' users.')
return None
def retry_missed_users(log, api, outfolder):
missed = []
with open(log, 'r') as infile:
for line in infile:
if 'Exception occurred when working with user id:' in line:
missed.append(line[79:-2])
get_timelines(missed, api, outfolder)
# Running script
# Setting up logger
# Note: 'logfile' (like 'path', 'subset', and 'outpath' below) is expected to be defined before this point.
logging.basicConfig(filename = logfile, filemode = 'a', format = '(%(asctime)s) %(levelname)s: %(message)s', level = logging.INFO)
# Authenticating api
api = twitter_auth(params)
# Get users from pre-parsed data
# csv file with:
# user, subset
# ..., ...
# subset is just a way to subset users from the csv file
# if subset == None, then no subsetting is performed
users = get_ids(path, subset)
# Getting timelines
get_timelines(users, api, outpath)
# Double checking errors
retry_missed_users(logfile, api, outpath)
| 31.479452
| 150
| 0.700174
| 333
| 2,298
| 4.753754
| 0.444444
| 0.018951
| 0.017056
| 0.018951
| 0.04801
| 0.04801
| 0.04801
| 0
| 0
| 0
| 0
| 0.007341
| 0.170148
| 2,298
| 72
| 151
| 31.916667
| 0.822758
| 0.197998
| 0
| 0
| 0
| 0
| 0.128219
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.068182
| 0
| 0.227273
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98c615953ef0bbcfd93b9c52b023ec8e35bea466
| 115,101
|
py
|
Python
|
trade_remedies_caseworker/cases/views.py
|
uktrade/trade-remedies-caseworker
|
fece9fde3cb241d96cbc1aaf7188d976f8621600
|
[
"MIT"
] | 1
|
2020-08-27T09:53:00.000Z
|
2020-08-27T09:53:00.000Z
|
trade_remedies_caseworker/cases/views.py
|
uktrade/trade-remedies-caseworker
|
fece9fde3cb241d96cbc1aaf7188d976f8621600
|
[
"MIT"
] | 7
|
2020-10-14T16:23:42.000Z
|
2021-09-24T14:18:47.000Z
|
trade_remedies_caseworker/cases/views.py
|
uktrade/trade-remedies-caseworker
|
fece9fde3cb241d96cbc1aaf7188d976f8621600
|
[
"MIT"
] | null | null | null |
import itertools
import json
import logging
import re
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django_chunk_upload_handlers.clam_av import VirusFoundInFileException
from core.base import GroupRequiredMixin
from core.utils import (
deep_index_items_by,
deep_index_items_by_exists,
get,
key_by,
index_users_by_group,
compact_list,
submission_contact,
public_login_url,
parse_notify_template,
parse_api_datetime,
pluck,
to_json,
from_json,
deep_update,
internal_redirect,
is_date,
notify_footer,
notify_contact_email,
)
from django_countries import countries
from django.conf import settings
from cases.submissions import SUBMISSION_TYPE_HELPERS, get_submission_deadline
from cases.utils import decorate_orgs
from core.constants import (
ALL_REGION_ALLOWED_TYPE_IDS,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
SECURITY_GROUPS_TRA,
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUPS_TRA_ADMINS,
SECURITY_GROUP_ORGANISATION_OWNER,
SUBMISSION_TYPE_QUESTIONNAIRE,
SUBMISSION_TYPE_APPLICATION,
SUBMISSION_NOTICE_TYPE_INVITE,
SUBMISSION_NOTICE_TYPE_DEFICIENCY,
SUBMISSION_TYPE_THIRD_PARTY,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_APPLICANT,
CASE_ROLE_PREPARING,
DIRECTION_TRA_TO_PUBLIC,
)
from trade_remedies_client.mixins import TradeRemediesAPIClientMixin
from trade_remedies_client.exceptions import APIException
logger = logging.getLogger(__name__)
org_fields = json.dumps(
{
"Organisation": {
"id": 0,
"has_non_draft_subs": 0,
"gov_body": 0,
"has_roi": 0,
}
}
)
class CasesView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
template_name = "cases/cases.html"
def get(self, request, *args, **kwargs):
list_mode = request.GET.get("tab", "my")
panel_layout = self.client(self.request.user).get_system_boolean("PRE_RELEASE_PANELS")
fields = {
"Case": {
"id": 0,
"user_case": 0,
"name": 0,
"reference": 0,
"created_at": 0,
"type": {
"name": 0,
"acronym": 0,
"colour": 0,
"reference": 0,
"applicant": {"organisation": 0, "name": 0, "id": 0},
},
"applicant": {
"organisation": {
"name": 0,
"id": 0,
}
},
"stage": {"name": 0},
"case_status": {"next_action": 0, "next_notice_due": 0},
}
}
if list_mode == "archived":
fields = deep_update(
fields,
{
"Case": {
"workflow_state": {
"MEASURE_EXPIRY": 0,
"DETERMINATION_ACTIVE_DATE": 0,
}
}
},
)
cases = self.client(request.user).get_cases(
archived=list_mode == "archived",
all_cases=list_mode == "all",
new_cases=list_mode == "new",
fields=json.dumps(fields),
)
tabs = {
"value": list_mode,
"tabList": [
{"label": "Your cases", "value": "my", "sr_text": "Show your cases"},
{"label": "Open cases", "value": "all", "sr_text": "Show open cases"},
{
"label": "New applications",
"value": "new",
"sr_text": "Show new applications",
},
{
"label": "Archived",
"value": "archived",
"sr_text": "Show archived cases",
},
],
}
template_name = self.template_name if panel_layout else "cases/cases_old.html"
body_class = "full-width kill-footer" if panel_layout else "full-width"
return render(
request,
template_name,
{
"body_classes": body_class,
"cases": cases,
"tabs": tabs,
},
)
class CaseBaseView(
LoginRequiredMixin,
GroupRequiredMixin,
PermissionRequiredMixin,
TemplateView,
TradeRemediesAPIClientMixin,
):
permission_required = []
groups_required = SECURITY_GROUPS_TRA
supress_nav_section = False
def dispatch(self, *args, **kwargs):
if self.request.user.is_authenticated:
self._client = self.client(self.request.user)
self.case_id = kwargs.get("case_id")
return super().dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
self.kwargs = kwargs
self.organisation_id = kwargs.get("organisation_id")
self.request = request
self.user_token = request.user.token
case_fields = json.dumps(
{
"Case": {
"id": 0,
"name": 0,
"initiated_at": 0,
"decision_to_initiate,name": 0,
"reference": 0,
"sequence": 0,
"type": 0,
"archived_at": 0,
"archive_reason": {"name": 0},
"submission_count": 0,
"participant_count": 0,
"stage": {"name": 0},
"case_status": 0,
"organisation": {"id": 0, "name": 0},
}
}
)
self.case = self._client.get_case(self.case_id, fields=case_fields)
self.document_count = self._client.get_case_document_count(self.case_id)
self.start = int(request.GET.get("start", 0))
self.limit = int(request.GET.get("limit", 20))
content_id = self.kwargs.get("nav_section_id")
context = {
"case": self.case,
"case_id": self.case_id,
"document_count": self.document_count,
"content": self._client.get_case_content(self.case_id, content_id=content_id),
"tree": self._client.get_nav_section(self.case_id, selected_content=content_id),
"body_classes": "full-width",
"panel_layout": self._client.get_system_boolean("PRE_RELEASE_PANELS"),
"organisation_id": self.organisation_id,
"submission_group_name": "submission",
"alert": request.GET.get("alert"),
"user": request.user,
}
deep_update(context, self.add_page_data())
if context.get("redirect"):
return redirect(context.get("redirect"))
return render(request, self.template_name, context)
def add_page_data(self):
return {}
def get_documents(self, submission, all_versions=None):
result = self._client.get_submission_documents(
self.case_id, submission.get("id"), all_versions=all_versions
)
all_documents = result.get("documents", [])
deficiency_docs = result.get("deficiency_documents", [])
if all_versions:
# If this submission has an immediate ancestor, get the docs from that to mark status
docs_by_submission = deep_index_items_by(all_documents, "version")
this_version = int(submission.get("version"))
this_sub = docs_by_submission.get(str(this_version))
sub_docs = this_sub[0].get("documents")
# we have a list of the submissions that make up a family - id, version and documents.
if this_version > 1:
parent_sub = docs_by_submission.get(str(this_version - 1))
parent_docs = parent_sub and parent_sub[0].get("documents")
parent_doc_idx = {}
for parent_doc in parent_docs:
doc_type = get(parent_doc, "type/name") + "|" + get(parent_doc, "name")
parent_doc_idx[doc_type] = parent_doc
for document in sub_docs:
document["parent"] = parent_doc_idx.get(
get(document, "type/name") + "|" + get(document, "name")
)
else:
sub_docs = all_documents
submission_documents = deep_index_items_by(sub_docs, "type/key")
document_conf_index = deep_index_items_by(
submission_documents.get("respondent", []), "confidential"
)
confidential = document_conf_index.get("true", [])
confidential.sort(key=lambda cf: cf.get("name"))
non_conf = document_conf_index.get("", [])
doc_index = key_by(confidential, "id")
non_conf.sort(key=lambda nc: get(get(doc_index, str(nc.get("parent_id"))), "name"))
return {
"caseworker": submission_documents.get("caseworker", []),
"respondent": submission_documents.get("respondent", []),
"loa": submission_documents.get("loa", []),
"deficiency": deficiency_docs,
"confidential": confidential,
"nonconfidential": non_conf,
}
def has_permission(self):
"""
Override this method to customize the way permissions are checked.
"""
perms = self.get_permission_required()
return not perms or self.request.user.has_perms(perms)
class CaseAdminView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/admin.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
case_users = self._client.get_case_users(self.case["id"])
context = {
"case_enums": case_enums,
"case": self.case,
"users": case_users,
"participants": self._client.get_case_participants(self.case_id),
}
return context
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
case = self._client.get_case(case_id)
update_spec = {}
if action == "initiation_flag_toggle":
if case["initiated_at"]:
update_spec["initiated_at"] = ""
else:
update_spec["initiated_at"] = timezone.now()
elif action == "set_case_stage":
update_spec["ignore_flow"] = request.POST.get("ignore_flow") or "false"
update_spec["stage_id"] = request.POST.get("stage_id")
elif action == "set_name":
update_spec["name"] = request.POST.get("name")
elif action == "set_case_type":
update_spec["stage_id"] = ""
update_spec["type_id"] = request.POST.get("type_id")
elif action == "toggle_archived":
if case.get("archived_at"):
update_spec["archived_at"] = ""
else:
update_spec["archived_at"] = timezone.now()
update_spec["archive_reason_id"] = request.POST.get("archive_reason_id")
elif action == "reset_initiation_decision":
update_spec["reset_initiation_decision"] = True
if update_spec:
response = self._client.update_case(case_id, update_spec)
return redirect(f"/case/{case_id}/admin/")
class CaseMilestoneDatesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/milestone_dates.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums(self.case_id)
case_milestones = self._client.case_milestones(self.case["id"])
existing_keys = [cm["key"] for cm in case_milestones]
context = {
"milestone_types": case_enums.get("milestone_types"),
"available_review_types": case_enums.get("available_review_types"),
"milestones": case_milestones,
"existing_milestones": existing_keys,
}
return context
def post(self, request, case_id, milestone_key=None):
milestone_key = milestone_key or request.POST.get("milestone_key")
date = request.POST.get("date")
response = self._client.set_case_milestone(case_id, milestone_key, date)
return redirect(f"/case/{case_id}/milestones/")
class CaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = []
template_name = "cases/case.html"
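    # Extra fields to request from the API on top of the standard case payload.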
extra_case_fields = json.dumps(
{
"Case": {
"applicant": {
"organisation": {
"id": 0,
"name": 0,
"primary_contact": {
"name": 0,
"email": 0,
"phone": 0,
"address": 0,
"post_code": 0,
"country": {"name": 0},
"has_user": 0,
"user": {"id": 0, "organisation": {"id": 0, "name": 0}},
},
}
},
"parent": {"id": 0, "name": 0, "reference": 0, "type": 0},
"workflow_state": {"LINKED_CASE_CONFIRM": 0},
"initiated_sequence": 0,
}
}
)
def add_page_data(self):
team = self._client.get_case_team_members(self.case_id)
team_by_group = index_users_by_group([member.get("user") for member in team])
group_order = [
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
]
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
return {
"audit": self._client.get_audit(
case_id=self.case_id, start=0, limit=20, milestone=True
),
"case_page": True,
"case": case_extras,
"team_groups": team_by_group,
"group_order": group_order,
"public_base_url": settings.PUBLIC_BASE_URL,
}
def post(self, request, case_id, *args, **kwargs):
self._client.set_case_data(case_id, {"name": request.POST.get("name")})
redirect = request.POST.get("redirect")
if redirect:
return internal_redirect(request.POST.get("redirect"), "/")
else:
return HttpResponse(json.dumps({"result": "ok"}), content_type="application/json")
class PartiesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/parties.html"
def add_page_data(self):
parties = []
roles = self._client.get_case_roles()
all_case_invites = self._client.get_contact_case_invitations(self.case_id)
all_participants = self._client.get_case_participants(self.case_id, fields=org_fields)
case_invites = deep_index_items_by(all_case_invites, "contact/id")
        invited = set()
        accepted = set()
for invite in all_case_invites:
org_id = invite.get("organisation", {}).get("id")
if invite.get("accepted_at"):
# note: accepted and invited are mutually exclusive
accepted.add(org_id)
else:
invited.add(org_id)
for role in roles:
_base = all_participants[role["key"]]
_base["key"] = role["key"]
_base["name"] = role["plural"]
if role["allow_cw_create"]:
_base["add_link"] = f"Add {role['name']}"
parties.append(_base)
return {
"party_types": parties,
"invites": case_invites,
"accepted_orgs": list(accepted),
"invited_orgs": list(invited),
"pre_release_invitations": self._client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"alert": self.request.GET.get("alert"),
}
class CaseTeamView(CaseBaseView):
permission_required = "can_assign_team"
template_name = "cases/team.html"
def add_page_data(self):
all_users = self._client.get_all_users(group_name="caseworker")
users_by_group = index_users_by_group(all_users)
team = self._client.get_case_team_members(self.case_id)
return {
"team": [member.get("user", {}).get("id") for member in team],
"tra_users": all_users,
"grouped_users": users_by_group,
"groups": SECURITY_GROUPS_TRA[1:],
"inactive_user_count": sum(user["active"] is False for user in all_users),
"singleton_groups": [
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_ADMINISTRATOR,
],
}
def post(self, request, case_id, *args, **kwargs):
user_ids = request.POST.getlist("user_id")
response = self._client.assign_case_team(case_id, user_ids)
return redirect(f"/case/{case_id}/")
class SubmissionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submissions.html"
show_global = False
sub_page = ""
def get_tab(self, role, party):
if not role.get("allow_cw_create"):
return role["key"]
return "sampled" if party.get("sampled") else "not_sampled"
def consolidate_submissions(
self, case, participants, submissions_by_party, counts, selected_tab
):
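        # Build the per-role submission listing: for each case role, attach the
        # parties whose submissions fall under the currently selected tab and
        # keep a running per-tab count. For tabs that map directly to a role
        # (e.g. awaiting approval) only that single role is returned.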
roles = []
single_role_return = None # for awaiting and rejected - only return that specific role
for role in self._client.get_case_roles():
role["participants"] = []
for party in participants.get(role["key"], {}).get("parties", []):
tab = self.get_tab(role, party)
submissions = submissions_by_party.get(party["id"], [])
submissions += submissions_by_party.get("", [])
if submissions:
counts[tab] = counts.get(tab, 0) + len(submissions)
if tab == selected_tab:
party["submissions"] = submissions
role["participants"].append(party)
if not party.get("gov_body"):
role["customer_parties"] = True
sort_key = (
"submissions/0/received_at"
if selected_tab == CASE_ROLE_AWAITING_APPROVAL
else "name"
)
role["participants"].sort(key=lambda pt: get(pt, sort_key) or "")
if role.get("key") == selected_tab:
single_role_return = role
if role.get("allow_cw_create"):
roles.append(role)
return [single_role_return] if single_role_return else roles
def get_name(self, participant):
return participant.get("name")
def flatten_participants(self, source):
participants = []
for role in source:
rec = source[role]
participants = participants + rec["parties"]
participants.sort(key=self.get_name)
return participants
def divide_submissions(self, submissions):
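        # Split submissions into three buckets for display: outgoing (already
        # sent), draft (unsent submissions still in their default state) and
        # incoming (everything else, excluding customer drafts that caseworkers
        # should not yet see, applications excepted). Each bucket is sorted
        # newest first.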
incoming = []
outgoing = []
draft = []
for submission in submissions:
if get(submission, "status/sent"):
outgoing.append(submission)
elif get(submission, "status/default") and get(submission, "type/direction") != 1:
draft.append(submission)
else:
if (
not get(submission, "status/draft")
or get(submission, "type/key") == "application"
): # customer draft should not be seen by investigators
incoming.append(submission)
return {
"incoming": sorted(incoming, key=lambda su: su.get("received_at") or "", reverse=True),
"outgoing": sorted(outgoing, key=lambda su: su.get("sent_at") or "", reverse=True),
"draft": sorted(draft, key=lambda su: su.get("created_at") or "", reverse=True),
}
def add_page_data(self):
tab = self.request.GET.get("tab", "sampled").lower()
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
submissions_by_type = deep_index_items_by(all_submissions, "type/name")
# Get submissions that have just been created by customer
# or are still in draft after creation
draft_submissions = deep_index_items_by(all_submissions, "status/default").get("true") or []
# Remove any that are back with the customer following deficiency
draft_first_version_submissions = (
deep_index_items_by(draft_submissions, "version").get("1") or []
)
# Exclude these drafts from our list
non_draft_submissions = [
sub for sub in all_submissions if sub not in draft_first_version_submissions
]
# draft applications are included to allow a heads up view
# to the caseworker before it's submitted
if submissions_by_type.get("application", [{}])[0].get("status", {}).get("default") is True:
submissions_by_type["application"][0]["tra_editable"] = True
non_draft_submissions += submissions_by_type["application"]
submissions_by_party = deep_index_items_by(non_draft_submissions, "organisation/id")
case_enums = self._client.get_all_case_enums()
invites = self._client.get_case_invite_submissions(self.case_id)
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
flat_participants = self.flatten_participants(participants)
counts = {}
if self.sub_page:
self.template_name = f"cases/submissions_{self.sub_page}.html"
tab = self.request.GET.get("tab", "incoming").lower()
elif self._client.get_system_boolean("PRE_NEW_SUBMISSION_PAGE"):
self.template_name = "cases/submissions_new.html"
context = {
"raw_participants": participants,
"submissions": submissions_by_type,
"participants": flat_participants,
"counts": counts,
"all_roles": self.consolidate_submissions(
self.case,
participants=participants,
submissions_by_party=submissions_by_party,
counts=counts,
selected_tab=tab,
),
"submission_types": case_enums["case_worker_allowed_submission_types"],
"invites": invites,
"tab": tab,
"submission_groups": self.divide_submissions(all_submissions),
"all_submissions": all_submissions,
}
# TODO: Temp handling of application vs ex_officio ones
if not submissions_by_type.get("application") and submissions_by_type.get(
"ex officio application"
):
context["submissions"]["application"] = submissions_by_type["ex officio application"]
return context
class SubmissionView(CaseBaseView):
"""
View and modify submissions
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
extra_case_fields = json.dumps(
{
"Case": {
"applicant": 0,
"product": 0,
"sources": 0,
}
}
)
def add_page_data_old(self):
alert = self.request.GET.get("alert") # indicates the submission has just been created
documents = []
submission = {}
submission_id = self.kwargs.get("submission_id")
third_party_invite = False
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
submission_type = submission["type"]
third_party_invite = submission_type["name"] == "Invite 3rd party"
self.organisation_id = submission["organisation"]["id"]
created_by_id = get(submission, "created_by/id")
if created_by_id:
full_user = self._client.get_user(created_by_id)
submission["created_by"]["organisation"] = get(full_user, "organisations/0")
submission_context = {}
if SUBMISSION_TYPE_HELPERS.get(submission_type["key"]):
submission_context = SUBMISSION_TYPE_HELPERS[submission_type["key"]](
submission, self.request.user
).get_context()
self.template_name = "cases/submission.html"
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
context = {
"submission": submission,
"template_name": f"{submission_type['key']}",
"documents": self.get_documents(submission=submission, all_versions=True),
"alert": alert,
"case": case_extras,
"third_party_invite": third_party_invite,
**submission_context,
}
if (
not submission
or not submission.get("status")
or submission.get("status", {}).get("default")
):
context["mode"] = "form"
else:
context["mode"] = "view"
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
context["organisation"] = self.organisation
context["organisation_id"] = str(self.organisation["id"])
return context
def get_all_participants(self, case_participants):
all_parties = []
roles = {}
for type_name, role_parties in case_participants.items():
parties = role_parties.get("parties")
if parties:
all_parties.extend(parties)
role = parties[0].get("role")
roles[role.get("key")] = role
return deep_index_items_by(all_parties, "sampled"), roles
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
submission = {}
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
parties, roles = self.get_all_participants(participants)
alert = self.request.GET.get("alert") # indicates the submission has just been created
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
submission_id = self.kwargs.get("submission_id")
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
_default = submission.get("status", {}).get("default")
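            # Submissions that are no longer in draft (and draft applications)
            # still use the legacy page; everything else uses the newer layout.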
if not _default or (
_default and submission["type"]["id"] == SUBMISSION_TYPE_APPLICATION
):
page_data = self.add_page_data_old()
return_data.update(page_data)
return return_data
self.organisation_id = submission["organisation"]["id"]
return_data.update(
{
"roles": roles,
"submission": submission,
"status": (submission.get("status") or {}).get("id"),
"alert": alert,
"documents": self.get_documents(submission=submission),
"role": submission.get("organisation_case_role") or {"name": "Public file"},
"participants": participants,
"all_participants": parties,
"json_data": json_data,
"selected_submission_type": submission.get("type", {}).get("key")
or "questionnaire",
}
)
else:
role = self.request.GET.get("for")
sampled = self.request.GET.get("sampled") == "sampled"
full_role = (
self._client.get_case_role(role)
if (role and role != "public")
else {"name": "Public file"}
)
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
# Get all draft submissions of this type
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
draft_submissions = (
deep_index_items_by(all_submissions, "status/default").get("true") or []
)
# draft_submissions_this_role = deep_index_items_by(draft_submissions,
# 'organisation_case_role/key').get('' if role == 'public' else role)
draft_submissions_this_role = deep_index_items_by(
draft_submissions, "organisation_id"
).get("")
return_data.update(
{
"submission": submission,
"submission_type_id": self.kwargs.get("submission_type_id")
or self.request.GET.get("submission_type_id"),
"submission_statuses": case_enums["submission_statuses"],
"statuses_by_type": case_enums["statuses_by_type"],
"selected_submission_type": self.request.GET.get("submission_type")
or "questionnaire",
"organisation_id": self.kwargs.get("organisation_id"),
"draft_submissions": draft_submissions_this_role,
"role": full_role,
}
)
if role == "public":
return_data.update(
{
"submission_types": case_enums["public_submission_types"],
"public": True,
"organisation_id": self.kwargs.get("organisation_id"),
}
)
else:
role_participants = participants.get(role, {}).get("parties", [])
                filtered = [
                    party
                    for party in role_participants
                    if party.get("sampled") == sampled and not party.get("gov_body")
                ]
return_data.update(
{
"submission_types": case_enums["case_worker_allowed_submission_types"],
"participants": participants,
"roles": roles,
}
)
self.organisation_id = self.organisation_id or self.request.GET.get("organisation_id")
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
return_data["organisation"] = self.organisation
return_data["organisation_id"] = str(self.organisation["id"])
# add errors from the url
errors = self.request.GET.get("errors")
if errors:
try:
return_data["errors"] = json.loads(errors)
            except Exception:
                # Malformed "errors" payload in the URL; render the page without it.
                pass
# Set up template to use
template_name = (
submission["type"]["key"]
if submission
else (role if role == "public" else "questionnaire")
)
return_data.update({"template_name": template_name, "mode": "form"})
return return_data
def post( # noqa: C901
self,
request,
case_id,
submission_id=None,
organisation_id=None,
*args,
**kwargs,
):
"""
Update an existing submission
"""
return_data = {"submission_id": str(submission_id)}
contact_id = request.POST.get("contact_id")
btn_value = request.POST.get("btn-value")
review = request.POST.get("review")
name = request.POST.get("name")
due_at = request.POST.get("due_at")
response_window_yn = request.POST.get("response_window_yn")
time_window = request.POST.get("time_window")
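        # Each "meta" form value is a JSON blob describing one file (confidential
        # flag, submission document type, ...); index them by file name and by
        # file id so the upload/attach handling below can look the details up.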
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
file_details_by_id = deep_index_items_by(meta, "file/id")
organisation_id = organisation_id or request.POST.get("organisation_id")
send_to = request.POST.get("send_to")
submission = self._client.get_submission(case_id, submission_id)
organisation_id = submission.get("organisation", {}).get("id")
status_id = request.POST.get("submission_status_id")
if submission_id and btn_value == "discard":
delete_submission_response = self._client.delete_submission(
case_id=case_id, submission_id=submission_id
)
return HttpResponse(
json.dumps({"redirect_url": f"/case/{case_id}/submissions/"}),
content_type="application/json",
)
# check if the update is for name or notify contact
if (
submission["name"] != name
or not submission["contact"]
or submission.get("contact", {}).get("id") != contact_id
):
if name is not None and not name:
return_data.update({"errors": '{"name":"You must enter a name"}'})
if due_at and not is_date(due_at):
return_data.update({"errors": '{"due_date":"Invalid date"}'})
if not return_data.get("errors"):
self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
name=name,
contact_id=contact_id, # TODO:not used
due_at=due_at,
time_window=time_window,
description=request.POST.get("description"),
url=request.POST.get("url"),
)
# API `update_submission` returns an incomplete submission
# (no documents) so we re-fetch the submission here.
submission = self._client.get_submission(case_id, submission_id)
return_data.update({"submission": submission})
if submission.get("id"):
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={
"name": "Questionnaire",
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
except (VirusFoundInFileException, APIException) as e:
redirect_url = f"/case/{case_id}/submission/{submission_id}/?"
if isinstance(e, VirusFoundInFileException):
redirect_url += "virus=true"
else:
redirect_url += f"upload_error={e}"
logger.warning(f"File upload aborted: {e}")
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
if case_files := request.POST.getlist("case_files"):
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={"submission_document_type": details.get("submission_document_type")},
document_id=case_file_id,
)
submission_group_name = get(submission, "type/key")
if btn_value in ["send", "publish", "withdraw"]:
if btn_value in ["publish", "withdraw"]:
result = self._client.set_submission_state(
case_id,
submission_id,
"sent"
if (btn_value == "send" or submission_group_name == "public")
else "",
{"publish": "issue", "withdraw": "un-issue"}[btn_value],
)
result = self._client.update_submission(
case_id=case_id, submission_id=submission_id
)
return_data.update(
{
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/?alert={btn_value}" # noqa: E301, E501
}
)
if btn_value == "sufficient":
# Set the submission to sufficient
result = self._client.set_submission_state(case_id, submission_id, btn_value)
return_data.update({"alert": "Submission approved"})
submission_type = submission["type"]
type_helpers = SUBMISSION_TYPE_HELPERS.get(submission_type["key"])
if type_helpers:
return_data.update(
type_helpers(submission, self.request.user).on_approve() or {}
)
# Update submission document approvals
self.update_submission_status(request.POST, submission)
# set any deficiency-notice parameters
updated = False
deficiency_notice_params = from_json(submission.get("deficiency_notice_params"))
send_to = request.POST.getlist("send_to")
if send_to:
deficiency_notice_params["send_to"] = send_to
updated = True
regex = r"^deficiency_notice_params_"
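        # Collect form fields prefixed "deficiency_notice_params_" into the
        # submission's JSON parameter blob; a value of "__remove" deletes that key.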
for param_key in request.POST:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = request.POST[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
if value == "__remove":
if get(deficiency_notice_params, matches[1]):
deficiency_notice_params.pop(matches[1])
else:
deficiency_notice_params[matches[1]] = value
if updated:
update_submission_response = self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
deficiency_notice_params=to_json(deficiency_notice_params),
)
if btn_value == "save-exit":
return_data.update({"redirect_url": f"/case/{case_id}/submissions"})
if deficiency_notice_params:
return_data.update(
{"redirect_url": f"/case/{case_id}/submission/{submission_id}"}
)
return HttpResponse(json.dumps(return_data), content_type="application/json")
def update_submission_status(self, request_params, submission):
"""Update submission document statuses.
For each document in the submission review, examine response to
establish if it was marked sufficient/deficient. Call API to update
submission document status if it has changed.
:param (dict) request_params: request parameters
:param (dict) submission: submission
"""
submission_docs = {doc["id"]: doc for doc in submission.get("documents")}
for doc_id in request_params:
if doc_id in submission_docs:
current_status = submission_docs[doc_id]["sufficient"]
new_status = request_params[doc_id] == "yes"
if current_status != new_status:
self._client.set_submission_document_state(
case_id=submission["case"]["id"],
submission_id=submission.get("id"),
document_id=doc_id,
status="sufficient" if new_status else "deficient",
                        # These flags live on the individual document record.
                        block_from_public_file=submission_docs[doc_id].get(
                            "block_from_public_file"
                        ),
                        block_reason=submission_docs[doc_id].get("block_reason"),
)
class SubmissionCreateView(SubmissionView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, *args, **kwargs):
btn_value = request.POST.get("btn-value")
send_to = request.POST.getlist("send_to")
organisation_id = request.POST.get("organisation_id")
submission_data = {
"submission_type": int(
request.POST.get("submission_type_id", SUBMISSION_TYPE_QUESTIONNAIRE)
),
"case_id": str(case_id),
"organisation_id": str(organisation_id) if organisation_id else None,
"contact_id": request.POST.getlist("contact_id"),
"public": request.POST.get("public"),
}
if send_to:
submission_data["deficiency_notice_params"] = to_json(
{"send_to": send_to, "case_role": request.POST.get("role_key")}
)
result = self._client.create_submission(**submission_data)
submission = result.get("submission", {}) if result else {}
return HttpResponse(
json.dumps(
{
"submission_id": submission.get("id"),
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/",
}
),
content_type="application/json",
)
class SubmissionDocumentView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, organisation_id=None, *args, **kwargs):
response = {}
document_list_json = request.POST.get("document_list")
if document_list_json:
document_list = json.loads(document_list_json)
for doc_id, doc_status in document_list.items():
logger.debug(f"update document state {doc_id}")
response = self._client.set_submission_document_state(
case_id=case_id,
submission_id=submission_id,
document_id=doc_id,
status=doc_status["status"],
block_from_public_file=doc_status["block_from_public_file"],
block_reason=doc_status["block_reason"],
)
return HttpResponse(json.dumps(response), content_type="application/json")
def delete(self, request, case_id, submission_id, document_id, *args, **kwargs):
response = self._client.detach_document(
case_id=case_id, submission_id=submission_id, document_id=document_id
)
return HttpResponse(json.dumps(response), content_type="application/json")
class SubmissionStatusView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
status_id = request.POST.get("submission_status_id")
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
deficiency_documents=request.FILES.getlist("deficiency_document"),
issue=request.POST.get("issue"),
)
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return redirect(f"/case/{case_id}/submission/{submission_id}/")
class SubmissionApprovalView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
def add_page_data(self):
submission_id = self.kwargs.get("submission_id")
submission = self._client.get_submission(self.case_id, submission_id)
sub_documents = self._client.get_submission_documents(self.case_id, submission_id)
documents = sub_documents.get("documents", [])
submission.update(sub_documents)
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
status_options = status_map.get(str(submission_type_id), {}).get("keys", [])
status_context = status_map.get(str(submission_type_id))
submission_documents = self.get_documents(submission=submission)
context = {
"template_name": submission["type"]["key"],
"mode": "approval",
"submission": submission,
"case_enums": case_enums,
"status_context": status_context,
"documents": submission_documents,
}
return context
class SubmissionDeficiencyView(CaseBaseView):
"""
Set the submission into a deficiency status and notify the party about it.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
submission_type = submission["type"]
contact = submission_contact(submission)
contact_name = contact.get("name")
organisation_name = submission.get("organisation", {}).get("name") or (
contact.get("organisation") or {}
).get("name")
notification_template = self._client.get_notification_template(
"NOTIFY_SUBMISSION_DEFICIENCY"
)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = submission["case"]["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_name": submission["case"]["name"],
"case_number": case_number,
"company_name": organisation_name,
"deadline": due_at or "No deadline assigned",
"submission_type": submission.get("type", {}).get("name"),
"login_url": public_login_url(),
"footer": footer,
}
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/status/notify/",
"form_title": f"Deficiency Notice for {organisation_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as a future example
# 'full_name': {'title': 'Name'},
},
"notification_template": notification_template,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
"parsed_template": parse_notify_template(notification_template["body"], values),
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
notify_keys = [
"full_name",
"case_name",
"case_number",
"company_name",
"deadline",
"submission_type",
"login_url",
]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if request.POST.get("contact_id"):
notify_data["contact_id"] = request.POST["contact_id"]
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
        status_context = status_map.get(str(submission_type_id)) or {}
        status_id = status_context.get("NO")
error = None
if status_id:
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_DEFICIENCY,
)
# reset the submission id to redirect to the new clone if available
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
}
),
content_type="application/json",
)
# If there's no deficiency state for this submission type, return an error
return HttpResponse(
json.dumps(
{
"error": "No deficiency status for this submission type",
}
),
content_type="application/json",
)
class SubmissionVerifyBaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def get_submission_id(self, case_id=None, organisation_id=None):
submission_id = self.kwargs.get("submission_id")
if not submission_id:
# If this is called from the party page - there is no submission id
# so find from the org/case
submissions = self._client.get_submissions_public(
organisation_id=organisation_id,
case_id=case_id,
fields=json.dumps({"id": 0, "type": {"key": 0}}),
)
for submission in submissions:
if get(submission, "type/key") in ["interest", "application"]:
submission_id = submission.get("id")
break # we only want one reg-of-interest submission
return submission_id
def update_submission_json(self, case_id, submission, params):
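        # Merge any "deficiency_notice_params_"-prefixed values from params into
        # the submission's deficiency_notice_params and save if anything changed.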
regex = r"^deficiency_notice_params_"
deficiency_notice_params = submission.get("deficiency_notice_params") or {}
updated = False
response = None
for param_key in params:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = params[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
deficiency_notice_params[matches[1]] = value
if updated:
response = self._client.update_submission(
case_id=case_id,
submission_id=get(submission, "id"),
deficiency_notice_params=to_json(deficiency_notice_params),
)
return response
class SubmissionVerifyViewTasks(SubmissionVerifyBaseView):
"""
    Used to verify a user's and organisation's admission to a case.
"""
template_name = "cases/verify/submission_verify_tasks.html"
submission_fields = json.dumps(
{
"Submission": {
"id": 0,
"deficiency_notice_params": 0,
"organisation": {
"id": 0,
"name": 0,
},
"contact": {
"name": 0,
"email": 0,
"user": {
"name": 0,
"email": 0,
"id": 0,
"organisation": {
"organisation": {
"id": 0,
"name": 0,
}
},
},
"organisation": {
"id": 0,
"name": 0,
},
},
"case": 0,
"type": 0,
"created_by": 0,
"organisation_case_role_outer": 0,
}
}
)
def get(self, request, case_id, organisation_id, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
if not submission_id:
return HttpResponse(
json.dumps(
{
"error": "You cannot verify this organisation "
"as they have not yet registered interest in this case.",
}
),
content_type="application/json",
)
submission = self._client.get_submission(
self.case_id, submission_id, fields=self.submission_fields
)
json_data = submission.get("deficiency_notice_params") or {}
organisation = submission.get("organisation")
caserole = self._client.get_organisation_case_role(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
org_matches = self._client.get_organisation_matches(organisation_id, with_details="none")
return render(
request,
self.template_name,
{
"submission": submission,
"organisation": organisation,
"caserole": caserole,
"org_matches": org_matches,
"page_data": {
"submission": submission,
"organisation": organisation,
},
},
)
class SubmisisonVerifyEditLoaView(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
documents = self.get_documents(submission)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
org_contacts = self._client.get_organisation_contacts(
organisation_id, case_id, exclude_indirect=True
)
return render(
request,
"cases/verify/loa.html",
{
"auth_contacts": org_contacts,
"organisation": organisation,
"documents": documents,
"LOA": caserole.get("auth_contact"),
"submission": submission,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
self.update_submission_json(case_id, submission, request.POST)
result = self._client.set_organisation_case_role_loa(
case_id,
organisation_id,
pluck(
request.POST,
["LOA_contact_id", "name", "email", "address", "org_name", "phone"],
),
)
return HttpResponse(json.dumps(result))
class SubmisisonVerifyOrganisation(SubmissionVerifyBaseView):
enable_merge = False
def get(self, request, case_id, organisation_id):
test_org_id = request.GET.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(case_id=case_id, organisation_id=test_org_id)
if self.enable_merge:
org_matches = self._client.get_organisation_matches(test_org_id, with_details=True)
else:
org_matches = self._client.get_organisation_matches(test_org_id, with_details=False)
org_matches.sort(
key=lambda m: 1 if m.get("id") == test_org_id else 0
) # put the actual match at the end
matches = decorate_orgs(org_matches, test_org_id, exclude_case_id=case_id)
for match in matches:
if str(match.get("id")) == str(organisation.get("id")):
organisation.update(match)
return render(
request,
"cases/verify/merge_org.html" if self.enable_merge else "cases/verify/verify_org.html",
{
"case_id": self.case_id,
"organisation": organisation,
"match_list": matches,
"representing": test_org_id != organisation_id,
"json_data": submission.get("deficiency_notice_params"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
test_org_id = request.POST.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
verify = request.POST.get("deficiency_notice_params_org_verify")
if verify == "verified":
self._client.verify_caserole(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
elif verify == "rejected":
result = self._client.reject_organisation(case_id, organisation_id)
result = self.update_submission_json(case_id, submission, request.POST)
return HttpResponse(json.dumps({"result": True}))
class SubmissionVerifyAccept(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
roles = self._client.get_case_roles(
exclude=[
CASE_ROLE_APPLICANT,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_PREPARING,
]
)
return render(
request,
"cases/verify/accept.html",
{
"submission": submission,
"organisation": organisation,
"roles": roles,
"caserole": caserole,
"role_name": get(caserole, "role/name"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
role_key = request.POST.get("role_key")
result = {}
result = self._client.set_organisation_case_role(
case_id, organisation_id, role_key, pluck(request.POST, ["approve"])
)
return HttpResponse(json.dumps(result))
class SubmissionVerifyNotify(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
role_name = get(caserole, "role/name")
action = (
"reject" if get(caserole, "role/key") == "rejected" else "accept"
) # Todo: get this from the right place
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
case = self._client.get_case(case_id)
contact = submission_contact(submission)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
notify_key = (
"NOTIFY_INTERESTED_PARTY_REQUEST_PERMITTED"
if action == "accept"
else "NOTIFY_INTERESTED_PARTY_REQUEST_DENIED"
)
try:
notification_template = self._client.get_notification_template(notify_key)
values = self._client.create_notify_context(
{
"full_name": contact.get("name"),
"case_name": case.get("name"),
"case_number": case.get("reference"),
"company_name": organisation["name"],
"login_url": public_login_url(),
"role": role_name,
}
)
parsed_template = parse_notify_template(notification_template["body"], values)
        except Exception:
            # If the notification template cannot be fetched or parsed, fall back
            # to an empty preview rather than failing the page.
            parsed_template = ""
# contacts for the notification contact selector
contacts = organisation.get("contacts", [])
user = self._client.get_user(get(submission, "created_by/id"))
contacts.append(user.get("contact"))
return render(
request,
"cases/verify/notify.html",
{
"parsed_template": parsed_template,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
self._client.approve_submission(submission_id=submission_id)
return HttpResponse(json.dumps({"result": True}))
class SubmissionNotifyView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
contact = None
contact_name = None
send_to = json_data.get("send_to")
if not send_to:
contact = submission_contact(submission)
contact_name = contact and contact.get("name")
submission_type = submission["type"]
notify_sys_param_name = submission_type.get("notify_template") or "NOTIFY_QUESTIONNAIRE"
notification_template = self._client.get_notification_template(notify_sys_param_name)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_number": case_number,
"case_name": case["name"],
"investigation_type": case["type"]["name"],
"country": case["sources"][0]["country"] if case["sources"] else "N/A",
"company_name": submission["organisation"].get("name"),
"deadline": due_at or "No deadline assigned",
"login_url": public_login_url(),
"description": submission.get("description"),
"submission_request_name": submission.get("name"),
"notice_type": submission.get("type", {}).get("name"),
"notice_url": submission["url"],
"notice_of_initiation_url": submission["url"],
"footer": footer,
}
template_list = []
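        # Multi-recipient submissions (send_to set) get one notification preview
        # per matching participant; otherwise a single preview is built for the
        # submission contact.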
if send_to:
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
if contact:
local_values = {
"full_name": contact.get("name"),
"email": contact.get("email"),
"company_name": participant.get("name"),
}
values.update(local_values)
template_list.append(
{
"values": local_values,
"preview": parse_notify_template(
notification_template["body"], values
),
}
)
        else:
            # Single recipient: mirror the entry shape used by the multi-recipient
            # branch above.
            template_list.append(
                {
                    "values": {"full_name": contact_name, "email": contact.get("email")},
                    "preview": parse_notify_template(notification_template["body"], values),
                }
            )
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/notify/",
"form_title": f"Invite {contact_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as an example
# 'full_name': {'title': 'Full Name', 'disabled': True},
},
"notification_template": notification_template,
"templates": template_list,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
notify_keys = ["full_name", "product", "submission_request_name", "description"]
notify_data = {key: request.POST.get(key) for key in notify_keys if key in request.POST}
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
notify_data["deadline"] = due_at or "No deadline assigned"
if request.POST.get("multiple"):
return self.post_multiple(request, case_id, submission, context=notify_data)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"error": None,
}
),
content_type="application/json",
)
def post_multiple(self, request, case_id, submission, context=None):
"""
        Called to handle a notify post to multiple recipients.
        We must clone the submission for each target and send the notification.
"""
case = self._client.get_case(case_id)
json_data = from_json(submission.get("deficiency_notice_params"))
send_to = json_data.get("send_to")
# We need to know which is the last party in the list
# so we can modify the existing sub rather than clone it.
party_counter = len(send_to)
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
party_counter -= 1
if contact: # don't try to send if there is no contact
data = {
"case_id": case_id,
"submission_id": submission["id"],
"organisation_id": participant.get("id"),
"contact_id": contact.get("id"),
}
if party_counter:
cloned_submission = self._client.clone_submission(**data)
else:
cloned_submission = self._client.update_submission(**data).get(
"submission"
)
context["full_name"] = contact.get("full_name")
self._client.submission_notify(
case_id=case_id,
organisation_id=participant.get("id"),
submission_id=cloned_submission["id"],
values=context or {},
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"alert": f'Sent {len(send_to)} request{"" if len(send_to) < 2 else "s"}',
"redirect_url": f'/case/{case_id}/submission/{submission.get("id")}/'
if len(send_to) < 2
else f"/case/{case_id}/submissions/",
"error": None,
}
),
content_type="application/json",
)
class OrganisationDetailsView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, organisation_id, *args, **kwargs):
client = self.client(request.user)
item = request.GET.get("item")
template = request.GET.get("template")
result = {}
case_submissions = client.get_submissions(case_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
org_id = str(organisation_id)
third_party_contacts = []
if item == "contacts":
contacts = client.get_organisation_contacts(org_id, case_id)
for contact in contacts:
case = get(contact, "cases/" + str(case_id)) or {}
contact["primary"] = case.get("primary")
all_case_invites = client.get_contact_case_invitations(case_id)
if org_id in idx_submissions:
org_submission_idx = deep_index_items_by(idx_submissions[org_id], "id")
third_party_contacts = self.get_third_party_contacts(
org_id, org_submission_idx, all_case_invites
)
# `contacts` may also contain on-boarded third-party contacts that
# have a user, so we need to prune these out.
                third_party_contact_ids = {contact["id"] for contact in third_party_contacts}
                contacts = [
                    contact
                    for contact in contacts
                    if contact["id"] not in third_party_contact_ids
                ]
result = {
"contacts": contacts,
"pre_release_invitations": client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"invites": deep_index_items_by(all_case_invites, "contact/id"),
"third_party_contacts": third_party_contacts,
"case_role_id": request.GET.get("caserole"),
}
elif item == "submissions":
result["submissions"] = idx_submissions.get(org_id, [])
elif item == "details":
result["party"] = client.get_organisation(organisation_id=organisation_id)
if template:
deep_update(
result,
{
"case_id": case_id,
"case": {"id": case_id},
"organisation": {"id": org_id},
},
)
return render(request, template, result)
return HttpResponse(json.dumps({"result": result}), content_type="application/json")
@staticmethod
def get_third_party_contacts(organisation_id, submissions, invites):
"""Get third party contacts.
Given an organisation, its submissions and all invitations for a case,
build a list of third party invite contacts. We include the invite submissions
yet to be approved but flag the contact with `submission_sufficient`
:param (str) organisation_id: Organisation ID.
:param (dict) submissions: The organisation's submissions keyed on id.
:param (list) invites: All invites for a case.
:returns (list): Contacts arising from 3rd party invite submissions.
"""
third_party_contacts = []
for invite in invites:
if invite["submission"]:
submission_id = invite["submission"]["id"]
full_submission = submissions.get(submission_id)
if not full_submission:
# Submission not at this org
continue
if full_submission[0]["type"]["id"] != SUBMISSION_TYPE_THIRD_PARTY:
# Not a third party submission
continue
inviting_organisation = full_submission[0]["organisation"]["id"]
if inviting_organisation == organisation_id:
submission_sufficient = full_submission[0]["status"]["sufficient"]
invite["contact"]["is_third_party"] = True
invite["contact"]["submission_id"] = submission_id
invite["contact"]["submission_sufficient"] = submission_sufficient
invite["contact"]["invited"] = invite["email_sent"]
third_party_contacts.append(invite["contact"])
return third_party_contacts
class CaseOrganisationView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "organisations/organisation_in_case.html"
def add_page_data(self):
organisation = self._client.get_organisation(organisation_id=self.organisation_id)
caserole = None
case_submissions = self._client.get_submissions_public(self.case_id, self.organisation_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
submissions = idx_submissions.get(str(self.organisation_id), [])
roi_app_submission = next(
filter(lambda x: get(x, "type/key") in ["interest", "application"], submissions),
None,
)
cases = self._client.organisation_cases(self.organisation_id)
user_cases = self._client.organisation_user_cases(self.organisation_id)
cases_idx = deep_index_items_by_exists(cases, "archived_at")
for case in cases:
if get(case, "id") == str(self.case_id):
caserole = case
invites = self._client.get_contact_case_invitations(
self.case_id,
)
return {
"case": self.case,
"invites": invites,
"party": organisation,
"organisation": organisation,
"cases_idx": cases_idx,
"submissions": submissions,
"user_cases": user_cases,
"roi_app_submission": roi_app_submission,
"caserole": caserole,
}
class OrganisationMatchView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/organisation_dedupe.html"
def add_page_data(self):
organisation = self._client.get_organisation(
organisation_id=self.organisation_id, case_id=self.case_id
)
org_matches = self._client.get_organisation_matches(self.organisation_id)
org_matches = decorate_orgs(org_matches, self.organisation_id)
return {
"case": self.case,
"organisation": organisation,
"org_matches": org_matches,
}
class FilesView(CaseBaseView):
"""
View all case documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/files.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "respondent")
sort = self.request.GET.get("sort")
direction = self.request.GET.get("dir", "asc")
submission_id = self.request.GET.get("submission_id")
collapse_identical = self.request.GET.get("collapse_identical", "false") in (
"true",
"1",
"Y",
)
tabs = {
"tabList": [
{"label": "Respondent", "value": "respondent"},
{"label": "Investigator", "value": "investigator"},
],
"value": tab,
}
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
case_files = self._client.get_case_documents(
case_id=self.case_id,
source=tab,
submission_id=submission_id,
order_by=sort,
order_dir=direction,
)
submission = None
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
return {
"tabs": tabs,
"tab": tab,
"case_enums": case_enums,
"file_list": case_files,
"sort": sort,
"dir": direction,
"collapse_identical": collapse_identical,
"submission": submission,
"pre_document_search": self._client.get_system_boolean("PRE_DOCUMENT_SEARCH"),
}
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
name = request.POST.get("name")
confirm = request.POST.get("confirm") == "true"
tab = request.POST.get("tab", "respondent")
document_ids = request.POST.getlist("document_id")
if document_ids:
if action == "issue" and confirm:
submission_type_id = request.POST.get("submission_type_id")
response = self._client.issue_documents_to_case(
case_id=case_id,
name=name,
document_ids=document_ids,
submission_type_id=submission_type_id,
)
elif action == "confidential":
response = self._client.toggle_documents_confidentiality(
case_id=case_id, document_ids=document_ids
)
return redirect(f"/case/{case_id}/files/?tab={tab}")
class FileBrowseView(View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, *args, **kwargs):
_client = self.client(request.user)
case_files = _client.get_case_documents(case_id=case_id, source="investigator")
# Add application bundle documents
case_files.extend(_client.get_system_documents())
return HttpResponse(json.dumps(case_files), content_type="application/json")
class WorkflowEditor(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("workflow_editor",)
template_name = "cases/workflow_editor.html"
def add_page_data(self):
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
}
def post(self, request, case_id, *args, **kwargs):
workflow = request.POST.get("workflow")
self._client.save_case_workflow(case_id, workflow)
return HttpResponse(json.dumps({"saved": 1}), content_type="application/json")
class ActionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/actions.html"
def add_page_data(self):
permissions = {}
for permission_key in self.request.user.permissions:
permissions[permission_key] = 1
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
"permissions": permissions,
}
class StateView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def post(self, request, case_id, state_key=None, *args, **kwargs):
value = request.POST.get(state_key)
state_map = self._client.set_case_workflow_state(case_id, [state_key], {state_key: value})
return HttpResponse(
json.dumps({"workflow_state": state_map}), content_type="application/json"
)
class ActionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def get_state_from_children(self, item):
        # NB: the "required" flag is reused here so that any one completed child
        # marks the parent complete; otherwise all children must be complete.
        any_mode = item.get("required")
state = None
completed = False if any_mode else True
for child in item.get("children", []):
value = self.get_value(child.get("key"))
if value:
state = state or "in-progress"
if any_mode:
if value == "complete":
completed = True
else:
if value != "complete":
completed = False
return "complete" if state and completed else state
state_map = {}
def get_value(self, key):
return (self.state_map.get(key) or [""])[0]
def set_value(self, key, value):
arr = self.state_map.get(key) or [""]
arr[0] = value
self.state_map[key] = arr
def post(self, request, case_id, action_id=None, *args, **kwargs): # noqa: C901
values = {}
node_keys = []
action_key = request.POST.get("action-key")
btn_action = request.POST.get("btn_action")
complete = True
error = False
state = ""
wf = self._client.get_case_workflow(case_id)
workflow = wf.get("workflow")
self.state_map = wf.get("state")
index = key_by(workflow["root"], "key", "children")
action = index.get(action_key.lower(), {})
for task in action.get("children", []):
response_type = task.get("response_type", {}).get("name", "")
if response_type.lower() not in (
"notesection",
"timer",
"label",
): # notes don't count as in-progress
task_key = task.get("key")
old_val = self.get_value(task_key)
new_val = request.POST.get(task_key)
if old_val != new_val:
values[task_key] = new_val
node_keys.append(task_key)
if not new_val:
if task.get("required"):
complete = False
else:
if new_val != "na":
state = "in-progress"
if complete:
state = "complete"
if (self.get_value(action_key) or "") != state:
values[action_key] = state
node_keys.append(action_key)
self.set_value(action_key, state)
        # recompute and ripple the state up through the parent actions
loc_action = action
while loc_action.get("parent_key"):
loc_action = index.get(loc_action.get("parent_key"))
loc_key = loc_action.get("key")
loc_state = self.get_state_from_children(loc_action)
if (self.get_value(loc_key) or "") != loc_state:
values[loc_key] = loc_state
node_keys.append(loc_key)
self.set_value(loc_key, loc_state)
        if values:
self.state_map = self._client.set_case_workflow_state(case_id, node_keys, values)
if error:
action_id = action.get("id")
return redirect(f"/case/{case_id}/action/{action_id}")
else:
return HttpResponse(
json.dumps({"workflow_state": self.state_map}),
content_type="application/json",
)
class NavSectionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/nav_section.html"
def post(self, request, case_id, *args, **kwargs):
content_id = kwargs.get("nav_section_id")
response = self._client.set_case_content(
case_id, content_id=content_id, content=request.POST
)
content_id = response.get("id")
return redirect(f"/case/{case_id}/section/{content_id}")
def add_page_data(self):
return {}
class AuditView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/audit.html"
def add_page_data(self):
milestone = self.request.GET.get("milestone", "true") == "true"
limit = int(self.request.GET.get("limit", self.limit))
audit_data = self._client.get_audit(
case_id=self.case_id, start=self.start, limit=limit, milestone=milestone
)
url = reverse("case_audit", kwargs={"case_id": self.case_id})
prev_url = next_url = None
prev_page = max(0, self.start - limit)
milestone_flag = f"milestone={milestone}".lower()
if len(audit_data) >= limit:
next_page = max(0, self.start + limit)
next_url = f"{url}?{milestone_flag}&start={next_page}"
if next_page > limit:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
self.start = next_page
else:
self.start = prev_page + len(audit_data)
if prev_page:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
return {
"milestone": milestone,
"events": audit_data,
"next_url": next_url,
"prev_url": prev_url,
}
class CaseAuditExport(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(self, request, case_id, *args, **kwargs):
file = self.client(request.user).get_audit_export(case_id)
response = HttpResponse(file, content_type="application/vnd.ms-excel")
response["Content-Disposition"] = "attachment; filename=trade_remedies_export.xlsx"
return response
class NoteView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(
self,
request,
case_id,
content_type=None,
model_id=None,
model_key=None,
*args,
**kwargs,
):
notes = self.client(request.user).get_notes(
case_id, content_type, model_id, model_key=model_key
)
return HttpResponse(json.dumps(notes), content_type="application/json")
def post(self, request, case_id, note_id=None, *args, **kwargs): # noqa: C901
entity_id = request.POST.get("model_id")
model_key = request.POST.get("model_key")
content_type = request.POST.get("content_type")
client = self.client(request.user)
content = request.POST.get("content")
if note_id is None:
result = client.create_note(
case_id=case_id,
content_type=content_type,
model_id=entity_id,
model_key=model_key,
note_text=content,
)
note_id = result.get("id")
else:
delete_list = request.POST.getlist("delete_list")
if delete_list:
for document_id in delete_list:
deleted = client.delete_note_document(case_id, note_id, document_id)
conf_list = request.POST.getlist("set_confidential")
if conf_list:
for document_id in conf_list:
result = client.update_note_document(
case_id, note_id, document_id, "confidential"
)
nonconf_list = request.POST.getlist("set_non-confidential")
if nonconf_list:
for document_id in nonconf_list:
result = client.update_note_document(
case_id, note_id, document_id, "non-confidential"
)
result = client.update_note(case_id, note_id, content)
file_meta = request.POST.getlist("file-meta")
files = request.FILES.getlist("files")
for idx, _file in enumerate(files):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
except VirusFoundInFileException:
# Display a fake doc in the widget until
# a poll for success clears it
msg = "File upload aborted: malware detected in file!"
document = {
"name": msg,
"safe": False,
}
result["documents"].append(document)
else:
document = {
"document_name": _file.original_name,
"name": _file.name,
"size": _file.file_size,
}
result = client.add_note_document(
case_id=case_id,
note_id=note_id,
document=json.dumps(document),
confidentiality=file_meta[idx],
)
redirect_url = request.POST.get("redirect")
if redirect_url:
return internal_redirect(redirect_url, "/")
else:
# Return note json to be rendered at the client
return HttpResponse(json.dumps(result), content_type="application/json")
class PublicFileView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/public_file.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "all")
tabs = {
"tabList": [
{"label": "All", "value": "all"},
{"label": "Notices", "value": "tra"},
{"label": "Business", "value": "business"},
{"label": "Withdrawn", "value": "withdrawn"},
],
"value": tab,
}
case_submissions = self._client.get_submissions(self.case_id, show_global=True)
by_tra = deep_index_items_by_exists(case_submissions, "is_tra")
tra_by_published = deep_index_items_by_exists(by_tra.get("true"), "issued_at")
by_published = deep_index_items_by_exists(case_submissions, "issued_at")
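# The deep_index_* helpers appear to bucket submissions by field presence/value,
# keyed by the strings "true", "false" or "" - the tab filters below rely on that shape.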
if tab == "all":
submissions = by_published.get("true")
if tab == "tra":
submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("true")
if tab == "business":
submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("")
if tab == "withdrawn":
submissions = deep_index_items_by(by_published.get("false"), "is_tra").get("true")
return {
"tabs": tabs,
"submissions": submissions,
"public_base_url": settings.PUBLIC_BASE_URL,
}
class CaseFormView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_form.html"
def get_context(self, client, case_id=None):
if case_id:
case = client.get_case(case_id)
else:
case = {
"new": True,
"id": "",
"organisation": {"id": ""},
"type": {"id": "1"},
}
enums = client.get_all_case_enums()
gov_bodies = client.get_organisations(gov_body=True)
country_dict = {}
for country in countries:
country_dict[country[0]] = country[1]
context = {
"body_classes": "full-width",
"case": case,
"organisations": gov_bodies,
"country_dict": country_dict,
"organisation_name": case.get("organisation", {}).get("name") or "Secretary of State",
"contact_country": "GB",
"submission": {"type": {"id": 4}},
"tra_team_names": [
settings.ORGANISATION_NAME,
settings.ORGANISATION_INITIALISM + " Team 1",
settings.ORGANISATION_INITIALISM + " Team 2",
settings.ORGANISATION_INITIALISM + " Team 3",
],
}
context.update(enums)
# context['countries'] = countries[0]
return context
def get(self, request, case_id=None, *args, **kwargs):
client = self.client(request.user)
context = self.get_context(client, case_id)
return render(request, self.template_name, context)
def post(self, request, case_id=None, *args, **kwargs):
post_data = {
"id": case_id,
}
non_required_fields = [
"submission_status_id",
"case_name",
"organisation_name",
"organisation_id",
# 'organisation_address', 'organisation_post_code', 'companies_house_id',
# 'contact_name', 'contact_email', 'contact_phone', 'contact_address',
# 'contact_country',
]
error_lookup = {
"case_type_id": "Case type",
"product_name": "Product name",
"submission_type_id": "Submission type",
"sector_id": "Product sector",
"product_description": "Product description",
"export_country_code": "Export country",
"hs_code": "Product code",
}
required_fields = list(error_lookup.keys())
list_fields = ["export_country_code", "hs_code"]
case_fields = required_fields + non_required_fields
errors = {}
client = self.client(request.user)
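# Case types in ALL_REGION_ALLOWED_TYPE_IDS appear to apply to all regions, so no export country is required for them.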
if request.POST.get("case_type_id") in ALL_REGION_ALLOWED_TYPE_IDS:
required_fields.remove("export_country_code")
for field in case_fields:
post_data[field] = (
compact_list(request.POST.getlist(field))
if field in list_fields
else request.POST.get(field)
)
for field in required_fields:
if field in error_lookup and not post_data.get(field):
fieldname = error_lookup.get(field)
errors[field] = f"{fieldname} is required"
for code in post_data.get("hs_code") or []:
if len(str(code)) not in (6, 7, 8, 9, 10): # temporary validation
errors["hs_code"] = "HS codes should be between 6 and 10 digits"
if not errors:
post_data["ex_oficio"] = True
result = client.submit_full_case_data(post_data)
return redirect("/cases/")
else:
context = self.get_context(client, case_id)
context["errors"] = errors
context.update(post_data)
return render(request, self.template_name, context)
class InviteContactView(CaseBaseView):
"""
Invite a contact to the case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/invite.html"
raise_exception = True
def get_organisation_admin_user_contact(self, organisation_id):
contact = None
organisation = self._client.get_organisation(organisation_id)
admin_user = [
user
for user in organisation.get("users", [])
if user.get("security_group") == SECURITY_GROUP_ORGANISATION_OWNER
]
if admin_user:
user = self._client.get_user(admin_user[0]["user_id"])
contact = user.get("contact")
contact["organisation"] = organisation
return contact
def add_page_data(self):
contact = None
organisation = None
if self.kwargs.get("organisation_id"):
organisation = self._client.get_organisation(self.kwargs.get("organisation_id"))
if self.kwargs.get("contact_id"):
contact = self._client.get_contact(self.kwargs["contact_id"])
form_url = f"/case/{self.case['id']}/invite/{self.kwargs['contact_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if organisation:
form_url = f"{form_url}for/{organisation['id']}/"
elif self.kwargs.get("organisation_id"):
contact = self.get_organisation_admin_user_contact(self.kwargs["organisation_id"])
form_url = f"/case/{self.case['id']}/invite/organisation/{self.kwargs['organisation_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if not organisation:
organisation = contact["organisation"]
notification_template = self._client.get_notification_template(
"NOTIFY_INFORM_INTERESTED_PARTIES"
)
deep_update(
self.case,
self._client.get_case(
self.case_id,
fields=json.dumps(
{
"Case": {
"latest_notice_of_initiation_url": 0,
"registration_deadline": 0,
"product": 0,
}
}
),
),
)
case_number = self.case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact["name"],
"product": get(self.case, "product/name"),
"case_number": case_number,
"case_name": self.case["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"company_name": organisation["name"],
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"guidance_url": self._client.get_system_parameters("LINK_HELP_BOX_GUIDANCE")["value"],
"email": email,
"login_url": f"{settings.PUBLIC_BASE_URL}",
}
context = {
"form_url": form_url,
"editable_fields": ["full_name", "product"],
"case": self.case,
"contact": contact,
"case_role_id": self.kwargs["case_role_id"],
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
"organisation": organisation,
"organisation_id": self.kwargs.get("organisation_id"),
}
return context
def post(
self,
request,
contact_id=None,
case_id=None,
case_role_id=None,
organisation_id=None,
*args,
**kwargs,
):
notify_keys = ["full_name", "product"]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if organisation_id and contact_id:
notify_data["organisation_id"] = organisation_id
elif organisation_id and not contact_id:
contact = self.get_organisation_admin_user_contact(organisation_id)
contact_id = contact["id"]
response = self._client.invite_contact(case_id, contact_id, case_role_id, notify_data)
return HttpResponse(json.dumps(response), content_type="application/json")
class IssueFilesFormView(CaseBaseView):
"""
Issue files to case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "widgets/issue_files_form.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
return {
"case_enums": case_enums,
"case": self.case,
}
class CaseBundlesView(CaseBaseView):
"""
Assign documents to the case directly (not via submissions)
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundles.html"
def add_page_data(self):
list_mode = self.request.GET.get("tab", "live")
tabs = {
"value": list_mode,
"tabList": [
{"label": "Live", "value": "live", "sr_text": "Show live bundles"},
{"label": "Draft", "value": "draft", "sr_text": "Show draft bundles"},
],
}
case_bundles = self._client.get_case_submission_bundles(
case_id=self.case["id"],
status=list_mode.upper(),
)
return {
"bundles": case_bundles,
"error": self.kwargs.get("error"),
"tabs": tabs,
"status": list_mode,
}
@method_decorator(csrf_exempt, name="dispatch")
class CaseBundleView(CaseBaseView):
"""
View and edit a specific bundle full of documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundle_builder.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
bundle = None
bundle_id = self.kwargs.get("bundle_id")
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
if bundle_id:
bundle = self._client.get_case_submission_bundles(
case_id=self.case["id"], bundle_id=self.kwargs.get("bundle_id")
)
return_data.update(
{
"bundle": bundle,
"submission_types": case_enums["submission_types"],
}
)
return return_data
def post(self, request, case_id, bundle_id=None, *args, **kwargs): # noqa: C901
name = request.POST.get("name")
data = pluck(request.POST, ["name", "description"])
btn_value = request.POST.get("btn-value")
if btn_value == "send":
data["status"] = "LIVE"
# Upload documents
if bundle_id:
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
except (VirusFoundInFileException, APIException) as e:
redirect_url = f"/case/{case_id}/bundle/{bundle_id}/?"
msg = "File upload aborted: "
if isinstance(e, VirusFoundInFileException):
redirect_url += "virus=true"
else:
msg += f"{e}"
redirect_url += f"upload_error={msg}"
logger.warning(f"{msg}")
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
# Attach existing documents to this bundle
if case_files := request.POST.getlist("case_files"):
file_details_by_id = deep_index_items_by(meta, "file/id")
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"submission_document_type": details.get("submission_document_type"),
},
document_id=case_file_id,
)
else:
data = pluck(request.POST, ["name", "submission_type_id"])
data["case_id"] = case_id
# Anything else to send?
response = None
if data:
response = self._client.set_case_submission_bundle(bundle_id=bundle_id, data=data)
ret = {"result": "ok", "status": data.get("status")}
response_id = response and response.get("id")
if response_id:
ret["redirect_url"] = f"/case/{case_id}/bundle/{response_id}/"
return HttpResponse(json.dumps(ret), content_type="application/json")
def delete(self, request, case_id, document_id, *args, **kwargs):
response = self._client.delete_case_submission_bundle(case_id, document_id)
return redirect(f"/case/{case_id}/documents/")
class SubmissionInviteNotifyView(CaseBaseView):
"""
Notify an invitee about an invitation to case.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
template_name = "cases/invite.html"
def add_page_data(self):
"""Add page data.
CaseBaseView override.
"""
case_id = self.kwargs.get("case_id")
submission_id = self.kwargs.get("submission_id")
contact_id = self.kwargs.get("contact_id")
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
inviting_organisation = submission["organisation"]
invited_contact = self._client.get_contact(contact_id)
inviting_contact = submission.get("contact") or {}
notification_template = self._client.get_notification_template("NOTIFY_THIRD_PARTY_INVITE")
form_url = f"/case/{case_id}/submission/{submission_id}/invite/{contact_id}/notify/"
# Attempt to infer the invite URL
login_url = f"{settings.PUBLIC_BASE_URL}"
invites = self._client.get_invitations(case_id, submission_id)
for i in invites:
if i["contact"]["id"] == str(contact_id):
invite = self._client.get_invite_details(i["id"])
code = invite.get("code")
login_url = f"{login_url}/invitation/{code}/{case_id}/"
break
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": invited_contact["name"],
"case_name": case["name"],
"invited_by_organisation": inviting_organisation["name"],
"invited_by_name": inviting_contact["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"login_url": login_url,
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"email": email,
}
context = {
"form_url": form_url,
"notification_template": notification_template,
"submission": submission,
"case": case,
"contact": invited_contact,
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
}
return context
def post(self, request, case_id, submission_id, contact_id, *args, **kwargs):
notify_data = {
"case_id": case_id,
"submission_id": submission_id,
"contact_id": contact_id,
}
response = self._client.action_third_party_invite(
case_id=case_id,
submission_id=submission_id,
contact_id=contact_id,
params=notify_data,
)
return HttpResponse(json.dumps(response), content_type="application/json")
class UpdateParentView(CaseBaseView):
template_name = "cases/update_parent.html"
linked_case_confirm_key = "LINKED_CASE_CONFIRM"
cases_fields = json.dumps(
{
"Case": {
"name": 0,
"id": 0,
"reference": 0,
}
}
)
case_fields = json.dumps(
{"Case": {"parent": {"id": 0}, "workflow_state": {linked_case_confirm_key: 0}}}
)
def add_page_data(self):
cases = self._client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
case = self._client.get_case(self.case_id, fields=self.case_fields)
return {"case": case, "cases": cases}
def post(self, request, case_id, *args, **kwargs):
link_confirm = request.POST.get("link_confirm")
parent_id = request.POST.get("parent_id")
_client = self.client(request.user)
case = _client.get_case(case_id, fields=self.case_fields)
if get(case, "parent/id") != parent_id:
_client.set_case_data(case_id, {"parent_id": parent_id})
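# workflow_state values come back as single-element lists, hence the [0] unwrap below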
if (get(case, f"workflow_state/{self.linked_case_confirm_key}") or [0])[0] != link_confirm:
_client.set_case_workflow_state(
case_id, values={f"{self.linked_case_confirm_key}": link_confirm}
)
return HttpResponse(
json.dumps({"parent_id": parent_id, "link_confirm": link_confirm}),
content_type="application/json",
)
class NoticesView(
LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin
):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notices.html"
def get(self, request):
client = self.client(request.user)
notices = client.get_notices()
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notices": notices,
},
)
class NoticeView(LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notice.html"
cases_fields = json.dumps({"Case": {"name": 0, "id": 0, "reference": 0}})
def get(self, request, notice_id=None):
client = self.client(request.user)
enums = client.get_all_case_enums()
case_types = enums.get("case_types", [])
cases = client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
notice = {}
if notice_id:
notice = client.get_notice(notice_id)
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notice": notice,
"cases": cases,
"case_types": case_types,
},
)
def post(self, request, notice_id=None):
client = self.client(request.user)
notice = client.create_update_notice(
name=request.POST.get("name"),
reference=request.POST.get("reference"),
terminated_at=request.POST.get("terminated_at"),
published_at=request.POST.get("published_at"),
case_type=request.POST.get("case_type_id"),
review_case=request.POST.get("review_case_id"),
notice_id=notice_id,
)
return redirect("/cases/notices/")
class DocumentSearchView(CaseBaseView):
template_name = "documents/documents.html"
def add_page_data(self):
query = self.request.GET.get("query")
conf_status = self.request.GET.get("confidential_status")
user_type = self.request.GET.get("user_type")
response = self._client.search_documents(
case_id=self.case_id,
query=query,
confidential_status=conf_status,
user_type=user_type,
)
return {
"body_classes": "full-width",
"documents": response.pop("results", []),
"query": query,
"conf_status": conf_status,
**response,
}
class CaseTeamJsonView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, **kwargs):
team = self.client(request.user).get_case_team_members(case_id)
return HttpResponse(json.dumps(team), content_type="application/json")
| 41.063503
| 150
| 0.574391
| 11,958
| 115,101
| 5.238836
| 0.061632
| 0.028254
| 0.023034
| 0.013792
| 0.497222
| 0.421495
| 0.369186
| 0.325831
| 0.288111
| 0.251365
| 0
| 0.002329
| 0.321057
| 115,101
| 2,802
| 151
| 41.078158
| 0.799314
| 0.038132
| 0
| 0.355856
| 0
| 0.000819
| 0.141229
| 0.03705
| 0
| 0
| 0
| 0.001071
| 0
| 1
| 0.036446
| false
| 0.00041
| 0.009419
| 0.001638
| 0.141278
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98c872b368191fe4e11021c3430aca414eab1a34
| 2,698
|
py
|
Python
|
mmdet/models/emod_ops/ar_module.py
|
zhenglab/EMOD
|
68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89
|
[
"Apache-2.0"
] | 2
|
2020-12-09T08:40:04.000Z
|
2021-07-27T08:44:46.000Z
|
mmdet/models/emod_ops/ar_module.py
|
zhenglab/EMOD
|
68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/emod_ops/ar_module.py
|
zhenglab/EMOD
|
68bef744a99d0ec4eef8f3cc6b1f5ab3c0807d89
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
from mmcv.cnn.utils import constant_init, kaiming_init
class SimAttention(nn.Module):
def __init__(self, in_channels):
super(SimAttention, self).__init__()
self.conv_attn = nn.Conv2d(in_channels, 1, kernel_size=1)
self.softmax = nn.Softmax(dim=2)
kaiming_init(self.conv_attn, mode='fan_in')
self.conv_attn.inited = True
def forward(self, x):
b, c, h, w = x.size()
x_in = x
x_in = x_in.view(b, c, h * w)
x_in = x_in.unsqueeze(1)
x_attn = self.conv_attn(x)
x_attn = x_attn.view(b, 1, h * w)
x_attn = self.softmax(x_attn)
x_attn = x_attn.unsqueeze(-1)
x_out = torch.matmul(x_in, x_attn)
x_out = x_out.view(b, c, 1, 1)
return x_out
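# SimAttention pools the (B, C, H, W) input into a (B, C, 1, 1) context vector by taking a
# softmax over the H*W spatial positions - similar in spirit to global-context attention.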
class SimRelation(nn.Module):
def __init__(self, in_channels, ratio, act=False):
super(SimRelation, self).__init__()
self.planes = int(in_channels * ratio)
self.act = act
self.mlp = nn.Sequential(
nn.Linear(in_features=in_channels, out_features=self.planes),
nn.LayerNorm([self.planes]),
nn.ReLU(inplace=True),
nn.Linear(in_features=self.planes, out_features=in_channels))
constant_init(self.mlp[-1], val=0)
if self.act:
self.activate = nn.Sigmoid()
def forward(self, x):
x_in = x
x_in = x_in.view(x.size(0), -1)
x_out = self.mlp(x_in)
if self.act:
x_out = self.activate(x_out)
x_out = x_out.view(x.size(0), x.size(1), 1, 1)
return x_out
class ARModule(nn.Module):
"""AR Module for EMOD."""
def __init__(self,
in_channels,
ratio,
fusion_type='add'):
super(ARModule, self).__init__()
assert fusion_type in ['add', 'mul'], 'fusion_type should be add or mul.'
self.fusion_type = fusion_type
# attention
self.sim_attention = SimAttention(in_channels)
# relation
if self.fusion_type == 'add':
self.sim_relation = SimRelation(in_channels, ratio, act=False)
else:
self.sim_relation = SimRelation(in_channels, ratio, act=True)
def forward(self, x):
x_attn = self.sim_attention(x)
out = x
if self.fusion_type == 'add':
x_rel = self.sim_relation(x_attn)
out = out + x_rel
else:
x_rel = self.sim_relation(x_attn)
out = out * x_rel
return out
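# Illustrative usage (a sketch; the channel count and input shape are assumed, not taken from this file):
#   module = ARModule(in_channels=256, ratio=1.0 / 16, fusion_type='add')
#   out = module(torch.randn(2, 256, 32, 32))  # output keeps the input shape (2, 256, 32, 32)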
| 29.326087
| 81
| 0.546331
| 364
| 2,698
| 3.785714
| 0.211538
| 0.039913
| 0.017417
| 0.028302
| 0.323657
| 0.222787
| 0.175617
| 0.133527
| 0.049347
| 0.049347
| 0
| 0.009594
| 0.343217
| 2,698
| 91
| 82
| 29.648352
| 0.768059
| 0.014455
| 0
| 0.227273
| 0
| 0
| 0.020362
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98ca9c54fc93a4a5630df7be404c28ca3e935a2c
| 4,962
|
py
|
Python
|
sqlpuzzle/_common/argsparser.py
|
Dundee/python-sqlpuzzle
|
260524922a0645c9bf94a9779195f93ef2c78cba
|
[
"MIT"
] | 8
|
2015-03-19T11:25:32.000Z
|
2020-09-02T11:30:10.000Z
|
sqlpuzzle/_common/argsparser.py
|
Dundee/python-sqlpuzzle
|
260524922a0645c9bf94a9779195f93ef2c78cba
|
[
"MIT"
] | 7
|
2015-03-23T14:34:28.000Z
|
2022-02-21T12:36:01.000Z
|
sqlpuzzle/_common/argsparser.py
|
Dundee/python-sqlpuzzle
|
260524922a0645c9bf94a9779195f93ef2c78cba
|
[
"MIT"
] | 4
|
2018-11-28T21:59:27.000Z
|
2020-01-05T01:50:08.000Z
|
from sqlpuzzle.exceptions import InvalidArgumentException
__all__ = ('parse_args',)
# pylint: disable=dangerous-default-value,keyword-arg-before-vararg
def parse_args(options={}, *args, **kwds):
"""
Parser of arguments.
dict options {
int min_items: Minimum number of items required to fold into one tuple. (default: 1)
int max_items: Number of items in one tuple. The last `max_items-min_items`
items are set to None by default. (default: 1)
bool allow_dict: Flag allowing a dictionary as the first (and only)
argument, or a dictionary passed as **kwds. (default: False)
bool allow_list: Flag allowing a list as the first (and only) argument.
(default: False)
}
Examples:
calling with min_items=1, max_items=2, allow_dict=False:
arg1, arg2 => ((arg1, None), (arg2, None))
(arg1a, arg1b), arg2 => ((arg1a, arg1b), (arg2, None))
arg1=val1 => FAIL
{key1: val1} => FAIL
calling with min_items=2, max_items=3, allow_dict=True:
arg1, arg2 => ((arg1, arg2, None),)
arg1, arg2, arg3 => ((arg1, arg2, arg3),)
(arg1a, arg1b, arg1c) => ((arg1a, arg1b, arg1c),)
arg1=val1, arg2=val2 => ((arg1, val1, None), (arg2, val2, None))
{key1: val1, key2: val2} => ((key1, val1, None), (key2, val2, None))
(arg1a, arg1b), arg2a, arg2b => FAIL
"""
parser_options = ParserOptions(options)
parser_input = ParserInput(args, kwds)
parser = Parser(parser_options, parser_input)
parser.parse()
return parser.output_data
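# Illustrative usage (a sketch derived from the docstring above, not part of the library API):
#   parse_args({'max_items': 2}, 'a', ('b', 'c'))
#   # -> [('a', None), ('b', 'c')]
#   parse_args({'min_items': 2, 'max_items': 3, 'allow_dict': True}, key1='val1')
#   # -> [('key1', 'val1', None)]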
# pylint: disable=too-few-public-methods
class ParserOptions:
def __init__(self, options):
self.min_items = options.get('min_items', 1)
self.max_items = options.get('max_items', 1)
self.allow_dict = options.get('allow_dict', False)
self.allow_list = options.get('allow_list', False)
assert self.min_items <= self.max_items
assert not self.allow_dict or (self.allow_dict and self.max_items > 1)
class ParserInput:
def __init__(self, args, kwds):
self.args = args
self.kwds = kwds
@property
def list(self):
if self.is_list:
return self.args[0]
return []
@property
def dictionary_or_kwds(self):
if self.is_dictionary:
return self.args[0]
if self.is_kwds:
return self.kwds
return {}
@property
def is_list(self):
return len(self.args) == 1 and isinstance(self.args[0], (list, tuple))
@property
def is_dictionary(self):
return len(self.args) == 1 and isinstance(self.args[0], dict)
@property
def is_kwds(self):
return self.kwds != {}
@property
def is_args(self):
return len(self.args) > 0 and not isinstance(self.args[0], (list, tuple))
def count_of_args_is_in_interval(self, min_, max_):
return min_ <= len(self.args) <= max_
class Parser:
def __init__(self, options, input_data):
self.options = options
self.input_data = input_data
self.output_data = []
def parse(self):
if (
self.options.min_items > 1
and self.input_data.is_args
and self.input_data.count_of_args_is_in_interval(self.options.min_items, self.options.max_items)
):
self._parse_item(self.input_data.args)
elif self.options.allow_list and self.input_data.is_list:
self._parse_list(self.input_data.list)
elif not self.input_data.is_dictionary and self.input_data.args:
self._parse_list(self.input_data.args)
if self.input_data.is_dictionary or self.input_data.is_kwds:
if not self.options.allow_dict:
raise InvalidArgumentException('Dictionary or kwds is disabled.')
self._parse_dictionary(self.input_data.dictionary_or_kwds)
def _parse_dictionary(self, dictionary):
for item in sorted(dictionary.items()):
self._parse_item(item)
def _parse_list(self, list_):
for item in list_:
if isinstance(item, (list, tuple)):
self._parse_item(item)
elif self.options.min_items == 1:
self._parse_item((item,))
else:
raise InvalidArgumentException('Too few arguments.')
def _parse_item(self, item):
batch = self._create_batch(item)
self.output_data.append(batch)
def _create_batch(self, values):
if len(values) > self.options.max_items:
raise InvalidArgumentException('Too many arguments.')
return self._append_nones(tuple(values))
def _append_nones(self, tuple_with_values):
count_of_nones = self.options.max_items - len(tuple_with_values)
tuple_with_nones = (None,) * count_of_nones
return tuple_with_values + tuple_with_nones
| 33.755102
| 112
| 0.620314
| 638
| 4,962
| 4.595611
| 0.170846
| 0.042974
| 0.053206
| 0.02558
| 0.165757
| 0.115621
| 0.048431
| 0.030014
| 0.030014
| 0.030014
| 0
| 0.019114
| 0.272471
| 4,962
| 146
| 113
| 33.986301
| 0.793075
| 0.249899
| 0
| 0.113636
| 0
| 0
| 0.032071
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 1
| 0.193182
| false
| 0
| 0.011364
| 0.056818
| 0.386364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98cab2bad7becb5d77b33c01de7f7ffa0e4c8c44
| 16,809
|
py
|
Python
|
reviewboard/webapi/tests/test_review_screenshot_comment.py
|
ParikhKadam/reviewboard
|
7395902e4c181bcd1d633f61105012ffb1d18e1b
|
[
"MIT"
] | 921
|
2015-01-01T15:26:28.000Z
|
2022-03-29T11:30:38.000Z
|
reviewboard/webapi/tests/test_review_screenshot_comment.py
|
ParikhKadam/reviewboard
|
7395902e4c181bcd1d633f61105012ffb1d18e1b
|
[
"MIT"
] | 5
|
2015-03-17T18:57:47.000Z
|
2020-10-02T13:24:31.000Z
|
reviewboard/webapi/tests/test_review_screenshot_comment.py
|
ParikhKadam/reviewboard
|
7395902e4c181bcd1d633f61105012ffb1d18e1b
|
[
"MIT"
] | 285
|
2015-01-12T06:24:36.000Z
|
2022-03-29T11:03:50.000Z
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
screenshot_comment_item_mimetype,
screenshot_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
CommentItemMixin,
CommentListMixin)
from reviewboard.webapi.tests.urls import (
get_review_screenshot_comment_item_url,
get_review_screenshot_comment_list_url)
class BaseTestCase(BaseWebAPITestCase):
fixtures = ['test_users']
def _create_screenshot_review_with_issue(self, publish=False,
comment_text=None):
"""Sets up a review for a screenshot that includes an open issue.
If `publish` is True, the review is published. The review request is
always published.
Returns the created comment, the review object, and
the review request object.
"""
if not comment_text:
comment_text = 'Test screenshot comment with an opened issue'
review_request = self.create_review_request(publish=True,
submitter=self.user)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user,
publish=publish)
comment = self.create_screenshot_comment(review, screenshot,
comment_text,
issue_opened=True)
return comment, review, review_request
class ResourceListTests(CommentListMixin, ReviewRequestChildListMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource list APIs."""
sample_api_url = 'review-requests/<id>/reviews/<id>/screenshot-comments/'
resource = resources.review_screenshot_comment
def setup_review_request_child_test(self, review_request):
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
return (get_review_screenshot_comment_list_url(review),
screenshot_comment_list_mimetype)
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
if populate_items:
items = [self.create_screenshot_comment(review, screenshot)]
else:
items = []
return (get_review_screenshot_comment_list_url(review,
local_site_name),
screenshot_comment_list_mimetype,
items)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
return (get_review_screenshot_comment_list_url(review,
local_site_name),
screenshot_comment_item_mimetype,
{
'screenshot_id': screenshot.pk,
'text': 'Test comment',
'x': 2,
'y': 2,
'w': 10,
'h': 10,
},
[review, screenshot])
def check_post_result(self, user, rsp, review, screenshot):
comment = \
ScreenshotComment.objects.get(pk=rsp['screenshot_comment']['id'])
self.compare_item(rsp['screenshot_comment'], comment)
def test_post_with_issue(self):
"""Testing the
POST review-requests/<id>/reviews/<id>/screenshot-comments/ API
with an issue
"""
comment_text = "Test screenshot comment with an opened issue"
comment, review, review_request = \
self._create_screenshot_review_with_issue(
publish=False, comment_text=comment_text)
rsp = self.api_get(
get_review_screenshot_comment_list_url(review),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('screenshot_comments', rsp)
self.assertEqual(len(rsp['screenshot_comments']), 1)
self.assertEqual(rsp['screenshot_comments'][0]['text'], comment_text)
self.assertTrue(rsp['screenshot_comments'][0]['issue_opened'])
class ResourceItemTests(CommentItemMixin, ReviewRequestChildItemMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource item APIs."""
fixtures = ['test_users']
sample_api_url = \
'review-requests/<id>/reviews/<id>/screenshot-comments/<id>/'
resource = resources.review_screenshot_comment
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
def setup_review_request_child_test(self, review_request):
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk),
screenshot_comment_item_mimetype)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
[comment, review])
def check_delete_result(self, user, comment, review):
self.assertNotIn(comment, review.screenshot_comments.all())
def test_delete_with_does_not_exist_error(self):
"""Testing the
DELETE review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with Does Not Exist error
"""
review_request = self.create_review_request(publish=True)
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
self.api_delete(get_review_screenshot_comment_item_url(review, 123),
expected_status=404)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
comment)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
{'text': 'Test comment'},
comment,
[])
def check_put_result(self, user, item_rsp, comment, *args):
comment = ScreenshotComment.objects.get(pk=comment.pk)
self.assertEqual(item_rsp['text_type'], 'plain')
self.assertEqual(item_rsp['text'], 'Test comment')
self.compare_item(item_rsp, comment)
def test_put_with_issue(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, removing issue_opened
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_opened': False},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(rsp['screenshot_comment']['issue_opened'])
def test_put_issue_status_before_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API
with an issue, before review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
# The issue_status should not be able to be changed while the review is
# unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
def test_put_issue_status_after_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, after review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
def test_put_issue_status_by_issue_creator(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for issue creator
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request so that it's not owned by
# self.user
review_request.submitter = User.objects.get(username='doc')
review_request.save()
# The review/comment (and therefore issue) is still owned by self.user,
# so we should be able to change the issue status.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'dropped')
def test_put_issue_status_by_uninvolved_user(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for an uninvolved user
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request and review so that they're not
# owned by self.user.
new_owner = User.objects.get(username='doc')
review_request.submitter = new_owner
review_request.save()
review.user = new_owner
review.save()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
def test_put_deleted_screenshot_comment_issue_status(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>
API with an issue and a deleted screenshot
"""
comment_text = "Test screenshot comment with an opened issue"
x, y, w, h = (2, 2, 10, 10)
review_request = self.create_review_request(publish=True,
submitter=self.user,
target_people=[self.user])
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot,
comment_text, x, y, w, h,
issue_opened=True)
# First, let's ensure that the user that has created the comment
# cannot alter the issue_status while the review is unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
# Next, let's publish the review, and try altering the issue_status.
# This should be allowed, since the review request was made by the
# current user.
review.public = True
review.save()
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
# Delete the screenshot.
self._delete_screenshot(review_request, screenshot)
review_request.publish(review_request.submitter)
# Try altering the issue_status. This should be allowed.
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'open'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
| 41.198529
| 79
| 0.636861
| 1,845
| 16,809
| 5.519241
| 0.106775
| 0.101836
| 0.049494
| 0.043209
| 0.728567
| 0.685554
| 0.644604
| 0.616223
| 0.611018
| 0.559756
| 0
| 0.001959
| 0.271164
| 16,809
| 407
| 80
| 41.299754
| 0.829238
| 0.127075
| 0
| 0.589552
| 0
| 0
| 0.076542
| 0.007877
| 0
| 0
| 0
| 0
| 0.156716
| 1
| 0.078358
| false
| 0
| 0.037313
| 0
| 0.179104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98caf2eb8158bde50b1d44dd5a0629d9a33340c7
| 1,163
|
py
|
Python
|
qbapi/app.py
|
dimddev/qb
|
fbf9f4cac8aaf14243229e3193960da7114bb7ba
|
[
"BSD-3-Clause"
] | null | null | null |
qbapi/app.py
|
dimddev/qb
|
fbf9f4cac8aaf14243229e3193960da7114bb7ba
|
[
"BSD-3-Clause"
] | null | null | null |
qbapi/app.py
|
dimddev/qb
|
fbf9f4cac8aaf14243229e3193960da7114bb7ba
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Command line tool
"""
import asyncio
from qbapi.request import create_request
from qbapi.services.clients import Producer, Consumer
async def spider(user_data: tuple) -> None:
"""spider
:param user_data:
:type user_data: tuple
:rtype: None
"""
producer_queue = asyncio.Queue()
consumer_queue = asyncio.Queue()
max_workers = 0
for data in user_data:
await producer_queue.put(await create_request(data))
max_workers += 1
producer_tasks = []
consumer_tasks = []
for _ in range(max_workers):
producer_tasks.append(
asyncio.create_task(
Producer().process(producer_queue, consumer_queue)
)
)
consumer_tasks.append(
asyncio.create_task(
Consumer().process(consumer_queue)
)
)
await producer_queue.join()
await consumer_queue.join()
for i, task in enumerate(producer_tasks):
task.cancel()
consumer_tasks[i].cancel()
await asyncio.gather(*producer_tasks, return_exceptions=True)
await asyncio.gather(*consumer_tasks, return_exceptions=True)
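# Illustrative entry point (a sketch; the exact shape of each user_data item depends on create_request):
#   asyncio.run(spider(({'url': 'https://example.com'},)))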
| 21.943396
| 66
| 0.638005
| 130
| 1,163
| 5.476923
| 0.346154
| 0.044944
| 0.036517
| 0.067416
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002356
| 0.269991
| 1,163
| 52
| 67
| 22.365385
| 0.836278
| 0.014617
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98cd18a83142f071207fd03be7967e2e0520ebe6
| 9,063
|
py
|
Python
|
test/test_literal.py
|
hrnciar/rdflib
|
d507fdac93be2ec3e35882e3efaa5e7c7349fa93
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_literal.py
|
hrnciar/rdflib
|
d507fdac93be2ec3e35882e3efaa5e7c7349fa93
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_literal.py
|
hrnciar/rdflib
|
d507fdac93be2ec3e35882e3efaa5e7c7349fa93
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import datetime
import rdflib # needed for eval(repr(...)) below
from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN
from rdflib.namespace import XSD
def uformat(s):
return s.replace("u'", "'")
class TestLiteral(unittest.TestCase):
def setUp(self):
pass
def test_repr_apostrophe(self):
a = rdflib.Literal("'")
b = eval(repr(a))
self.assertEqual(a, b)
def test_repr_quote(self):
a = rdflib.Literal('"')
b = eval(repr(a))
self.assertEqual(a, b)
def test_backslash(self):
d = r"""
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foo="http://example.org/foo#">
<rdf:Description>
<foo:bar>a\b</foo:bar>
</rdf:Description>
</rdf:RDF>
"""
g = rdflib.Graph()
g.parse(data=d, format="xml")
a = rdflib.Literal("a\\b")
b = list(g.objects())[0]
self.assertEqual(a, b)
def test_literal_from_bool(self):
l = rdflib.Literal(True)
self.assertEqual(l.datatype, rdflib.XSD["boolean"])
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
self.assertRaises(
TypeError, Literal, "foo", lang="en", datatype=URIRef("http://example.com/")
)
def testFromOtherLiteral(self):
l = Literal(1)
l2 = Literal(l)
self.assertTrue(isinstance(l.value, int))
self.assertTrue(isinstance(l2.value, int))
# change datatype
l = Literal("1")
l2 = Literal(l, datatype=rdflib.XSD.integer)
self.assertTrue(isinstance(l2.value, int))
def testDatatypeGetsAutoURIRefConversion(self):
# drewp disapproves of this behavior, but it should be
# represented in the tests
x = Literal("foo", datatype="http://example.com/")
self.assertTrue(isinstance(x.datatype, URIRef))
x = Literal("foo", datatype=Literal("pennies"))
self.assertEqual(x.datatype, URIRef("pennies"))
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
self.assertEqual(repr(Literal("foo")), uformat("rdflib.term.Literal(u'foo')"))
def testOmitsMissingDatatype(self):
self.assertEqual(
repr(Literal("foo", lang="en")),
uformat("rdflib.term.Literal(u'foo', lang='en')"),
)
def testOmitsMissingLang(self):
self.assertEqual(
repr(Literal("foo", datatype=URIRef("http://example.com/"))),
uformat(
"rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"
),
)
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
x = MyLiteral("foo")
self.assertEqual(repr(x), uformat("MyLiteral('foo')"))
class TestDoubleOutput(unittest.TestCase):
def testNoDanglingPoint(self):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/237"""
vv = Literal("0.88", datatype=_XSD_DOUBLE)
out = vv._literal_n3(use_plain=True)
self.assertTrue(out in ["8.8e-01", "0.88"], out)
class TestParseBoolean(unittest.TestCase):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/913"""
def testTrueBoolean(self):
test_value = Literal("tRue", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
test_value = Literal("1", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
def testFalseBoolean(self):
test_value = Literal("falsE", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
test_value = Literal("0", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
def testNonFalseBoolean(self):
test_value = Literal("abcd", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
test_value = Literal("10", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
class TestBindings(unittest.TestCase):
def testBinding(self):
class a:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "<<<%s>>>" % self.v
dtA = rdflib.URIRef("urn:dt:a")
bind(dtA, a)
va = a("<<<2>>>")
la = Literal(va, normalize=True)
self.assertEqual(la.value, va)
self.assertEqual(la.datatype, dtA)
la2 = Literal("<<<2>>>", datatype=dtA)
self.assertTrue(isinstance(la2.value, a))
self.assertEqual(la2.value.v, va.v)
class b:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "B%s" % self.v
dtB = rdflib.URIRef("urn:dt:b")
bind(dtB, b, None, lambda x: "<<<%s>>>" % x)
vb = b("<<<3>>>")
lb = Literal(vb, normalize=True)
self.assertEqual(lb.value, vb)
self.assertEqual(lb.datatype, dtB)
def testSpecificBinding(self):
def lexify(s):
return "--%s--" % s
def unlexify(s):
return s[2:-2]
datatype = rdflib.URIRef("urn:dt:mystring")
# Datatype-specific rule
bind(datatype, str, unlexify, lexify, datatype_specific=True)
s = "Hello"
normal_l = Literal(s)
self.assertEqual(str(normal_l), s)
self.assertEqual(normal_l.toPython(), s)
self.assertEqual(normal_l.datatype, None)
specific_l = Literal("--%s--" % s, datatype=datatype)
self.assertEqual(str(specific_l), lexify(s))
self.assertEqual(specific_l.toPython(), s)
self.assertEqual(specific_l.datatype, datatype)
class TestXsdLiterals(unittest.TestCase):
def test_make_literals(self):
"""
Tests literal construction.
"""
inputs = [
# these literals do not get converted to python types
("ABCD", XSD.integer, None),
("ABCD", XSD.gYear, None),
("-10000", XSD.gYear, None),
("-1921-00", XSD.gYearMonth, None),
("1921-00", XSD.gMonthDay, None),
("1921-13", XSD.gMonthDay, None),
("-1921-00", XSD.gMonthDay, None),
("10", XSD.gDay, None),
("-1", XSD.gDay, None),
("0000", XSD.gYear, None),
("0000-00-00", XSD.date, None),
("NOT A VALID HEX STRING", XSD.hexBinary, None),
("NOT A VALID BASE64 STRING", XSD.base64Binary, None),
# these literals get converted to python types
("1921-05-01", XSD.date, datetime.date),
("1921-05-01T00:00:00", XSD.dateTime, datetime.datetime),
("1921-05", XSD.gYearMonth, datetime.date),
("0001-01", XSD.gYearMonth, datetime.date),
("0001-12", XSD.gYearMonth, datetime.date),
("2002-01", XSD.gYearMonth, datetime.date),
("9999-01", XSD.gYearMonth, datetime.date),
("9999-12", XSD.gYearMonth, datetime.date),
("1921", XSD.gYear, datetime.date),
("2000", XSD.gYear, datetime.date),
("0001", XSD.gYear, datetime.date),
("9999", XSD.gYear, datetime.date),
("1982", XSD.gYear, datetime.date),
("2002", XSD.gYear, datetime.date),
("1921-05-01T00:00:00+00:30", XSD.dateTime, datetime.datetime),
("1921-05-01T00:00:00-00:30", XSD.dateTime, datetime.datetime),
("abcdef0123", XSD.hexBinary, bytes),
("", XSD.hexBinary, bytes),
("UkRGTGli", XSD.base64Binary, bytes),
("", XSD.base64Binary, bytes),
]
self.check_make_literals(inputs)
@unittest.expectedFailure
def test_make_literals_ki(self):
"""
Known issues with literal construction.
"""
inputs = [
("1921-01Z", XSD.gYearMonth, datetime.date),
("1921Z", XSD.gYear, datetime.date),
("1921-00", XSD.gYearMonth, datetime.date),
("1921-05-01Z", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01T00:00:00Z", XSD.dateTime, datetime.datetime),
]
self.check_make_literals(inputs)
def check_make_literals(self, inputs):
for literal_pair in inputs:
(lexical, type, value_cls) = literal_pair
with self.subTest(f"tesing {literal_pair}"):
literal = Literal(lexical, datatype=type)
if value_cls is not None:
self.assertIsInstance(literal.value, value_cls)
else:
self.assertIsNone(literal.value)
self.assertEqual(lexical, f"{literal}")
if __name__ == "__main__":
unittest.main()
| 33.69145
| 98
| 0.580051
| 1,041
| 9,063
| 4.962536
| 0.215178
| 0.060976
| 0.030972
| 0.027875
| 0.353271
| 0.275648
| 0.183508
| 0.180798
| 0.131049
| 0.131049
| 0
| 0.046797
| 0.271433
| 9,063
| 268
| 99
| 33.817164
| 0.735575
| 0.04899
| 0
| 0.161765
| 0
| 0.009804
| 0.118513
| 0.017882
| 0
| 0
| 0
| 0
| 0.186275
| 1
| 0.137255
| false
| 0.014706
| 0.02451
| 0.02451
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98cecf3619ad0f5f809b91b86260d60284ee57d7
| 14,312
|
py
|
Python
|
extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py
|
pyaf/severstal-steel-defect-detection
|
68a0df4164e84803b6cba78597a079d3736b4e00
|
[
"MIT"
] | null | null | null |
extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py
|
pyaf/severstal-steel-defect-detection
|
68a0df4164e84803b6cba78597a079d3736b4e00
|
[
"MIT"
] | null | null | null |
extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py
|
pyaf/severstal-steel-defect-detection
|
68a0df4164e84803b6cba78597a079d3736b4e00
|
[
"MIT"
] | null | null | null |
import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
from common import *
from dataset import *
from model import *
def valid_augment(image, mask, infor):
return image, mask, infor
def train_augment(image, mask, infor):
u=np.random.choice(3)
if u==0:
pass
elif u==1:
image, mask = do_random_crop_rescale(image, mask, 1600-(256-224), 224)
elif u==2:
image, mask = do_random_crop_rotate_rescale(image, mask, 1600-(256-224), 224)
if np.random.rand()>0.5:
image = do_random_log_contast(image)
if np.random.rand()>0.5:
image, mask = do_flip_lr(image, mask)
if np.random.rand()>0.5:
image, mask = do_flip_ud(image, mask)
if np.random.rand()>0.5:
image, mask = do_noise(image, mask)
return image, mask, infor
def null_collate(batch):
batch_size = len(batch)
input = []
truth_mask = []
truth_label = []
infor = []
for b in range(batch_size):
input.append(batch[b][0])
#truth_mask.append(batch[b][1])
infor.append(batch[b][2])
mask = batch[b][1]
label = (mask.reshape(4,-1).sum(1)>0).astype(np.int32)
num_class,H,W = mask.shape
mask = mask.transpose(1,2,0)*[1,2,3,4]
mask = mask.reshape(-1,4)
mask = mask.max(-1).reshape(1,H,W)
truth_mask.append(mask)
truth_label.append(label)
input = np.stack(input)
input = image_to_input(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
input = torch.from_numpy(input).float()
truth_mask = np.stack(truth_mask)
truth_mask = torch.from_numpy(truth_mask).long()
truth_label = np.array(truth_label)
truth_label = torch.from_numpy(truth_label).float()
return input, truth_mask, truth_label, infor
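# Sketch of the mask packing above (illustrative values, not from the dataset):
# each sample's mask arrives as 4 binary class planes of shape (4, H, W).
# Multiplying the (H, W, 4) transpose by [1, 2, 3, 4] tags every positive pixel
# with its class id, and the max over the last axis collapses the planes into a
# single (1, H, W) index map where 0 means background, e.g. for one pixel:
#
#   planes = [0, 1, 0, 0]  -> tagged [0, 2, 0, 0] -> max = 2  (class 2)
#   planes = [0, 0, 0, 0]  -> tagged [0, 0, 0, 0] -> max = 0  (background)
#
# This assumes the class planes never overlap; an overlapping pixel would keep
# only the highest class id.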
#------------------------------------
def do_valid(net, valid_loader, out_dir=None):
#out_dir=None
valid_num = np.zeros(11, np.float32)
valid_loss = np.zeros(11, np.float32)
for t, (input, truth_mask, truth_label, infor) in enumerate(valid_loader):
#if b==5: break
net.eval()
input = input.cuda()
truth_mask = truth_mask.cuda()
truth_label = truth_label.cuda()
with torch.no_grad():
logit = data_parallel(net, input) #net(input)
loss = criterion(logit, truth_mask)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
dn,dp, num_neg,num_pos = metric_dice(logit, truth_mask, threshold=0.5, sum_threshold=100)
#zz=0
#---
batch_size = len(infor)
l = np.array([ loss.item(), tn,*tp, dn,*dp ])
n = np.array([ batch_size, num_neg,*num_pos, num_neg,*num_pos ])
valid_loss += l*n
valid_num += n
# debug-----------------------------
if out_dir is not None:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(0, batch_size, 4):
image_id = infor[b].image_id[:-4]
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
draw_shadow_text(result,'%05d %s.jpg'%(valid_num[0]-batch_size+b, image_id),(5,24),1,[255,255,255],2)
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/valid/%s.png'%(infor[b].image_id[:-4]), result)
cv2.waitKey(1)
pass
# debug-----------------------------
#print(valid_loss)
print('\r %8d /%8d'%(valid_num[0], len(valid_loader.dataset)),end='',flush=True)
pass #-- end of one data loader --
assert(valid_num[0] == len(valid_loader.dataset))
valid_loss = valid_loss/valid_num
return valid_loss
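# Layout note for the 11 accumulated values above (inferred from the metric
# calls and the log header printed in run_train()):
#
#   valid_loss = [loss,
#                 hit_neg,  hit_pos1..4,    # hit rates from metric_hit
#                 dice_neg, dice_pos1..4]   # dice scores from metric_dice
#
# Each entry is a weighted mean, weighted by the per-batch counts accumulated
# in valid_num.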
def run_train():
out_dir = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance'
initial_checkpoint = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance/checkpoint/00114000_model.pth'
scheduler = NullScheduler(lr=0.001)
batch_size = 8 #8
iter_accum = 4
loss_weight = None#[5,5,2,5] #
train_sampler = FourBalanceClassSampler #RandomSampler
## setup -----------------------------------------------------------------------------
for f in ['checkpoint','train','valid','backup'] : os.makedirs(out_dir +'/'+f, exist_ok=True)
backup_project_as_zip(PROJECT_PATH, out_dir +'/backup/code.train.%s.zip'%IDENTIFIER)
log = Logger()
log.open(out_dir+'/log.train.txt',mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\n')
## dataset ----------------------------------------
log.write('** dataset setting **\n')
train_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['train_b1_11568.npy',],
augment = train_augment,
)
train_loader = DataLoader(
train_dataset,
sampler = train_sampler(train_dataset),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
valid_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['valid_b1_1000.npy',],
augment = valid_augment,
)
valid_loader = DataLoader(
valid_dataset,
sampler = SequentialSampler(valid_dataset),
batch_size = 4,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
assert(len(train_dataset)>=batch_size)
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset : \n%s\n'%(train_dataset))
log.write('valid_dataset : \n%s\n'%(valid_dataset))
log.write('\n')
## net ----------------------------------------
log.write('** net setting **\n')
net = Net().cuda()
log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
if initial_checkpoint is not None:
state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
##for k in ['logit.weight','logit.bias']: state_dict.pop(k, None) #transfer sigmoid feature to softmax network
##net.load_state_dict(state_dict,strict=False)
net.load_state_dict(state_dict,strict=False)
else:
net.load_pretrain(skip=['logit'], is_print=False)
log.write('%s\n'%(type(net)))
log.write('\tloss_weight = %s\n' % str(loss_weight))
log.write('\ttrain_loader.sampler = %s\n' % str(train_loader.sampler))
log.write('\n')
## optimiser ----------------------------------
# if 0: ##freeze
# for p in net.encoder1.parameters(): p.requires_grad = False
# pass
#net.set_mode('train',is_freeze_bn=True)
#-----------------------------------------------
#optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=scheduler(0))
#optimizer = torch.optim.RMSprop(net.parameters(), lr =0.0005, alpha = 0.95)
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=scheduler(0), momentum=0.9, weight_decay=0.0001)
num_iters = 3000*1000
iter_smooth = 50
iter_log = 500
iter_valid = 1500
iter_save = [0, num_iters-1]\
+ list(range(0, num_iters, 1500))#1*1000
start_iter = 0
start_epoch= 0
rate = 0
if initial_checkpoint is not None:
initial_optimizer = initial_checkpoint.replace('_model.pth','_optimizer.pth')
if os.path.exists(initial_optimizer):
checkpoint = torch.load(initial_optimizer)
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
#optimizer.load_state_dict(checkpoint['optimizer'])
pass
log.write('optimizer\n %s\n'%(optimizer))
log.write('scheduler\n %s\n'%(scheduler))
log.write('\n')
## start training here! ##############################################
log.write('** start training here! **\n')
log.write(' batch_size=%d, iter_accum=%d\n'%(batch_size,iter_accum))
log.write(' experiment = %s\n' % __file__.split('/')[-2])
log.write(' |-------------------------------- VALID-----------------------------|---------- TRAIN/BATCH ------------------------------\n')
log.write('rate iter epoch | loss hit_neg,pos1,2,3,4 dice_neg,pos1,2,3,4 | loss hit_neg,pos1,2,3,4 | time \n')
log.write('------------------------------------------------------------------------------------------------------------------------------------------------\n')
#0.00000 0.0* 0.0 | 0.690 0.50 [0.00,1.00,0.00,1.00] 0.44 [0.00,0.02,0.00,0.15] | 0.000 0.00 [0.00,0.00,0.00,0.00] | 0 hr 00 min
train_loss = np.zeros(20,np.float32)
valid_loss = np.zeros(20,np.float32)
batch_loss = np.zeros(20,np.float32)
iter = 0
i = 0
start = timer()
while iter<num_iters:
sum_train_loss = np.zeros(20,np.float32)
sum = np.zeros(20,np.float32)
optimizer.zero_grad()
for t, (input, truth_mask, truth_label, infor) in enumerate(train_loader):
batch_size = len(infor)
iter = i + start_iter
epoch = (iter-start_iter)*batch_size/len(train_dataset) + start_epoch
#if 0:
if (iter % iter_valid==0):
valid_loss = do_valid(net, valid_loader, out_dir) #
#pass
if (iter % iter_log==0):
print('\r',end='',flush=True)
asterisk = '*' if iter in iter_save else ' '
log.write('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*train_loss[:6],
time_to_str((timer() - start),'min'))
)
log.write('\n')
#if 0:
if iter in iter_save:
torch.save(net.state_dict(),out_dir +'/checkpoint/%08d_model.pth'%(iter))
torch.save({
#'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, out_dir +'/checkpoint/%08d_optimizer.pth'%(iter))
pass
# learning rate scheduler -------------
lr = scheduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
# one iteration update -------------
#net.set_mode('train',is_freeze_bn=True)
net.train()
input = input.cuda()
truth_label = truth_label.cuda()
truth_mask = truth_mask.cuda()
logit = data_parallel(net,input) #net(input)
loss = criterion(logit, truth_mask, loss_weight)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
(loss/iter_accum).backward()
if (iter % iter_accum)==0:
optimizer.step()
optimizer.zero_grad()
# print statistics ------------
l = np.array([ loss.item(), tn,*tp ])
n = np.array([ batch_size, num_neg,*num_pos ])
batch_loss[:6] = l
sum_train_loss[:6] += l*n
sum[:6] += n
if iter%iter_smooth == 0:
train_loss = sum_train_loss/(sum+1e-12)
sum_train_loss[...] = 0
sum[...] = 0
print('\r',end='',flush=True)
asterisk = ' '
print('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*batch_loss[:6],
time_to_str((timer() - start),'min'))
, end='',flush=True)
i=i+1
# debug-----------------------------
if 1:
for di in range(3):
if (iter+di)%1000==0:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(batch_size):
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/train/%05d.png'%(di*100+b), result)
cv2.waitKey(1)
pass
pass #-- end of one data loader --
pass #-- end of all iterations --
log.write('\n')
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_train()
| 33.995249
| 180
| 0.523896
| 1,801
| 14,312
| 3.964464
| 0.177679
| 0.034734
| 0.014566
| 0.021849
| 0.435574
| 0.397619
| 0.342297
| 0.285854
| 0.247759
| 0.237395
| 0
| 0.043457
| 0.287731
| 14,312
| 420
| 181
| 34.07619
| 0.656955
| 0.103759
| 0
| 0.283088
| 0
| 0.018382
| 0.138558
| 0.05664
| 0
| 0
| 0
| 0
| 0.007353
| 1
| 0.018382
| false
| 0.029412
| 0.014706
| 0.003676
| 0.047794
| 0.022059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d09bdc81e45d8b676af2c3e285dd5d038ee1da
| 1,283
|
py
|
Python
|
city_coord_download.py
|
Yuchen971/Chinese-city-level-geojson
|
51f8d3d336f3e335b15bbf37882a9f248f0e6461
|
[
"MIT"
] | null | null | null |
city_coord_download.py
|
Yuchen971/Chinese-city-level-geojson
|
51f8d3d336f3e335b15bbf37882a9f248f0e6461
|
[
"MIT"
] | null | null | null |
city_coord_download.py
|
Yuchen971/Chinese-city-level-geojson
|
51f8d3d336f3e335b15bbf37882a9f248f0e6461
|
[
"MIT"
] | null | null | null |
import requests
import os
def get_json(save_dir, adcode):
# Fetch the boundary outline of the current map region
base_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '.json'
full_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '_full.json'
base_r = requests.get(base_url)
if base_r.status_code == 200:
cur_obj_name = base_r.json()['features'][0]['properties']['name']
print(cur_obj_name)
cur_file_dir = os.path.join(save_dir, cur_obj_name)
if not os.path.exists(cur_file_dir):
os.mkdir(cur_file_dir)
base_json_file = os.path.join(cur_file_dir, str(adcode) + '.json')
with open(base_json_file, 'w') as file:
file.write(base_r.text)
# Fetch the boundary outlines of the current region's child maps
full_r = requests.get(full_url)
if full_r.status_code == 200 and 'cur_obj_name' in vars():
full_json_file = os.path.join(cur_file_dir, str(adcode) + '_full.json')
with open(full_json_file, 'w') as file:
file.write(full_r.text)
for item in full_r.json()['features']:
chadcode = item['properties']['adcode']
if chadcode == adcode:
pass
else:
get_json(cur_file_dir, chadcode)
get_json('/Users/yuchenli/Downloads/city_geojson-master', 100000)
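# Rough usage notes (assumptions, not from the original script): 100000 appears
# to be the adcode for the whole country on the DataV GeoAtlas service, so the
# call above walks the region tree top-down. For each adcode the '<adcode>.json'
# boundary and the '<adcode>_full.json' child listing are saved, and every child
# adcode is recursed into, e.g. a hypothetical call
#
#   get_json('./geojson', 440000)   # would download one province and its cities
#
# The 'chadcode == adcode' check skips the region's own entry in its child
# listing, which is what stops the recursion at leaf regions.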
| 44.241379
| 87
| 0.626656
| 187
| 1,283
| 4.032086
| 0.326203
| 0.055703
| 0.079576
| 0.04244
| 0.278515
| 0.278515
| 0.278515
| 0.214854
| 0.214854
| 0.214854
| 0
| 0.013265
| 0.236165
| 1,283
| 29
| 88
| 44.241379
| 0.756122
| 0.015588
| 0
| 0
| 0
| 0
| 0.172086
| 0.035686
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0.037037
| 0.074074
| 0
| 0.111111
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d0b391f82dbbbda80cf6f637cf8415548b806e
| 1,881
|
py
|
Python
|
verticapy/tests/vDataFrame/test_vDF_create.py
|
sitingren/VerticaPy
|
aa18f4f1277e264005de2d1a8646c28acd1ba137
|
[
"Apache-2.0"
] | null | null | null |
verticapy/tests/vDataFrame/test_vDF_create.py
|
sitingren/VerticaPy
|
aa18f4f1277e264005de2d1a8646c28acd1ba137
|
[
"Apache-2.0"
] | null | null | null |
verticapy/tests/vDataFrame/test_vDF_create.py
|
sitingren/VerticaPy
|
aa18f4f1277e264005de2d1a8646c28acd1ba137
|
[
"Apache-2.0"
] | null | null | null |
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, warnings
from verticapy import vDataFrame, drop_table
from verticapy import set_option
set_option("print_info", False)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.learn.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop_table(name="public.titanic", cursor=base.cursor)
class TestvDFCreate:
def test_creating_vDF_using_input_relation(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="public.titanic", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_schema(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="titanic", schema="public", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_vcolumns(self, base, titanic_vd):
tvdf = vDataFrame(
input_relation="public.titanic",
usecols=["age", "survived"],
cursor=base.cursor,
)
assert tvdf["survived"].count() == 1234
@pytest.mark.skip(reason="test not implemented")
def test_creating_vDF_using_input_relation_dsn(self):
pass
| 34.2
| 88
| 0.725678
| 255
| 1,881
| 5.203922
| 0.486275
| 0.068576
| 0.060286
| 0.054258
| 0.330821
| 0.289375
| 0.289375
| 0.235117
| 0.201959
| 0.201959
| 0
| 0.015564
| 0.180223
| 1,881
| 54
| 89
| 34.833333
| 0.845006
| 0.308878
| 0
| 0.071429
| 0
| 0
| 0.094794
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 1
| 0.178571
| false
| 0.035714
| 0.142857
| 0
| 0.357143
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d33c72cdff1bb8b3302772a68873ef14217bfa
| 353
|
py
|
Python
|
Solutions/beta/beta_is_it_an_isogram.py
|
citrok25/Codewars-1
|
dc641c5079e2e8b5955eb027fd15427e5bdb2e26
|
[
"MIT"
] | 46
|
2017-08-24T09:27:57.000Z
|
2022-02-25T02:24:33.000Z
|
Solutions/beta/beta_is_it_an_isogram.py
|
abbhishek971/Codewars
|
9e761811db724da1e8aae44594df42b4ee879a16
|
[
"MIT"
] | null | null | null |
Solutions/beta/beta_is_it_an_isogram.py
|
abbhishek971/Codewars
|
9e761811db724da1e8aae44594df42b4ee879a16
|
[
"MIT"
] | 35
|
2017-08-01T22:09:48.000Z
|
2022-02-18T17:21:37.000Z
|
import re
from collections import Counter
def is_isogram(word):
if not isinstance(word, str) or word == '': return False
word = {j for i,j in Counter(
re.sub('[^a-z]', '', word.lower())
).most_common()
}
return len(word) == 1
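# Worked example (illustrative, matching the logic above): this kata treats a
# word as an "isogram" when every letter occurs the same number of times.
#
#   Counter("aabb").most_common()  -> [('a', 2), ('b', 2)]      -> counts {2}    -> True
#   Counter("moose").most_common() -> [('o', 2), ('m', 1), ...] -> counts {2, 1} -> False
#
# Non-letters are stripped by the re.sub first, so an input with no a-z
# characters yields an empty set and returns False, as do non-strings and ''.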
| 32.090909
| 67
| 0.430595
| 38
| 353
| 3.947368
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005208
| 0.456091
| 353
| 10
| 68
| 35.3
| 0.776042
| 0
| 0
| 0
| 0
| 0
| 0.016997
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d39e717fc52a479b273f0813ba804a39854ac0
| 1,011
|
py
|
Python
|
p23_Merge_k_Sorted_Lists.py
|
bzhou26/leetcode_sol
|
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
|
[
"MIT"
] | null | null | null |
p23_Merge_k_Sorted_Lists.py
|
bzhou26/leetcode_sol
|
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
|
[
"MIT"
] | null | null | null |
p23_Merge_k_Sorted_Lists.py
|
bzhou26/leetcode_sol
|
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
|
[
"MIT"
] | null | null | null |
'''
- Leetcode problem: 23
- Difficulty: Hard
- Brief problem description:
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
import heapq
from typing import List

class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
pq = []
for l in lists:
if l:
heapq.heappush(pq, (l.val, id(l), l))
newNode = ListNode()
result = newNode
while pq:
minVal, i, minNode = heapq.heappop(pq)
newNode.next = minNode
nextNode = minNode.next
newNode = minNode
if nextNode:
heapq.heappush(pq, (nextNode.val, id(nextNode), nextNode))
return result.next
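# Note on the heap entries above (explanatory, not part of the solution): the
# tuples pushed are (node.val, id(node), node). The id() acts as a tie-breaker
# so that heapq never has to compare two ListNode objects directly; without it,
# two equal values would make heappush raise
# "TypeError: '<' not supported between instances of 'ListNode' and 'ListNode'".
# Overall cost is O(N log k) for N total nodes across k lists.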
| 20.22
| 98
| 0.565776
| 127
| 1,011
| 4.472441
| 0.511811
| 0.007042
| 0.052817
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027221
| 0.309594
| 1,011
| 49
| 99
| 20.632653
| 0.786533
| 0.444115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d57c6c79fbcfbe80f6e85abd3550ed59d42da1
| 22,613
|
py
|
Python
|
src/LaminariaCore.py
|
MrKelpy/IFXG
|
695865a8140fdf258a643ee29d6439a59037bc99
|
[
"MIT"
] | null | null | null |
src/LaminariaCore.py
|
MrKelpy/IFXG
|
695865a8140fdf258a643ee29d6439a59037bc99
|
[
"MIT"
] | null | null | null |
src/LaminariaCore.py
|
MrKelpy/IFXG
|
695865a8140fdf258a643ee29d6439a59037bc99
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module is distributed as part of the Laminaria Core (Python Version).
Get the Source Code in GitHub:
https://github.com/MrKelpy/LaminariaCore
The LaminariaCore is Open Source and distributed under the
MIT License
"""
# Built-in Imports
import datetime
import random
import asyncio
import os
# Third Party Imports
import screeninfo
from discord.ext import commands
import discord
from fpdf import FPDF
# Local Application Imports
###############################################################################
### DATE & TIME ###
###############################################################################
def twochars(arg):
"""
Formats a string of two characters into the format of (0X), useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
def get_formatted_date(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
date_string = f"{twochars(str(date.day))}/{twochars(str(date.month))}/{twochars(str(date.year))} - " \
f"{twochars(str(date.hour))}:{twochars(str(date.minute))}"
if include_seconds:
date_string += f":{twochars(str(date.second))}"
return date_string
def get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
elif formatting == 2:
date_string = f"{twochars(str(now.day))}.{twochars(str(now.month))}.{twochars(str(now.year))}_" \
f"{twochars(str(now.hour))}.{twochars(str(now.minute))}"
else:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
if include_seconds:
date_string += f":{twochars(str(now.second))}"
return date_string
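# Example output (illustrative): for 3 May 2021, 14:07:09 the helpers above
# produce
#
#   get_formatted_date_now()                     -> "03/05/2021 - 14:07"
#   get_formatted_date_now(include_seconds=True) -> "03/05/2021 - 14:07:09"
#   get_formatted_date_now(formatting=2)         -> "03.05.2021_14.07"
#
# Note the year is not truncated to two digits, despite the DD/MM/YY wording in
# the docstrings, because twochars() only pads and never trims.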
def time_until_midnight():
"""
Get seconds left until midnight
"""
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
timedelta_until_midnight = datetime.datetime.combine(tomorrow, datetime.time.min) - datetime.datetime.now()
return timedelta_until_midnight.seconds
###############################################################################
### GENERAL ###
###############################################################################
def get_absolute_screen_coords(relx, rely):
"""
Returns absolute screen coordinates based off the given relative
coordinates. For instance, in a 1920x720 screen, the x50, y50 input would be
x960, y360.
:param relx: Relative X Coordinate
:param rely: Relative Y Coordinate
:return: Absolute Coordinates
"""
monitor = screeninfo.get_monitors()[0]
x = (relx*monitor.width)/100
y = (rely*monitor.height)/100
return x, y
def get_relative_screen_coords(x, y):
"""
Returns relative screen coordinates based off the given absolute
coordinates. The relative coordinates are percentage-based values calculated
relative to the monitor dimensions and the given coordinates.
:param x: Absolute X
:param y: Absolute Y
:return:
"""
monitor = screeninfo.get_monitors()[0]
relx = (x*100)/monitor.width
rely = (y*100)/monitor.height
return relx, rely
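# Illustrative example (assumes a 1920x1080 primary monitor): relative
# coordinates are simple percentages of the screen size, so
#
#   get_absolute_screen_coords(50, 50)   -> (960.0, 540.0)
#   get_relative_screen_coords(960, 540) -> (50.0, 50.0)
#
# i.e. the two helpers are inverses of each other for a fixed monitor.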
###############################################################################
### PLACEHOLDERS ###
###############################################################################
async def small_ipsum():
"""
Returns the small version of the lorem ipsum placeholder
:return:
"""
return "Lorem ipsum dolor sit amet."
async def big_ipsum():
"""
Returns a bigger version of the lorem ipsum text than the small_ipsum function does.
:return:
"""
return "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt " \
"ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco " \
"laboris nisi ut aliquip ex ea commodo consequat."
###############################################################################
### DISCORD.PY ###
###############################################################################
async def hasrole(role: discord.Role, user: discord.Member, add: bool = False):
"""
Checks if a user has a certain role.
:param role: The role to be checked for. -> discord.Role
:param user: The user. -> discord.Member
:param add: If set to True, adds the role to the user, will always return True.
:return: True, if user has the role. False otherwise.
"""
for r in user.roles:
if r == role:
return True
else:
if add is True:
await user.add_roles(role)
return True
return False
async def getrolenamed(role: str, guild: discord.Guild, create: bool = False, exact: bool = True):
"""
Returns a role inside a Guild based on a given name.
:param role: The role to be gathered. -> str
:param guild: The guild to retrieve the role from. -> discord.Guild
:param create: If set to True, creates the role. (If nonexistent!)
:param exact: If set to True, matches the role exactly
:return: discord.Role, None if not found.
"""
for r in guild.roles:
if exact and r.name == role:
return r
elif role in r.name:
return r
else:
if create is True:
colours = [discord.Colour.red(), discord.Colour.dark_teal(), discord.Colour.teal(), discord.Colour.gold(),
discord.Colour.blurple(), discord.Colour.purple(), discord.Colour.green(),
discord.Colour.greyple(),
discord.Colour.orange(), discord.Colour.light_grey()]
return_role = await guild.create_role(name=role, colour=random.choice(colours))
return return_role
return None
async def get_textchannel_by_name(channel: str, guild: discord.Guild,
delete: bool = False, create: bool = False, category: str = None, exact: bool = True):
"""
Returns a text channel based on a given name.
:param channel: The channel to be gathered. -> str
:param guild: The guild to retrieve the channel from. -> discord.Guild
:param delete: If set to True, deletes the channel. (If found!)
:param create: If set to True, creates the channel. (If not found!)
:param category: The category to create the channel into. (If create is True!)
:param exact: If set to True, the channel name must match exactly; otherwise a partial match is accepted.
:return: discord.TextChannel, None if not found.
"""
for text_channel in guild.text_channels:
if exact:
if text_channel.name == channel.lower():
if delete is True:
await text_channel.delete()
continue
return text_channel
else:
if channel.lower() in text_channel.name:
if delete is True:
await text_channel.delete()
continue
return text_channel
if create is True:
text_channel = await guild.create_text_channel(channel, category=category)
return text_channel
return None
async def get_category_by_name(category_name: str, guild: discord.Guild, delete: bool = False, create: bool = False,
exact: bool = True):
"""
Returns a category based on a given name.
:param exact: If set to True, matches the name exactly as it is.
:param category_name: The category to be gathered. -> str
:param guild: The guild to retrieve the category from. -> discord.Guild
:param delete: If set to True, deletes the category. (If found!)
:param create: If set to True, creates the category. (If not found!)
:return: discord.Category, None if not found.
"""
for category in guild.categories:
if exact and category.name == category_name:
if delete is True:
await category.delete()
continue
return category
elif not exact and category_name in category.name:
if delete is True:
await category.delete()
continue
return category
if create is True:
category = await guild.create_category(category_name)
return category
return None
async def twochars_async(arg):
"""
Formats a string of two characters into the format of (0X), useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
async def as_get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
date_string = f"{await twochars(str(now.day))}/{await twochars(str(now.month))}/{await twochars(str(now.year))} - " \
f"{await twochars(str(now.hour))}:{await twochars(str(now.minute))}"
elif formatting == 2:
date_string = f"{await twochars(str(now.day))}.{await twochars(str(now.month))}.{await twochars(str(now.year))}_" \
f"{await twochars(str(now.hour))}.{await twochars(str(now.minute))}"
else:
date_string = f"{await twochars(str(now.day))}/{await twochars(str(now.month))}/{await twochars(str(now.year))} - " \
f"{await twochars(str(now.hour))}:{await twochars(str(now.minute))}"
if include_seconds:
date_string += f":{await twochars(str(now.second))}"
return date_string
async def get_formatted_date_async(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
date_string = f"{await twochars(str(date.day))}/{await twochars(str(date.month))}/{await twochars(str(date.year))} - " \
f"{await twochars(str(date.hour))}:{await twochars(str(date.minute))}"
if include_seconds:
date_string += f":{await twochars(str(date.second))}"
return date_string
async def send_loading(channel: discord.TextChannel, colour=discord.Colour.red()):
"""
Sends a loading embed to a specified channel.
:param channel: The channel for the message to be sent to. -> discord.TextChannel
:param colour: The embed colour. -> discord.Colour
:return: discord.Embed
"""
loading_embed = discord.Embed(
title='Loading...',
colour=colour
)
loading = await channel.send(embed=loading_embed)
return loading
async def get_textchannel_chatlog(text_channel: discord.TextChannel, limit: int = None):
"""
Returns a TextChannel chatlog
:param text_channel: The text channel for the data to be gathered from
:param limit: An integer to limit the amount of messages retrieved.
:return: String
"""
all_messages = await text_channel.history(limit=limit).flatten()
all_messages.reverse()
# Parses out and legibilises the messages into a chatlog
chatlog = ""
for message in all_messages:
if message.embeds:
content = message.embeds[0].title
elif message.attachments:
content = f"FILE(s) :{[file.filename for file in message.attachments]}"
else:
content = message.content
content = content.split("```")
content = '\n'.join(content)
chatlog += f"[{await get_formatted_date_async(message.created_at, include_seconds=True)}] [- MSG ID: {message.id}]" \
f" [- AUTHOR ID: {message.author.id}] <{message.author}> {content}\n"
return chatlog
async def get_textchannel_firstmessage(text_channel: discord.TextChannel):
"""
Returns the first message on a TextChannel
:param text_channel: The textchannel to retrieve the message from. -> discord.TextChannel
:return: discord.Message
"""
all_messages = await text_channel.history(limit=None).flatten()
all_messages.reverse()
return all_messages[0]
async def get_member_object(member_id: int, guild: discord.Guild):
"""
Returns a discord.Member object of a member from a given ID
:param member_id: The member ID. -> int
:param guild: The guild to retrieve the member from. -> discord.Guild
:return: discord.Member, None if not found.
"""
for member in guild.members:
if int(member.id) == int(member_id):
return member
return None
async def show_help_menu(ctx, bot: commands.Bot, colour=discord.Colour.red(), reverse=False):
"""
Standard help menu used between bots created by Alex, with loads of quirks to make the UI more appealing.
The help menu is completely computer-generated.
Description management:
> Leaving the description of a command empty will cause it not to be shown in the UI
> Writing |String| at the beginning of a command description will have it sorted into a category
(Replace "String" with the category name)
> Categories are sorted alphabetically, as well as bot_commands.
> Not specifying a category will result in the command being thrown into a "General" category
:param reverse:
:param ctx: discord context.
:param bot: discord BOT instance.
:param colour: Help menu embed colour
:return: discord.Embed
"""
help_menu_base = discord.Embed(
title=f"{bot.user.name}'s Help Menu - ",
description=f"Prefix: `{ctx.prefix}`",
colour=colour
)
dev = await bot.fetch_user(740969223681212507)
commands_dictionary = dict()
embed_list = list()
for command in bot.commands:
# Iterates through all the registered bot_commands
if not command.description:
# Skips over the command if no description is provided
continue
category_name = "General"
if command.description.startswith("|") and command.description.count(
"|") == 2 and not command.description.endswith("||"):
# Parses out the category of a command if a match is detected
category_name = command.description.split("|")[1].strip().title()
command.description = command.description.split("|")[2].strip()
params = ""
alias_list = "No aliases found"
for param in command.clean_params:
# Parses out the command parameters for usage in the command info
params += f" <{param}> "
if command.aliases:
# If any command aliases exist, parse them out for usage in the command info
alias_list = ""
for alias in command.aliases:
alias_list += f"|{ctx.prefix}{alias}| "
# Build the dict update
try:
_ = commands_dictionary[category_name]
commands_dictionary[category_name].append([command.name, command.description, alias_list, params])
except KeyError:
command_registration = {category_name: [[command.name, command.description, alias_list, params]]}
commands_dictionary.update(command_registration)
for category in sorted(commands_dictionary):
# Loads in the categories with their bot_commands to the help menu
# Loads in the embed for the category
category_embed = help_menu_base.copy()
category_embed.title += f"{category} Commands"
for command in sorted(commands_dictionary[category]):
# Gets the command info
name = command[0]
description = command[1]
aliases = command[2]
params = command[3]
category_embed.add_field(name=name.title(), value=f"{description}\n`USAGE: {ctx.prefix}{name}{params}`\n"
f"`ALIASES: {aliases}`", inline=False)
category_embed.timestamp = datetime.datetime.now()
category_embed.set_footer(text=f"Developed by {dev}")
category_embed.set_thumbnail(url=bot.user.avatar_url)
embed_list.append(category_embed)
if reverse:
embed_list = reversed(embed_list)
for embed in embed_list:
# Sends all the embeds in the list
await ctx.send(embed=embed)
async def convert_txt_to_pdf(path: str):
"""
Converts a .txt file to a .pdf file
:param path: The path for the file. -> str
:return:
"""
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=15)
output_path = str(os.path.splitext(path)[0]) + ".pdf"
with open(path, 'r') as txtfile:
lines = txtfile.readlines()
for line in lines:
if line == '\n':
pdf.cell(200, 10, txt='\n', ln=1, align="L")
continue
if line[0] == "|" and line[2] == "|":
pdf.cell(200, 10, txt=line[3:].strip(), ln=1, align=line[1])
continue
pdf.cell(200, 10, txt=line.strip(), ln=1, align="L")
pdf.output(output_path)
async def load_missing_perms_embed(colour=discord.Colour.red()):
"""
Quickly loads a missing permissions embed
:param colour: The embed colour
:return: discord.Embed
"""
embed = discord.Embed(
title="Missing permissions!",
description="Sorry, you can't use this command.",
colour=colour
)
embed.timestamp = datetime.datetime.now()
return embed
async def interactive_dialog(**kwargs):
"""
Creates an "interactive dialog" as i name it; An embed that uses the wait_for() function together to facilitate the
creation of dialogs.
:param kwargs: expects ctx, channel, check, title, body and optionally emojis, colour.
> PILLAR ARGUMENTS are arguments that are mandatory; Vital for the function to be used.
> OPTIONAL ARGUMENTS are... optional arguments. What did you expect?
> "Ctx" is the command context. (PILLAR ARGUMENT)
> "Check" is the type of event to happen, aswell as the wait_for check to perform on the response. (PILLAR ARGUMENT)
> "Title" is the dialog embed title. (PILLAR ARGUMENT)
> "Body" is the dialog embed description. (PILLAR ARGUMENT)
> "Channel" is the place where to send the dialog to. (OPTIONAL ARGUMENT)
> "Emojis" is a list with a list of reactions, (UTF-8 Symbols) to add into the dialog. (OPTIONAL ARGUMENT)
> "Colour" is the dialog embed colour. Defaults to discord.Colours.red() (OPTIONAL ARGUMENT)
> "Picture" is the dialog image, the big picture at the bottom of the embed. (OPTIONAL ARGUMENT)
> "Thumbnail" is the dialog embed thumbnail, the small picture that gets placed on the top right side of the embed. (OPTIONAL ARGUMENT)
> "Footer" is the dialog footer, the small text at the bottom of the embed. (OPTIONAL ARGUMENT)
:return: The user's response.
"""
# Performs a kwargs check to raise errors if any of the pillar arguments are missing
if "ctx" not in kwargs: raise TypeError("Missing CTX argument in interactive dialog.")
if "check" not in kwargs: raise TypeError("Missing CHECK argument in interactive dialog.")
if "title" not in kwargs: raise TypeError("Missing TITLE argument in interactive dialog.")
if "body" not in kwargs: raise TypeError("Missing BODY argument in interactive dialog.")
# Performs a kwargs check to default the arguments if any of the optional arguments are missing
if "channel" not in kwargs: kwargs["channel"] = kwargs["ctx"].author
if "emojis" not in kwargs: kwargs["emojis"] = None
if "colour" not in kwargs: kwargs["colour"] = discord.Colour.red()
if "picture" not in kwargs: kwargs["picture"] = None
if "thumbnail" not in kwargs: kwargs["thumbnail"] = None
if "footer" not in kwargs: kwargs["footer"] = None
# Loads the dialog embed
dialog_embed = discord.Embed(
title=kwargs["title"],
description=kwargs["body"],
colour=kwargs["colour"]
)
dialog_embed.timestamp = datetime.datetime.now()
dialog_embed.set_thumbnail(url=kwargs["thumbnail"])
dialog_embed.set_image(url=kwargs["picture"])
dialog_embed.set_footer(text=kwargs["footer"])
# Sends the embed to the desired channel
dialog_message = await kwargs["channel"].send(embed=dialog_embed)
# Starts the event type checks, and their proper handlers
if kwargs["check"][0] == "message":
try:
msg = await kwargs["ctx"].bot.wait_for("message", kwargs["check"][1], timeout=120.0)
return msg
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return
if kwargs["check"][0] == "reaction":
if kwargs["emojis"] is not None:
# Adds the reactions to a message, if the emojis kwarg is not missing.
for emoji in kwargs["emojis"]:
await dialog_message.add_reaction(emoji)
try:
reaction, user = await kwargs["ctx"].bot.wait_for("message", kwargs["check"][1], timeout=120.0)
return reaction, user
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return
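# Hypothetical call sketch for the dialog helper above (argument names taken
# from the kwargs contract described in its docstring):
#
#   response = await interactive_dialog(
#       ctx=ctx,
#       check=("reaction", lambda reaction, user: user == ctx.author),
#       title="Confirm?",
#       body="React to continue.",
#       emojis=["\u2705", "\u274c"],
#   )
#
# An empty (None) return means the 120-second wait_for timeout expired.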
| 35.388106
| 139
| 0.618007
| 2,849
| 22,613
| 4.837838
| 0.165672
| 0.035116
| 0.032504
| 0.022056
| 0.343176
| 0.297976
| 0.266923
| 0.238845
| 0.224262
| 0.222666
| 0
| 0.006702
| 0.254367
| 22,613
| 638
| 140
| 35.443574
| 0.810747
| 0.118472
| 0
| 0.288809
| 0
| 0.032491
| 0.188528
| 0.103161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021661
| false
| 0
| 0.028881
| 0
| 0.187726
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d5c4c121e4fde76563c5c0ac59d5c2ef8f0cbc
| 23,139
|
py
|
Python
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1
|
2017-03-28T06:41:51.000Z
|
2017-03-28T06:41:51.000Z
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | null | null | null |
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1
|
2016-12-13T21:08:58.000Z
|
2016-12-13T21:08:58.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
"""\
=============================================
Parsing and Creation of YUV4MPEG format files
=============================================
YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends
video frame data structures to its "outbox" outbox.
FrameToYUV4MPEG does the reverse - taking frame data structures sent to its
"inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox."
The YUV4MPEG file format is supported by many tools, such as mjpegtools,
mplayer/mencoder, and ffmpeg.
Example Usage
-------------
Playback a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...),
YUV4MPEGToFrame(),
VideoOverlay()
).run()
Decode a dirac encoded video file to a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...),
DiracDecoder(),
FrameToYUV4MPEG(),
SimpleFileWriter("output.yuv4mpeg")
).run()
YUV4MPEGToFrame Behaviour
-------------------------
Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox
and frame data structures will be sent out of the "outbox" outbox as soon as
they are parsed.
See below for a description of the uncompressed frame data structure format.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
FrameToYUV4MPEG Behaviour
-------------------------
Send frame data structures to the "inbox" inbox of this component. YUV4MPEG
format binary string data will be sent out of the "outbox" outbox.
See below for a description of the uncompressed frame data structure format.
The header data for the YUV4MPEG file is determined from the first frame.
All frames sent to this component must therefore be in the same pixel format and
size, otherwise the output data will not be valid YUV4MPEG.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
=========================
UNCOMPRESSED FRAME FORMAT
=========================
A frame is a dictionary data structure. It must, at minimum contain the first 3
("yuv", "size" and "pixformat")::
{
"yuv" : (y_data, u_data, v_data) # a tuple of strings
"size" : (width, height) # in pixels
"pixformat" : pixelformat # format of raw video data
"frame_rate" : fps # frames per second
"interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields
"topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data
"pixel_aspect" : fraction # aspect ratio of pixels
"sequence_meta" : metadata # string containing extended metadata
# (no whitespace or control characters)
}
All other fields are optional when providing frames to FrameToYUV4MPEG.
YUV4MPEGToFrame only guarantees to fill in the YUV data itself. All other fields
will be filled in if the relevant header data is detected in the file.
The pixel formats recognised (and therefore supported) are::
"YUV420_planar"
"YUV411_planar"
"YUV422_planar"
"YUV444_planar"
"YUV4444_planar"
"Y_planar"
"""
from Axon.Component import component
#from Axon.Ipc import WaitComplete
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import re
from Kamaelia.Support.Data.Rationals import rational
class YUV4MPEGToFrame(component):
"""\
YUV4MPEGToFrame() -> new YUV4MPEGToFrame component.
Parses YUV4MPEG format binary data, sent as strings to its "inbox" inbox
and outputs uncompressed video frame data structures to its "outbox" outbox.
"""
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(YUV4MPEGToFrame,self).__init__()
self.remainder = ""
self.shutdownMsg = None
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
component can shutdown when it has finished processing pending data.
"""
while self.dataReady("control"):
newMsg = self.recv("control")
if isinstance(newMsg, shutdownMicroprocess):
self.shutdownMsg = newMsg
elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
self.shutdownMsg = newMsg
if isinstance(self.shutdownMsg, shutdownMicroprocess):
return "NOW"
elif self.shutdownMsg is not None:
return "WHENEVER"
else:
return None
def readline(self):
"""\
Generator.
Read up to the next newline char from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
bytes = []
newdata = self.remainder
index = newdata.find("\x0a")
while index==-1:
bytes.append(newdata)
while not self.dataReady("inbox"):
if self.checkShutdown():
self.bytesread=""
return
self.pause()
yield 1
newdata = self.recv("inbox")
index = newdata.find("\x0a")
tail = newdata[:index+1]
self.remainder = newdata[index+1:]
bytes.append(tail)
self.bytesread = "".join(bytes)
return
def readbytes(self,size):
"""\
Generator.
Read the specified number of bytes from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
buf = [self.remainder]
bufsize = len(self.remainder)
while bufsize < size:
if self.dataReady("inbox"):
newdata = self.recv("inbox")
buf.append(newdata)
bufsize += len(newdata)
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox") and bufsize<size):
self.bytesread=""
return
if bufsize<size and not self.anyReady():
self.pause()
yield 1
excess = bufsize-size
if excess:
wanted = buf[:-1]
tail, self.remainder = buf[-1][:-excess], buf[-1][-excess:]
wanted.append(tail)
else:
wanted = buf
self.remainder = ""
self.bytesread = "".join(wanted)
return
def safesend(self, data, boxname):
"""\
Generator.
Sends data out of the named outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space and retries
until it succeeds.
If a shutdownMicroprocess message is received, returns early.
"""
while 1:
try:
self.send(data, boxname)
return
except noSpaceInBox:
if self.checkShutdown() == "NOW":
return
self.pause()
yield 1
def main(self):
"""Main loop"""
# parse header
for _ in self.readline(): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
line = self.bytesread
m = re.match("^YUV4MPEG2((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
seq_params = parse_seq_tags(fields)
yield 1
while 1:
for _ in self.readline(): yield _
line = self.bytesread
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
m = re.match("^FRAME((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
frame_params = parse_frame_tags(fields)
ysize = seq_params["size"][0] * seq_params["size"][1]
csize = seq_params["chroma_size"][0] * seq_params["chroma_size"][1]
for _ in self.readbytes(ysize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
y = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
u = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
v = self.bytesread
frame = { "yuv" : (y,u,v) }
frame.update(seq_params)
frame.update(frame_params)
for _ in self.safesend(frame,"outbox"): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and not self.dataReady("inbox")):
break
yield 1
if self.shutdownMsg:
self.send(self.shutdownMsg, "signal")
else:
self.send(producerFinished(), "signal")
def parse_seq_tags(fields):
"""Parses YUV4MPEG header tags"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "W" in tags and "H" in tags:
params['size'] = (int(tags["W"]), int(tags["H"]))
else:
raise
if "C" in tags:
C = tags["C"]
if C == "420jpeg": # 4:2:0 with JPEG/MPEG-1 siting (default)
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420mpeg2": # 4:2:0 with MPEG-2 siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420paldv": # 4:2:0 with PAL-DV siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "411": # 4:1:1, cosited
params['pixformat'] = "YUV411_planar"
params['chroma_size'] = (params['size'][0]/4, params['size'][1])
elif C == "422": # 4:2:2, cosited
params['pixformat'] = "YUV422_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1])
elif C == "444": # 4:4:4 (no subsampling)
params['pixformat'] = "YUV444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "444alpha": # 4:4:4 with an alpha channel
params['pixformat'] = "YUV4444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "mono": # luma (Y') plane only
params['pixformat'] = "Y_planar"
params['chroma_size'] = (0,0)
else:
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
if "I" in tags:
I = tags["I"]
if I == "?": # unknown (default)
pass
elif I == "p": # progressive/none
params["interlaced"] = False
elif I == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif I == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif I == "m": # mixed-mode: refer to 'I' tag in frame header
pass
if "F" in tags:
m = re.match("^(\d+):(\d+)$",tags["F"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["frame_rate"] = num/denom
if "A" in tags:
m = re.match("^(\d+):(\d+)$",tags["A"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["pixel_aspect"] = num/denom
if "X" in tags:
params["sequence_meta"] = tags["X"]
return params
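# Example of the header parsing above (hypothetical stream header): a line such
# as "YUV4MPEG2 W720 H576 C420mpeg2 F25:1 Ip A1:1\n" would yield roughly
#
#   {"size": (720, 576), "pixformat": "YUV420_planar",
#    "chroma_size": (360, 288), "frame_rate": 25.0,
#    "interlaced": False, "pixel_aspect": 1.0}
#
# An unrecognised "C" tag value falls back to the 4:2:0 default.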
def parse_frame_tags(fields):
"""\
Parses YUV4MPEG frame tags.
"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "I" in tags:
x,y,z = tags["I"][0], tags["I"][1], tags["I"][2]
if x == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "T": # top-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "B": # bottom-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "1": # single progressive frame
params["interlaced"] = False
elif x == "2": # double progressive frame (repeat)
params["interlaced"] = False
elif x == "3": # triple progressive frame (repeat)
params["interlaced"] = False
if y == "p": # fields sampled at same time
params["interlaced"] = False
elif y == "i": # fields sampled at different times
params["interlaced"] = True
if z == "p": # progressive (subsampling over whole frame)
pass
elif z == "i": # interlaced (each field subsampled independently)
pass
elif z == "?": # unknown (allowed only for non-4:2:0 subsampling)
pass
if "X" in tags:
params["meta"] = tags["X"]
return params
class FrameToYUV4MPEG(component):
"""\
FrameToYUV4MPEG() -> new FrameToYUV4MPEG component.
Parses uncompressed video frame data structures sent to its "inbox" inbox
and writes YUV4MPEG format binary data as strings to its "outbox" outbox.
"""
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
ensures self.shutdownMsg contains the highest priority one encountered
so far.
"""
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg,shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canShutdown(self):
"""\
Returns true if the component should terminate when it has finished
processing any pending data.
"""
return isinstance(self.shutdownMsg, (producerFinished, shutdownMicroprocess))
def mustShutdown(self):
"""Returns true if the component should terminate immediately."""
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def sendoutbox(self,data):
"""\
Generator.
Sends data out of the "outbox" outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space. It keeps
retrying until it succeeds.
If the component is ordered to immediately terminate then "STOP" is
raised as an exception.
"""
while 1:
try:
self.send(data,"outbox")
return
except noSpaceInBox:
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
self.pause()
yield 1
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
def main(self):
"""Main loop"""
self.shutdownMsg = None
try:
while not self.dataReady("inbox"):
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
frame = self.recv("inbox")
for _ in self.write_header(frame):
yield _
for _ in self.write_frame(frame):
yield _
while 1:
while self.dataReady("inbox"):
frame = self.recv("inbox")
for _ in self.write_frame(frame):
yield _
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
except "STOP":
self.send(self.shutdownMsg,"signal")
def write_header(self, frame):
"""\
Generator.
Sends the YUV4MPEG format header to the "outbox" outbox, based on
attributes of the supplied frame data structure.
"""
format = "YUV4MPEG2 W%d H%d" % tuple(frame['size'])
if frame['pixformat']=="YUV420_planar":
format += " C420mpeg2"
elif frame['pixformat']=="YUV411_planar":
format += " C411"
elif frame['pixformat']=="YUV422_planar":
format += " C422"
elif frame['pixformat']=="YUV444_planar":
format += " C444"
elif frame['pixformat']=="YUV4444_planar":
format += " C444alpha"
elif frame['pixformat']=="Y_planar":
format += " Cmono"
interlace = frame.get("interlaced",False)
topfieldfirst = frame.get("topfieldfirst",False)
if interlace and topfieldfirst:
format += " It"
elif interlace and not topfieldfirst:
format += " Ib"
elif not interlace:
format += " Ip"
rate = frame.get("frame_rate", 0)
if rate > 0:
num,denom = rational(rate)
format += " F%d:%d" % (num,denom)
rate = frame.get("pixel_aspect", 0)
if rate > 0:
num,denom = rational(rate)
format += " A%d:%d" % (num,denom)
if "sequence_meta" in frame:
format += " X"+frame['sequence_meta']
format += "\x0a"
for _ in self.sendoutbox(format):
yield _
def write_frame(self, frame):
"""\
Generator.
Writes out YUV4MPEG format frame marker and data.
"""
for _ in self.sendoutbox("FRAME\x0a"):
yield _
for component in frame['yuv']:
for _ in self.sendoutbox(component):
yield _
__kamaelia_components__ = ( YUV4MPEGToFrame, FrameToYUV4MPEG, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Reading import RateControlledFileReader
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
Pipeline( RateControlledFileReader("/data/stream.yuv",readmode="bytes",rate=25*(608256+128)),
YUV4MPEGToFrame(),
FrameToYUV4MPEG(),
YUV4MPEGToFrame(),
VideoOverlay(),
).run()
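# --- Illustrative sketch (not part of the Kamaelia module above) ---
# Shows the YUV4MPEG2 header string that write_header() would emit for a
# hypothetical 720x576 progressive YUV420 frame at 25 fps. The frame dict keys
# mirror the ones read above; fractions.Fraction stands in for the module's
# rational() helper, so the exact numerator/denominator reduction is an assumption.
from fractions import Fraction

example_frame = {"size": (720, 576), "pixformat": "YUV420_planar",
                 "interlaced": False, "frame_rate": 25}

rate = Fraction(example_frame["frame_rate"]).limit_denominator(1001)
header = "YUV4MPEG2 W%d H%d" % tuple(example_frame["size"])
header += " C420mpeg2"                               # pixformat mapping above
header += " Ip"                                      # progressive scan
header += " F%d:%d" % (rate.numerator, rate.denominator)
header += "\x0a"
print(repr(header))  # 'YUV4MPEG2 W720 H576 C420mpeg2 Ip F25:1\n'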
| 35.543779
| 107
| 0.563075
| 2,587
| 23,139
| 4.98879
| 0.177426
| 0.025027
| 0.008368
| 0.015342
| 0.446382
| 0.398109
| 0.37068
| 0.343794
| 0.322641
| 0.294049
| 0
| 0.017109
| 0.325554
| 23,139
| 650
| 108
| 35.598462
| 0.809881
| 0.390682
| 0
| 0.492492
| 0
| 0
| 0.099212
| 0
| 0
| 0
| 0
| 0
| 0.006006
| 1
| 0.045045
| false
| 0.015015
| 0.024024
| 0
| 0.12012
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d7520f9994f6836e73faaf42f63009eee0dc64
| 697
|
py
|
Python
|
project/cli/event.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 1
|
2021-06-01T14:49:18.000Z
|
2021-06-01T14:49:18.000Z
|
project/cli/event.py
|
DanielGrams/gsevp
|
e94034f7b64de76f38754b56455e83092378261f
|
[
"MIT"
] | 286
|
2020-12-04T14:13:00.000Z
|
2022-03-09T19:05:16.000Z
|
project/cli/event.py
|
DanielGrams/gsevpt
|
a92f71694388e227e65ed1b24446246ee688d00e
|
[
"MIT"
] | null | null | null |
import click
from flask.cli import AppGroup
from project import app, db
from project.dateutils import berlin_tz
from project.services.event import (
get_recurring_events,
update_event_dates_with_recurrence_rule,
)
event_cli = AppGroup("event")
@event_cli.command("update-recurring-dates")
def update_recurring_dates():
    # Setting the timezone is necessary for the CLI command
db.session.execute("SET timezone TO :val;", {"val": berlin_tz.zone})
events = get_recurring_events()
for event in events:
update_event_dates_with_recurrence_rule(event)
db.session.commit()
click.echo(f"{len(events)} event(s) were updated.")
app.cli.add_command(event_cli)
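# --- Illustrative sketch (not part of the CLI module above) ---
# One way to exercise the registered command in a test, assuming `project.app`
# and the database are reachable in the test context; Flask's test_cli_runner()
# drives AppGroup commands the same way `flask event update-recurring-dates` would.
def _try_update_recurring_dates():
    runner = app.test_cli_runner()
    result = runner.invoke(args=["event", "update-recurring-dates"])
    assert result.exit_code == 0
    assert "event(s) were updated." in result.output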
| 24.034483
| 72
| 0.746055
| 98
| 697
| 5.081633
| 0.459184
| 0.066265
| 0.072289
| 0.088353
| 0.180723
| 0.180723
| 0.180723
| 0.180723
| 0
| 0
| 0
| 0
| 0.157819
| 697
| 28
| 73
| 24.892857
| 0.848382
| 0.071736
| 0
| 0
| 0
| 0
| 0.134884
| 0.034109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.277778
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98d900684301053ffd4e6344e16abaa1c0d10ed9
| 3,647
|
py
|
Python
|
test/functional/examples/test_examples.py
|
ymn1k/testplan
|
b1bde8495c449d75a74a7fe4e7c6501b0476f833
|
[
"Apache-2.0"
] | null | null | null |
test/functional/examples/test_examples.py
|
ymn1k/testplan
|
b1bde8495c449d75a74a7fe4e7c6501b0476f833
|
[
"Apache-2.0"
] | null | null | null |
test/functional/examples/test_examples.py
|
ymn1k/testplan
|
b1bde8495c449d75a74a7fe4e7c6501b0476f833
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:13:18.000Z
|
2019-09-11T09:13:18.000Z
|
import os
import re
import sys
import subprocess
import pytest
from testplan.common.utils.path import change_directory
import platform
ON_WINDOWS = platform.system() == 'Windows'
KNOWN_EXCEPTIONS = [
"TclError: Can't find a usable init\.tcl in the following directories:", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: lib.*\.so\..+: cannot open shared object file: No such file or directory", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: No module named sklearn.*", # Missing module sklearn. Will skip Data Science example.
"ImportError: No module named Tkinter", # Missing module Tkinter. Will skip Data Science example.
"ImportError: No module named _tkinter.*", # Missing module Tkinter. Will skip Data Science example.
"RuntimeError: Download pyfixmsg library .*", # Missing module pyfixmsg. Will skip FIX example.
"No spec file set\. You should download .*", # Missing FIX spec file. Will skip FIX example.
"AttributeError: 'module' object has no attribute 'poll'",
"RuntimeError: You need to compile test binary first." # Need to compile cpp binary first. Will skip GTest example.
]
SKIP_ON_WINDOWS = [
os.path.join('Cpp', 'GTest', 'test_plan.py'),
]
ROOT_DIR_CONTENTS = [
"setup.py",
"requirements.txt",
"README.rst",
"LICENSE.md"
]
def _depth_from_repo_root():
cwd = os.getcwd()
depth = []
while True:
contents = os.listdir(cwd)
if all([entry in contents for entry in ROOT_DIR_CONTENTS]):
return depth
parent_dir = os.path.dirname(cwd)
if os.path.realpath(cwd) == os.path.realpath(parent_dir):
raise RuntimeError('Could not find repo directory')
depth.append(os.pardir)
cwd = parent_dir
def _relative_dir(directory):
path_args = _depth_from_repo_root() + [directory]
return os.path.join(*path_args)
def _param_formatter(param):
if 'examples' in param:
return repr(param.rsplit('examples')[1])
return repr(param)
@pytest.mark.parametrize(
'root,filename',
[
(os.path.abspath(root), filename)
for root, _, files in os.walk(
_relative_dir(os.path.join('testplan', 'examples')))
for filename in files
if 'test_plan' in filename
],
ids=_param_formatter,
)
def test_example(root, filename):
file_path = os.path.join(root, filename)
if ON_WINDOWS and any(
[file_path.endswith(skip_name) for skip_name in SKIP_ON_WINDOWS]
):
pytest.skip()
with change_directory(root), open(filename) as file_obj:
file_obj.readline()
second_line = file_obj.readline()
try:
subprocess.check_output(
[sys.executable, filename],
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
out = e.output.decode()
for exception in KNOWN_EXCEPTIONS:
if re.search(exception, out):
pytest.xfail()
assert 'Exception in test_plan definition' not in out, \
'Exception raised in test_plan definition.'
assert 'Traceback (most recent call last):' not in out, \
'Exception raised during test:\n{}'.format(out)
assert \
('# This plan contains tests that demonstrate failures '
'as well.') == second_line.strip(), \
"Expected \'{}\' example to pass, it failed.\n{}".format(
file_path,
out
)
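# --- Illustrative sketch (not part of the test module above) ---
# How _param_formatter() shortens parametrize ids: everything after the literal
# 'examples' path segment is kept, so ids stay readable wherever the repo is
# checked out. The concrete path below is made up for the example.
_example_path = '/home/ci/testplan/examples/Basic/test_plan.py'
print(_param_formatter(_example_path))  # prints: '/Basic/test_plan.py'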
| 34.733333
| 166
| 0.636413
| 443
| 3,647
| 5.119639
| 0.388262
| 0.028219
| 0.026455
| 0.041887
| 0.168871
| 0.148589
| 0.148589
| 0.148589
| 0.148589
| 0.142857
| 0
| 0.000372
| 0.262682
| 3,647
| 104
| 167
| 35.067308
| 0.843064
| 0.127228
| 0
| 0
| 0
| 0
| 0.270951
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 1
| 0.045455
| false
| 0.011364
| 0.125
| 0
| 0.215909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98db431598c035c6864fd313e00c493666f532f6
| 1,223
|
py
|
Python
|
peco/template/template.py
|
Tikubonn/peco
|
c77fc163ad31d3c271d299747914ce4ef3386987
|
[
"MIT"
] | null | null | null |
peco/template/template.py
|
Tikubonn/peco
|
c77fc163ad31d3c271d299747914ce4ef3386987
|
[
"MIT"
] | null | null | null |
peco/template/template.py
|
Tikubonn/peco
|
c77fc163ad31d3c271d299747914ce4ef3386987
|
[
"MIT"
] | null | null | null |
from io import StringIO
class Template:
"""
    This holds the information parsed from source code.
    You can get rendered text with .render() and .render_string().
"""
def __init__(self, sentencenode, scope):
self.sentencenode = sentencenode
self.scope = scope
def render(self, stream, **parameters):
"""
        Render the template to a stream with the given parameters.
        Parameters
        ----------
        stream: io.TextIOBase
            the file-like object used for output.
        parameters:
            keyword arguments used for rendering.
"""
with self.scope:
for name, value in parameters.items():
self.scope.set_value(name, value)
self.sentencenode.write(stream)
def render_string(self, **parameters):
"""
        Render the template with parameters and return the rendered text.
        Parameters
        ----------
        parameters:
            keyword arguments used for rendering.
        Returns
        -------
        rendered: str
            the rendered text.
"""
with StringIO() as stream:
self.render(stream, **parameters)
rendered = stream.getvalue()
return rendered
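# --- Illustrative sketch (not part of the peco module above) ---
# How a Template would typically be consumed. How an instance is built
# (sentencenode + scope) is parser-specific and not shown here, so `template`
# is just a parameter; only the render()/render_string() calls mirror the
# class above.
def _render_both_ways(template, **params):
    text = template.render_string(**params)        # buffered via StringIO
    with StringIO() as stream:
        template.render(stream, **params)          # direct streaming variant
        assert stream.getvalue() == text
    return text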
| 23.519231
| 66
| 0.555192
| 119
| 1,223
| 5.647059
| 0.428571
| 0.071429
| 0.071429
| 0.059524
| 0.08631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.349959
| 1,223
| 51
| 67
| 23.980392
| 0.845283
| 0.370401
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.066667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98dc08bcdfcddaf7d2d055024948658ae151bf17
| 2,342
|
py
|
Python
|
mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py
|
ministryofjustice/mtp-api
|
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
|
[
"MIT"
] | 5
|
2016-01-05T12:21:35.000Z
|
2020-10-28T17:06:02.000Z
|
mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py
|
ministryofjustice/mtp-api
|
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
|
[
"MIT"
] | 209
|
2015-06-12T09:39:41.000Z
|
2022-03-21T16:01:19.000Z
|
mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py
|
ministryofjustice/mtp-api
|
b1c34c29e4aa9f48598cb060abe1368ae7686e0b
|
[
"MIT"
] | 1
|
2021-04-11T06:19:23.000Z
|
2021-04-11T06:19:23.000Z
|
from core import getattr_path
from rest_framework import status
from credit.tests.test_views.test_credit_list.test_security_credit_list import SecurityCreditListTestCase
class CreditListWithBlankStringFiltersTestCase(SecurityCreditListTestCase):
def assertAllResponsesHaveBlankField(self, filters, blank_fields, expected_filter): # noqa: N802
expected_results = list(filter(expected_filter, self._get_managed_prison_credits()))
url = self._get_url(**filters)
response = self.client.get(
url, format='json',
HTTP_AUTHORIZATION=self.get_http_authorization_for_user(self._get_authorised_user())
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = []
for result in response.data.get('results', []):
results.append(result['id'])
for blank_field in blank_fields:
self.assertIn(result[blank_field], ['', None])
self.assertListEqual(
sorted(results),
sorted(expected_result.id for expected_result in expected_results)
)
def test_blank_sender_name(self):
self.assertAllResponsesHaveBlankField(
{
'sender_name__isblank': 'True'
},
['sender_name'],
lambda credit: getattr_path(credit, 'transaction.sender_name', None) == ''
)
def test_blank_sender_sort_code(self):
self.assertAllResponsesHaveBlankField(
{
'sender_sort_code__isblank': 'True'
},
['sender_sort_code'],
lambda credit: getattr_path(credit, 'transaction.sender_sort_code', None) == ''
)
def test_blank_sender_account_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_account_number__isblank': 'True'
},
['sender_account_number'],
lambda credit: getattr_path(credit, 'transaction.sender_account_number', None) == ''
)
def test_blank_sender_roll_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_roll_number__isblank': 'True'
},
['sender_roll_number'],
lambda credit: getattr_path(credit, 'transaction.sender_roll_number', None) == ''
)
| 36.59375
| 105
| 0.634927
| 223
| 2,342
| 6.295964
| 0.295964
| 0.039174
| 0.034188
| 0.051282
| 0.260684
| 0.139601
| 0.139601
| 0.074074
| 0
| 0
| 0
| 0.003519
| 0.27199
| 2,342
| 63
| 106
| 37.174603
| 0.819941
| 0.00427
| 0
| 0.075472
| 0
| 0
| 0.133476
| 0.093133
| 0
| 0
| 0
| 0
| 0.150943
| 1
| 0.09434
| false
| 0
| 0.056604
| 0
| 0.169811
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98dc59660d9259931f06beb23b9db7e987e199a4
| 3,800
|
py
|
Python
|
vipermonkey/core/filetype.py
|
lap1nou/ViperMonkey
|
631d242f43108226bb25ed91e773a274012dc8c2
|
[
"Unlicense"
] | 874
|
2016-09-29T08:19:00.000Z
|
2022-03-28T03:34:16.000Z
|
vipermonkey/core/filetype.py
|
Mercury-180/ViperMonkey
|
1045dadcf7bebedc126ca36d25475e413196d053
|
[
"Unlicense"
] | 94
|
2016-09-30T17:03:36.000Z
|
2022-03-01T17:25:26.000Z
|
vipermonkey/core/filetype.py
|
Mercury-180/ViperMonkey
|
1045dadcf7bebedc126ca36d25475e413196d053
|
[
"Unlicense"
] | 186
|
2016-09-29T10:59:37.000Z
|
2022-03-26T10:20:38.000Z
|
"""
Check for Office file types
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: Philippe Lagadec - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
# === LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2016 Philippe Lagadec (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Office magic numbers.
magic_nums = {
"office97" : "D0 CF 11 E0 A1 B1 1A E1", # Office 97
"office2007" : "50 4B 3 4", # Office 2007+ (PKZip)
}
# PE magic number.
pe_magic_num = "4D 5A"
def get_1st_8_bytes(fname, is_data):
info = None
is_data = (is_data or (len(fname) > 200))
if (not is_data):
try:
tmp = open(fname, 'rb')
tmp.close()
except:
is_data = True
if (not is_data):
with open(fname, 'rb') as f:
info = f.read(8)
else:
info = fname[:9]
curr_magic = ""
for b in info:
curr_magic += hex(ord(b)).replace("0x", "").upper() + " "
return curr_magic
def is_pe_file(fname, is_data):
"""
Check to see if the given file is a PE executable.
return - True if it is a PE file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
    # See if we have the known magic #.
return (curr_magic.startswith(pe_magic_num))
def is_office_file(fname, is_data):
"""
Check to see if the given file is a MS Office file format.
return - True if it is an Office file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have 1 of the known magic #s.
for typ in magic_nums.keys():
magic = magic_nums[typ]
if (curr_magic.startswith(magic)):
return True
return False
def is_office97_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office97 magic #.
return (curr_magic.startswith(magic_nums["office97"]))
def is_office2007_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office 2007 magic #.
return (curr_magic.startswith(magic_nums["office2007"]))
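# --- Illustrative sketch (not part of the ViperMonkey module above) ---
# The same hex-string comparison the helpers above perform, written directly
# against raw bytes (get_1st_8_bytes() iterates with ord(), which assumes
# Python 2 byte strings). The sample buffer below is made up.
def _first_8_bytes_as_hex(data):
    return "".join(hex(b).replace("0x", "").upper() + " " for b in data[:8])

_sample = bytes.fromhex("D0CF11E0A1B11AE1") + b"\x00" * 8
print(_first_8_bytes_as_hex(_sample).startswith(magic_nums["office97"]))  # True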
| 33.043478
| 84
| 0.678158
| 560
| 3,800
| 4.5
| 0.373214
| 0.033333
| 0.032143
| 0.02381
| 0.329762
| 0.286111
| 0.259127
| 0.219048
| 0.219048
| 0.219048
| 0
| 0.026798
| 0.224211
| 3,800
| 114
| 85
| 33.333333
| 0.828019
| 0.612632
| 0
| 0.153846
| 0
| 0
| 0.057637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0
| 0
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98dec69515aeffc54b77de9f6161248b53aa1b30
| 2,699
|
py
|
Python
|
packs/kubernetes/tests/test_third_party_resource.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 164
|
2015-01-17T16:08:33.000Z
|
2021-08-03T02:34:07.000Z
|
packs/kubernetes/tests/test_third_party_resource.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 442
|
2015-01-01T11:19:01.000Z
|
2017-09-06T23:26:17.000Z
|
packs/kubernetes/tests/test_third_party_resource.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 202
|
2015-01-13T00:37:40.000Z
|
2020-11-07T11:30:10.000Z
|
from st2tests.base import BaseSensorTestCase
from third_party_resource import ThirdPartyResource
class ThirdPartyResourceTestCase(BaseSensorTestCase):
sensor_cls = ThirdPartyResource
def test_k8s_object_to_st2_trigger_bad_object(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians'
# uid missing
# label missing
}
}
}
sensor = self.get_sensor_instance()
self.assertRaises(KeyError, sensor._k8s_object_to_st2_trigger, k8s_obj)
def test_k8s_object_to_st2_trigger(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians',
'uid': 'coinye',
'labels': ['rapper', 'train wrecker']
}
}
}
sensor = self.get_sensor_instance()
payload = sensor._k8s_object_to_st2_trigger(k8s_obj)
self.assertTrue('resource' in payload)
self.assertEqual(payload['resource'], k8s_obj['type'])
self.assertTrue('object_kind' in payload)
self.assertEqual(payload['object_kind'], k8s_obj['object']['kind'])
self.assertTrue('name' in payload)
self.assertEqual(payload['name'], k8s_obj['object']['metadata']['name'])
self.assertTrue('labels' in payload)
self.assertListEqual(payload['labels'], k8s_obj['object']['metadata']['labels'])
self.assertTrue('namespace' in payload)
self.assertEqual(payload['namespace'], k8s_obj['object']['metadata']['namespace'])
self.assertTrue('uid' in payload)
self.assertEqual(payload['uid'], k8s_obj['object']['metadata']['uid'])
def test_get_trigger_payload_from_line(self):
line = '{"object": {"kind": "president", ' + \
'"metadata": {"labels": ["rapper", "train wrecker"], ' + \
'"namespace": "westashians", ' + \
'"name": "west", "uid": "coinye"}}, "type": "kanye"}'
sensor = self.get_sensor_instance()
payload = sensor._get_trigger_payload_from_line(line)
self.assertTrue(payload is not None)
self.assertTrue('resource' in payload)
self.assertTrue('object_kind' in payload)
self.assertTrue('name' in payload)
self.assertTrue('labels' in payload)
self.assertTrue('namespace' in payload)
self.assertTrue('uid' in payload)
| 40.283582
| 90
| 0.567247
| 257
| 2,699
| 5.747082
| 0.206226
| 0.123223
| 0.096818
| 0.081246
| 0.607312
| 0.459039
| 0.287068
| 0.148951
| 0.104265
| 0.104265
| 0
| 0.01
| 0.296036
| 2,699
| 66
| 91
| 40.893939
| 0.767368
| 0.009263
| 0
| 0.465517
| 0
| 0
| 0.202546
| 0
| 0
| 0
| 0
| 0
| 0.344828
| 1
| 0.051724
| false
| 0
| 0.034483
| 0
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98e0601566ba652e64eedad746be214634e5e438
| 17,357
|
py
|
Python
|
MrWorldwide.py
|
AnonymousHacker1279/MrWorldwide
|
a782194e1ebe3a1cd73409e3d4dc9946700bcc0e
|
[
"MIT"
] | null | null | null |
MrWorldwide.py
|
AnonymousHacker1279/MrWorldwide
|
a782194e1ebe3a1cd73409e3d4dc9946700bcc0e
|
[
"MIT"
] | null | null | null |
MrWorldwide.py
|
AnonymousHacker1279/MrWorldwide
|
a782194e1ebe3a1cd73409e3d4dc9946700bcc0e
|
[
"MIT"
] | null | null | null |
from PyQt6.QtWidgets import QApplication, QWidget, QFileDialog
import PyQt6.QtCore as QtCore
import PyQt6.QtGui as QtGui
import sys, time, json, requests, traceback, configparser, os
import MrWorldwideUI, ConfigurationUI, UpdateManagerUI
version = "v1.0.0"
class LangTypes:
ENGLISH = "English"
ARABIC = "Arabic"
CHINESE = "Chinese"
DUTCH = "Dutch"
FRENCH = "French"
GERMAN = "German"
HINDI = "Hindi"
INDONESIAN = "Indonesian"
IRISH = "Irish"
ITALIAN = "Italian"
JAPANESE = "Japanese"
KOREAN = "Korean"
POLISH = "Polish"
PORTUGUESE = "Portuguese"
RUSSIAN = "Russian"
SPANISH = "Spanish"
TURKISH = "Turkish"
UKRANIAN = "Ukranian"
VIETNAMESE = "Vietnamese"
class WorkerSignals(QtCore.QObject):
callback = QtCore.pyqtSignal(str)
class Worker(QtCore.QRunnable):
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progressCallback'] = self.signals.callback
@QtCore.pyqtSlot()
def run(self):
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
print(traceback.print_exc())
else:
self.signals.callback.emit(result)
def readConfigurationFile(config):
try:
configFile = open("config.ini")
configFile.close()
return config.read("config.ini")
except:
config['general'] = {}
config['general']['libretranslate_mirror'] = 'https://translate.astian.org/translate'
config['defaults'] = {}
config['defaults']['default_source_language'] = LangTypes.ENGLISH
config['defaults']['default_target_language'] = LangTypes.SPANISH
with open('config.ini', 'w') as configFile:
config.write(configFile)
configFile.close()
return config
class MrWorldwide(QWidget, MrWorldwideUI.Ui_Dialog, QtCore.QThread):
selectedFile = ""
selectedTargetLocation = ""
sourceFileKeys = []
sourceFileValues = []
totalLangFileLines = 0
shouldAbort = False
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/MrWorldwide.png"))
icon = QtGui.QIcon(resource_path("gui_resources/MrWorldwide.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
self.config = configparser.ConfigParser()
readConfigurationFile(self.config)
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.abortButton.clicked.connect(self.abortEvent)
self.startButton.clicked.connect(self.preTranslate)
self.openFileButton.clicked.connect(self.openFileEvent)
self.targetLocationButton.clicked.connect(self.selectFileLocationEvent)
self.configButton.clicked.connect(self.openConfiguration)
# Setup dropdown boxes
self.sourceLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.targetLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.sourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.targetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
self.apiMirror = self.config["general"]["libretranslate_mirror"]
# Open the configuration GUI
def openConfiguration(self, event):
self.configurationDialog = ConfigurationDialog()
self.configurationDialog.setup(self)
self.configurationDialog.show()
# Refresh the configuration
def refreshConfiguration(self):
readConfigurationFile(self.config)
self.sourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.targetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
self.apiMirror = self.config["general"]["libretranslate_mirror"]
# Close event, for handling closing of the program
def closeEvent(self, event):
global app
self.close()
app.exit()
# Abort event, for shutting down translation functions
def abortEvent(self, event):
global shouldAbort
global totalLangFileLines
self.shouldAbort = True
self.progressBar.setValue(0)
self.progressBarLabel.setText("Idle")
self.logAction("ABORT: Translation process canceled.")
# Open file event, for selecting a language file and starting the read process
def openFileEvent(self, event):
self.totalLangFileLines = 0
self.selectedFile = QFileDialog.getOpenFileName(self, 'Select a Minecraft language file', '','JSON Files (*.json)')[0]
self.fileSelectionBox.setText(str(self.selectedFile))
self.readLangFile()
# Select output file location event, for setting the target location
def selectFileLocationEvent(self, event):
self.selectedTargetLocation = QFileDialog.getSaveFileName(self, 'Select an output location', 'target.json','JSON Files (*.json)')[0]
self.targetLocationBox.setText(str(self.selectedTargetLocation))
# Read a language file and get the keys, values, and set various content on the GUI
def readLangFile(self):
global sourceFileValues
global totalLangFileLines
self.sourceFileValues = []
self.sourceFileKeys = []
# Read input JSON and make it usable
startReadInputTime = time.time()
if self.selectedFile != "":
with open(self.selectedFile, 'r') as f:
data = json.load(f)
self.sourceFileKeys = data.keys()
for item in data:
if self.shouldAbort:
return
self.sourceFileValues.append(data[item])
self.totalLangFileLines = self.totalLangFileLines + 1
self.logAction("Reading input file took " + str(((time.time() - startReadInputTime) * 1000).__round__(3)) + " ms.")
self.langFileEntryCounter.display(self.totalLangFileLines)
self.logAction("Found " + str(self.totalLangFileLines) + " entries.")
def preTranslate(self, event):
global totalLangFileLines
global selectedFile
global selectedTargetLocation
canProceed = True
self.shouldAbort = False
if self.selectedFile == "":
self.logAction("ERROR: No language file selected.")
canProceed = False
elif self.totalLangFileLines == 0:
self.logAction("ERROR: The selected language file is empty.")
canProceed = False
elif self.selectedTargetLocation == "":
self.logAction("ERROR: No target location specified.")
canProceed = False
elif self.sourceLangBox.currentText() == self.targetLangBox.currentText():
self.logAction("ERROR: Target language is the same as the source")
canProceed = False
if canProceed:
self.logAction("Beginning translations with a source language of " + self.sourceLangBox.currentText() + " and a target language of " + self.targetLangBox.currentText())
self.logAction("Using LibreTranslate mirror: " + self.config["general"]["libretranslate_mirror"])
self.disableButtonsDuringTranslations()
self.threadpool = QtCore.QThreadPool()
self.worker = Worker(self.startTranslations)
self.worker.signals.callback.connect(self.threadCallbackHandler)
self.threadpool.start(self.worker)
def disableButtonsDuringTranslations(self):
self.startButton.setDisabled(True)
self.openFileButton.setDisabled(True)
self.targetLocationButton.setDisabled(True)
self.closeButton.setDisabled(True)
self.configButton.setDisabled(True)
def enableButtonsAfterTranslations(self):
self.startButton.setDisabled(False)
self.openFileButton.setDisabled(False)
self.targetLocationButton.setDisabled(False)
self.closeButton.setDisabled(False)
self.configButton.setDisabled(False)
def threadCallbackHandler(self, callback):
try:
exec(callback)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
exctype, value, traceback.format_exc()
app.exit()
def startTranslations(self, progressCallback):
global sourceFileValues
global totalLangFileLines
global shouldAbort
progressCallback.emit('self.progressBarLabel.setText("Starting translations")')
# Set query headers
headers = {
'accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
# Really inefficient but it works ¯\_(ツ)_/¯
startQueryTime = time.time()
responseJSON = []
progressCallback.emit('self.progressBarLabel.setText("Translating...")')
itemLoopIteration = 1
try:
requests.post(self.config["general"]["libretranslate_mirror"], headers=headers, data=None)
hasFailedResolve = False
except:
requests.post('https://translate.astian.org/translate', headers=headers, data=None)
progressCallback.emit('self.logAction("Failed to resolve LibreTranslate mirror. Defaulting to https://translate.astian.org/translate")')
hasFailedResolve = True
for item in self.sourceFileValues:
if self.shouldAbort:
return
# Setup the progress bar, by mapping the total translation count to 100
progressCallback.emit('self.progressBar.setValue(int(((' + str(itemLoopIteration) + ' / self.totalLangFileLines) * 100).__round__(0)))')
# Set query data
data = {
'q': item,
'source': self.getLangIdentifier(self.sourceLangBox.currentText()),
'target': self.getLangIdentifier(self.targetLangBox.currentText())
}
# Send the query and get the response
if hasFailedResolve == True:
response = requests.post('https://translate.astian.org/translate', headers=headers, data=data)
else:
response = requests.post(self.config["general"]["libretranslate_mirror"], headers=headers, data=data)
responseData = json.loads(response.content.decode(response.encoding))["translatedText"]
responseJSON.append(str(responseData).rstrip('"').replace('\u00ab', '').lstrip('"').replace('\u00bb', ''))
itemLoopIteration = itemLoopIteration + 1
progressCallback.emit('self.logAction("Query time was " + str(time.time() - ' + str(startQueryTime) + ') + " seconds.")')
progressCallback.emit('self.progressBarLabel.setText("Translations complete")')
progressCallback.emit('self.saveToFile(' + str(responseJSON) + ')')
# Save the JSON data to file
def saveToFile(self, responseJSON):
global sourceFileKeys
global shouldAbort
self.progressBarLabel.setText("Writing to file...")
self.progressBar.setValue(0)
with open(self.targetLocationBox.text(), 'w', encoding="UTF-8") as f:
compiledDict = dict()
responseJSONList = list(responseJSON)
currentIteration = 0
for item in self.sourceFileKeys:
if self.shouldAbort:
return
compiledDict.update({item: str(responseJSONList[currentIteration])})
currentIteration = currentIteration + 1
progBarVal = int(((currentIteration / self.totalLangFileLines) * 100).__round__(0))
self.progressBar.setValue(progBarVal)
json.dump(compiledDict, f, separators=(',', ': '), indent=" ", ensure_ascii=False)
self.enableButtonsAfterTranslations()
self.logAction("Translations written to file.")
self.progressBarLabel.setText("All tasks completed.")
# Log information to the console
def logAction(self, text: str):
if self.logBox.text() == "No log information available. ":
self.logBox.setText("")
preparedLogText = ">> " + text
else:
preparedLogText = self.logBox.text() + "\n>> " + text
self.logBox.setText(preparedLogText)
self.logBoxScrollArea.verticalScrollBar().setValue(self.logBoxScrollArea.verticalScrollBar().maximum())
def getLangIdentifier(self, lang):
if lang == LangTypes.ENGLISH:
return "en"
if lang == LangTypes.ARABIC:
return "ar"
if lang == LangTypes.CHINESE:
return "zh"
if lang == LangTypes.DUTCH:
return "nl"
if lang == LangTypes.FRENCH:
return "fr"
if lang == LangTypes.GERMAN:
return "de"
if lang == LangTypes.HINDI:
return "hi"
if lang == LangTypes.INDONESIAN:
return "id"
if lang == LangTypes.IRISH:
return "ga"
if lang == LangTypes.ITALIAN:
return "it"
if lang == LangTypes.JAPANESE:
return "ja"
if lang == LangTypes.KOREAN:
return "ko"
if lang == LangTypes.POLISH:
return "pl"
if lang == LangTypes.PORTUGUESE:
return "pt"
if lang == LangTypes.RUSSIAN:
return "ru"
if lang == LangTypes.SPANISH:
return "es"
if lang == LangTypes.TURKISH:
return "tr"
if lang == LangTypes.UKRANIAN:
return "uk"
if lang == LangTypes.VIETNAMESE:
return "vi"
# Initialize the program
def __init__(self, parent=None):
global app
super(MrWorldwide, self).__init__(parent)
self.setupUi(self)
self.run()
class ConfigurationDialog(QWidget, ConfigurationUI.Ui_Dialog):
def __init__(self, parent=None):
super(ConfigurationDialog, self).__init__(parent)
self.setupUi(self)
self.run()
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/Configuration.png"))
icon = QtGui.QIcon(resource_path("gui_resources/Configuration.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
# Read configuration
self.config = configparser.ConfigParser()
readConfigurationFile(self.config)
# Setup dropdown boxes
self.defaultSourceLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.defaultTargetLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
# Apply current configuration
self.apiMirror.setText(self.config["general"]["libretranslate_mirror"])
self.defaultSourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.defaultTargetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.applyButton.clicked.connect(self.applyEvent)
self.updateButton.clicked.connect(self.openUpdateManager)
# Setup variables
def setup(self, parent):
self.parent = parent
# Close event, for handling closing of the program
def closeEvent(self, event):
self.close()
# Update event, for opening the update manager
# Open the configuration GUI
def openUpdateManager(self, event):
self.updateManagerDialog = UpdateManagerDialog()
self.updateManagerDialog.setup(self)
self.updateManagerDialog.show()
# Apply event, for handling applying of configurations
def applyEvent(self, event):
self.config = configparser.ConfigParser()
self.config['general'] = {}
self.config['general']['libretranslate_mirror'] = self.apiMirror.text()
self.config['defaults'] = {}
self.config['defaults']['default_source_language'] = self.defaultSourceLangBox.currentText()
self.config['defaults']['default_target_language'] = self.defaultTargetLangBox.currentText()
with open('config.ini', 'w') as configFile:
self.config.write(configFile)
configFile.close()
self.parent.refreshConfiguration()
self.close()
class UpdateManagerDialog(QWidget, UpdateManagerUI.Ui_Dialog):
def __init__(self, parent=None):
super(UpdateManagerDialog, self).__init__(parent)
self.setupUi(self)
self.run()
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/Updates.png"))
icon = QtGui.QIcon(resource_path("gui_resources/Updates.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.checkUpdatesButton.clicked.connect(self.checkForUpdatesEvent)
global version
self.currentVersionBox.setText(version)
# Setup variables
def setup(self, parent):
self.parent = parent
# Close event, for handling closing of the program
def closeEvent(self, event):
self.close()
# Check for updates event
def checkForUpdatesEvent(self, event):
self.updateData = json.loads(requests.get("https://raw.githubusercontent.com/AnonymousHacker1279/MrWorldwide/master/update.json").text)
self.latestVersionBox.setText(self.updateData["latest"])
self.changelogBox.setText(self.updateData["changelog"] + "\n\nDownload the update here: " + self.updateData["link"])
def main():
global app
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
app.setStyle("Fusion")
form = MrWorldwide()
form.show()
app.exec()
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath('.'), relative_path)
if __name__ == '__main__':
main()
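# --- Illustrative sketch (not part of the application above) ---
# The per-entry LibreTranslate request issued inside startTranslations(): the
# mirror URL, headers and the {'q', 'source', 'target'} payload follow the code
# above; the sample arguments and timeout are illustrative only.
def _translate_one(text, source="en", target="es",
                   mirror="https://translate.astian.org/translate"):
    headers = {
        "accept": "application/json",
        "Content-Type": "application/x-www-form-urlencoded",
    }
    data = {"q": text, "source": source, "target": target}
    response = requests.post(mirror, headers=headers, data=data, timeout=30)
    return json.loads(response.content.decode(response.encoding))["translatedText"]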
| 37.008529
| 399
| 0.75157
| 1,921
| 17,357
| 6.735554
| 0.210828
| 0.018549
| 0.022026
| 0.020403
| 0.331788
| 0.285416
| 0.271118
| 0.255738
| 0.223356
| 0.223356
| 0
| 0.002905
| 0.127499
| 17,357
| 469
| 400
| 37.008529
| 0.85136
| 0.082215
| 0
| 0.239669
| 0
| 0.002755
| 0.147857
| 0.052181
| 0
| 0
| 0
| 0.002132
| 0
| 1
| 0.090909
| false
| 0.00551
| 0.013774
| 0
| 0.264463
| 0.00551
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98e15c2d42b427bf4ffb23842980cd80d4cd57bf
| 7,429
|
py
|
Python
|
tools/az_cli.py
|
google/cloud-forensics-utls
|
719093b4a229e5e97c30d93faabb1ccf3b6ee422
|
[
"Apache-2.0"
] | null | null | null |
tools/az_cli.py
|
google/cloud-forensics-utls
|
719093b4a229e5e97c30d93faabb1ccf3b6ee422
|
[
"Apache-2.0"
] | null | null | null |
tools/az_cli.py
|
google/cloud-forensics-utls
|
719093b4a229e5e97c30d93faabb1ccf3b6ee422
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo CLI tool for Azure."""
import os
from datetime import datetime
from typing import TYPE_CHECKING
from Crypto.PublicKey import RSA
from libcloudforensics import logging_utils
from libcloudforensics.providers.azure.internal import account
from libcloudforensics.providers.azure.internal import monitoring
from libcloudforensics.providers.azure import forensics
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
if TYPE_CHECKING:
import argparse
def ListInstances(args: 'argparse.Namespace') -> None:
"""List instances in Azure subscription.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
instances = az_account.compute.ListInstances(
resource_group_name=args.resource_group_name)
logger.info('Instances found:')
for instance in instances.values():
boot_disk = instance.GetBootDisk()
logger.info(
'Name: {0:s}, Boot disk: {1:s}'.format(instance.name, boot_disk.name))
def ListDisks(args: 'argparse.Namespace') -> None:
"""List disks in Azure subscription.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
disks = az_account.compute.ListDisks(
resource_group_name=args.resource_group_name)
logger.info('Disks found:')
for disk_name, disk in disks.items():
logger.info('Name: {0:s}, Region: {1:s}'.format(disk_name, disk.region))
def CreateDiskCopy(args: 'argparse.Namespace') -> None:
"""Create an Azure disk copy.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
logger.info('Starting disk copy...')
disk_copy = forensics.CreateDiskCopy(args.default_resource_group_name,
instance_name=args.instance_name,
disk_name=args.disk_name,
disk_type=args.disk_type,
region=args.region,
src_profile=args.src_profile,
dst_profile=args.dst_profile)
logger.info(
'Done! Disk {0:s} successfully created. You will find it in '
'your Azure subscription under the name {1:s}.'.format(
disk_copy.resource_id, disk_copy.name))
def StartAnalysisVm(args: 'argparse.Namespace') -> None:
"""Start forensic analysis VM.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
attach_disks = []
if args.attach_disks:
attach_disks = args.attach_disks.split(',')
# Check if attach_disks parameter exists and if there
# are any empty entries.
if not (attach_disks and all(elements for elements in attach_disks)):
logger.error('error: parameter --attach_disks: {0:s}'.format(
args.attach_disks))
return
ssh_public_key = args.ssh_public_key
if not ssh_public_key:
# According to https://docs.microsoft.com/cs-cz/samples/azure-samples/
# resource-manager-python-template-deployment/resource-manager-python-
# template-deployment/ there's no API to generate a new SSH key pair in
# Azure, so we do this manually...
ssh_public_key = _GenerateSSHKeyPair(args.instance_name)
logger.info('Starting analysis VM...')
vm = forensics.StartAnalysisVm(args.default_resource_group_name,
args.instance_name,
int(args.disk_size),
ssh_public_key,
cpu_cores=int(args.cpu_cores),
memory_in_mb=int(args.memory_in_mb),
region=args.region,
attach_disks=attach_disks,
dst_profile=args.dst_profile)
logger.info('Analysis VM started.')
logger.info('Name: {0:s}, Started: {1:s}'.format(vm[0].name, str(vm[1])))
def _GenerateSSHKeyPair(vm_name: str) -> str:
"""Generate a SSH key pair and returns its public key.
Both public and private keys will be saved in the current directory.
Args:
vm_name (str): The VM name for which to generate the key pair.
Returns:
str: The public key for the generated SSH key pair.
Raises:
ValueError: If vm_name is None.
"""
if not vm_name:
raise ValueError('Parameter vm_name must not be None.')
logger.info('Generating a new SSH key pair for VM: {0:s}'.format(vm_name))
key = RSA.generate(2048)
key_name = '{0:s}-ssh'.format(vm_name)
public_key = key.publickey().exportKey('OpenSSH')
path_public_key = os.path.join(os.getcwd(), key_name + '.pub')
private_key = key.exportKey('PEM')
path_private_key = os.path.join(os.getcwd(), key_name + '.pem')
with open(path_private_key, 'wb') as f:
f.write(private_key)
with open(path_public_key, 'wb') as f:
f.write(public_key)
logger.info('SSH key pair generated. Public key saved in {0:s}, private key '
'saved in {1:s}'.format(path_public_key, path_private_key))
return public_key.decode('utf-8')
def ListMetrics(args: 'argparse.Namespace') -> None:
"""List Azure Monitoring metrics for a resource.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
az_monitoring = monitoring.AZMonitoring(az_account)
metrics = az_monitoring.ListAvailableMetricsForResource(args.resource_id)
for metric in metrics:
logger.info('Available metric: {0:s}'.format(metric))
def QueryMetrics(args: 'argparse.Namespace') -> None:
"""Query Azure Monitoring metrics for a resource.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
RuntimeError: If from_date or to_date could not be parsed.
"""
az_account = account.AZAccount(args.default_resource_group_name)
az_monitoring = monitoring.AZMonitoring(az_account)
from_date, to_date = args.from_date, args.to_date
if from_date and to_date:
try:
from_date = datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%SZ')
to_date = datetime.strptime(to_date, '%Y-%m-%dT%H:%M:%SZ')
except ValueError as exception:
raise RuntimeError(
'Cannot parse date: {0!s}'.format(exception)) from exception
metrics = az_monitoring.GetMetricsForResource(
args.resource_id,
metrics=args.metrics,
from_date=from_date,
to_date=to_date,
interval=args.interval,
aggregation=args.aggregation or 'Total',
qfilter=args.qfilter)
for metric, metric_value in metrics.items():
logger.info('Metric: {0:s}'.format(metric))
for timestamp, value in metric_value.items():
logger.info(' Timestamp: {0:s}, value: {1:s}'.format(timestamp, value))
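# --- Illustrative sketch (not part of the CLI module above) ---
# Every handler above takes an argparse.Namespace, so they can be driven
# programmatically as well. The resource group name below is a placeholder,
# and passing resource_group_name=None to list across the subscription is an
# assumption about the underlying libcloudforensics call.
import argparse

_args = argparse.Namespace(
    default_resource_group_name='my-forensics-rg',  # placeholder
    resource_group_name=None,
)
ListInstances(_args)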
| 35.208531
| 79
| 0.679768
| 972
| 7,429
| 5.041152
| 0.254115
| 0.028571
| 0.051429
| 0.030612
| 0.290408
| 0.221633
| 0.195918
| 0.155918
| 0.14449
| 0.124898
| 0
| 0.005829
| 0.214834
| 7,429
| 210
| 80
| 35.37619
| 0.834219
| 0.256562
| 0
| 0.127273
| 0
| 0
| 0.138704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063636
| false
| 0
| 0.081818
| 0
| 0.163636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98e710a1b1cb3e42d4cbdb66250958e21888c440
| 804
|
py
|
Python
|
interface/inter5.py
|
CeciliaDornelas/Python
|
883959ed2e10cd8e8ace2b640e1944edc0c1d8a3
|
[
"MIT"
] | null | null | null |
interface/inter5.py
|
CeciliaDornelas/Python
|
883959ed2e10cd8e8ace2b640e1944edc0c1d8a3
|
[
"MIT"
] | null | null | null |
interface/inter5.py
|
CeciliaDornelas/Python
|
883959ed2e10cd8e8ace2b640e1944edc0c1d8a3
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget
from PyQt5.QtCore import QSize
class HelloWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setMinimumSize(QSize(280, 120))
self.setWindowTitle("Olá, Mundo! Exemplo PyQT5")
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
gridLayout = QGridLayout(self)
centralWidget.setLayout(gridLayout)
title = QLabel("Olá Mundo para PyQt", self)
title.setAlignment(QtCore.Qt.AlignCenter)
gridLayout.addWidget(title, 0, 0)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = HelloWindow()
mainWin.show()
sys.exit( app.exec_() )
| 26.8
| 69
| 0.691542
| 85
| 804
| 6.341176
| 0.529412
| 0.050093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018927
| 0.211443
| 804
| 29
| 70
| 27.724138
| 0.83123
| 0
| 0
| 0
| 0
| 0
| 0.064838
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98e753afbcdb25feef4bb770897b167108c721b5
| 1,523
|
py
|
Python
|
setup.py
|
notwa/scipybiteopt
|
62e1510789b680483ad867984849af215a9848c5
|
[
"MIT"
] | null | null | null |
setup.py
|
notwa/scipybiteopt
|
62e1510789b680483ad867984849af215a9848c5
|
[
"MIT"
] | null | null | null |
setup.py
|
notwa/scipybiteopt
|
62e1510789b680483ad867984849af215a9848c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import numpy
from setuptools import setup, Extension
# Include the markdown long description on the PyPI page
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# https://github.com/pypa/packaging-problems/issues/84
# no sensible way to include header files by default
headers = ['scipybiteopt/biteopt.h',
'scipybiteopt/biteoptort.h',
'scipybiteopt/spheropt.h',
'scipybiteopt/biteaux.h',
'scipybiteopt/nmsopt.h']
def get_c_sources(files, include_headers=False):
return files + (headers if include_headers else [])
module1 = Extension('scipybiteopt.biteopt',
sources=get_c_sources(['scipybiteopt/biteopt_py_ext.cpp'], include_headers=(sys.argv[1] == "sdist")),
language="c++",
include_dirs=[numpy.get_include()],
extra_compile_args=['-std=c++11', '-O3'] if os.name != 'nt' else ['-O3'])
setup(name='scipybiteopt',
version='1.1.1',
description="Scipy style wrapper for Aleksey Vaneev's BiteOpt",
author='dschmitz89',
author_email='danielschmitzsiegen@gmail.com',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url = 'https://github.com/dschmitz89/scipybiteopt',
packages = ['scipybiteopt'],
ext_modules = [module1],
install_requires=[
'numpy']
)
| 35.418605
| 119
| 0.670387
| 184
| 1,523
| 5.396739
| 0.586957
| 0.060423
| 0.028197
| 0.060423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013799
| 0.19107
| 1,523
| 42
| 120
| 36.261905
| 0.792208
| 0.107682
| 0
| 0
| 0
| 0
| 0.282657
| 0.127675
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.121212
| 0.030303
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98eb89e6efe4554abbe1506f10c8ccfbcb3dedf8
| 2,234
|
py
|
Python
|
HoverSlam.py
|
GiantWaffleCode/WafflePython
|
d3e85ce6d9c792e7338eb825307f7bb48113742a
|
[
"MIT"
] | 13
|
2020-10-13T00:19:21.000Z
|
2020-12-31T02:38:58.000Z
|
HoverSlam.py
|
GiantWaffleCode/WafflePython
|
d3e85ce6d9c792e7338eb825307f7bb48113742a
|
[
"MIT"
] | null | null | null |
HoverSlam.py
|
GiantWaffleCode/WafflePython
|
d3e85ce6d9c792e7338eb825307f7bb48113742a
|
[
"MIT"
] | 10
|
2020-10-13T00:19:52.000Z
|
2020-12-31T02:39:42.000Z
|
import krpc
import time
import math
from simple_pid import PID
conn = krpc.connect(name="UI Test")
vessel = conn.space_center.active_vessel
kerbin_frame = vessel.orbit.body.reference_frame
orb_frame = vessel.orbital_reference_frame
srf_frame = vessel.surface_reference_frame
surface_gravity = vessel.orbit.body.surface_gravity
current_met = conn.add_stream(getattr, vessel, 'met')
current_roll = conn.add_stream(getattr, vessel.flight(), 'roll')
current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch')
current_heading = conn.add_stream(getattr, vessel.flight(), 'heading')
current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')
lowest = conn.add_stream(vessel.bounding_box, srf_frame)
current_drag = conn.add_stream(getattr, vessel.flight(), 'drag')
current_aero = conn.add_stream(getattr, vessel.flight(), 'aerodynamic_force')
current_speed = conn.add_stream(getattr, vessel.flight(kerbin_frame), 'speed')
vessel.control.activate_next_stage()
vessel.control.sas = True
time.sleep(.2)
vessel.control.sas_mode = conn.space_center.SASMode.retrograde
def bottom_altitude():
return max(0, current_alt() - abs(lowest()[0][0]))
for engine in vessel.parts.engines:
engine.gimbal_locked = True
while True:
aero_amp = math.sqrt(current_aero()[0] ** 2
+ current_aero()[1] ** 2
+ current_aero()[2] ** 2)
time_to_zero = current_speed() / ((((vessel.max_thrust * .9) + aero_amp) / vessel.mass)
+ vessel.orbit.body.surface_gravity)
if (time_to_zero * current_speed()) >= bottom_altitude() - current_speed():
print(current_speed())
print(f"Start Hover Slam Burn")
vessel.control.throttle = .9
break
while current_speed() > 50:
print(current_speed())
time.sleep(.01)
pass
print(f"Switch to Stab")
for leg in vessel.parts.legs:
leg.deployed = True
pid1 = PID(.15, 0, .5, setpoint=0)
pid1.output_limits = (0, 1)
pid1.sample_time = 0.01
while bottom_altitude() > 1:
vessel.control.throttle = pid1(bottom_altitude())
# pid1.setpoint *= .98
time.sleep(.01)
vessel.control.sas_mode = conn.space_center.SASMode.radial
vessel.control.throttle = 0
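# --- Illustrative sketch (not part of the script above) ---
# The burn-start estimate from the loop above with made-up numbers: stopping
# time is speed over net deceleration (90% max thrust plus drag, per unit mass,
# plus surface gravity, exactly as written above), and the burn starts once the
# stopping distance reaches the remaining altitude margin.
_max_thrust = 180_000.0      # N, hypothetical engine
_aero_amp = 2_000.0          # N, drag magnitude
_mass = 9_000.0              # kg
_gravity = 9.81              # m/s^2
_speed = 240.0               # m/s descent speed
_altitude = 3_000.0          # m above the craft's lowest point

_time_to_zero = _speed / (((_max_thrust * .9 + _aero_amp) / _mass) + _gravity)
print(round(_time_to_zero, 2))                       # ~8.56 s
print(_time_to_zero * _speed >= _altitude - _speed)  # burn once this is True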
| 33.848485
| 91
| 0.705461
| 307
| 2,234
| 4.918567
| 0.338762
| 0.041722
| 0.077483
| 0.10596
| 0.288742
| 0.203974
| 0.055629
| 0.055629
| 0
| 0
| 0
| 0.019818
| 0.164279
| 2,234
| 65
| 92
| 34.369231
| 0.788966
| 0.008953
| 0
| 0.075472
| 0
| 0
| 0.046564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0.018868
| 0.075472
| 0.018868
| 0.113208
| 0.075472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98ee7596428318903272a404f3751220eec8a490
| 11,760
|
py
|
Python
|
datapackage_pipelines/web/server.py
|
gperonato/datapackage-pipelines
|
72b98918db1c19590586a3a85c5b087227cbbc3b
|
[
"MIT"
] | 109
|
2016-09-01T08:41:55.000Z
|
2021-11-10T10:08:35.000Z
|
datapackage_pipelines/web/server.py
|
gperonato/datapackage-pipelines
|
72b98918db1c19590586a3a85c5b087227cbbc3b
|
[
"MIT"
] | 144
|
2016-08-30T16:26:50.000Z
|
2021-04-18T09:06:12.000Z
|
datapackage_pipelines/web/server.py
|
gperonato/datapackage-pipelines
|
72b98918db1c19590586a3a85c5b087227cbbc3b
|
[
"MIT"
] | 34
|
2016-09-05T12:46:53.000Z
|
2022-03-05T01:53:49.000Z
|
import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
if x is None:
return ''
return str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
ret = yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
return ret
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
def group(lvl):
pipelines = list(filter(lambda x: len(x['id']) == 1, lvl))
children_ = list(filter(lambda x: len(x['id']) > 1, lvl))
groups_ = {}
for child in children_:
child_key = child['id'].pop(0)
groups_.setdefault(child_key, []).append(child)
children_ = dict(
(k, group(v))
for k, v in groups_.items()
)
for p in pipelines:
p['id'] = p['id'][0]
return {
'pipelines': pipelines,
'children': children_
}
def flatten(children_):
for k, v in children_.items():
v['children'] = flatten(v['children'])
child_keys = list(v['children'].keys())
if len(child_keys) == 1 and len(v['pipelines']) == 0:
child_key = child_keys[0]
children_['/'.join([k, child_key])] = v['children'][child_key]
del children_[k]
return children_
statuses = [
{
'id': st['id'].split('/'),
'title': st.get('title'),
'stats': st.get('stats'),
'slug': st.get('slug')
}
for st in statuses
]
groups = group(statuses)
children = groups.get('children', {})
groups['children'] = flatten(children)
return groups
def basic_auth_required(view_func):
"""
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
pipeline_ids = sorted(status.all_pipeline_ids())
# If we have a pipeline_path, filter the pipeline ids.
if pipeline_path is not None:
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in pipeline_ids:
pipeline_status = status.get(pipeline_id)
ex = pipeline_status.get_last_execution()
success_ex = pipeline_status.get_last_successful_execution()
pipeline_obj = {
'id': pipeline_id.lstrip('./'),
'title': pipeline_status.pipeline_details.get('title'),
'stats': user_facing_stats(ex.stats) if ex else None,
'slug': slugify.slugify(pipeline_id),
'trigger': ex.trigger if ex else None,
'error_log': pipeline_status.errors(),
'state': pipeline_status.state(),
'pipeline': pipeline_status.pipeline_details,
'message': pipeline_status.state().capitalize(),
'dirty': pipeline_status.dirty(),
'runnable': pipeline_status.runnable(),
'class': {'INIT': 'primary',
'QUEUED': 'primary',
'INVALID': 'danger',
'RUNNING': 'warning',
'SUCCEEDED': 'success',
'FAILED': 'danger'
}[pipeline_status.state()],
'ended': datestr(ex.finish_time) if ex else None,
'started': datestr(ex.start_time) if ex else None,
'last_success':
datestr(success_ex.finish_time) if success_ex else None,
}
statuses.append(pipeline_obj)
def state_and_not_dirty(state, p):
return p.get('state') == state and not p.get('dirty')
def state_or_dirty(state, p):
return p.get('state') == state or p.get('dirty')
categories = [
['ALL', 'All Pipelines', lambda _, __: True],
['INVALID', "Can't start", lambda _, p: not p['runnable']],
['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
['RUNNING', 'Running', state_and_not_dirty],
['FAILED', 'Failed Execution', state_and_not_dirty],
['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
]
for item in categories:
item.append([p for p in deepcopy(statuses)
if item[2](item[0], p)])
item.append(len(item[-1]))
item.append(make_hierarchies(item[-2]))
return render_template('dashboard.html',
categories=categories,
yamlize=yamlize,
markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
for pipeline in pipelines:
# can get the full details from api/raw/<path:pipeline_id>
for attr in ["pipeline", "reason", "error_log"]:
if attr in pipeline:
del pipeline[attr]
return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
last_execution = pipeline_status.get_last_execution()
last_successful_execution = pipeline_status.get_last_successful_execution()
ret = {
"id": pipeline_id,
"cache_hash": pipeline_status.cache_hash,
"dirty": pipeline_status.dirty(),
"queued": last_execution.queue_time if last_execution else None,
"started": last_execution.start_time if last_execution else None,
"ended": last_execution.finish_time if last_execution else None,
"reason": last_execution.log if last_execution else None,
"error_log": pipeline_status.errors(),
"stats": last_execution.stats if last_execution else None,
"success": last_execution.success if last_execution else None,
"last_success":
last_successful_execution.finish_time
if last_successful_execution else None,
"trigger": last_execution.trigger if last_execution else None,
"pipeline": pipeline_status.pipeline_details,
"source": pipeline_status.source_spec,
"message": pipeline_status.state().capitalize(),
"state": pipeline_status.state(),
}
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
subject, text, colour)
r = requests.get(image_url)
buffer_image = BytesIO(r.content)
buffer_image.seek(0)
res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
res.headers['Cache-Control'] = \
'max-age=0, no-cache, no-store, must-revalidate'
res.headers['Expires'] = '0'
return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
'''An individual pipeline status'''
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
status_color = 'lightgray'
if pipeline_status.pipeline_details:
status_text = pipeline_status.state().lower()
last_execution = pipeline_status.get_last_execution()
success = last_execution.success if last_execution else None
if success is True:
stats = last_execution.stats if last_execution else None
record_count = stats.get('count_of_rows') if stats else None
if record_count is not None:
status_text += ' (%d records)' % record_count
status_color = 'brightgreen'
elif success is False:
status_color = 'red'
else:
status_text = "not found"
return _make_badge_response('pipeline', status_text, status_color)
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color)
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
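# Usage sketch (a minimal illustration, not from the original module): serving the
# dashboard with Flask's development server. The DPP_* environment variable names are
# taken from the code above; the host, port and module entry point are assumptions.
if __name__ == '__main__':
    # e.g.  DPP_BASIC_AUTH_USERNAME=admin DPP_BASIC_AUTH_PASSWORD=secret \
    #       DPP_BASE_PATH=/pipelines/ python dashboard.py
    app.run(host='127.0.0.1', port=5000)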
| 33.696275
| 79
| 0.631463
| 1,421
| 11,760
| 4.990852
| 0.18297
| 0.071066
| 0.023971
| 0.024112
| 0.329808
| 0.262549
| 0.21348
| 0.169769
| 0.126058
| 0.064298
| 0
| 0.003408
| 0.251446
| 11,760
| 348
| 80
| 33.793103
| 0.802227
| 0.036905
| 0
| 0.183453
| 0
| 0
| 0.11426
| 0.020903
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057554
| false
| 0.007194
| 0.061151
| 0.007194
| 0.18705
| 0.035971
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98eec9960afb05f934f3e80b57d22d6b3147c3f1
| 1,425
|
py
|
Python
|
MoveSim/code/models/losses.py
|
tobinsouth/privacy-preserving-synthetic-mobility-data
|
fd4d1851b47e3e7304761a894b460e8345fae5db
|
[
"MIT"
] | null | null | null |
MoveSim/code/models/losses.py
|
tobinsouth/privacy-preserving-synthetic-mobility-data
|
fd4d1851b47e3e7304761a894b460e8345fae5db
|
[
"MIT"
] | null | null | null |
MoveSim/code/models/losses.py
|
tobinsouth/privacy-preserving-synthetic-mobility-data
|
fd4d1851b47e3e7304761a894b460e8345fae5db
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import numpy as np
import torch
import torch.nn as nn
class distance_loss(nn.Module):
def __init__(self):
super(distance_loss, self).__init__()
with open('../data/raw/Cellular_Baselocation_baidu') as f:
gpss = f.readlines()
self.X = []
self.Y = []
for gps in gpss:
x, y = float(gps.split()[0]), float(gps.split()[1])
self.X.append(x)
self.Y.append(y)
self.X = torch.Tensor(np.array(self.X)).float()
self.Y = torch.Tensor(np.array(self.Y)).float()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
x1 = torch.index_select(self.X, 0, x[:, :-1].view(-1))
x2 = torch.index_select(self.X, 0, x[:, 1:].view(-1))
y1 = torch.index_select(self.Y, 0, x[:, :-1].view(-1))
y2 = torch.index_select(self.Y, 0, x[:, 1:].view(-1))  # y coordinate of the next position
dx = x1 - x2
dy = y1 - y2
loss = dx**2 + dy**2
return loss
class period_loss(nn.Module):
def __init__(self, time_interval):
super(period_loss, self).__init__()
self.time_interval = time_interval
self.mse = nn.MSELoss()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
loss = 0.
for i in range(0, x.size(1) - self.time_interval):
loss += self.mse(x[:, i], x[:, i + self.time_interval])
return loss
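# Usage sketch (a minimal illustration, not from the original file): exercising the
# losses above with dummy data. The batch shape, vocabulary size and time interval are
# assumptions; distance_loss also needs the Cellular_Baselocation_baidu file on disk.
if __name__ == '__main__':
    dummy_seq = torch.randint(0, 100, (4, 48))  # batch_size=4, seq_len=48 location ids
    p_loss = period_loss(time_interval=24)      # penalize differences 24 steps apart
    print(p_loss(dummy_seq.float()))            # scalar periodicity penalty
    # d_loss = distance_loss()                  # requires the GPS base-location file
    # print(d_loss(dummy_seq).mean())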
| 27.403846
| 67
| 0.523509
| 203
| 1,425
| 3.551724
| 0.315271
| 0.055479
| 0.088766
| 0.110957
| 0.449376
| 0.38835
| 0.324549
| 0.324549
| 0.324549
| 0.324549
| 0
| 0.028571
| 0.312281
| 1,425
| 51
| 68
| 27.941176
| 0.707143
| 0.094035
| 0
| 0.125
| 0
| 0
| 0.031811
| 0.031811
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98ef6a5aa62915725ae521746cef94f51adfcf47
| 1,316
|
py
|
Python
|
board/game.py
|
petthauk/chess_ml
|
2a66ca8511fd4eef71607a7f56417d039d94dbf9
|
[
"MIT"
] | null | null | null |
board/game.py
|
petthauk/chess_ml
|
2a66ca8511fd4eef71607a7f56417d039d94dbf9
|
[
"MIT"
] | null | null | null |
board/game.py
|
petthauk/chess_ml
|
2a66ca8511fd4eef71607a7f56417d039d94dbf9
|
[
"MIT"
] | null | null | null |
import pygame as pg
from pygame.locals import *
import sys
import board.chess_board as board
w = 60 * 8
h = 60 * 8
class Game:
"""
Class to setup and start a game
"""
def __init__(self):
self.b = board.Board(w, h)
def get_board(self):
"""
Returns board
:return: Board-class
"""
return self.b
def run(self):
"""
Where the game is created and launched
:return:
"""
# While loop to show display
while True:
for event in pg.event.get():
# Quitting game
if event.type == QUIT:
pg.quit()
sys.exit()
# If game can continue
if self.b.get_status() == "-":
# Pressing mouse
if event.type == MOUSEBUTTONDOWN:
pos = pg.mouse.get_pos()
for r in self.b.get_board_array():
for square in r:
if square.get_visual().collidepoint(pos):
square.click()
self.b.update_board()
if __name__ == "__main__":
# Launch main-function if running this script
game = Game()
game.run()
| 24.830189
| 73
| 0.458207
| 144
| 1,316
| 4.048611
| 0.465278
| 0.042882
| 0.037736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00831
| 0.451368
| 1,316
| 52
| 74
| 25.307692
| 0.799169
| 0.179331
| 0
| 0
| 0
| 0
| 0.009018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.142857
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98efb4404db7ca8bc8ddf99fbe40494ec2e70aa1
| 2,515
|
py
|
Python
|
pix2pix/Dataset_util.py
|
Atharva-Phatak/Season-Tranfer
|
d6a0d4d42e396677920ffb81ab0086b0aa05d3c3
|
[
"MIT"
] | 2
|
2019-07-02T14:00:15.000Z
|
2019-07-11T15:50:41.000Z
|
pix2pix/Dataset_util.py
|
Atharva-Phatak/Season-Tranfer
|
d6a0d4d42e396677920ffb81ab0086b0aa05d3c3
|
[
"MIT"
] | null | null | null |
pix2pix/Dataset_util.py
|
Atharva-Phatak/Season-Tranfer
|
d6a0d4d42e396677920ffb81ab0086b0aa05d3c3
|
[
"MIT"
] | null | null | null |
#importing libraries
import torch
import torch.utils.data as data
import os
import random
from PIL import Image
class CreateDataset(data.Dataset):
def __init__(self, imagedir, subfolder='train', direction='AtoB', flip=False, transform=None, resize_scale=None, crop_size=None):
super(CreateDataset, self).__init__()
self.images_path = os.path.join(imagedir, subfolder)
self.image_filenames = [name for name in sorted(os.listdir(self.images_path))]
self.flip = flip
self.transform = transform
self.resize_scale = resize_scale
self.crop_size = crop_size
self.direction = direction
def __getitem__(self, index):
image_path = os.path.join(self.images_path, self.image_filenames[index])
img = Image.open(image_path)
if self.direction == 'AtoB':
inp_img = img.crop((0, 0, img.width // 2, img.height))
target_img = img.crop((img.width // 2, 0, img.width, img.height))
elif self.direction == 'BtoA':
inp_img = img.crop((img.width // 2, 0, img.width, img.height))
target_img = img.crop((0, 0, img.width // 2, img.height))
if self.resize_scale:
inp_img = inp_img.resize((self.resize_scale, self.resize_scale), Image.BILINEAR)
target_img = target_img.resize((self.resize_scale, self.resize_scale), Image.BILINEAR)
if self.crop_size:
x = random.randint(0, self.resize_scale - self.crop_size + 1)
y = random.randint(0, self.resize_scale - self.crop_size + 1)
inp_img = inp_img.crop((x, y, x + self.crop_size, y + self.crop_size))
target_img = target_img.crop((x, y, x + self.crop_size, y + self.crop_size))
if self.flip:
if random.random() < 0.5:
inp_img = inp_img.transpose(Image.FLIP_LEFT_RIGHT)
target_img = target_img.transpose(Image.FLIP_LEFT_RIGHT)
if self.transform is not None:
inp_img = self.transform(inp_img)
target_img = self.transform(target_img)
return inp_img, target_img
def __len__(self):
return len(self.image_filenames)
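# Usage sketch (a minimal illustration, not from the original file): wiring CreateDataset
# into a DataLoader. The dataset directory, transform and resize/crop sizes are assumptions;
# the class expects paired A|B images stored side by side, as in the pix2pix datasets.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from torchvision import transforms

    transform = transforms.Compose([transforms.ToTensor()])
    train_set = CreateDataset('datasets/facades', subfolder='train', direction='AtoB',
                              flip=True, transform=transform, resize_scale=286, crop_size=256)
    loader = DataLoader(train_set, batch_size=4, shuffle=True)
    inp, target = next(iter(loader))
    print(inp.shape, target.shape)  # e.g. torch.Size([4, 3, 256, 256]) for each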
| 36.985294
| 150
| 0.553082
| 302
| 2,515
| 4.374172
| 0.221854
| 0.049962
| 0.09084
| 0.057532
| 0.36866
| 0.351249
| 0.296745
| 0.296745
| 0.296745
| 0.296745
| 0
| 0.009756
| 0.347913
| 2,515
| 68
| 151
| 36.985294
| 0.795732
| 0.007555
| 0
| 0
| 0
| 0
| 0.006999
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.119048
| 0.02381
| 0.261905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98efd5c91e56c42872a45ff29528b847156d1400
| 20,126
|
py
|
Python
|
crslab/system/C2CRS_System.py
|
Zyh716/WSDM2022-C2CRS
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
[
"MIT"
] | 4
|
2022-03-24T02:14:50.000Z
|
2022-03-30T02:28:19.000Z
|
crslab/system/C2CRS_System.py
|
RUCAIBox/WSDM2022-C2CRS
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
[
"MIT"
] | null | null | null |
crslab/system/C2CRS_System.py
|
RUCAIBox/WSDM2022-C2CRS
|
8ef2fa7c44bdba1799ab79f379ae7394bd468c02
|
[
"MIT"
] | 2
|
2022-03-23T02:24:24.000Z
|
2022-03-28T12:45:43.000Z
|
# @Time : 2022/1/1
# @Author : Yuanhang Zhou
# @email : sdzyh002@gmail.com
import os
from math import floor
import torch
from loguru import logger
from typing import List, Dict
from copy import copy, deepcopy
import pickle
import numpy
import ipdb
from crslab.config import PRETRAIN_PATH, SAVE_PATH
from crslab.data import get_dataloader, dataset_language_map
from crslab.evaluator.metrics.base import AverageMetric
from crslab.evaluator.metrics.gen import PPLMetric
from crslab.system.base import BaseSystem
from crslab.system.utils.functions import ind2txt, ind2txt2
import random
from tqdm import tqdm
class C2CRS_System(BaseSystem):
"""This is the system for TGReDial model"""
def __init__(self, opt, train_dataloader, valid_dataloader, test_dataloader, vocab, side_data, restore_system=False,
interact=False, debug=False):
"""
Args:
opt (dict): Indicating the hyper parameters.
train_dataloader (BaseDataLoader): Indicating the train dataloader of corresponding dataset.
valid_dataloader (BaseDataLoader): Indicating the valid dataloader of corresponding dataset.
test_dataloader (BaseDataLoader): Indicating the test dataloader of corresponding dataset.
vocab (dict): Indicating the vocabulary.
side_data (dict): Indicating the side data.
restore_system (bool, optional): Indicating if we store system after training. Defaults to False.
interact (bool, optional): Indicating if we interact with system. Defaults to False.
debug (bool, optional): Indicating if we train in debug mode. Defaults to False.
"""
super(C2CRS_System, self).__init__(opt, train_dataloader, valid_dataloader,
test_dataloader, vocab, side_data, restore_system, interact, debug)
self._init_token_attribute(vocab)
self._init_rec_attribute(side_data, vocab)
self._init_conv_attribute(side_data, vocab)
self._init_pretrain_attribute(side_data, vocab)
self.language = dataset_language_map[self.opt['dataset']]
self.pertrain_save_epoches = [epoch-1 for epoch in eval(opt['pertrain_save_epoches'])]
def _init_token_attribute(self, vocab):
self.ind2tok = vocab['rec']['ind2tok']
self.end_token_idx = vocab['rec']['end']
self.unk_token_idx = vocab['rec']['unk']
self.unk = self.ind2tok.get(self.unk_token_idx, '<unk>')
def _init_rec_attribute(self, side_data, vocab):
self.item_ids = side_data['rec']['item_entity_ids']
self.id2entity = side_data['rec']['entity_kg']['id2entity']
self.dpath = side_data['rec']['dpath']
self.rec_ind2tok = vocab['rec']['ind2tok']
self.rec_optim_opt = deepcopy(self.opt['rec'])
self.rec_batch_size = self.opt['rec_batch_size'] if self.opt['rec_batch_size'] != -1 else self.rec_optim_opt['batch_size']
self.rec_epoch = self.opt['rec_epoch'] if self.opt['rec_epoch'] != -1 else self.rec_optim_opt['epoch']
def _init_conv_attribute(self, side_data, vocab):
self.conv_optim_opt = self.opt['conv']
self.conv_batch_size = self.opt['conv_batch_size'] if self.opt['conv_batch_size'] != -1 else self.conv_optim_opt['batch_size']
self.conv_epoch = self.opt['conv_epoch'] if self.opt['conv_epoch'] != -1 else self.conv_optim_opt['epoch']
if self.conv_optim_opt.get('lr_scheduler', None) and 'Transformers' in self.conv_optim_opt['lr_scheduler']['name']:
batch_num = 0
for _ in self.train_dataloader['rec'].get_conv_data(batch_size=self.conv_batch_size, shuffle=False):
batch_num += 1
conv_training_steps = self.conv_epoch * floor(batch_num / self.conv_optim_opt.get('update_freq', 1))
self.conv_optim_opt['lr_scheduler']['training_steps'] = conv_training_steps
def _init_pretrain_attribute(self, side_data, vocab):
if 'pretrain' in self.opt:
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
self.pretrain_epoch = self.opt['pretrain_epoch'] if self.opt['pretrain_epoch'] != -1 else self.pretrain_optim_opt['pretrain_epoch']
self.pretrain_batch_size = self.opt['pretrain_batch_size'] if self.opt['pretrain_batch_size'] != -1 else self.pretrain_optim_opt['batch_size']
def rec_evaluate(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
item_label = item_label.tolist()
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
self.evaluator.rec_evaluate(rec_rank, item)
def rec_evaluate_and_return_score(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
_, fully_rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
fully_rec_ranks = fully_rec_ranks.tolist()
item_label = item_label.tolist()
scores = []
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
scores.append(self.evaluator.rec_evaluate_and_return_score(rec_rank, fully_rec_ranks, item, self.opt['score_type']))
return scores, rec_ranks
def conv_evaluate(self, prediction, response):
"""
Args:
prediction: torch.LongTensor, shape=(bs, response_truncate-1)
response: torch.LongTensor, shape=(bs, response_truncate)
the first token in response is <|endoftext|>; it is not present in prediction
"""
prediction = prediction.tolist()
response = response.tolist()
for p, r in zip(prediction, response):
p_str, p_ListStr = ind2txt2(p, self.ind2tok, self.end_token_idx)
r_str, r_ListStr = ind2txt2(r[1:], self.ind2tok, self.end_token_idx)
self.evaluator.gen_evaluate(p_str, [r_str], p_ListStr, [r_ListStr])
def step(self, batch, stage, mode, epoch=-1):
batch, unbatchify_batch = batch
self.step_default(batch, stage, mode, epoch)
def step_default(self, batch, stage, mode, epoch=-1):
"""
stage: ['policy', 'rec', 'conv']
mode: ['train', 'val', 'test']
"""
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device)
if stage == 'pretrain_rec':
loss = self.rec_model.pretrain(batch, mode, epoch)
if loss:
if mode == "train":
self.backward(loss)
loss = loss.item()
self.evaluator.optim_metrics.add("loss", AverageMetric(loss))
elif stage == 'policy':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
policy_loss, policy_predict = self.rec_model.guide(batch, mode)
if mode == "train" and policy_loss is not None:
self.backward(policy_loss)
else:
self.policy_evaluate(policy_predict, batch[-1])
if isinstance(policy_loss, torch.Tensor):
policy_loss = policy_loss.item()
self.evaluator.optim_metrics.add("policy_loss",
AverageMetric(policy_loss))
elif stage == 'rec':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
rec_loss, rec_predict = self.rec_model.recommend(batch, mode)
if mode == "train":
self.backward(rec_loss)
else:
self.rec_evaluate(rec_predict, batch['movie_to_rec'])
rec_loss = rec_loss.item()
self.evaluator.optim_metrics.add("rec_loss",
AverageMetric(rec_loss))
elif stage == "conv":
if mode != "test":
gen_loss, pred = self.rec_model.converse(batch, mode)
if mode == 'train':
self.backward(gen_loss)
else:
self.conv_evaluate(pred, batch['response'])
gen_loss = gen_loss.item()
self.evaluator.optim_metrics.add("gen_loss",
AverageMetric(gen_loss))
self.evaluator.gen_metrics.add("ppl", PPLMetric(gen_loss))
else:
# generate response in rec_model.step
_, pred = self.rec_model.converse(batch, mode)
response = batch['response']
self.conv_evaluate(pred, response)
self.record_conv_gt_pred(response, pred, epoch)
self.record_conv_gt(response, pred)
self.record_conv_pred(response, pred, epoch)
else:
raise ValueError(f'Unknown stage: {stage}')
def record_conv_gt_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_gt_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_gt(self, batch_response, batch_pred):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer('record_conv_gt', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def get_file_writer(self, file_keywords: str, file_type: str):
file_name = file_keywords + file_type
file_path = os.path.join(self.opt['LOG_PATH'], file_name)
if os.path.exists(file_path):
file_writer = open(file_path, 'a', encoding='utf-8')
else:
file_writer = open(file_path, 'w', encoding='utf-8')
return file_writer
def convert_tensor_ids_to_tokens(self, token_ids):
tokens = []
token_ids = token_ids.tolist() # List[int]
if not token_ids:
return tokens
for token_id in token_ids:
if token_id == self.end_token_idx:
return tokens
tokens.append(self.ind2tok.get(token_id, self.unk))
return tokens
def is_early_stop(self, valid_metric, epoch):
early_stop_result = self.early_stop(valid_metric)
# logger.info(f'valid_metric = {valid_metric}, early_stop_result = {early_stop_result}, stop_mode = {self.stop_mode}')
if early_stop_result == 'Stop':
return True
elif early_stop_result == 'New Model':
self.save_model(epoch=epoch, valid_metric=valid_metric)
elif early_stop_result == 'Patience':
pass
return False
def fit(self):
self.extend_datasets()
self.pre_training()
self.train_recommender_default()
self.train_conversation_using_rec_model()
def extend_datasets(self):
extend_train_dataset = self.train_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.train_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.valid_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.valid_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.test_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.test_dataloader['rec'].replace_dataset(extend_train_dataset)
def pre_training(self):
self.init_pretrain_optim()
self.pretrain_recommender_convergence()
def init_pretrain_optim(self):
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.pretrain_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.pretrain_optim_opt), params)
def pretrain_recommender_convergence(self):
for epoch in range(self.pretrain_epoch):
self.pretrain_recommender_one_epoch(epoch)
valid_metric = self.valid_pretrain_recommender(epoch)
if epoch in self.pertrain_save_epoches:
self.save_model(post_fix='epoch_{}'.format(epoch), epoch=epoch, valid_metric=valid_metric)
if self.is_early_stop(valid_metric, epoch):
break
def pretrain_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Pretrain | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=True):
self.step(batch, stage='pretrain_rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_pretrain_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=False):
self.step(batch, stage='pretrain_rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.optim_metrics['loss']
return metric
def train_recommender_default(self):
self.init_rec_optim()
self.train_recommender_convergence()
# test
if self.rec_epoch != 0:
self.restore_model_from_save()
self.test_recommender('final')
def init_rec_optim(self):
self.rec_optim_opt = deepcopy(self.opt['rec'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.rec_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.rec_optim_opt), params)
def train_recommender_convergence(self) -> float:
for epoch in range(self.rec_epoch):
self.train_recommender_one_epoch(epoch)
valid_metric = self.valid_recommender(epoch)
if self.is_early_stop(valid_metric, epoch):
break
def train_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Train | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=True):
self.step(batch, stage='rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.rec_metrics['hit@1'] + self.evaluator.rec_metrics['hit@50']
return metric
def test_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Test ]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='test', epoch=epoch)
self.evaluator.report()
def train_conversation_using_rec_model(self):
self.init_optim(deepcopy(self.conv_optim_opt), self.rec_model.parameters())
if self.opt['freeze_parameters']:
self.rec_model.freeze_parameters()
self.train_conversation_convergence()
if self.conv_epoch != 0:
self.restore_model_from_save()
self.test_conversation('final')
def train_conversation_convergence(self):
for epoch in range(self.conv_epoch):
self.train_conversation_one_epoch(epoch)
valid_metric = self.valid_conversation(epoch)
self.test_conversation('final')
if self.is_early_stop(valid_metric, epoch):
break
def train_conversation_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Train | epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=True):
self.step(batch, stage='conv', mode='train', epoch=epoch)
self.evaluator.report()
def valid_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Valid | epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='val', epoch=epoch)
self.evaluator.report()
valid_metric = self.get_sum_dist_metric()
# early stop
return valid_metric
def get_sum_dist_metric(self):
sum_dist = 0
for k in range(1, 5):
try:
sum_dist += self.evaluator.gen_metrics[f'dist@{k}']
except KeyError:
pass
return sum_dist
def test_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Test]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='test', epoch=epoch)
self.evaluator.report()
def interact(self):
pass
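# Usage sketch (comments only, not from the original file): the public entry point of the
# class above is fit(), which chains the four training phases. Building the constructor
# arguments requires crslab's config and dataloader machinery, so `system` below is an
# assumed, already-constructed C2CRS_System instance.
#
#   system = C2CRS_System(opt, train_dataloader, valid_dataloader, test_dataloader,
#                         vocab, side_data)
#   system.fit()  # extend_datasets -> pre_training -> train_recommender -> train_conversation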
| 42.549683
| 154
| 0.622131
| 2,483
| 20,126
| 4.764801
| 0.103504
| 0.018933
| 0.015214
| 0.010819
| 0.574085
| 0.504691
| 0.458203
| 0.419407
| 0.380272
| 0.368946
| 0
| 0.004221
| 0.270098
| 20,126
| 473
| 155
| 42.549683
| 0.801157
| 0.074282
| 0
| 0.327485
| 0
| 0
| 0.082326
| 0.017517
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0.008772
| 0.052632
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98f428d0ea0b7f44539193898ee9647b5c6c689f
| 2,242
|
py
|
Python
|
marketDataRetrieval.py
|
amertx/Monte-Carlo-Simulation
|
6c3a616bc67e668d80a73247ca279e10f6d46cd5
|
[
"MIT"
] | null | null | null |
marketDataRetrieval.py
|
amertx/Monte-Carlo-Simulation
|
6c3a616bc67e668d80a73247ca279e10f6d46cd5
|
[
"MIT"
] | null | null | null |
marketDataRetrieval.py
|
amertx/Monte-Carlo-Simulation
|
6c3a616bc67e668d80a73247ca279e10f6d46cd5
|
[
"MIT"
] | null | null | null |
#Prediction model using a Monte Carlo simulation of the geometric Brownian motion equation
#import of libraries
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
from scipy.stats import norm
#ticker selection
def mainFunction(tradingSymbol):
data = pd.DataFrame()
data[tradingSymbol] = wb.DataReader(tradingSymbol, data_source='yahoo', start='2019-1-1')['Adj Close']
#percent change of asset price
log_returns = np.log(1+ data.pct_change())
#graph showing growth over time beginning from 2019-01-01 (the data start date)
data.plot(figsize = (10,6));
plt.show()
#graph of log returns of input ticker
#returns are normally distributed and have a consistent mean
log_returns.plot(figsize = (10,6))
plt.show()
#calculations
averageDailyReturn = log_returns.mean()
variance = log_returns.var()
drift = averageDailyReturn-(variance/2)
standardDeviation = log_returns.std()
#Brownian Motion equation
#daily return = exp(drift + standardDeviation * Z), where Z ~ N(0, 1)
#prediction of future stock prices based on the simulation below; numpy arrays store the results
np.array(drift)
drift.values
standardDeviation.values
#z-score: the number of standard deviations from the mean for a given percentile
norm.ppf(0.95)
#10 x 2 Matrix
x = np.random.rand(10,2)
norm.ppf(x)
#stores distances from the mean value, 0, into the 10 x 2 matrix
Z = norm.ppf(np.random.rand(10,2))
#time interval for the stock price forecast
timeInterval = 365
iterations = 5
#daily return = exp(drift + standardDeviation * Z), Z ~ N(0, 1)
#5 simulated paths (iterations) of 365 daily returns for the ticker symbol
dailyReturns = np.exp(drift.values + standardDeviation.values * norm.ppf(np.random.rand(timeInterval,iterations)))
#returns into price points
presentPrice = data.iloc[-1]
priceList = np.zeros_like(dailyReturns)
priceList[0] = presentPrice
#iterate over the time interval of 365 days to build each price path
for t in range(1, timeInterval):
priceList[t] = priceList[t-1] * dailyReturns[t]
#plots the 5 simulated paths of the future stock price
plt.figure(figsize =(10,6))
plt.plot(priceList)
plt.show()
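# Usage sketch (a minimal illustration, not from the original script): the whole simulation
# is driven by mainFunction(); the ticker symbol below is an assumption, and the call needs
# network access because prices are fetched from Yahoo via pandas_datareader.
if __name__ == '__main__':
    mainFunction('AAPL')  # simulates 5 price paths over 365 days for the chosen ticker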
| 29.116883
| 118
| 0.711864
| 314
| 2,242
| 5.05414
| 0.429936
| 0.037807
| 0.018904
| 0.024575
| 0.093258
| 0.026465
| 0
| 0
| 0
| 0
| 0
| 0.030761
| 0.202498
| 2,242
| 76
| 119
| 29.5
| 0.856823
| 0.402765
| 0
| 0.085714
| 0
| 0
| 0.016679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.142857
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98f43fcd4c7844a9b69d2baa890a95f4841f18e8
| 31,716
|
py
|
Python
|
HelloDeepSpeed/train_bert_ds.py
|
mrwyattii/DeepSpeedExamples
|
6bd444a7c62e9d7d320dd4c1e1142062f50c861d
|
[
"MIT"
] | null | null | null |
HelloDeepSpeed/train_bert_ds.py
|
mrwyattii/DeepSpeedExamples
|
6bd444a7c62e9d7d320dd4c1e1142062f50c861d
|
[
"MIT"
] | null | null | null |
HelloDeepSpeed/train_bert_ds.py
|
mrwyattii/DeepSpeedExamples
|
6bd444a7c62e9d7d320dd4c1e1142062f50c861d
|
[
"MIT"
] | null | null | null |
"""
Modified version of train_bert.py that adds DeepSpeed
"""
import os
import datetime
import json
import pathlib
import re
import string
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union
import random
import datasets
import fire
import logging
import loguru
import numpy as np
import pytz
import sh
import torch
import torch.nn as nn
import deepspeed
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.models.roberta import RobertaConfig, RobertaModel
from transformers.models.roberta.modeling_roberta import (
RobertaLMHead,
RobertaPreTrainedModel,
)
def is_rank_0() -> bool:
return int(os.environ.get("RANK", "0")) == 0
######################################################################
####################### Logging Functions ############################
######################################################################
logger = loguru.logger
def log_dist(message: str,
ranks: List[int] = [],
level: int = logging.INFO) -> None:
"""Log messages for specified ranks only"""
my_rank = int(os.environ.get("RANK", "0"))
if my_rank in ranks:
if level == logging.INFO:
logger.info(f'[Rank {my_rank}] {message}')
if level == logging.ERROR:
logger.error(f'[Rank {my_rank}] {message}')
if level == logging.DEBUG:
logger.debug(f'[Rank {my_rank}] {message}')
######################################################################
############### Dataset Creation Related Functions ###################
######################################################################
TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
def collate_function(batch: List[Tuple[List[int], List[int]]],
pad_token_id: int) -> Dict[str, torch.Tensor]:
"""Collect a list of masked token indices, and labels, and
batch them, padding to max length in the batch.
"""
max_length = max(len(token_ids) for token_ids, _ in batch)
padded_token_ids = [
token_ids +
[pad_token_id for _ in range(0, max_length - len(token_ids))]
for token_ids, _ in batch
]
padded_labels = [
labels + [pad_token_id for _ in range(0, max_length - len(labels))]
for _, labels in batch
]
src_tokens = torch.LongTensor(padded_token_ids)
tgt_tokens = torch.LongTensor(padded_labels)
attention_mask = src_tokens.ne(pad_token_id).type_as(src_tokens)
return {
"src_tokens": src_tokens,
"tgt_tokens": tgt_tokens,
"attention_mask": attention_mask,
}
def masking_function(
text: str,
tokenizer: TokenizerType,
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
max_length: int,
) -> Tuple[List[int], List[int]]:
"""Given a text string, randomly mask wordpieces for Bert MLM
training.
Args:
text (str):
The input text
tokenizer (TokenizerType):
The tokenizer for tokenization
mask_prob (float):
What fraction of tokens to mask
random_replace_prob (float):
Of the masked tokens, how many should be replaced with
random tokens (improves performance)
unmask_replace_prob (float):
Of the masked tokens, how many should be replaced with
the original token (improves performance)
max_length (int):
The maximum sequence length to consider. Note that for
Bert style models, this is a function of the number of
positional embeddings you learn
Returns:
Tuple[List[int], List[int]]:
The masked token ids (based on the tokenizer passed),
and the output labels (padded with `tokenizer.pad_token_id`)
"""
# Note: By default, encode does add the BOS and EOS token
# Disabling that behaviour to make this more clear
tokenized_ids = ([tokenizer.bos_token_id] +
tokenizer.encode(text,
add_special_tokens=False,
truncation=True,
max_length=max_length - 2) +
[tokenizer.eos_token_id])
seq_len = len(tokenized_ids)
tokenized_ids = np.array(tokenized_ids)
subword_mask = np.full(len(tokenized_ids), False)
# Masking the BOS and EOS token leads to slightly worse performance
low = 1
high = len(subword_mask) - 1
mask_choices = np.arange(low, high)
num_subwords_to_mask = max(
int((mask_prob * (high - low)) + np.random.rand()), 1)
subword_mask[np.random.choice(mask_choices,
num_subwords_to_mask,
replace=False)] = True
# Create the labels first
labels = np.full(seq_len, tokenizer.pad_token_id)
labels[subword_mask] = tokenized_ids[subword_mask]
tokenized_ids[subword_mask] = tokenizer.mask_token_id
# Now of the masked tokens, choose how many to replace with random and how many to unmask
rand_or_unmask_prob = random_replace_prob + unmask_replace_prob
if rand_or_unmask_prob > 0:
rand_or_unmask = subword_mask & (np.random.rand(len(tokenized_ids)) <
rand_or_unmask_prob)
if random_replace_prob == 0:
unmask = rand_or_unmask
rand_mask = None
elif unmask_replace_prob == 0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = unmask_replace_prob / rand_or_unmask_prob
decision = np.random.rand(len(tokenized_ids)) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
if unmask is not None:
tokenized_ids[unmask] = labels[unmask]
if rand_mask is not None:
weights = np.ones(tokenizer.vocab_size)
weights[tokenizer.all_special_ids] = 0
probs = weights / weights.sum()
num_rand = rand_mask.sum()
tokenized_ids[rand_mask] = np.random.choice(tokenizer.vocab_size,
num_rand,
p=probs)
return tokenized_ids.tolist(), labels.tolist()
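# Usage sketch (comments only, not from the original script): calling masking_function
# directly on one sentence. The tokenizer name matches the script's default; the sentence
# and probabilities are illustrative assumptions.
#
#   tok = AutoTokenizer.from_pretrained("roberta-base")
#   ids, labels = masking_function("DeepSpeed makes distributed training easy.", tok,
#                                  mask_prob=0.15, random_replace_prob=0.1,
#                                  unmask_replace_prob=0.1, max_length=64)
#   # `ids` has roughly 15% of positions masked (or randomly replaced / left unchanged);
#   # `labels` holds the original ids at those positions and pad ids everywhere else.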
class WikiTextMLMDataset(Dataset):
"""A [Map style dataset](https://pytorch.org/docs/stable/data.html)
for iterating over the wikitext dataset. Note that this assumes
the dataset can fit in memory. For larger datasets
you'd want to shard them and use an iterable dataset (eg: see
[Infinibatch](https://github.com/microsoft/infinibatch))
Args:
Dataset (datasets.arrow_dataset.Dataset):
The wikitext dataset
masking_function (Callable[[str], Tuple[List[int], List[int]]])
The masking function. To generate one training instance,
the masking function is applied to the `text` of a dataset
record
"""
def __init__(
self,
dataset: datasets.arrow_dataset.Dataset,
masking_function: Callable[[str], Tuple[List[int], List[int]]],
) -> None:
self.dataset = dataset
self.masking_function = masking_function
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[List[int], List[int]]:
tokens, labels = self.masking_function(self.dataset[idx]["text"])
return (tokens, labels)
T = TypeVar("T")
class InfiniteIterator(object):
def __init__(self, iterable: Iterable[T]) -> None:
self._iterable = iterable
self._iterator = iter(self._iterable)
def __iter__(self):
return self
def __next__(self) -> T:
next_item = None
try:
next_item = next(self._iterator)
except StopIteration:
self._iterator = iter(self._iterable)
next_item = next(self._iterator)
return next_item
def create_data_iterator(
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
batch_size: int,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
) -> InfiniteIterator:
"""Create the dataloader.
Args:
mask_prob (float):
Fraction of tokens to mask
random_replace_prob (float):
Fraction of masked tokens to replace with random token
unmask_replace_prob (float):
Fraction of masked tokens to replace with the actual token
batch_size (int):
The batch size of the generated tensors
max_seq_length (int, optional):
The maximum sequence length for the MLM task. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
Returns:
InfiniteIterator:
The torch DataLoader, wrapped in an InfiniteIterator class, to
be able to continuously generate samples
"""
wikitext_dataset = datasets.load_dataset("wikitext",
"wikitext-2-v1",
split="train")
wikitext_dataset = wikitext_dataset.filter(
lambda record: record["text"] != "").map(
lambda record: {"text": record["text"].rstrip("\n")})
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
masking_function_partial = partial(
masking_function,
tokenizer=tokenizer,
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
max_length=max_seq_length,
)
dataset = WikiTextMLMDataset(wikitext_dataset, masking_function_partial)
collate_fn_partial = partial(collate_function,
pad_token_id=tokenizer.pad_token_id)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=collate_fn_partial)
return InfiniteIterator(dataloader)
######################################################################
############### Model Creation Related Functions #####################
######################################################################
class RobertaLMHeadWithMaskedPredict(RobertaLMHead):
def __init__(self,
config: RobertaConfig,
embedding_weight: Optional[torch.Tensor] = None) -> None:
super(RobertaLMHeadWithMaskedPredict, self).__init__(config)
if embedding_weight is not None:
self.decoder.weight = embedding_weight
def forward( # pylint: disable=arguments-differ
self,
features: torch.Tensor,
masked_token_indices: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""The current `transformers` library does not provide support
for masked_token_indices. This function provides the support, by
running the final forward pass only for the masked indices. This saves
memory
Args:
features (torch.Tensor):
The features to select from. Shape (batch, seq_len, h_dim)
masked_token_indices (torch.Tensor, optional):
The indices of masked tokens for index select. Defaults to None.
Shape: (num_masked_tokens,)
Returns:
torch.Tensor:
The index selected features. Shape (num_masked_tokens, h_dim)
"""
if masked_token_indices is not None:
features = torch.index_select(
features.view(-1, features.shape[-1]), 0, masked_token_indices)
return super().forward(features)
class RobertaMLMModel(RobertaPreTrainedModel):
def __init__(self, config: RobertaConfig, encoder: RobertaModel) -> None:
super().__init__(config)
self.encoder = encoder
self.lm_head = RobertaLMHeadWithMaskedPredict(
config, self.encoder.embeddings.word_embeddings.weight)
self.lm_head.apply(self._init_weights)
def forward(
self,
src_tokens: torch.Tensor,
attention_mask: torch.Tensor,
tgt_tokens: torch.Tensor,
) -> torch.Tensor:
"""The forward pass for the MLM task
Args:
src_tokens (torch.Tensor):
The masked token indices. Shape: (batch, seq_len)
attention_mask (torch.Tensor):
The attention mask, since the batches are padded
to the largest sequence. Shape: (batch, seq_len)
tgt_tokens (torch.Tensor):
The output tokens (padded with `config.pad_token_id`)
Returns:
torch.Tensor:
The MLM loss
"""
# shape: (batch, seq_len, h_dim)
sequence_output, *_ = self.encoder(input_ids=src_tokens,
attention_mask=attention_mask,
return_dict=False)
pad_token_id = self.config.pad_token_id
# (labels have also been padded with pad_token_id)
# filter out all masked labels
# shape: (num_masked_tokens,)
masked_token_indexes = torch.nonzero(
(tgt_tokens != pad_token_id).view(-1)).view(-1)
# shape: (num_masked_tokens, vocab_size)
prediction_scores = self.lm_head(sequence_output, masked_token_indexes)
# shape: (num_masked_tokens,)
target = torch.index_select(tgt_tokens.view(-1), 0,
masked_token_indexes)
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), target)
return masked_lm_loss
def create_model(num_layers: int, num_heads: int, ff_dim: int, h_dim: int,
dropout: float) -> RobertaMLMModel:
"""Create a Bert model with the specified `num_heads`, `ff_dim`,
`h_dim` and `dropout`
Args:
num_layers (int):
The number of layers
num_heads (int):
The number of attention heads
ff_dim (int):
The intermediate hidden size of
the feed forward block of the
transformer
h_dim (int):
The hidden dim of the intermediate
representations of the transformer
dropout (float):
The value of dropout to be used.
Note that we apply the same dropout
to both the attention layers and the
FF layers
Returns:
RobertaMLMModel:
A Roberta model for MLM task
"""
roberta_config_dict = {
"attention_probs_dropout_prob": dropout,
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": dropout,
"hidden_size": h_dim,
"initializer_range": 0.02,
"intermediate_size": ff_dim,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "roberta",
"num_attention_heads": num_heads,
"num_hidden_layers": num_layers,
"pad_token_id": 1,
"type_vocab_size": 1,
"vocab_size": 50265,
}
roberta_config = RobertaConfig.from_dict(roberta_config_dict)
roberta_encoder = RobertaModel(roberta_config)
roberta_model = RobertaMLMModel(roberta_config, roberta_encoder)
return roberta_model
######################################################################
########### Experiment Management Related Functions ##################
######################################################################
def get_unique_identifier(length: int = 8) -> str:
"""Create a unique identifier by choosing `length`
random characters from list of ascii characters and numbers
"""
alphabet = string.ascii_lowercase + string.digits
uuid = "".join(alphabet[ix]
for ix in np.random.choice(len(alphabet), length))
return uuid
def create_experiment_dir(checkpoint_dir: pathlib.Path,
all_arguments: Dict[str, Any]) -> pathlib.Path:
"""Create an experiment directory and save all arguments in it.
Additionally, also store the githash and gitdiff. Finally create
a directory for `Tensorboard` logs. The structure would look something
like
checkpoint_dir
`-experiment-name
|- hparams.json
|- githash.log
|- gitdiff.log
`- tb_dir/
Args:
checkpoint_dir (pathlib.Path):
The base checkpoint directory
all_arguments (Dict[str, Any]):
The arguments to save
Returns:
pathlib.Path: The experiment directory
"""
# experiment name follows the following convention
# {exp_type}.{YYYY}.{MM}.{DD}.{HH}.{MM}.{SS}.{uuid}
current_time = datetime.datetime.now(pytz.timezone("US/Pacific"))
expname = "bert_pretrain.{0}.{1}.{2}.{3}.{4}.{5}.{6}".format(
current_time.year,
current_time.month,
current_time.day,
current_time.hour,
current_time.minute,
current_time.second,
get_unique_identifier(),
)
exp_dir = checkpoint_dir / expname
if not is_rank_0():
return exp_dir
exp_dir.mkdir(exist_ok=False)
hparams_file = exp_dir / "hparams.json"
with hparams_file.open("w") as handle:
json.dump(obj=all_arguments, fp=handle, indent=2)
# Save the git hash
try:
gitlog = sh.git.log("-1", format="%H", _tty_out=False, _fg=False)
with (exp_dir / "githash.log").open("w") as handle:
handle.write(gitlog.stdout.decode("utf-8"))
except sh.ErrorReturnCode_128:
log_dist(
"Seems like the code is not running from"
" within a git repo, so hash will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# And the git diff
try:
gitdiff = sh.git.diff(_fg=False, _tty_out=False)
with (exp_dir / "gitdiff.log").open("w") as handle:
handle.write(gitdiff.stdout.decode("utf-8"))
except sh.ErrorReturnCode_129:
log_dist(
"Seems like the code is not running from"
" within a git repo, so diff will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# Finally create the Tensorboard Dir
tb_dir = exp_dir / "tb_dir"
tb_dir.mkdir(exist_ok=False)
return exp_dir
######################################################################
################ Checkpoint Related Functions ########################
######################################################################
def load_model_checkpoint(
load_checkpoint_dir: pathlib.Path,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
) -> Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
"""Loads the optimizer state dict and model state dict from the load_checkpoint_dir
into the passed model and optimizer. Searches for the most recent checkpoint to
load from
Args:
load_checkpoint_dir (pathlib.Path):
The base checkpoint directory to load from
model (torch.nn.Module):
The model to load the checkpoint weights into
optimizer (torch.optim.Optimizer):
The optimizer to load the checkpoint weights into
Returns:
Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
The checkpoint step, model with state_dict loaded and
optimizer with state_dict loaded
"""
log_dist(
f"Loading model and optimizer checkpoint from {load_checkpoint_dir}",
ranks=[0],
level=logging.INFO)
checkpoint_files = list(
filter(
lambda path: re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name) is
not None,
load_checkpoint_dir.glob("*.pt"),
))
assert len(checkpoint_files) > 0, "No checkpoints found in directory"
checkpoint_files = sorted(
checkpoint_files,
key=lambda path: int(
re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name).group("iter_no")
),
)
latest_checkpoint_path = checkpoint_files[-1]
checkpoint_step = int(
re.search(r"iter_(?P<iter_no>\d+)\.pt",
latest_checkpoint_path.name).group("iter_no"))
state_dict = torch.load(latest_checkpoint_path)
model.load_state_dict(state_dict["model"], strict=True)
optimizer.load_state_dict(state_dict["optimizer"])
log_dist(
f"Loading model and optimizer checkpoints done. Loaded from {latest_checkpoint_path}",
ranks=[0],
level=logging.INFO)
return checkpoint_step, model, optimizer
######################################################################
######################## Driver Functions ############################
######################################################################
def train(
checkpoint_dir: str = None,
load_checkpoint_dir: str = None,
# Dataset Parameters
mask_prob: float = 0.15,
random_replace_prob: float = 0.1,
unmask_replace_prob: float = 0.1,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
# Model Parameters
num_layers: int = 6,
num_heads: int = 8,
ff_dim: int = 512,
h_dim: int = 256,
dropout: float = 0.1,
# Training Parameters
batch_size: int = 8,
num_iterations: int = 10000,
checkpoint_every: int = 1000,
log_every: int = 10,
local_rank: int = -1,
) -> pathlib.Path:
"""Trains a [Bert style](https://arxiv.org/pdf/1810.04805.pdf)
(transformer encoder only) model for MLM Task
Args:
checkpoint_dir (str):
The base experiment directory to save experiments to
mask_prob (float, optional):
The fraction of tokens to mask. Defaults to 0.15.
random_replace_prob (float, optional):
The fraction of masked tokens to replace with random token.
Defaults to 0.1.
unmask_replace_prob (float, optional):
The fraction of masked tokens to leave unchanged.
Defaults to 0.1.
max_seq_length (int, optional):
The maximum sequence length of the examples. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
num_layers (int, optional):
The number of layers in the Bert model. Defaults to 6.
num_heads (int, optional):
Number of attention heads to use. Defaults to 8.
ff_dim (int, optional):
Size of the intermediate dimension in the FF layer.
Defaults to 512.
h_dim (int, optional):
Size of intermediate representations.
Defaults to 256.
dropout (float, optional):
Amount of dropout to use. Defaults to 0.1.
batch_size (int, optional):
The minibatch size. Defaults to 8.
num_iterations (int, optional):
Total number of iterations to run the model for.
Defaults to 10000.
checkpoint_every (int, optional):
Save checkpoint after these many steps.
.. note::
You want this to be frequent enough that you can
resume training in case it crashes, but not so much
that you fill up your entire storage !
Defaults to 1000.
log_every (int, optional):
Print logs after these many steps. Defaults to 10.
local_rank (int, optional):
Which GPU to run on (-1 for CPU). Defaults to -1.
Returns:
pathlib.Path: The final experiment directory
"""
device = (torch.device("cuda", local_rank) if (local_rank > -1)
and torch.cuda.is_available() else torch.device("cpu"))
################################
###### Create Exp. Dir #########
################################
if checkpoint_dir is None and load_checkpoint_dir is None:
log_dist(
"Need to specify one of checkpoint_dir"
" or load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir is not None and load_checkpoint_dir is not None:
log_dist(
"Cannot specify both checkpoint_dir"
" and load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir:
log_dist("Creating Experiment Directory",
ranks=[0],
level=logging.INFO)
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(exist_ok=True)
all_arguments = {
# Dataset Params
"mask_prob": mask_prob,
"random_replace_prob": random_replace_prob,
"unmask_replace_prob": unmask_replace_prob,
"max_seq_length": max_seq_length,
"tokenizer": tokenizer,
# Model Params
"num_layers": num_layers,
"num_heads": num_heads,
"ff_dim": ff_dim,
"h_dim": h_dim,
"dropout": dropout,
# Training Params
"batch_size": batch_size,
"num_iterations": num_iterations,
"checkpoint_every": checkpoint_every,
}
exp_dir = create_experiment_dir(checkpoint_dir, all_arguments)
log_dist(f"Experiment Directory created at {exp_dir}",
ranks=[0],
level=logging.INFO)
else:
log_dist("Loading from Experiment Directory",
ranks=[0],
level=logging.INFO)
load_checkpoint_dir = pathlib.Path(load_checkpoint_dir)
assert load_checkpoint_dir.exists()
with (load_checkpoint_dir / "hparams.json").open("r") as handle:
hparams = json.load(handle)
# Set the hparams
# Dataset Params
mask_prob = hparams.get("mask_prob", mask_prob)
tokenizer = hparams.get("tokenizer", tokenizer)
random_replace_prob = hparams.get("random_replace_prob",
random_replace_prob)
unmask_replace_prob = hparams.get("unmask_replace_prob",
unmask_replace_prob)
max_seq_length = hparams.get("max_seq_length", max_seq_length)
# Model Params
ff_dim = hparams.get("ff_dim", ff_dim)
h_dim = hparams.get("h_dim", h_dim)
dropout = hparams.get("dropout", dropout)
num_layers = hparams.get("num_layers", num_layers)
num_heads = hparams.get("num_heads", num_heads)
# Training Params
batch_size = hparams.get("batch_size", batch_size)
_num_iterations = hparams.get("num_iterations", num_iterations)
num_iterations = max(num_iterations, _num_iterations)
checkpoint_every = hparams.get("checkpoint_every", checkpoint_every)
exp_dir = load_checkpoint_dir
# Tensorboard writer
if is_rank_0():
tb_dir = exp_dir / "tb_dir"
assert tb_dir.exists()
summary_writer = SummaryWriter(log_dir=tb_dir)
################################
###### Create Datasets #########
################################
log_dist("Creating Datasets", ranks=[0], level=logging.INFO)
data_iterator = create_data_iterator(
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
batch_size=batch_size,
)
log_dist("Dataset Creation Done", ranks=[0], level=logging.INFO)
################################
###### Create Model ############
################################
log_dist("Creating Model", ranks=[0], level=logging.INFO)
model = create_model(
num_layers=num_layers,
num_heads=num_heads,
ff_dim=ff_dim,
h_dim=h_dim,
dropout=dropout,
)
log_dist("Model Creation Done", ranks=[0], level=logging.INFO)
################################
###### DeepSpeed engine ########
################################
log_dist("Creating DeepSpeed engine", ranks=[0], level=logging.INFO)
ds_config = {
"train_micro_batch_size_per_gpu": batch_size,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 1,
"offload_optimizer": {
"device": "cpu"
}
}
}
model, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
config=ds_config)
log_dist("DeepSpeed engine created", ranks=[0], level=logging.INFO)
################################
#### Load Model checkpoint #####
################################
start_step = 1
if load_checkpoint_dir is not None:
_, client_state = model.load_checkpoint(load_dir=load_checkpoint_dir)
checkpoint_step = client_state['checkpoint_step']
start_step = checkpoint_step + 1
################################
####### The Training Loop ######
################################
log_dist(
f"Total number of model parameters: {sum([p.numel() for p in model.parameters()]):,d}",
ranks=[0],
level=logging.INFO)
model.train()
losses = []
for step, batch in enumerate(data_iterator, start=start_step):
if step >= num_iterations:
break
# Move the tensors to device
for key, value in batch.items():
batch[key] = value.to(device)
# Forward pass
loss = model(**batch)
# Backward pass
model.backward(loss)
# Optimizer Step
model.step()
losses.append(loss.item())
if step % log_every == 0:
log_dist("Loss: {0:.4f}".format(np.mean(losses)),
ranks=[0],
level=logging.INFO)
if is_rank_0():
summary_writer.add_scalar(f"Train/loss", np.mean(losses), step)
if step % checkpoint_every == 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
# Save the last checkpoint if not saved yet
if step % checkpoint_every != 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
return exp_dir
if __name__ == "__main__":
torch.manual_seed(42)
np.random.seed(0)
random.seed(0)
fire.Fire(train)
| 36.836237
| 95
| 0.576334
| 3,561
| 31,716
| 4.919966
| 0.162595
| 0.021975
| 0.011929
| 0.019521
| 0.273345
| 0.224715
| 0.172945
| 0.147432
| 0.133505
| 0.108505
| 0
| 0.008284
| 0.292092
| 31,716
| 860
| 96
| 36.87907
| 0.772047
| 0.269296
| 0
| 0.18254
| 0
| 0.001984
| 0.101402
| 0.012863
| 0
| 0
| 0
| 0
| 0.005952
| 1
| 0.039683
| false
| 0
| 0.047619
| 0.005952
| 0.130952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98f5a9225473ea31a925278ee4add1b0f458f788
| 825
|
py
|
Python
|
programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py
|
vamsitallapudi/Coderefer-Python-Projects
|
a7acc682251661e296c64533f4a85d47e6eedda2
|
[
"Apache-2.0"
] | 1
|
2021-01-03T06:42:58.000Z
|
2021-01-03T06:42:58.000Z
|
programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py
|
vamsitallapudi/Coderefer-Python-Projects
|
a7acc682251661e296c64533f4a85d47e6eedda2
|
[
"Apache-2.0"
] | null | null | null |
programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py
|
vamsitallapudi/Coderefer-Python-Projects
|
a7acc682251661e296c64533f4a85d47e6eedda2
|
[
"Apache-2.0"
] | null | null | null |
# Given a singly linked list, determine if it is a palindrome.
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
fast = slow = head
# find the mid node
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# reverse the second half
node = None
while slow:
nxt = slow.next
slow.next = node
node = slow
slow = nxt
# compare first and second half of nodes
while node:
if node.val != head.val:
return False
node = node.next
head = head.next
return True
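A minimal usage sketch for the solution above (not part of the original file; the build_list helper and the sample values are assumptions added here for illustration):
# Hypothetical driver: build small singly linked lists and exercise isPalindrome.
def build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
if __name__ == "__main__":
    s = Solution()
    print(s.isPalindrome(build_list([1, 2, 2, 1])))  # expected: True
    print(s.isPalindrome(build_list([1, 2, 3])))     # expected: False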
| 22.916667
| 62
| 0.530909
| 100
| 825
| 4.34
| 0.44
| 0.0553
| 0.073733
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002024
| 0.401212
| 825
| 35
| 63
| 23.571429
| 0.876518
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98f808b42f55c190413c10c0ee75bee408ae97c6
| 1,671
|
py
|
Python
|
calculator.py
|
harshitbansal373/Python-Games
|
4e879b0a97b4b420ed6d440cd2d6a0332a2109b7
|
[
"MIT"
] | null | null | null |
calculator.py
|
harshitbansal373/Python-Games
|
4e879b0a97b4b420ed6d440cd2d6a0332a2109b7
|
[
"MIT"
] | null | null | null |
calculator.py
|
harshitbansal373/Python-Games
|
4e879b0a97b4b420ed6d440cd2d6a0332a2109b7
|
[
"MIT"
] | null | null | null |
from tkinter import *
import time
root=Tk()
root.title('Calculator')
root.config(bg='wheat')
def display(x):
global s
s=s+x
text.set(s)
def solve():
global s
try:
s=str(eval(text.get()))
except Exception as e:
text.set(e)
s=''
else:
text.set(s)
def clear():
global s
s=''
text.set(s)
def clear1():
global s
s=text.get()
s=s[:len(s)-1]
text.set(s)
def con():
label['text']=time.ctime()
label.after(1000,con)
s=''
text=StringVar()
f=Frame(root,bg='#dcdde1')
e=Entry(f,textvariable=text,bg='#f5f6fa',fg='#353b48',font='roboto 34 bold',justify='right',relief=RAISED)
e.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
l=['#aabbcc','#bbccdd','#ccddee','#ddeeff']
for i in ['789/','456*','123+','.0-=']:
f=Frame(root,bg=l.pop())
for j in i:
b=Button(f,text=j,bg='#00a8ff',fg='#353b48',font='roboto 34 italic',command=(lambda x=j:display(x)) if j!='=' else solve)
b.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
f1=Frame(root,bg='#dcdde1')
clear=Button(f1,text='C',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear)
clear.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
clear1=Button(f1,text='CE',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear1)
clear1.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f1.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
f2=Frame(root,bg='#dcdde1')
label=Label(f2,bg='#00a8ff',fg='#353b48',font='roboto 34')
label.pack(padx=10,pady=10,expand=YES,fill=BOTH)
f2.pack(padx=10,pady=10,expand=YES,fill=BOTH)
con()
root.mainloop()
| 25.318182
| 125
| 0.668462
| 305
| 1,671
| 3.662295
| 0.298361
| 0.048344
| 0.080573
| 0.096688
| 0.452999
| 0.435094
| 0.435094
| 0.38496
| 0.322292
| 0.256043
| 0
| 0.075497
| 0.096349
| 1,671
| 65
| 126
| 25.707692
| 0.664238
| 0
| 0
| 0.236364
| 0
| 0
| 0.131737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.036364
| 0
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98f8ea06315deb6bd9599f36bf3f99bf2965db61
| 8,280
|
py
|
Python
|
src/Main.py
|
OlavH96/Master
|
f98476063e579b7b2a80b81a2c0ca4005f5fce80
|
[
"MIT"
] | null | null | null |
src/Main.py
|
OlavH96/Master
|
f98476063e579b7b2a80b81a2c0ca4005f5fce80
|
[
"MIT"
] | null | null | null |
src/Main.py
|
OlavH96/Master
|
f98476063e579b7b2a80b81a2c0ca4005f5fce80
|
[
"MIT"
] | null | null | null |
import glob
import os
import keras
import tensorflow as tf
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import src.util.Files as Files
from src.util.ImageLoader import load_images_generator, resize_image, load_images_generator_with_filename
import numpy as np
import logging as log
import random
from src.util.Arguments import anomaly_arguments, get_model_choice
import src.util.Arguments as Arguments
from scipy.stats import norm
from PIL import Image
from src.train.Models import autoencoder, conv_autoencoder, vae_autoencoder, vae_loss, get_dummy_loss, from_argument_choice
import src.train.Models as Models
import src.util.Filenames as Filenames
import math
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def image_generator(path, max_x, max_y, color_mode="RGB"):
for i in load_images_generator(path, color_mode=color_mode):
i = resize_image(i, max_x, max_y)
i = np.array(i)
i = np.expand_dims(i, axis=0)
i = i / 255
yield (i, i)
def image_generator_with_filename(path, max_x, max_y, color_mode="RGB"):
for i, f in load_images_generator_with_filename(path, color_mode=color_mode):
i = resize_image(i, max_x, max_y)
i = np.array(i)
i = np.expand_dims(i, axis=0)
i = i / 255
yield (i, f)
def centered_image_generator(path, max_x, max_y, color_mode="RGB"):
while True:
for i, o in image_generator(path, max_x, max_y, color_mode=color_mode):
yield (i, o)
def train_on_images(epochs, max_x, max_y, path, model_type, model_name, arg_steps, validation_path, color_mode="RGB"):
sess = tf.Session()
keras.backend.set_session(sess)
# max_x = max([i.shape[0] for i in images])
# max_y = max([i.shape[1] for i in images])
# max_x, max_y = find_max_min_image_size(path = 'detected_images/*.png')
# print(max_x, max_y) # 304, 298
epochs = epochs
shape = (max_y, max_x, 3)
model = Models.from_argument_choice(model_type, shape)
steps = len(glob.glob(path))
if arg_steps != 0:
steps = arg_steps
model.summary()
# define the checkpoint
checkpoint = ModelCheckpoint(model_name, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
log.info('Fitting model...')
if validation_path:
history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode),
validation_data=centered_image_generator(validation_path, max_x, max_y, color_mode=color_mode),
validation_steps=100,
epochs=epochs,
steps_per_epoch=steps,
callbacks=callbacks_list)
else:
history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode),
epochs=epochs,
steps_per_epoch=steps,
callbacks=callbacks_list)
model.save(model_name)
loss = history.history['loss']
try:
plt.plot(loss)
if validation_path:
val_loss = history.history['val_loss']
plt.plot(val_loss, color='g')
plt.title(model_name)
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.savefig(f'training_loss_{model_name}.png')
except:
log.info("Failed to create loss graph")
log.info('Finished fitting model')
return model
def load_model_and_predict(model_path, num_predictions, path, max_x, max_y, model_type, model=None, color_mode="RGB", template_only=False):
# vae_loss(image_shape=(max_x, max_y, 3), log_var=0.5, mu=0.5)
im_shape = (max_x, max_y, 3)
if model_type == get_model_choice(Arguments.VAE) and not model:
model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)})
mu = model.get_layer('mu').output
log_var = model.get_layer('log').output
model.summary()
print(mu, log_var)
model.compile(optimizer='rmsprop', loss=vae_loss(im_shape, log_var, mu))
if model_type == get_model_choice(Arguments.CONVVAE) and not model:
model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)})
encoder = model.get_layer('encoder')
decoder = model.get_layer('decoder')
mu = encoder.get_layer('mu').output
log_var = encoder.get_layer('log').output
model.compile(optimizer='adam', loss=vae_loss(im_shape, log_var, mu))
if model_type != get_model_choice(Arguments.VAE) and not model:
model = load_model(model_path)
model.summary()
print("Loaded Model", model, model.input_shape)
max_x = model.input_shape[1]
max_y = model.input_shape[2]
images = list(image_generator_with_filename(path, max_x, max_y, color_mode=color_mode))
random.shuffle(images)
index = 0
print(f'Loaded {len(images)} images')
model_name = model_path.split('.')[0]
save_dir = Files.makedir_else_cleardir(f'./predictions/{model_name}_{Filenames.remove_path(Filenames.strip_path_modifier(path))}')
for i, filename in images: # centered_image_generator(path, max_x, max_y):
hashed = Filenames.md5hash(filename)
anomaly = "anomaly" in filename
extra = "_anomaly_" if anomaly else "_normal_"
pred = model.predict(i)
print(pred.shape)
for ii in i:
if color_mode == 'HSV':
ii = Image.fromarray((ii * 255).astype(np.uint8), 'HSV')
ii = ii.convert("RGB")
ii = np.array(ii)
plt.imsave(str(save_dir / f'orig{extra}{hashed}_{index}.png'), ii)
#plt.imsave(str(save_dir / f'temp.png'), pred[0], vmin=0, vmax=1)
print("input shape",i.shape)
evaluate = model.evaluate(i, i)
if type(evaluate) is list:
evaluate = evaluate[0]
print(index, evaluate)
for p in pred:
#print("prediction",p)
p = p / np.max(p)
if color_mode == 'HSV':
p = Image.fromarray((p * 255).astype(np.uint8), 'HSV')
p = p.convert('RGB')
p = np.array(p)
if template_only:
# Hacky solution, oh well
template_path = './src/sign_detection/image_generation/images/signs/png/362.50/362_5.png'
im = Image.open(template_path)
im = im.convert('RGB')
im = im.resize(size=(64,64))
im = np.array(im)
score = image_mse(i[0], im)
plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{score}.png'), im)
else:
plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{str(evaluate)}.png'), p)
index += 1
if index == num_predictions:
break
def image_mse(imageA, imageB):
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
if __name__ == '__main__':
args = anomaly_arguments()
log.info('Arguments', args)
print("Arguments", args)
model = None
if args.do_training:
model = train_on_images(
epochs=args.epochs,
path=args.path,
max_x=args.max_x,
max_y=args.max_y,
model_type=args.model_type,
model_name=args.model,
arg_steps=args.steps,
color_mode=args.color,
validation_path=args.validation_path
)
if args.do_predict:
load_model_and_predict(
model_path=args.model,
num_predictions=args.num_predictions,
max_x=args.max_x,
max_y=args.max_y,
path=args.pred_path if args.pred_path else args.path,
model_type=args.model_type,
model=model,
color_mode=args.color,
template_only=args.template
)
| 35.844156
| 139
| 0.620411
| 1,135
| 8,280
| 4.278414
| 0.188546
| 0.019769
| 0.02883
| 0.031301
| 0.347199
| 0.308896
| 0.271417
| 0.262356
| 0.255354
| 0.232084
| 0
| 0.010558
| 0.267874
| 8,280
| 230
| 140
| 36
| 0.790498
| 0.067512
| 0
| 0.196629
| 0
| 0
| 0.0785
| 0.039574
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.117978
| 0
| 0.162921
| 0.039326
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98fd965b02157810b02af85a0eee51f0f9a9f9e1
| 5,040
|
py
|
Python
|
Udacity P3 Additional Files/model.py
|
sayeayed/Udacity-Project4
|
da39d0013d35d90818f9aa24ef097e185e705489
|
[
"MIT"
] | null | null | null |
Udacity P3 Additional Files/model.py
|
sayeayed/Udacity-Project4
|
da39d0013d35d90818f9aa24ef097e185e705489
|
[
"MIT"
] | null | null | null |
Udacity P3 Additional Files/model.py
|
sayeayed/Udacity-Project4
|
da39d0013d35d90818f9aa24ef097e185e705489
|
[
"MIT"
] | null | null | null |
import os
import csv
import numpy as np
from sklearn.utils import shuffle
## Read in frame data
samples = []
with open('/../opt/carnd_p3/data/driving_log.csv') as csvfile: #open the log file
reader = csv.reader(csvfile) #as a readable csv
for line in reader:
samples.append(line) #add each line of the log file to samples
samples = samples[1:] # to remove table header
samples = shuffle(samples) # shuffle entire sample set before splitting into training and validation so that training isn't biased
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2) #split samples into 80% training, 20% validation
from scipy import ndimage #because cv2.imread() imports the image as BGR, and we want RGB
## Define generator to handle small portions of images at a time so that training is not as memory-heavy
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
# shuffle(samples) #shuffle within the training/validation sets, NOT NECESSARY SINCE SHUFFLING ALREADY SHUFFLED
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size] #collect the images for this batch
images = []
angles = []
for batch_sample in batch_samples:
path = '/../opt/carnd_p3/data/IMG/' #assign the location from which to read images
# read in images from all 3 cameras MAKING SURE TO READ IN AS RGB
center_image = ndimage.imread(path+batch_sample[0].split('/')[-1])
left_image = ndimage.imread(path+batch_sample[1].split('/')[-1])
right_image = ndimage.imread(path+batch_sample[2].split('/')[-1])
# read in steering angle
center_angle = float(batch_sample[3]) #read the steering angle
# apply a steering correction for the left and right images, in a way to generate "new" samples
correction = 0.2
left_angle = center_angle + correction
right_angle = center_angle - correction
# add images and angles to batch set
images.extend([center_image, left_image, right_image])
angles.extend([center_angle, left_angle, right_angle])
# copy all batches' images to final numpy array
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train) #shuffle before yielding result
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
ch, row, col = 3, 160, 320 # Full image format
#import Keras model layers
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
# BUILD MODEL
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x/127.5 - 1.0, input_shape=(row,col,ch)))
# Crop incoming data (training, validation, and autonomous so that everything is consistent)
model.add(Cropping2D(cropping=((60,20), (0,0)))) # could be first layer to reduce memory used in Lambda calculation, and thus faster training
# Begin CNN (similar to NVIDIA architecture)
# Convolution layer 1-3, kernel size 5 with stride of 2
model.add(Conv2D(24,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48,(5,5),strides=(2,2),activation='relu'))
# Convolution layers 4-5, kernel size 3 wth stride of 1
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
# Flatten convolution output to yield single numerical result
model.add(Flatten())
# Fully connected layers to complete computations, gradually decreasing in parameters until final value
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
## Training hyper parameters to play with
## Stop training checkpoints...
# save_path = 'model{epoch:02d}-{val_loss:.2f}.h5'
# checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
## OR
batch_size = 32
epochs = 5 #***
## Compile and train the model
model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) #use Mean Squared Error to measure loss, use Adam optimizer for tuning
model.fit_generator(train_generator, steps_per_epoch= len(train_samples)/batch_size,validation_data=validation_generator, validation_steps=len(validation_samples)/batch_size, epochs=5, verbose = 1) # train using generators
#save the trained model
model.save('model.h5')
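A tiny hedged illustration (made-up numbers, not from the original script) of the steering-correction trick inside the generator: each logged frame contributes three training samples whose angles bracket the recorded one.
# Hedged example: a correction of 0.2 applied to a recorded center angle of 0.1.
center_angle = 0.1
correction = 0.2
left_angle = center_angle + correction    # 0.3, paired with the left-camera image
right_angle = center_angle - correction   # -0.1, paired with the right-camera image
print(left_angle, right_angle)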
| 48.461538
| 222
| 0.709127
| 719
| 5,040
| 4.876217
| 0.369958
| 0.027382
| 0.027382
| 0.015402
| 0.08899
| 0.07587
| 0.047633
| 0.040502
| 0.022248
| 0.022248
| 0
| 0.028051
| 0.193651
| 5,040
| 104
| 223
| 48.461538
| 0.834646
| 0.416667
| 0
| 0.033333
| 0
| 0
| 0.037716
| 0.021799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.166667
| 0
| 0.183333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98fe28b6ed135c40a04274c069f20df97e941299
| 2,357
|
py
|
Python
|
utils/wavelengthfit_prim.py
|
GeminiDRSoftware/GHOSTDR
|
79cd1ac81a7458e06668d6dac51fc6f9c9c61b31
|
[
"BSD-3-Clause"
] | 1
|
2019-09-05T15:29:25.000Z
|
2019-09-05T15:29:25.000Z
|
utils/wavelengthfit_prim.py
|
GeminiDRSoftware/GHOSTDR
|
79cd1ac81a7458e06668d6dac51fc6f9c9c61b31
|
[
"BSD-3-Clause"
] | null | null | null |
utils/wavelengthfit_prim.py
|
GeminiDRSoftware/GHOSTDR
|
79cd1ac81a7458e06668d6dac51fc6f9c9c61b31
|
[
"BSD-3-Clause"
] | 2
|
2017-10-10T23:23:53.000Z
|
2022-02-15T23:28:22.000Z
|
#!/usr/bin/env python3
"""A script containing the basic principles of the extraction primitive inner
workings."""
from __future__ import division, print_function
import astropy.io.fits as pyfits  # the original pyfits package exposes the same getdata/writeto API
import numpy as np
from ghostdr import polyfit
# Firstly, let's find all the needed files
fitsdir = '/Users/mireland/data/ghost/cal_frames/'
# The polyfit model files currently live in the "test" directory; point this
# at wherever they are kept on your system.
test_files_dir = fitsdir
# Define the files in use (NB xmod.txt and wavemod.txt should be correct)
arc_file = fitsdir + "arc_extracted.fits"
# load it in now:
extracted_flux, extracted_vars = pyfits.getdata(arc_file)
# Where is the default location for the model? By default it is a parameter
# in the ghost class. If this needs to be overwritten, go ahead.
# This is the xmod file, wherever it is saved from the flat reduction.
xmodel_file = fitsdir + 'GHOST_1_1_blue_std_xmodPolyfit.fits'
# All the other models...
wmodel_file = test_files_dir + 'wparams_blue_std.fits'
spatmod_file = test_files_dir + 'spatmod.fits'
specmod_file = test_files_dir + 'specmod.fits'
rotmod_file = test_files_dir + 'rotmod2.fits'
# Find the arc line list file
arclinefile = '/home/jbento/code/ghostdr/ghostdr/ADCONFIG_GHOST/lookups/GHOST/Polyfit/mnras0378-0221-SD1.txt'
arcwaves, arcfluxes = np.loadtxt(arclinefile, usecols=[1, 2]).T
# Get the initial default model parameters from the lookup location
xpars = pyfits.getdata(xmodel_file)
wpars = pyfits.getdata(wmodel_file)
spatpars = pyfits.getdata(spatmod_file)
specpars = pyfits.getdata(specmod_file)
rotpars = pyfits.getdata(rotmod_file)
# Instantiate the ghost arm and load the model parameters (this has to happen
# after the parameter arrays above have been read in)
arm = polyfit.GhostArm('blue', mode='std')
arm.spectral_format_with_matrix(xpars, wpars, spatpars, specpars, rotpars)
# image_array and flat_image_array are the slit-viewer science and flat frames;
# they must be loaded before this point.
slitview = polyfit.SlitView(image_array, flat_image_array, mode='std')
# The extractor is given the polyfit "arm" object, and a slitview object which has
# been instantiated with the slit viewer data.
extractor = polyfit.Extractor(arm, slitview)
# Now find the other lines, after first re-loading into the extractor.
# The inspect parameter is a verbose option for visualising the line
# finding results.
lines_out = extractor.find_lines(extracted_flux, arcwaves, inspect=False)
# Now finally do the wavelength fit!
fitted_params, wave_and_resid = arm.read_lines_and_fit(wpars, lines_out, ydeg=3, xdeg=3)
# Optionally show residuals?
# Now write the output to a file, in whatever format suits the recipe system best.
pyfits.writeto('outputs.fits', fitted_params)

| 35.712121
| 107
| 0.801018
| 369
| 2,357
| 4.97019
| 0.498645
| 0.04253
| 0.028353
| 0.034896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008118
| 0.111583
| 2,357
| 65
| 108
| 36.261538
| 0.867717
| 0.439118
| 0
| 0
| 0
| 0.04
| 0.203089
| 0.144402
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
98ff5d19bcbfb3d13ae61a0ad4df7649e741ec52
| 1,506
|
py
|
Python
|
time_management/test/kronos_test.py
|
AyushRawal/time-management
|
a8876f7b681da837c41f17cf896eaa895017f17f
|
[
"MIT"
] | 1
|
2021-11-15T19:35:51.000Z
|
2021-11-15T19:35:51.000Z
|
time_management/test/kronos_test.py
|
AyushRawal/time-management
|
a8876f7b681da837c41f17cf896eaa895017f17f
|
[
"MIT"
] | null | null | null |
time_management/test/kronos_test.py
|
AyushRawal/time-management
|
a8876f7b681da837c41f17cf896eaa895017f17f
|
[
"MIT"
] | null | null | null |
import unittest
import datetime
import kronos
string_format_time = "%Y-%m-%d %H:%M:%S"
date_time_str = "2020-07-19 18:14:21"
class KronosTest(unittest.TestCase):
def test_get_day_of_week(self):
for i in range(len(kronos.week_days)):
date = kronos.get_date_time_from_string(f"2020-08-{10 + i} 13:00:00")
self.assertEqual(kronos.week_days.get(i), kronos.get_day_of_week(date))
def test_is_yesterday(self):
date_time = kronos.get_date_time_from_string("2020-07-20 18:14:21")
self.assertTrue(kronos.is_yesterday(date_time_str, today=date_time))
date_time = kronos.get_date_time_from_string("2020-07-19 18:14:21")
self.assertFalse(kronos.is_yesterday(date_time_str, today=date_time))
def test_is_previous_friday(self):
last_friday = "2020-08-14 13:00:00"
last_monday = kronos.get_date_time_from_string("2020-08-17 13:00:00")
self.assertTrue(kronos.is_previous_friday(last_friday, last_monday))
last_tuesday = kronos.get_date_time_from_string("2020-08-18 13:00:00")
self.assertFalse(kronos.is_previous_friday(last_friday, last_tuesday))
def test_is_overdue_checks_correctly(self):
creation_date = "2020-08-10 13:00:00"
completion_goal = 5
self.assertTrue(kronos.is_overdue(creation_date, completion_goal))
on_time_date = kronos.get_date_time_as_string()
on_time_goal = 100
self.assertFalse(kronos.is_overdue(on_time_date, on_time_goal))
| 41.833333
| 83
| 0.717131
| 239
| 1,506
| 4.179916
| 0.271967
| 0.104104
| 0.078078
| 0.102102
| 0.376376
| 0.351351
| 0.302302
| 0.23023
| 0.164164
| 0.082082
| 0
| 0.0928
| 0.169987
| 1,506
| 35
| 84
| 43.028571
| 0.7064
| 0
| 0
| 0
| 0
| 0
| 0.116202
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.142857
| false
| 0
| 0.107143
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70208d0f7ec90b8fef96ebe7d673c28540df5bc
| 2,558
|
py
|
Python
|
odm/dialects/postgresql/green.py
|
quantmind/pulsar-odm
|
5955c20beca0a89270c2b390335838deb7d5915e
|
[
"BSD-3-Clause"
] | 16
|
2015-02-17T22:23:48.000Z
|
2020-08-08T09:35:53.000Z
|
odm/dialects/postgresql/green.py
|
quantmind/pulsar-odm
|
5955c20beca0a89270c2b390335838deb7d5915e
|
[
"BSD-3-Clause"
] | 11
|
2015-02-25T11:37:09.000Z
|
2016-03-04T12:08:11.000Z
|
odm/dialects/postgresql/green.py
|
quantmind/pulsar-odm
|
5955c20beca0a89270c2b390335838deb7d5915e
|
[
"BSD-3-Clause"
] | 3
|
2017-02-27T10:24:31.000Z
|
2020-10-08T05:43:15.000Z
|
from asyncio import Future
from greenlet import getcurrent
import psycopg2
from psycopg2 import * # noqa
from psycopg2 import extensions, OperationalError
__version__ = psycopg2.__version__
def psycopg2_wait_callback(conn):
"""A wait callback to allow greenlet to work with Psycopg.
The caller must be from a greenlet other than the main one.
:param conn: psycopg2 connection or file number
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
"""
while True:
state = conn.poll()
if state == extensions.POLL_OK:
# Done with waiting
break
elif state == extensions.POLL_READ:
_wait_fd(conn)
elif state == extensions.POLL_WRITE:
_wait_fd(conn, read=False)
else: # pragma nocover
raise OperationalError("Bad result from poll: %r" % state)
# INTERNALS
def _wait_fd(conn, read=True):
'''Wait for an event on file descriptor ``fd``.
:param conn: file descriptor
:param read: wait for a read event if ``True``, otherwise a wait
for write event.
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
'''
current = getcurrent()
parent = current.parent
assert parent, '"_wait_fd" must be called by greenlet with a parent'
try:
fileno = conn.fileno()
except AttributeError:
fileno = conn
future = Future()
# When the event on fd occurs switch back to the current greenlet
if read:
future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
else:
future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
# switch back to parent greenlet
parent.switch(future)
# Back on the child greenlet. Raise error if there is one
future.result()
def _done_wait_fd(fd, future, read):
try:
if read:
future._loop.remove_reader(fd)
else:
future._loop.remove_writer(fd)
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(None)
try:
extensions.POLL_OK
except AttributeError: # pragma nocover
from pulsar import ImproperlyConfigured
raise ImproperlyConfigured(
'Psycopg2 does not have support for asynchronous connections. '
'You need at least version 2.2.0 of Psycopg2.')
extensions.set_wait_callback(psycopg2_wait_callback)
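For comparison, the blocking equivalent of the poll loop above is the classic select()-based wait callback; a hedged reference sketch, not part of this module:
# Reference sketch: the same POLL_OK/POLL_READ/POLL_WRITE state machine,
# but blocking on select() instead of switching greenlets.
import select
from psycopg2 import extensions, OperationalError
def wait_select(conn):
    while True:
        state = conn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            select.select([conn.fileno()], [], [])
        elif state == extensions.POLL_WRITE:
            select.select([], [conn.fileno()], [])
        else:
            raise OperationalError("bad state from poll: %r" % state)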
| 29.744186
| 76
| 0.67631
| 333
| 2,558
| 5.06006
| 0.336336
| 0.024926
| 0.033828
| 0.021365
| 0.164985
| 0.164985
| 0.164985
| 0.127003
| 0.127003
| 0.127003
| 0
| 0.006312
| 0.256841
| 2,558
| 85
| 77
| 30.094118
| 0.880063
| 0.315872
| 0
| 0.22
| 0
| 0
| 0.106762
| 0
| 0
| 0
| 0
| 0
| 0.02
| 1
| 0.06
| false
| 0
| 0.12
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70375d862917fab136e0bc4321aa240c2c6c44e
| 27,984
|
py
|
Python
|
test/test_replica_set_connection.py
|
h4ck3rm1k3/mongo-python-driver
|
dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c
|
[
"Apache-2.0"
] | 1
|
2019-04-27T20:15:11.000Z
|
2019-04-27T20:15:11.000Z
|
test/test_replica_set_connection.py
|
h4ck3rm1k3/mongo-python-driver
|
dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c
|
[
"Apache-2.0"
] | null | null | null |
test/test_replica_set_connection.py
|
h4ck3rm1k3/mongo-python-driver
|
dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import copy
import datetime
import os
import signal
import socket
import sys
import time
import thread
import traceback
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.read_preferences import ReadPreference
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.replica_set_connection import _partition_node
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure)
from test import version
from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)
class TestReplicaSetConnectionAgainstStandalone(unittest.TestCase):
"""This is a funny beast -- we want to run tests for ReplicaSetConnection
but only if the database at DB_IP and DB_PORT is a standalone.
"""
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
raise SkipTest()
def test_connect(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='anything',
connectTimeoutMS=600)
class TestConnectionReplicaSetBase(unittest.TestCase):
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
self.name = str(response['setName'])
self.w = len(response['hosts'])
self.hosts = set([_partition_node(h)
for h in response["hosts"]])
self.arbiters = set([_partition_node(h)
for h in response.get("arbiters", [])])
repl_set_status = conn.admin.command('replSetGetStatus')
primary_info = [
m for m in repl_set_status['members']
if m['stateStr'] == 'PRIMARY'
][0]
self.primary = _partition_node(primary_info['name'])
self.secondaries = [
_partition_node(m['name']) for m in repl_set_status['members']
if m['stateStr'] == 'SECONDARY'
]
else:
raise SkipTest()
def _get_connection(self, **kwargs):
return ReplicaSetConnection(pair,
replicaSet=self.name,
**kwargs)
class TestConnection(TestConnectionReplicaSetBase):
def test_connect(self):
self.assertRaises(ConnectionFailure, ReplicaSetConnection,
"somedomainthatdoesntexist.org:27017",
replicaSet=self.name,
connectTimeoutMS=600)
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='fdlksjfdslkjfd')
self.assertTrue(ReplicaSetConnection(pair, replicaSet=self.name))
def test_repr(self):
connection = self._get_connection()
self.assertEqual(repr(connection),
"ReplicaSetConnection(%r)" % (["%s:%d" % n
for n in
self.hosts],))
def test_properties(self):
c = ReplicaSetConnection(pair, replicaSet=self.name)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 10)
self.assertEqual(c.document_class, dict)
self.assertEqual(c.tz_aware, False)
# Make sure RSC's properties are copied to Database and Collection
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
self.assertEqual(obj.tag_sets, [{}])
self.assertEqual(obj.secondary_acceptable_latency_ms, 15)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, False)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.PRIMARY, cursor._Cursor__read_preference)
self.assertEqual([{}], cursor._Cursor__tag_sets)
self.assertEqual(15, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
c.close()
tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
c = ReplicaSetConnection(pair, replicaSet=self.name, max_pool_size=25,
document_class=SON, tz_aware=True,
slaveOk=False, safe=True,
read_preference=ReadPreference.SECONDARY,
tag_sets=copy.deepcopy(tag_sets),
secondary_acceptable_latency_ms=77)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 25)
self.assertEqual(c.document_class, SON)
self.assertEqual(c.tz_aware, True)
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.SECONDARY)
self.assertEqual(obj.tag_sets, tag_sets)
self.assertEqual(obj.secondary_acceptable_latency_ms, 77)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, True)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.SECONDARY, cursor._Cursor__read_preference)
self.assertEqual(tag_sets, cursor._Cursor__tag_sets)
self.assertEqual(77, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
cursor = c.pymongo_test.test.find(
read_preference=ReadPreference.NEAREST,
tag_sets=[{'dc':'ny'}, {}],
secondary_acceptable_latency_ms=123)
self.assertEqual(
ReadPreference.NEAREST, cursor._Cursor__read_preference)
self.assertEqual([{'dc':'ny'}, {}], cursor._Cursor__tag_sets)
self.assertEqual(123, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
if version.at_least(c, (1, 7, 4)):
self.assertEqual(c.max_bson_size, 16777216)
else:
self.assertEqual(c.max_bson_size, 4194304)
c.close()
def test_get_db(self):
connection = self._get_connection()
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, connection, "")
self.assertRaises(InvalidName, make_db, connection, "te$t")
self.assertRaises(InvalidName, make_db, connection, "te.t")
self.assertRaises(InvalidName, make_db, connection, "te\\t")
self.assertRaises(InvalidName, make_db, connection, "te/t")
self.assertRaises(InvalidName, make_db, connection, "te st")
self.assertTrue(isinstance(connection.test, Database))
self.assertEqual(connection.test, connection["test"])
self.assertEqual(connection.test, Database(connection, "test"))
connection.close()
def test_auto_reconnect_exception_when_read_preference_is_secondary(self):
c = self._get_connection()
db = c.pymongo_test
def raise_socket_error(*args, **kwargs):
raise socket.error
old_sendall = socket.socket.sendall
socket.socket.sendall = raise_socket_error
try:
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
self.assertRaises(AutoReconnect, cursor.next)
finally:
socket.socket.sendall = old_sendall
def test_operations(self):
c = self._get_connection()
# Check explicitly for a case we've commonly hit in tests:
# a replica set is started with a tiny oplog, a previous
# test does a big insert that leaves the secondaries
# permanently "RECOVERING", and our insert(w=self.w) hangs
# forever.
rs_status = c.admin.command('replSetGetStatus')
members = rs_status['members']
self.assertFalse(
[m for m in members if m['stateStr'] == 'RECOVERING'],
"Replica set is recovering, try a larger oplogSize next time"
)
db = c.pymongo_test
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.insert({'foo': 'x'}, safe=True, w=self.w, wtimeout=10000)
self.assertEqual(1, db.test.count())
cursor = db.test.find()
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we read from the primary
self.assertEqual(c.primary, cursor._Cursor__connection_id)
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we didn't read from the primary
self.assertTrue(cursor._Cursor__connection_id in c.secondaries)
self.assertEqual(1, db.test.count())
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.drop()
c.close()
def test_database_names(self):
connection = self._get_connection()
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
connection.close()
def test_drop_database(self):
connection = self._get_connection()
self.assertRaises(TypeError, connection.drop_database, 5)
self.assertRaises(TypeError, connection.drop_database, None)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database("pymongo_test")
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database(connection.pymongo_test)
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.close()
def test_copy_db(self):
c = self._get_connection()
self.assertTrue(c.in_request())
self.assertRaises(TypeError, c.copy_database, 4, "foo")
self.assertRaises(TypeError, c.copy_database, "foo", 4)
self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
c.pymongo_test.test.drop()
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.pymongo_test.test.insert({"foo": "bar"})
self.assertFalse("pymongo_test1" in c.database_names())
self.assertFalse("pymongo_test2" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1")
# copy_database() didn't accidentally end the request
self.assertTrue(c.in_request())
self.assertTrue("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.end_request()
self.assertFalse(c.in_request())
c.copy_database("pymongo_test", "pymongo_test2", pair)
# copy_database() didn't accidentally restart the request
self.assertFalse(c.in_request())
time.sleep(1)
self.assertTrue("pymongo_test2" in c.database_names())
self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
if version.at_least(c, (1, 3, 3, 1)):
c.drop_database("pymongo_test1")
c.pymongo_test.add_user("mike", "password")
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="foo", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="mike", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1",
username="mike", password="password")
self.assertTrue("pymongo_test1" in c.database_names())
time.sleep(2)
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.close()
def test_iteration(self):
connection = self._get_connection()
def iterate():
[a for a in connection]
self.assertRaises(TypeError, iterate)
connection.close()
def test_disconnect(self):
c = self._get_connection()
coll = c.foo.bar
c.disconnect()
c.disconnect()
coll.count()
c.disconnect()
c.disconnect()
coll.count()
def test_fork(self):
"""Test using a connection before and after a fork.
"""
if sys.platform == "win32":
raise SkipTest()
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest()
db = self._get_connection().pymongo_test
# Failure occurs if the connection is used before the fork
db.test.find_one()
#db.connection.end_request()
def loop(pipe):
while True:
try:
db.test.insert({"a": "b"}, safe=True)
for _ in db.test.find():
pass
except:
traceback.print_exc()
pipe.send(True)
os._exit(1)
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
# recv will only have data if the subprocess failed
try:
cp1.recv()
self.fail()
except EOFError:
pass
try:
cp2.recv()
self.fail()
except EOFError:
pass
db.connection.close()
def test_document_class(self):
c = self._get_connection()
db = c.pymongo_test
db.test.insert({"x": 1})
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.close()
c = self._get_connection(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.close()
def test_network_timeout(self):
no_timeout = self._get_connection()
timeout_sec = 1
timeout = self._get_connection(socketTimeoutMS=timeout_sec*1000)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
# A $where clause that takes a second longer than the timeout
where_func = delay(1 + timeout_sec)
def get_x(db):
doc = db.test.find().where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def get_x_timeout(db, t):
doc = db.test.find(network_timeout=t).where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
self.assertRaises(ConnectionFailure, get_x_timeout,
no_timeout.pymongo_test, 0.1)
no_timeout.close()
timeout.close()
def test_tz_aware(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
tz_aware='foo', replicaSet=self.name)
aware = self._get_connection(tz_aware=True)
naive = self._get_connection()
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert({"x": now}, safe=True)
time.sleep(1)
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
def test_ipv6(self):
try:
connection = ReplicaSetConnection("[::1]:%d" % (port,),
replicaSet=self.name)
except:
# Either mongod was started without --ipv6
# or the OS doesn't support it (or both).
raise SkipTest()
# Try a few simple things
connection = ReplicaSetConnection("mongodb://[::1]:%d" % (port,),
replicaSet=self.name)
connection = ReplicaSetConnection("mongodb://[::1]:%d/?safe=true;"
"replicaSet=%s" % (port, self.name))
connection = ReplicaSetConnection("[::1]:%d,localhost:"
"%d" % (port, port),
replicaSet=self.name)
connection = ReplicaSetConnection("localhost:%d,[::1]:"
"%d" % (port, port),
replicaSet=self.name)
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_bernie.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
connection.close()
def _test_kill_cursor_explicit(self, read_pref):
c = self._get_connection(read_preference=read_pref)
db = c.pymongo_test
db.drop_collection("test")
test = db.test
test.insert([{"i": i} for i in range(20)], w=1 + len(c.secondaries))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
cursor.next()
self.assertNotEqual(0, cursor.cursor_id)
connection_id = cursor._Cursor__connection_id
writer = c._ReplicaSetConnection__writer
if read_pref == ReadPreference.PRIMARY:
msg = "Expected cursor's connection_id to be %s, got %s" % (
writer, connection_id)
self.assertEqual(connection_id, writer, msg)
else:
self.assertNotEqual(connection_id, writer,
"Expected cursor's connection_id not to be primary")
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_kill_cursor_explicit_primary(self):
self._test_kill_cursor_explicit(ReadPreference.PRIMARY)
def test_kill_cursor_explicit_secondary(self):
self._test_kill_cursor_explicit(ReadPreference.SECONDARY)
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure Connection closes its
# socket if it gets an interrupt while waiting to recv() from it.
c = self._get_connection()
db = c.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert({'_id': 1}, safe=True)
old_signal_handler = None
try:
# Platform-specific hacks for raising a KeyboardInterrupt on the main
# thread while find() is in-progress: On Windows, SIGALRM is unavailable
# so we use second thread. In our Bamboo setup on Linux, the thread
# technique causes an error in the test at sock.recv():
# TypeError: 'int' object is not callable
# We don't know what causes this in Bamboo, so we hack around it.
if sys.platform == 'win32':
def interrupter():
time.sleep(0.25)
# Raises KeyboardInterrupt in the main thread
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
else:
# Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT for one
# second in the future, but easy to schedule SIGALRM.
def sigalarm(num, frame):
raise KeyboardInterrupt
old_signal_handler = signal.signal(signal.SIGALRM, sigalarm)
signal.alarm(1)
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
db.foo.find({'$where': where}).next()
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
self.assertTrue(raised, "Didn't raise expected ConnectionFailure")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
db.foo.find().next()
)
finally:
if old_signal_handler:
signal.signal(signal.SIGALRM, old_signal_handler)
def test_auto_start_request(self):
for bad_horrible_value in (None, 5, 'hi!'):
self.assertRaises(
(TypeError, ConfigurationError),
lambda: self._get_connection(auto_start_request=bad_horrible_value)
)
# auto_start_request should default to True
conn = self._get_connection()
pools = [mongo.pool for mongo in
conn._ReplicaSetConnection__members.values()]
self.assertTrue(conn.auto_start_request)
self.assertTrue(conn.in_request())
# Trigger the RSC to actually start a request
conn.test.test.find_one()
for pool in pools:
self.assertTrue(pool.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
for pool in pools:
self.assertFalse(pool.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.close()
conn = self._get_connection(auto_start_request=False)
self.assertFalse(conn.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
conn.close()
def test_schedule_refresh(self):
# Monitor thread starts waiting for _refresh_interval, 30 seconds
conn = self._get_connection()
# Reconnect if necessary
conn.pymongo_test.test.find_one()
secondaries = conn.secondaries
for secondary in secondaries:
conn._ReplicaSetConnection__members[secondary].up = False
conn._ReplicaSetConnection__members[conn.primary].up = False
# Wake up monitor thread
conn._ReplicaSetConnection__schedule_refresh()
# Refresh interval is 30 seconds; scheduling a refresh tells the
# monitor thread / greenlet to start a refresh now. We still need to
# sleep a few seconds for it to complete.
time.sleep(5)
for secondary in secondaries:
self.assertTrue(conn._ReplicaSetConnection__members[secondary].up,
"ReplicaSetConnection didn't detect secondary is up")
self.assertTrue(conn._ReplicaSetConnection__members[conn.primary].up,
"ReplicaSetConnection didn't detect primary is up")
conn.close()
def test_pinned_member(self):
latency = 1000 * 1000
conn = self._get_connection(
auto_start_request=False, secondary_acceptable_latency_ms=latency)
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
# No pinning since we're not in a request
assertReadFromAll(
self, conn, conn.secondaries,
ReadPreference.SECONDARY, None, latency)
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
conn.start_request()
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Repin
primary = read_from_which_host(conn, ReadPreference.PRIMARY)
self.assertEqual(conn.primary, primary)
assertReadFrom(self, conn, primary, ReadPreference.NEAREST)
# Repin again
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Unpin
conn.end_request()
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
if __name__ == "__main__":
unittest.main()
| 36.966975
| 85
| 0.611885
| 3,159
| 27,984
| 5.248496
| 0.158594
| 0.055187
| 0.023583
| 0.007057
| 0.493848
| 0.428408
| 0.308987
| 0.290531
| 0.266828
| 0.240651
| 0
| 0.009876
| 0.287164
| 27,984
| 756
| 86
| 37.015873
| 0.821285
| 0.116209
| 0
| 0.385338
| 0
| 0
| 0.05584
| 0.003612
| 0
| 0
| 0
| 0
| 0.261278
| 1
| 0.065789
| false
| 0.013158
| 0.043233
| 0.003759
| 0.12218
| 0.00188
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c703e56a113105edf215384785217acba5d2eb75
| 2,177
|
py
|
Python
|
jqi/cmd.py
|
jan-g/jqi
|
f304f9fda33ac9b9eae98848d2a64acbe0893131
|
[
"CC-BY-3.0",
"Apache-2.0"
] | 3
|
2020-04-15T13:40:59.000Z
|
2021-06-30T10:09:33.000Z
|
jqi/cmd.py
|
jan-g/jqi
|
f304f9fda33ac9b9eae98848d2a64acbe0893131
|
[
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null |
jqi/cmd.py
|
jan-g/jqi
|
f304f9fda33ac9b9eae98848d2a64acbe0893131
|
[
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null |
import argparse_helper as argparse
import config_dir
import sys
from .editor import Editor
def main(*args):
if len(args) > 0:
args = [args]
parser = argparse.ArgumentParser()
parser.add_argument("-f", dest="cfg_file", help="query save name")
parser.add_argument("-x", default=False, action="store_true", dest="run", help="run immediately")
parser.add_argument("-l", default=False, action="count", dest="list", help="list saved queries")
parser.add_argument("-p", default=False, action="store_true", dest="previous", help="use previous query")
parser.add_argument("pattern", nargs="?", help="override saved pattern")
parser.add_argument("file", nargs="?", help="file to operate on")
args = parser.parse_args(*args)
if args.cfg_file is None and args.previous:
args.cfg_file = "previous"
if args.cfg_file is not None and args.file is None:
args.file = args.pattern
args.pattern = None
editor = Editor(file=args.cfg_file, pattern=args.pattern)
if args.list > 0:
if args.cfg_file is not None:
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=args.cfg_file, create=False)
print(cfg["pattern"])
else:
list_stored(args.list > 1)
return
if args.file is None:
text = sys.stdin.read()
else:
with open(args.file) as f:
text = f.read()
if args.run:
editor.jq(text, stdio=True)
else:
result = editor.run(text)
if result == 0:
editor.save()
editor.save("previous")
else:
sys.exit(result)
def list_stored(long=False):
d = config_dir.config_dir(name=".jqi", sub_dir="query")
for f in d.iterdir():
name = f.name
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=name, create=False)
if long:
print(name)
for line in cfg["pattern"].splitlines():
print("\t{}".format(line))
else:
print("{}\t{}".format(name, cfg["pattern"].splitlines()[0]))
if __name__ == '__main__':
main("-f", "foo", "/tmp/x")
| 31.550725
| 109
| 0.601746
| 296
| 2,177
| 4.293919
| 0.280405
| 0.038552
| 0.080252
| 0.030685
| 0.18332
| 0.157356
| 0.108576
| 0.073958
| 0.073958
| 0.073958
| 0
| 0.003064
| 0.250345
| 2,177
| 68
| 110
| 32.014706
| 0.775735
| 0
| 0
| 0.090909
| 0
| 0
| 0.123105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.072727
| 0
| 0.127273
| 0.072727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c704a4dc1d06546eaf240da05c092e6fa0ab7b9d
| 1,704
|
py
|
Python
|
visual_dynamics/policies/random_offset_camera_target_policy.py
|
alexlee-gk/visual_dynamics
|
90227bb0d0aebb1989117b5c25ca311655ca7cc7
|
[
"MIT"
] | 30
|
2017-04-05T12:55:09.000Z
|
2022-03-14T14:31:31.000Z
|
visual_dynamics/policies/random_offset_camera_target_policy.py
|
alexlee-gk/visual_dynamics
|
90227bb0d0aebb1989117b5c25ca311655ca7cc7
|
[
"MIT"
] | 1
|
2017-06-19T02:39:03.000Z
|
2017-06-19T02:39:03.000Z
|
visual_dynamics/policies/random_offset_camera_target_policy.py
|
alexlee-gk/visual_dynamics
|
90227bb0d0aebb1989117b5c25ca311655ca7cc7
|
[
"MIT"
] | 13
|
2017-04-05T12:55:09.000Z
|
2021-03-16T01:59:12.000Z
|
import numpy as np
from visual_dynamics.policies import CameraTargetPolicy
class RandomOffsetCameraTargetPolicy(CameraTargetPolicy):
def __init__(self, env, target_env, camera_node_name, agent_node_name, target_node_name,
height=12.0, radius=16.0, angle=(-np.pi/4, np.pi/4), tightness=0.1, hra_interpolation=True):
self.height = height
self.radius = radius
self.angle = angle
offset = self.sample_offset()
super(RandomOffsetCameraTargetPolicy, self).__init__(env, target_env, camera_node_name, agent_node_name,
target_node_name, offset, tightness=tightness,
hra_interpolation=hra_interpolation)
def reset(self):
self.offset = self.sample_offset()
state = super(RandomOffsetCameraTargetPolicy, self).reset()
# self.offset = self.sample_offset()
return state
def sample_offset(self):
height = np.random.uniform(*self.height) if isinstance(self.height, (list, tuple)) else self.height
radius = np.random.uniform(*self.radius) if isinstance(self.radius, (list, tuple)) else self.radius
angle = np.random.uniform(*self.angle) if isinstance(self.angle, (list, tuple)) else self.angle
return np.array([radius * np.sin(angle), -radius * np.cos(angle), height])
def _get_config(self):
config = super(RandomOffsetCameraTargetPolicy, self)._get_config()
config.pop('offset')
config.update({'height': self.height,
'radius': self.radius,
'angle': self.angle})
return config
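A small hedged check (not from the original file) of the geometry in sample_offset: the x/y components keep the horizontal distance from the target equal to radius, with height added on the z axis.
# Hedged numeric check of the offset formula used in sample_offset.
import numpy as np
radius, angle, height = 16.0, np.pi / 4, 12.0
offset = np.array([radius * np.sin(angle), -radius * np.cos(angle), height])
assert np.isclose(np.linalg.norm(offset[:2]), radius)  # horizontal distance == radius
print(offset)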
| 47.333333
| 112
| 0.629695
| 190
| 1,704
| 5.468421
| 0.278947
| 0.046198
| 0.046198
| 0.063523
| 0.152069
| 0.102021
| 0.102021
| 0.102021
| 0.102021
| 0.102021
| 0
| 0.008
| 0.266432
| 1,704
| 35
| 113
| 48.685714
| 0.8232
| 0.019953
| 0
| 0
| 0
| 0
| 0.013789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7075ad8e2a1229e14b617586ca8b05a9f86dd2f
| 1,920
|
py
|
Python
|
mir/tools/mir_repo_utils.py
|
fenrir-z/ymir-cmd
|
6fbffd3c1ff5dd1c9a44b55de411523b50567661
|
[
"Apache-2.0"
] | 1
|
2022-01-12T03:12:47.000Z
|
2022-01-12T03:12:47.000Z
|
mir/tools/mir_repo_utils.py
|
fenrir-z/ymir-cmd
|
6fbffd3c1ff5dd1c9a44b55de411523b50567661
|
[
"Apache-2.0"
] | null | null | null |
mir/tools/mir_repo_utils.py
|
fenrir-z/ymir-cmd
|
6fbffd3c1ff5dd1c9a44b55de411523b50567661
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import os
from typing import Optional
from mir import scm
from mir.tools import mir_storage
def mir_check_repo_dvc_dirty(mir_root: str = ".") -> bool:
names = [name for name in mir_storage.get_all_mir_paths() if os.path.isfile(os.path.join(mir_root, name))]
if names:
dvc_cmd_args = ["--show-json", "--targets"]
dvc_cmd_args.extend(names)
dvc_scm = scm.Scm(mir_root, scm_executable="dvc")
dvc_result = dvc_scm.diff(dvc_cmd_args)
json_object = json.loads(dvc_result)
keys = ['added', 'deleted', 'modified', 'renamed', 'not in cache']
dvc_dirty = False
for key in keys:
dirty_value = json_object.get(key, None)
if dirty_value:
logging.info(f"{key}: {dirty_value}")
dvc_dirty = True
return dvc_dirty
else:
# if no mir files in this mir repo, it's clean
return False
def mir_check_repo_git_dirty(mir_root: str = ".") -> bool:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_result = git_scm.status("-s") # if clean, returns nothing
if (git_result or len(git_result) > 0):
logging.info(f"{git_result}")
return True
return False # clean
def mir_check_repo_dirty(mir_root: str = '.') -> bool:
return mir_check_repo_dvc_dirty(mir_root) or mir_check_repo_git_dirty(mir_root)
def mir_check_branch_exists(mir_root: str, branch: str) -> bool:
try:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_scm.rev_parse(branch)
return True
except Exception:
# git rev-parse will return non-zero code when can not find branch
# and cmd.py packs non-zero return code as an error
return False
def work_dir_to_monitor_file(work_dir: Optional[str]) -> Optional[str]:
return os.path.join(work_dir, 'out', 'monitor.txt') if work_dir else None
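# Usage sketch (editor's addition, not part of the original module), assuming a mir
# repository already exists at the hypothetical path below. mir_check_repo_dirty is
# simply the OR of the dvc and git checks defined above.
#
# if mir_check_repo_dirty(mir_root="/path/to/mir_repo"):
#     logging.warning("repo has uncommitted dvc/git changes; aborting")
# monitor = work_dir_to_monitor_file("/path/to/work_dir")  # -> "/path/to/work_dir/out/monitor.txt"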
| 32.542373
| 110
| 0.655729
| 292
| 1,920
| 4.058219
| 0.328767
| 0.059072
| 0.050633
| 0.037975
| 0.207595
| 0.179747
| 0.179747
| 0.064135
| 0.064135
| 0.064135
| 0
| 0.000684
| 0.239063
| 1,920
| 58
| 111
| 33.103448
| 0.810404
| 0.099479
| 0
| 0.166667
| 0
| 0
| 0.069066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119048
| false
| 0
| 0.142857
| 0.047619
| 0.452381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70864a5d3c270e78a0bc9da8738245a6e27664f
| 3,624
|
py
|
Python
|
utils/edit_utils.py
|
ermekaitygulov/STIT
|
93dca8d589b555fa99a5c5438a8517a52d8898c3
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 6
|
2022-03-11T23:42:12.000Z
|
2022-03-28T09:39:25.000Z
|
utils/edit_utils.py
|
bycloudai/STIT-Windows
|
cadb2a01457bfd1c90bcd8d220587b48e1c2327a
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
utils/edit_utils.py
|
bycloudai/STIT-Windows
|
cadb2a01457bfd1c90bcd8d220587b48e1c2327a
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
def add_texts_to_image_vertical(texts, pivot_images):
images_height = pivot_images.height
images_width = pivot_images.width
text_height = 256 + 16 - images_height % 16
num_images = len(texts)
image_width = images_width // num_images
text_image = Image.new('RGB', (images_width, text_height), (255, 255, 255))
draw = ImageDraw.Draw(text_image)
font_size = int(math.ceil(24 * image_width / 256))
try:
font = ImageFont.truetype("truetype/freefont/FreeSans.ttf", font_size)
except OSError:
font = ImageFont.load_default()
for i, text in enumerate(texts):
draw.text((image_width // 2 + i * image_width, text_height // 2), text, fill='black', anchor='ms', font=font)
out_image = Image.new('RGB', (pivot_images.width, pivot_images.height + text_image.height))
out_image.paste(text_image, (0, 0))
out_image.paste(pivot_images, (0, text_image.height))
return out_image
def get_affine_layers(synthesis):
blocks: List[SynthesisBlock] = [getattr(synthesis, f'b{res}') for res in synthesis.block_resolutions]
affine_layers = []
for block in blocks:
if hasattr(block, 'conv0'):
affine_layers.append((block.conv0.affine, True))
affine_layers.append((block.conv1.affine, True))
affine_layers.append((block.torgb.affine, False))
return affine_layers
def load_stylespace_std():
with open(paths_config.stylespace_mean_std, 'rb') as f:
_, s_std = pickle.load(f)
s_std = [torch.from_numpy(s).cuda() for s in s_std]
return s_std
def to_styles(edit: torch.Tensor, affine_layers):
idx = 0
styles = []
for layer, is_conv in affine_layers:
layer_dim = layer.weight.shape[0]
if is_conv:
styles.append(edit[idx:idx + layer_dim].clone())
idx += layer_dim
else:
styles.append(torch.zeros(layer_dim, device=edit.device, dtype=edit.dtype))
return styles
def w_to_styles(w, affine_layers):
w_idx = 0
styles = []
for affine, is_conv in affine_layers:
styles.append(affine(w[:, w_idx]))
if is_conv:
w_idx += 1
return styles
def paste_image_mask(inverse_transform, image, dst_image, mask, radius=0, sigma=0.0):
image_masked = image.copy().convert('RGBA')
pasted_image = dst_image.copy().convert('RGBA')
if radius != 0:
mask_np = np.array(mask)
kernel_size = (radius * 2 + 1, radius * 2 + 1)
kernel = np.ones(kernel_size)
eroded = cv2.erode(mask_np, kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)
blurred_mask = cv2.GaussianBlur(eroded, kernel_size, sigmaX=sigma)
blurred_mask = Image.fromarray(blurred_mask)
image_masked.putalpha(blurred_mask)
else:
image_masked.putalpha(mask)
projected = image_masked.transform(dst_image.size, Image.PERSPECTIVE, inverse_transform,
Image.BILINEAR)
pasted_image.alpha_composite(projected)
return pasted_image
def paste_image(inverse_transform, img, orig_image):
pasted_image = orig_image.copy().convert('RGBA')
projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, inverse_transform, Image.BILINEAR)
pasted_image.paste(projected, (0, 0), mask=projected)
return pasted_image
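# Usage sketch (editor's addition, not part of the original module): the typical
# pipeline is get_affine_layers() on a synthesis network, turning an edit vector
# into per-layer styles, then compositing a generated crop back onto the original
# frame. `synthesis`, `edit_vector`, `crop`, `orig`, `mask` and `inv_T` are
# hypothetical placeholders.
#
# affine_layers = get_affine_layers(synthesis)
# styles = to_styles(edit_vector, affine_layers)   # zero styles for the torgb layers
# result = paste_image_mask(inv_T, crop, orig, mask, radius=15, sigma=5.0)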
| 32.648649
| 117
| 0.683499
| 494
| 3,624
| 4.793522
| 0.291498
| 0.050676
| 0.019003
| 0.029139
| 0.099662
| 0.08277
| 0.054899
| 0.054899
| 0.054899
| 0.054899
| 0
| 0.016754
| 0.209437
| 3,624
| 110
| 118
| 32.945455
| 0.809773
| 0
| 0
| 0.116279
| 0
| 0
| 0.019868
| 0.008278
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081395
| false
| 0
| 0.139535
| 0
| 0.302326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7087550ae8556b1933bc7961a3ed0e9783aaa07
| 6,845
|
py
|
Python
|
conll_df/conll_df.py
|
interrogator/conll-df
|
35611f295e3f8230f574142151e3a19098edfdca
|
[
"MIT"
] | 27
|
2017-03-17T15:39:16.000Z
|
2021-11-23T09:10:10.000Z
|
conll_df/conll_df.py
|
interrogator/conll-df
|
35611f295e3f8230f574142151e3a19098edfdca
|
[
"MIT"
] | 2
|
2017-11-21T05:33:04.000Z
|
2018-09-22T13:05:06.000Z
|
conll_df/conll_df.py
|
interrogator/conll-df
|
35611f295e3f8230f574142151e3a19098edfdca
|
[
"MIT"
] | 8
|
2017-03-17T14:59:34.000Z
|
2022-02-25T19:09:27.000Z
|
import pandas as pd
# UD 1.0
CONLL_COLUMNS = ['i', 'w', 'l', 'p', 'n', 'm', 'g', 'f', 'd', 'c']
# UD 2.0
CONLL_COLUMNS_V2 = ['i', 'w', 'l', 'x', 'p', 'm', 'g', 'f', 'e', 'o']
# possible morphological attributes
MORPH_ATTS = ['type',
'animacy',
#'gender',
'number',
"Abbr",
"Animacy",
"Aspect",
"Case",
"Definite",
"Degree",
"Evident",
"Foreign",
"Gender",
"Mood",
"NumType",
"Number",
"Person",
"Polarity",
"Polite",
"Poss",
"PronType",
"Reflex",
"Tense",
"VerbForm",
"Voice",
"Type"]
def _make_sent_csv(sentstring, fname, meta, splitter, i, skip_meta=False):
"""
Take one CONLL-U sentence and add all metadata to each row
Return: str (CSV data) and dict (sent level metadata)
"""
fixed_lines = []
raw_lines = sentstring.splitlines()
for line in raw_lines:
if not line:
continue
if line.startswith('#'):
if not skip_meta:
try:
k, v = line.lstrip('# ').split(splitter, 1)
except ValueError:
k, v = line.lstrip('# ').split(splitter.strip(), 1)
meta[k.lower().strip()] = v.strip()
else:
line = '%s\t%s\t%s' % (fname, i, line)
fixed_lines.append(line)
return '\n'.join(fixed_lines), meta
def _add_governors_to_df(df):
"""
Add governor info to a DF. Increases memory usage quite a bit.
"""
# save the original index
i = df.index.get_level_values('i')
# add g
dfg = df.set_index('g', append=True)
# remove i
dfg = dfg.reset_index('i')
dfg = df.loc[dfg.index]
dfg = dfg[['w', 'l', 'p', 'f']]
dfg['i'] = i
dfg = dfg.set_index('i', append=True)
dfg.index.names = ['file', 's', 'g', 'i']
dfg = dfg.reset_index('g', drop=True)
for c in list(dfg.columns):
try:
dfg[c] = dfg[c].cat.add_categories(['ROOT'])
except (AttributeError, ValueError):
pass
dfg = dfg.fillna('ROOT')
dfg.columns = ['gw', 'gl', 'gp', 'gf']
dfg = df.join(dfg, how="inner")
return dfg
def conll_df(path,
corpus_name=False,
corp_folder=False,
v2="auto",
skip_morph=False,
skip_meta=False,
add_gov=False,
drop=['text', 'newdoc id'],
file_index=True,
categories=True,
extra_fields='auto',
drop_redundant=True,
**kwargs):
"""
Optimised CONLL-U reader for v2.0 data
Args:
path (str): the file to prepare
Returns:
pd.DataFrame: 2d array representation of file data
"""
import os
import re
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
splitter = ' = ' if v2 else '='
with open(path, 'r') as fo:
data = fo.read().strip('\n')
if v2 == 'auto':
v2 = 'sent_id = ' in data[:9999]
fname = os.path.basename(path)
# metadata that applies filewide
# a little bonus for those with annual data
basedict = {}
if not skip_meta:
year = re.search(r'[12][0-9][0-9][0-9]', fname)
if year:
basedict['year'] = year.group(0)
sents = data.split('\n\n')
sents_meta = [_make_sent_csv(sstring, fname, dict(basedict), splitter, i, skip_meta=skip_meta) \
for i, sstring in enumerate(sents, start=1)]
sents, metadata = zip(*sents_meta)
# make the sent df
sents = '\n\n'.join(sents)
sents = StringIO(sents)
if v2:
cols = ['file', 's'] + CONLL_COLUMNS_V2
else:
cols = ['file', 's'] + CONLL_COLUMNS
df = pd.read_csv(sents, sep="\t", header=None, names=cols, quoting=kwargs.pop('quoting', 3),
index_col=[0, 1, 2], engine='c', na_filter=False, **kwargs)
if v2 and not skip_morph:
df['m'] = df['m'].fillna('')
df['o'] = df['o'].fillna('')
if extra_fields == 'auto':
# evil line to get all possible keys in the final column
extra_fields = list(df['o'].str.extractall(r'(?:^|\|)([^=]+?)=')[0].unique())
cats = MORPH_ATTS + extra_fields
if 'SpaceAfter' not in cats:
cats.append('SpaceAfter')
cats = list(set(cats))
om = df['o'].str.cat(df['m'], sep='|').str.strip('|_')
# this is a very slow list comp, but i can't think of a better way to do it.
# the 'extractall' solution makes columns for not just the value, but the key...
extra = [om.str.extract('%s=([^|$]+)' % cat.title(), expand=True) for cat in cats]
extra = pd.concat(extra, axis=1)
extra.columns = cats
df = pd.concat([df, extra], axis=1)
# make and join the meta df
if not skip_meta:
metadata = {i: d for i, d in enumerate(metadata, start=1)}
metadata = pd.DataFrame(metadata).T
metadata.index.name = 's'
df = metadata.join(df, how='inner')
# we never want these to show up as a dataframe column
badcols = ['sent_id', 's', 'i', 'file']
# if we aren't parsing morph and extra columns, we should at least keep them
if not skip_morph:
badcols += ['o', 'm']
if drop:
badcols = badcols + drop
df = df.drop(badcols, axis=1, errors='ignore')
# some evil code to handle conll-u files where g col could be a string
if 'g' in df.columns:
df['g'] = df['g'].fillna(0)
if df['g'].dtype in [object, str]:
df['g'] = df['g'].str.replace('_', '0').astype(int)
df['g'] = df['g'].astype(int)
df = df.fillna('_')
# attempt to categorise data
if categories:
for c in list(df.columns):
if c in ['g', 'date']:
continue
try:
df[c] = df[c].astype('category')
except:
pass
if add_gov:
df = _add_governors_to_df(df)
if not file_index:
df.index = df.index.droplevel('file')
if drop_redundant:
empty_cols = []
for c in df.columns:
if len(df[c].unique()) == 1:
empty_cols.append(c)
df = df.drop(empty_cols, axis=1)
#reorder columns so that important things are first
firsts = CONLL_COLUMNS_V2 if v2 else CONLL_COLUMNS
firsts = [i for i in firsts if i in list(df.columns)]
lasts = [i for i in list(df.columns) if i not in firsts]
df = df[firsts + lasts]
return df
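# Usage sketch (editor's addition, not part of the original module): conll_df()
# returns a token-per-row DataFrame indexed by file / sentence / token; the path
# below is a hypothetical CoNLL-U file.
#
# df = conll_df('corpus/en-ud-dev.conllu', file_index=False, add_gov=True)
# df.head()   # token columns (w, l, p, f, ...) plus gw/gl/gp/gf when add_gov=True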
| 30.154185
| 100
| 0.512491
| 898
| 6,845
| 3.825167
| 0.312918
| 0.016303
| 0.01048
| 0.011354
| 0.062009
| 0.014556
| 0
| 0
| 0
| 0
| 0
| 0.010191
| 0.340541
| 6,845
| 226
| 101
| 30.287611
| 0.750775
| 0.148868
| 0
| 0.08125
| 0
| 0
| 0.075597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01875
| false
| 0.0125
| 0.0375
| 0
| 0.075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c708da26fb5e59e5b2a82edc62ad3d6177cc9df2
| 2,491
|
py
|
Python
|
scripts/postgres_to_lmdb_bars_60m.py
|
alexanu/atpy
|
3f4b5cfe7de7633ef053d2feaddae421806a9799
|
[
"MIT"
] | 24
|
2018-03-22T06:22:11.000Z
|
2022-03-14T09:04:44.000Z
|
scripts/postgres_to_lmdb_bars_60m.py
|
alexanu/atpy
|
3f4b5cfe7de7633ef053d2feaddae421806a9799
|
[
"MIT"
] | null | null | null |
scripts/postgres_to_lmdb_bars_60m.py
|
alexanu/atpy
|
3f4b5cfe7de7633ef053d2feaddae421806a9799
|
[
"MIT"
] | 9
|
2018-03-22T06:22:11.000Z
|
2020-09-19T16:47:13.000Z
|
#!/bin/python3
import argparse
import datetime
import functools
import logging
import os
import psycopg2
from dateutil.relativedelta import relativedelta
from atpy.data.cache.lmdb_cache import *
from atpy.data.cache.postgres_cache import BarsInPeriodProvider
from atpy.data.cache.postgres_cache import request_adjustments
from atpy.data.splits_dividends import adjust_df
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="PostgreSQL to LMDB configuration")
parser.add_argument('-lmdb_path', type=str, default=None, help="LMDB Path")
parser.add_argument('-delta_back', type=int, default=8, help="Default number of years to look back")
parser.add_argument('-adjust_splits', action='store_true', default=True, help="Adjust splits before saving")
parser.add_argument('-adjust_dividends', action='store_true', default=False, help="Adjust dividends before saving")
args = parser.parse_args()
lmdb_path = args.lmdb_path if args.lmdb_path is not None else os.environ['ATPY_LMDB_PATH']
con = psycopg2.connect(os.environ['POSTGRESQL_CACHE'])
adjustments = None
if args.adjust_splits and args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends')
elif args.adjust_splits:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='split')
elif args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='dividend')
now = datetime.datetime.now()
bgn_prd = datetime.datetime(now.year - args.delta_back, 1, 1)
bgn_prd = bgn_prd + relativedelta(days=7 - bgn_prd.weekday())
cache_read = functools.partial(read_pickle, lmdb_path=lmdb_path)
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table='bars_60m', bgn_prd=bgn_prd, delta=relativedelta(days=7),
overlap=relativedelta(microseconds=-1), cache=cache_read)
for i, df in enumerate(bars_in_period):
if cache_read(bars_in_period.current_cache_key()) is None:
if adjustments is not None:
adjust_df(df, adjustments)
write(bars_in_period.current_cache_key(), df, lmdb_path)
logging.info('Saving ' + bars_in_period.current_cache_key())
else:
logging.info('Cache hit on ' + bars_in_period.current_cache_key())
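# Invocation sketch (editor's addition, not part of the original script), assuming
# the ATPY_LMDB_PATH and POSTGRESQL_CACHE environment variables are set:
#
#   python scripts/postgres_to_lmdb_bars_60m.py -delta_back 8 -adjust_splits
#
# Each weekly bar frame is adjusted (if requested) and pickled into LMDB under the
# provider's current cache key, skipping keys that are already cached.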
| 43.701754
| 158
| 0.733842
| 331
| 2,491
| 5.265861
| 0.308157
| 0.041308
| 0.041308
| 0.043603
| 0.236374
| 0.236374
| 0.174412
| 0.133104
| 0.133104
| 0.133104
| 0
| 0.007208
| 0.164593
| 2,491
| 56
| 159
| 44.482143
| 0.83037
| 0.005219
| 0
| 0
| 0
| 0
| 0.134841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.261905
| 0
| 0.261905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7091f356e0452faea68f2b17a6227d31b0f1d34
| 746
|
py
|
Python
|
src/download_pdf.py
|
luccanunes/class-url-automation
|
6ccb77feaa9aede4c8475d9f79149cc8c2c31cc4
|
[
"MIT"
] | 1
|
2020-10-17T02:08:10.000Z
|
2020-10-17T02:08:10.000Z
|
src/download_pdf.py
|
luccanunes/class-url-automation
|
6ccb77feaa9aede4c8475d9f79149cc8c2c31cc4
|
[
"MIT"
] | null | null | null |
src/download_pdf.py
|
luccanunes/class-url-automation
|
6ccb77feaa9aede4c8475d9f79149cc8c2c31cc4
|
[
"MIT"
] | 1
|
2020-12-20T23:53:30.000Z
|
2020-12-20T23:53:30.000Z
|
def download_pdf(URL):
from selenium import webdriver
from time import sleep
URL = URL
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', {
# Change default directory for downloads
"download.default_directory": r"E:\coding\other\class-url-automation\src\pdfs",
"download.prompt_for_download": False, # To auto download the file
"download.directory_upgrade": True,
# It will not show PDF directly in chrome
"plugins.always_open_pdf_externally": True
})
options.add_argument("--headless")
driver = webdriver.Chrome(
executable_path=r'E:\coding\python\chromedriver.exe', chrome_options=options
)
driver.get(URL)
sleep(5)
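# Usage sketch (editor's addition, not part of the original script): the function
# opens the URL in a headless Chrome configured to auto-download PDFs into the
# hard-coded directory above; the URL below is a hypothetical placeholder.
#
# download_pdf("https://example.com/some-lecture.pdf")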
| 35.52381
| 87
| 0.687668
| 90
| 746
| 5.555556
| 0.644444
| 0.04
| 0.032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001704
| 0.213137
| 746
| 21
| 88
| 35.52381
| 0.850085
| 0.13941
| 0
| 0
| 0
| 0
| 0.323944
| 0.300469
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70a49b112aadc6ae32c90aac8b9581dc39ca540
| 1,491
|
py
|
Python
|
examples/custom_shape/stages.py
|
oksumoron/locust
|
fddfefe7ef1082bc5284cd2dd8477221484dfb0c
|
[
"MIT"
] | 18,336
|
2015-01-03T20:38:40.000Z
|
2022-03-31T16:02:35.000Z
|
examples/custom_shape/stages.py
|
oksumoron/locust
|
fddfefe7ef1082bc5284cd2dd8477221484dfb0c
|
[
"MIT"
] | 1,779
|
2015-01-01T02:09:30.000Z
|
2022-03-31T09:58:10.000Z
|
examples/custom_shape/stages.py
|
oksumoron/locust
|
fddfefe7ef1082bc5284cd2dd8477221484dfb0c
|
[
"MIT"
] | 2,689
|
2015-01-05T02:01:50.000Z
|
2022-03-31T13:13:09.000Z
|
from locust import HttpUser, TaskSet, task, constant
from locust import LoadTestShape
class UserTasks(TaskSet):
@task
def get_root(self):
self.client.get("/")
class WebsiteUser(HttpUser):
wait_time = constant(0.5)
tasks = [UserTasks]
class StagesShape(LoadTestShape):
"""
A simple load test shape class that has different user and spawn_rate at
different stages.
Keyword arguments:
stages -- A list of dicts, each representing a stage with the following keys:
duration -- When this many seconds pass the test is advanced to the next stage
users -- Total user count
spawn_rate -- Number of users to start/stop per second
stop -- A boolean that can stop the test at a specific stage
stop_at_end -- Can be set to stop once all stages have run.
"""
stages = [
{"duration": 60, "users": 10, "spawn_rate": 10},
{"duration": 100, "users": 50, "spawn_rate": 10},
{"duration": 180, "users": 100, "spawn_rate": 10},
{"duration": 220, "users": 30, "spawn_rate": 10},
{"duration": 230, "users": 10, "spawn_rate": 10},
{"duration": 240, "users": 1, "spawn_rate": 1},
]
def tick(self):
run_time = self.get_run_time()
for stage in self.stages:
if run_time < stage["duration"]:
tick_data = (stage["users"], stage["spawn_rate"])
return tick_data
return None
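# Usage sketch (editor's addition, not part of the original example): when run under
# locust, tick() is polled with the elapsed run time and returns (users, spawn_rate)
# for the first stage whose duration has not yet passed, e.g.
#
#   locust -f examples/custom_shape/stages.py --host http://localhost:8080
#
# At run_time 150s the shape above yields (100, 10); once 240s have elapsed tick()
# returns None and the test stops.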
| 29.82
| 90
| 0.602951
| 193
| 1,491
| 4.559585
| 0.481865
| 0.092045
| 0.0625
| 0.107955
| 0.059091
| 0.059091
| 0
| 0
| 0
| 0
| 0
| 0.039363
| 0.284373
| 1,491
| 49
| 91
| 30.428571
| 0.78538
| 0.343394
| 0
| 0
| 0
| 0
| 0.174006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.08
| 0
| 0.48
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70b23f1cce14640f16607fb8ec77754089292bc
| 2,115
|
py
|
Python
|
db/seed_ids.py
|
xtuyaowu/jtyd_python_spider
|
ca5c3efd5519f592c0d587c22f03812e7756c8ea
|
[
"MIT"
] | 7
|
2017-08-19T22:36:29.000Z
|
2018-06-03T07:02:04.000Z
|
db/seed_ids.py
|
xtuyaowu/jtyd_python_spider
|
ca5c3efd5519f592c0d587c22f03812e7756c8ea
|
[
"MIT"
] | 2
|
2021-04-30T20:37:14.000Z
|
2021-12-13T19:46:29.000Z
|
db/seed_ids.py
|
xtuyaowu/jtyd_python_spider
|
ca5c3efd5519f592c0d587c22f03812e7756c8ea
|
[
"MIT"
] | 4
|
2017-09-06T03:00:11.000Z
|
2017-12-10T08:04:21.000Z
|
# coding:utf-8
from sqlalchemy import text
from db.basic_db import db_session
from db.models import SeedIds
from decorators.decorator import db_commit_decorator
def get_seed():
"""
Get all user ids to be crawled
:return: user ids
"""
return db_session.query(SeedIds).filter(text('status=0')).all()
def get_seed_ids():
"""
Get all user ids to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('is_crawled=0')).all()
def get_home_ids():
"""
Get all user ids whose home pages need to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('home_crawled=0')).all()
@db_commit_decorator
def set_seed_crawled(uid, result):
"""
:param uid: user id that is crawled
:param result: crawling result
:return: None
"""
seed = db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
if seed:
if seed.is_crawled == 0:
seed.is_crawled = result
else:
seed = SeedIds(uid=uid, is_crawled=result)
db_session.add(seed)
db_session.commit()
def get_seed_by_id(uid):
return db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
@db_commit_decorator
def insert_seeds(ids):
db_session.execute(SeedIds.__table__.insert().prefix_with('IGNORE'), [{'uid': i} for i in ids])
db_session.commit()
@db_commit_decorator
def set_seed_other_crawled(uid):
"""
update it if user id already exists, else insert
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=1, other_crawled=1, home_crawled=1)
db_session.add(seed)
else:
seed.other_crawled = 1
db_session.commit()
@db_commit_decorator
def set_seed_home_crawled(uid):
"""
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=0, other_crawled=0, home_crawled=1)
db_session.add(seed)
else:
seed.home_crawled = 1
db_session.commit()
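# Usage sketch (editor's addition, not part of the original module), assuming the
# SQLAlchemy session and SeedIds model are configured as imported above:
#
# insert_seeds([123456, 654321])          # IGNOREs ids that already exist
# for seed in get_seed_ids():             # uids with is_crawled == 0
#     crawl(seed.uid)                     # `crawl` is a hypothetical worker
#     set_seed_crawled(seed.uid, 1)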
| 24.593023
| 99
| 0.659102
| 314
| 2,115
| 4.229299
| 0.203822
| 0.09488
| 0.064006
| 0.075301
| 0.557982
| 0.50753
| 0.458584
| 0.458584
| 0.458584
| 0.273343
| 0
| 0.007903
| 0.222222
| 2,115
| 85
| 100
| 24.882353
| 0.799392
| 0.178251
| 0
| 0.409091
| 0
| 0
| 0.026625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0.022727
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70b35ed30f0bbb93f6ab0a59185f9e44b410fce
| 16,745
|
py
|
Python
|
tobler/area_weighted/area_interpolate.py
|
sjsrey/tobler
|
8e3ebd5d01de459e4387fabd57cbb12cb6735596
|
[
"BSD-3-Clause"
] | 1
|
2019-06-21T19:32:22.000Z
|
2019-06-21T19:32:22.000Z
|
tobler/area_weighted/area_interpolate.py
|
sjsrey/tobler
|
8e3ebd5d01de459e4387fabd57cbb12cb6735596
|
[
"BSD-3-Clause"
] | null | null | null |
tobler/area_weighted/area_interpolate.py
|
sjsrey/tobler
|
8e3ebd5d01de459e4387fabd57cbb12cb6735596
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags, coo_matrix
import pandas as pd
from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs
def _area_tables_binning(source_df, target_df, spatial_index):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
...
NOTE: this currently relies on Geopandas' spatial index machinery
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
target_df : geopandas.GeoDataFrame
GeoDataFrame defining the output geometries
spatial_index : str
Spatial index to use to build the allocation of area from source to
target tables. It currently support the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df.copy()
df2 = target_df.copy()
# it is generally more performant to use the longer df as spatial index
if spatial_index == "auto":
if df1.shape[0] > df2.shape[0]:
spatial_index = "source"
else:
spatial_index = "target"
if spatial_index == "source":
ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects")
elif spatial_index == "target":
ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects")
else:
raise ValueError(
f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'."
)
areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
def _area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row[row.geometry.name].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
def _area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
spatial_index="auto",
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
[Optional. Default=None] Columns in dataframes for extensive variables
intensive_variables : list
[Optional. Default=None] Columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
[Optional. Default=None] Area allocation source-target correspondence
table. If not provided, it will be built from `source_df` and
`target_df` using `tobler.area_interpolate._area_tables_binning`
allocate_total : boolean
[Optional. Default=True] True if total value of source area should be
allocated. False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is exhausted by
intersections. See Notes for more details.
spatial_index : str
[Optional. Default="auto"] Spatial index to use to build the
allocation of area from source to target tables. It currently support
the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodataframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
table = _area_tables_binning(source_df, target_df, spatial_index)
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
extensive = np.asarray(extensive)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
area = np.asarray(table.sum(axis=0))
den = 1.0 / (area + (area == 0))
n, k = den.shape
den = den.reshape((k,))
den = diags([den], [0])
weights = table.dot(den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
n = vals.shape[0]
vals = vals.reshape((n,))
estimates = diags([vals], [0])
estimates = estimates.dot(weights).sum(axis=0)
intensive.append(estimates.tolist()[0])
intensive = np.asarray(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_interpolate(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
tables=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
target_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
extensive_variables : list, (optional)
columns in dataframes for extensive variables
intensive_variables : list, (optional)
columns in dataframes for intensive variables
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodataframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if tables is None:
SU, UT = _area_tables(source_df, target_df)
else:
SU, UT = tables
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = SU.sum(axis=1)
den = den + (den == 0)
weights = np.dot(np.diag(1 / den), SU)
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = np.dot(np.diag(vals), weights)
estimates = np.dot(estimates, UT)
estimates = estimates.sum(axis=0)
extensive.append(estimates)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
ST = np.dot(SU, UT)
area = ST.sum(axis=0)
den = np.diag(1.0 / (area + (area == 0)))
weights = np.dot(ST, den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
vals.shape = (len(vals), 1)
est = (vals * weights).sum(axis=0)
intensive.append(est)
intensive = np.array(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_tables_raster(
source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True
):
"""
Construct area allocation and source-target correspondence tables according to a raster 'populated' areas
Parameters
----------
source_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
target_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
raster_path : str
the path to the associated raster image.
codes : list
list of integer code values that should be considered as 'populated'.
Since this draws inspiration from the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).
The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html
Only taken into consideration for raster-based harmonization.
force_crs_match : bool (default is True)
Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.
It is recommended to leave this argument as True.
Returns
-------
tables: tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union_pre = gpd.overlay(source_df, target_df, how="union")
# Establishing a CRS for the generated union
warnings.warn(
"The CRS for the generated union will be set to be the same as source_df."
)
res_union_pre.crs = source_df.crs
# The 'append_profile_in_gdf' function is present in nlcd.py script
res_union = _fast_append_profile_in_gdf(
res_union_pre, raster_path, force_crs_match=force_crs_match
)
str_codes = [str(i) for i in codes]
str_list = ["Type_" + i for i in str_codes]
# Extract list of code names that actually appear in the appended dataset
str_list_ok = [col for col in res_union.columns if col in str_list]
res_union["Populated_Pixels"] = res_union[str_list_ok].sum(axis=1)
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row["Populated_Pixels"]
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
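# Usage sketch (editor's addition, not part of the original module): both
# interpolation helpers expect source/target GeoDataFrames that share a CRS.
# The `tracts` and `hexgrid` frames below are hypothetical placeholders.
#
# out = _area_interpolate_binning(tracts, hexgrid,
#                                 extensive_variables=['population'],
#                                 intensive_variables=['median_income'])
# out['population'].sum()   # ~= tracts['population'].sum() when allocate_total=True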
| 33.828283
| 225
| 0.657928
| 2,315
| 16,745
| 4.600432
| 0.141253
| 0.039812
| 0.01784
| 0.021033
| 0.715399
| 0.688357
| 0.665258
| 0.65784
| 0.615869
| 0.592207
| 0
| 0.006242
| 0.253688
| 16,745
| 494
| 226
| 33.896761
| 0.845963
| 0.473873
| 0
| 0.595556
| 0
| 0
| 0.043076
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0.022222
| 0.031111
| 0
| 0.097778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70c23e78ecc9c77169196b937ad121dbbab19c4
| 1,345
|
py
|
Python
|
ansiblemetrics/playbook/num_deprecated_modules.py
|
radon-h2020/AnsibleMetrics
|
8a8e27d9b54fc1578d00526c8663184a2e686cb2
|
[
"Apache-2.0"
] | 1
|
2020-04-24T16:09:14.000Z
|
2020-04-24T16:09:14.000Z
|
ansiblemetrics/playbook/num_deprecated_modules.py
|
radon-h2020/AnsibleMetrics
|
8a8e27d9b54fc1578d00526c8663184a2e686cb2
|
[
"Apache-2.0"
] | null | null | null |
ansiblemetrics/playbook/num_deprecated_modules.py
|
radon-h2020/AnsibleMetrics
|
8a8e27d9b54fc1578d00526c8663184a2e686cb2
|
[
"Apache-2.0"
] | null | null | null |
from ansiblemetrics.ansible_modules import DEPRECATED_MODULES_LIST
from ansiblemetrics.ansible_metric import AnsibleMetric
class NumDeprecatedModules(AnsibleMetric):
""" This class measures the number of times tasks use deprecated modules."""
def count(self):
"""Return the deprecated modules occurrence.
Example
-------
.. highlight:: python
.. code-block:: python
from ansiblemetrics.general.num_deprecated_modules import NumDeprecatedModules
playbook = '''
- name: Include unique username from register.yml
include_vars: # non deprecated module
file: username_info.yml
- name: Create a service
oc: # deprecated module
state: present
name: myservice
namespace: mynamespace
kind: Service
'''
NumDeprecatedModules(playbook).count()
>> 1
Returns
-------
int
deprecated modules occurrence
"""
modules = []
for task in self.tasks:
if not task:
continue
for key in task:
if key in DEPRECATED_MODULES_LIST:
modules.append(key)
return len(modules)
| 25.377358
| 90
| 0.553903
| 117
| 1,345
| 6.282051
| 0.555556
| 0.138776
| 0.068027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001199
| 0.379926
| 1,345
| 52
| 91
| 25.865385
| 0.880096
| 0.511524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70da4e644f1e748e2087d4c879dc99b2751ebd0
| 2,710
|
py
|
Python
|
bin/find_latest_versions.py
|
ebreton/ghost-in-a-shell
|
8b3382d60a86322c74c6ee1b52f068dfcfc3d79e
|
[
"MIT"
] | 2
|
2018-05-31T08:56:16.000Z
|
2020-01-23T15:12:44.000Z
|
bin/find_latest_versions.py
|
ebreton/ghost-in-a-shell
|
8b3382d60a86322c74c6ee1b52f068dfcfc3d79e
|
[
"MIT"
] | null | null | null |
bin/find_latest_versions.py
|
ebreton/ghost-in-a-shell
|
8b3382d60a86322c74c6ee1b52f068dfcfc3d79e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from distutils.version import LooseVersion
import argparse
import logging
import requests
import re
session = requests.Session()
# authorization token
TOKEN_URL = "https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull"
# find all tags
TAGS_URL = "https://index.docker.io/v2/%s/tags/list"
TAG_RE = re.compile(r"^[\d]+(\.[\d]+)*$")
# get image digest for target
TARGET_DIGEST = "https://index.docker.io/v2/%(repository)s/manifests/%(tag)s"
class Fetcher:
DIGEST_HEADER = {}
def __init__(self, repository):
self.repository = repository
self.token = self.get_token()
self.headers = {"Authorization": "Bearer %s"% self.token}
self.headers_for_tags = {
"Authorization": "Bearer %s"% self.token,
"Accept": "application/vnd.docker.distribution.manifest.v2+json"
}
logging.debug("initialized fetcher for %s", self.repository)
def get_token(self):
response = session.get(TOKEN_URL % self.repository)
response.raise_for_status()
token = response.json().get("token")
logging.debug("got token: %s", token)
return token
def get_versions(self):
response = session.get(TAGS_URL % self.repository, headers=self.headers_for_tags)
response.raise_for_status()
all_tags = response.json().get("tags")
numbered_tags = filter(lambda x: TAG_RE.match(x), all_tags)
versions = map(LooseVersion, numbered_tags)
logging.debug("got tags: %s", versions)
return versions
def find_latest(repository):
fetcher = Fetcher(repository)
all_tags = fetcher.get_versions()
return max(all_tags)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage="""Version checker script
This file retrieves the latest version of the ghost container image from docker hub
It can be run with both python 2.7 and 3.6""")
parser.add_argument("repository", nargs='?',
help="repository name [default:library/ghost]",
default="library/ghost")
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-q', '--quiet', action='store_true')
args = parser.parse_args()
# set up level of logging
level = logging.INFO
if args.quiet:
level = logging.WARNING
elif args.debug:
level = logging.DEBUG
# set up logging to console
logging.basicConfig(format='%(levelname)s - %(funcName)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(level)
logging.debug(args)
# version needs to be print to output in order to be retrieved by Makefile
print(find_latest(args.repository))
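# Invocation sketch (editor's addition, not part of the original script): the
# repository defaults to library/ghost, so both invocations below print the highest
# numeric tag found on Docker Hub:
#
#   python bin/find_latest_versions.py
#   python bin/find_latest_versions.py library/ghost -d   # with debug logging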
| 30.449438
| 94
| 0.667897
| 345
| 2,710
| 5.113043
| 0.391304
| 0.019841
| 0.028912
| 0.020408
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003245
| 0.204059
| 2,710
| 88
| 95
| 30.795455
| 0.814557
| 0.07417
| 0
| 0.033333
| 0
| 0.016667
| 0.261391
| 0.029976
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.083333
| 0
| 0.233333
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70f068d9386d59199952ccdcd03582e192c0909
| 2,933
|
py
|
Python
|
pelicanconf.py
|
myrle-krantz/treasurer-site
|
e0beca3d0d724ae09300974f7020a5611fbd3034
|
[
"Apache-2.0"
] | 1
|
2021-11-09T21:42:44.000Z
|
2021-11-09T21:42:44.000Z
|
pelicanconf.py
|
myrle-krantz/treasurer-site
|
e0beca3d0d724ae09300974f7020a5611fbd3034
|
[
"Apache-2.0"
] | 1
|
2021-11-01T11:14:10.000Z
|
2021-11-01T11:14:10.000Z
|
pelicanconf.py
|
isabella232/treasurer-site
|
9a2e33c85e040183df049d63814ef6b1b0bb7a46
|
[
"Apache-2.0"
] | 3
|
2021-06-04T09:07:48.000Z
|
2021-11-09T21:42:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# vim: encoding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from datetime import date
# import os
# import sys
PATH = 'content'
TIMEZONE = 'UTC'
DEFAULT_LANG = u'en'
AUTHOR = u'Treasurer Team'
SITENAME = u'Apache Treasurer'
SITEDOMAIN = 'treasurer.apache.org'
SITEURL = 'https://treasurer.apache.org'
# SITELOGO = 'https://treasurer.apache.org/images/logo.png'
# SITEDESC = u'<blank>'
SITEREPOSITORY = 'https://github.com/apache/treasurer-site/blob/main/content/pages/'
TRADEMARKS = u'Apache and the Apache feather logo are trademarks or registered trademarks'
CURRENTYEAR = date.today().year
# Save pages using full directory preservation
PAGES_PATHS = ['content']
# PATH_METADATA= '(?P<path_no_ext>.*)\..*'
# PAGE_SAVE_AS= '{path_no_ext}.html'
PAGE_URL = '{slug}.html'
SLUGIFY_SOURCE = 'basename'
PAGE_SAVE_AS = '{slug}.html'
# We want to serve any images
STATIC_PATHS = ['.htaccess', 'images']
# We don't use articles, but we don't want pelican to think
# that content/ contains articles.
ARTICLE_PATHS = ['articles']
# Disable these pages
ARCHIVES_SAVE_AS = ''
ARTICLE_SAVE_AS = ''
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
INDEX_SAVE_AS = ''
TAGS_SAVE_AS = ''
# Enable ATOM feed and Disable other feeds
FEED_DOMAIN = SITEURL
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Pelican Plugins
# The provided location. If the buildbot does not have a new plugin then look into requirements.txt
PLUGIN_PATHS = ['./theme/plugins']
PLUGINS = ['toc', 'pelican-gfm', 'sitemap']
# TOC Generator
TOC_HEADERS = r"h[1-6]"
# Sitemap Generator
SITEMAP = {
"exclude": ["tag/", "category/"],
"format": "xml",
"priorities": {
"articles": 0.1,
"indexes": 0.1,
"pages": 0.8
},
"changefreqs": {
"articles": "never",
"indexes": "never",
"pages": "monthly"
}
}
# Unused links
LINKS = ( )
SOCIAL = ( )
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
| 27.411215
| 99
| 0.715309
| 408
| 2,933
| 5.026961
| 0.539216
| 0.023403
| 0.026329
| 0.015602
| 0.019503
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005745
| 0.16911
| 2,933
| 106
| 100
| 27.669811
| 0.835864
| 0.502898
| 0
| 0
| 0
| 0
| 0.32158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c70f37923d6264953c0f43a70aaafcb143563524
| 10,935
|
py
|
Python
|
TurtleArt/taturtle.py
|
sugar-activities/4585-activity
|
38e6efd7b4fcb9cf820efaf7406ce7abde92406e
|
[
"MIT"
] | null | null | null |
TurtleArt/taturtle.py
|
sugar-activities/4585-activity
|
38e6efd7b4fcb9cf820efaf7406ce7abde92406e
|
[
"MIT"
] | null | null | null |
TurtleArt/taturtle.py
|
sugar-activities/4585-activity
|
38e6efd7b4fcb9cf820efaf7406ce7abde92406e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#Copyright (c) 2010,12 Walter Bender
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from random import uniform
from math import sin, cos, pi, sqrt
from gettext import gettext as _
import gtk
import cairo
from taconstants import TURTLE_LAYER, DEFAULT_TURTLE_COLORS
from tasprite_factory import SVG, svg_str_to_pixbuf
from tacanvas import wrap100, COLOR_TABLE
from sprites import Sprite
from tautils import debug_output
SHAPES = 36
def generate_turtle_pixbufs(colors):
""" Generate pixbufs for generic turtles """
shapes = []
svg = SVG()
svg.set_scale(1.0)
for i in range(SHAPES):
svg.set_orientation(i * 10)
shapes.append(svg_str_to_pixbuf(svg.turtle(colors)))
return shapes
class Turtles:
def __init__(self, sprite_list):
""" Class to hold turtles """
self.dict = dict()
self.sprite_list = sprite_list
self.default_pixbufs = []
def get_turtle(self, k, append=False, colors=None):
""" Find a turtle """
if k in self.dict:
return self.dict[k]
elif not append:
return None
else:
if colors == None:
Turtle(self, k)
elif type(colors) in [list, tuple]:
Turtle(self, k, colors)
else:
Turtle(self, k, colors.split(','))
return self.dict[k]
def get_turtle_key(self, turtle):
""" Find a turtle's name """
for k in iter(self.dict):
if self.dict[k] == turtle:
return k
return None
def turtle_count(self):
""" How many turtles are there? """
return(len(self.dict))
def add_to_dict(self, k, turtle):
""" Add a new turtle """
self.dict[k] = turtle
def remove_from_dict(self, k):
""" Delete a turtle """
if k in self.dict:
del(self.dict[k])
def show_all(self):
""" Make all turtles visible """
for k in iter(self.dict):
self.dict[k].show()
def spr_to_turtle(self, spr):
""" Find the turtle that corresponds to sprite spr. """
for k in iter(self.dict):
if spr == self.dict[k].spr:
return self.dict[k]
return None
def get_pixbufs(self):
""" Get the pixbufs for the default turtle shapes. """
if self.default_pixbufs == []:
self.default_pixbufs = generate_turtle_pixbufs(
["#008000", "#00A000"])
return(self.default_pixbufs)
class Turtle:
def __init__(self, turtles, key, turtle_colors=None):
""" The turtle is not a block, just a sprite with an orientation """
self.x = 0
self.y = 0
self.hidden = False
self.shapes = []
self.custom_shapes = False
self.type = 'turtle'
self.name = key
self.heading = 0
self.pen_shade = 50
self.pen_color = 0
self.pen_gray = 100
self.pen_size = 5
self.pen_state = True
self.label_block = None
self._prep_shapes(key, turtles, turtle_colors)
# Choose a random angle from which to attach the turtle label.
if turtles.sprite_list is not None:
self.spr = Sprite(turtles.sprite_list, 0, 0, self.shapes[0])
angle = uniform(0, pi * 4 / 3.0) # 240 degrees
w = self.shapes[0].get_width()
r = w * 0.67
# Restrict angle to the sides 30-150; 210-330
if angle > pi * 2 / 3.0:
angle += pi / 2.0 # + 90
self.label_xy = [int(r * sin(angle)),
int(r * cos(angle) + w / 2.0)]
else:
angle += pi / 6.0 # + 30
self.label_xy = [int(r * sin(angle) + w / 2.0),
int(r * cos(angle) + w / 2.0)]
else:
self.spr = None
turtles.add_to_dict(key, self)
def _prep_shapes(self, name, turtles=None, turtle_colors=None):
# If the turtle name is an int, we'll use a palette color as the
# turtle color
try:
int_key = int(name)
use_color_table = True
except ValueError:
use_color_table = False
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
elif use_color_table:
fill = wrap100(int_key)
stroke = wrap100(fill + 10)
self.colors = ['#%06x' % (COLOR_TABLE[fill]),
'#%06x' % (COLOR_TABLE[stroke])]
self.shapes = generate_turtle_pixbufs(self.colors)
else:
if turtles is not None:
self.colors = DEFAULT_TURTLE_COLORS
self.shapes = turtles.get_pixbufs()
def set_turtle_colors(self, turtle_colors):
''' reset the colors of a preloaded turtle '''
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
self.set_heading(self.heading)
def set_shapes(self, shapes, i=0):
""" Reskin the turtle """
n = len(shapes)
if n == 1 and i > 0: # set shape[i]
if i < len(self.shapes):
self.shapes[i] = shapes[0]
elif n == SHAPES: # all shapes have been precomputed
self.shapes = shapes[:]
else: # rotate shapes
if n != 1:
debug_output("%d images passed to set_shapes: ignoring" % (n),
self.tw.running_sugar)
if self.heading == 0: # rotate the shapes
images = []
w, h = shapes[0].get_width(), shapes[0].get_height()
nw = nh = int(sqrt(w * w + h * h))
for i in range(SHAPES):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, nw, nh)
context = cairo.Context(surface)
context = gtk.gdk.CairoContext(context)
context.translate(nw / 2., nh / 2.)
context.rotate(i * 10 * pi / 180.)
context.translate(-nw / 2., -nh / 2.)
context.set_source_pixbuf(shapes[0], (nw - w) / 2.,
(nh - h) / 2.)
context.rectangle(0, 0, nw, nh)
context.fill()
images.append(surface)
self.shapes = images[:]
else: # associate shape with image at current heading
j = int(self.heading + 5) % 360 / (360 / SHAPES)
self.shapes[j] = shapes[0]
self.custom_shapes = True
self.show()
def reset_shapes(self):
""" Reset the shapes to the standard turtle """
if self.custom_shapes:
self.shapes = generate_turtle_pixbufs(self.colors)
self.custom_shapes = False
def set_heading(self, heading):
""" Set the turtle heading (one shape per 360/SHAPES degrees) """
self.heading = heading
i = (int(self.heading + 5) % 360) / (360 / SHAPES)
if not self.hidden and self.spr is not None:
try:
self.spr.set_shape(self.shapes[i])
except IndexError:
self.spr.set_shape(self.shapes[0])
def set_color(self, color):
""" Set the pen color for this turtle. """
self.pen_color = color
def set_gray(self, gray):
""" Set the pen gray level for this turtle. """
self.pen_gray = gray
def set_shade(self, shade):
""" Set the pen shade for this turtle. """
self.pen_shade = shade
def set_pen_size(self, pen_size):
""" Set the pen size for this turtle. """
self.pen_size = pen_size
def set_pen_state(self, pen_state):
""" Set the pen state (down==True) for this turtle. """
self.pen_state = pen_state
def hide(self):
""" Hide the turtle. """
if self.spr is not None:
self.spr.hide()
if self.label_block is not None:
self.label_block.spr.hide()
self.hidden = True
def show(self):
""" Show the turtle. """
if self.spr is not None:
self.spr.set_layer(TURTLE_LAYER)
self.hidden = False
self.move((self.x, self.y))
self.set_heading(self.heading)
if self.label_block is not None:
self.label_block.spr.move((self.x + self.label_xy[0],
self.y + self.label_xy[1]))
self.label_block.spr.set_layer(TURTLE_LAYER + 1)
def move(self, pos):
""" Move the turtle. """
self.x, self.y = int(pos[0]), int(pos[1])
if not self.hidden and self.spr is not None:
self.spr.move(pos)
if self.label_block is not None:
self.label_block.spr.move((pos[0] + self.label_xy[0],
pos[1] + self.label_xy[1]))
return(self.x, self.y)
def get_name(self):
''' return turtle name (key) '''
return self.name
def get_xy(self):
""" Return the turtle's x, y coordinates. """
return(self.x, self.y)
def get_heading(self):
""" Return the turtle's heading. """
return(self.heading)
def get_color(self):
""" Return the turtle's color. """
return(self.pen_color)
def get_gray(self):
""" Return the turtle's gray level. """
return(self.pen_gray)
def get_shade(self):
""" Return the turtle's shade. """
return(self.pen_shade)
def get_pen_size(self):
""" Return the turtle's pen size. """
return(self.pen_size)
def get_pen_state(self):
""" Return the turtle's pen state. """
return(self.pen_state)
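# Usage sketch (editor's addition, not part of the original module), assuming a
# sugar/gtk sprite list is available as `sprite_list` (hypothetical placeholder):
#
# turtles = Turtles(sprite_list)
# t = turtles.get_turtle('Yertle', append=True, colors='#ff0000,#aa0000')
# t.set_heading(90)      # picks one of the 36 precomputed 10-degree shapes
# t.move((100, 100))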
| 34.936102
| 78
| 0.561225
| 1,436
| 10,935
| 4.162953
| 0.197075
| 0.025594
| 0.016561
| 0.021746
| 0.222315
| 0.162763
| 0.143694
| 0.088491
| 0.073938
| 0.073938
| 0
| 0.019635
| 0.333973
| 10,935
| 312
| 79
| 35.048077
| 0.801181
| 0.219021
| 0
| 0.219626
| 0
| 0
| 0.008529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.149533
| false
| 0.004673
| 0.046729
| 0
| 0.247664
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c71003847371f17bbe96951b791e894ed7483c4a
| 1,384
|
py
|
Python
|
django_backend/group.py
|
holg/django_backend
|
6cef76a378664e6621619862e6db476788a58992
|
[
"BSD-3-Clause"
] | null | null | null |
django_backend/group.py
|
holg/django_backend
|
6cef76a378664e6621619862e6db476788a58992
|
[
"BSD-3-Clause"
] | null | null | null |
django_backend/group.py
|
holg/django_backend
|
6cef76a378664e6621619862e6db476788a58992
|
[
"BSD-3-Clause"
] | null | null | null |
try:
from django.forms.utils import pretty_name
except ImportError:
from django.forms.forms import pretty_name
from django.template import Context
from django.template.loader import render_to_string
from .compat import context_flatten
class Group(list):
"""
A simplistic representation of backends that are related and should be
displayed as one "group" in the backend (e.g. as one box in the sidebar).
"""
template_name = 'django_backend/_group.html'
def __init__(self, id, name=None, position=0, template_name=None):
self.id = id
if name is None:
name = pretty_name(id)
self.template_name = template_name or self.template_name
self.name = name
self.position = position
super(Group, self).__init__()
@property
def backends(self):
return list(self)
def get_context_data(self, context, **kwargs):
data = {
'group': self,
}
data.update(kwargs)
return data
def get_template_name(self):
return self.template_name
def render(self, context):
context_data = {}
if isinstance(context, Context):
context_data.update(context_flatten(context))
context_data = self.get_context_data(context, **context_data)
return render_to_string(self.get_template_name(), context_data)
| 28.833333
| 77
| 0.66474
| 177
| 1,384
| 4.983051
| 0.344633
| 0.108844
| 0.081633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000965
| 0.251445
| 1,384
| 47
| 78
| 29.446809
| 0.850386
| 0.104046
| 0
| 0
| 0
| 0
| 0.02541
| 0.021311
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.176471
| 0.058824
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c711b732931b1daa135dbab87c710f6b0e8237b0
| 1,444
|
py
|
Python
|
server/main.py
|
KejiaQiang/Spicy_pot_search
|
72aaa9618e54178da513371802c2bcb751037bb0
|
[
"MIT"
] | 1
|
2021-03-04T09:02:05.000Z
|
2021-03-04T09:02:05.000Z
|
server/main.py
|
yanansong0930/Spicy_pot_search
|
72aaa9618e54178da513371802c2bcb751037bb0
|
[
"MIT"
] | null | null | null |
server/main.py
|
yanansong0930/Spicy_pot_search
|
72aaa9618e54178da513371802c2bcb751037bb0
|
[
"MIT"
] | 1
|
2021-03-04T08:59:02.000Z
|
2021-03-04T08:59:02.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, request, abort, render_template
from datetime import timedelta
import pymysql
from search import start_search, decorate
page_dir = "E:/WEBPAGES_RAW"
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
connection = pymysql.connect(host="localhost",port=3306,user="root",db="spicy_pot")
cursor = connection.cursor()
@app.route('/')
def homepage():
return render_template("root.html")
@app.route('/search')
def search():
word = request.args.get('s')
page = int(request.args.get('p'))
all_res = start_search(word,cursor)
if len(all_res) == 0:
return render_template("result.html",result={"word":word,"pages":-1,"currentPage":1,"res":[]})
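    # Paginate 10 hits per page: pages is the ceiling of len(all_res) / 10 via
    # integer arithmetic, and the slice below picks the hits for page 'page'.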
pages = ((len(all_res)-1)//10) + 1
res = decorate(all_res[(page-1)*10:page*10])
content = {"word":word,"pages":pages,"currentPage":page,"res":res}
return render_template("result.html",result=content)
@app.route('/cache')
def cache():
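    # Copy the raw cached page (folder p, file c under page_dir) into
    # templates/temp.html so it can be served through render_template.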
p = request.args.get('p')
c = request.args.get('c')
read = open(page_dir+"/"+p+"/"+c,'r',encoding="utf-8")
save = open("templates/temp.html",'w',encoding="utf-8")
for line in read:
save.write(line)
read.close()
save.close()
return render_template("temp.html")
app.run(host='0.0.0.0',port=80,debug=True)
| 29.469388
| 103
| 0.637812
| 205
| 1,444
| 4.380488
| 0.419512
| 0.077951
| 0.089087
| 0.033408
| 0.080178
| 0.080178
| 0
| 0
| 0
| 0
| 0
| 0.021757
| 0.172438
| 1,444
| 48
| 104
| 30.083333
| 0.729707
| 0.026316
| 0
| 0
| 0
| 0
| 0.155605
| 0.018437
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.111111
| 0.027778
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c711e0dd9090b2b45a4e1e0eca15dbcffe106551
| 5,355
|
py
|
Python
|
examples/3d/subduction/viz/plot_dispwarp.py
|
cehanagan/pylith
|
cf5c1c34040460a82f79b6eb54df894ed1b1ee93
|
[
"MIT"
] | 93
|
2015-01-08T16:41:22.000Z
|
2022-02-25T13:40:02.000Z
|
examples/3d/subduction/viz/plot_dispwarp.py
|
sloppyjuicy/pylith
|
ac2c1587f87e45c948638b19560813d4d5b6a9e3
|
[
"MIT"
] | 277
|
2015-02-20T16:27:35.000Z
|
2022-03-30T21:13:09.000Z
|
examples/3d/subduction/viz/plot_dispwarp.py
|
sloppyjuicy/pylith
|
ac2c1587f87e45c948638b19560813d4d5b6a9e3
|
[
"MIT"
] | 71
|
2015-03-24T12:11:08.000Z
|
2022-03-03T04:26:02.000Z
|
#!/usr/bin/env pvpython
# -*- Python -*- (syntax highlighting)
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
# Plot the undeformed domain as a gray wireframe and then the deformed
# domain, colored by the value of the x-displacement.
# User-specified parameters.
#
# Default values for parameters. To use different values, overwrite
# them in the ParaView Python shell or on the command line. For
# example, set OUTPUT_DIR to the absolute path if not starting
# ParaView from the terminal shell where you ran PyLith:
#
# import os
# OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output")
DEFAULTS = {
"OUTPUT_DIR": "output",
"SIM": "step02",
"WARP_SCALE": 10.0e+3,
"FIELD": "displacement",
"FIELD_COMPONENT": "Magnitude",
"TIMESTEP": 0, # Use 0 for first, -1 for last.
}
# ----------------------------------------------------------------------
from paraview.simple import *
import os
def visualize(parameters):
# Disable automatic camera reset on "Show"
paraview.simple._DisableFirstRenderCameraReset()
# Read data
filename = os.path.join(parameters.output_dir, "%s-domain.xmf" % parameters.sim)
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
dataDomain = XDMFReader(FileNames=[filename])
RenameSource("%s-domain" % parameters.sim, dataDomain)
scene = GetAnimationScene()
scene.UpdateAnimationUsingDataTimeSteps()
if parameters.timestep == -1:
scene.GoToLast()
view = GetActiveViewOrCreate('RenderView')
# Gray wireframe for undeformed domain.
domainDisplay = Show(dataDomain, view)
domainDisplay.Representation = 'Wireframe'
domainDisplay.AmbientColor = [0.5, 0.5, 0.5]
# Warp domain to show deformation
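    # WarpByVector offsets every point by its displacement vector times
    # ScaleFactor, so WARP_SCALE exaggerates the deformation enough to see.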
warp = WarpByVector(Input=dataDomain)
warp.Vectors = ['POINTS', 'displacement']
warp.ScaleFactor = parameters.warp_scale
warpDisplay = Show(warp, view)
ColorBy(warpDisplay, ('POINTS', parameters.field, parameters.field_component))
warpDisplay.RescaleTransferFunctionToDataRange(True)
warpDisplay.SetScalarBarVisibility(view, True)
warpDisplay.SetRepresentationType('Surface With Edges')
# Rescale color bar to exactly fit the current data range
warpDisplay.RescaleTransferFunctionToDataRange(False, False)
# Customize colorbar
displacementLUT = GetColorTransferFunction(parameters.field)
colorbar = GetScalarBar(displacementLUT, view)
if parameters.field_component.lower() == "magnitude":
colorbar.Title = "Displacement Mag. (m)"
else:
colorbar.Title = "%s-displacement (m)" % parameters.field_component.lower()
colorbar.ComponentTitle = ""
# Annotate time
tstamp = AnnotateTimeFilter(warp)
tstamp.Format = 'Time: %2.0f yr'
tstamp.Scale = 3.168808781402895e-08 # seconds to years
tstampDisplay = Show(tstamp, view)
tstampDisplay.FontFamily = "Courier"
tstampDisplay.FontSize = 14
view.ResetCamera()
view.Update()
Render()
class Parameters(object):
keys = ("OUTPUT_DIR", "SIM", "WARP_SCALE", "FIELD", "FIELD_COMPONENT", "TIMESTEP")
def __init__(self):
globalVars = globals()
for key in Parameters.keys:
if key in globalVars.keys():
setattr(self, key.lower(), globalVars[key])
else:
setattr(self, key.lower(), DEFAULTS[key])
return
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Running from outside the ParaView GUI via pvpython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output-dir", action="store", dest="output_dir", default=DEFAULTS["OUTPUT_DIR"])
parser.add_argument("--sim", action="store", dest="sim", default=DEFAULTS["SIM"])
parser.add_argument("--warp-scale", action="store", type=float, dest="warp_scale", default=DEFAULTS["WARP_SCALE"])
parser.add_argument("--field", action="store", dest="field", default=DEFAULTS["FIELD"])
parser.add_argument("--component", action="store", dest="field_component", default=DEFAULTS["FIELD_COMPONENT"])
parser.add_argument("--timestep", action="store", dest="timestep", default=-1)
parser.add_argument("--screenshot", action="store", dest="screenshot")
args = parser.parse_args()
visualize(args)
view = GetRenderView()
view.CameraPosition = [78002.89373974672, -1531813.1739094853, 595774.2094961794]
view.CameraFocalPoint = [-45014.6313325238, 149523.68421156122, -335271.271063906]
view.CameraViewUp = [0.0, 0.0, 1.0]
view.ViewSize = [960, 540]
view.Update()
if args.screenshot:
WriteImage(args.screenshot)
Interact()
else:
# Running inside the ParaView GUI
visualize(Parameters())
# End of file
| 35
| 118
| 0.651727
| 570
| 5,355
| 6.052632
| 0.45614
| 0.02087
| 0.034493
| 0.002319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035463
| 0.178525
| 5,355
| 152
| 119
| 35.230263
| 0.748807
| 0.285154
| 0
| 0.060241
| 0
| 0
| 0.139688
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024096
| false
| 0
| 0.036145
| 0
| 0.096386
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c714251263633c1447c106182ffec957c2c483cc
| 1,775
|
py
|
Python
|
script/upload-checksums.py
|
fireball-x/atom-shell
|
d229338e40058a9b4323b2544f62818a3c55748c
|
[
"MIT"
] | 4
|
2016-04-02T14:53:54.000Z
|
2017-07-26T05:47:43.000Z
|
script/upload-checksums.py
|
cocos-creator/atom-shell
|
d229338e40058a9b4323b2544f62818a3c55748c
|
[
"MIT"
] | null | null | null |
script/upload-checksums.py
|
cocos-creator/atom-shell
|
d229338e40058a9b4323b2544f62818a3c55748c
|
[
"MIT"
] | 2
|
2015-07-18T09:31:03.000Z
|
2019-12-24T09:55:03.000Z
|
#!/usr/bin/env python
import argparse
import hashlib
import os
import tempfile
from lib.config import s3_config
from lib.util import download, rm_rf, s3put
DIST_URL = 'https://atom.io/download/atom-shell/'
def main():
args = parse_args()
url = DIST_URL + args.version + '/'
directory, files = download_files(url, get_files_list(args.version))
checksums = [
create_checksum('sha1', directory, 'SHASUMS.txt', files),
create_checksum('sha256', directory, 'SHASUMS256.txt', files)
]
bucket, access_key, secret_key = s3_config()
s3put(bucket, access_key, secret_key, directory,
'atom-shell/dist/{0}'.format(args.version), checksums)
rm_rf(directory)
def parse_args():
parser = argparse.ArgumentParser(description='upload sumsha file')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
def get_files_list(version):
return [
'node-{0}.tar.gz'.format(version),
'iojs-{0}.tar.gz'.format(version),
'node.lib',
'x64/node.lib',
'win-x86/iojs.lib',
'win-x64/iojs.lib',
]
def download_files(url, files):
directory = tempfile.mkdtemp(prefix='electron-tmp')
return directory, [
download(f, url + f, os.path.join(directory, f))
for f in files
]
def create_checksum(algorithm, directory, filename, files):
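    # Hash every downloaded file with the given algorithm and write one
    # "<hexdigest> <relative path>" line per file into a checksum manifest.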
lines = []
for path in files:
h = hashlib.new(algorithm)
with open(path, 'r') as f:
h.update(f.read())
lines.append(h.hexdigest() + ' ' + os.path.relpath(path, directory))
checksum_file = os.path.join(directory, filename)
with open(checksum_file, 'w') as f:
f.write('\n'.join(lines) + '\n')
return checksum_file
if __name__ == '__main__':
import sys
sys.exit(main())
| 23.666667
| 75
| 0.668169
| 242
| 1,775
| 4.760331
| 0.409091
| 0.023438
| 0.027778
| 0.036458
| 0.074653
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013746
| 0.180282
| 1,775
| 74
| 76
| 23.986486
| 0.778007
| 0.011268
| 0
| 0
| 0
| 0
| 0.141961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0.132075
| 0.018868
| 0.301887
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7162d1c243872610bbf29a5583204c35093859d
| 1,691
|
py
|
Python
|
src/json_sort/lib.py
|
cdumay/json-sort
|
a76fe2deaad649264e8ca0d1cc096d9741c60a04
|
[
"Apache-2.0"
] | 3
|
2017-01-03T14:36:25.000Z
|
2021-03-06T05:42:08.000Z
|
src/json_sort/lib.py
|
cdumay/json-sort
|
a76fe2deaad649264e8ca0d1cc096d9741c60a04
|
[
"Apache-2.0"
] | null | null | null |
src/json_sort/lib.py
|
cdumay/json-sort
|
a76fe2deaad649264e8ca0d1cc096d9741c60a04
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <cedric.dumay@gmail.com>
"""
import logging
import sys, os, json
from cdumay_rest_client.client import RESTClient
from cdumay_rest_client.exceptions import NotFound, HTTPException
class NoSuchFile(NotFound):
"""NoSuchFile"""
def oncritical(exc):
"""description of oncritical"""
if isinstance(exc, HTTPException):
logging.critical(exc.message)
else:
logging.critical(str(exc))
sys.exit(1)
def file_exists(filename):
"""description of file_exists"""
filename = os.path.realpath(filename)
logging.debug("Checking file: {}".format(filename))
if not os.path.exists(filename):
raise NoSuchFile(
message="No such file '{}'".format(filename),
extra=dict(filename=filename)
)
return filename
def file_write(dst, data):
"""description of file_write"""
if dst:
dst = os.path.realpath(dst)
logging.debug("Saving to: {}".format(dst))
out = open(dst, "w")
else:
logging.debug("Current std will be used")
out = sys.stdout
json.dump(
data, out, ensure_ascii=False, sort_keys=True, indent=2,
separators=(',', ': ')
)
def from_local(src, dst=None):
"""description of from_local"""
try:
file_write(dst, json.load(open(file_exists(src), "r")))
except Exception as exc:
oncritical(exc)
def from_remote(src, dst=None):
"""description of fromurl"""
try:
file_write(
dst, RESTClient(server=src).do_request(method="GET", path="")
)
except Exception as exc:
oncritical(exc)
| 23.486111
| 73
| 0.622708
| 204
| 1,691
| 5.078431
| 0.470588
| 0.062741
| 0.034749
| 0.03861
| 0.108108
| 0.063707
| 0
| 0
| 0
| 0
| 0
| 0.002326
| 0.237138
| 1,691
| 71
| 74
| 23.816901
| 0.800775
| 0.138971
| 0
| 0.181818
| 0
| 0
| 0.05583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.090909
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c7165074ee0affcd71c302a41edf2c2139ea9a06
| 4,484
|
py
|
Python
|
test/test_create_dataset.py
|
gregstarr/ttools
|
fc8dcbf094370e9885311126724697830167d931
|
[
"MIT"
] | null | null | null |
test/test_create_dataset.py
|
gregstarr/ttools
|
fc8dcbf094370e9885311126724697830167d931
|
[
"MIT"
] | null | null | null |
test/test_create_dataset.py
|
gregstarr/ttools
|
fc8dcbf094370e9885311126724697830167d931
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
import apexpy
import tempfile
import os
import h5py
from ttools import create_dataset, config, io, utils
map_periods = [np.timedelta64(10, 'm'), np.timedelta64(30, 'm'), np.timedelta64(1, 'h'), np.timedelta64(2, 'h')]
@pytest.fixture
def times():
yield np.datetime64('2010-01-01T00:00:00') + np.arange(100) * np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_assemble_args(times, map_period):
mlat = np.arange(10)
mlt = np.arange(10)
ssmlon = np.random.rand(times.shape[0])
mlt, mlat = np.meshgrid(mlt, mlat)
mlat = mlat[None, :, :] * np.ones((times.shape[0], 1, 1))
mlt = mlt[None, :, :] * np.ones((times.shape[0], 1, 1))
tec = np.random.rand(*mlat.shape)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges, bin_edges]
args = create_dataset.assemble_binning_args(mlat, mlt, tec, times, ssmlon, bins, map_period)
assert len(args) == np.ceil((times[-1] - times[0]) / map_period)
assert args[0][3][0] == times[0]
assert args[-1][3][0] + map_period >= times[-1]
assert args[-1][3][0] < times[-1]
assert args[-1][3][-1] == times[-1]
for i in range(len(args) - 1):
assert args[i][3][-1] == args[i + 1][3][0] - np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_process_file(madrigal_data_dir, map_period):
"""not that good of a test: wait for bugs and add asserts
"""
start_date = np.datetime64('2012-06-08')
end_date = np.datetime64('2012-06-13')
converter = apexpy.Apex()
mlat, mlon = create_dataset.get_mag_grid(config.madrigal_lat, config.madrigal_lon, converter)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges + 30, bin_edges]
times, tec, ssmlon, n, std = create_dataset.process_file(start_date, end_date, mlat, mlon, converter, bins,
map_period, madrigal_data_dir)
assert times.shape[0] == tec.shape[0] == n.shape[0] == std.shape[0] == ssmlon.shape[0]
assert np.isnan(tec[times < np.datetime64('2012-06-10')]).all()
assert np.isnan(tec[times >= np.datetime64('2012-06-11')]).all()
assert np.isfinite(tec[(times >= np.datetime64('2012-06-10')) * (times < np.datetime64('2012-06-11'))]).any()
assert not np.isnan(tec).all(axis=(0, 1)).any()
assert not np.isnan(tec).all(axis=(0, 2)).any()
def test_calculate_bins():
mlat = np.arange(10)[None, :, None] * np.ones((1, 1, 10))
mlt = np.arange(10)[None, None, :] * np.ones((1, 10, 1))
tec = np.zeros((1, 10, 10))
tec[0, 0, 0] = 10
tec[0, 0, -1] = 20
tec[0, -1, 0] = 30
times = ssmlon = np.ones(1) * np.nan
be = np.array([-.5, 4.5, 9.5])
bins = [be, be]
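    # Each bin spans 5 grid cells per axis (25 cells total), so the expected
    # bin means below are the single nonzero TEC value divided by 25.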
out_t, out_tec, out_ssm, out_n, out_std = create_dataset.calculate_bins(mlat.ravel(), mlt.ravel(), tec.ravel(),
times, ssmlon, bins)
assert np.isnan(out_t)
assert np.isnan(out_ssm)
assert out_tec.shape == (2, 2)
assert out_tec[0, 0] == 10 / 25
assert out_tec[0, 1] == 20 / 25
assert out_tec[1, 0] == 30 / 25
assert out_tec[1, 1] == 0
assert np.all(out_n == 25)
def test_process_dataset():
start_date = np.datetime64("2012-03-07")
end_date = np.datetime64("2012-03-08")
file_dt = np.timedelta64(12, 'h')
mlat_bins = np.array([35, 45, 55, 65])
mlt_bins = np.array([-1.5, -.5, .5, 1.5])
def fn_pattern(date):
return f"{date.astype('datetime64[h]')}.h5"
dates = np.arange(start_date, end_date, file_dt)
with tempfile.TemporaryDirectory() as tempdir:
files = [os.path.join(tempdir, fn_pattern(d)) for d in dates]
create_dataset.process_dataset(start_date, end_date, mlat_bins, mlt_bins, apex_dt=np.timedelta64(365, 'D'),
file_dt=file_dt, output_dir=tempdir, file_name_pattern=fn_pattern)
grid_fn = os.path.join(tempdir, 'grid.h5')
assert os.path.exists(grid_fn)
with h5py.File(grid_fn, 'r') as f:
mlt_vals = f['mlt'][()]
mlat_vals = f['mlat'][()]
assert np.all(mlt_vals == [-1, 0, 1])
assert np.all(mlat_vals == [40, 50, 60])
for f, d in zip(files, dates):
assert os.path.exists(f)
tec, times, ssmlon, n, std = io.open_tec_file(f)
assert tec.shape == (12, 3, 3)
assert utils.datetime64_to_timestamp(d) == times[0]
| 40.396396
| 115
| 0.599242
| 703
| 4,484
| 3.6899
| 0.216216
| 0.041635
| 0.049345
| 0.041635
| 0.277949
| 0.197764
| 0.173477
| 0.161912
| 0.124904
| 0.047803
| 0
| 0.080589
| 0.227922
| 4,484
| 110
| 116
| 40.763636
| 0.668689
| 0.012043
| 0
| 0.043956
| 0
| 0
| 0.039602
| 0.007468
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.065934
| false
| 0
| 0.076923
| 0.010989
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c719cc42bfa09eeceed2d7963f0cd71faeceedf7
| 14,277
|
py
|
Python
|
mdemanipulation/src/mdeoperation.py
|
modelia/ai-for-model-manipulation
|
0b15b9d59b0f6009a5709b20db4e55b7d511ac38
|
[
"BSD-3-Clause"
] | null | null | null |
mdemanipulation/src/mdeoperation.py
|
modelia/ai-for-model-manipulation
|
0b15b9d59b0f6009a5709b20db4e55b7d511ac38
|
[
"BSD-3-Clause"
] | 1
|
2022-01-10T14:16:48.000Z
|
2022-01-10T14:16:48.000Z
|
mdemanipulation/src/mdeoperation.py
|
modelia/ai-for-model-manipulation
|
0b15b9d59b0f6009a5709b20db4e55b7d511ac38
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2
import math
import os
import random
import sys
import time
import logging
import argparse
import numpy as np
from six.moves import xrange
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch import cuda
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
import data_utils
import network
import cPickle as pickle
import datetime
def create_model(source_vocab_size, target_vocab_size, source_vocab_list, target_vocab_list, dropout_rate,
max_source_len, max_target_len):
model = network.Tree2TreeModel(
source_vocab_size,
target_vocab_size,
source_vocab_list,
target_vocab_list,
args.max_depth,
args.embedding_size,
args.hidden_size,
args.num_layers,
args.max_gradient_norm,
args.batch_size,
args.learning_rate,
dropout_rate,
args.no_pf,
args.no_attention)
if cuda.is_available():
model.cuda()
if args.load_model:
print("Reading model parameters from %s" % args.load_model)
pretrained_model = torch.load(args.load_model)
model.load_state_dict(pretrained_model)
else:
print("Created model with fresh parameters.")
model.init_weights(args.param_init)
return model
def step_tree2tree(model, encoder_inputs, init_decoder_inputs, feed_previous=False):
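    # feed_previous=True decodes from the model's own predictions (dropout off,
    # no optimizer step, predicted trees returned); feed_previous=False
    # teacher-forces, backpropagates and takes one optimizer step.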
if feed_previous == False:
model.dropout_rate = args.dropout_rate
else:
model.dropout_rate = 0.0
predictions_per_batch, prediction_managers = model(encoder_inputs, init_decoder_inputs, feed_previous=feed_previous)
total_loss = None
for (predictions, target) in predictions_per_batch:
loss = model.loss_function(predictions, target)
if total_loss is None:
total_loss = loss
else:
total_loss += loss
total_loss /= len(encoder_inputs)
if feed_previous:
output_predictions = []
for prediction_manager in prediction_managers:
output_predictions.append(model.tree2seq(prediction_manager, 1))
if feed_previous == False:
model.optimizer.zero_grad()
total_loss.backward()
if args.max_gradient_norm > 0:
clip_grad_norm(model.parameters(), args.max_gradient_norm)
model.optimizer.step()
for idx in range(len(encoder_inputs)):
encoder_inputs[idx].clear_states()
if feed_previous:
return total_loss.data[0], output_predictions
else:
return total_loss.data[0]
def evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
test_loss = 0
acc_tokens = 0
tot_tokens = 0
acc_programs = 0
tot_programs = len(test_set)
res = []
for idx in xrange(0, len(test_set), args.batch_size):
encoder_inputs, decoder_inputs = model.get_batch(test_set, start_idx=idx)
eval_loss, raw_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
test_loss += len(encoder_inputs) * eval_loss
for i in xrange(len(encoder_inputs)):
if idx + i >= len(test_set):
break
current_output = []
for j in xrange(len(raw_outputs[i])):
current_output.append(raw_outputs[i][j])
current_source, current_target, current_source_manager, current_target_manager = test_set[idx + i]
current_target_print = data_utils.serialize_tree_with_vocabulary(current_target, target_vocab)
current_target = data_utils.serialize_tree(current_target)
current_source_print = data_utils.serialize_tree_with_vocabulary(current_source, source_vocab)
current_source = data_utils.serialize_tree(current_source)
# print("Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
# print((datetime.datetime.now() - start_evaluation_datetime))
res.append((current_source, current_target, current_output))
current_output_print = data_utils.serialize_seq_with_vocabulary(current_output, target_vocab)
# print("--Current source / Current target / Current output--")
print(current_source_print)
print(current_target_print)
print(current_output_print)
# print(source_vocab)
print("---")
tot_tokens += len(current_target)
all_correct = 1
wrong_tokens = 0
for j in xrange(len(current_output)):
if j >= len(current_target):
break
if current_output[j] == current_target[j]:
acc_tokens += 1
else:
all_correct = 0
wrong_tokens += 1
acc_programs += all_correct
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
test_loss /= tot_programs
print(" eval: loss %.2f" % test_loss)
print(" eval: accuracy of tokens %.2f" % (acc_tokens * 1.0 / tot_tokens))
print(" eval: accuracy of programs %.2f" % (acc_programs * 1.0 / tot_programs))
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
def train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, no_train):
    train_model = not no_train
    time_training = 0
# build_from_scratch = True;
# pretrained_model_path = "/home/lola/nn/neuralnetwork.pth";
if (train_model):
print ("Reading training and val data :")
train_set = data_utils.prepare_data(training_dataset, source_vocab, target_vocab)
val_set = data_utils.prepare_data(validation_dataset, source_vocab, target_vocab)
if not os.path.isdir(args.train_dir_checkpoints):
os.makedirs(args.train_dir_checkpoints)
start_time = time.time()
start_datetime = datetime.datetime.now()
# if (build_from_scratch):
print("Creating %d layers of %d units." % (args.num_layers, args.hidden_size))
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, args.dropout_rate,
args.max_source_len, args.max_target_len)
# else:
# print("Loading pretrained model")
# pretrained_model = torch.load(pretrained_model_path)
# model.load_state_dict(pretrained_model)
print("Training model")
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
training_dataset_size = len(train_set)
for epoch in range(args.num_epochs):
print("epoch: %s/%s" % (epoch+1, args.num_epochs))
batch = 0
random.shuffle(train_set)
for batch_idx in range(0, training_dataset_size, args.batch_size):
batch += 1
start_time = time.time()
encoder_inputs, decoder_inputs = model.get_batch(train_set, start_idx=batch_idx)
step_loss = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=False)
step_time += (time.time() - start_time) / args.steps_per_checkpoint
loss += step_loss / args.steps_per_checkpoint
current_step += 1
print(" batch: %s/%s" % (batch, training_dataset_size/args.batch_size))
if current_step % args.learning_rate_decay_steps == 0 and model.learning_rate > 0.0001:
model.decay_learning_rate(args.learning_rate_decay_factor)
if current_step % args.steps_per_checkpoint == 0:
print ("learning rate %.4f step-time %.2f loss "
"%.2f" % (model.learning_rate, step_time, loss))
previous_losses.append(loss)
ckpt_path = os.path.join(args.train_dir_checkpoints, "translate_" + str(current_step) + ".ckpt")
ckpt = model.state_dict()
torch.save(ckpt, ckpt_path)
step_time, loss = 0.0, 0.0
encoder_inputs, decoder_inputs = model.get_batch(val_set, start_idx=0)
eval_loss, decoder_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
print(" eval: loss %.2f" % eval_loss)
sys.stdout.flush()
time_training = (datetime.datetime.now() - start_datetime)
print("Saving model")
torch.save(model.state_dict(), "/home/lola/nn/neuralnetwork.pth")
    else:  # not train_model
print("Loading the pretrained model")
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list,
args.dropout_rate,
args.max_source_len, args.max_target_len)
print("Evaluating model")
start_evaluation_datetime = datetime.datetime.now()
test_dataset = json.load(open(args.test_dataset, 'r'))
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
if (train_model):
print("Training time: %s seconds" % time_training)
print("Total Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
def test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, 0.0,
args.max_source_len, args.max_target_len)
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
parser = argparse.ArgumentParser()
parser.add_argument('--param_init', type=float, default=0.1,
help='Parameters are initialized over uniform distribution in (-param_init, param_init)')
parser.add_argument('--num_epochs', type=int, default=30, help='number of training epochs') #default 30
parser.add_argument('--learning_rate', type=float, default=0.005, # default 0.005
help='learning rate')
parser.add_argument('--learning_rate_decay_factor', type=float, default=0.8,
help='learning rate decays by this much')
parser.add_argument('--learning_rate_decay_steps', type=int, default=2000, # default=2000
help='decay the learning rate after certain steps')
parser.add_argument('--max_gradient_norm', type=float, default=5.0,
help='clip gradients to this norm')
parser.add_argument('--batch_size', type=int, default=64, #default 100
help='batch size')
parser.add_argument('--max_depth', type=int, default=100,
help='max depth for tree models')
parser.add_argument('--hidden_size', type=int, default=256,
help='size of each model layer')
parser.add_argument('--embedding_size', type=int, default=256,
help='size of the embedding')
parser.add_argument('--dropout_rate', type=float, default=0.75, # default=0.5
help='dropout rate')
parser.add_argument('--num_layers', type=int, default=1, # default=1,
help='number of layers in the model')
parser.add_argument('--source_vocab_size', type=int, default=0,
help='source vocabulary size (0: no limit)')
parser.add_argument('--target_vocab_size', type=int, default=0,
help='target vocabulary size (0: no limit)')
parser.add_argument('--train_dir_checkpoints', type=str, default='/home/lola/nn/checkpoints', # default='../model_ckpts/tree2tree/',
help='training directory - checkpoints')
parser.add_argument('--training_dataset', type=str, default='/home/lola/nn/models_train.json', # default='../data/CS-JS/BL/preprocessed_progs_train.json',
help='training dataset path')
parser.add_argument('--validation_dataset', type=str, default='/home/lola/nn/models_valid.json', #default='../data/CS-JS/BL/preprocessed_progs_valid.json',
help='validation dataset path')
parser.add_argument('--test_dataset', type=str, default='/home/lola/nn/models_test.json', #default='../data/CS-JS/BL/preprocessed_progs_test.json',
help='test dataset path')
parser.add_argument('--load_model', type=str, default='/home/lola/nn/neuralnetwork.pth', # default=None
help='path to the pretrained model')
parser.add_argument('--vocab_filename', type=str, default=None,
help='filename for the vocabularies')
parser.add_argument('--steps_per_checkpoint', type=int, default=500,
help='number of training steps per checkpoint')
parser.add_argument('--max_source_len', type=int, default=115,
help='max length for input')
parser.add_argument('--max_target_len', type=int, default=315,
help='max length for output')
parser.add_argument('--test', action='store_true', help='set to true for testing')
parser.add_argument('--no_attention', action='store_true', help='set to true to disable attention')
parser.add_argument('--no_pf', action='store_true', help='set to true to disable parent attention feeding')
parser.add_argument('--no_train', help='set to true to prevent the network from training', action='store_true')
args = parser.parse_args()
def main():
if args.no_attention:
args.no_pf = True
training_dataset = json.load(open(args.training_dataset, 'r'))
source_vocab, target_vocab, source_vocab_list, target_vocab_list = data_utils.build_vocab(training_dataset, args.vocab_filename)
if args.test:
test_dataset = json.load(open(args.test_dataset, 'r'))
test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
else:
validation_dataset = json.load(open(args.validation_dataset, 'r'))
# print("Val data %s" % validation_dataset)
train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, args.no_train)
main()
| 43.794479
| 155
| 0.665826
| 1,823
| 14,277
| 4.927592
| 0.134942
| 0.04041
| 0.051097
| 0.030391
| 0.397863
| 0.32606
| 0.280975
| 0.254592
| 0.189246
| 0.16431
| 0
| 0.010943
| 0.231911
| 14,277
| 325
| 156
| 43.929231
| 0.808225
| 0.062618
| 0
| 0.118577
| 0
| 0
| 0.141114
| 0.020886
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023715
| false
| 0
| 0.079051
| 0
| 0.114625
| 0.102767
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|