| column | type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

hexsha: 1f91353a20a4df1f47f560adb81212d32083f6b2 | size: 6,137 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | build_loss.py | nightinwhite/key_target | 465be12aee4673823582ed82ad935b5d8ad60990 | ["Apache-2.0"] | null | null | null |
| max_issues | build_loss.py | nightinwhite/key_target | 465be12aee4673823582ed82ad935b5d8ad60990 | ["Apache-2.0"] | null | null | null |
| max_forks | build_loss.py | nightinwhite/key_target | 465be12aee4673823582ed82ad935b5d8ad60990 | ["Apache-2.0"] | null | null | null |

content:

```python
import tensorflow as tf
from tensorflow.python.platform.flags import FLAGS
def smooth_l1(x):
l2 = 0.5 * (x**2.0)
l1 = tf.abs(x) - 0.5
condition = tf.less(tf.abs(x), 1.0)
condition = tf.to_float(condition)
re = condition * l2 + (1 - condition) * l1
return re
def build_loss(pred_labels, pred_locs, anno_labels, anno_locs, anno_masks, anno_logist_length):
with tf.variable_scope("Loss"):
loss_alpha = FLAGS.loss_alpha
pred_top_labels = tf.nn.softmax(pred_labels)
pred_top_labels = tf.reduce_max(pred_top_labels, -1)
positives_mask = 1 - anno_masks
positives_num = tf.reduce_sum(positives_mask, axis=1)
negatives_num = positives_num * FLAGS.negatives_scale
negatives_num = tf.minimum(negatives_num, anno_logist_length*6)
negatives_num = tf.to_int32(negatives_num)
pred_negatives_top_labels = pred_top_labels * (anno_masks)
pred_negatives_min_value = []
for i in range(FLAGS.batch_size):
tmp_pred_negatives_min_value, _ = tf.nn.top_k(pred_negatives_top_labels[i, :], negatives_num[i], True)
pred_negatives_min_value.append(tmp_pred_negatives_min_value[-1])
pred_negatives_min_value = tf.stack(pred_negatives_min_value)
pred_negatives_min_value = tf.expand_dims(pred_negatives_min_value, -1)
pred_negatives_mask = pred_negatives_top_labels - pred_negatives_min_value
pred_negatives_mask = pred_negatives_mask >= 0
pred_negatives_mask = tf.cast(pred_negatives_mask, tf.float32)
active_mask = positives_mask + pred_negatives_mask
class_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred_labels,
labels=anno_labels) * active_mask
class_loss = tf.reduce_sum(class_loss, axis=1) / (1e-5 + tf.reduce_sum(active_mask, axis=1))
sum_class_loss = tf.reduce_mean(class_loss)
loc_loss = tf.reduce_sum(smooth_l1(pred_locs - anno_locs), axis=2) * active_mask
loc_loss = tf.reduce_sum(loc_loss, axis=1) / (1e-5 + tf.reduce_sum(active_mask, axis=1))* 10
sum_loc_loss = tf.reduce_mean(loc_loss)
total_loss = tf.reduce_mean(loss_alpha * class_loss + (1.0 - loss_alpha) * loc_loss) * 2
acc = tf.reduce_sum(tf.to_float(tf.equal(tf.to_int32(tf.argmax(pred_labels, -1)), anno_labels))*(1 - anno_masks))
acc = acc / tf.reduce_sum((positives_mask))
return sum_class_loss, sum_loc_loss, total_loss, acc
def build_loss_v2(pred_labels, pred_locs, anno_labels, anno_locs, anno_masks, anno_logist_length):
with tf.variable_scope("Loss"):
loss_alpha = FLAGS.loss_alpha
pred_top_labels = tf.nn.softmax(pred_labels)
pred_top_labels = pred_top_labels[:, :, -1]
positives_mask = 1 - anno_masks
pred_negatives_top_labels = pred_top_labels * (anno_masks)
pred_negatives_mask = pred_negatives_top_labels - 0.2
pred_negatives_mask = pred_negatives_mask < 0
pred_negatives_mask = tf.cast(pred_negatives_mask, tf.float32)
# return pred_negatives_mask, pred_negatives_top_labels
active_mask = positives_mask + pred_negatives_mask
class_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred_labels,
labels=anno_labels) * active_mask
class_loss = tf.reduce_sum(class_loss, axis=1) / (1e-5 + tf.reduce_sum(active_mask, axis=1))
sum_class_loss = tf.reduce_mean(class_loss)
loc_loss = tf.reduce_sum(smooth_l1(pred_locs - anno_locs), axis=2) * positives_mask
loc_loss = tf.reduce_sum(loc_loss, axis=1) / (1e-5 + tf.reduce_sum(positives_mask, axis=1)) * 10
sum_loc_loss = tf.reduce_mean(loc_loss)
total_loss = tf.reduce_mean(loss_alpha * class_loss + (1.0 - loss_alpha) * loc_loss) * 2
acc = tf.reduce_sum(
tf.to_float(tf.equal(tf.to_int32(tf.argmax(pred_labels, -1)), anno_labels)) * active_mask)
acc = acc / tf.reduce_sum(active_mask)
return sum_class_loss, sum_loc_loss, total_loss, acc
def test_build_loss(pred_labels, pred_locs, anno_labels, anno_locs, anno_masks, anno_logist_length):
with tf.variable_scope("Loss"):
loss_alpha = FLAGS.loss_alpha
pred_top_labels = tf.nn.softmax(pred_labels)
pred_top_labels = pred_top_labels[:, :, -1]
positives_mask = 1 - anno_masks
pred_negatives_top_labels = pred_top_labels * (anno_masks)
pred_negatives_mask = pred_negatives_top_labels - 0.2
pred_negatives_mask = pred_negatives_mask < 0
pred_negatives_mask = tf.cast(pred_negatives_mask, tf.float32)
return pred_negatives_mask, pred_negatives_top_labels
active_mask = positives_mask + pred_negatives_mask
class_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred_labels,
labels=anno_labels) * active_mask
class_loss = tf.reduce_sum(class_loss, axis=1) / (1e-5 + tf.reduce_sum(active_mask, axis=1))
sum_class_loss = tf.reduce_mean(class_loss)
loc_loss = tf.reduce_sum(smooth_l1(pred_locs - anno_locs), axis=2) * active_mask
loc_loss = tf.reduce_sum(loc_loss, axis=1) / (1e-5 + tf.reduce_sum(active_mask, axis=1)) * 10
sum_loc_loss = tf.reduce_mean(loc_loss)
total_loss = tf.reduce_mean(loss_alpha * class_loss + (1.0 - loss_alpha) * loc_loss) * 2
acc = tf.reduce_sum(
tf.to_float(tf.equal(tf.to_int32(tf.argmax(pred_labels, -1)), anno_labels)) * (1 - anno_masks))
acc = acc / tf.reduce_sum((positives_mask))
return sum_class_loss, sum_loc_loss, total_loss, acc
def build_accuracy(pred_labels, anno_labels):
with tf.variable_scope("Accuracy"):
class_acc = tf.reduce_mean(tf.to_float(tf.equal(tf.to_int32(tf.argmax(pred_labels, -1)), anno_labels)))
return class_acc
```

avg_line_length: 51.141667 | max_line_length: 122 | alphanum_fraction: 0.669871
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 887 | 6,137 | 4.227734 | 0.096956 | 0.131733 | 0.064533 | 0.0528 | 0.8488 | 0.8296 | 0.814933 | 0.7792 | 0.7792 | 0.769333 | 0 | 0.021104 | 0.23562 | 6,137 | 119 | 123 | 51.571429 | 0.778299 | 0.008636 | 0 | 0.608696 | 0 | 0 | 0.003357 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054348 | false | 0 | 0.021739 | 0 | 0.141304 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: 2f3bcfde9b894f693d085d401fc823d537e611b7 | size: 6,088 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/parser/expressions/binary_operation/comparison_binary_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | ["MIT"] | 1 | 2021-04-01T20:22:36.000Z | 2021-04-01T20:22:36.000Z |
| max_issues | tests/parser/expressions/binary_operation/comparison_binary_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | ["MIT"] | 1 | 2020-11-20T22:24:38.000Z | 2020-11-20T22:24:38.000Z |
| max_forks | tests/parser/expressions/binary_operation/comparison_binary_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | ["MIT"] | null | null | null |

content:

```python
from yalul.parser import Parser
from yalul.parsers.ast.nodes.statements.expressions.binary import Binary
from yalul.lex.token import Token
from yalul.lex.token_type import TokenType
from yalul.parsers.ast.nodes.statements.expressions.values.integer import Integer
class TestParseBinaryComparison:
"""Test parser generating binary comparison operations expressions"""
def test_parser_run_generates_correct_ast_single_binary_expression_comparison_greater(self):
"""
Validates if parser is generating a correct AST to a single binary operation comparison greater, like 2 > 1
"""
tokens = [
Token(TokenType.INTEGER, 42),
Token(TokenType.GREATER, ">"),
Token(TokenType.INTEGER, 1),
Token(TokenType.END_STATEMENT, "End of Statement"),
Token(TokenType.EOF, "End of File")
]
parser_response = Parser(tokens).parse()
assert len(parser_response.errors()) == 0
node = parser_response.ast.statements[0]
assert type(node) is Binary
assert node.operator.type == TokenType.GREATER
assert type(node.left) == Integer
assert node.left.value == 42
assert node.right.value == 1
assert type(node.right) == Integer
def test_parser_run_generates_correct_ast_single_binary_expression_comparison_less(self):
"""
Validates if parser is generating a correct AST to a single binary operation comparison less, like 2 < 1
"""
tokens = [
Token(TokenType.INTEGER, 42),
Token(TokenType.LESS, "<"),
Token(TokenType.INTEGER, 1),
Token(TokenType.EOF, "End of File")
]
parser_response = Parser(tokens).parse()
node = parser_response.ast.statements[0]
assert type(node) is Binary
assert node.operator.type == TokenType.LESS
assert type(node.left) == Integer
assert node.left.value == 42
assert node.right.value == 1
assert type(node.right) == Integer
def test_parser_run_generates_correct_ast_single_binary_expression_comparison_different(self):
"""
Validates if parser is generating a correct AST to a single binary operation comparison different, like 2 != 1
"""
tokens = [
Token(TokenType.INTEGER, 42),
Token(TokenType.BANG_EQUAL, "!="),
Token(TokenType.INTEGER, 1),
Token(TokenType.EOF, "End of File")
]
parser_response = Parser(tokens).parse()
node = parser_response.ast.statements[0]
assert type(node) is Binary
assert node.operator.type == TokenType.BANG_EQUAL
assert type(node.left) == Integer
assert node.left.value == 42
assert node.right.value == 1
assert type(node.right) == Integer
def test_parser_run_generates_correct_ast_single_binary_expression_comparison_equal_equal(self):
"""
Validates if parser is generating a correct AST to a single binary operation comparison equal, like 2 == 1
"""
tokens = [
Token(TokenType.INTEGER, 42),
Token(TokenType.EQUAL_EQUAL, "=="),
Token(TokenType.INTEGER, 1),
Token(TokenType.EOF, "End of File")
]
parser_response = Parser(tokens).parse()
node = parser_response.ast.statements[0]
assert type(node) is Binary
assert node.operator.type == TokenType.EQUAL_EQUAL
assert type(node.left) == Integer
assert node.left.value == 42
assert node.right.value == 1
assert type(node.right) == Integer
def test_parser_run_generates_correct_ast_single_binary_expression_comparison_greater_equal(self):
"""
Validates if parser is generating a correct AST to a single binary operation comparison greater equal, like 2 >= 1
"""
tokens = [
Token(TokenType.INTEGER, 42),
Token(TokenType.GREATER_EQUAL, ">="),
Token(TokenType.INTEGER, 1),
Token(TokenType.EOF, "End of File")
]
parser_response = Parser(tokens).parse()
node = parser_response.ast.statements[0]
assert type(node) is Binary
assert node.operator.type == TokenType.GREATER_EQUAL
assert type(node.left) == Integer
assert node.left.value == 42
assert node.right.value == 1
assert type(node.right) == Integer
def test_parser_run_generates_correct_ast_single_binary_expression_comparison_less_equal(self):
"""
Validates if parser is generating a correct AST to a single binary operation comparison greater equal, like 2 <= 1
"""
tokens = [
Token(TokenType.INTEGER, 42),
Token(TokenType.LESS_EQUAL, "<="),
Token(TokenType.INTEGER, 1),
Token(TokenType.EOF, "End of File")
]
parser_response = Parser(tokens).parse()
node = parser_response.ast.statements[0]
assert type(node) is Binary
assert node.operator.type == TokenType.LESS_EQUAL
assert type(node.left) == Integer
assert node.left.value == 42
assert node.right.value == 1
assert type(node.right) == Integer
def test_parser_run_generates_correct_ast_single_binary_expression_comparison_bang(self):
"""
Validates if parser is generating a correct AST to a single binary operation comparison bang, like 2 ! 1
"""
tokens = [
Token(TokenType.INTEGER, 42),
Token(TokenType.BANG, "!"),
Token(TokenType.INTEGER, 1),
Token(TokenType.EOF, "End of File")
]
parser_response = Parser(tokens).parse()
node = parser_response.ast.statements[0]
assert type(node) is Binary
assert node.operator.type == TokenType.BANG
assert type(node.left) == Integer
assert node.left.value == 42
assert node.right.value == 1
assert type(node.right) == Integer
```

avg_line_length: 33.822222 | max_line_length: 122 | alphanum_fraction: 0.633213
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 711 | 6,088 | 5.284107 | 0.085795 | 0.108065 | 0.078254 | 0.029811 | 0.914027 | 0.914027 | 0.904445 | 0.88049 | 0.88049 | 0.88049 | 0 | 0.01448 | 0.273982 | 6,088 | 179 | 123 | 34.011173 | 0.83552 | 0.13617 | 0 | 0.681416 | 0 | 0 | 0.020424 | 0 | 0 | 0 | 0 | 0 | 0.380531 | 1 | 0.061947 | false | 0 | 0.044248 | 0 | 0.115044 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: 2f3dd330c2f0cb670987778763c22094ed652f77 | size: 25 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | PyMess/Tools/Gamma/__init__.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | ["MIT"] | null | null | null |
| max_issues | PyMess/Tools/Gamma/__init__.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | ["MIT"] | 1 | 2021-06-10T22:51:09.000Z | 2021-06-10T22:51:09.000Z |
| max_forks | PyMess/Tools/Gamma/__init__.py | mattkjames7/PyMess | f2c68285a7845a24d98284e20ed4292ed5e58138 | ["MIT"] | null | null | null |

content:

```python
from .Gamma import Gamma
```

avg_line_length: 12.5 | max_line_length: 24 | alphanum_fraction: 0.8
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 4 | 25 | 5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 1 | 25 | 25 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 6

hexsha: 2f87562a25a6e0e1e04ca9301c98d44826b8cf79 | size: 7,476 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | MessageService_pb2_grpc.py | tzolov/poc-python-grpc | ec784f7ede6dd2c69dd2107e7c54c477bfab0e60 | ["Apache-2.0"] | null | null | null |
| max_issues | MessageService_pb2_grpc.py | tzolov/poc-python-grpc | ec784f7ede6dd2c69dd2107e7c54c477bfab0e60 | ["Apache-2.0"] | null | null | null |
| max_forks | MessageService_pb2_grpc.py | tzolov/poc-python-grpc | ec784f7ede6dd2c69dd2107e7c54c477bfab0e60 | ["Apache-2.0"] | null | null | null |

content:

```python
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import MessageService_pb2 as MessageService__pb2
class MessagingServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.biStream = channel.stream_stream(
'/org.springframework.cloud.function.grpc.MessagingService/biStream',
request_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
response_deserializer=MessageService__pb2.GrpcMessage.FromString,
)
self.clientStream = channel.stream_unary(
'/org.springframework.cloud.function.grpc.MessagingService/clientStream',
request_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
response_deserializer=MessageService__pb2.GrpcMessage.FromString,
)
self.serverStream = channel.unary_stream(
'/org.springframework.cloud.function.grpc.MessagingService/serverStream',
request_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
response_deserializer=MessageService__pb2.GrpcMessage.FromString,
)
self.requestReply = channel.unary_unary(
'/org.springframework.cloud.function.grpc.MessagingService/requestReply',
request_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
response_deserializer=MessageService__pb2.GrpcMessage.FromString,
)
class MessagingServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def biStream(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def clientStream(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def serverStream(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def requestReply(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MessagingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'biStream': grpc.stream_stream_rpc_method_handler(
servicer.biStream,
request_deserializer=MessageService__pb2.GrpcMessage.FromString,
response_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
),
'clientStream': grpc.stream_unary_rpc_method_handler(
servicer.clientStream,
request_deserializer=MessageService__pb2.GrpcMessage.FromString,
response_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
),
'serverStream': grpc.unary_stream_rpc_method_handler(
servicer.serverStream,
request_deserializer=MessageService__pb2.GrpcMessage.FromString,
response_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
),
'requestReply': grpc.unary_unary_rpc_method_handler(
servicer.requestReply,
request_deserializer=MessageService__pb2.GrpcMessage.FromString,
response_serializer=MessageService__pb2.GrpcMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'org.springframework.cloud.function.grpc.MessagingService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MessagingService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def biStream(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/org.springframework.cloud.function.grpc.MessagingService/biStream',
MessageService__pb2.GrpcMessage.SerializeToString,
MessageService__pb2.GrpcMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def clientStream(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/org.springframework.cloud.function.grpc.MessagingService/clientStream',
MessageService__pb2.GrpcMessage.SerializeToString,
MessageService__pb2.GrpcMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def serverStream(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/org.springframework.cloud.function.grpc.MessagingService/serverStream',
MessageService__pb2.GrpcMessage.SerializeToString,
MessageService__pb2.GrpcMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def requestReply(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.springframework.cloud.function.grpc.MessagingService/requestReply',
MessageService__pb2.GrpcMessage.SerializeToString,
MessageService__pb2.GrpcMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
```

avg_line_length: 45.036145 | max_line_length: 145 | alphanum_fraction: 0.673355
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 643 | 7,476 | 7.586314 | 0.153966 | 0.090611 | 0.137761 | 0.110701 | 0.826978 | 0.802788 | 0.792333 | 0.727962 | 0.690652 | 0.637761 | 0 | 0.00464 | 0.250401 | 7,476 | 165 | 146 | 45.309091 | 0.86581 | 0.084136 | 0 | 0.666667 | 1 | 0 | 0.123395 | 0.089742 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.015152 | 0.030303 | 0.143939 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: c06e230fe42ec9c8f4ccc8fc0734c8b1292df78e | size: 43 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | topsis/__init__.py | Anurag-Aggarwal/101703088-topsis | 8d40174be3ebf938316514131410c7d4b11f730e | ["MIT"] | null | null | null |
| max_issues | topsis/__init__.py | Anurag-Aggarwal/101703088-topsis | 8d40174be3ebf938316514131410c7d4b11f730e | ["MIT"] | null | null | null |
| max_forks | topsis/__init__.py | Anurag-Aggarwal/101703088-topsis | 8d40174be3ebf938316514131410c7d4b11f730e | ["MIT"] | null | null | null |

content:

```python
from 101703088-topsis.topsis import Topsis
```

avg_line_length: 21.5 | max_line_length: 42 | alphanum_fraction: 0.860465
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 6 | 43 | 6.166667 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 0.093023 | 43 | 1 | 43 | 43 | 0.717949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 1 | null | null | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: c0f13a0c784cfa1f117dbde7893456e40e805dd8 | size: 273 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | URI/1 - INICIANTE/Python/1044 - Multiplos.py | william-james-pj/LogicaProgramacao | 629f746e34da2e829dc7ea2e489ac36bb1b1fb13 | ["MIT"] | 1 | 2020-04-14T16:48:16.000Z | 2020-04-14T16:48:16.000Z |
| max_issues | URI/1 - INICIANTE/Python/1044 - Multiplos.py | william-james-pj/LogicaProgramacao | 629f746e34da2e829dc7ea2e489ac36bb1b1fb13 | ["MIT"] | null | null | null |
| max_forks | URI/1 - INICIANTE/Python/1044 - Multiplos.py | william-james-pj/LogicaProgramacao | 629f746e34da2e829dc7ea2e489ac36bb1b1fb13 | ["MIT"] | null | null | null |

content:

```python
a, b = input().split()
a = int(a)
b = int(b)
if a < b:
c = b % a
if(c == 0):
print('Sao Multiplos')
else:
print('Nao sao Multiplos')
else:
c = a % b
if (c == 0):
print('Sao Multiplos')
else:
print('Nao sao Multiplos')
```

avg_line_length: 18.2 | max_line_length: 34 | alphanum_fraction: 0.461538
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 42 | 273 | 3 | 0.309524 | 0.063492 | 0.380952 | 0.142857 | 0.714286 | 0.714286 | 0.714286 | 0.714286 | 0.714286 | 0.714286 | 0 | 0.011494 | 0.362637 | 273 | 15 | 35 | 18.2 | 0.712644 | 0 | 0 | 0.6 | 0 | 0 | 0.218978 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.266667
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: c0f8f2099128122074a4041a58bc081a49fecf22 | size: 11,739 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/python/test_stats_reporter.py | mr-c/CWL-assembly | 4f98aa0ff6fd9a6c0712e35869c8a36f8bb22e52 | ["Apache-2.0"] | null | null | null |
| max_issues | tests/python/test_stats_reporter.py | mr-c/CWL-assembly | 4f98aa0ff6fd9a6c0712e35869c8a36f8bb22e52 | ["Apache-2.0"] | null | null | null |
| max_forks | tests/python/test_stats_reporter.py | mr-c/CWL-assembly | 4f98aa0ff6fd9a6c0712e35869c8a36f8bb22e52 | ["Apache-2.0"] | 1 | 2021-02-22T14:59:08.000Z | 2021-02-22T14:59:08.000Z |

content:

```python
import pytest
import json
from cwl.stats.stats_report import gen_stats_report
from tests.python.utils import write_empty_file, copy_fixture
class TestStatsReporter(object):
def test_coverage_report_fixture_empty_coverage_file(self, tmpdir):
tmpdir = str(tmpdir)
coverage_file = write_empty_file(tmpdir + '/tmp.tab')
contig_file = write_empty_file(tmpdir + 'contigs.fasta')
output_file = write_empty_file(tmpdir + 'output.json')
with pytest.raises(ValueError) as exc:
args = gen_stats_report.parse_args(
['106000', contig_file, coverage_file, output_file, '500', 'metaspades'])
gen_stats_report.calc_coverage(args)
# Assert error comes from coverage file message, not fasta parsing error
assert 'Coverage file' in str(exc)
def test_coverage_report_fixture_empty_fasta_file(self, tmpdir):
tmpdir = str(tmpdir)
coverage_file = copy_fixture('SRP0741/SRP074153/SRR6257/SRR6257420/megahit/001/coverage.tab',
tmpdir + 'tmp.tab')
contig_file = write_empty_file(tmpdir + 'contigs.fasta')
output_file = write_empty_file(tmpdir + 'output.json')
open(coverage_file, 'a').close()
args = gen_stats_report.parse_args(
['106000', contig_file, coverage_file, output_file, '500', 'metaspades'])
coverage = gen_stats_report.calc_coverage(args)
assert coverage == 14.2
def test_main_metaspades(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = copy_fixture('ERP0102/ERP010229/ERR8665/ERR866589/metaspades/001/contigs.fasta',
tmpdir + 'contigs.fasta')
coverage_file = copy_fixture('ERP0102/ERP010229/ERR8665/ERR866589/metaspades/001/coverage.tab',
tmpdir + 'coverage.tab')
output_file = write_empty_file(tmpdir + 'output.json')
base_count = 106000
args = gen_stats_report.parse_args(
[str(base_count), contig_file, coverage_file, output_file, '0', 'metaspades'])
try:
gen_stats_report.main(args)
except SystemExit:
pass
expected_report = {
'Base count': base_count,
'Coverage': 0.01,
'Min length 1000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 10000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 50000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'num_contigs': 5,
'total_assembled_pairs': 262 + 245 + 116 + 87 + 60,
'largest_contig': 262,
'n50': 2,
'l50': 4
}
with open(output_file) as output:
report = json.load(output)
assert expected_report == report
def test_main_megahit(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = copy_fixture('SRP0741/SRP074153/SRR6257/SRR6257420/megahit/001/final.contigs.fa',
tmpdir + 'contigs.fasta')
coverage_file = copy_fixture('SRP0741/SRP074153/SRR6257/SRR6257420/megahit/001/coverage.tab',
tmpdir + 'coverage.tab')
output_file = write_empty_file(tmpdir + 'output.json')
base_count = 106000
args = gen_stats_report.parse_args([str(base_count), contig_file, coverage_file, output_file, '0', 'megahit'])
try:
gen_stats_report.main(args)
except SystemExit:
pass
expected_report = {
'Base count': base_count,
'Coverage': 14.2,
'Min length 1000 bp': {'num_contigs': 1, 'total_base_pairs': 2473},
'Min length 10000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 50000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'num_contigs': 22,
'total_assembled_pairs': 11716,
'largest_contig': 2473,
'n50': 9,
'l50': 14
}
with open(output_file) as output:
report = json.load(output)
assert expected_report == report
def test_raises_error_on_invalid_basecount(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = copy_fixture('SRP0741/SRP074153/SRR6257/SRR6257420/megahit/001/final.contigs.fa',
tmpdir + 'contigs.fasta')
coverage_file = copy_fixture('SRP0741/SRP074153/SRR6257/SRR6257420/megahit/001/coverage.tab',
tmpdir + 'coverage.tab')
output_file = write_empty_file(tmpdir + 'output.json')
with pytest.raises(ValueError) as exc:
args = gen_stats_report.parse_args(['0', contig_file, coverage_file, output_file, '0', 'metaspades'])
gen_stats_report.main(args)
# Assert error comes from coverage file message, not fasta parsing error
assert 'Base count (0) cannot be <= 0.' in str(exc)
class TestFastaStats(object):
def test_supported_assemblers(self):
supported = ['metaspades', 'spades', 'megahit']
for assembler in supported:
fstats = gen_stats_report.FastaStats('contigs.fasta', 500, assembler)
assert fstats.assembler == assembler
def test_unsupported_assemblers(self):
unsupported = ['minia', 'invalid_assembler']
for assembler in unsupported:
with pytest.raises(ValueError):
gen_stats_report.FastaStats('contigs.fasta', 500, assembler)
def test_stats_empty_fasta(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = write_empty_file(tmpdir + 'contigs.fasta')
with open(contig_file) as f:
fstats = gen_stats_report.FastaStats(f, 500, 'metaspades')
fstats.parse_file()
assert fstats.get_largest_contig() == 0
assert fstats.get_n50() == 0
assert fstats.get_l50() == 0
assert fstats.get_total_pairs() == 0
assert fstats.get_filtered_stats(100) == {'num_contigs': 0, 'total_base_pairs': 0}
def test_stats_valid_metaspades_fasta_no_contig_filtering(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = copy_fixture('ERP0102/ERP010229/ERR8665/ERR866589/metaspades/001/contigs.fasta',
tmpdir + 'contigs.fasta')
with open(contig_file) as f:
fstats = gen_stats_report.FastaStats(f, 0, 'metaspades')
fstats.parse_file()
expected_report = {
'Min length 1000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 10000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 50000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'num_contigs': 5,
'total_assembled_pairs': 262 + 245 + 116 + 87 + 60,
'largest_contig': 262,
'n50': 2,
'l50': 4
}
assert fstats.get_largest_contig() == expected_report['largest_contig']
assert fstats.get_n50() == expected_report['n50']
assert fstats.get_l50() == expected_report['l50']
assert fstats.get_total_pairs() == expected_report['total_assembled_pairs']
assert fstats.get_filtered_stats(100) == {'num_contigs': 3, 'total_base_pairs': 262 + 245 + 116}
assert fstats.gen_report() == expected_report
def test_stats_valid_metaspades_fasta_with_contig_filtering(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = copy_fixture('ERP0102/ERP010229/ERR8665/ERR866589/metaspades/001/contigs.fasta',
tmpdir + 'contigs.fasta')
with open(contig_file) as f:
fstats = gen_stats_report.FastaStats(f, 100, 'metaspades')
fstats.parse_file()
expected_report = {
'Min length 1000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 10000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 50000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'num_contigs': 3,
'total_assembled_pairs': 262 + 245 + 116,
'largest_contig': 262,
'n50': 2,
'l50': 2
}
assert fstats.get_largest_contig() == expected_report['largest_contig']
assert fstats.get_n50() == expected_report['n50']
assert fstats.get_l50() == expected_report['l50']
assert fstats.get_total_pairs() == expected_report['total_assembled_pairs']
assert fstats.get_filtered_stats(100) == {'num_contigs': 3, 'total_base_pairs': 262 + 245 + 116}
assert fstats.gen_report() == expected_report
def test_stats_valid_megahit_fasta_no_contig_filtering(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = copy_fixture('SRP0741/SRP074153/SRR6257/SRR6257420/megahit/001/final.contigs.fa',
tmpdir + 'contigs.fasta')
with open(contig_file) as f:
fstats = gen_stats_report.FastaStats(f, 0, 'megahit')
fstats.parse_file()
contig_lengths = 11716
expected_report = {
'Min length 1000 bp': {'num_contigs': 1, 'total_base_pairs': 2473},
'Min length 10000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'Min length 50000 bp': {'num_contigs': 0, 'total_base_pairs': 0},
'num_contigs': 22,
'total_assembled_pairs': contig_lengths,
'largest_contig': 2473,
'n50': 9,
'l50': 14
}
assert fstats.get_largest_contig() == expected_report['largest_contig']
assert fstats.get_n50() == expected_report['n50']
assert fstats.get_l50() == expected_report['l50']
assert fstats.get_total_pairs() == expected_report['total_assembled_pairs']
assert fstats.get_filtered_stats(700) == {'num_contigs': 3, 'total_base_pairs': 2473 + 767 + 730}
assert fstats.gen_report() == expected_report
def test_stats_valid_megahit_fasta_with_contig_filtering(self, tmpdir):
tmpdir = str(tmpdir)
contig_file = copy_fixture('SRP0741/SRP074153/SRR6257/SRR6257420/megahit/001/final.contigs.fa',
tmpdir + 'contigs.fasta')
with open(contig_file) as f:
fstats = gen_stats_report.FastaStats(f, 700, 'megahit')
fstats.parse_file()
contig_lengths = 2473 + 767 + 730
expected_report = {
'Min length 1000 bp': {
'num_contigs': 1,
'total_base_pairs': 2473
},
'Min length 10000 bp': {
'num_contigs': 0,
'total_base_pairs': 0
},
'Min length 50000 bp': {
'num_contigs': 0,
'total_base_pairs': 0
},
'num_contigs': 3,
'total_assembled_pairs': contig_lengths,
'largest_contig': 2473,
'n50': 1,
'l50': 3
}
assert fstats.get_largest_contig() == expected_report['largest_contig']
assert fstats.get_n50() == expected_report['n50']
assert fstats.get_l50() == expected_report['l50']
assert fstats.get_total_pairs(), expected_report['total_assembled_pairs']
assert fstats.get_filtered_stats(2000) == {'num_contigs': 1, 'total_base_pairs': 2473}
assert fstats.gen_report() == expected_report
```

avg_line_length: 48.308642 | max_line_length: 118 | alphanum_fraction: 0.590681
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 1,313 | 11,739 | 5.000762 | 0.09901 | 0.054828 | 0.057112 | 0.038989 | 0.891258 | 0.877399 | 0.837039 | 0.810844 | 0.767591 | 0.748858 | 0 | 0.081252 | 0.300707 | 11,739 | 242 | 119 | 48.508264 | 0.718602 | 0.012011 | 0 | 0.6621 | 0 | 0 | 0.226908 | 0.07831 | 0 | 0 | 0 | 0 | 0.159817 | 1 | 0.054795 | false | 0.009132 | 0.018265 | 0 | 0.082192 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: 23906d00ac6fa55a850cf7fb42589d821582d5b6 | size: 26 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | games/__init__.py | Sennevs/twoseventy | 12ebd6047072a323b41581e8c7b38b1829b6682a | ["MIT"] | null | null | null |
| max_issues | games/__init__.py | Sennevs/twoseventy | 12ebd6047072a323b41581e8c7b38b1829b6682a | ["MIT"] | null | null | null |
| max_forks | games/__init__.py | Sennevs/twoseventy | 12ebd6047072a323b41581e8c7b38b1829b6682a | ["MIT"] | null | null | null |

content:

```python
from games.env import Env
```

avg_line_length: 13 | max_line_length: 25 | alphanum_fraction: 0.807692
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 5 | 26 | 4.2 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 26 | 1 | 26 | 26 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 6

hexsha: f19b407f6026c32c6c779428378496d99cee0630 | size: 93 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | dink/__init__.py | anthonyjb/dink-python | b3774f435beb77b44e05f4e0f3f2619c052968c5 | ["MIT"] | null | null | null |
| max_issues | dink/__init__.py | anthonyjb/dink-python | b3774f435beb77b44e05f4e0f3f2619c052968c5 | ["MIT"] | 1 | 2021-07-15T05:39:04.000Z | 2021-07-15T05:39:04.000Z |
| max_forks | dink/__init__.py | anthonyjb/dink-python | b3774f435beb77b44e05f4e0f3f2619c052968c5 | ["MIT"] | null | null | null |

content:

```python
from .client import *
from . import charts
from . import exceptions
from . import resources
```

avg_line_length: 15.5 | max_line_length: 24 | alphanum_fraction: 0.763441
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 12 | 93 | 5.916667 | 0.5 | 0.422535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.182796 | 93 | 5 | 25 | 18.6 | 0.934211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 6

hexsha: f1c3317ceb311953f594b2fdd06e6f9dedd0e479 | size: 79 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Reading Data/lesson-16-discover-starwars-people/tests/test_starwars_people_1.py | danielgarm/Data-Science-and-Machine-Learning | fa3e85cc42eb2e9f964ab5abb34d1c93e16d1cd9 | ["MIT"] | null | null | null |
| max_issues | Reading Data/lesson-16-discover-starwars-people/tests/test_starwars_people_1.py | danielgarm/Data-Science-and-Machine-Learning | fa3e85cc42eb2e9f964ab5abb34d1c93e16d1cd9 | ["MIT"] | 2 | 2022-01-11T21:04:51.000Z | 2022-01-11T21:05:05.000Z |
| max_forks | Reading Data/lesson-16-discover-starwars-people/tests/test_starwars_people_1.py | danielgarm/Data-Science-and-Machine-Learning | fa3e85cc42eb2e9f964ab5abb34d1c93e16d1cd9 | ["MIT"] | null | null | null |

content:

```python
def test_starwars_people_1():
assert starwars_people_df.shape == (10,16)
```

avg_line_length: 26.333333 | max_line_length: 47 | alphanum_fraction: 0.734177
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 12 | 79 | 4.416667 | 0.833333 | 0.528302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074627 | 0.151899 | 79 | 2 | 48 | 39.5 | 0.716418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: 7b3a97966577133b5f896bcec5a6aebeb5fdad34 | size: 222 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | pyequalizer/nas_utils.py | Atamisk/pyEqualizer | bb38aa5fed1c2ec82203432842ab4ebe2079c5bf | ["MIT"] | null | null | null |
| max_issues | pyequalizer/nas_utils.py | Atamisk/pyEqualizer | bb38aa5fed1c2ec82203432842ab4ebe2079c5bf | ["MIT"] | null | null | null |
| max_forks | pyequalizer/nas_utils.py | Atamisk/pyEqualizer | bb38aa5fed1c2ec82203432842ab4ebe2079c5bf | ["MIT"] | null | null | null |

content:

```python
from pyequalizer.fileops import to_nas_real
def to_nas_force(sid,g,cid,f,n1,n2,n3):
return ['FORCE',str(int(sid)),str(int(g)),str(int(cid)),to_nas_real(f),to_nas_real(n1),
to_nas_real(n2),to_nas_real(n3)]
```

avg_line_length: 37 | max_line_length: 91 | alphanum_fraction: 0.698198
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 45 | 222 | 3.177778 | 0.422222 | 0.20979 | 0.314685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030612 | 0.117117 | 222 | 5 | 92 | 44.4 | 0.69898 | 0 | 0 | 0 | 0 | 0 | 0.022523 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0.25 | 0.75 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0
effective: 0 | hits: 6

hexsha: 7b65472f16a71182aac46ae3d87fff02ac7c3c95 | size: 10,692 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | qp/interp_pdf.py | joezuntz/qp | 44e3fcd7e17c59fc39242a715303f1bdeca3f6ea | ["MIT"] | null | null | null |
| max_issues | qp/interp_pdf.py | joezuntz/qp | 44e3fcd7e17c59fc39242a715303f1bdeca3f6ea | ["MIT"] | null | null | null |
| max_forks | qp/interp_pdf.py | joezuntz/qp | 44e3fcd7e17c59fc39242a715303f1bdeca3f6ea | ["MIT"] | null | null | null |

content:

```python
"""This module implements a PDT distribution sub-class using interpolated grids
"""
import numpy as np
from scipy.stats import rv_continuous
from qp.pdf_gen import Pdf_rows_gen
from qp.conversion_funcs import extract_vals_at_x, extract_xy_vals, extract_xy_sparse
from qp.plotting import get_axes_and_xlims, plot_pdf_on_axes
from qp.utils import normalize_interp1d,\
interpolate_unfactored_multi_x_multi_y, interpolate_unfactored_multi_x_y, interpolate_unfactored_x_multi_y,\
interpolate_multi_x_multi_y, interpolate_multi_x_y, interpolate_x_multi_y, reshape_to_pdf_size
from qp.test_data import XBINS, XARRAY, YARRAY, TEST_XVALS
from qp.factory import add_class
class interp_gen(Pdf_rows_gen):
"""Interpolator based distribution
Notes
-----
This implements a PDF using a set of interpolated values.
It simply takes a set of x and y values and uses `scipy.interpolate.interp1d` to
build the PDF.
"""
# pylint: disable=protected-access
name = 'interp'
version = 0
_support_mask = rv_continuous._support_mask
def __init__(self, xvals, yvals, *args, **kwargs):
"""
Create a new distribution by interpolating the given values
Parameters
----------
xvals : array_like
The x-values used to do the interpolation
yvals : array_like
The y-values used to do the interpolation
"""
if xvals.size != np.sum(yvals.shape[1:]): # pragma: no cover
raise ValueError("Shape of xbins in xvals (%s) != shape of xbins in yvals (%s)" % (xvals.size, np.sum(yvals.shape[1:])))
self._xvals = xvals
# Set support
kwargs['a'] = self.a = np.min(self._xvals)
kwargs['b'] = self.b = np.max(self._xvals)
kwargs['shape'] = yvals.shape[:-1]
#self._yvals = normalize_interp1d(xvals, yvals)
self._yvals = reshape_to_pdf_size(yvals, -1)
check_input = kwargs.pop('check_input', True)
if check_input:
self._compute_ycumul()
self._yvals = (self._yvals.T / self._ycumul[:,-1]).T
self._ycumul = (self._ycumul.T / self._ycumul[:,-1]).T
else: # pragma: no cover
self._ycumul = None
super(interp_gen, self).__init__(*args, **kwargs)
self._addmetadata('xvals', self._xvals)
self._addobjdata('yvals', self._yvals)
def _compute_ycumul(self):
copy_shape = np.array(self._yvals.shape)
self._ycumul = np.ndarray(copy_shape)
self._ycumul[:, 0] = 0.5 * self._yvals[:, 0] * (self._xvals[1] - self._xvals[0])
self._ycumul[:, 1:] = np.cumsum((self._xvals[1:] - self._xvals[:-1]) *
0.5 * np.add(self._yvals[:,1:],
self._yvals[:,:-1]), axis=1)
@property
def xvals(self):
"""Return the x-values used to do the interpolation"""
return self._xvals
@property
def yvals(self):
"""Return the y-valus used to do the interpolation"""
return self._yvals
def _pdf(self, x, row):
# pylint: disable=arguments-differ
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_x_multi_y(xr, self._xvals, self._yvals[rr], bounds_error=False,
fill_value=0.).reshape(x.shape)
return interpolate_unfactored_x_multi_y(xr, rr, self._xvals, self._yvals,
bounds_error=False, fill_value=0.)
def _cdf(self, x, row):
# pylint: disable=arguments-differ
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_x_multi_y(xr, self._xvals, self._ycumul[rr],
bounds_error=False, fill_value=(0.,1.)).reshape(x.shape)
return interpolate_unfactored_x_multi_y(xr, rr, self._xvals, self._ycumul,
bounds_error=False, fill_value=(0.,1.))
def _ppf(self, x, row):
# pylint: disable=arguments-differ
factored, xr, rr, _ = self._sliceargs(x, row)
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
if factored:
return interpolate_multi_x_y(xr, self._ycumul[rr], self._xvals, bounds_error=False,
fill_value=(0.,1.)).reshape(x.shape)
return interpolate_unfactored_multi_x_y(xr, rr, self._ycumul, self._xvals,
bounds_error=False, fill_value=(0.,1.))
def _updated_ctor_param(self):
"""
Set the bins as additional constructor argument
"""
dct = super(interp_gen, self)._updated_ctor_param()
dct['xvals'] = self._xvals
dct['yvals'] = self._yvals
return dct
@classmethod
def plot_native(cls, pdf, **kwargs):
"""Plot the PDF in a way that is particular to this type of distibution
For a interpolated PDF this uses the interpolation points
"""
axes, _, kw = get_axes_and_xlims(**kwargs)
return plot_pdf_on_axes(axes, pdf, pdf.dist.xvals, **kw)
@classmethod
def add_mappings(cls):
"""
Add this classes mappings to the conversion dictionary
"""
cls._add_creation_method(cls.create, None)
cls._add_extraction_method(extract_vals_at_x, None)
interp = interp_gen.create
class interp_irregular_gen(Pdf_rows_gen):
"""Interpolator based distribution
Notes
-----
This implements a PDF using a set of interpolated values.
It simply takes a set of x and y values and uses `scipy.interpolate.interp1d` to
build the PDF.
"""
# pylint: disable=protected-access
name = 'interp_irregular'
version = 0
_support_mask = rv_continuous._support_mask
def __init__(self, xvals, yvals, *args, **kwargs):
"""
Create a new distribution by interpolating the given values
Parameters
----------
xvals : array_like
The x-values used to do the interpolation
yvals : array_like
The y-values used to do the interpolation
"""
if xvals.shape != yvals.shape: # pragma: no cover
raise ValueError("Shape of xvals (%s) != shape of yvals (%s)" % (xvals.shape, yvals.shape))
self._xvals = reshape_to_pdf_size(xvals, -1)
# Set support
kwargs['a'] = self.a = np.min(self._xvals)
kwargs['b'] = self.b = np.max(self._xvals)
kwargs['shape'] = xvals.shape[:-1]
check_input = kwargs.pop('check_input', True)
self._yvals = reshape_to_pdf_size(yvals, -1)
if check_input:
self._yvals = normalize_interp1d(self._xvals, self._yvals)
self._ycumul = None
super(interp_irregular_gen, self).__init__(*args, **kwargs)
self._addobjdata('xvals', self._xvals)
self._addobjdata('yvals', self._yvals)
def _compute_ycumul(self):
copy_shape = np.array(self._yvals.shape)
self._ycumul = np.ndarray(copy_shape)
self._ycumul[:,0] = 0.
self._ycumul[:,1:] = np.cumsum(self._xvals[:,1:]*self._yvals[:,1:] - self._xvals[:,:-1]*self._yvals[:,1:], axis=1)
@property
def xvals(self):
"""Return the x-values used to do the interpolation"""
return self._xvals
@property
def yvals(self):
"""Return the y-valus used to do the interpolation"""
return self._yvals
def _pdf(self, x, row):
# pylint: disable=arguments-differ
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_multi_x_multi_y(xr, self._xvals[rr], self._yvals[rr], bounds_error=False, fill_value=0.).reshape(x.shape)
return interpolate_unfactored_multi_x_multi_y(xr, rr, self._xvals, self._yvals, bounds_error=False, fill_value=0.)
def _cdf(self, x, row):
# pylint: disable=arguments-differ
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_multi_x_multi_y(xr, self._xvals[rr], self._ycumul[rr], bounds_error=False, fill_value=(0., 1.)).reshape(x.shape)
return interpolate_unfactored_multi_x_multi_y(xr, rr, self._xvals, self._ycumul, bounds_error=False, fill_value=(0., 1.))
def _ppf(self, x, row):
# pylint: disable=arguments-differ
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_multi_x_multi_y(xr, self._ycumul[rr], self._xvals[rr], bounds_error=False,
fill_value=(self.a, self.b)).reshape(x.shape)
return interpolate_unfactored_multi_x_multi_y(xr, rr, self._ycumul, self._xvals, bounds_error=False,
fill_value=(self.a, self.b))
def _updated_ctor_param(self):
"""
Set the bins as additional constructor argument
"""
dct = super(interp_irregular_gen, self)._updated_ctor_param()
dct['xvals'] = self._xvals
dct['yvals'] = self._yvals
return dct
@classmethod
def plot_native(cls, pdf, **kwargs):
"""Plot the PDF in a way that is particular to this type of distibution
For a interpolated PDF this uses the interpolation points
"""
axes, _, kw = get_axes_and_xlims(**kwargs)
xvals_row = pdf.dist.xvals
return plot_pdf_on_axes(axes, pdf, xvals_row, **kw)
@classmethod
def add_mappings(cls):
"""
Add this classes mappings to the conversion dictionary
"""
cls._add_creation_method(cls.create, None)
cls._add_extraction_method(extract_xy_vals, None)
cls._add_extraction_method(extract_xy_sparse, None)
interp_irregular = interp_irregular_gen.create
interp_irregular_gen.test_data = dict(interp_irregular=dict(gen_func=interp_irregular, ctor_data=dict(xvals=XARRAY, yvals=YARRAY),\
convert_data=dict(xvals=XBINS), test_xvals=TEST_XVALS))
interp_gen.test_data = dict(interp=dict(gen_func=interp, ctor_data=dict(xvals=XBINS, yvals=YARRAY),\
convert_data=dict(xvals=XBINS), test_xvals=TEST_XVALS))
add_class(interp_gen)
add_class(interp_irregular_gen)
```

avg_line_length: 38.049822 | max_line_length: 143 | alphanum_fraction: 0.619155
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 1,383 | 10,692 | 4.511931 | 0.126537 | 0.047596 | 0.015705 | 0.038462 | 0.823077 | 0.789583 | 0.760737 | 0.722917 | 0.702244 | 0.693269 | 0 | 0.006938 | 0.272073 | 10,692 | 280 | 144 | 38.185714 | 0.794809 | 0.18743 | 0 | 0.566879 | 0 | 0 | 0.024079 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127389 | false | 0 | 0.050955 | 0 | 0.356688 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6

hexsha: 7b73d30ae2e5f42fa91253eadf4bc4943837ac0f | size: 215 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | src/ccapi/util/gevent.py | cellcollective/ccapi | a7649f589cfc66e05d4610c4995bd1c75ad265eb | ["MIT"] | 9 | 2020-05-12T08:16:35.000Z | 2022-01-06T03:22:18.000Z |
| max_issues | src/ccapi/util/gevent.py | cellcollective/ccapi | a7649f589cfc66e05d4610c4995bd1c75ad265eb | ["MIT"] | 3 | 2020-10-14T16:29:24.000Z | 2021-10-04T07:24:34.000Z |
| max_forks | src/ccapi/util/gevent.py | cellcollective/ccapi | a7649f589cfc66e05d4610c4995bd1c75ad265eb | ["MIT"] | null | null | null |

content:

```python
def patch():
# https://github.com/gevent/gevent/issues/1016#issuecomment-328529454
# Monkey-Patch
from gevent import monkey as curious_george
curious_george.patch_all(thread = False, select = False)
```

avg_line_length: 35.833333 | max_line_length: 73 | alphanum_fraction: 0.739535
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 28 | 215 | 5.571429 | 0.714286 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071823 | 0.15814 | 215 | 6 | 74 | 35.833333 | 0.790055 | 0.367442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 6

hexsha: c88768a83cd6c0baeaa32d044d034b6fad2a9aaf | size: 79 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | examples/include-files/sources/index.py | DmitryBogomolov/aws-cloudformation-sample | f0454b203973e07027a4cdf5f36468d137d310fd | ["MIT"] | null | null | null |
| max_issues | examples/include-files/sources/index.py | DmitryBogomolov/aws-cloudformation-sample | f0454b203973e07027a4cdf5f36468d137d310fd | ["MIT"] | 36 | 2018-04-20T06:11:41.000Z | 2018-07-07T21:55:55.000Z |
| max_forks | examples/include-files/sources/index.py | DmitryBogomolov/aws-cloudformation-sample | f0454b203973e07027a4cdf5f36468d137d310fd | ["MIT"] | null | null | null |

content:

```python
from os import getenv
def handler(event, context):
return getenv('VALUE')
```

avg_line_length: 15.8 | max_line_length: 28 | alphanum_fraction: 0.721519
quality signals (qsc_code_num_words_quality_signal .. qsc_codepython_frac_lines_print_quality_signal, schema order): 11 | 79 | 5.181818 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.177215 | 79 | 4 | 29 | 19.75 | 0.876923 | 0 | 0 | 0 | 0 | 0 | 0.063291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0
hit flags (qsc_code_num_words .. qsc_codepython_frac_lines_print, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0
effective: 0 | hits: 6

hexsha: c8ae70b036d519a7353d5e031a2965c9ecdc3725 | size: 4,811 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | ditto/twitter/templatetags/ditto_twitter.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | ["MIT"] | 54 | 2016-08-15T17:32:41.000Z | 2022-02-27T03:32:05.000Z |
| max_issues | ditto/twitter/templatetags/ditto_twitter.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | ["MIT"] | 229 | 2015-07-23T12:50:47.000Z | 2022-03-24T10:33:20.000Z |
| max_forks | ditto/twitter/templatetags/ditto_twitter.py | garrettc/django-ditto | fcf15beb8f9b4d61634efd4a88064df12ee16a6f | ["MIT"] | 8 | 2015-09-10T17:10:35.000Z | 2022-03-25T13:05:01.000Z |

content:

```python
import datetime
import pytz
from django import template
from ..models import Tweet, User
from ...core.utils import get_annual_item_counts
register = template.Library()
@register.simple_tag
def recent_tweets(screen_name=None, limit=10):
"""Returns a QuerySet of recent public Tweets, in reverse-chronological
order.
Keyword arguments:
screen_name -- A Twitter user's screen_name. If not supplied, we fetch
Tweets for all Twitter users that have Accounts.
limit -- Maximum number to fetch. Default is 10.
"""
tweets = Tweet.public_tweet_objects.all()
if screen_name is not None:
tweets = tweets.filter(user__screen_name=screen_name)
return tweets.prefetch_related("user")[:limit]
@register.simple_tag
def recent_favorites(screen_name=None, limit=10):
"""Returns a QuerySet of recent Tweets favorited by the Account associated
with the Twitter User with screen_name.
Keyword arguments:
screen_name -- A Twitter user's screen_name. If not supplied, we fetch
Tweets favorited by all public Accounts.
limit -- Maximum number to fetch. Default is 10.
"""
if screen_name is None:
tweets = Tweet.public_favorite_objects.all()
else:
user = User.objects.get(screen_name=screen_name)
if user.is_private:
tweets = Tweet.objects.none()
else:
tweets = Tweet.public_favorite_objects.filter(favoriting_users=user)
return tweets.prefetch_related("user")[:limit]
@register.simple_tag
def day_tweets(date, screen_name=None):
"""Returns a QuerySet of Tweets posted on a specific date by public
Accounts.
Arguments:
date -- A date object.
Keyword arguments:
screen_name -- A Twitter user's screen_name. If not supplied, we fetch
all public Tweets.
"""
start = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=pytz.utc)
end = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=pytz.utc)
tweets = Tweet.public_tweet_objects.filter(post_time__range=[start, end])
if screen_name is not None:
tweets = tweets.filter(user__screen_name=screen_name)
tweets = tweets.prefetch_related("user")
return tweets
@register.simple_tag
def day_favorites(date, screen_name=None):
"""Returns a QuerySet of Tweets posted on a specific date that have been
favorited by public Accounts.
NOTE: It is not the date on which the Tweets were favorited.
The Twitter API doesn't supply that.
Arguments:
date -- A date object.
Keyword arguments:
screen_name -- A Twitter user's screen_name. If not supplied, we fetch
all public Tweets.
"""
start = datetime.datetime.combine(date, datetime.time.min).replace(tzinfo=pytz.utc)
end = datetime.datetime.combine(date, datetime.time.max).replace(tzinfo=pytz.utc)
if screen_name is None:
tweets = Tweet.public_favorite_objects.filter(post_time__range=[start, end])
else:
user = User.objects.get(screen_name=screen_name)
if user.is_private:
tweets = Tweet.objects.none()
else:
tweets = Tweet.public_favorite_objects.filter(
post_time__range=[start, end]
).filter(favoriting_users=user)
tweets = tweets.prefetch_related("user")
return tweets
@register.simple_tag
def annual_tweet_counts(screen_name=None):
"""
Get the number of public Tweets per year.
Returns a list of dicts, sorted by year, like:
[ {'year': 2015, 'count': 1234}, {'year': 2016, 'count': 9876} ]
Keyword arguments:
screen_name -- A Twitter user's screen_name. If not supplied, we fetch
all public Tweets.
"""
tweets = Tweet.public_tweet_objects
if screen_name is not None:
tweets = tweets.filter(user__screen_name=screen_name)
return get_annual_item_counts(tweets)
@register.simple_tag
def annual_favorite_counts(screen_name=None):
"""
Get the number of public Favorites per year.
(i.e. the Tweets are from those years, not that they were favorited then.)
Returns a list of dicts, sorted by year, like:
[ {'year': 2015, 'count': 1234}, {'year': 2016, 'count': 9876} ]
Keyword arguments:
screen_name -- A Twitter user's screen_name. If not supplied, we fetch
all public favorited Tweets.
"""
if screen_name is None:
tweets = Tweet.public_favorite_objects
else:
user = User.objects.get(screen_name=screen_name)
if user.is_private:
tweets = Tweet.objects.none()
else:
tweets = Tweet.public_favorite_objects.filter(favoriting_users=user)
return get_annual_item_counts(tweets)
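# Usage sketch (hypothetical screen_name), calling the tags above directly
# from Python; in a Django template the equivalent would be:
#   {% load ditto_twitter %}
#   {% recent_tweets screen_name="bob" limit=5 as tweets %}
for tweet in recent_tweets(screen_name="bob", limit=5):
    print(tweet.user.screen_name, tweet.post_time)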
| 33.17931
| 87
| 0.681771
| 645
| 4,811
| 4.925581
| 0.179845
| 0.116462
| 0.033994
| 0.037771
| 0.813975
| 0.776833
| 0.751023
| 0.740321
| 0.740321
| 0.687441
| 0
| 0.010811
| 0.230929
| 4,811
| 144
| 88
| 33.409722
| 0.847838
| 0.383912
| 0
| 0.676923
| 0
| 0
| 0.005812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092308
| false
| 0
| 0.076923
| 0
| 0.261538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c8b7f61778399bee249dc77f72953fb14fc4d462
| 153
|
py
|
Python
|
ghosted/routes/__init__.py
|
tannerstephens/ghosted-2019
|
7b0ee210b31c5b0bb67d2e0cc65bd258d4d06f50
|
[
"MIT"
] | null | null | null |
ghosted/routes/__init__.py
|
tannerstephens/ghosted-2019
|
7b0ee210b31c5b0bb67d2e0cc65bd258d4d06f50
|
[
"MIT"
] | 4
|
2021-06-08T20:30:21.000Z
|
2022-03-12T00:06:05.000Z
|
ghosted/routes/__init__.py
|
tannerstephens/ghosted
|
7b0ee210b31c5b0bb67d2e0cc65bd258d4d06f50
|
[
"MIT"
] | null | null | null |
from .views import views
from .ghost_api import ghost_api
def register_routes(app):
app.register_blueprint(views)
app.register_blueprint(ghost_api)
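# A minimal sketch, assuming Flask, of wiring register_routes into an app
# factory (create_app is a hypothetical name, not part of this file):
from flask import Flask

def create_app():
    app = Flask(__name__)
    register_routes(app)  # attaches the views and ghost_api blueprints
    return app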
| 21.857143
| 35
| 0.816993
| 23
| 153
| 5.173913
| 0.434783
| 0.201681
| 0.336134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 153
| 6
| 36
| 25.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cde2304167b03a9012a8c7416e16f4f2b7bb3fc0
| 173
|
py
|
Python
|
travelist/suggestionui.py
|
ibz/travelist
|
45b90ab01ad1fe2d37d5e70b20c2f46dd8d8caa9
|
[
"MIT"
] | null | null | null |
travelist/suggestionui.py
|
ibz/travelist
|
45b90ab01ad1fe2d37d5e70b20c2f46dd8d8caa9
|
[
"MIT"
] | null | null | null |
travelist/suggestionui.py
|
ibz/travelist
|
45b90ab01ad1fe2d37d5e70b20c2f46dd8d8caa9
|
[
"MIT"
] | null | null | null |
from travelist import models
from travelist import ui
class EditForm(ui.ModelForm):
class Meta:
model = models.Suggestion
fields = ('type', 'comments')
| 21.625
| 37
| 0.682081
| 20
| 173
| 5.9
| 0.7
| 0.220339
| 0.322034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231214
| 173
| 7
| 38
| 24.714286
| 0.887218
| 0
| 0
| 0
| 0
| 0
| 0.069364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b554426f324b025f5e25140155615bbf99a1a925
| 206
|
py
|
Python
|
api/common/__init__.py
|
carreath/SWE4103-Project
|
1cdb1f20f982f769799de349511197c8b80c0119
|
[
"MIT"
] | null | null | null |
api/common/__init__.py
|
carreath/SWE4103-Project
|
1cdb1f20f982f769799de349511197c8b80c0119
|
[
"MIT"
] | 2
|
2021-03-20T05:00:23.000Z
|
2021-06-02T03:00:38.000Z
|
api/common/__init__.py
|
carreath/SWE4103-Project
|
1cdb1f20f982f769799de349511197c8b80c0119
|
[
"MIT"
] | null | null | null |
from common.DatabaseConnector import DatabaseConnector
from common.DatabaseMigrator import DatabaseMigrator
from common.TokenHandler import TokenHandler
from common.PrivilegeHandler import PrivilegeHandler
| 41.2
| 54
| 0.902913
| 20
| 206
| 9.3
| 0.35
| 0.215054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07767
| 206
| 4
| 55
| 51.5
| 0.978947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b5b0c5b61ec76ed656d6e31ed65fab8fe2f5f42e
| 49
|
py
|
Python
|
lopc/lopc.py
|
lukauskas/python-lopc
|
6b2f65a4f8184299d3286a868b25972610acefc9
|
[
"MIT"
] | null | null | null |
lopc/lopc.py
|
lukauskas/python-lopc
|
6b2f65a4f8184299d3286a868b25972610acefc9
|
[
"MIT"
] | null | null | null |
lopc/lopc.py
|
lukauskas/python-lopc
|
6b2f65a4f8184299d3286a868b25972610acefc9
|
[
"MIT"
] | null | null | null |
def lopc(data, threshold):
    # placeholder implementation: the real computation is not yet written,
    # so an empty result pair is returned
    return None, None
| 16.333333
| 26
| 0.693878
| 7
| 49
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 49
| 3
| 27
| 16.333333
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
a938ef3326857c3cd9013256d5e7b72e568cff26
| 117
|
py
|
Python
|
VisModel/__init__.py
|
philbull/VisModel
|
ccdad81064082efced9d4ba940cb42873d9326c9
|
[
"MIT"
] | null | null | null |
VisModel/__init__.py
|
philbull/VisModel
|
ccdad81064082efced9d4ba940cb42873d9326c9
|
[
"MIT"
] | null | null | null |
VisModel/__init__.py
|
philbull/VisModel
|
ccdad81064082efced9d4ba940cb42873d9326c9
|
[
"MIT"
] | null | null | null |
from .vismodel import *
from .vislike import *
from .fisher import *
from . import gains, sources, transform, utils
| 19.5
| 46
| 0.74359
| 15
| 117
| 5.8
| 0.6
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17094
| 117
| 5
| 47
| 23.4
| 0.896907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a93c4ceeb997b76eead8c96ba1abb38203220363
| 31
|
py
|
Python
|
ravenpy/extractors/__init__.py
|
CSHS-CWRA/RavenPy
|
279505d7270c3f796500f2cb992af1cd66dfb44c
|
[
"MIT"
] | 12
|
2020-12-07T23:07:13.000Z
|
2022-03-08T20:50:58.000Z
|
ravenpy/extractors/__init__.py
|
CSHS-CWRA/RavenPy
|
279505d7270c3f796500f2cb992af1cd66dfb44c
|
[
"MIT"
] | 119
|
2020-08-25T08:17:17.000Z
|
2022-03-30T16:12:19.000Z
|
ravenpy/extractors/__init__.py
|
CSHS-CWRA/RavenPy
|
279505d7270c3f796500f2cb992af1cd66dfb44c
|
[
"MIT"
] | 3
|
2020-12-02T17:33:13.000Z
|
2021-08-31T15:39:26.000Z
|
from .routing_product import *
| 15.5
| 30
| 0.806452
| 4
| 31
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d6179f90833b5eec137a978ad0e6f575212c368
| 43
|
py
|
Python
|
apps/User/serializers/__init__.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
apps/User/serializers/__init__.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
apps/User/serializers/__init__.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
from .UserSerializer import UserSerializer
| 21.5
| 42
| 0.883721
| 4
| 43
| 9.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d730e943594aa7197f0e6fbfe77a55033528858
| 27
|
py
|
Python
|
iromlab/socketserver/__init__.py
|
djpillen/iromlab
|
4e9d2ae4d4c542b07a63725fe19a9e68852adde0
|
[
"Apache-2.0"
] | 23
|
2016-11-18T15:12:33.000Z
|
2021-09-07T13:29:15.000Z
|
iromlab/socketserver/__init__.py
|
djpillen/iromlab
|
4e9d2ae4d4c542b07a63725fe19a9e68852adde0
|
[
"Apache-2.0"
] | 103
|
2016-10-31T14:05:43.000Z
|
2022-02-03T19:07:28.000Z
|
iromlab/socketserver/__init__.py
|
djpillen/iromlab
|
4e9d2ae4d4c542b07a63725fe19a9e68852adde0
|
[
"Apache-2.0"
] | 4
|
2017-06-04T15:38:12.000Z
|
2022-02-03T00:24:08.000Z
|
from .server import server
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d7dd60f2959729718fe8bf09c7bce47afc43b1c
| 44
|
py
|
Python
|
src/blockchain/__init__.py
|
dontru/data-blockchain
|
9f6c88c12567c9384e832bbee681b82516beecff
|
[
"MIT"
] | null | null | null |
src/blockchain/__init__.py
|
dontru/data-blockchain
|
9f6c88c12567c9384e832bbee681b82516beecff
|
[
"MIT"
] | null | null | null |
src/blockchain/__init__.py
|
dontru/data-blockchain
|
9f6c88c12567c9384e832bbee681b82516beecff
|
[
"MIT"
] | null | null | null |
from .Block import Block
from .DB import DB
| 14.666667
| 24
| 0.772727
| 8
| 44
| 4.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 25
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d93e3f4c5a1b179fd237193a06b3df3d4cd2941
| 208
|
py
|
Python
|
skilletlib/exceptions.py
|
scotchoaf/skilletlib-1
|
10ce50fdac0538d465ec20168e83cda7d25e18ce
|
[
"Apache-2.0"
] | null | null | null |
skilletlib/exceptions.py
|
scotchoaf/skilletlib-1
|
10ce50fdac0538d465ec20168e83cda7d25e18ce
|
[
"Apache-2.0"
] | null | null | null |
skilletlib/exceptions.py
|
scotchoaf/skilletlib-1
|
10ce50fdac0538d465ec20168e83cda7d25e18ce
|
[
"Apache-2.0"
] | null | null | null |
class SkilletLoaderException(BaseException):
pass
class LoginException(BaseException):
pass
class PanoplyException(BaseException):
pass
class NodeNotFoundException(BaseException):
pass
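# Usage sketch (hypothetical caller). Note these classes subclass
# BaseException rather than Exception, so a bare `except Exception:` block
# will not catch them; catch them by name (or via BaseException):
try:
    raise SkilletLoaderException("could not load skillet")
except SkilletLoaderException as err:
    print(err)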
| 13
| 44
| 0.774038
| 16
| 208
| 10.0625
| 0.4375
| 0.42236
| 0.409938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168269
| 208
| 15
| 45
| 13.866667
| 0.930636
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a5eae7d02a0840c83eeb9066f9830efe505c939d
| 140
|
py
|
Python
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test12.py
|
YangHao666666/hawq
|
10cff8350f1ba806c6fec64eb67e0e6f6f24786c
|
[
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 450
|
2015-09-05T09:12:51.000Z
|
2018-08-30T01:45:36.000Z
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test12.py
|
YangHao666666/hawq
|
10cff8350f1ba806c6fec64eb67e0e6f6f24786c
|
[
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 1,274
|
2015-09-22T20:06:16.000Z
|
2018-08-31T22:14:00.000Z
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test12.py
|
YangHao666666/hawq
|
10cff8350f1ba806c6fec64eb67e0e6f6f24786c
|
[
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 278
|
2015-09-21T19:15:06.000Z
|
2018-08-31T00:36:51.000Z
|
"shouldn't produce any warnings"
from xml.sax import handler
class GetGUI(handler.DTDHandler):
"shouldn't produce any warnings"
pass
| 15.555556
| 34
| 0.764286
| 20
| 140
| 5.35
| 0.7
| 0.149533
| 0.280374
| 0.336449
| 0.485981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 140
| 8
| 35
| 17.5
| 0.90678
| 0.435714
| 0
| 0.4
| 0
| 0
| 0.431655
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
5735a3a7a20c289792b93e6b5b5b8856c07bb501
| 64
|
py
|
Python
|
src/api/controller/__init__.py
|
sonlhcsuit/togo
|
68e79e1df3ac5b9b8b834a53345028f332abbda8
|
[
"MIT"
] | null | null | null |
src/api/controller/__init__.py
|
sonlhcsuit/togo
|
68e79e1df3ac5b9b8b834a53345028f332abbda8
|
[
"MIT"
] | null | null | null |
src/api/controller/__init__.py
|
sonlhcsuit/togo
|
68e79e1df3ac5b9b8b834a53345028f332abbda8
|
[
"MIT"
] | null | null | null |
from .auth import *
from .subscript import *
from .task import *
| 21.333333
| 24
| 0.734375
| 9
| 64
| 5.222222
| 0.555556
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 64
| 3
| 25
| 21.333333
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5754a65b704ef13bc34026175ca32344f000df7f
| 11,184
|
py
|
Python
|
foreman/data_refinery_foreman/foreman/test_processor_job_manager.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 106
|
2018-03-05T16:24:47.000Z
|
2022-03-19T19:12:25.000Z
|
foreman/data_refinery_foreman/foreman/test_processor_job_manager.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 1,494
|
2018-02-27T17:02:21.000Z
|
2022-03-24T15:10:30.000Z
|
foreman/data_refinery_foreman/foreman/test_processor_job_manager.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 15
|
2019-02-03T01:34:59.000Z
|
2022-03-29T01:59:13.000Z
|
import datetime
from unittest.mock import patch
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
from data_refinery_common.models import ProcessorJob
from data_refinery_foreman.foreman import processor_job_manager, utils
from data_refinery_foreman.foreman.test_utils import create_processor_job
# For use in tests that test the JOB_CREATED_AT_CUTOFF functionality.
DAY_BEFORE_JOB_CUTOFF = utils.JOB_CREATED_AT_CUTOFF - datetime.timedelta(days=1)
EMPTY_LIST_JOBS_QUEUE_RESPONSE = {"jobSummaryList": []}
EMPTY_DESCRIBE_JOBS_QUEUE_RESPONSE = {"jobs": []}
def fake_send_job(job_type, job, is_dispatch=False):
job.batch_job_queue = settings.AWS_BATCH_QUEUE_WORKERS_NAMES[0]
job.save()
return True
class ProcessorJobManagerTestCase(TestCase):
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
def test_repeated_processor_failures(self, mock_list_jobs, mock_send_job):
"""Jobs will be repeatedly retried."""
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
job = create_processor_job()
for i in range(utils.MAX_NUM_RETRIES):
processor_job_manager.handle_processor_jobs([job])
self.assertEqual(i + 1, len(mock_send_job.mock_calls))
jobs = ProcessorJob.objects.all().order_by("-id")
previous_job = jobs[1]
self.assertTrue(previous_job.retried)
self.assertEqual(previous_job.num_retries, i)
self.assertFalse(previous_job.success)
job = jobs[0]
self.assertFalse(job.retried)
self.assertEqual(job.num_retries, i + 1)
# Once MAX_NUM_RETRIES has been hit handle_repeated_failure
# should be called.
processor_job_manager.handle_processor_jobs([job])
last_job = ProcessorJob.objects.all().order_by("-id")[0]
self.assertTrue(last_job.retried)
self.assertEqual(last_job.num_retries, utils.MAX_NUM_RETRIES)
self.assertFalse(last_job.success)
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
def test_retrying_failed_processor_jobs(self, mock_list_jobs, mock_send_job):
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
job = create_processor_job()
job.success = False
job.save()
processor_job_manager.retry_failed_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 1)
jobs = ProcessorJob.objects.order_by("id")
original_job = jobs[0]
self.assertTrue(original_job.retried)
self.assertEqual(original_job.num_retries, 0)
self.assertFalse(original_job.success)
retried_job = jobs[1]
self.assertEqual(retried_job.num_retries, 1)
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
@patch("data_refinery_foreman.foreman.utils.batch.describe_jobs")
def test_retrying_hung_processor_jobs(self, mock_describe_jobs, mock_list_jobs, mock_send_job):
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
mock_describe_jobs.return_value = {"jobs": [{"jobId": "FINDME", "status": "FAILED"}]}
job = create_processor_job()
job.start_time = timezone.now()
job.batch_job_id = "FINDME"
job.save()
job2 = create_processor_job()
job2.start_time = timezone.now()
job2.batch_job_id = "MISSING"
job2.save()
processor_job_manager.retry_hung_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 2)
jobs = ProcessorJob.objects.order_by("id")
original_job = jobs[0]
self.assertTrue(original_job.retried)
self.assertEqual(original_job.num_retries, 0)
self.assertFalse(original_job.success)
original_job2 = jobs[1]
self.assertTrue(original_job2.retried)
self.assertEqual(original_job2.num_retries, 0)
self.assertFalse(original_job2.success)
retried_job = jobs[2]
self.assertEqual(retried_job.num_retries, 1)
retried_job2 = jobs[3]
self.assertEqual(retried_job2.num_retries, 1)
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
@patch("data_refinery_foreman.foreman.utils.batch.describe_jobs")
def test_not_retrying_hung_processor_jobs(
self, mock_describe_jobs, mock_list_jobs, mock_send_job
):
"""Tests that we don't restart processor jobs that are still running."""
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
mock_describe_jobs.return_value = {"jobs": [{"jobId": "FINDME", "status": "RUNNING"}]}
job = create_processor_job()
job.start_time = timezone.now()
job.batch_job_id = "FINDME"
job.save()
processor_job_manager.retry_hung_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 0)
jobs = ProcessorJob.objects.order_by("id")
original_job = jobs[0]
self.assertFalse(original_job.retried)
self.assertEqual(original_job.num_retries, 0)
self.assertEqual(original_job.success, None)
self.assertEqual(jobs.count(), 1)
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
@patch("data_refinery_foreman.foreman.utils.batch.describe_jobs")
def test_retrying_lost_processor_jobs(self, mock_describe_jobs, mock_list_jobs, mock_send_job):
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
mock_describe_jobs.return_value = EMPTY_DESCRIBE_JOBS_QUEUE_RESPONSE
job = create_processor_job()
job.save()
job2 = create_processor_job()
job2.batch_job_id = "MISSING"
job2.save()
processor_job_manager.retry_lost_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 2)
jobs = ProcessorJob.objects.order_by("id")
original_job = jobs[0]
self.assertTrue(original_job.retried)
self.assertEqual(original_job.num_retries, 0)
self.assertFalse(original_job.success)
original_job2 = jobs[1]
self.assertTrue(original_job2.retried)
self.assertEqual(original_job2.num_retries, 0)
self.assertFalse(original_job2.success)
retried_job = jobs[2]
self.assertEqual(retried_job.num_retries, 1)
retried_job2 = jobs[3]
self.assertEqual(retried_job2.num_retries, 1)
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
@patch("data_refinery_foreman.foreman.utils.batch.describe_jobs")
def test_retrying_lost_smasher_jobs(self, mock_describe_jobs, mock_list_jobs, mock_send_job):
"""Make sure that the smasher jobs will get retried even though they
don't have a volume_index.
        I'm not entirely sure this test is still necessary, but we'll
        need a separate smasher compute environment, so this could test
        that once it's done.
"""
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
mock_describe_jobs.return_value = EMPTY_DESCRIBE_JOBS_QUEUE_RESPONSE
job = create_processor_job(pipeline="SMASHER")
job.volume_index = None # Smasher jobs won't have a volume_index.
job.save()
processor_job_manager.retry_lost_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 1)
jobs = ProcessorJob.objects.order_by("id")
original_job = jobs[0]
self.assertTrue(original_job.retried)
self.assertEqual(original_job.num_retries, 0)
self.assertFalse(original_job.success)
retried_job = jobs[1]
self.assertEqual(retried_job.num_retries, 1)
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
@patch("data_refinery_foreman.foreman.utils.batch.describe_jobs")
def test_not_retrying_old_processor_jobs(
self, mock_describe_jobs, mock_list_jobs, mock_send_job
):
"""Makes sure temporary logic to limit the Foreman's scope works."""
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
mock_describe_jobs.return_value = EMPTY_DESCRIBE_JOBS_QUEUE_RESPONSE
job = create_processor_job()
job.created_at = DAY_BEFORE_JOB_CUTOFF
job.save()
processor_job_manager.retry_lost_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 0)
self.assertEqual(1, ProcessorJob.objects.all().count())
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
@patch("data_refinery_foreman.foreman.utils.batch.describe_jobs")
def test_not_retrying_lost_processor_jobs(
self, mock_describe_jobs, mock_list_jobs, mock_send_job
):
"""Make sure that we don't retry processor jobs we shouldn't."""
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
mock_describe_jobs.return_value = {"jobs": [{"jobId": "FINDME", "status": "RUNNABLE"}]}
job = create_processor_job()
job.batch_job_id = "FINDME"
job.save()
processor_job_manager.retry_lost_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 0)
jobs = ProcessorJob.objects.order_by("id")
original_job = jobs[0]
self.assertFalse(original_job.retried)
self.assertEqual(original_job.num_retries, 0)
self.assertEqual(original_job.success, None)
# Make sure no additional job was created.
self.assertEqual(jobs.count(), 1)
@patch("data_refinery_foreman.foreman.job_requeuing.send_job")
@patch("data_refinery_common.message_queue.batch.list_jobs")
@patch("data_refinery_foreman.foreman.utils.batch.describe_jobs")
def test_not_retrying_janitor_jobs(self, mock_describe_jobs, mock_list_jobs, mock_send_job):
mock_send_job.side_effect = fake_send_job
mock_list_jobs.return_value = EMPTY_LIST_JOBS_QUEUE_RESPONSE
mock_describe_jobs.return_value = EMPTY_DESCRIBE_JOBS_QUEUE_RESPONSE
job = create_processor_job(pipeline="JANITOR")
job.save()
processor_job_manager.retry_lost_processor_jobs()
self.assertEqual(len(mock_send_job.mock_calls), 0)
jobs = ProcessorJob.objects.order_by("id")
self.assertEqual(len(jobs), 1)
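# Note on the stacked @patch decorators used throughout this file (standard
# unittest.mock behavior, not project-specific): decorators apply bottom-up,
# so the lowest @patch supplies the first mock argument. A self-contained
# illustration using stdlib targets:
from unittest.mock import patch

@patch("os.path.exists")   # outermost patch -> last mock argument
@patch("os.path.isfile")   # innermost patch -> first mock argument
def _illustrate_patch_order(mock_isfile, mock_exists):
    import os
    mock_isfile.return_value = True
    mock_exists.return_value = False
    return os.path.isfile("x"), os.path.exists("x")

print(_illustrate_patch_order())  # (True, False)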
| 40.669091
| 99
| 0.719868
| 1,479
| 11,184
| 5.060176
| 0.11359
| 0.043025
| 0.039685
| 0.062533
| 0.795697
| 0.769776
| 0.754276
| 0.732229
| 0.731828
| 0.731828
| 0
| 0.007285
| 0.189914
| 11,184
| 274
| 100
| 40.817518
| 0.818764
| 0.061338
| 0
| 0.75
| 0
| 0
| 0.141076
| 0.124964
| 0
| 0
| 0
| 0
| 0.255
| 1
| 0.05
| false
| 0
| 0.04
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9390a131ed634929c5736ca4434dd254524ba6f4
| 33
|
py
|
Python
|
src/tf_service/__init__.py
|
magazino/tf_service
|
da63e90b062a57eb1280b589ef8f249be5d422c4
|
[
"Apache-2.0"
] | 17
|
2019-12-11T14:26:21.000Z
|
2022-01-30T03:41:40.000Z
|
src/tf_service/__init__.py
|
jspricke/tf_service
|
bc32d03f3fb567c0be15d048ed989dfe17150744
|
[
"Apache-2.0"
] | 8
|
2019-12-13T14:45:32.000Z
|
2022-02-14T16:22:30.000Z
|
src/tf_service/__init__.py
|
jspricke/tf_service
|
bc32d03f3fb567c0be15d048ed989dfe17150744
|
[
"Apache-2.0"
] | 2
|
2020-07-29T08:47:50.000Z
|
2021-12-13T10:38:39.000Z
|
from .client import BufferClient
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
93c44889510820fe000da734dfeefcb5cdd8a36d
| 40
|
py
|
Python
|
rdfizer/rdfizer/semantify.py
|
daniel-dona/SDM-RDFizer
|
05a281c03fa32a2266d7dc735f6683f0dff99b81
|
[
"Apache-2.0"
] | null | null | null |
rdfizer/rdfizer/semantify.py
|
daniel-dona/SDM-RDFizer
|
05a281c03fa32a2266d7dc735f6683f0dff99b81
|
[
"Apache-2.0"
] | null | null | null |
rdfizer/rdfizer/semantify.py
|
daniel-dona/SDM-RDFizer
|
05a281c03fa32a2266d7dc735f6683f0dff99b81
|
[
"Apache-2.0"
] | null | null | null |
print("CODE MOVED TO __init__.py !!!")
| 13.333333
| 38
| 0.65
| 6
| 40
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 40
| 2
| 39
| 20
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0.74359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
9e102722d66e6c925b828d77c017e80a731bf4b2
| 145
|
py
|
Python
|
example/mapping.py
|
oarepo/invenio-oarepo-oai-pmh-harvester
|
399ef743ac9da23d36e655e072aa72ee1b332372
|
[
"MIT"
] | null | null | null |
example/mapping.py
|
oarepo/invenio-oarepo-oai-pmh-harvester
|
399ef743ac9da23d36e655e072aa72ee1b332372
|
[
"MIT"
] | 13
|
2020-11-04T13:47:55.000Z
|
2021-04-15T17:56:33.000Z
|
example/mapping.py
|
oarepo/oarepo-oai-pmh-harvester
|
399ef743ac9da23d36e655e072aa72ee1b332372
|
[
"MIT"
] | 1
|
2020-05-14T07:59:12.000Z
|
2020-05-14T07:59:12.000Z
|
from oarepo_oai_pmh_harvester.decorators import endpoint_handler
@endpoint_handler("uk", "xoai")
def mapping_handler(data):
return "recid"
| 20.714286
| 64
| 0.793103
| 19
| 145
| 5.736842
| 0.842105
| 0.275229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110345
| 145
| 6
| 65
| 24.166667
| 0.844961
| 0
| 0
| 0
| 0
| 0
| 0.075862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f5160c9aeffc7e8355d1a85f6c323f3b4d26c768
| 29
|
py
|
Python
|
spopt/region/spenclib/__init__.py
|
fiendskrah/spopt
|
b0f4b682f9246670241c415c4023fcb3e596c372
|
[
"BSD-3-Clause"
] | 135
|
2019-03-01T19:27:06.000Z
|
2022-03-15T18:47:40.000Z
|
spopt/region/spenclib/__init__.py
|
fiendskrah/spopt
|
b0f4b682f9246670241c415c4023fcb3e596c372
|
[
"BSD-3-Clause"
] | 166
|
2019-03-02T00:23:53.000Z
|
2022-03-31T00:33:32.000Z
|
spopt/region/spenclib/__init__.py
|
fiendskrah/spopt
|
b0f4b682f9246670241c415c4023fcb3e596c372
|
[
"BSD-3-Clause"
] | 25
|
2019-03-01T19:16:00.000Z
|
2022-03-09T15:15:34.000Z
|
from .abstracts import SPENC
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
192d458f3ebd4044e04aa2f3613df9ab32bfb278
| 123
|
py
|
Python
|
UTrackGUI/widgets/__init__.py
|
uetke/UTrack
|
efab70bf2e1dddf76e1b7e3a0efbdd611ea856de
|
[
"MIT"
] | null | null | null |
UTrackGUI/widgets/__init__.py
|
uetke/UTrack
|
efab70bf2e1dddf76e1b7e3a0efbdd611ea856de
|
[
"MIT"
] | null | null | null |
UTrackGUI/widgets/__init__.py
|
uetke/UTrack
|
efab70bf2e1dddf76e1b7e3a0efbdd611ea856de
|
[
"MIT"
] | null | null | null |
from .video_widget import VideoWidget
from .options_widget import OptionsWidget
from .analysis_widget import AnalysisWidget
| 41
| 43
| 0.886179
| 15
| 123
| 7.066667
| 0.6
| 0.339623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089431
| 123
| 3
| 43
| 41
| 0.946429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1943548230dd8b0b8cf01e0665ee7aba4acdcddc
| 3,227
|
py
|
Python
|
ModelPool.py
|
batumoglu/Home_Credit
|
bf3f918bafdc0e9be1c24809068fac1242fff881
|
[
"Apache-2.0"
] | 1
|
2019-11-04T08:49:34.000Z
|
2019-11-04T08:49:34.000Z
|
ModelPool.py
|
batumoglu/Home_Credit
|
bf3f918bafdc0e9be1c24809068fac1242fff881
|
[
"Apache-2.0"
] | null | null | null |
ModelPool.py
|
batumoglu/Home_Credit
|
bf3f918bafdc0e9be1c24809068fac1242fff881
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 18:40:02 2018
@author: ozkan
"""
""" Standard Libraries """
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold
import gc
""" ModelRunner functions """
from Tasks import Task
""" Models """
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
# Define CatBoost_v1 model to run it on model runner framework
class CatBoost_v1(Task):
def __init__(self, name):
Task.__init__(self, name)
def Run(self):
# Datasets
x_train = self.Data.X_Train
x_test = self.Data.X_Test
y_train = self.Data.Y_Train
# Model
oof_preds = np.zeros(x_train.shape[0])
sub_preds = np.zeros(x_test.shape[0])
folds = KFold(n_splits=5, shuffle=True, random_state=1453)
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(x_train)):
trn_X, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_X, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
clf = CatBoostClassifier(eval_metric='AUC')
clf.fit(trn_X, trn_y)
oof_preds[val_idx] = clf.predict_proba(val_X)[:,1]
sub_preds += clf.predict_proba(x_test)[:,1] / folds.n_splits
del clf, trn_X, trn_y, val_X, val_y
gc.collect()
# Calculate and submit score
roc_auc = roc_auc_score(y_train, oof_preds)
self.SubmitScore("AUC",roc_auc)
# Prepare submission results
sub = pd.read_csv('../input/sample_submission.csv')
sub['TARGET'] = sub_preds
sub.to_csv('AllData_v3_Installments_CatBoost_v1.csv', index=False)
# Define LightGBM_v1 model to run it on model runner framework
class LightGBM_v1(Task):
def __init__(self, name):
Task.__init__(self, name)
def Run(self):
# Datasets
x_train = self.Data.X_Train
x_test = self.Data.X_Test
y_train = self.Data.Y_Train
# Model
oof_preds = np.zeros(x_train.shape[0])
sub_preds = np.zeros(x_test.shape[0])
folds = KFold(n_splits=5, shuffle=True, random_state=1453)
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(x_train)):
trn_X, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_X, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
clf = LGBMClassifier()
clf.fit(trn_X, trn_y, eval_metric='auc')
oof_preds[val_idx] = clf.predict_proba(val_X)[:,1]
sub_preds += clf.predict_proba(x_test)[:,1] / folds.n_splits
del clf, trn_X, trn_y, val_X, val_y
gc.collect()
# Calculate and submit score
roc_auc = roc_auc_score(y_train, oof_preds)
self.SubmitScore("AUC",roc_auc)
# Prepare submission results
sub = pd.read_csv('../input/sample_submission.csv')
sub['TARGET'] = sub_preds
sub.to_csv('AllData_v3_Installments_LightGBM_v1.csv', index=False)
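# The two Run methods above share the standard out-of-fold (OOF) prediction
# pattern; a minimal self-contained sketch of the same idea on dummy data
# (LogisticRegression stands in for the boosted models):
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = np.random.rand(100, 5), np.random.randint(0, 2, 100)
oof = np.zeros(len(X))
for trn_idx, val_idx in KFold(n_splits=5, shuffle=True, random_state=1453).split(X):
    model = LogisticRegression().fit(X[trn_idx], y[trn_idx])
    oof[val_idx] = model.predict_proba(X[val_idx])[:, 1]  # out-of-fold probabilities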
| 32.928571
| 75
| 0.610164
| 460
| 3,227
| 3.982609
| 0.245652
| 0.039301
| 0.022926
| 0.026201
| 0.753275
| 0.753275
| 0.737991
| 0.737991
| 0.737991
| 0.737991
| 0
| 0.017286
| 0.282925
| 3,227
| 97
| 76
| 33.268041
| 0.774417
| 0.110629
| 0
| 0.714286
| 0
| 0
| 0.060335
| 0.051397
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1950176a6d1c8006615b13836f8ff275f7ac8088
| 215
|
py
|
Python
|
doctr/transforms/modules/__init__.py
|
Pandinosaurus/doctr
|
3d645ce7d3d4fe36aa53537d4e4f92507f6cd422
|
[
"Apache-2.0"
] | 628
|
2021-02-13T21:49:37.000Z
|
2022-03-31T19:48:57.000Z
|
__init__.py
|
jyotidabass/document_text_recognition
|
7bbdf4b1e5f7e9a28a7047dcd13eb2a5501643ef
|
[
"Apache-2.0"
] | 694
|
2021-02-08T15:23:38.000Z
|
2022-03-31T07:24:59.000Z
|
__init__.py
|
jyotidabass/document_text_recognition
|
7bbdf4b1e5f7e9a28a7047dcd13eb2a5501643ef
|
[
"Apache-2.0"
] | 90
|
2021-04-28T05:39:02.000Z
|
2022-03-31T06:48:36.000Z
|
from doctr.file_utils import is_tf_available, is_torch_available
from .base import *
if is_tf_available():
from .tensorflow import *
elif is_torch_available():
from .pytorch import * # type: ignore[misc]
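# A minimal sketch of the backend-availability pattern used above. doctr's
# real checks live in doctr.file_utils; this stand-in (hypothetical name)
# simply probes for an installed package:
import importlib.util

def _is_backend_available(package_name: str) -> bool:
    return importlib.util.find_spec(package_name) is not None

# e.g. _is_backend_available("tensorflow"), _is_backend_available("torch")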
| 23.888889
| 64
| 0.753488
| 31
| 215
| 4.935484
| 0.548387
| 0.254902
| 0.169935
| 0.261438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 215
| 8
| 65
| 26.875
| 0.85
| 0.083721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
196acb797fffb584decf31359bc95f8af70caf66
| 131
|
py
|
Python
|
supermario/supermario 1117/mygame.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
supermario/supermario 1117/mygame.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
supermario/supermario 1117/mygame.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
import game_framework
import pico2d
import main_state
pico2d.open_canvas()
game_framework.run(main_state)
pico2d.close_canvas()
| 13.1
| 30
| 0.839695
| 19
| 131
| 5.473684
| 0.526316
| 0.25
| 0.288462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02521
| 0.091603
| 131
| 9
| 31
| 14.555556
| 0.84874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
196b1d82d62d47b9013a2c2b1abad91cb46366c4
| 1,059
|
py
|
Python
|
examples/multi_ex_em_matrix.py
|
herzig/cary_reader
|
2fd70a9aaf4313914ea823517556069eadebc74b
|
[
"MIT"
] | 1
|
2020-10-15T13:00:26.000Z
|
2020-10-15T13:00:26.000Z
|
examples/multi_ex_em_matrix.py
|
herzig/cary_reader
|
2fd70a9aaf4313914ea823517556069eadebc74b
|
[
"MIT"
] | null | null | null |
examples/multi_ex_em_matrix.py
|
herzig/cary_reader
|
2fd70a9aaf4313914ea823517556069eadebc74b
|
[
"MIT"
] | null | null | null |
# allows import of package from parent directory
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from cary_reader import CaryData
# four samples
data = CaryData.from_csv('test_data/multi_sample_matrix_4s.csv', skiplog=True)
dataframes = data.get_multisample_ex_em_matrix()
# dataframes is now a dictionary of pandas data frames with the sample name as key. Each dataframe is an excitation emission matrix
# some basic tests
assert len(dataframes) == 4
assert all([s.shape == (226,31) for _,s in dataframes.items()]) # all ex-em matrices must have the same shape
# three samples
data = CaryData.from_csv('test_data/multi_sample_matrix_3s.csv', skiplog=True)
dataframes = data.get_multisample_ex_em_matrix()
# dataframes is now a dictionary of pandas data frames with the sample name as key. Each dataframe is an excitation emission matrix
# some basic tests
assert len(dataframes) == 3
assert all([s.shape == (226,31) for _,s in dataframes.items()]) # all ex-em matrices must have the same shape
| 37.821429
| 131
| 0.771483
| 172
| 1,059
| 4.604651
| 0.418605
| 0.020202
| 0.04798
| 0.058081
| 0.805556
| 0.805556
| 0.805556
| 0.805556
| 0.805556
| 0.805556
| 0
| 0.016376
| 0.135033
| 1,059
| 27
| 132
| 39.222222
| 0.848253
| 0.429651
| 0
| 0.333333
| 0
| 0
| 0.124579
| 0.121212
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1981e1a7e15289cb2e14c40ead2b376614634722
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/tenacity/retry.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/tenacity/retry.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/tenacity/retry.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c6/c9/0b/19ad7912c3613cf3a621470a4bb0aa8da440b56c4614d3cd4638f3f545
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
19aa4fc38d12d7eceb47e01ddb1245d0110bcfa2
| 368
|
py
|
Python
|
esst/commands/__init__.py
|
etcher-be/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | 4
|
2018-06-24T14:03:44.000Z
|
2019-01-21T01:20:02.000Z
|
esst/commands/__init__.py
|
etcher-be/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | 106
|
2018-06-24T13:59:52.000Z
|
2019-11-26T09:05:14.000Z
|
esst/commands/__init__.py
|
theendsofinvention/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Globally available commands
"""
# noinspection PyUnresolvedReferences
from esst.dcs.commands import DCS
# noinspection PyUnresolvedReferences
from esst.discord_bot.commands import DISCORD
# noinspection PyUnresolvedReferences
from esst.listener.commands import LISTENER
# noinspection PyUnresolvedReferences
from esst.server.commands import SERVER
| 26.285714
| 45
| 0.845109
| 39
| 368
| 7.948718
| 0.410256
| 0.43871
| 0.490323
| 0.541935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003012
| 0.097826
| 368
| 13
| 46
| 28.307692
| 0.930723
| 0.502717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5fea5f8b54d9687121881cf83014ab96c9d6c994
| 93
|
py
|
Python
|
models/__init__.py
|
YooshinCho/pytorch_Convolutional_Unit_Optimization
|
5e405eb410a7cf07839b1dcaf8fb0a422f07d1a7
|
[
"MIT"
] | 8
|
2021-09-11T01:30:47.000Z
|
2022-03-14T06:06:39.000Z
|
models/__init__.py
|
YooshinCho/pytorch_Convolutional_Unit_Optimization
|
5e405eb410a7cf07839b1dcaf8fb0a422f07d1a7
|
[
"MIT"
] | 1
|
2021-09-10T22:59:39.000Z
|
2021-09-12T09:11:39.000Z
|
models/__init__.py
|
YooshinCho/pytorch_Convolutional_Unit_Optimization
|
5e405eb410a7cf07839b1dcaf8fb0a422f07d1a7
|
[
"MIT"
] | 1
|
2021-08-24T02:21:10.000Z
|
2021-08-24T02:21:10.000Z
|
from .resnet import *
from .shiftresnet import *
from .shiftnetA import *
from .wrn import *
| 18.6
| 26
| 0.741935
| 12
| 93
| 5.75
| 0.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172043
| 93
| 4
| 27
| 23.25
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
277dc2e0873f50ca158b6b3256ff5e64967d096b
| 23
|
py
|
Python
|
__init__.py
|
abonaca/myutils
|
1cd9522f08ed7bc856a22ea7442e69b11ab12c2f
|
[
"MIT"
] | null | null | null |
__init__.py
|
abonaca/myutils
|
1cd9522f08ed7bc856a22ea7442e69b11ab12c2f
|
[
"MIT"
] | null | null | null |
__init__.py
|
abonaca/myutils
|
1cd9522f08ed7bc856a22ea7442e69b11ab12c2f
|
[
"MIT"
] | null | null | null |
from .myutils import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fd7e22a8e6b16f3187f1cebe0ad0f98cdecfc86a
| 62
|
py
|
Python
|
tartiflette/types/helpers/__init__.py
|
erezsh/tartiflette
|
c945b02e9025e2524393c1eaec2191745bfc38f4
|
[
"MIT"
] | null | null | null |
tartiflette/types/helpers/__init__.py
|
erezsh/tartiflette
|
c945b02e9025e2524393c1eaec2191745bfc38f4
|
[
"MIT"
] | null | null | null |
tartiflette/types/helpers/__init__.py
|
erezsh/tartiflette
|
c945b02e9025e2524393c1eaec2191745bfc38f4
|
[
"MIT"
] | null | null | null |
from tartiflette.types.helpers.reduce_type import reduce_type
| 31
| 61
| 0.887097
| 9
| 62
| 5.888889
| 0.777778
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 62
| 1
| 62
| 62
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fdc116e773d084f7c3c8c3c18643ee69dafe2d70
| 3,057
|
py
|
Python
|
pycones/proposals/migrations/0006_auto_20150713_1710.py
|
python-spain/PyConES2015
|
af78ad7f1d7df747a2f5428be87a5b061457dd24
|
[
"MIT"
] | null | null | null |
pycones/proposals/migrations/0006_auto_20150713_1710.py
|
python-spain/PyConES2015
|
af78ad7f1d7df747a2f5428be87a5b061457dd24
|
[
"MIT"
] | null | null | null |
pycones/proposals/migrations/0006_auto_20150713_1710.py
|
python-spain/PyConES2015
|
af78ad7f1d7df747a2f5428be87a5b061457dd24
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markupfield.fields
class Migration(migrations.Migration):
dependencies = [
('proposals', '0005_auto_20150713_1705'),
]
operations = [
migrations.AlterField(
model_name='proposalbase',
name='abstract',
field=markupfield.fields.MarkupField(verbose_name='Resumen detallado', rendered_field=True, help_text="Detailed outline. Will be made public if your proposal is accepted. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", default=''),
),
migrations.AlterField(
model_name='proposalbase',
name='abstract_en',
field=markupfield.fields.MarkupField(null=True, verbose_name='Resumen detallado', rendered_field=True, help_text="Detailed outline. Will be made public if your proposal is accepted. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", default=''),
),
migrations.AlterField(
model_name='proposalbase',
name='abstract_es',
field=markupfield.fields.MarkupField(null=True, verbose_name='Resumen detallado', rendered_field=True, help_text="Detailed outline. Will be made public if your proposal is accepted. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", default=''),
),
migrations.AlterField(
model_name='proposalbase',
name='additional_notes',
field=markupfield.fields.MarkupField(verbose_name='Notas adicionales', blank=True, rendered_field=True, help_text="Anything else you'd like the program committee to know when making their selection: your past experience, etc. This is not made public. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", default=''),
),
migrations.AlterField(
model_name='proposalbase',
name='additional_notes_en',
field=markupfield.fields.MarkupField(null=True, rendered_field=True, help_text="Anything else you'd like the program committee to know when making their selection: your past experience, etc. This is not made public. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", verbose_name='Notas adicionales', default='', blank=True),
),
migrations.AlterField(
model_name='proposalbase',
name='additional_notes_es',
field=markupfield.fields.MarkupField(null=True, rendered_field=True, help_text="Anything else you'd like the program committee to know when making their selection: your past experience, etc. This is not made public. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", verbose_name='Notas adicionales', default='', blank=True),
),
]
| 66.456522
| 390
| 0.699706
| 361
| 3,057
| 5.800554
| 0.238227
| 0.056829
| 0.071633
| 0.083095
| 0.903056
| 0.903056
| 0.866285
| 0.837154
| 0.8085
| 0.8085
| 0
| 0.006789
| 0.180896
| 3,057
| 45
| 391
| 67.933333
| 0.829473
| 0.006869
| 0
| 0.564103
| 0
| 0.153846
| 0.497034
| 0.064931
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fdf0beb28b14217f34923c1b669a06817d8607a9
| 180
|
py
|
Python
|
src/siism2015/views.py
|
moertoe1/moertoe123.github.io
|
02cb11b0a5f7b83200ee941aca750d620eb00ed5
|
[
"MIT"
] | null | null | null |
src/siism2015/views.py
|
moertoe1/moertoe123.github.io
|
02cb11b0a5f7b83200ee941aca750d620eb00ed5
|
[
"MIT"
] | null | null | null |
src/siism2015/views.py
|
moertoe1/moertoe123.github.io
|
02cb11b0a5f7b83200ee941aca750d620eb00ed5
|
[
"MIT"
] | null | null | null |
from django.views import generic
class HomePage(generic.TemplateView):
template_name = "index.html"
class AboutPage(generic.TemplateView):
template_name = "about.html"
| 18
| 38
| 0.761111
| 21
| 180
| 6.428571
| 0.666667
| 0.281481
| 0.4
| 0.459259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 180
| 9
| 39
| 20
| 0.876623
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
e32f2cc3368107823778ece080e6d1a4d2473dc8
| 207
|
py
|
Python
|
codes_/1480_Running_Sum_of_1d_Array.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
codes_/1480_Running_Sum_of_1d_Array.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
codes_/1480_Running_Sum_of_1d_Array.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
# %% [1480. Running Sum of 1d Array](https://leetcode.com/problems/running-sum-of-1d-array/)
import itertools
from typing import List

class Solution:
    def runningSum(self, nums: List[int]) -> List[int]:
        # list() realizes the iterator; [1, 2, 3, 4] -> [1, 3, 6, 10]
        return list(itertools.accumulate(nums))
| 41.4
| 92
| 0.690821
| 29
| 207
| 4.931034
| 0.724138
| 0.13986
| 0.167832
| 0.195804
| 0.265734
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.144928
| 207
| 4
| 93
| 51.75
| 0.774011
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e36d5d426ccf47d582e07910540adae239c728a1
| 103
|
py
|
Python
|
openpds/visualization/views.py
|
pmundt/openPDS
|
6287d5946627cdafbfb3b0bc617b21eb2431f55e
|
[
"MIT"
] | 67
|
2015-01-05T17:13:34.000Z
|
2021-08-17T16:30:10.000Z
|
openpds/visualization/views.py
|
pmundt/openPDS
|
6287d5946627cdafbfb3b0bc617b21eb2431f55e
|
[
"MIT"
] | 19
|
2015-01-22T21:37:16.000Z
|
2018-12-02T00:58:37.000Z
|
openpds/visualization/views.py
|
pmundt/openPDS
|
6287d5946627cdafbfb3b0bc617b21eb2431f55e
|
[
"MIT"
] | 29
|
2015-01-05T17:13:40.000Z
|
2019-07-08T03:21:48.000Z
|
from django.shortcuts import render_to_response
from django.template import RequestContext
import pdb
| 20.6
| 47
| 0.873786
| 14
| 103
| 6.285714
| 0.714286
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106796
| 103
| 4
| 48
| 25.75
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8b66604d95d0c588f97aa5871b583522f0fdcb45
| 40
|
py
|
Python
|
noisytest.py
|
xifle/noisytest
|
5b55819b6be55563a7c88c142ddaa2ba5efdc0cb
|
[
"MIT"
] | null | null | null |
noisytest.py
|
xifle/noisytest
|
5b55819b6be55563a7c88c142ddaa2ba5efdc0cb
|
[
"MIT"
] | null | null | null |
noisytest.py
|
xifle/noisytest
|
5b55819b6be55563a7c88c142ddaa2ba5efdc0cb
|
[
"MIT"
] | null | null | null |
import noisytest.ui
noisytest.ui.run()
| 10
| 19
| 0.775
| 6
| 40
| 5.166667
| 0.666667
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 3
| 20
| 13.333333
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
473c3bbba1b0ff3fb528f7d2322ec449d45fa825
| 179
|
py
|
Python
|
seqreppy/config.py
|
ednilsonlomazi/seqreppy
|
7e7155e1a17b42a8a381f5e8f76029dc4a69c61a
|
[
"BSD-3-Clause"
] | 1
|
2021-03-22T15:44:59.000Z
|
2021-03-22T15:44:59.000Z
|
seqreppy/config.py
|
ednilsonlomazi/seqreppy
|
7e7155e1a17b42a8a381f5e8f76029dc4a69c61a
|
[
"BSD-3-Clause"
] | null | null | null |
seqreppy/config.py
|
ednilsonlomazi/seqreppy
|
7e7155e1a17b42a8a381f5e8f76029dc4a69c61a
|
[
"BSD-3-Clause"
] | null | null | null |
import pathlib
import sys
default_results_txt = str(pathlib.Path(__file__).parent.parent)
default_results_img = str(pathlib.Path(__file__).parent.parent)
sys.tracebacklimit = 0
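# Note (standard CPython behavior, not specific to seqreppy): setting
# sys.tracebacklimit = 0 suppresses the stack trace of uncaught exceptions,
# so only the final exception line is printed.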
| 25.571429
| 64
| 0.815642
| 25
| 179
| 5.36
| 0.52
| 0.208955
| 0.208955
| 0.268657
| 0.447761
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.083799
| 179
| 7
| 65
| 25.571429
| 0.810976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4744758b71512517f48424f56f0ea86edca1abbc
| 102
|
py
|
Python
|
run.py
|
Luvinahlc/ProjectTimeline
|
b363a1de50b14e85d759e1149c2882e8481e1d51
|
[
"BSD-3-Clause"
] | null | null | null |
run.py
|
Luvinahlc/ProjectTimeline
|
b363a1de50b14e85d759e1149c2882e8481e1d51
|
[
"BSD-3-Clause"
] | null | null | null |
run.py
|
Luvinahlc/ProjectTimeline
|
b363a1de50b14e85d759e1149c2882e8481e1d51
|
[
"BSD-3-Clause"
] | null | null | null |
#!flask/bin/python
from app import app
# app.run(debug=True)
app.run(debug=True, host='0.0.0.0')
| 20.4
| 39
| 0.666667
| 20
| 102
| 3.4
| 0.55
| 0.088235
| 0.323529
| 0.441176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.137255
| 102
| 4
| 40
| 25.5
| 0.727273
| 0.372549
| 0
| 0
| 0
| 0
| 0.112903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4750f764a787101f6a28f22661e6c85c232121be
| 109
|
py
|
Python
|
land.py
|
Vl-tb/Quad-Drone
|
3b7e165c13a95c8340fad7b10adf386bc3c01744
|
[
"MIT"
] | null | null | null |
land.py
|
Vl-tb/Quad-Drone
|
3b7e165c13a95c8340fad7b10adf386bc3c01744
|
[
"MIT"
] | null | null | null |
land.py
|
Vl-tb/Quad-Drone
|
3b7e165c13a95c8340fad7b10adf386bc3c01744
|
[
"MIT"
] | null | null | null |
from dronekit import Vehicle, VehicleMode
def land(vehicle: Vehicle):
vehicle.mode = VehicleMode("LAND")
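# Usage sketch. dronekit's connect() is real, but the connection string is
# hypothetical (e.g. a local SITL endpoint); left commented since it needs
# a live vehicle or simulator:
# from dronekit import connect
# vehicle = connect("127.0.0.1:14550", wait_ready=True)
# land(vehicle)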
| 27.25
| 41
| 0.761468
| 13
| 109
| 6.384615
| 0.615385
| 0.337349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137615
| 109
| 4
| 42
| 27.25
| 0.882979
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4781c5074d8b4e6ef03df78e29abb1671590e2ca
| 664
|
py
|
Python
|
codes/gender/Entity.py
|
yangzhou6666/BiasHeal
|
7fa060047c40e0cb569ecb42c4c2f597b62d62da
|
[
"Apache-2.0"
] | 1
|
2021-06-11T12:45:00.000Z
|
2021-06-11T12:45:00.000Z
|
bias_rv/country/Entity.py
|
soarsmu/BiasRV
|
95b4132d90babad5f453fdf1933d3ce34f9b8a5d
|
[
"MIT"
] | null | null | null |
bias_rv/country/Entity.py
|
soarsmu/BiasRV
|
95b4132d90babad5f453fdf1933d3ce34f9b8a5d
|
[
"MIT"
] | 1
|
2021-12-22T11:02:43.000Z
|
2021-12-22T11:02:43.000Z
|
class Entity:
word = ""
start = 0
end = 0
ent_type = ""
def __init__(self, word, start, end, ent_type) :
self.word = word
self.start = start
self.end = end
self.ent_type = ent_type
def __str__(self) :
return self.word
def __repr__(self) :
return self.word
def getWord(self):
return self.word
def getStart(self):
return self.start
def getEnd(self):
return self.end
def getEntityType(self):
return self.ent_type
def isPerson(self):
return self.ent_type == "PERSON" and self.word[-2:] != "'s"
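# Usage sketch with made-up values:
ent = Entity("Alice", 0, 5, "PERSON")
print(ent.getWord(), ent.isPerson())  # Alice True
print(Entity("Alice's", 0, 7, "PERSON").isPerson())  # False: "'s" suffix excluded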
| 21.419355
| 67
| 0.534639
| 81
| 664
| 4.160494
| 0.283951
| 0.207715
| 0.290801
| 0.160237
| 0.311573
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007126
| 0.365964
| 664
| 31
| 67
| 21.419355
| 0.793349
| 0
| 0
| 0.125
| 0
| 0
| 0.01203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.291667
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
47889ecbc9e4707be0ab1ca068c1990e2f6445b3
| 32
|
py
|
Python
|
tests/test_code/py/exclude_modules_two_files/exclude_modules_b.py
|
FreddyZeng/code2flow
|
37e45ca4340289f8ceec79b3fe5131c401387c58
|
[
"MIT"
] | 2,248
|
2015-01-13T21:44:22.000Z
|
2022-03-31T07:55:22.000Z
|
tests/test_code/py/exclude_modules_two_files/exclude_modules_b.py
|
FreddyZeng/code2flow
|
37e45ca4340289f8ceec79b3fe5131c401387c58
|
[
"MIT"
] | 44
|
2015-04-09T18:37:01.000Z
|
2022-03-25T19:56:11.000Z
|
tests/test_code/py/exclude_modules_two_files/exclude_modules_b.py
|
FreddyZeng/code2flow
|
37e45ca4340289f8ceec79b3fe5131c401387c58
|
[
"MIT"
] | 220
|
2015-02-02T06:35:09.000Z
|
2022-03-31T09:21:09.000Z
|
def match():
print("match")
| 10.666667
| 18
| 0.5625
| 4
| 32
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 2
| 19
| 16
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
47cbd65b8deacfe1eaaa2bdd890950c93d65af78
| 227
|
py
|
Python
|
amocrm_asterisk_ng/crm/amocrm/kernel/calls/calls_logging/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/calls_logging/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/calls_logging/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
from .AddCallToAnalyticsCommand import AddCallToAnalyticsCommand
from .AddCallToUnsortedCommand import AddCallToUnsortedCommand
from .MakeLinkFunction import IMakeLinkFunction
from .MakeLinkFunction import MakeLinkFunctionImpl
| 45.4
| 64
| 0.911894
| 16
| 227
| 12.9375
| 0.4375
| 0.193237
| 0.251208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070485
| 227
| 4
| 65
| 56.75
| 0.981043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
47e842ba4a828f1e3b4af39d76104d94e87208be
| 73
|
py
|
Python
|
mych/__init__.py
|
BhagyaDeepika/mych
|
8023337d39bb2afd9bca7db8d8b3b8bf7ce552ae
|
[
"MIT"
] | null | null | null |
mych/__init__.py
|
BhagyaDeepika/mych
|
8023337d39bb2afd9bca7db8d8b3b8bf7ce552ae
|
[
"MIT"
] | null | null | null |
mych/__init__.py
|
BhagyaDeepika/mych
|
8023337d39bb2afd9bca7db8d8b3b8bf7ce552ae
|
[
"MIT"
] | null | null | null |
from mych.functions import average, power
from mych.greet import SayHello
| 36.5
| 41
| 0.849315
| 11
| 73
| 5.636364
| 0.727273
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 42
| 36.5
| 0.953846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9a00ef7f420cb0055e03065e9b624c54fedb39ae
| 45
|
py
|
Python
|
production_linux/configure_kubuntu.py
|
erikhvatum/zplab-IT
|
2a474a122fd4f790e9199056a0b4733a52585f81
|
[
"MIT"
] | null | null | null |
production_linux/configure_kubuntu.py
|
erikhvatum/zplab-IT
|
2a474a122fd4f790e9199056a0b4733a52585f81
|
[
"MIT"
] | null | null | null |
production_linux/configure_kubuntu.py
|
erikhvatum/zplab-IT
|
2a474a122fd4f790e9199056a0b4733a52585f81
|
[
"MIT"
] | null | null | null |
#
from pathlib import Path
import subprocess
| 11.25
| 24
| 0.822222
| 6
| 45
| 6.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 4
| 25
| 11.25
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9a280710a96875d6f5c6ea34e8e2f053282e95b1
| 7,139
|
py
|
Python
|
Supported Languages/Python/smash/controllers/advanced_logging.py
|
SMASH-INC/API
|
d0679f199f786aa24f0510df078b4318c27dcc0f
|
[
"MIT"
] | null | null | null |
Supported Languages/Python/smash/controllers/advanced_logging.py
|
SMASH-INC/API
|
d0679f199f786aa24f0510df078b4318c27dcc0f
|
[
"MIT"
] | null | null | null |
Supported Languages/Python/smash/controllers/advanced_logging.py
|
SMASH-INC/API
|
d0679f199f786aa24f0510df078b4318c27dcc0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
smash.controllers.advanced_logging
This file was automatically generated for SMASH by SMASH v2.0 ( https://smashlabs.io ).
"""
import logging
from .base_controller import BaseController
from ..api_helper import APIHelper
from ..configuration import Configuration
from ..http.auth.custom_auth import CustomAuth
from ..models.logging_setup_model_response import LoggingSetupModelResponse
from ..models.logging_logs_model_response import LoggingLogsModelResponse
class AdvancedLogging(BaseController):
"""A Controller to access Endpoints in the smash API."""
def __init__(self, client=None, call_back=None):
super(AdvancedLogging, self).__init__(client, call_back)
self.logger = logging.getLogger(__name__)
def logging_configuration(self,
options=dict()):
"""Does a GET request to /s/l.
WAF Log Configuration
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
name -- string -- Name of the WAF zone
origin -- string -- IP Address of the Web Application you
wish to configure logging on
activate -- string -- Activate or Disable
Returns:
LoggingSetupModelResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('logging_configuration called.')
# Validate required parameters
self.logger.info('Validating required parameters for logging_configuration.')
self.validate_parameters(name=options.get("name"),
origin=options.get("origin"),
activate=options.get("activate"))
# Prepare query URL
self.logger.info('Preparing query URL for logging_configuration.')
_query_builder = Configuration.get_base_uri(Configuration.Server.PATH)
_query_builder += '/s/l'
_query_parameters = {
'name': options.get('name', None),
'origin': options.get('origin', None),
'activate': options.get('activate', None)
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
self.logger.info('Preparing and executing request for logging_configuration.')
_request = self.http_client.get(_query_url)
CustomAuth.apply(_request)
_context = self.execute_request(_request, name = 'logging_configuration')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for logging_configuration.')
if _context.response.status_code == 404:
self.logger.info('Status code 404 received for logging_configuration. Returning nil.')
return None
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, LoggingSetupModelResponse.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def logging_info(self,
options=dict()):
"""Does a GET request to /s/l/i.
WAF Log Info
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
name -- string -- Name of your WAF zone
origin -- string -- IP Address of the Web Application
time -- string -- Specific times or dates to lookup
separated by a comma in MMDDYYHHMMSS Format ( Month
Month Day Day Year Year Year Hour Hour Minute Minute
Second Second [01/01/0101;24:59:01])
Returns:
LoggingLogsModelResponse: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('logging_info called.')
# Validate required parameters
self.logger.info('Validating required parameters for logging_info.')
self.validate_parameters(name=options.get("name"),
origin=options.get("origin"))
# Prepare query URL
self.logger.info('Preparing query URL for logging_info.')
_query_builder = Configuration.get_base_uri(Configuration.Server.PATH)
_query_builder += '/s/l/i'
_query_parameters = {
'name': options.get('name', None),
'origin': options.get('origin', None),
'time': options.get('time', None)
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
self.logger.info('Preparing and executing request for logging_info.')
_request = self.http_client.get(_query_url)
CustomAuth.apply(_request)
_context = self.execute_request(_request, name = 'logging_info')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for logging_info.')
if _context.response.status_code == 404:
self.logger.info('Status code 404 received for logging_info. Returning nil.')
return None
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, LoggingLogsModelResponse.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
| 43.266667
| 116
| 0.610169
| 767
| 7,139
| 5.517601
| 0.245111
| 0.035444
| 0.039698
| 0.022684
| 0.727316
| 0.727316
| 0.727316
| 0.727316
| 0.727316
| 0.727316
| 0
| 0.005973
| 0.319933
| 7,139
| 164
| 117
| 43.530488
| 0.865705
| 0.341364
| 0
| 0.486486
| 1
| 0
| 0.157943
| 0.035047
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.094595
| 0
| 0.202703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7bf78f99c2ca9233117dba24c5564be0807b02eb
| 9,946
|
py
|
Python
|
v3/matrix-demo/python_comprehension-1.test.py
|
fkorling/OnlinePythonTutor
|
64f02f78143481ea267f830639990871183263fe
|
[
"Unlicense"
] | 1
|
2015-08-07T09:38:35.000Z
|
2015-08-07T09:38:35.000Z
|
v3/matrix-demo/python_comprehension-1.test.py
|
fkorling/OnlinePythonTutor
|
64f02f78143481ea267f830639990871183263fe
|
[
"Unlicense"
] | null | null | null |
v3/matrix-demo/python_comprehension-1.test.py
|
fkorling/OnlinePythonTutor
|
64f02f78143481ea267f830639990871183263fe
|
[
"Unlicense"
] | 1
|
2021-06-30T03:38:20.000Z
|
2021-06-30T03:38:20.000Z
|
'''
>>> increments([1, 5, 7])
[2, 6, 8]
>>> increments([0, 0, 0, 0, 0])
[1, 1, 1, 1, 1]
>>> increments([0.5, 1.5, 1.75, 2.5])
[1.5, 2.5, 2.75, 3.5]
>>> increments([570, 968, 723, 179, 762, 377, 845, 320, 475, 952, 680, 874, 708, 493, 901, 896, 164, 165, 404, 147, 917, 936, 205, 615, 518, 254, 856, 584, 287, 336, 452, 551, 914, 706, 558, 842, 52, 593, 733, 398, 119, 874, 769, 585, 572, 261, 440, 404, 293, 176, 575, 224, 647, 241, 319, 974, 5, 373, 367, 609, 661, 691, 47, 64, 79, 744, 606, 205, 424, 88, 648, 419, 165, 399, 594, 760, 348, 638, 385, 754, 491, 284, 531, 258, 745, 634, 51, 557, 346, 577, 375, 979, 773, 523, 441, 952, 50, 534, 641, 621, 813, 511, 279, 565, 228, 86, 187, 395, 261, 287, 717, 989, 614, 92, 8, 229, 372, 378, 53, 350, 936, 654, 74, 750, 20, 978, 506, 793, 148, 944, 23, 962, 996, 586, 404, 216, 148, 284, 797, 805, 501, 161, 64, 608, 287, 127, 136, 902, 879, 433, 553, 366, 155, 763, 728, 117, 300, 990, 345, 982, 767, 279, 814, 516, 342, 291, 410, 612, 961, 445, 472, 507, 251, 832, 737, 62, 384, 273, 352, 752, 455, 216, 731, 7, 868, 111, 42, 190, 841, 283, 215, 860, 628, 835, 145, 97, 337, 57, 791, 443, 271, 925, 666, 452, 601, 571, 218, 901, 479, 75, 912, 708, 33, 575, 252, 753, 857, 150, 625, 852, 921, 178, 832, 126, 929, 16, 427, 533, 119, 256, 937, 107, 740, 607, 801, 827, 667, 776, 95, 940, 66, 982, 930, 825, 878, 512, 961, 701, 657, 584, 204, 348, 564, 505, 303, 562, 399, 415, 784, 588, 2, 729, 478, 396, 314, 130, 493, 947, 724, 540, 608, 431, 107, 497, 68, 791, 521, 583, 359, 221, 713, 683, 945, 274, 568, 666, 517, 241, 401, 437, 958, 572, 561, 929, 342, 149, 971, 762, 249, 538, 277, 761, 489, 728, 372, 131, 366, 702, 73, 382, 58, 223, 423, 642, 628, 6, 158, 946, 710, 232, 211, 747, 215, 579, 396, 521, 597, 966, 401, 749, 546, 310, 786, 691, 333, 817, 162, 961, 674, 132, 235, 481, 410, 477, 311, 932, 352, 64, 771, 837, 609, 654, 535, 530, 346, 294, 441, 532, 824, 422, 912, 99, 894, 246, 99, 111, 806, 360, 652, 753, 489, 735, 996, 8, 742, 793, 341, 498, 790, 402, 542, 892, 573, 78, 994, 676, 225, 675, 904, 196, 156, 819, 959, 501, 554, 381, 525, 608, 401, 937, 875, 373, 803, 258, 530, 901, 175, 656, 533, 91, 304, 497, 321, 906, 893, 995, 238, 51, 419, 70, 673, 479, 852, 864, 143, 224, 911, 207, 41, 603, 824, 764, 257, 653, 521, 28, 673, 333, 536, 748, 92, 98, 951, 655, 278, 437, 167, 253, 849, 343, 554, 313, 333, 556, 919, 636, 21, 841, 854, 550, 993, 291, 324, 224, 48, 927, 784, 387, 276, 652, 860, 100, 386, 153, 988, 805, 419, 75, 365, 920, 957, 23, 592, 280, 814, 800, 154, 776, 169, 635, 379, 919, 742, 145, 784, 201, 711, 209, 36, 317, 718, 84, 974, 768, 518, 884, 374, 447, 160, 295, 29, 23, 421, 384, 104, 123, 40, 945, 765, 32, 243, 696, 603, 129, 650, 957, 659, 863, 582, 165, 681, 33, 738, 917, 410, 803, 821, 636, 162, 662, 231, 75, 799, 591, 258, 722, 131, 805, 600, 704, 995, 793, 502, 624, 656, 43, 597, 353, 867, 116, 568, 26, 16, 251, 78, 764, 799, 287, 575, 190, 718, 619, 377, 465, 267, 688, 772, 359, 451, 459, 139, 71, 821, 312, 334, 988, 929, 797, 830, 26, 3, 90, 450, 715, 174, 910, 258, 229, 325, 517, 37, 260, 950, 20, 881, 156, 231, 114, 670, 287, 631, 982, 855, 841, 72, 561, 368, 289, 829, 428, 815, 207, 844, 68, 143, 707, 259, 669, 362, 943, 550, 133, 367, 900, 233, 109, 504, 803, 985, 333, 318, 680, 952, 408, 268, 890, 101, 423, 261, 641, 500, 389, 885, 76, 682, 811, 941, 142, 552, 401, 429, 973, 287, 472, 630, 383, 569, 630, 135, 823, 49, 507, 433, 550, 660, 403, 88, 879, 697, 571, 790, 896, 252, 172, 911, 485, 30, 657, 821, 412, 204, 801, 763, 329, 199, 315, 940, 515, 29, 22, 66, 221, 63, 678, 368, 545, 560, 301, 292, 987, 673, 573, 399, 148, 326, 418, 687, 85, 167, 774, 657, 754, 168, 113, 
412, 353, 234, 923, 720, 691, 319, 711, 1000, 188, 969, 123, 547, 127, 69, 782, 533, 898, 574, 214, 848, 599, 112, 833, 26, 750, 462, 480, 511, 644, 929, 725, 310, 41, 559, 961, 399, 527, 960, 352, 468, 755, 732, 944, 115, 408, 642, 888, 922, 780, 727, 459, 473, 122, 716, 908, 576, 498, 196, 647, 912, 275, 238, 79, 75, 427, 299, 470, 347, 792, 969, 21, 424, 596, 88, 98, 475, 917, 683, 47, 843, 742, 673, 702, 983, 996, 430, 53, 327, 769, 666, 453, 93, 498, 942, 299, 200, 968, 202, 193, 508, 706, 247, 51, 721, 327, 484, 855, 565, 777, 33, 816, 827, 36, 962, 235, 297, 666, 111, 453, 445, 111, 653, 690, 325, 36, 187, 633, 854, 829, 74, 840, 744, 375, 124, 694, 236, 222, 88, 449, 134, 542, 812, 325, 373, 975, 131, 78, 390, 114, 969, 633, 57, 110, 635, 396, 947, 913, 148, 215, 465, 72, 463, 830, 885, 532, 728, 701, 31, 541, 54, 411, 916, 268, 596, 72, 971, 907, 856, 65, 55, 108, 222, 24, 482, 150, 864, 768, 332, 40, 961, 80, 745, 984, 170, 424, 28, 442, 146, 724, 32, 786, 985, 386, 326, 840, 416, 931, 606, 746, 39, 295, 355, 80, 663, 463, 716, 849, 606, 83, 512, 144, 854, 384, 976, 675, 549, 318, 893, 193, 562, 419, 444, 427, 612, 362, 567, 529, 273, 807, 381, 120, 66, 397, 738, 948, 99, 427, 560, 916, 283, 722, 111, 740, 156, 942, 215, 67, 944, 161, 544, 597, 468, 441, 483, 961, 503, 162, 706, 57, 37, 307, 142, 537, 861, 944])
[571, 969, 724, 180, 763, 378, 846, 321, 476, 953, 681, 875, 709, 494, 902, 897, 165, 166, 405, 148, 918, 937, 206, 616, 519, 255, 857, 585, 288, 337, 453, 552, 915, 707, 559, 843, 53, 594, 734, 399, 120, 875, 770, 586, 573, 262, 441, 405, 294, 177, 576, 225, 648, 242, 320, 975, 6, 374, 368, 610, 662, 692, 48, 65, 80, 745, 607, 206, 425, 89, 649, 420, 166, 400, 595, 761, 349, 639, 386, 755, 492, 285, 532, 259, 746, 635, 52, 558, 347, 578, 376, 980, 774, 524, 442, 953, 51, 535, 642, 622, 814, 512, 280, 566, 229, 87, 188, 396, 262, 288, 718, 990, 615, 93, 9, 230, 373, 379, 54, 351, 937, 655, 75, 751, 21, 979, 507, 794, 149, 945, 24, 963, 997, 587, 405, 217, 149, 285, 798, 806, 502, 162, 65, 609, 288, 128, 137, 903, 880, 434, 554, 367, 156, 764, 729, 118, 301, 991, 346, 983, 768, 280, 815, 517, 343, 292, 411, 613, 962, 446, 473, 508, 252, 833, 738, 63, 385, 274, 353, 753, 456, 217, 732, 8, 869, 112, 43, 191, 842, 284, 216, 861, 629, 836, 146, 98, 338, 58, 792, 444, 272, 926, 667, 453, 602, 572, 219, 902, 480, 76, 913, 709, 34, 576, 253, 754, 858, 151, 626, 853, 922, 179, 833, 127, 930, 17, 428, 534, 120, 257, 938, 108, 741, 608, 802, 828, 668, 777, 96, 941, 67, 983, 931, 826, 879, 513, 962, 702, 658, 585, 205, 349, 565, 506, 304, 563, 400, 416, 785, 589, 3, 730, 479, 397, 315, 131, 494, 948, 725, 541, 609, 432, 108, 498, 69, 792, 522, 584, 360, 222, 714, 684, 946, 275, 569, 667, 518, 242, 402, 438, 959, 573, 562, 930, 343, 150, 972, 763, 250, 539, 278, 762, 490, 729, 373, 132, 367, 703, 74, 383, 59, 224, 424, 643, 629, 7, 159, 947, 711, 233, 212, 748, 216, 580, 397, 522, 598, 967, 402, 750, 547, 311, 787, 692, 334, 818, 163, 962, 675, 133, 236, 482, 411, 478, 312, 933, 353, 65, 772, 838, 610, 655, 536, 531, 347, 295, 442, 533, 825, 423, 913, 100, 895, 247, 100, 112, 807, 361, 653, 754, 490, 736, 997, 9, 743, 794, 342, 499, 791, 403, 543, 893, 574, 79, 995, 677, 226, 676, 905, 197, 157, 820, 960, 502, 555, 382, 526, 609, 402, 938, 876, 374, 804, 259, 531, 902, 176, 657, 534, 92, 305, 498, 322, 907, 894, 996, 239, 52, 420, 71, 674, 480, 853, 865, 144, 225, 912, 208, 42, 604, 825, 765, 258, 654, 522, 29, 674, 334, 537, 749, 93, 99, 952, 656, 279, 438, 168, 254, 850, 344, 555, 314, 334, 557, 920, 637, 22, 842, 855, 551, 994, 292, 325, 225, 49, 928, 785, 388, 277, 653, 861, 101, 387, 154, 989, 806, 420, 76, 366, 921, 958, 24, 593, 281, 815, 801, 155, 777, 170, 636, 380, 920, 743, 146, 785, 202, 712, 210, 37, 318, 719, 85, 975, 769, 519, 885, 375, 448, 161, 296, 30, 24, 422, 385, 105, 124, 41, 946, 766, 33, 244, 697, 604, 130, 651, 958, 660, 864, 583, 166, 682, 34, 739, 918, 411, 804, 822, 637, 163, 663, 232, 76, 800, 592, 259, 723, 132, 806, 601, 705, 996, 794, 503, 625, 657, 44, 598, 354, 868, 117, 569, 27, 17, 252, 79, 765, 800, 288, 576, 191, 719, 620, 378, 466, 268, 689, 773, 360, 452, 460, 140, 72, 822, 313, 335, 989, 930, 798, 831, 27, 4, 91, 451, 716, 175, 911, 259, 230, 326, 518, 38, 261, 951, 21, 882, 157, 232, 115, 671, 288, 632, 983, 856, 842, 73, 562, 369, 290, 830, 429, 816, 208, 845, 69, 144, 708, 260, 670, 363, 944, 551, 134, 368, 901, 234, 110, 505, 804, 986, 334, 319, 681, 953, 409, 269, 891, 102, 424, 262, 642, 501, 390, 886, 77, 683, 812, 942, 143, 553, 402, 430, 974, 288, 473, 631, 384, 570, 631, 136, 824, 50, 508, 434, 551, 661, 404, 89, 880, 698, 572, 791, 897, 253, 173, 912, 486, 31, 658, 822, 413, 205, 802, 764, 330, 200, 316, 941, 516, 30, 23, 67, 222, 64, 679, 369, 546, 561, 302, 293, 988, 674, 574, 400, 149, 327, 419, 688, 86, 168, 775, 658, 755, 169, 114, 413, 354, 235, 
924, 721, 692, 320, 712, 1001, 189, 970, 124, 548, 128, 70, 783, 534, 899, 575, 215, 849, 600, 113, 834, 27, 751, 463, 481, 512, 645, 930, 726, 311, 42, 560, 962, 400, 528, 961, 353, 469, 756, 733, 945, 116, 409, 643, 889, 923, 781, 728, 460, 474, 123, 717, 909, 577, 499, 197, 648, 913, 276, 239, 80, 76, 428, 300, 471, 348, 793, 970, 22, 425, 597, 89, 99, 476, 918, 684, 48, 844, 743, 674, 703, 984, 997, 431, 54, 328, 770, 667, 454, 94, 499, 943, 300, 201, 969, 203, 194, 509, 707, 248, 52, 722, 328, 485, 856, 566, 778, 34, 817, 828, 37, 963, 236, 298, 667, 112, 454, 446, 112, 654, 691, 326, 37, 188, 634, 855, 830, 75, 841, 745, 376, 125, 695, 237, 223, 89, 450, 135, 543, 813, 326, 374, 976, 132, 79, 391, 115, 970, 634, 58, 111, 636, 397, 948, 914, 149, 216, 466, 73, 464, 831, 886, 533, 729, 702, 32, 542, 55, 412, 917, 269, 597, 73, 972, 908, 857, 66, 56, 109, 223, 25, 483, 151, 865, 769, 333, 41, 962, 81, 746, 985, 171, 425, 29, 443, 147, 725, 33, 787, 986, 387, 327, 841, 417, 932, 607, 747, 40, 296, 356, 81, 664, 464, 717, 850, 607, 84, 513, 145, 855, 385, 977, 676, 550, 319, 894, 194, 563, 420, 445, 428, 613, 363, 568, 530, 274, 808, 382, 121, 67, 398, 739, 949, 100, 428, 561, 917, 284, 723, 112, 741, 157, 943, 216, 68, 945, 162, 545, 598, 469, 442, 484, 962, 504, 163, 707, 58, 38, 308, 143, 538, 862, 945]
'''
| 621.625
| 4,900
| 0.586869
| 2,036
| 9,946
| 2.866896
| 0.43222
| 0.001371
| 0.001542
| 0.001371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.733519
| 0.205409
| 9,946
| 15
| 4,901
| 663.066667
| 0.005061
| 0.998894
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d01f441aaa55e6335b61133b010384c263fbae06
| 29
|
py
|
Python
|
plugins/plugin_ccsds122/__init__.py
|
G-AshwinKumar/experiment-notebook
|
aae1c5fb9ef8f84dce5d75989ed8975797282f37
|
[
"MIT"
] | null | null | null |
plugins/plugin_ccsds122/__init__.py
|
G-AshwinKumar/experiment-notebook
|
aae1c5fb9ef8f84dce5d75989ed8975797282f37
|
[
"MIT"
] | null | null | null |
plugins/plugin_ccsds122/__init__.py
|
G-AshwinKumar/experiment-notebook
|
aae1c5fb9ef8f84dce5d75989ed8975797282f37
|
[
"MIT"
] | null | null | null |
from . import ccsds122_codec
| 14.5
| 28
| 0.827586
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 0.137931
| 29
| 1
| 29
| 29
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d03ede0419efe4cc4e18fb8f285302ea8d7913c2
| 28
|
py
|
Python
|
test1.py
|
ksinghnote/Dummy_projects1
|
cf1d85e9329a438cc84efd4f5d70d965786832f0
|
[
"Apache-2.0"
] | null | null | null |
test1.py
|
ksinghnote/Dummy_projects1
|
cf1d85e9329a438cc84efd4f5d70d965786832f0
|
[
"Apache-2.0"
] | null | null | null |
test1.py
|
ksinghnote/Dummy_projects1
|
cf1d85e9329a438cc84efd4f5d70d965786832f0
|
[
"Apache-2.0"
] | null | null | null |
print("Welcome to Github")
| 9.333333
| 26
| 0.714286
| 4
| 28
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 2
| 27
| 14
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
d0a066487594fbdceea30b978b4a3e34d8050e26
| 552
|
py
|
Python
|
profit/dataset/parsers/__init__.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | null | null | null |
profit/dataset/parsers/__init__.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | 1
|
2021-09-15T13:13:12.000Z
|
2021-09-15T13:13:12.000Z
|
profit/dataset/parsers/__init__.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | null | null | null |
from profit.dataset.parsers import base_parser
from profit.dataset.parsers import csv_parser
from profit.dataset.parsers import data_frame_parser
from profit.dataset.parsers import json_parser
from profit.dataset.parsers import sdf_parser
from profit.dataset.parsers.base_parser import BaseFileParser
from profit.dataset.parsers.csv_parser import CSVFileParser
from profit.dataset.parsers.data_frame_parser import DataFrameParser
from profit.dataset.parsers.json_parser import JSONFileParser
from profit.dataset.parsers.sdf_parser import SDFFileParser
| 50.181818
| 68
| 0.882246
| 77
| 552
| 6.168831
| 0.220779
| 0.210526
| 0.357895
| 0.505263
| 0.429474
| 0.303158
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072464
| 552
| 11
| 69
| 50.181818
| 0.927734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d0ca6164f2f5c8577170274ea9b9973f3f543c31
| 25
|
bzl
|
Python
|
container/defs.bzl
|
alexeagle/rules_container
|
885067eee3899ae71f61d99572157af8c09b5c35
|
[
"Apache-2.0"
] | null | null | null |
container/defs.bzl
|
alexeagle/rules_container
|
885067eee3899ae71f61d99572157af8c09b5c35
|
[
"Apache-2.0"
] | null | null | null |
container/defs.bzl
|
alexeagle/rules_container
|
885067eee3899ae71f61d99572157af8c09b5c35
|
[
"Apache-2.0"
] | null | null | null |
"Public API re-exports"
| 8.333333
| 23
| 0.72
| 4
| 25
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 2
| 24
| 12.5
| 0.857143
| 0.84
| 0
| 0
| 0
| 0
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d0ce9e77306ca1c7f2022857ef4bd1f0c9df4c4d
| 100
|
py
|
Python
|
back/run.py
|
openmindsclub/IP10-website
|
a5e9b155d1c09ba4abcece0ff33c5b7733f4ade5
|
[
"MIT"
] | null | null | null |
back/run.py
|
openmindsclub/IP10-website
|
a5e9b155d1c09ba4abcece0ff33c5b7733f4ade5
|
[
"MIT"
] | null | null | null |
back/run.py
|
openmindsclub/IP10-website
|
a5e9b155d1c09ba4abcece0ff33c5b7733f4ade5
|
[
"MIT"
] | null | null | null |
from my_app import app
from my_app import views
if __name__ == "__main__":
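    # Start Flask's development server; debug=True enables the reloader and interactive debugger.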
app.run(debug=True)
| 16.666667
| 26
| 0.73
| 17
| 100
| 3.705882
| 0.647059
| 0.190476
| 0.285714
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18
| 100
| 5
| 27
| 20
| 0.768293
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
56019354e30f50dae107a584b38c2c7f46253b06
| 21
|
py
|
Python
|
__init__.py
|
olgabot/people
|
3ada8bdf9ff8780f2178dbeaf9529cc6b0898b3e
|
[
"MIT"
] | null | null | null |
__init__.py
|
olgabot/people
|
3ada8bdf9ff8780f2178dbeaf9529cc6b0898b3e
|
[
"MIT"
] | null | null | null |
__init__.py
|
olgabot/people
|
3ada8bdf9ff8780f2178dbeaf9529cc6b0898b3e
|
[
"MIT"
] | null | null | null |
from .people import *
| 21
| 21
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ef42ebab8b5492e68f4acf41445e9693cbdaa63a
| 3,614
|
py
|
Python
|
src/bplot/percentile.py
|
CampbellCrowley/bplot
|
b5e5080cdcdc9c4d3e5114c13702cbb2f49fbb8c
|
[
"BSD-3-Clause"
] | null | null | null |
src/bplot/percentile.py
|
CampbellCrowley/bplot
|
b5e5080cdcdc9c4d3e5114c13702cbb2f49fbb8c
|
[
"BSD-3-Clause"
] | null | null | null |
src/bplot/percentile.py
|
CampbellCrowley/bplot
|
b5e5080cdcdc9c4d3e5114c13702cbb2f49fbb8c
|
[
"BSD-3-Clause"
] | null | null | null |
from bplot.check_data import check_data
from bplot.line import line_h, line_v
from bplot.point import point
import numpy as np
def percentile(
x,
y,
outer=0.8,
inner=0.5,
color="tab:blue",
label="",
style="o",
alpha=1.0,
ax=None,
**kws
):
"""Draw vertical percentile interval.
Parameters
----------
x : scalar
The location along the x-axis at which the interval is placed.
y : {numpy.array, pandas.core.series.Series}
The vector of data for which the `outer` percentile interval is sought.
outer : float, 0.8 by default
The outer interval percentage.
inner : float, 0.5 by default
The inner interval percentage.
color : string, 'tab:blue' by default
The color of the box.
label : string, '' (empty) by default
The label within a potential legend.
style : string, 'o' by default
The shape of the median within the box.
alpha : float, 1.0 by default
        The transparency of the color. Values between 0 (transparent) and 1 (opaque) are allowed.
ax : matplotlib.pyplot.Axes, None by default
The axis onto which the box is drawn. If left as None,
matplotlib.pyplot.gca() is called to get the current `Axes`.
Returns
-------
out : matplotlib.pyplot.Axes
The `Axes` onto which the box was drawn.
"""
_, y, ax = check_data(None, y, ax)
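    # Convert the outer/inner interval widths into tail probabilities, then into the
    # five quantile levels: lower, inner-lower, median, inner-upper, upper.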
alpha_l, alpha_lm = (1 - outer) / 2, (1 - inner) / 2
l, lm, m, um, u = alpha_l, alpha_lm, 0.5, 1 - alpha_lm, 1 - alpha_l
q_l, q_lm, q_m, q_um, q_u = np.percentile(y, np.array([l, lm, m, um, u]) * 100)
line_v(x, q_l, q_u, size=2, color=color, alpha=alpha)
line_v(x, q_lm, q_um, size=5, color=color, alpha=alpha)
out = point(x, q_m, size=2, style=style, color=color, label=label, alpha=alpha)
return out
def percentile_h(
x,
y,
outer=0.8,
inner=0.5,
color="tab:blue",
label="",
style="o",
alpha=1,
ax=None,
**kws
):
"""Draw horizontal percentile interval.
Parameters
----------
x : {numpy.array, pandas.core.series.Series}
The vector of data for which the `outer` percentile interval is sought.
    y : scalar
The location along the y-axis at which the interval is placed.
outer : float, 0.8 by default
The outer interval percentage.
inner : float, 0.5 by default
The inner interval percentage.
color : string, 'tab:blue' by default
The color of the box.
label : string, '' (empty) by default
The label within a potential legend.
style : string, 'o' by default
The shape of the median within the box.
alpha : float, 1.0 by default
        The transparency of the color. Values between 0 (transparent) and 1 (opaque) are allowed.
ax : matplotlib.pyplot.Axes, None by default
The axis onto which the box is drawn. If left as None,
matplotlib.pyplot.gca() is called to get the current `Axes`.
Returns
-------
out : matplotlib.pyplot.Axes
The `Axes` onto which the box was drawn.
"""
x, _, ax = check_data(x, None, ax)
alpha_l, alpha_lm = (1 - outer) / 2, (1 - inner) / 2
l, lm, m, um, u = alpha_l, alpha_lm, 0.5, 1 - alpha_lm, 1 - alpha_l
q_l, q_lm, q_m, q_um, q_u = np.percentile(x, np.array([l, lm, m, um, u]) * 100)
line_h(y, q_l, q_u, size=2, color=color, alpha=alpha)
line_h(y, q_lm, q_um, size=5, color=color, alpha=alpha)
out = point(q_m, y, size=2, style=style, color=color, label=label, alpha=alpha)
return out
| 26.379562
| 98
| 0.612618
| 574
| 3,614
| 3.778746
| 0.175958
| 0.058091
| 0.077455
| 0.027663
| 0.846473
| 0.846473
| 0.846473
| 0.818811
| 0.818811
| 0.799447
| 0
| 0.020921
| 0.272551
| 3,614
| 136
| 99
| 26.573529
| 0.804108
| 0.553957
| 0
| 0.590909
| 0
| 0
| 0.01294
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ef47b08230e51d172e07768a518d6190936aa383
| 89
|
py
|
Python
|
exercises/slide_104/static-police/staticpolice/analyses/__init__.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
exercises/slide_104/static-police/staticpolice/analyses/__init__.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
exercises/slide_104/static-police/staticpolice/analyses/__init__.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
from .return_values import ReturnValueAnalysis, UnknownReturnValue, ConstantReturnValue
| 29.666667
| 87
| 0.88764
| 7
| 89
| 11.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 89
| 2
| 88
| 44.5
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
324d34a67debcfd9551b2d82702502b2d2c39ea0
| 156
|
py
|
Python
|
odarchive/__init__.py
|
drummonds/odarchive
|
59cd0caa7bd8906bd411f1354461ebd3ad03898e
|
[
"MIT"
] | null | null | null |
odarchive/__init__.py
|
drummonds/odarchive
|
59cd0caa7bd8906bd411f1354461ebd3ad03898e
|
[
"MIT"
] | 4
|
2020-03-24T16:27:34.000Z
|
2021-06-01T23:16:44.000Z
|
odarchive/__init__.py
|
drummonds/odarchive
|
59cd0caa7bd8906bd411f1354461ebd3ad03898e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for odarchive."""
from ._version import *
from .archive import *
from .disc_info import *
from .cli import *
| 17.333333
| 38
| 0.666667
| 21
| 156
| 4.857143
| 0.714286
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.173077
| 156
| 8
| 39
| 19.5
| 0.782946
| 0.352564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
08675b7dfda45ed30d54803f2daee9264d0c79bd
| 21
|
py
|
Python
|
example_project/some_modules/third_modules/a153.py
|
Yuriy-Leonov/cython_imports_limit_issue
|
2f9e7c02798fb52185dabfe6ce3811c439ca2839
|
[
"MIT"
] | null | null | null |
example_project/some_modules/third_modules/a153.py
|
Yuriy-Leonov/cython_imports_limit_issue
|
2f9e7c02798fb52185dabfe6ce3811c439ca2839
|
[
"MIT"
] | null | null | null |
example_project/some_modules/third_modules/a153.py
|
Yuriy-Leonov/cython_imports_limit_issue
|
2f9e7c02798fb52185dabfe6ce3811c439ca2839
|
[
"MIT"
] | null | null | null |
class A153:
pass
| 7
| 11
| 0.619048
| 3
| 21
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0.333333
| 21
| 2
| 12
| 10.5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
08bebbb4a5002918e8a9c1a65cde3b0655353a49
| 53
|
py
|
Python
|
vyper/codegen/function_definitions/__init__.py
|
onlymaresia/vyper
|
e46466aae2c8cc124fdb403a768551fe4a05bb4b
|
[
"Apache-2.0"
] | 1,471
|
2017-12-25T05:47:57.000Z
|
2019-11-19T07:47:53.000Z
|
vyper/codegen/function_definitions/__init__.py
|
onlymaresia/vyper
|
e46466aae2c8cc124fdb403a768551fe4a05bb4b
|
[
"Apache-2.0"
] | 895
|
2017-12-25T08:18:23.000Z
|
2019-11-20T06:29:03.000Z
|
vyper/codegen/function_definitions/__init__.py
|
onlymaresia/vyper
|
e46466aae2c8cc124fdb403a768551fe4a05bb4b
|
[
"Apache-2.0"
] | 321
|
2017-12-25T16:37:21.000Z
|
2019-11-15T17:44:06.000Z
|
from .common import generate_ir_for_function # noqa
| 26.5
| 52
| 0.830189
| 8
| 53
| 5.125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 1
| 53
| 53
| 0.891304
| 0.075472
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
08c60821777883bf7f8d6ae21de5d6862e9fe9fc
| 16,359
|
py
|
Python
|
synestia-book/_build/jupyter_execute/docs/01What_Are_Synestias.py
|
ststewart/synestiabook2
|
9c530cb7ed5a33c82bccccf828bb8969f9609b8b
|
[
"MIT"
] | null | null | null |
synestia-book/_build/jupyter_execute/docs/01What_Are_Synestias.py
|
ststewart/synestiabook2
|
9c530cb7ed5a33c82bccccf828bb8969f9609b8b
|
[
"MIT"
] | null | null | null |
synestia-book/_build/jupyter_execute/docs/01What_Are_Synestias.py
|
ststewart/synestiabook2
|
9c530cb7ed5a33c82bccccf828bb8969f9609b8b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # What Are Synestias?
#
# ## How Were Synestias Discovered?
#
# Synestias are a new type of planetary structure (think planets, moons, and planetary disks) discovered by [(Lock & Stewart, 2017)](https://agupubs.onlinelibrary.wiley.com/doi/full/10.1002/2016JE005239). Lock and Stewart were searching for an alternative hypothesis that could account for some of the loose ends of the prevailing Moon-formation model. There is strong evidence that a giant impact formed the Moon ([Canup, 2019](https://astronomy.com/news/2019/05/giant-impact-hypothesis-an-evolving-legacy-of-apollo); [Stevenson & Halliday, 2014](https://royalsocietypublishing.org/doi/full/10.1098/rsta.2014.0289)). However, the specifics of the Moon-forming giant impact have been hotly debated within the lunar science community.
#
# Since the 1970s, the main hypothesis for the formation of the Moon (now known as the <i>canonical model</i>) has been that a Mars-sized object slowly glanced off a young Earth ([Canup & Asphaug, 2001](https://www.nature.com/articles/35089010)). The canonical model is the <i>giant impact hypothesis</i> (Hartmann & Davis, 1974; [Hartmann & Davis, 1975](https://courses.seas.harvard.edu/climate/eli/Courses/EPS281r/Sources/Origin-of-the-Moon/more/Hartmann-Davis-1975.pdf); [Cameron & Ward, 1976](http://adsabs.harvard.edu/full/1976LPI.....7..120C)) that most people outside lunar research circles are familiar with. However, perhaps unbeknownst to the public, the lunar community has extensively debated the validity of the canonical model. It is common for scientists to investigate all aspects of popular hypotheses. The great debate within the lunar community stems from the inadequacy of the canonical model to explain why the Moon and Earth have similar chemistry and isotopes. Predictions from numerical simulations of the Moon-forming giant impact based on the canonical model do not seem to agree with lunar geochemical data.
#
# The giant impact simulations for the canonical model assume that the angular momentum of the Earth-Moon system has not changed throughout the life of our solar system. As a consequence,
# 1. the possible initial conditions (and type) of the giant impact that formed the Moon are narrow in range ([Canup, 2004](https://www.sciencedirect.com/science/article/abs/pii/S0019103503002999); [Canup, 2008a](https://royalsocietypublishing.org/doi/full/10.1098/rsta.2008.0101?casa_token=jM1JNhwK2aoAAAAA%3AjvTFUaNZZrwa_mRcj3wE066mtfSh4skoauLgGFR7YLvJi4t8O8b3iIQIv6q0Pxx6wiq65Db9PPx2B5s)),
# 2. the giant impact does not have enough rotational energy to homogenize material between the proto-Earth and impactor to the extent observed in the isotopically similar Earth-Moon system ([Melosh, 2014](https://royalsocietypublishing.org/doi/full/10.1098/rsta.2013.0168)), and
# 3. giant impact simulations of the canonical model struggle to produce a lunar-mass moon in the disk ([Salmon & Canup, 2012](https://iopscience.iop.org/article/10.1088/0004-637X/760/1/83/meta); [Salmon & Canup, 2014](https://royalsocietypublishing.org/doi/full/10.1098/rsta.2013.0256); [Charnoz & Michaut, 2015](https://www.sciencedirect.com/science/article/abs/pii/S0019103515003097); [Lock et al., 2018](https://agupubs.onlinelibrary.wiley.com/doi/full/10.1002/2017JE005333)).
#
# Conservation of angular momentum requires that the angular momentum of the Earth-Moon system cannot change unless acted upon by a force external to the system. However, there is no definitive reason why external forces, such as those arising from tidal interactions with the sun, should not be expected to act on the Earth-Moon system and decrease its angular momentum over time. If the Earth-Moon system had a greater-than-present-day angular momentum at the time of the giant impact, then it would be easier for the impacting materials to mix and reproduce geochemical observations.
#
# It took a while for the science of giant impacts and orbital dynamics to catch up with the canonical model. [Ćuk & Stewart, 2012](https://science.sciencemag.org/content/338/6110/1047.abstract?casa_token=fuxhIqblxBMAAAAA:5c4Rh-dF3eZ46Pbe4hmNX5YqM7oo8sa_T3PqPRQsFgiWSnlZ0IZHMeZtTGPIAp4fa-MCRPJkQY9MwkQ) were able to provide a mechanism for decreasing the angular momentum of the Earth-Moon system over time, and ([Ćuk & Stewart, 2012](https://science.sciencemag.org/content/338/6110/1047.abstract?casa_token=fuxhIqblxBMAAAAA:5c4Rh-dF3eZ46Pbe4hmNX5YqM7oo8sa_T3PqPRQsFgiWSnlZ0IZHMeZtTGPIAp4fa-MCRPJkQY9MwkQ)) and ([Canup, 2012](https://science.sciencemag.org/content/338/6110/1052.abstract?casa_token=gohyLkSGfaEAAAAA:KSj2bUWUDNNqCytTXbreHIrDIL5otuwq0S6Q_uyNAGSD9cJHE2ogf4P3HcdJ3in75AsQ3Fx5-6zDlEA)) simulated higher-energy, higher-angular-momentum giant impacts with a better understanding of the behavior of rocky materials under extreme pressure and temperature conditions.
#
# As Lock and Stewart studied a range of initial conditions for their database of higher-energy, higher-angular-momentum Moon-forming giant impacts, they noticed that some of the impacts resulted in structures that were not typical moon-forming disks. These structures had a flared shape similar to that of traditional disks (see image below) but did not have the same dynamics.
# [figure omitted: flared (disk-like) shape; see caption below]
#
# <i>Caption</i>. An example of a flared (disk-like) shape with a quarter cut-out (yellow reveals interior). Credit: G. O. Hollyday.
# These new structures were continuous bodies (as opposed to a distinct planet with a disk that separately orbits the planet). They had rapidly rotating centers attached to a gaseous <i>disk-like</i> (flared) structure, which emplaced more mass in their moon-forming regions and produced higher pressures (and thus more complete chemical equilibration) there. Lock and Stewart called these structures synestias, after "syn-" meaning together and the goddess Hestia, who rules over architecture.
# ## What Does a Synestia Look Like?
# [figure omitted: artistic rendering of a synestia with its moon; see caption below]
# <i>Caption</i>. Artistic rendering of a synestia (left) with fully formed moon outside the synestia (right). Credit: G. O. Hollyday.
# Synestias have a physical shape resembling that of a giant doughnut (about 8,000 times as wide as Earth is now) with no hole at the center. The image above is a conceptual rendering of a synestia. It reflects researchers' best knowledge of what a synestia might look like, drawn from fluid-particle simulations of giant impacts (a synestia has not been observed in space yet). The easiest way to form a synestia is through a <i>giant impact</i>: a collision involving a planet-sized body, with enough energy to liquefy and vaporize a sizeable proportion of the impacting body. The colliding bodies will quickly (on the order of an Earth day or two) form one continuous swirling mass of molten and vaporous rock - a synestia. Most of the interior of a synestia is made of turbulent rock vapor, which is opaque to visible light (i.e., we can't see through a synestia) and gives a synestia its flared shape. The surface of a synestia is essentially a cloud cover; it is composed of many molten rock droplets that have condensed from gas after radiating their heat into space. These condensates are very hot (2300 K), so the surface of a synestia glows like magma does on Earth. The condensates fall towards the interior of the synestia and collect into moonlets, which ultimately combine into a moon. Thus, the wholly-grown moon forms within a synestia.
#
# In the artistic rendering above, you'll notice there is a moon (small glowing sphere to the right) orbiting the synestia. This moon forms within the synestia but the synestia has cooled and contracted to the point that it has retracted from the moon. The fully-formed moon is now physically separate from the synestia.
#
# At the surface of a synestia, rock vapor is free to glow and radiate its heat away into cold space (in other words, the rock vapor cools) until it condenses into droplets. The surrounding gas cannot support the droplets, so they rain towards the midplane. Small moonlets coalesce from this rock rain over time, and eventually accrete into one large moon. Since the Moon is essentially a large, hot, liquid, spherical rock at this point, it too will glow (like lava) as heat radiates from its surface, cooling the interior. The Moon appears darker than the synestia in the image above because the Moon's surface cools more quickly in the absence of gas.
# ## How Does a Synestia Become the Earth We Know Today?
#
# What is so interesting about the origin of the Moon is that the Moon-forming giant impact shapes both the Moon and Earth. If the giant impact produces a synestia, then the Moon forms from within a synestia. This means <b>the Moon forms within Earth, because Earth would have been the synestia</b>. See the video below for a conceptual animation of the formation of a synestia via a giant impact.
# In[1]:
from IPython.display import YouTubeVideo
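# Embed the "Making a Synestia" animation that the caption below credits and describes.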
YouTubeVideo('7e_6oyROHCU', width=640, height=360)
# <i>Caption</i>. Time evolution: from giant impact to synestia. Video zooms out. A small body quickly initiates a giant impact with a rapidly rotating early Earth. This is one type of impact that forms an Earth-mass synestia. The impacting bodies continue to collide. High pressures and temperatures generated from shocks in the giant impact vaporize and melt material. The system settles and thermally equilibrates into a synestia. The dark orange flared shape represents the vapor-dominated disk-like region. The golden ellipsoid is the liquid-dominated planet-like region. Credit: Sarah T. Stewart, U. of California, Davis ([Stewart et al., 2019]()). Visualization by Advanced Visualization Lab, National Center for Supercomputing Applications, U. of Illinois. Funded in part by the National Science Foundation as part of the CADENS project.
# For comparison, the video below (with audio) shows the formation of a planet-disk system via a giant impact.
# In[2]:
from IPython.display import Video
#video sourced from https://mediaspace.illinois.edu/media/t/1_f9bmmfsu
Video("https://cdnapisec.kaltura.com/p/1329972/sp/132997200/playManifest/entryId/1_f9bmmfsu/flavorId/1_pssa3j97/format/url/protocol/http/a.mp4", width=770, height=467)
# <i>Caption</i>. Time evolution: from canonical giant impact to planet-disk system. Video zooms out. A Mars-size body "Theia" quickly initiates a grazing giant impact with early Earth. This is the canonical model. The impacting bodies continue to collide. Thermal energy from the giant impact melts material, while rotational energy spins the system. The system settles into distinct planet and disk components. Liquid moonlets (orange clumps) and lack of vapor distinguish the disk from the liquid-dominated planet (golden sphere). A moon is visible at the top. Credit: Robin M. Canup, Southwest Research Institute ([Canup et al., 2018]()). Visualization by Advanced Visualization Lab, National Center for Supercomputing Applications, U. of Illinois. Funded in part by the National Science Foundation as part of the CADENS project.
# The conceptual art of a synestia in the section above ("What Does a Synestia Look Like?") shows the final step of moon formation in a synestia, a step not seen in the "Making a Synestia" video above. The conceptual image reveals what a synestia looks like a couple of days after the giant impact that formed it. A synestia is not a static body. Unlike a planet, a synestia will evolve drastically with time. This transition is quick relative to geological timescales; it can be on the order of tens of years.
#
# A synestia's shape will remain flared for some time. The <i>photosphere</i> of a synestia, the optically thin layer enveloping a synestia, is in contact with the cold (200 K) vacuum of space. The photosphere is the surface where vapor saturates and is able to condense. Think of it as a synestia's cloud layer. The photosphere will radiate away heat, causing the outer layers of vapor to condense into rock rain. As a synestia continues to shrink and condense, its outer edges recede with time. Eventually, a synestia will cool and shrink to a more spherical shape and transition into a rapidly rotating, molten planet. Since the planet is rapidly rotating, it will have a bulge around its equator, taking on an ellipsoidal shape that is described as <i>oblate</i>.
# ## References
#
# Cameron, A. G. W., & Ward, W. (1976). The origin of the Moon. In <i>Proc. 7th Lunar Science Conference</i>. Lunar and Planetary Institute.
#
# Canup, R. M. (2004). Simulations of a late lunar-forming impact. <i>Icarus</i>, 168 (2), 433-456.
#
# Canup, R. M. (2008a). Accretion of the Earth. <i>Philosophical Transactions of the Royal Society. Series A: Mathematical, Physical, and Engineering Sciences</i>, 366 (1883), 4061-4075.
#
# Canup, R. M. (2012). Forming a Moon with an Earth-like Composition via a Giant Impact. <i>Science (American Association for the Advancement of Science)</i>, 338 (6110), 1052-1055.
#
# Canup, R. M. (2019). Giant Impact Hypothesis: An evolving legacy of Apollo. Retrieved from https://astronomy.com/news/2019/05/giant-impact-hypothesis-an-evolving-legacy-of-apollo (Astronomy)
#
# Canup, R. M., & Asphaug, E. (2001). Origin of the Moon in a giant impact near the end of the Earth's formation. <i>Nature</i>, 412, 708-712.
#
# Canup, R. M., Cox, D., Patterson, R., Levy, S., Borkiewicz, K., & Christensen, A. J. (2018). <i>Birth of Planet Earth: Collision that formed the Moon</i>. Southwest Research Institute and University of Illinois at Urbana-Champaign, National Center for Supercomputing Applications, Advanced Simulation Lab. Funded in part by the National Science Foundation as part of the CADENS project. Retrieved from https://mediaspace.illinois.edu/media/t/1_f9bmmfsu (Media Space Illinois)
#
# Charnoz, S., & Michaut, C. (2015). Evolution of the protolunar disk: Dynamics, cooling timescale and implantation of volatiles onto the Earth. <i>Icarus</i>, 260, 440-463.
#
# Ćuk, M., & Stewart, S. T. (2012). Making the Moon from a fast-spinning Earth: A giant impact followed by resonant despinning. <i>Science (American Association for the Advancement of Science)</i>, 338 (6110), 1047-1052.
#
# Hartmann, W. K., & Davis, D. R. (1974). Satellite-sized planetesimals. In J. A. Burns (Ed.), <i>Proc. International Astronomical Union Colloquium No. 28: Planetary Satellites</i>. Arizona University Press.
#
# Hartmann, W. K., & Davis, D. R. (1975). Satellite-sized planetesimals and lunar origin. <i>Icarus</i>, 24, 504-515.
#
# Lock, S. J., & Stewart, S. T. (2017). The structure of terrestrial bodies: Impact heating, corotation limits, and synestias. <i>Journal of Geophysical Research: Planets (American Geophysical Union)</i>, 122 (5), 950-982.
#
# Lock, S. J., Stewart, S. T., Petaev, M. I., Leinhardt, Z. M., Mace, M. T., Jacobsen, S. B., & Ćuk, M. (2018). The origin of the Moon within a terrestrial synestia. <i>Journal of Geophysical Research: Planets (American Geophysical Union)</i>, 123 (4), 910-951.
#
# Melosh, H. J. (2014). New approaches to the Moon's isotopic crisis. <i>Philosophical Transactions of the Royal Society. Series A: Mathematical, Physical, and Engineering Sciences</i>, 372 (20130168), 1-12.
#
# Salmon, J., & Canup, R. M. (2012). Lunar accretion from a Roche-interior fluid disk. <i>The Astrophysical Journal</i>, 760 (83), 1-18.
#
# Salmon, J., & Canup, R. M. (2014). Accretion of the Moon from non-canonical discs. <i>Philosophical Transactions of the Royal Society. Series A: Mathematical, Physical, and Engineering Sciences</i>, 372 (20130256), 1-14.
#
# Stevenson, D. J., & Halliday, A. N. (2014). The origin of the Moon. <i>Philosophical Transactions of the Royal Society. Series A: Mathematical, Physical, and Engineering Sciences</i>, 372 (20140289), 1-3.
#
# Stewart, S. T., SubbaRao, M., Cox, D., Patterson, R., Levy, S., Christensen, A. J., & Borkiewicz, K. (2019). <i>Making a Synestia</i>. University of Illinois at Urbana-Champaign, National Center for Supercomputing Applications, Advanced Simulation Lab. Funded in part by the National Science Foundation. Retrieved from https://www.youtube.com/watch?v=7e_6oyROHCU (YouTube)
| 151.472222
| 1,355
| 0.776148
| 2,566
| 16,359
| 4.942712
| 0.303196
| 0.014981
| 0.007096
| 0.007096
| 0.256722
| 0.224789
| 0.197982
| 0.194828
| 0.177245
| 0.152251
| 0
| 0.045267
| 0.14249
| 16,359
| 107
| 1,356
| 152.88785
| 0.858854
| 0.97023
| 0
| 0
| 0
| 0.25
| 0.367758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3ee91757f00706a4a1f0f73e1e75b73773c54b46
| 92
|
py
|
Python
|
src/util/__init__.py
|
kirill-kundik/CinemaChallengeBackend
|
aea4ac801a9a5c907f36f07b67df162b4bd85044
|
[
"MIT"
] | null | null | null |
src/util/__init__.py
|
kirill-kundik/CinemaChallengeBackend
|
aea4ac801a9a5c907f36f07b67df162b4bd85044
|
[
"MIT"
] | null | null | null |
src/util/__init__.py
|
kirill-kundik/CinemaChallengeBackend
|
aea4ac801a9a5c907f36f07b67df162b4bd85044
|
[
"MIT"
] | null | null | null |
from .parse_params import parse_params
from .responses import render_error, render_resource
| 30.666667
| 52
| 0.869565
| 13
| 92
| 5.846154
| 0.615385
| 0.289474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097826
| 92
| 2
| 53
| 46
| 0.915663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4118244bf55a171804bee7926bc15d5d41432c57
| 72
|
py
|
Python
|
py_tdlib/constructors/get_recent_inline_bots.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/get_recent_inline_bots.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/get_recent_inline_bots.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Method
class getRecentInlineBots(Method):
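    # Empty body: the Method base class (from ..factory) presumably builds the TDLib
    # request payload from the class name itself, so no fields are needed here.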
pass
| 12
| 34
| 0.791667
| 8
| 72
| 7.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 72
| 5
| 35
| 14.4
| 0.919355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
f5e79cfd3a6cf6f61af1b24a2cb329d23d854474
| 46,331
|
py
|
Python
|
scripts/winddata.py
|
glasscathedrals/ondisapy
|
f34dd2e3f424d71efa1f285272793e78eaf0be96
|
[
"MIT"
] | null | null | null |
scripts/winddata.py
|
glasscathedrals/ondisapy
|
f34dd2e3f424d71efa1f285272793e78eaf0be96
|
[
"MIT"
] | null | null | null |
scripts/winddata.py
|
glasscathedrals/ondisapy
|
f34dd2e3f424d71efa1f285272793e78eaf0be96
|
[
"MIT"
] | null | null | null |
import datetime
import os
import sys
import tkinter as tk
import warnings
from tkinter import filedialog, messagebox
import ipywidgets as widgets
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ipywidgets import Button, HBox, Layout, VBox
sys.path.insert(0, os.path.join(os.path.dirname(os.getcwd()), 'scripts'))
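# Make the repository's 'scripts' directory importable so the windroses helpers below resolve.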
from windroses import *
warnings.simplefilter("ignore")
class WidgetsMain(object):
def __init__(self):
self.path = os.path.dirname(os.getcwd())
def display(self):
create_project_button = widgets.Button(description='Criar projeto', tooltip='Cria um novo projeto', layout=Layout(
width='30%'), style={'description_width': 'initial'})
create_project_button.on_click(self.create_project_button_click)
load_project_button = widgets.Button(description='Importar projeto', tooltip='Importa o .csv de um projeto criado', layout=Layout(
width='30%'), style={'description_width': 'initial'})
load_project_button.on_click(self.load_project_button_click)
project_accordion = widgets.Accordion(
children=[create_project_button, load_project_button])
project_accordion.set_title(0, 'Criar projeto')
project_accordion.set_title(1, 'Importar projeto')
tab_contents = ['Projetos']
tab_children = [project_accordion]
tab = widgets.Tab()
tab.children = tab_children
for i in range(len(tab_children)):
tab.set_title(i, tab_contents[i])
return tab
def create_project_button_click(self, b):
self.project_dirs = self.create_project()
return self.project_dirs
def create_project(self):
if not os.path.exists(os.path.join(self.path, 'proj')):
os.makedirs(os.path.join(self.path, 'proj'))
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
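        # Hidden Tk root window: created only so the native file dialog can run; it is
        # kept topmost, withdrawn from view, and given the project icon below.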
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
create_project_asksaveasfilename_dir = filedialog.asksaveasfilename(initialdir=os.path.join(
self.path, 'proj'), title="Insira o nome desejado para seu projeto:", filetypes=[("Nome do projeto", ".")])
if create_project_asksaveasfilename_dir == '':
messagebox.showwarning("ondisapy", "Nenhum projeto criado.")
return None
else:
if not os.path.exists(create_project_asksaveasfilename_dir):
os.makedirs(create_project_asksaveasfilename_dir)
project_data_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'data').replace('\\', '/'))
project_waves_dir = (os.path.join(
project_data_dir, 'wind_waves').replace('\\', '/'))
project_winds_dir = (os.path.join(
project_data_dir, 'wind_data').replace('\\', '/'))
project_wind_fetchs_dir = (os.path.join(
project_data_dir, 'wind_fetchs').replace('\\', '/'))
project_img_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'img').replace('\\', '/'))
project_grid_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'grid').replace('\\', '/'))
project_dirs_list = [project_data_dir, project_waves_dir, project_winds_dir,
project_wind_fetchs_dir, project_img_dir, project_grid_dir]
print("Diretórios de projeto criados:")
for i in project_dirs_list:
try:
os.makedirs(i)
print("%s" % i)
except OSError as Error:
if os.path.exists(i):
print("%s já existe." % i)
project_file_dir = (os.path.join(
create_project_asksaveasfilename_dir, 'dir.csv').replace('\\', '/'))
if not os.path.exists(project_file_dir):
project_name = os.path.basename(
create_project_asksaveasfilename_dir)
project_dirs_list.append(project_name)
project_dirs_dataframe = pd.DataFrame(
data={"dir": project_dirs_list})
project_dirs_dataframe.to_csv(
project_file_dir, sep='\t', index=False, header=True, encoding='utf-8')
messagebox.showinfo(
"ondisapy", "Projeto criado com sucesso:\n%s" % project_file_dir)
print("\nProjeto criado:\n%s\n" % project_file_dir)
return project_dirs_dataframe
else:
print("%s já existe.\n" % project_file_dir)
print("\n")
def load_project_button_click(self, b):
self.project_dirs = self.load_project()
return self.project_dirs
def load_project(self):
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
load_project_askopenfilename_dir = filedialog.askopenfilename(initialdir=os.path.join(
self.path, 'proj'), title="Confirme o diretório de importação do arquivo '.csv' do seu projeto:", filetypes=[(".csv", "*.csv")])
if load_project_askopenfilename_dir == '':
messagebox.showwarning("ondisapy", "Nenhum projeto importado.")
return None
else:
if not ('dir.csv') in str(load_project_askopenfilename_dir):
messagebox.showwarning(
"ondisapy", "Erro: arquivo inválido.\nO arquivo realmente é um .csv de projeto criado?")
return None
else:
project_dirs_dataframe = pd.read_csv(
load_project_askopenfilename_dir, sep='\t', engine='python', header=0, encoding='utf-8')
messagebox.showinfo(
"ondisapy", "Projeto importado com sucesso:\n%s" % load_project_askopenfilename_dir)
print("Projeto importado:\n%s\n" %
load_project_askopenfilename_dir)
return (project_dirs_dataframe)
class WidgetsWindData(object):
def __init__(self):
self.path = os.path.dirname(os.getcwd())
def display(self):
load_csat3_wind_data_button = widgets.Button(description='Importar modelo de dados de ventos CSAT3',
tooltip='Importa um modelo de dados de ventos CSAT3 para leitura', layout=Layout(width='30%'), style={'description_width': 'initial'})
load_csat3_wind_data_button.on_click(
self.load_csat3_wind_data_button_click)
load_windsonic_wind_data_button = widgets.Button(description='Importar modelo de dados de ventos Windsonic',
tooltip='Importa um modelo de dados de ventos Windsonic para leitura', layout=Layout(width='30%'), style={'description_width': 'initial'})
load_windsonic_wind_data_button.on_click(
self.load_windsonic_wind_data_button_click)
self.height_adjustment_checkbox = widgets.Checkbox(
description='Ajustar alturas (Soma vetorial)', value=False, layout=Layout(width='30%'), style={'description_width': 'initial'})
self.rl_checkbox = widgets.Checkbox(description='Utilizar RL', value=False, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.rt_checkbox = widgets.Checkbox(description='Utilizar RT', value=False, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.uz_checkbox = widgets.Checkbox(description='Utilizar U(z) (CSAT3)', value=False, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.bins_int_text = widgets.IntText(description='Intervalos:', value=10, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.step_int_text = widgets.IntText(description='Redutor:', value=1, layout=Layout(
width='30%'), style={'description_width': 'initial'})
self.speed_unit_text = widgets.Text(
description='Unidade (m/s):', value='m/s', layout=Layout(width='30%'), style={'description_width': 'initial'})
self.windrose_percentage_angle_float_text = widgets.FloatText(
description='Ângulo (°):', value=33.75, layout=Layout(width='30%'), style={'description_width': 'initial'})
wind_data_accordion = widgets.Accordion(
children=[load_csat3_wind_data_button, load_windsonic_wind_data_button])
wind_data_accordion.set_title(
0, 'Importar modelo de dados de ventos CSAT3')
        wind_data_accordion.set_title(
            1, 'Importar modelo de dados de ventos Windsonic')
wind_adjustments_vbox = widgets.VBox(
[self.height_adjustment_checkbox, self.rl_checkbox, self.rt_checkbox, self.uz_checkbox])
wind_adjustments_accordion = widgets.Accordion(
children=[wind_adjustments_vbox])
wind_adjustments_accordion.set_title(
0, 'Ajustes a serem incluídos nos cálculos de velocidades processadas')
other_adjustments_accordion = widgets.Accordion(
children=[self.windrose_percentage_angle_float_text, self.bins_int_text, self.step_int_text, self.speed_unit_text])
other_adjustments_accordion.set_title(
0, 'Ângulo para a rosa dos ventos')
other_adjustments_accordion.set_title(1, 'Intervalos')
other_adjustments_accordion.set_title(2, 'Amostragem de dados')
other_adjustments_accordion.set_title(3, 'Unidade de velocidade')
tab_contents = ['Dados de Ventos',
'Ajustes de Cálculo', 'Outros Ajustes']
tab_children = [wind_data_accordion,
wind_adjustments_accordion, other_adjustments_accordion]
tab = widgets.Tab()
tab.children = tab_children
for i in range(len(tab_children)):
tab.set_title(i, tab_contents[i])
display(tab)
def load_csat3_wind_data_button_click(self, b):
self.csat3_wind_data = self.load_csat3_wind_data()
def load_csat3_wind_data(self):
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
load_csat3_askopenfilename_dir = filedialog.askopenfilename(
initialdir=self.path, title="Confirme o diretório de importação do arquivo '.csv' do seu modelo de dados de ventos CSAT3:", filetypes=[(".csv", "*.csv")])
if load_csat3_askopenfilename_dir == '':
messagebox.showwarning(
"ondisapy", "Nenhum modelo de dados de ventos CSAT3 importado.")
return None
else:
csat3_dataframe = pd.read_csv(
load_csat3_askopenfilename_dir, sep=';', engine='python', encoding='utf-8', decimal=',')
messagebox.showinfo(
"ondisapy", "Modelo de dados de ventos CSAT3 importado com sucesso:\n%s" % load_csat3_askopenfilename_dir)
print("Modelo de dados de ventos CSAT3 importado:\n%s\n" %
load_csat3_askopenfilename_dir)
return csat3_dataframe
def csat3_wind_data_dataframe(self, csat3_dataframe, project_dirs):
self.csat3_dataframe = csat3_dataframe.copy()
self.project_dirs = project_dirs
if len(self.csat3_dataframe.filter(regex='Unnamed').columns) != 0:
self.csat3_dataframe = self.csat3_dataframe[self.csat3_dataframe.columns.drop(
list(self.csat3_dataframe.filter(regex='Unnamed')))]
if False in self.csat3_dataframe.columns.isin(['TimeStamp', 'Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement', 'RL', 'RT']):
messagebox.showwarning(
"ondisapy", "Modelo de dados de ventos CSAT3 com colunas nomeadas de forma diferente do modelo fornecido para uso.\nVerifique se seu arquivo .csv é proveniente do modelo correto para prosseguir com as análises.")
return None
else:
self.csat3_dataframe[['Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement', 'RL', 'RT']] = self.csat3_dataframe[[
'Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement', 'RL', 'RT']].astype('float64')
csat3_dataframe_len = len(self.csat3_dataframe)
self.csat3_dataframe = self.csat3_dataframe.dropna(
subset=['TimeStamp', 'Ux', 'Uy', 'Uz', 'Ts', 'batt_volt', 'panel_temp', 'wnd_dir_csat3', 'wnd_dir_compass', 'height_measurement'])
self.csat3_dataframe = self.csat3_dataframe.fillna(value='')
csat3_dataframe_drop_na_len = len(self.csat3_dataframe)
if self.uz_checkbox.value == False:
if self.height_adjustment_checkbox.value == True:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+((float(self.csat3_dataframe['Uy'][i]))**2))**(
0.5))*((10/self.csat3_dataframe['height_measurement'][i])**(1/7)) for i in self.csat3_dataframe.index]
else:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+(
(float(self.csat3_dataframe['Uy'][i]))**2))**(0.5)) for i in self.csat3_dataframe.index]
if self.rl_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RL'][0] for i in processed_wind_speeds_list]
if self.rt_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RT'][0] for i in processed_wind_speeds_list]
self.csat3_dataframe['U'] = pd.Series(
processed_wind_speeds_list).values
self.csat3_dataframe['TimeStamp'] = pd.to_datetime(
self.csat3_dataframe['TimeStamp'])
print("Total de linhas sem valores utilizáveis removidas: %i de %i.\n" % (
csat3_dataframe_len-csat3_dataframe_drop_na_len, csat3_dataframe_len))
self.csat3_dataframe = self.csat3_dataframe.iloc[::self.step_int_text.value]
self.csat3_dataframe.reset_index(inplace=True, drop=True)
self.csat3_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(self.csat3_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/').replace('\\', '/'))
return self.csat3_dataframe
elif self.uz_checkbox.value == True:
if self.height_adjustment_checkbox.value == True:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+((float(self.csat3_dataframe['Uy'][i]))**2)+((float(
self.csat3_dataframe['Uz'][i]))**2))**(0.5))*((10/self.csat3_dataframe['height_measurement'][i])**(1/7)) for i in self.csat3_dataframe.index]
else:
processed_wind_speeds_list = [((((float(self.csat3_dataframe['Ux'][i]))**2)+((float(self.csat3_dataframe['Uy'][i]))**2)+(
(float(self.csat3_dataframe['Uz'][i]))**2))**(0.5)) for i in self.csat3_dataframe.index]
if self.rl_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RL'][0] for i in processed_wind_speeds_list]
if self.rt_checkbox.value == True:
processed_wind_speeds_list = [
i*self.csat3_dataframe['RT'][0] for i in processed_wind_speeds_list]
self.csat3_dataframe['U'] = pd.Series(
processed_wind_speeds_list).values
self.csat3_dataframe['TimeStamp'] = pd.to_datetime(
self.csat3_dataframe['TimeStamp'])
print("Total de linhas sem valores utilizáveis removidas: %i de %i." % (
csat3_dataframe_len-csat3_dataframe_drop_na_len, csat3_dataframe_len))
self.csat3_dataframe = self.csat3_dataframe.iloc[::self.step_int_text.value]
self.csat3_dataframe.reset_index(inplace=True, drop=True)
self.csat3_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(self.csat3_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_csat3'+'.csv').replace('\\', '/'))
return self.csat3_dataframe
def csat3_wind_data_windrose(self, csat3_dataframe, project_dirs):
self.csat3_dataframe = csat3_dataframe
self.project_dirs = project_dirs
figure = plt.figure(figsize=(12, 12))
axes = figure.add_axes([0, 0, 1, 1])
axes.set_visible(False)
csat3_windrose_dataframe = pd.DataFrame({'speed': pd.to_numeric(
self.csat3_dataframe['U']), 'direction': pd.to_numeric(self.csat3_dataframe['wnd_dir_compass'])})
axes = WindroseAxes.from_ax(fig=figure)
axes.radii_angle = self.windrose_percentage_angle_float_text.value
axes.bar(csat3_windrose_dataframe['direction'], csat3_windrose_dataframe['speed'],
normed=True, bins=self.bins_int_text.value, opening=0.7, edgecolor='white')
legend_title = ('Velocidades (%s)') % self.speed_unit_text.value
axes.legend(bbox_to_anchor=(1.3, 1), loc=1, title=legend_title)
axes.grid(linewidth=0.5, antialiased=True)
csat3_windrose_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(csat3_windrose_outputs_dir)
except OSError as Error:
if os.path.exists(csat3_windrose_outputs_dir):
pass
figure.savefig(os.path.join(csat3_windrose_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+'_windrose_csat3'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight")
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(csat3_windrose_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+'_windrose_csat3'+'.png').replace('\\', '/'))
return(figure, axes)
def csat3_wind_frequencies(self, csat3_windrose, project_dirs):
self.csat3_windrose = csat3_windrose
self.project_dirs = project_dirs
windrose_table = self.csat3_windrose[1]._info['table']
windrose_frequencies = np.sum(windrose_table, axis=0)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
figure = plt.figure(figsize=(9, 9))
axes = figure.add_axes([0, 0, 1, 1])
plt.ylabel('Frequências percentuais (%)')
plt.xlabel('Direção (°)')
axes.bar(np.arange(16), windrose_frequencies, align='center',
tick_label=windrose_labels, facecolor='limegreen', zorder=3)
axes_ticks = axes.get_yticks()
axes.set_yticklabels(['{:.1f}%'.format(value) for value in axes_ticks])
axes.grid(axis='y', zorder=0, linestyle='-', color='grey',
linewidth=0.5, antialiased=True, alpha=0.5)
csat3_wind_frequencies_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(csat3_wind_frequencies_outputs_dir)
except OSError as Error:
if os.path.exists(csat3_wind_frequencies_outputs_dir):
pass
figure.savefig(os.path.join(csat3_wind_frequencies_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+'_wind_frequencies_csat3'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight")
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(csat3_wind_frequencies_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_frequencies_csat3'+'.png').replace('\\', '/'))
def csat3_wind_stats(self, csat3_dataframe, csat3_windrose, project_dirs):
self.csat3_dataframe = csat3_dataframe
self.csat3_windrose = csat3_windrose
self.project_dirs = project_dirs
windrose_directions_array = np.array(
self.csat3_windrose[1]._info['dir'])
windrose_directions_array = np.delete(windrose_directions_array, 0)
windrose_directions_array = np.append(
windrose_directions_array, 348.75)
windrose_directions_list = []
windrose_first_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
348.75, 360)]['U']
windrose_second_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
0, 11.25)]['U']
windrose_north_direction = pd.concat(
[windrose_first_north_direction_split, windrose_second_north_direction_split], axis=0)
windrose_directions_list.append([len(windrose_north_direction), windrose_north_direction.mean(
), windrose_north_direction.std(), windrose_north_direction.min(), windrose_north_direction.max()])
for i, j in zip(windrose_directions_array[:-1], windrose_directions_array[1:]):
sample_size = len(
self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(i, j)]['U'])
mean = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].mean()
std = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].std()
mininum = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].min()
maximum = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U'].max()
windrose_directions_list.append(
[sample_size, mean, std, mininum, maximum])
wind_stats_directions_dataframe = pd.DataFrame(
windrose_directions_list)
windrose_table = self.csat3_windrose[1]._info['table']
windrose_frequencies = np.sum(windrose_table, axis=0)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
wind_stats_directions_dataframe['direction'] = windrose_labels
wind_stats_directions_dataframe['frequency'] = windrose_frequencies
wind_stats_directions_dataframe = wind_stats_directions_dataframe.round(
decimals=2)
wind_stats_directions_dataframe = wind_stats_directions_dataframe.rename(
columns={0: 'sample_size', 1: 'mean', 2: 'std', 3: 'min', 4: 'max'})
wind_stats_directions_dataframe = wind_stats_directions_dataframe[[
'direction', 'sample_size', 'frequency', 'mean', 'std', 'min', 'max']]
wind_stats_directions_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_wind_stats_csat3'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(wind_stats_directions_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_wind_stats_csat3'+'.csv').replace('\\', '/'))
def csat3_wind_bins(self, csat3_dataframe, csat3_windrose, project_dirs):
self.csat3_dataframe = csat3_dataframe
self.csat3_windrose = csat3_windrose
self.project_dirs = project_dirs
windrose_directions_array = np.array(
self.csat3_windrose[1]._info['dir'])
windrose_directions_array = np.delete(windrose_directions_array, 0)
windrose_directions_array = np.append(
windrose_directions_array, 348.75)
windrose_directions_list = []
windrose_first_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
348.75, 360)]['U']
windrose_second_north_direction_split = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
0, 11.25)]['U']
windrose_north_direction = pd.concat(
[windrose_first_north_direction_split, windrose_second_north_direction_split], axis=0)
windrose_directions_list.append(windrose_north_direction)
for i, j in zip(windrose_directions_array[:-1], windrose_directions_array[1:]):
windrose_direction_speeds = self.csat3_dataframe[self.csat3_dataframe['wnd_dir_compass'].between(
i, j)]['U']
windrose_directions_list.append(windrose_direction_speeds)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
windrose_directions_dict = {
windrose_labels[i]: windrose_directions_list[i] for i in range(0, len(windrose_labels))}
for i, j in windrose_directions_dict.items():
figure = plt.figure(figsize=(9, 9))
axes = figure.add_axes([0, 0, 1, 1])
windrose_bins = self.csat3_windrose[1]._info['bins']
windrose_formatted_bins = []
for k in range(0, len(windrose_bins[:-2])):
windrose_bins_interval = str(
'%.1f'+' – '+'%.1f') % (windrose_bins[k], windrose_bins[k+1])
windrose_formatted_bins.append(windrose_bins_interval)
windrose_last_bin = str('≧ '+'%.1f') % windrose_bins[-2]
windrose_formatted_bins.append(windrose_last_bin)
windrose_direction_speeds_dataframe = pd.DataFrame(j)
windrose_direction_speeds_dataframe = windrose_direction_speeds_dataframe.groupby(pd.cut(
windrose_direction_speeds_dataframe['U'], bins=windrose_bins, labels=windrose_formatted_bins, right=False)).count()
windrose_direction_speeds_dataframe['%'] = [
(k/sum(windrose_direction_speeds_dataframe['U']))*100 for k in windrose_direction_speeds_dataframe['U']]
windrose_direction_speeds_dataframe['%'].plot(
ax=axes, kind='bar', legend=False, colormap=None)
axes.set_title('Direção %s' % i)
axes.set_xlabel('Intervalos (%s)' % self.speed_unit_text.value)
axes.set_ylabel('Porcentagem (%)')
axes.autoscale(enable=True, axis='x', tight=None)
for k in axes.get_xticklabels():
k.set_rotation(45)
bins_title = str('_wind_bins_%s' % i)
csat3_wind_bins_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(csat3_wind_bins_outputs_dir)
except OSError as Error:
if os.path.exists(csat3_wind_bins_outputs_dir):
pass
figure.savefig(os.path.join(csat3_wind_bins_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+bins_title+'_csat3'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight", format='png')
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(csat3_wind_bins_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+bins_title+'_csat3'+'.png').replace('\\', '/'))
def load_windsonic_wind_data_button_click(self, b):
self.windsonic_wind_data = self.load_windsonic_wind_data()
def load_windsonic_wind_data(self):
sys._enablelegacywindowsfsencoding()
root = tk.Tk()
root.call('wm', 'attributes', '.', '-topmost', '1')
root.withdraw()
root.iconbitmap(os.path.join(self.path, 'logo.ico'))
root.update_idletasks()
load_windsonic_askopenfilename_dir = filedialog.askopenfilename(
initialdir=self.path, title="Confirme o diretório de importação do arquivo '.csv' do seu modelo de dados de ventos Windsonic:", filetypes=[(".csv", "*.csv")])
if load_windsonic_askopenfilename_dir == '':
messagebox.showwarning(
"ondisapy", "Nenhum modelo de dados de ventos Windsonic importado.")
return None
else:
windsonic_dataframe = pd.read_csv(
load_windsonic_askopenfilename_dir, sep=';', engine='python', encoding='utf-8', decimal=',')
messagebox.showinfo(
"ondisapy", "Modelo de dados de ventos Windsonic importado com sucesso:\n%s" % load_windsonic_askopenfilename_dir)
print("Modelo Windsonic importado:\n%s\n" %
load_windsonic_askopenfilename_dir)
return windsonic_dataframe
def windsonic_wind_data_dataframe(self, windsonic_dataframe, project_dirs):
self.windsonic_dataframe = windsonic_dataframe.copy()
self.project_dirs = project_dirs
if len(self.windsonic_dataframe.filter(regex='Unnamed').columns) != 0:
self.windsonic_dataframe = self.windsonic_dataframe[self.windsonic_dataframe.columns.drop(
list(self.windsonic_dataframe.filter(regex='Unnamed')))]
if False in self.windsonic_dataframe.columns.isin(['TIMESTAMP', 'mean_wind_speed', 'mean_wind_direction', 'height_measurement', 'RL', 'RT']):
messagebox.showwarning(
"ondisapy", "Arquivo de dados de vento com colunas nomeadas de forma diferente do modelo fornecido para uso.\nVerifique se seu arquivo .csv é proveniente do modelo correto para prosseguir com as análises.")
return None
else:
self.windsonic_dataframe[['mean_wind_speed', 'mean_wind_direction', 'height_measurement', 'RL', 'RT']] = self.windsonic_dataframe[[
'mean_wind_speed', 'mean_wind_direction', 'height_measurement', 'RL', 'RT']].astype('float64')
windsonic_dataframe_len = len(self.windsonic_dataframe)
self.windsonic_dataframe = self.windsonic_dataframe.dropna(
subset=['TIMESTAMP', 'mean_wind_speed', 'mean_wind_direction', 'height_measurement'])
self.windsonic_dataframe = self.windsonic_dataframe.fillna(
value='')
windsonic_dataframe_drop_na_len = len(self.windsonic_dataframe)
if self.height_adjustment_checkbox.value == True:
processed_wind_speeds_list = [float(self.windsonic_dataframe['mean_wind_speed'][i]*(
(10/self.windsonic_dataframe['height_measurement'][i])**(1/7))) for i in self.windsonic_dataframe.index]
else:
processed_wind_speeds_list = [float(
self.windsonic_dataframe['mean_wind_speed'][i]) for i in self.windsonic_dataframe.index]
if self.rl_checkbox.value == True:
processed_wind_speeds_list = [
i*self.windsonic_dataframe['RL'][0] for i in processed_wind_speeds_list]
if self.rt_checkbox.value == True:
processed_wind_speeds_list = [
i*self.windsonic_dataframe['RT'][0] for i in processed_wind_speeds_list]
self.windsonic_dataframe['U'] = pd.Series(
processed_wind_speeds_list).values
self.windsonic_dataframe['TIMESTAMP'] = pd.to_datetime(
self.windsonic_dataframe['TIMESTAMP'])
print("Total de linhas sem valores utilizáveis removidas: %i de %i.\n" % (
windsonic_dataframe_len-windsonic_dataframe_drop_na_len, windsonic_dataframe_len))
self.windsonic_dataframe = self.windsonic_dataframe.iloc[::self.step_int_text.value]
self.windsonic_dataframe.reset_index(inplace=True, drop=True)
self.windsonic_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_windsonic'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(self.windsonic_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_windsonic'+'.csv').replace('\\', '/'))
return self.windsonic_dataframe
def windsonic_wind_data_windrose(self, windsonic_dataframe, project_dirs):
self.windsonic_dataframe = windsonic_dataframe
self.project_dirs = project_dirs
figure = plt.figure(figsize=(12, 12))
axes = figure.add_axes([0, 0, 1, 1])
axes.set_visible(False)
windsonic_windrose_dataframe = pd.DataFrame({'speed': pd.to_numeric(
self.windsonic_dataframe['U']), 'direction': pd.to_numeric(self.windsonic_dataframe['mean_wind_direction'])})
axes = WindroseAxes.from_ax(fig=figure)
axes.radii_angle = self.windrose_percentage_angle_float_text.value
axes.bar(windsonic_windrose_dataframe['direction'], windsonic_windrose_dataframe['speed'],
normed=True, bins=self.bins_int_text.value, opening=0.7, edgecolor='white')
legend_title = ('Velocidades (%s)') % self.speed_unit_text.value
axes.legend(bbox_to_anchor=(1.3, 1), loc=1, title=legend_title)
axes.grid(linewidth=0.5, antialiased=True)
windsonic_windrose_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(windsonic_windrose_outputs_dir)
except OSError as Error:
if os.path.exists(windsonic_windrose_outputs_dir):
pass
figure.savefig(os.path.join(windsonic_windrose_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+'_windrose_windsonic'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight")
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(windsonic_windrose_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+'_windrose_windsonic'+'.png').replace('\\', '/'))
return(figure, axes)
def windsonic_wind_frequencies(self, windsonic_windrose, project_dirs):
self.windsonic_windrose = windsonic_windrose
self.project_dirs = project_dirs
windrose_table = self.windsonic_windrose[1]._info['table']
windrose_frequencies = np.sum(windrose_table, axis=0)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
figure = plt.figure(figsize=(9, 9))
axes = figure.add_axes([0, 0, 1, 1])
plt.ylabel('Frequências percentuais (%)')
plt.xlabel('Direção (°)')
axes.bar(np.arange(16), windrose_frequencies, align='center',
tick_label=windrose_labels, facecolor='limegreen', zorder=3)
axes_ticks = axes.get_yticks()
axes.set_yticklabels(['{:.1f}%'.format(value) for value in axes_ticks])
axes.grid(axis='y', zorder=0, linestyle='-', color='grey',
linewidth=0.5, antialiased=True, alpha=0.5)
windsonic_wind_frequencies_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(windsonic_wind_frequencies_outputs_dir)
except OSError as Error:
if os.path.exists(windsonic_wind_frequencies_outputs_dir):
pass
figure.savefig(os.path.join(windsonic_wind_frequencies_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+'_wind_frequencies_windsonic'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight")
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(windsonic_wind_frequencies_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_frequencies_windsonic'+'.png').replace('\\', '/'))
def windsonic_wind_stats(self, windsonic_dataframe, windsonic_windrose, project_dirs):
self.windsonic_dataframe = windsonic_dataframe
self.windsonic_windrose = windsonic_windrose
self.project_dirs = project_dirs
windrose_directions_array = np.array(
self.windsonic_windrose[1]._info['dir'])
windrose_directions_array = np.delete(windrose_directions_array, 0)
windrose_directions_array = np.append(
windrose_directions_array, 348.75)
windrose_directions_list = []
windrose_first_north_direction_split = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
348.75, 360)]['U']
windrose_second_north_direction_split = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
0, 11.25)]['U']
windrose_north_direction = pd.concat(
[windrose_first_north_direction_split, windrose_second_north_direction_split], axis=0)
windrose_directions_list.append([len(windrose_north_direction), windrose_north_direction.mean(
), windrose_north_direction.std(), windrose_north_direction.min(), windrose_north_direction.max()])
for i, j in zip(windrose_directions_array[:-1], windrose_directions_array[1:]):
sample_size = len(
self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(i, j)]['U'])
mean = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
i, j)]['U'].mean()
std = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
i, j)]['U'].std()
mininum = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
i, j)]['U'].min()
maximum = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
i, j)]['U'].max()
windrose_directions_list.append(
[sample_size, mean, std, mininum, maximum])
wind_stats_directions_dataframe = pd.DataFrame(
windrose_directions_list)
windrose_table = self.windsonic_windrose[1]._info['table']
windrose_frequencies = np.sum(windrose_table, axis=0)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
wind_stats_directions_dataframe['direction'] = windrose_labels
wind_stats_directions_dataframe['frequency'] = windrose_frequencies
wind_stats_directions_dataframe = wind_stats_directions_dataframe.round(
decimals=2)
wind_stats_directions_dataframe = wind_stats_directions_dataframe.rename(
columns={0: 'sample_size', 1: 'mean', 2: 'std', 3: 'min', 4: 'max'})
wind_stats_directions_dataframe = wind_stats_directions_dataframe[[
'direction', 'sample_size', 'frequency', 'mean', 'std', 'min', 'max']]
wind_stats_directions_dataframe.to_csv(os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_wind_stats_windsonic'+'.csv').replace('\\', '/'), encoding='utf-8', sep=';', index=True)
display(wind_stats_directions_dataframe)
print("\nDados salvos em:\n%s\n" % os.path.join(self.project_dirs['dir'][2], self.project_dirs['dir'][6].lower(
).replace(' ', '_')+'_wind_stats_windsonic'+'.csv').replace('\\', '/'))
def windsonic_wind_bins(self, windsonic_dataframe, windsonic_windrose, project_dirs):
self.windsonic_dataframe = windsonic_dataframe
self.windsonic_windrose = windsonic_windrose
self.project_dirs = project_dirs
windrose_directions_array = np.array(
self.windsonic_windrose[1]._info['dir'])
windrose_directions_array = np.delete(windrose_directions_array, 0)
windrose_directions_array = np.append(
windrose_directions_array, 348.75)
windrose_directions_list = []
windrose_first_north_direction_split = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
348.75, 360)]['U']
windrose_second_north_direction_split = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
0, 11.25)]['U']
windrose_north_direction = pd.concat(
[windrose_first_north_direction_split, windrose_second_north_direction_split], axis=0)
windrose_directions_list.append(windrose_north_direction)
for i, j in zip(windrose_directions_array[:-1], windrose_directions_array[1:]):
windrose_direction_speeds = self.windsonic_dataframe[self.windsonic_dataframe['mean_wind_direction'].between(
i, j)]['U']
windrose_directions_list.append(windrose_direction_speeds)
windrose_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE',
'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
windrose_directions_dict = {
windrose_labels[i]: windrose_directions_list[i] for i in range(0, len(windrose_labels))}
for i, j in windrose_directions_dict.items():
figure = plt.figure(figsize=(9, 9))
axes = figure.add_axes([0, 0, 1, 1])
windrose_bins = self.windsonic_windrose[1]._info['bins']
windrose_formatted_bins = []
for k in range(0, len(windrose_bins[:-2])):
windrose_bins_interval = str(
'%.1f'+' – '+'%.1f') % (windrose_bins[k], windrose_bins[k+1])
windrose_formatted_bins.append(windrose_bins_interval)
windrose_last_bin = str('≧ '+'%.1f') % windrose_bins[-2]
windrose_formatted_bins.append(windrose_last_bin)
windrose_direction_speeds_dataframe = pd.DataFrame(j)
windrose_direction_speeds_dataframe = windrose_direction_speeds_dataframe.groupby(pd.cut(
windrose_direction_speeds_dataframe['U'], bins=windrose_bins, labels=windrose_formatted_bins, right=False)).count()
windrose_direction_speeds_dataframe['%'] = [
(k/sum(windrose_direction_speeds_dataframe['U']))*100 for k in windrose_direction_speeds_dataframe['U']]
windrose_direction_speeds_dataframe['%'].plot(
ax=axes, kind='bar', legend=False, colormap=None)
axes.set_title('Direção %s' % i)
axes.set_xlabel('Intervalos (%s)' % self.speed_unit_text.value)
axes.set_ylabel('Porcentagem (%)')
axes.autoscale(enable=True, axis='x', tight=None)
for k in axes.get_xticklabels():
k.set_rotation(45)
bins_title = str('_wind_bins_%s' % i)
windsonic_wind_bins_outputs_dir = os.path.join(
self.project_dirs['dir'][4], self.project_dirs['dir'][6].lower().replace(' ', '_')+'_wind_data')
try:
os.makedirs(windsonic_wind_bins_outputs_dir)
except OSError as Error:
if os.path.exists(windsonic_wind_bins_outputs_dir):
pass
figure.savefig(os.path.join(windsonic_wind_bins_outputs_dir, self.project_dirs['dir'][6].lower().replace(
' ', '_')+bins_title+'_windsonic'+'.png').replace('\\', '/'), dpi=600, frameon=False, bbox_inches="tight", format='png')
plt.show()
print("\nImagem salva em:\n%s\n" % os.path.join(windsonic_wind_bins_outputs_dir,
self.project_dirs['dir'][6].lower().replace(' ', '_')+bins_title+'_windsonic'+'.png').replace('\\', '/'))
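# Notebook usage sketch (an assumption -- the original repository wires these
# widgets up from an .ipynb):
#     main = WidgetsMain()
#     display(main.display())        # create or import a project (tab UI)
#     wind = WidgetsWindData()
#     wind.display()                 # import wind data and tune the options
#     df = wind.csat3_wind_data_dataframe(wind.csat3_wind_data, main.project_dirs)
#     rose = wind.csat3_wind_data_windrose(df, main.project_dirs)
#     wind.csat3_wind_frequencies(rose, main.project_dirs)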
| 48.769474
| 229
| 0.614966
| 5,278
| 46,331
| 5.104775
| 0.079197
| 0.049883
| 0.054782
| 0.029395
| 0.876369
| 0.82103
| 0.787774
| 0.756486
| 0.730245
| 0.701407
| 0
| 0.015746
| 0.254322
| 46,331
| 949
| 230
| 48.820864
| 0.763923
| 0
| 0
| 0.575
| 0
| 0.002941
| 0.130977
| 0.003129
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032353
| false
| 0.030882
| 0.048529
| 0
| 0.108824
| 0.033824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
de9aa22093e5c15c2c4511fff64931009f764c84
| 5,535
|
py
|
Python
|
test/valid/test_definitions_with_refs.py
|
MikeDombo/jsonschema2popo
|
f4bddc82a307307ec7cafad17180e6fc85eb84e3
|
[
"MIT"
] | null | null | null |
test/valid/test_definitions_with_refs.py
|
MikeDombo/jsonschema2popo
|
f4bddc82a307307ec7cafad17180e6fc85eb84e3
|
[
"MIT"
] | null | null | null |
test/valid/test_definitions_with_refs.py
|
MikeDombo/jsonschema2popo
|
f4bddc82a307307ec7cafad17180e6fc85eb84e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
class ABcd:
_types_map = {
"Child1": {"type": int, "subtype": None},
"Child2": {"type": str, "subtype": None},
}
_formats_map = {}
def __init__(self, Child1=None, Child2=None):
pass
self.__Child1 = Child1
self.__Child2 = Child2
def _get_Child1(self):
return self.__Child1
def _set_Child1(self, value):
if not isinstance(value, int):
raise TypeError("Child1 must be int")
self.__Child1 = value
Child1 = property(_get_Child1, _set_Child1)
def _get_Child2(self):
return self.__Child2
def _set_Child2(self, value):
if not isinstance(value, str):
raise TypeError("Child2 must be str")
self.__Child2 = value
Child2 = property(_get_Child2, _set_Child2)
@staticmethod
def from_dict(d):
v = {}
if "Child1" in d:
if not isinstance(d["Child1"], int):
raise TypeError("Child1 must be int")
v["Child1"] = (
int.from_dict(d["Child1"]) if hasattr(int, "from_dict") else d["Child1"]
)
if "Child2" in d:
if not isinstance(d["Child2"], str):
raise TypeError("Child2 must be str")
v["Child2"] = (
str.from_dict(d["Child2"]) if hasattr(str, "from_dict") else d["Child2"]
)
return ABcd(**v)
def as_dict(self):
d = {}
if self.__Child1 is not None:
d["Child1"] = (
self.__Child1.as_dict()
if hasattr(self.__Child1, "as_dict")
else self.__Child1
)
if self.__Child2 is not None:
d["Child2"] = (
self.__Child2.as_dict()
if hasattr(self.__Child2, "as_dict")
else self.__Child2
)
return d
def __repr__(self):
return "<Class ABcd. Child1: {}, Child2: {}>".format(
self.__Child1, self.__Child2
)
class SubRef:
_types_map = {"ChildA": {"type": ABcd, "subtype": None}}
_formats_map = {}
def __init__(self, ChildA=None):
pass
self.__ChildA = ChildA
def _get_ChildA(self):
return self.__ChildA
def _set_ChildA(self, value):
if not isinstance(value, ABcd):
raise TypeError("ChildA must be ABcd")
self.__ChildA = value
ChildA = property(_get_ChildA, _set_ChildA)
@staticmethod
def from_dict(d):
v = {}
if "ChildA" in d:
if not isinstance(d["ChildA"], ABcd):
raise TypeError("ChildA must be ABcd")
v["ChildA"] = (
ABcd.from_dict(d["ChildA"])
if hasattr(ABcd, "from_dict")
else d["ChildA"]
)
return SubRef(**v)
def as_dict(self):
d = {}
if self.__ChildA is not None:
d["ChildA"] = (
self.__ChildA.as_dict()
if hasattr(self.__ChildA, "as_dict")
else self.__ChildA
)
return d
def __repr__(self):
return "<Class SubRef. ChildA: {}>".format(self.__ChildA)
class DirectRef:
_types_map = {
"Child1": {"type": int, "subtype": None},
"Child2": {"type": str, "subtype": None},
}
_formats_map = {}
def __init__(self, Child1=None, Child2=None):
pass
self.__Child1 = Child1
self.__Child2 = Child2
def _get_Child1(self):
return self.__Child1
def _set_Child1(self, value):
if not isinstance(value, int):
raise TypeError("Child1 must be int")
self.__Child1 = value
Child1 = property(_get_Child1, _set_Child1)
def _get_Child2(self):
return self.__Child2
def _set_Child2(self, value):
if not isinstance(value, str):
raise TypeError("Child2 must be str")
self.__Child2 = value
Child2 = property(_get_Child2, _set_Child2)
@staticmethod
def from_dict(d):
v = {}
if "Child1" in d:
if not isinstance(d["Child1"], int):
raise TypeError("Child1 must be int")
v["Child1"] = (
int.from_dict(d["Child1"]) if hasattr(int, "from_dict") else d["Child1"]
)
if "Child2" in d:
if not isinstance(d["Child2"], str):
raise TypeError("Child2 must be str")
v["Child2"] = (
str.from_dict(d["Child2"]) if hasattr(str, "from_dict") else d["Child2"]
)
return DirectRef(**v)
def as_dict(self):
d = {}
if self.__Child1 is not None:
d["Child1"] = (
self.__Child1.as_dict()
if hasattr(self.__Child1, "as_dict")
else self.__Child1
)
if self.__Child2 is not None:
d["Child2"] = (
self.__Child2.as_dict()
if hasattr(self.__Child2, "as_dict")
else self.__Child2
)
return d
def __repr__(self):
return "<Class DirectRef. Child1: {}, Child2: {}>".format(
self.__Child1, self.__Child2
)
class RootObject:
def __init__(self):
pass
@staticmethod
def from_dict(d):
v = {}
return RootObject(**v)
def as_dict(self):
d = {}
return d
def __repr__(self):
return "<Class RootObject. >".format()
| 25.273973
| 88
| 0.518338
| 617
| 5,535
| 4.338736
| 0.077796
| 0.067239
| 0.056033
| 0.026149
| 0.836384
| 0.829287
| 0.796414
| 0.727307
| 0.687337
| 0.687337
| 0
| 0.028337
| 0.362421
| 5,535
| 218
| 89
| 25.389908
| 0.730235
| 0.003613
| 0
| 0.716867
| 0
| 0
| 0.117882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156627
| false
| 0.024096
| 0
| 0.054217
| 0.349398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9d11cc342221ce15f8736828a1bdb0f1c1c69194
| 213
|
py
|
Python
|
visualdl/server/model/paddle/__init__.py
|
liran05/VisualDL
|
d2c63ee514e751dbb99fb243b4b08208ba48f642
|
[
"Apache-2.0"
] | 1
|
2019-08-23T08:42:44.000Z
|
2019-08-23T08:42:44.000Z
|
visualdl/server/model/paddle/__init__.py
|
liran05/VisualDL
|
d2c63ee514e751dbb99fb243b4b08208ba48f642
|
[
"Apache-2.0"
] | null | null | null |
visualdl/server/model/paddle/__init__.py
|
liran05/VisualDL
|
d2c63ee514e751dbb99fb243b4b08208ba48f642
|
[
"Apache-2.0"
] | 1
|
2020-01-29T03:38:35.000Z
|
2020-01-29T03:38:35.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .paddle2graph import PaddleModel
__all__ = ["PaddleModel"]  # __all__ entries must be strings for star-imports to work
| 23.666667
| 39
| 0.86385
| 25
| 213
| 6.44
| 0.48
| 0.248447
| 0.397516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005319
| 0.117371
| 213
| 8
| 40
| 26.625
| 0.851064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9d207b8797ee0a77cf98b741bea6690973d4a8a5
| 46
|
py
|
Python
|
crslab/data/dataset/tgredial/__init__.py
|
Xiaolong-Qi/CRSLab
|
d507378c86f4996727bf062482e1f224486d4533
|
[
"MIT"
] | 1
|
2021-01-06T10:39:10.000Z
|
2021-01-06T10:39:10.000Z
|
crslab/data/dataset/tgredial/__init__.py
|
Xiaolong-Qi/CRSLab
|
d507378c86f4996727bf062482e1f224486d4533
|
[
"MIT"
] | null | null | null |
crslab/data/dataset/tgredial/__init__.py
|
Xiaolong-Qi/CRSLab
|
d507378c86f4996727bf062482e1f224486d4533
|
[
"MIT"
] | null | null | null |
from .tgredial_dataset import TGReDialDataset
| 23
| 45
| 0.891304
| 5
| 46
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9d490ff77f460ee5f98662d20ba4c1882502a63b
| 5,959
|
py
|
Python
|
src/ui/ddd_widgets.py
|
PeachyPrinter/peachyprinter
|
6d82b9eaaa03129870aa637eabdc0cb66e90b626
|
[
"Apache-2.0"
] | 12
|
2016-05-12T14:05:30.000Z
|
2021-04-03T06:03:37.000Z
|
src/ui/ddd_widgets.py
|
PeachyPrinter/peachyprinter
|
6d82b9eaaa03129870aa637eabdc0cb66e90b626
|
[
"Apache-2.0"
] | 1
|
2016-02-03T21:46:19.000Z
|
2016-02-04T01:48:31.000Z
|
src/ui/ddd_widgets.py
|
PeachyPrinter/peachyprinter
|
6d82b9eaaa03129870aa637eabdc0cb66e90b626
|
[
"Apache-2.0"
] | 12
|
2016-01-27T15:14:25.000Z
|
2020-08-21T00:44:43.000Z
|
from kivy.clock import Clock
from kivy.uix.widget import Widget
from kivy.graphics.transformation import Matrix
from kivy.graphics.opengl import *
from kivy.graphics import *
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.uix.button import Button
from kivy.core.window import Window
from kivy.resources import resource_find
from infrastructure.object_loader import ObjFile
from infrastructure.langtools import _
from ui.custom_widgets import *
from kivy.logger import Logger  # used in the shader-not-found warnings below
from kivy.uix.boxlayout import BoxLayout  # base class of ObjectManipulator
import time
Builder.load_file('ui/ddd_widgets.kv')
class I18NObjImageButton(Button):
model = StringProperty()
text_source = StringProperty()
key = StringProperty()
def start_animations(self):
self.ids.renderer.start_animations()
def stop_animations(self):
self.ids.renderer.stop_animations()
class Renderer(Widget):
model = StringProperty(allow_none=True)
def __init__(self, **kwargs):
self.canvas = RenderContext()
shader = resource_find('simple.glsl')
if not shader:
Logger.error("Shader not found")
self.canvas.shader.source = shader
self._running = False
super(Renderer, self).__init__(**kwargs)
def start_animations(self):
Clock.schedule_interval(self.update_glsl, 1 / 60.)
self._running = True
self.on_model(self, self.model)
def stop_animations(self):
self._running = False
self.canvas.clear()
Clock.unschedule(self.update_glsl)
def on_model(self, instance, value):
if self._running:
if value:
self.canvas.clear()
self.scene = ObjFile(self.model)
with self.canvas:
self.cb = Callback(self.setup_gl_context)
PushMatrix()
self.setup_scene()
PopMatrix()
self.cb = Callback(self.reset_gl_context)
def setup_gl_context(self, *args):
glEnable(GL_DEPTH_TEST)
def reset_gl_context(self, *args):
glDisable(GL_DEPTH_TEST)
def update_glsl(self, *largs):
asp = max(self.width / float(self.height),1)
proj = Matrix().view_clip(-asp, asp, -1, 1, 1, 100, 1)
tx = ((self.center_x / float(Window.width)) * 2.0) - 1.0
ty = ((self.center_y / float(Window.height)) * 2.0) - 1.0
trans = Matrix().translate(tx,ty,0)
self.canvas['projection_mat'] = proj
self.canvas['diffuse_light'] = (1.0, 1.0, 0.8)
self.canvas['ambient_light'] = (0.1, 0.1, 0.1)
self.canvas['translate_mat'] = trans
self.rotate_y.angle += 1
def setup_scene(self):
Color(1, 1, 1, 1)
PushMatrix()
Translate(0,0,-3)
Rotate(15, 1, 0, 0)
self.rotate_y = Rotate(1, 0, 1, 0)
m = list(self.scene.objects.values())[0]
UpdateNormalMatrix()
self.mesh = Mesh(
vertices=m.vertices,
indices=m.indices,
fmt=m.vertex_format,
mode='triangles',
)
PopMatrix()
class ObjectManipulator(BoxLayout):
model = StringProperty(allow_none=True)
def __init__(self, **kwargs):
self.canvas = RenderContext()
shader = resource_find('simple.glsl')
if not shader:
Logger.error("Shader not found")
self.canvas.shader.source = shader
self._running = False
super(ObjectManipulator, self).__init__(**kwargs)
def start_animations(self):
Clock.schedule_interval(self.update_glsl, 1 / 60.)
self._running = True
self.on_model(self, self.model)
def stop_animations(self):
self._running = False
self.canvas.clear()
Clock.unschedule(self.update_glsl)
def on_model(self, instance, value):
if self._running:
if value:
self.canvas.clear()
self.scene = ObjFile(self.model)
with self.canvas:
self.cb = Callback(self.setup_gl_context)
PushMatrix()
self.setup_scene()
PopMatrix()
self.cb = Callback(self.reset_gl_context)
def setup_gl_context(self, *args):
glEnable(GL_DEPTH_TEST)
def reset_gl_context(self, *args):
glDisable(GL_DEPTH_TEST)
def update_glsl(self, *largs):
asp = max(self.width / float(self.height),1)
proj = Matrix().view_clip(-asp, asp, -1, 1, 1, 100, 1)
tx = ((self.center_x / float(Window.width)) * 2.0) - 1.0
ty = ((self.center_y / float(Window.height)) * 2.0) - 1.0
trans = Matrix().translate(tx, ty, 0)
self.canvas['projection_mat'] = proj
self.canvas['diffuse_light'] = (1.0, 1.0, 0.8)
self.canvas['ambient_light'] = (0.1, 0.1, 0.1)
self.canvas['translate_mat'] = trans
if self.rotate_x.angle < 360:
self.rotate_x.angle += 1
else:
if self.rotate_y.angle < 360:
self.rotate_y.angle += 1
else:
if self.rotate_z.angle < 360:
self.rotate_z.angle += 1
else:
self.rotate_x.angle = 0
self.rotate_y.angle = 0
self.rotate_z.angle = 0
def setup_scene(self):
Color(1, 1, 1, 1)
PushMatrix()
Translate(0, 0, -2)
Rotate(15, 1, 0, 0)
Translate(0, 0.5, 0)
self.rotate_x = Rotate(0, 1, 0, 0)
Translate(0, -0.5, 0)
self.rotate_y = Rotate(1, 0, 1, 0)
Translate(0, 0.5, 0)
self.rotate_z = Rotate(0, 0, 0, 1)
Translate(0, -0.5, 0)
m = list(self.scene.objects.values())[0]
UpdateNormalMatrix()
self.mesh = Mesh(
vertices=m.vertices,
indices=m.indices,
fmt=m.vertex_format,
mode='triangles',
)
PopMatrix()
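# Usage sketch (an assumption): the companion ui/ddd_widgets.kv binds an
# I18NObjImageButton's `model` property to a .obj file path; calling
# start_animations()/stop_animations() toggles the embedded Renderer's
# 60 FPS update loop.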
| 31.86631
| 65
| 0.575432
| 740
| 5,959
| 4.481081
| 0.186486
| 0.01146
| 0.011761
| 0.007238
| 0.762967
| 0.725875
| 0.712606
| 0.712606
| 0.705669
| 0.696019
| 0
| 0.033923
| 0.307434
| 5,959
| 187
| 66
| 31.86631
| 0.769566
| 0
| 0
| 0.753165
| 0
| 0
| 0.032718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113924
| false
| 0
| 0.088608
| 0
| 0.253165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9d5a060a658b3c793e3d39dd5e4a28df7944b73c
| 717
|
py
|
Python
|
AdventOfCode2019Day01/test/test_day01.py
|
bdlepla/AdventOfCode2019
|
27a8289bae8510f8af457658b2fa10d5345f9426
|
[
"Unlicense"
] | null | null | null |
AdventOfCode2019Day01/test/test_day01.py
|
bdlepla/AdventOfCode2019
|
27a8289bae8510f8af457658b2fa10d5345f9426
|
[
"Unlicense"
] | null | null | null |
AdventOfCode2019Day01/test/test_day01.py
|
bdlepla/AdventOfCode2019
|
27a8289bae8510f8af457658b2fa10d5345f9426
|
[
"Unlicense"
] | null | null | null |
def test_solve_part_1():
import day01
raw_lines = """
12
14
1969
100756
""".split("\n")
trimmed_lines = map(lambda s: s.strip(), raw_lines)
lines = filter(None, trimmed_lines)
    solver = day01.Day01(lines)  # a distinct name keeps the imported module usable
    actual = solver.solve_part_1()
expected = 2 + 2 + 654 + 33583
assert expected == actual
def test_solve_part_2():
import day01
raw_lines = """
12
14
1969
100756
""".split("\n")
trimmed_lines = map(lambda s: s.strip(), raw_lines)
lines = filter(None, trimmed_lines)
    solver = day01.Day01(lines)
    actual = solver.solve_part_2()
expected = 2 + 2 + 966 + 50346
assert expected == actual
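# Context note: the expected values follow Advent of Code 2019 day 1 --
# part 1 fuel is mass // 3 - 2 per module (12 -> 2, 14 -> 2, 1969 -> 654,
# 100756 -> 33583); part 2 also fuels the fuel, iterating until the
# increment drops to zero or below (hence 966 and 50346 for the large masses).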
| 23.9
| 55
| 0.577406
| 92
| 717
| 4.304348
| 0.326087
| 0.090909
| 0.060606
| 0.080808
| 0.717172
| 0.717172
| 0.717172
| 0.717172
| 0.717172
| 0.717172
| 0
| 0.144578
| 0.305439
| 717
| 30
| 56
| 23.9
| 0.650602
| 0
| 0
| 0.785714
| 0
| 0
| 0.158996
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dfb2e7c005142e0e9b52cce8ef86be1964568d69
| 2,266
|
py
|
Python
|
剑指offer/37_FirstCommonNodesInLists(两个链表的第一个公共结点).py
|
PegasusWang/python_data_structures_and_algorithms
|
513547526d2926f8e8bff36e9b83905085aa3ee5
|
[
"MIT"
] | 2,468
|
2018-04-20T02:58:20.000Z
|
2022-03-29T13:41:38.000Z
|
剑指offer/37_FirstCommonNodesInLists(两个链表的第一个公共结点).py
|
PegasusWang/python_data_structures_and_algorithms
|
513547526d2926f8e8bff36e9b83905085aa3ee5
|
[
"MIT"
] | 31
|
2018-05-12T08:40:02.000Z
|
2021-05-27T02:51:52.000Z
|
剑指offer/37_FirstCommonNodesInLists(两个链表的第一个公共结点).py
|
PegasusWang/python_data_structures_and_algorithms
|
513547526d2926f8e8bff36e9b83905085aa3ee5
|
[
"MIT"
] | 829
|
2018-04-20T05:40:18.000Z
|
2022-03-28T14:33:56.000Z
|
"""
面试题37:两个链表的第一个公共结点
题目:输入两个链表,找出它们的第一个公共结点。链表结点定义如下:
https://leetcode.com/problems/intersection-of-two-linked-lists/
思路:
两个链表连接以后,之后的节点都是一样的了。
1. 使用两个栈push 所有节点,然后比较栈顶元素,如果一样就 都 pop继续比较。如果栈顶不一样,结果就是上一次 pop 的值。
2. 先分别遍历两个链表,找到各自长度,然后让一个链表先走 diff(len1-len2)步骤,之后一起往前走,找到的第一个就是。
"""
# Definition for singly-linked list.
class Node(object):
def __init__(self, x, next=None):
self.val = x
self.next = next
class _Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if headA is None or headB is None or (headA is None and headB is None):
return None
len1 = 0
cura = headA
while cura:
len1 += 1
cura = cura.next
len2 = 0
curb = headB
while curb:
len2 += 1
curb = curb.next
difflen = abs(len1 - len2)
if len1 > len2:
for i in range(difflen):
headA = headA.next
else:
for i in range(difflen):
headB = headB.next
while headA and headB:
if headA == headB: # headA.val == headB.val and headA.next == headB.next
return headA
headA = headA.next
headB = headB.next
return None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if headA is None or headB is None:
return None
len1 = 0
cura = headA
while cura:
len1 += 1
cura = cura.next
len2 = 0
curb = headB
while curb:
len2 += 1
curb = curb.next
difflen = abs(len1 - len2)
if len1 > len2:
for i in range(difflen):
headA = headA.next
else:
for i in range(difflen):
headB = headB.next
while headA and headB:
if headA == headB: # headA.val == headB.val and headA.next == headB.next
return headA
headA = headA.next
headB = headB.next
return None
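# Quick self-test (a sketch, not in the original solution): two lists sharing
# a tail -- A: 1 -> 2 -> 8 -> 9, B: 4 -> 8 -> 9 -- must intersect at node 8.
if __name__ == "__main__":
    common = Node(8, Node(9))
    head_a = Node(1, Node(2, common))
    head_b = Node(4, common)
    assert Solution().getIntersectionNode(head_a, head_b) is common
    assert Solution().getIntersectionNode(head_a, None) is None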
| 22.888889
| 87
| 0.519859
| 259
| 2,266
| 4.528958
| 0.293436
| 0.030691
| 0.02046
| 0.037511
| 0.72208
| 0.72208
| 0.72208
| 0.72208
| 0.72208
| 0.72208
| 0
| 0.024781
| 0.394528
| 2,266
| 98
| 88
| 23.122449
| 0.830175
| 0.225508
| 0
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dffaf24a5695aea3c6bbd84278f9eba54204645c
| 74
|
py
|
Python
|
AmbieNet/users/serializers/__init__.py
|
sansuaza/Backend-AmbieNet
|
97613bc7f3bb52e1ff0a679a15867dd85648a6b7
|
[
"MIT"
] | null | null | null |
AmbieNet/users/serializers/__init__.py
|
sansuaza/Backend-AmbieNet
|
97613bc7f3bb52e1ff0a679a15867dd85648a6b7
|
[
"MIT"
] | 6
|
2021-05-23T17:03:45.000Z
|
2021-06-10T23:08:38.000Z
|
AmbieNet/users/serializers/__init__.py
|
sansuaza/Backend-AmbieNet
|
97613bc7f3bb52e1ff0a679a15867dd85648a6b7
|
[
"MIT"
] | null | null | null |
from .users import *
from .role_requests import *
from .profiles import *
| 18.5
| 28
| 0.756757
| 10
| 74
| 5.5
| 0.6
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 3
| 29
| 24.666667
| 0.887097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5f0874d1612b58643538b1f557d2ff522136e5fc
| 32
|
py
|
Python
|
utilDwarf/__main__.py
|
luluci/utilDwarf
|
38c2213b5c39a605e4428481840dc0383818965d
|
[
"MIT"
] | null | null | null |
utilDwarf/__main__.py
|
luluci/utilDwarf
|
38c2213b5c39a605e4428481840dc0383818965d
|
[
"MIT"
] | null | null | null |
utilDwarf/__main__.py
|
luluci/utilDwarf
|
38c2213b5c39a605e4428481840dc0383818965d
|
[
"MIT"
] | null | null | null |
from utilDwarf import utilDwarf
| 16
| 31
| 0.875
| 4
| 32
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a02bba6a8cf3379d61f5cc173ac43e5c57fa6f75
| 165
|
py
|
Python
|
livetv_api/admin.py
|
kamruljaman26/livetvapp
|
f8be82abf9c5de5666ca5c4d69535482d8d7f488
|
[
"MIT"
] | null | null | null |
livetv_api/admin.py
|
kamruljaman26/livetvapp
|
f8be82abf9c5de5666ca5c4d69535482d8d7f488
|
[
"MIT"
] | 3
|
2021-03-19T07:56:23.000Z
|
2021-06-10T19:39:35.000Z
|
livetv_api/admin.py
|
kamruljaman26/livetvapp
|
f8be82abf9c5de5666ca5c4d69535482d8d7f488
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from livetv_api import models
# Register your models here.
admin.site.register(models.TvLink)
admin.site.register(models.AdsService)
| 33
| 38
| 0.836364
| 24
| 165
| 5.708333
| 0.583333
| 0.131387
| 0.248175
| 0.335766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084848
| 165
| 5
| 38
| 33
| 0.907285
| 0.157576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a02bbdab98fbb46803adbc23f1297b76198821e5
| 46
|
pyde
|
Python
|
light/listing 68/listing68/listing68.pyde
|
Drozdnik/2019-fall-polytech-cs
|
02154dd152c454c25bdce93a0643267e8f65eee4
|
[
"MIT"
] | null | null | null |
light/listing 68/listing68/listing68.pyde
|
Drozdnik/2019-fall-polytech-cs
|
02154dd152c454c25bdce93a0643267e8f65eee4
|
[
"MIT"
] | null | null | null |
light/listing 68/listing68/listing68.pyde
|
Drozdnik/2019-fall-polytech-cs
|
02154dd152c454c25bdce93a0643267e8f65eee4
|
[
"MIT"
] | null | null | null |
a = [[3, 5]]
a[0] = [7]
a.append([0])   # a has only one element, so a[1] = ... would raise IndexError
a.append(None)  # Python spells null as None
| 9.2
| 11
| 0.326087
| 12
| 46
| 1.25
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 0.26087
| 46
| 4
| 12
| 11.5
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a05248cd22c143d6eb5b899f077f4034bc844ea9
| 29,194
|
py
|
Python
|
chixma_run_on_aml.py
|
JarvisUSTC/DARDet
|
debbf476e9750030db67f030a40cf8d4f03e46ee
|
[
"Apache-2.0"
] | null | null | null |
chixma_run_on_aml.py
|
JarvisUSTC/DARDet
|
debbf476e9750030db67f030a40cf8d4f03e46ee
|
[
"Apache-2.0"
] | null | null | null |
chixma_run_on_aml.py
|
JarvisUSTC/DARDet
|
debbf476e9750030db67f030a40cf8d4f03e46ee
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import json
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--expName",
dest="expName",
default="",
help="name of Exp",
type=str,
)
parser.add_argument(
"--expCode",
dest="expCode",
default="",
help="name of Exp",
type=str,
)
parser.add_argument(
"--expVersion",
dest="expVersion",
default="",
help="name of Exp",
type=str,
)
parser.add_argument(
"--node_nums",
help="num of used nodes",
type=int,
required=True
)
parser.add_argument(
"--gpus_per_node",
help="num of gpus per node is used for multi nodes or gpus used if use single node",
type=int,
required=True
)
args, unparsed = parser.parse_known_args()
extra_args = " ".join(unparsed)
return args, extra_args
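# main() drives the whole AML job: initialize the node, stage the config and
# datasets from blob storage, launch (single-node) distributed training, then
# package the evaluation results back into Azure Blob.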
def main():
args, extra_args = parse_args()
init_on_aml()
root_path = "/blob/workstation/mmdetection"
#<========================Change Here==========================================
output_dir = "{}/{}/{}/{}/output/".format(root_path, args.expName, args.expCode, args.expVersion)
tmp_outdir = "./output/"
#<========================Change Here==========================================
config_file = "./configs/{}/{}.py".format(args.expName, args.expCode) #DARDet/exp1.py
copy_data_from_blob(config_file)
from mmcv import Config
cfg = Config.fromfile(config_file)
# Multi-node training is not supported yet, so fail fast with a clear message.
if args.node_nums > 1:
raise NotImplementedError("for now, only support single node")
else:
cmd = "local=$(pwd) \n export PYTHONPATH=${local}/mmdet \n"
cmd += "export MKL_THREADING_LAYER=GNU \n"
cmd += "mkdir -p {} \n".format(tmp_outdir)
cmd += "cp {}/* {} \n".format(output_dir, tmp_outdir)
cmd += "python -m torch.distributed.launch --nproc_per_node={} tools/train.py {} --launcher pytorch --work-dir {} --no-validate".format(args.gpus_per_node,config_file,tmp_outdir)
print(cmd)
os.system(cmd)
# As writing logs to Azure Blob in real time is too slow, we first log inside the Docker image and copy the logs to Azure Blob at the end.
cmd = "mkdir -p {} \n".format(output_dir)
cmd += "cp {}/* {} \n".format(tmp_outdir, output_dir)
print(cmd)
os.system(cmd)
# # Make result file for MLT2017 RPN_ONLY test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "python /detectron/tools/demo_af_rpn.py --config-file {} --im_or_folder /detectron/datasets/icdar2015_mlt_test/JPEGImages/ --output /origin_results/ --confidence-threshold 0.5 --no_demo --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/rpn/model_final.pth MODEL.RPN_ONLY True DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip /res_txt.zip -@ \n"
# #<========================Change Here===========================================
# cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/rpn/res_txt_50.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# # Change threshold
# cmd += "sudo mkdir /result \n"
# cmd += "sudo cp /blob/workstation/scripts/change_threshold.py /change_threshold.py \n"
# cmd += "cd /result \n"
# for th in range(500, 975, 25):
# th = th / 1000.0
# cmd += "python /change_threshold.py /origin_results/txt {} \n".format(th)
# cmd += "sudo zip -r /test_scratch_{:.3f}.zip ./ \n".format(th)
# cmd += "sudo cp /test_scratch_{:.3f}.zip {}/{}/{}/{}/rpn/test_scratch_{:.3f}.zip \n".format(th, root_path, args.expName, args.expCode, args.expVersion, th)
# print(cmd)
# os.system(cmd)
# # Make result file for MLT2017 FRCN test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "python /detectron/tools/demo_af_rpn.py --config-file {} --im_or_folder /detectron/datasets/icdar2015_mlt_test/JPEGImages/ --output /origin_results/ --confidence-threshold 0.5 --no_demo --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip /res_txt.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/frcn/res_txt_50.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# # Change threshold
# cmd += "sudo mkdir /result \n"
# cmd += "sudo cp /blob/workstation/scripts/change_threshold.py /change_threshold.py \n"
# cmd += "cd /result \n"
# for th in range(500, 975, 25):
# th = th / 1000.0
# cmd += "python /change_threshold.py /origin_results/txt {} \n".format(th)
# cmd += "sudo zip -r /test_scratch_{:.3f}.zip ./ \n".format(th)
# cmd += "sudo cp /test_scratch_{:.3f}.zip {}/{}/{}/{}/frcn/test_scratch_{:.3f}.zip \n".format(th, root_path, args.expName, args.expCode, args.expVersion, th)
# print(cmd)
# os.system(cmd)
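# Each evaluation block below follows the same pattern: symlink the dataset
# into the measurement tool, run inference_demo_pod.py over the images, then
# zip the txt/jpg outputs and copy the archives into Azure Blob.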
# Make result file for MSRA_POD FRCN test
cmd = "sudo mkdir /origin_results \n"
cmd += "sudo chmod -R 777 /origin_results \n"
cmd += "local=$(pwd) \n export PYTHONPATH=${local}/mmdet \n"
cmd += "rm -r /mmdetection/tools/msra_pod_measurement_tool/test_dataset \n"
cmd += "mkdir -p /mmdetection/tools/msra_pod_measurement_tool/test_dataset \n"
cmd += "ln -s /mmdetection/datasets/POD_RevB_combined/raw_images_horizontal /mmdetection/tools/msra_pod_measurement_tool/test_dataset/images \n"
cmd += "ln -s /mmdetection/datasets/POD_RevB_combined/xml /mmdetection/tools/msra_pod_measurement_tool/test_dataset/xml \n"
cmd += "python /mmdetection/demo/inference_demo_pod.py --config {} --im_or_folder /mmdetection/datasets/POD_RevB_combined/raw_images_horizontal/ --output /origin_results/ --num_loader 64 --checkpoint {}/{}/{}/{}/output/latest.pth \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
cmd += "cd /origin_results/txt \n"
cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip -q /res_txt.zip -@ \n"
#<========================Change Here============================================
cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/output/res_txt_th0.5_horizontal.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
cmd += "cd /origin_results/image \n"
cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims.zip -@ \n"
#<========================Change Here============================================
cmd += "sudo cp /res_ims.zip {}/{}/{}/{}/output/res_ims_th0.5_horizontal.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /detectron \n"
# cmd += "python /detectron/tools/demo_pod.py --config-file {} --im_or_folder /blob/data/kownlege_lake_testset/test_images/ --output /origin_results2/ --confidence-threshold 0.1 --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
#<========================Change Here============================================
# cmd += "cd /origin_results2/image \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims_kl.zip -@ \n"
# cmd += "sudo cp /res_ims_kl.zip {}/{}/{}/{}/frcn/res_ims_kl_th0.5.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
print(cmd)
os.system(cmd)
# Make result file for Rotated_360_MSRA_POD FRCN test
cmd = "sudo mkdir /origin_results \n"
cmd += "sudo chmod -R 777 /origin_results \n"
cmd += "local=$(pwd) \n export PYTHONPATH=${local}/mmdet \n"
cmd += "rm -r /mmdetection/tools/msra_pod_measurement_tool/test_dataset \n"
cmd += "rm -rf /mmdetection/tools/msra_pod_measurement_tool/annots.pkl \n"
cmd += "mkdir -p /mmdetection/tools/msra_pod_measurement_tool/test_dataset \n"
cmd += "ln -s /mmdetection/datasets/rotated_360_POD_RevB_combined/raw_images_horizontal /mmdetection/tools/msra_pod_measurement_tool/test_dataset/images \n"
cmd += "ln -s /mmdetection/datasets/rotated_360_POD_RevB_combined/xml /mmdetection/tools/msra_pod_measurement_tool/test_dataset/xml \n"
cmd += "python /mmdetection/demo/inference_demo_pod.py --config {} --im_or_folder /mmdetection/datasets/rotated_360_POD_RevB_combined/raw_images_horizontal/ --output /origin_results/ --num_loader 64 --checkpoint {}/{}/{}/{}/output/latest.pth \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
cmd += "cd /origin_results/txt \n"
cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip -q /res_txt.zip -@ \n"
#<========================Change Here============================================
cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/output/res_txt_th0.5_360.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
cmd += "cd /origin_results/image \n"
cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims.zip -@ \n"
#<========================Change Here============================================
cmd += "sudo cp /res_ims.zip {}/{}/{}/{}/output/res_ims_th0.5_360.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /detectron \n"
# cmd += "python /detectron/tools/demo_pod.py --config-file {} --im_or_folder /blob/data/kownlege_lake_testset/test_images/ --output /origin_results2/ --confidence-threshold 0.1 --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# #<========================Change Here============================================
# cmd += "cd /origin_results2/image \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims_kl.zip -@ \n"
# cmd += "sudo cp /res_ims_kl.zip {}/{}/{}/{}/frcn/res_ims_kl_th0.5.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
print(cmd)
os.system(cmd)
# Make result file for Rotated_45_MSRA_POD FRCN test
cmd = "sudo mkdir /origin_results \n"
cmd += "sudo chmod -R 777 /origin_results \n"
cmd += "local=$(pwd) \n export PYTHONPATH=${local}/mmdet \n"
cmd += "rm -r /mmdetection/tools/msra_pod_measurement_tool/test_dataset \n"
cmd += "rm -rf /mmdetection/tools/msra_pod_measurement_tool/annots.pkl \n"
cmd += "mkdir -p /mmdetection/tools/msra_pod_measurement_tool/test_dataset \n"
cmd += "ln -s /mmdetection/datasets/rotated_45_POD_RevB_combined/raw_images_horizontal /mmdetection/tools/msra_pod_measurement_tool/test_dataset/images \n"
cmd += "ln -s /mmdetection/datasets/rotated_45_POD_RevB_combined/xml /mmdetection/tools/msra_pod_measurement_tool/test_dataset/xml \n"
cmd += "python /mmdetection/demo/inference_demo_pod.py --config {} --im_or_folder /mmdetection/datasets/rotated_45_POD_RevB_combined/raw_images_horizontal/ --output /origin_results/ --num_loader 64 --checkpoint {}/{}/{}/{}/output/latest.pth \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
cmd += "cd /origin_results/txt \n"
cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip -q /res_txt.zip -@ \n"
#<========================Change Here============================================
cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/output/res_txt_th0.5_45.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
cmd += "cd /origin_results/image \n"
cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims.zip -@ \n"
#<========================Change Here============================================
cmd += "sudo cp /res_ims.zip {}/{}/{}/{}/output/res_ims_th0.5_45.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /detectron \n"
# cmd += "python /detectron/tools/demo_pod.py --config-file {} --im_or_folder /blob/data/kownlege_lake_testset/test_images/ --output /origin_results2/ --confidence-threshold 0.1 --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# #<========================Change Here============================================
# cmd += "cd /origin_results2/image \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims_kl.zip -@ \n"
# cmd += "sudo cp /res_ims_kl.zip {}/{}/{}/{}/frcn/res_ims_kl_th0.5.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
print(cmd)
os.system(cmd)
# # Make result file for cTDaR2019_TRACKA FRCN test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "rm -r /detectron/tools/ctdar_measurement_tool/annotations/cTDaR2019/trackA \n"
# cmd += "ln -s /detectron/datasets/cTDaR2019_TRACKA/testing_data/raw_xml /detectron/tools/ctdar_measurement_tool/annotations/cTDaR2019/trackA \n"
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/cTDaR2019_TRACKA/testing_data/converted_jpg_ims/ --output /origin_results/ --confidence-threshold 0.5 --no_demo --do_eval --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip -q /res_txt.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/frcn/res_txt_th0.5.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# print(cmd)
# os.system(cmd)
# # Make result file for publaynet_val FRCN test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "rm -r /detectron/tools/ctdar_measurement_tool/annotations/publaynet_val/trackA \n"
# cmd += "ln -s /detectron/datasets/publaynet_val/val_gt_xml_for_table_only /detectron/tools/ctdar_measurement_tool/annotations/publaynet_val/trackA \n"
# cmd += "rm -r /detectron/tools/coco_eval_tool/publaynet_val.json \n"
# cmd += "ln -s /detectron/datasets/publaynet_val/val.json /detectron/tools/coco_eval_tool/publaynet_val.json \n"
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/publaynet_val/val/ --output /origin_results/ --confidence-threshold 0.5 --no_demo --do_eval --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip -q /res_txt.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/frcn/res_txt_th0.5.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# print(cmd)
# os.system(cmd)
# # Make result file for IIIT-AR-13K_val/test FRCN test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "rm -r /detectron/tools/IIIT-AR-13K_evaluation_tool/input/val/ground-truth \n"
# cmd += "ln -s /blob/data/IIIT-AR-13K_val/gt_txt_for_eval_tool /detectron/tools/IIIT-AR-13K_evaluation_tool/input/val/ground-truth \n"
# cmd += "rm -r /detectron/tools/IIIT-AR-13K_evaluation_tool/input/test/ground-truth \n"
# cmd += "ln -s /blob/data/IIIT-AR-13K_test/gt_txt_for_eval_tool /detectron/tools/IIIT-AR-13K_evaluation_tool/input/test/ground-truth \n"
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/IIIT-AR-13K_val/validation_images --output /origin_results/ --confidence-threshold 0.5 --do_eval --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip -q /res_txt.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_txt.zip {}/{}/{}/{}/frcn/val_res_txt_th0.5.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/image \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_ims.zip {}/{}/{}/{}/frcn/val_res_ims.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /detectron \n"
# cmd += "sudo mkdir /origin_results2 \n"
# cmd += "sudo chmod -R 777 /origin_results2 \n"
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/IIIT-AR-13K_test/test_images --output /origin_results2/ --confidence-threshold 0.5 --do_eval --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results2/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.txt' | sudo zip -q /res_txt2.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_txt2.zip {}/{}/{}/{}/frcn/test_res_txt_th0.5.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results2/image \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims2.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_ims2.zip {}/{}/{}/{}/frcn/test_res_ims.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# print(cmd)
# os.system(cmd)
# # Make result file for TableBank val/test FRCN test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "rm -r /detectron/tools/coco_eval_tool/tablebank_word_val.json \n"
# cmd += "rm -r /detectron/tools/coco_eval_tool/tablebank_word_test.json \n"
# cmd += "rm -r /detectron/tools/coco_eval_tool/tablebank_latex_val.json \n"
# cmd += "rm -r /detectron/tools/coco_eval_tool/tablebank_latex_test.json \n"
# cmd += "rm -r /detectron/tools/coco_eval_tool/tablebank_word_latex_val.json \n"
# cmd += "rm -r /detectron/tools/coco_eval_tool/tablebank_word_latex_test.json \n"
# cmd += "ln -s /blob/data/TableBank/annotations/tablebank_word_val.json /detectron/tools/coco_eval_tool/tablebank_word_val.json \n"
# cmd += "ln -s /blob/data/TableBank/annotations/tablebank_word_test.json /detectron/tools/coco_eval_tool/tablebank_word_test.json \n"
# cmd += "ln -s /blob/data/TableBank/annotations/tablebank_latex_val.json /detectron/tools/coco_eval_tool/tablebank_latex_val.json \n"
# cmd += "ln -s /blob/data/TableBank/annotations/tablebank_latex_test.json /detectron/tools/coco_eval_tool/tablebank_latex_test.json \n"
# cmd += "ln -s /blob/data/TableBank/annotations/tablebank_word_latex_val.json /detectron/tools/coco_eval_tool/tablebank_word_latex_val.json \n"
# cmd += "ln -s /blob/data/TableBank/annotations/tablebank_word_latex_test.json /detectron/tools/coco_eval_tool/tablebank_word_latex_test.json \n"
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/TableBank/images/ --name_list /blob/data/TableBank/namelists/tablebank_word_val.txt --output /origin_results/tablebank_word_val/ --confidence-threshold 0.5 --no_demo --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/TableBank/images/ --name_list /blob/data/TableBank/namelists/tablebank_word_test.txt --output /origin_results/tablebank_word_test/ --confidence-threshold 0.5 --no_demo --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/TableBank/images/ --name_list /blob/data/TableBank/namelists/tablebank_latex_val.txt --output /origin_results/tablebank_latex_val/ --confidence-threshold 0.5 --no_demo --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "python /detectron/tools/demo_table_detection.py --config-file {} --im_or_folder /detectron/datasets/TableBank/images/ --name_list /blob/data/TableBank/namelists/tablebank_latex_test.txt --output /origin_results/tablebank_latex_test/ --confidence-threshold 0.5 --no_demo --num_loader 64 --opts MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.RPN_ONLY False DEBUG False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/ \n"
# cmd += "sudo zip -q -r /results.zip ./ \n"
# cmd += "sudo cp /results.zip {}/{}/{}/{}/frcn/results.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /detectron/tools/coco_eval_tool/ \n"
# cmd += "python ./evaluate_tablebank.py -tablebank_word_val /origin_results/tablebank_word_val/txt/ \n"
# cmd += "echo tablebank_word_val \n"
# cmd += "python ./evaluate_tablebank.py -tablebank_latex_val /origin_results/tablebank_latex_val/txt/ \n"
# cmd += "echo tablebank_latex_val \n"
# cmd += "python ./evaluate_tablebank.py -tablebank_word_latex_val /origin_results/tablebank_word_val/txt/ /origin_results/tablebank_latex_val/txt/ \n"
# cmd += "echo tablebank_word_latex_val \n"
# cmd += "python ./evaluate_tablebank.py -tablebank_word_test /origin_results/tablebank_word_test/txt/ \n"
# cmd += "echo tablebank_word_test \n"
# cmd += "python ./evaluate_tablebank.py -tablebank_latex_test /origin_results/tablebank_latex_test/txt/ \n"
# cmd += "echo tablebank_latex_test \n"
# cmd += "python ./evaluate_tablebank.py -tablebank_word_latex_test /origin_results/tablebank_word_test/txt/ /origin_results/tablebank_latex_test/txt/ \n"
# cmd += "echo tablebank_word_latex_test \n"
# print(cmd)
# os.system(cmd)
# # Make result file for MSRA_TSR test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "rm -r /detectron/tools/ctdar_measurement_tool/annotations/MSRA_TSR/trackB1 \n"
# cmd += "rm -r /detectron/tools/ctdar_measurement_tool/annotations/MSRA_TSR/cropped_textbox_xml_for_eval_tool \n"
# cmd += "ln -s /blob/data/MSRA_TSR_test/cropped_gt_xml_for_eval_tool /detectron/tools/ctdar_measurement_tool/annotations/MSRA_TSR/trackB1 \n"
# cmd += "ln -s /blob/data/MSRA_TSR_test/cropped_textbox_xml_for_eval_tool /detectron/tools/ctdar_measurement_tool/annotations/MSRA_TSR/cropped_textbox_xml_for_eval_tool \n"
# cmd += "python /detectron/tools/demo_TSR.py --config-file {} --im_or_folder /blob/data/MSRA_TSR_test/cropped_table_image/ --output /origin_results/ --no_demo --do_eval --num_loader 64 --opts MODEL.DEVICE cuda MODEL.WEIGHTS {}/{}/{}/{}/rpn/model_final.pth MODEL.MERGE_HEAD_ON False DEBUG False MODEL.SPLIT_HEAD.USE_CURVE_FITTING.ENABLED False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "python /detectron/tools/demo_TSR.py --config-file {} --im_or_folder /blob/data/MSRA_TSR_test/cropped_table_image/ --output /origin_results/ --do_eval --num_loader 64 --opts MODEL.DEVICE cuda MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.MERGE_HEAD_ON True DEBUG False MODEL.SPLIT_HEAD.USE_CURVE_FITTING.ENABLED False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.xml' | sudo zip -q /res_xml.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_xml.zip {}/{}/{}/{}/frcn/res_xml.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/image \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_ims.zip {}/{}/{}/{}/frcn/res_ims.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# print(cmd)
# os.system(cmd)
# # Make result file for cTDaR2019_TSR test
# cmd = "sudo mkdir /origin_results \n"
# cmd += "sudo chmod -R 777 /origin_results \n"
# cmd += "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
# cmd += "rm -r /detectron/tools/ctdar_measurement_tool/annotations/cTDaR2019/trackB1 \n"
# cmd += "rm -r /detectron/tools/ctdar_measurement_tool/annotations/cTDaR2019/cropped_textbox_xml_for_eval_tool \n"
# cmd += "ln -s /detectron/datasets/cTDaR2019_TSR_test/cropped_gt_xml_for_eval_tool /detectron/tools/ctdar_measurement_tool/annotations/cTDaR2019/trackB1 \n"
# cmd += "ln -s /detectron/datasets/cTDaR2019_TSR_test/cropped_textbox_xml_for_eval_tool /detectron/tools/ctdar_measurement_tool/annotations/cTDaR2019/cropped_textbox_xml_for_eval_tool \n"
# cmd += "python /detectron/tools/demo_TSR.py --config-file {} --im_or_folder /detectron/datasets/cTDaR2019_TSR_test/cropped_table_image/ --output /origin_results/ --no_demo --do_eval --num_loader 64 --opts MODEL.DEVICE cuda MODEL.WEIGHTS {}/{}/{}/{}/rpn/model_final.pth MODEL.MERGE_HEAD_ON False DEBUG False MODEL.SPLIT_HEAD.USE_CURVE_FITTING.ENABLED False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "python /detectron/tools/demo_TSR.py --config-file {} --im_or_folder /detectron/datasets/cTDaR2019_TSR_test/cropped_table_image/ --output /origin_results/ --do_eval --num_loader 64 --opts MODEL.DEVICE cuda MODEL.WEIGHTS {}/{}/{}/{}/frcn/model_final.pth MODEL.MERGE_HEAD_ON True DEBUG False MODEL.SPLIT_HEAD.USE_CURVE_FITTING.ENABLED False \n".format(config_file, root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/txt \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.xml' | sudo zip -q /res_xml.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_xml.zip {}/{}/{}/{}/frcn/res_xml.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# cmd += "cd /origin_results/image \n"
# cmd += "sudo find ./ -mindepth 1 -maxdepth 1 -name '*.jpg' | sudo zip -q /res_ims.zip -@ \n"
# #<========================Change Here============================================
# cmd += "sudo cp /res_ims.zip {}/{}/{}/{}/frcn/res_ims.zip \n".format(root_path, args.expName, args.expCode, args.expVersion)
# print(cmd)
# os.system(cmd)
def init_on_aml():
cmd = "bash chixma_init_on_aml.sh"
print(cmd)
os.system(cmd)
def copy_data_from_blob(config_file):
cmd = "local=$(pwd) \n export PYTHONPATH=${local}/detectron2 \n"
cmd += "echo $PYTHONPATH \n"
cmd += "python copy_datasets_from_blob.py --config-file {}".format(config_file)
print(cmd)
os.system(cmd)
if __name__ == '__main__':
main()
| 82.005618
| 466
| 0.65397
| 4,053
| 29,194
| 4.494202
| 0.064397
| 0.028328
| 0.037881
| 0.055559
| 0.921493
| 0.914356
| 0.899039
| 0.882844
| 0.870711
| 0.858139
| 0
| 0.014221
| 0.140097
| 29,194
| 355
| 467
| 82.23662
| 0.711361
| 0.705933
| 0
| 0.476563
| 0
| 0.125
| 0.501137
| 0.259308
| 0
| 0
| 0
| 0
| 0.007813
| 1
| 0.03125
| false
| 0
| 0.039063
| 0
| 0.078125
| 0.054688
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a080f7ef8ee49efebff796bed6d7fdd5b14599eb
| 81,377
|
py
|
Python
|
cottonformation/res/gamelift.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 5
|
2021-07-22T03:45:59.000Z
|
2021-12-17T21:07:14.000Z
|
cottonformation/res/gamelift.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 1
|
2021-06-25T18:01:31.000Z
|
2021-06-25T18:01:31.000Z
|
cottonformation/res/gamelift.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 2
|
2021-06-27T03:08:21.000Z
|
2021-06-28T22:15:51.000Z
|
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
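# Field-name convention in the classes below: ``rp_`` marks a required
# property (plain instance_of validator) and ``p_`` an optional one
# (wrapped in attr.validators.optional).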
@attr.s
class PropGameServerGroupTargetTrackingConfiguration(Property):
"""
AWS Object Type = "AWS::GameLift::GameServerGroup.TargetTrackingConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-targettrackingconfiguration.html
Property Document:
- ``rp_TargetValue``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-targettrackingconfiguration.html#cfn-gamelift-gameservergroup-targettrackingconfiguration-targetvalue
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.TargetTrackingConfiguration"
rp_TargetValue: float = attr.ib(
default=None,
validator=attr.validators.instance_of(float),
metadata={AttrMeta.PROPERTY_NAME: "TargetValue"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-targettrackingconfiguration.html#cfn-gamelift-gameservergroup-targettrackingconfiguration-targetvalue"""
@attr.s
class PropFleetLocationCapacity(Property):
"""
AWS Object Type = "AWS::GameLift::Fleet.LocationCapacity"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html
Property Document:
- ``rp_DesiredEC2Instances``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-desiredec2instances
- ``rp_MaxSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-maxsize
- ``rp_MinSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-minsize
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.LocationCapacity"
rp_DesiredEC2Instances: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "DesiredEC2Instances"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-desiredec2instances"""
rp_MaxSize: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "MaxSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-maxsize"""
rp_MinSize: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "MinSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationcapacity.html#cfn-gamelift-fleet-locationcapacity-minsize"""
@attr.s
class PropBuildS3Location(Property):
"""
AWS Object Type = "AWS::GameLift::Build.S3Location"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html
Property Document:
- ``rp_Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-bucket
- ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-key
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-rolearn
- ``p_ObjectVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-object-verison
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Build.S3Location"
rp_Bucket: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Bucket"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-bucket"""
rp_Key: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Key"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-key"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-storage-rolearn"""
p_ObjectVersion: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ObjectVersion"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-build-storagelocation.html#cfn-gamelift-build-object-verison"""
@attr.s
class PropAliasRoutingStrategy(Property):
"""
AWS Object Type = "AWS::GameLift::Alias.RoutingStrategy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html
Property Document:
- ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-type
- ``p_FleetId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-fleetid
- ``p_Message``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-message
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Alias.RoutingStrategy"
rp_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-type"""
p_FleetId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "FleetId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-fleetid"""
p_Message: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Message"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-alias-routingstrategy.html#cfn-gamelift-alias-routingstrategy-message"""
@attr.s
class PropGameServerGroupLaunchTemplate(Property):
"""
AWS Object Type = "AWS::GameLift::GameServerGroup.LaunchTemplate"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html
Property Document:
- ``p_LaunchTemplateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplateid
- ``p_LaunchTemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplatename
- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-version
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.LaunchTemplate"
p_LaunchTemplateId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplateid"""
p_LaunchTemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-launchtemplatename"""
p_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-launchtemplate.html#cfn-gamelift-gameservergroup-launchtemplate-version"""
@attr.s
class PropFleetCertificateConfiguration(Property):
"""
AWS Object Type = "AWS::GameLift::Fleet.CertificateConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-certificateconfiguration.html
Property Document:
- ``rp_CertificateType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-certificateconfiguration.html#cfn-gamelift-fleet-certificateconfiguration-certificatetype
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.CertificateConfiguration"
rp_CertificateType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "CertificateType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-certificateconfiguration.html#cfn-gamelift-fleet-certificateconfiguration-certificatetype"""
@attr.s
class PropScriptS3Location(Property):
"""
AWS Object Type = "AWS::GameLift::Script.S3Location"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html
Property Document:
- ``rp_Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-bucket
- ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-key
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-rolearn
- ``p_ObjectVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-objectversion
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Script.S3Location"
rp_Bucket: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Bucket"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-bucket"""
rp_Key: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Key"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-key"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-rolearn"""
p_ObjectVersion: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ObjectVersion"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-script-s3location.html#cfn-gamelift-script-s3location-objectversion"""
@attr.s
class PropGameServerGroupAutoScalingPolicy(Property):
"""
AWS Object Type = "AWS::GameLift::GameServerGroup.AutoScalingPolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html
Property Document:
- ``rp_TargetTrackingConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-targettrackingconfiguration
- ``p_EstimatedInstanceWarmup``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-estimatedinstancewarmup
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.AutoScalingPolicy"
rp_TargetTrackingConfiguration: typing.Union['PropGameServerGroupTargetTrackingConfiguration', dict] = attr.ib(
default=None,
converter=PropGameServerGroupTargetTrackingConfiguration.from_dict,
validator=attr.validators.instance_of(PropGameServerGroupTargetTrackingConfiguration),
metadata={AttrMeta.PROPERTY_NAME: "TargetTrackingConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-targettrackingconfiguration"""
p_EstimatedInstanceWarmup: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "EstimatedInstanceWarmup"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-autoscalingpolicy.html#cfn-gamelift-gameservergroup-autoscalingpolicy-estimatedinstancewarmup"""
@attr.s
class PropGameSessionQueuePlayerLatencyPolicy(Property):
"""
AWS Object Type = "AWS::GameLift::GameSessionQueue.PlayerLatencyPolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html
Property Document:
- ``p_MaximumIndividualPlayerLatencyMilliseconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-maximumindividualplayerlatencymilliseconds
- ``p_PolicyDurationSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-policydurationseconds
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.PlayerLatencyPolicy"
p_MaximumIndividualPlayerLatencyMilliseconds: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaximumIndividualPlayerLatencyMilliseconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-maximumindividualplayerlatencymilliseconds"""
p_PolicyDurationSeconds: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "PolicyDurationSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-playerlatencypolicy.html#cfn-gamelift-gamesessionqueue-playerlatencypolicy-policydurationseconds"""
@attr.s
class PropGameSessionQueueDestination(Property):
"""
AWS Object Type = "AWS::GameLift::GameSessionQueue.Destination"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-destination.html
Property Document:
- ``p_DestinationArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-destination.html#cfn-gamelift-gamesessionqueue-destination-destinationarn
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.Destination"
p_DestinationArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DestinationArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-destination.html#cfn-gamelift-gamesessionqueue-destination-destinationarn"""
@attr.s
class PropFleetLocationConfiguration(Property):
"""
AWS Object Type = "AWS::GameLift::Fleet.LocationConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html
Property Document:
- ``rp_Location``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-location
- ``p_LocationCapacity``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-locationcapacity
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.LocationConfiguration"
rp_Location: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Location"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-location"""
p_LocationCapacity: typing.Union['PropFleetLocationCapacity', dict] = attr.ib(
default=None,
converter=PropFleetLocationCapacity.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropFleetLocationCapacity)),
metadata={AttrMeta.PROPERTY_NAME: "LocationCapacity"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-locationconfiguration.html#cfn-gamelift-fleet-locationconfiguration-locationcapacity"""
@attr.s
class PropFleetIpPermission(Property):
"""
AWS Object Type = "AWS::GameLift::Fleet.IpPermission"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html
Property Document:
- ``rp_FromPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-fromport
- ``rp_IpRange``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-iprange
- ``rp_Protocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-protocol
- ``rp_ToPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-toport
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.IpPermission"
rp_FromPort: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "FromPort"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-fromport"""
rp_IpRange: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "IpRange"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-iprange"""
rp_Protocol: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Protocol"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-protocol"""
rp_ToPort: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "ToPort"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-ippermission.html#cfn-gamelift-fleet-ippermission-toport"""
@attr.s
class PropGameSessionQueueFilterConfiguration(Property):
"""
AWS Object Type = "AWS::GameLift::GameSessionQueue.FilterConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-filterconfiguration.html
Property Document:
- ``p_AllowedLocations``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-filterconfiguration.html#cfn-gamelift-gamesessionqueue-filterconfiguration-allowedlocations
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.FilterConfiguration"
p_AllowedLocations: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "AllowedLocations"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-filterconfiguration.html#cfn-gamelift-gamesessionqueue-filterconfiguration-allowedlocations"""
@attr.s
class PropFleetServerProcess(Property):
"""
AWS Object Type = "AWS::GameLift::Fleet.ServerProcess"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html
Property Document:
- ``rp_ConcurrentExecutions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-concurrentexecutions
- ``rp_LaunchPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-launchpath
- ``p_Parameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-parameters
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.ServerProcess"
rp_ConcurrentExecutions: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "ConcurrentExecutions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-concurrentexecutions"""
rp_LaunchPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "LaunchPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-launchpath"""
p_Parameters: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Parameters"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-serverprocess.html#cfn-gamelift-fleet-serverprocess-parameters"""
@attr.s
class PropFleetResourceCreationLimitPolicy(Property):
"""
AWS Object Type = "AWS::GameLift::Fleet.ResourceCreationLimitPolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html
Property Document:
- ``p_NewGameSessionsPerCreator``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-newgamesessionspercreator
- ``p_PolicyPeriodInMinutes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-policyperiodinminutes
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.ResourceCreationLimitPolicy"
p_NewGameSessionsPerCreator: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "NewGameSessionsPerCreator"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-newgamesessionspercreator"""
p_PolicyPeriodInMinutes: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "PolicyPeriodInMinutes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-resourcecreationlimitpolicy.html#cfn-gamelift-fleet-resourcecreationlimitpolicy-policyperiodinminutes"""
@attr.s
class PropGameServerGroupInstanceDefinition(Property):
"""
AWS Object Type = "AWS::GameLift::GameServerGroup.InstanceDefinition"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html
Property Document:
- ``rp_InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-instancetype
- ``p_WeightedCapacity``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-weightedcapacity
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup.InstanceDefinition"
rp_InstanceType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "InstanceType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-instancetype"""
p_WeightedCapacity: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "WeightedCapacity"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gameservergroup-instancedefinition.html#cfn-gamelift-gameservergroup-instancedefinition-weightedcapacity"""
@attr.s
class PropFleetRuntimeConfiguration(Property):
"""
AWS Object Type = "AWS::GameLift::Fleet.RuntimeConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html
Property Document:
- ``p_GameSessionActivationTimeoutSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-gamesessionactivationtimeoutseconds
- ``p_MaxConcurrentGameSessionActivations``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-maxconcurrentgamesessionactivations
- ``p_ServerProcesses``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-serverprocesses
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet.RuntimeConfiguration"
p_GameSessionActivationTimeoutSeconds: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "GameSessionActivationTimeoutSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-gamesessionactivationtimeoutseconds"""
p_MaxConcurrentGameSessionActivations: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxConcurrentGameSessionActivations"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-maxconcurrentgamesessionactivations"""
p_ServerProcesses: typing.List[typing.Union['PropFleetServerProcess', dict]] = attr.ib(
default=None,
converter=PropFleetServerProcess.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropFleetServerProcess), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ServerProcesses"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-fleet-runtimeconfiguration.html#cfn-gamelift-fleet-runtimeconfiguration-serverprocesses"""
@attr.s
class PropGameSessionQueuePriorityConfiguration(Property):
"""
AWS Object Type = "AWS::GameLift::GameSessionQueue.PriorityConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html
Property Document:
- ``p_LocationOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-locationorder
- ``p_PriorityOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-priorityorder
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue.PriorityConfiguration"
p_LocationOrder: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "LocationOrder"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-locationorder"""
p_PriorityOrder: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "PriorityOrder"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-gamesessionqueue-priorityconfiguration.html#cfn-gamelift-gamesessionqueue-priorityconfiguration-priorityorder"""
@attr.s
class PropMatchmakingConfigurationGameProperty(Property):
"""
AWS Object Type = "AWS::GameLift::MatchmakingConfiguration.GameProperty"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html
Property Document:
- ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-key
- ``rp_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-value
"""
AWS_OBJECT_TYPE = "AWS::GameLift::MatchmakingConfiguration.GameProperty"
rp_Key: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Key"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-key"""
rp_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-matchmakingconfiguration-gameproperty.html#cfn-gamelift-matchmakingconfiguration-gameproperty-value"""
#--- Resource declaration ---
@attr.s
class Alias(Resource):
"""
AWS Object Type = "AWS::GameLift::Alias"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-name
- ``rp_RoutingStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-routingstrategy
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-description
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Alias"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-name"""
rp_RoutingStrategy: typing.Union['PropAliasRoutingStrategy', dict] = attr.ib(
default=None,
converter=PropAliasRoutingStrategy.from_dict,
validator=attr.validators.instance_of(PropAliasRoutingStrategy),
metadata={AttrMeta.PROPERTY_NAME: "RoutingStrategy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-routingstrategy"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#cfn-gamelift-alias-description"""
@property
def rv_AliasId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-alias.html#aws-resource-gamelift-alias-return-values"""
return GetAtt(resource=self, attr_name="AliasId")
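# A usage sketch for ``Alias`` (assumptions: the base ``Resource`` takes a
# logical-id string as its first argument, as elsewhere in this library, and
# ``rp_RoutingStrategy`` accepts a dict via its ``from_dict`` converter with
# keys mirroring the CloudFormation RoutingStrategy property).
def _example_alias():
    return Alias(
        "ServerAlias",  # logical id in the template
        rp_Name="my-server-alias",
        rp_RoutingStrategy={"rp_Type": "SIMPLE"},
        p_Description="Alias routing traffic to the active fleet",
    )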
@attr.s
class Build(Resource):
"""
AWS Object Type = "AWS::GameLift::Build"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html
Property Document:
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-name
- ``p_OperatingSystem``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-operatingsystem
- ``p_StorageLocation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-storagelocation
- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-version
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Build"
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-name"""
p_OperatingSystem: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OperatingSystem"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-operatingsystem"""
p_StorageLocation: typing.Union['PropBuildS3Location', dict] = attr.ib(
default=None,
converter=PropBuildS3Location.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropBuildS3Location)),
metadata={AttrMeta.PROPERTY_NAME: "StorageLocation"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-storagelocation"""
p_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-build.html#cfn-gamelift-build-version"""
@attr.s
class Script(Resource):
"""
AWS Object Type = "AWS::GameLift::Script"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html
Property Document:
- ``rp_StorageLocation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-storagelocation
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-name
- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-version
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Script"
rp_StorageLocation: typing.Union['PropScriptS3Location', dict] = attr.ib(
default=None,
converter=PropScriptS3Location.from_dict,
validator=attr.validators.instance_of(PropScriptS3Location),
metadata={AttrMeta.PROPERTY_NAME: "StorageLocation"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-storagelocation"""
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-name"""
p_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#cfn-gamelift-script-version"""
@property
def rv_Id(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#aws-resource-gamelift-script-return-values"""
return GetAtt(resource=self, attr_name="Id")
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-script.html#aws-resource-gamelift-script-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class GameServerGroup(Resource):
"""
AWS Object Type = "AWS::GameLift::GameServerGroup"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html
Property Document:
- ``rp_GameServerGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-gameservergroupname
- ``rp_InstanceDefinitions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-instancedefinitions
- ``rp_LaunchTemplate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-launchtemplate
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-rolearn
- ``p_AutoScalingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-autoscalingpolicy
- ``p_BalancingStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-balancingstrategy
- ``p_DeleteOption``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-deleteoption
- ``p_GameServerProtectionPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-gameserverprotectionpolicy
- ``p_MaxSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-maxsize
- ``p_MinSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-minsize
- ``p_VpcSubnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-vpcsubnets
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-tags
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameServerGroup"
rp_GameServerGroupName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "GameServerGroupName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-gameservergroupname"""
rp_InstanceDefinitions: typing.List[typing.Union['PropGameServerGroupInstanceDefinition', dict]] = attr.ib(
default=None,
converter=PropGameServerGroupInstanceDefinition.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropGameServerGroupInstanceDefinition), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "InstanceDefinitions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-instancedefinitions"""
rp_LaunchTemplate: typing.Union['PropGameServerGroupLaunchTemplate', dict] = attr.ib(
default=None,
converter=PropGameServerGroupLaunchTemplate.from_dict,
validator=attr.validators.instance_of(PropGameServerGroupLaunchTemplate),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplate"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-launchtemplate"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-rolearn"""
p_AutoScalingPolicy: typing.Union['PropGameServerGroupAutoScalingPolicy', dict] = attr.ib(
default=None,
converter=PropGameServerGroupAutoScalingPolicy.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropGameServerGroupAutoScalingPolicy)),
metadata={AttrMeta.PROPERTY_NAME: "AutoScalingPolicy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-autoscalingpolicy"""
p_BalancingStrategy: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "BalancingStrategy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-balancingstrategy"""
p_DeleteOption: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "DeleteOption"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-deleteoption"""
p_GameServerProtectionPolicy: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "GameServerProtectionPolicy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-gameserverprotectionpolicy"""
p_MaxSize: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "MaxSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-maxsize"""
p_MinSize: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "MinSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-minsize"""
p_VpcSubnets: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "VpcSubnets"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-vpcsubnets"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#cfn-gamelift-gameservergroup-tags"""
@property
def rv_AutoScalingGroupArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#aws-resource-gamelift-gameservergroup-return-values"""
return GetAtt(resource=self, attr_name="AutoScalingGroupArn")
@property
def rv_GameServerGroupArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gameservergroup.html#aws-resource-gamelift-gameservergroup-return-values"""
return GetAtt(resource=self, attr_name="GameServerGroupArn")
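# A sketch of a minimal ``GameServerGroup`` (same logical-id assumption as
# above; ``rp_LaunchTemplate`` is passed as a dict for its ``from_dict``
# converter, with ``p_LaunchTemplateId`` assumed to mirror the CloudFormation
# LaunchTemplate property; the ARN, template id, and instance types are
# placeholders). Note that ``p_MinSize``/``p_MaxSize`` are declared as floats.
def _example_game_server_group():
    return GameServerGroup(
        "FleetIqGroup",
        rp_GameServerGroupName="my-game-server-group",
        rp_RoleArn="arn:aws:iam::111122223333:role/gamelift-fleetiq-role",
        rp_InstanceDefinitions=[
            PropGameServerGroupInstanceDefinition(
                rp_InstanceType="c5.large",
                p_WeightedCapacity="1",
            ),
            PropGameServerGroupInstanceDefinition(rp_InstanceType="c5.xlarge"),
        ],
        rp_LaunchTemplate={"p_LaunchTemplateId": "lt-0123456789abcdef0"},
        p_MinSize=1.0,
        p_MaxSize=4.0,
    )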
@attr.s
class Fleet(Resource):
"""
AWS Object Type = "AWS::GameLift::Fleet"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html
Property Document:
- ``p_BuildId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-buildid
- ``p_CertificateConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-certificateconfiguration
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-description
- ``p_DesiredEC2Instances``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-desiredec2instances
- ``p_EC2InboundPermissions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-ec2inboundpermissions
- ``p_EC2InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-ec2instancetype
- ``p_FleetType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-fleettype
- ``p_InstanceRoleARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-instancerolearn
- ``p_Locations``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-locations
- ``p_MaxSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-maxsize
- ``p_MetricGroups``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-metricgroups
- ``p_MinSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-minsize
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-name
- ``p_NewGameSessionProtectionPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-newgamesessionprotectionpolicy
- ``p_PeerVpcAwsAccountId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-peervpcawsaccountid
- ``p_PeerVpcId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-peervpcid
- ``p_ResourceCreationLimitPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-resourcecreationlimitpolicy
- ``p_RuntimeConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-runtimeconfiguration
- ``p_ScriptId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-scriptid
"""
AWS_OBJECT_TYPE = "AWS::GameLift::Fleet"
p_BuildId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "BuildId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-buildid"""
p_CertificateConfiguration: typing.Union['PropFleetCertificateConfiguration', dict] = attr.ib(
default=None,
converter=PropFleetCertificateConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropFleetCertificateConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "CertificateConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-certificateconfiguration"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-description"""
p_DesiredEC2Instances: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "DesiredEC2Instances"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-desiredec2instances"""
p_EC2InboundPermissions: typing.List[typing.Union['PropFleetIpPermission', dict]] = attr.ib(
default=None,
converter=PropFleetIpPermission.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropFleetIpPermission), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "EC2InboundPermissions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-ec2inboundpermissions"""
p_EC2InstanceType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "EC2InstanceType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-ec2instancetype"""
p_FleetType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "FleetType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-fleettype"""
p_InstanceRoleARN: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "InstanceRoleARN"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-instancerolearn"""
p_Locations: typing.List[typing.Union['PropFleetLocationConfiguration', dict]] = attr.ib(
default=None,
converter=PropFleetLocationConfiguration.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropFleetLocationConfiguration), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Locations"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-locations"""
p_MaxSize: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-maxsize"""
p_MetricGroups: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "MetricGroups"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-metricgroups"""
p_MinSize: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MinSize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-minsize"""
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-name"""
p_NewGameSessionProtectionPolicy: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "NewGameSessionProtectionPolicy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-newgamesessionprotectionpolicy"""
p_PeerVpcAwsAccountId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PeerVpcAwsAccountId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-peervpcawsaccountid"""
p_PeerVpcId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PeerVpcId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-peervpcid"""
p_ResourceCreationLimitPolicy: typing.Union['PropFleetResourceCreationLimitPolicy', dict] = attr.ib(
default=None,
converter=PropFleetResourceCreationLimitPolicy.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropFleetResourceCreationLimitPolicy)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceCreationLimitPolicy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-resourcecreationlimitpolicy"""
p_RuntimeConfiguration: typing.Union['PropFleetRuntimeConfiguration', dict] = attr.ib(
default=None,
converter=PropFleetRuntimeConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropFleetRuntimeConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "RuntimeConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-runtimeconfiguration"""
p_ScriptId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ScriptId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#cfn-gamelift-fleet-scriptid"""
@property
def rv_FleetId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-fleet.html#aws-resource-gamelift-fleet-return-values"""
return GetAtt(resource=self, attr_name="FleetId")
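# A sketch of a build-based ``Fleet`` (same logical-id assumption as above;
# the ``PropFleetServerProcess`` field names are an assumption mirroring the
# CloudFormation ServerProcess property, since that class is declared earlier
# in this module; every concrete value below is a placeholder).
def _example_fleet():
    return Fleet(
        "GameFleet",
        p_Name="my-game-fleet",
        p_BuildId="build-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        p_EC2InstanceType="c5.large",
        p_MinSize=1,
        p_MaxSize=2,
        p_RuntimeConfiguration=PropFleetRuntimeConfiguration(
            p_MaxConcurrentGameSessionActivations=2,
            p_GameSessionActivationTimeoutSeconds=300,
            p_ServerProcesses=[
                PropFleetServerProcess(
                    rp_ConcurrentExecutions=1,
                    rp_LaunchPath="/local/game/server",
                ),
            ],
        ),
    )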
@attr.s
class MatchmakingConfiguration(Resource):
"""
AWS Object Type = "AWS::GameLift::MatchmakingConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html
Property Document:
- ``rp_AcceptanceRequired``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-acceptancerequired
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-name
- ``rp_RequestTimeoutSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-requesttimeoutseconds
- ``rp_RuleSetName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-rulesetname
- ``p_AcceptanceTimeoutSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-acceptancetimeoutseconds
- ``p_AdditionalPlayerCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-additionalplayercount
- ``p_BackfillMode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-backfillmode
- ``p_CustomEventData``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-customeventdata
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-description
- ``p_FlexMatchMode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-flexmatchmode
- ``p_GameProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-gameproperties
- ``p_GameSessionData``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-gamesessiondata
- ``p_GameSessionQueueArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-gamesessionqueuearns
- ``p_NotificationTarget``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-notificationtarget
"""
AWS_OBJECT_TYPE = "AWS::GameLift::MatchmakingConfiguration"
rp_AcceptanceRequired: bool = attr.ib(
default=None,
validator=attr.validators.instance_of(bool),
metadata={AttrMeta.PROPERTY_NAME: "AcceptanceRequired"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-acceptancerequired"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-name"""
rp_RequestTimeoutSeconds: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "RequestTimeoutSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-requesttimeoutseconds"""
rp_RuleSetName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RuleSetName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-rulesetname"""
p_AcceptanceTimeoutSeconds: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "AcceptanceTimeoutSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-acceptancetimeoutseconds"""
p_AdditionalPlayerCount: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "AdditionalPlayerCount"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-additionalplayercount"""
p_BackfillMode: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "BackfillMode"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-backfillmode"""
p_CustomEventData: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "CustomEventData"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-customeventdata"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-description"""
p_FlexMatchMode: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "FlexMatchMode"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-flexmatchmode"""
p_GameProperties: typing.List[typing.Union['PropMatchmakingConfigurationGameProperty', dict]] = attr.ib(
default=None,
converter=PropMatchmakingConfigurationGameProperty.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropMatchmakingConfigurationGameProperty), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "GameProperties"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-gameproperties"""
p_GameSessionData: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "GameSessionData"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-gamesessiondata"""
p_GameSessionQueueArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "GameSessionQueueArns"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-gamesessionqueuearns"""
p_NotificationTarget: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "NotificationTarget"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#cfn-gamelift-matchmakingconfiguration-notificationtarget"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#aws-resource-gamelift-matchmakingconfiguration-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingconfiguration.html#aws-resource-gamelift-matchmakingconfiguration-return-values"""
return GetAtt(resource=self, attr_name="Name")
@attr.s
class MatchmakingRuleSet(Resource):
"""
AWS Object Type = "AWS::GameLift::MatchmakingRuleSet"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingruleset.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingruleset.html#cfn-gamelift-matchmakingruleset-name
- ``rp_RuleSetBody``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingruleset.html#cfn-gamelift-matchmakingruleset-rulesetbody
"""
AWS_OBJECT_TYPE = "AWS::GameLift::MatchmakingRuleSet"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingruleset.html#cfn-gamelift-matchmakingruleset-name"""
rp_RuleSetBody: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RuleSetBody"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingruleset.html#cfn-gamelift-matchmakingruleset-rulesetbody"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingruleset.html#aws-resource-gamelift-matchmakingruleset-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-matchmakingruleset.html#aws-resource-gamelift-matchmakingruleset-return-values"""
return GetAtt(resource=self, attr_name="Name")
@attr.s
class GameSessionQueue(Resource):
"""
AWS Object Type = "AWS::GameLift::GameSessionQueue"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-name
- ``p_CustomEventData``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-customeventdata
- ``p_Destinations``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-destinations
- ``p_FilterConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-filterconfiguration
- ``p_NotificationTarget``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-notificationtarget
- ``p_PlayerLatencyPolicies``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-playerlatencypolicies
- ``p_PriorityConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-priorityconfiguration
- ``p_TimeoutInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-timeoutinseconds
"""
AWS_OBJECT_TYPE = "AWS::GameLift::GameSessionQueue"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-name"""
p_CustomEventData: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "CustomEventData"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-customeventdata"""
p_Destinations: typing.List[typing.Union['PropGameSessionQueueDestination', dict]] = attr.ib(
default=None,
converter=PropGameSessionQueueDestination.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropGameSessionQueueDestination), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Destinations"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-destinations"""
p_FilterConfiguration: typing.Union['PropGameSessionQueueFilterConfiguration', dict] = attr.ib(
default=None,
converter=PropGameSessionQueueFilterConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropGameSessionQueueFilterConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "FilterConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-filterconfiguration"""
p_NotificationTarget: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "NotificationTarget"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-notificationtarget"""
p_PlayerLatencyPolicies: typing.List[typing.Union['PropGameSessionQueuePlayerLatencyPolicy', dict]] = attr.ib(
default=None,
converter=PropGameSessionQueuePlayerLatencyPolicy.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropGameSessionQueuePlayerLatencyPolicy), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "PlayerLatencyPolicies"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-playerlatencypolicies"""
p_PriorityConfiguration: typing.Union['PropGameSessionQueuePriorityConfiguration', dict] = attr.ib(
default=None,
converter=PropGameSessionQueuePriorityConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropGameSessionQueuePriorityConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "PriorityConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-priorityconfiguration"""
p_TimeoutInSeconds: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "TimeoutInSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#cfn-gamelift-gamesessionqueue-timeoutinseconds"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#aws-resource-gamelift-gamesessionqueue-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Name(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-gamelift-gamesessionqueue.html#aws-resource-gamelift-gamesessionqueue-return-values"""
return GetAtt(resource=self, attr_name="Name")
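# A sketch of a ``GameSessionQueue`` with a priority configuration (same
# logical-id assumption as above; the ``PropGameSessionQueuePriorityConfiguration``
# fields come directly from the declarations earlier in this module, while the
# queue name, timeout, and ordering tokens are placeholders).
def _example_game_session_queue():
    return GameSessionQueue(
        "MainQueue",
        rp_Name="my-session-queue",
        p_TimeoutInSeconds=600,
        p_PriorityConfiguration=PropGameSessionQueuePriorityConfiguration(
            p_PriorityOrder=["LATENCY", "COST", "DESTINATION", "LOCATION"],
            p_LocationOrder=["us-east-1", "us-west-2"],
        ),
    )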
# === seg_models.py | smthomas-sci/SkinCancerSegmentation | Apache-2.0 | Python ===
"""
A collection of Encoder-Decoder networks, namely U-net and
U-net like decoders combined with regular CNNs e.g. VGG, ResNEt etc.)
The model architectures are suitbale for training Semantic Segmentation only.
You will need to save the trained model and rebuilt so it can take any input
size.
Author: Simon Thomas
Email: simon.thomas@uq.edu.au
Start Date: 26/10/18
Last Update: 04/02/19
"""
# Required for custom layer / model
from keras.layers import Softmax, Reshape, Layer
from keras.initializers import Constant
from keras.models import Model
def VGG_UNet(dim, num_classes, channels=3):
"""
Returns a VGG16 Nework with a U-Net
like upsampling stage. Inlcudes 3 skip connections
from previous VGG layers.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
channels - number of channels in input image. Defaut of 3 for RGB
Output:
model - an uncompied keras model. Check output shape before use.
"""
import keras.backend as K
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.vgg16 import VGG16
    # Import a headless VGG16 - extract the weights and then delete it
vgg16 = VGG16(include_top=False)
weights = []
for layer in vgg16.layers[1::]:
weights.append(layer.get_weights())
del vgg16
K.clear_session()
# Build VGG-Unet using functional API
input_image = Input(shape=(dim, dim, channels))
# Conv Block 1
    block1_conv1 = Conv2D(64, (3, 3), activation='relu', padding='same',
                          name='block1_conv1')(input_image)
block1_conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',
name='block1_conv2')(block1_conv1)
block1_pool = MaxPooling2D((2, 2), strides=(2, 2),
name="block1_pool")(block1_conv2)
# Conv Block 2
block2_conv1 = Conv2D(128, (3, 3), activation='relu', padding='same',
name='block2_conv1')(block1_pool)
block2_conv2 = Conv2D(128, (3, 3), activation='relu', padding='same',
name='block2_conv2')(block2_conv1)
block2_pool = MaxPooling2D((2, 2), strides=(2, 2),
name="block2_pool")(block2_conv2)
# Conv Block 3
block3_conv1 = Conv2D(256, (3, 3), activation='relu', padding='same',
name='block3_conv1')(block2_pool)
block3_conv2 = Conv2D(256, (3, 3), activation='relu', padding='same',
name='block3_conv2')(block3_conv1)
    block3_conv3 = Conv2D(256, (3, 3), activation='relu', padding='same',
                          name='block3_conv3')(block3_conv2)
block3_pool = MaxPooling2D((2, 2), strides=(2, 2),
name="block3_pool")(block3_conv3)
# Conv Block 4
block4_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='block4_conv1')(block3_pool)
block4_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='block4_conv2')(block4_conv1)
block4_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='block4_conv3')(block4_conv2)
block4_pool = MaxPooling2D((2, 2), strides=(2, 2),
name="block4_pool")(block4_conv3)
# Conv Block 5
block5_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='block5_conv1')(block4_pool)
block5_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='block5_conv2')(block5_conv1)
block5_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='block5_conv3')(block5_conv2)
block5_pool = MaxPooling2D((2, 2), strides=(2, 2),
name="block5_pool")(block5_conv3)
    # Upsampling 1
    up1 = UpSampling2D(size=(2, 2))(block5_pool)
    up1_conv = Conv2D(512, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up1)
    merge1 = concatenate([block5_conv3, up1_conv], axis=3)
    merge1_conv1 = Conv2D(512, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge1)
    merge1_conv2 = Conv2D(512, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge1_conv1)
    # Upsampling 2
    up2 = UpSampling2D(size=(2, 2))(merge1_conv2)
    up2_conv = Conv2D(256, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up2)
    merge2 = concatenate([block4_conv3, up2_conv], axis=3)
    merge2_conv1 = Conv2D(256, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge2)
    merge2_conv2 = Conv2D(256, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge2_conv1)
    # Upsampling 3
    up3 = UpSampling2D(size=(2, 2))(merge2_conv2)
    up3_conv = Conv2D(128, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up3)
    merge3 = concatenate([block3_conv3, up3_conv], axis=3)
    merge3_conv1 = Conv2D(128, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge3)
    merge3_conv2 = Conv2D(128, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge3_conv1)
    # Upsampling 4
    up4 = UpSampling2D(size=(2, 2))(merge3_conv2)
    up4_conv = Conv2D(64, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up4)
    merge4 = concatenate([block2_conv2, up4_conv], axis=3)
    merge4_conv1 = Conv2D(64, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge4)
    merge4_conv2 = Conv2D(64, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge4_conv1)
    # Upsampling 5
    up5 = UpSampling2D(size=(2, 2))(merge4_conv2)
    up5_conv = Conv2D(64, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up5)
    merge5 = concatenate([block1_conv2, up5_conv], axis=3)
    merge5_conv1 = Conv2D(64, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge5)
    merge5_conv2 = Conv2D(64, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge5_conv1)
    # Activation and reshape for training
    activation = Conv2D(num_classes, 1, activation="softmax")(merge5_conv2)
    output = Reshape((dim*dim, num_classes))(activation)
# Link model
model = Model(inputs=[input_image], outputs=output)
# Set VGG weights and lock from training
for layer, weight in zip(model.layers[1:19], weights):
# Set
layer.set_weights(weight)
# Lock
layer.trainable = False
return model
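# A minimal training-time usage sketch for VGG_UNet (assumptions: a working
# Keras install matching the imports above, and ground-truth masks one-hot
# encoded and flattened to (dim*dim, num_classes) to match the Reshape output;
# the optimizer and loss below are illustrative choices, not prescribed here).
def _example_vgg_unet_usage():
    import numpy as np
    dim, num_classes = 128, 6
    model = VGG_UNet(dim, num_classes)
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    x = np.random.rand(1, dim, dim, 3)            # dummy RGB image batch
    y = model.predict(x)                          # (1, dim*dim, num_classes)
    seg_map = y.reshape((dim, dim, num_classes))  # back to spatial layout
    return seg_map.argmax(axis=-1)                # class index per pixel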
def ResNet_UNet(dim=512, num_classes=6):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes 3 skip connections
from previous VGG layers.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
from keras.models import Model
from keras.layers import Conv2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.resnet50 import ResNet50
# Import a headless ResNet50
    resnet = ResNet50(input_shape=(None, None, 3), include_top=False)
    # Attach the U-Net from the second-last layer - activation_49
    res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1 - 512
fs = 32
    up1 = UpSampling2D(size=(2, 2))(res_out)
    up1_conv = Conv2D(fs, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up1)
    prev_layer = resnet.get_layer("activation_40").output
    merge1 = concatenate([prev_layer, up1_conv], axis=3)
    merge1_conv1 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge1)
    merge1_conv2 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge1_conv1)
    # Upsampling 2 - 256
    fs = 32
    up2 = UpSampling2D(size=(2, 2))(merge1_conv2)
    up2_conv = Conv2D(fs, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up2)
    prev_layer = resnet.get_layer("activation_22").output
    merge2 = concatenate([prev_layer, up2_conv], axis=3)
    merge2_conv1 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge2)
    merge2_conv2 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge2_conv1)
    # Upsampling 3 & 4 - 128
    fs = 32
    up3 = UpSampling2D(size=(2, 2))(merge2_conv2)
    up3_conv1 = Conv2D(fs, 2, activation='relu', padding='same',
                       kernel_initializer='he_normal')(up3)
    up3_conv2 = Conv2D(fs, 2, activation='relu', padding='same',
                       kernel_initializer='he_normal')(up3_conv1)
    up4 = UpSampling2D(size=(2, 2))(up3_conv2)
    up4_conv = Conv2D(fs, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up4)
    prev_layer = resnet.get_layer("activation_1").output
    merge3 = concatenate([prev_layer, up4_conv], axis=3)
    merge3_conv1 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge3)
    merge3_conv2 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge3_conv1)
    # Upsampling 5 - 64
    fs = 32
    up5 = UpSampling2D(size=(2, 2))(merge3_conv2)
    up5_conv = Conv2D(fs, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(up5)
    merge5_conv1 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(up5_conv)
    merge5_conv2 = Conv2D(fs, 3, activation='relu', padding='same',
                          kernel_initializer='he_normal')(merge5_conv1)
    # Activation and reshape for training
    activation = Conv2D(num_classes, 1, activation="softmax")(merge5_conv2)
    output = Reshape((dim*dim, num_classes))(activation)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model
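# Illustrative sketch (an assumption, not original code): model.predict()
# returns (batch, dim*dim, num_classes), so a per-pixel class map is
# recovered by reshaping and taking an argmax over the class axis.
def example_predict_mask(model, image, dim=512):
    """Return a (dim, dim) integer class map for a single image."""
    import numpy as np
    probs = model.predict(image[None, ...])           # (1, dim*dim, num_classes)
    probs = probs.reshape(dim, dim, probs.shape[-1])  # back to a spatial map
    return np.argmax(probs, axis=-1)                  # (dim, dim) class indices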
def ResNet_UNet_ExtraConv(dim=512, num_classes=6):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes 3 skip connections
from previous VGG layers.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
from keras.models import Model
from keras.layers import Conv2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.resnet50 import ResNet50
# Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attach the U-Net at the second-to-last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1
up1 = UpSampling2D(size=(2,2))(res_out)
up1_conv = Conv2D(512, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up1)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer,up1_conv], axis = 3)
merge1_conv1 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1)
merge1_conv2 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1_conv1)
# Upsampling 2
up2 = UpSampling2D(size = (2,2))(merge1_conv2)
up2_conv = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up2)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2)
merge2_conv2 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2_conv1)
# Upsampling 3 & 4
up3 = UpSampling2D(size = (2,2))(merge2_conv2)
up3_conv1 = Conv2D(128, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3)
up3_conv2 = Conv2D(128, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3_conv1)
up4 = UpSampling2D(size = (2,2))(up3_conv2)
up4_conv = Conv2D(128, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up4)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer,up4_conv], axis = 3)
merge3_conv1 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3)
merge3_conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3_conv1)
# Upsample 5
up5 = UpSampling2D(size = (2,2))(merge3_conv2)
up5_conv = Conv2D(64, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5)
merge5_conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5_conv)
merge5_conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge5_conv1)
# Activation and reshape for training
activation = Conv2D(num_classes, 1, activation = "softmax")(merge5_conv2)
# Smoothing
smooth_conv1 = Conv2D(12, 7, activation='relu', padding='same',
kernel_initializer='he_normal')(activation)
smooth_conv2 = Conv2D(12, 7, activation='relu', padding='same',
kernel_initializer='he_normal')(smooth_conv1)
# Final classification
classification = Conv2D(num_classes, 1, activation = "softmax")(smooth_conv2)
output = Reshape((dim*dim, num_classes))(classification)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model
def ResNet_UNet_More_Params(dim=512, num_classes=6):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes 3 skip connections
from previous VGG layers.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
from keras.models import Model
from keras.layers import Conv2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.resnet50 import ResNet50
# Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attach the U-Net at the second-to-last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1
up1 = UpSampling2D(size=(2,2))(res_out)
up1_conv = Conv2D(512, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up1)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer,up1_conv], axis = 3)
merge1_conv1 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1)
merge1_conv2 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1_conv1)
# Upsampling 2
up2 = UpSampling2D(size = (2,2))(merge1_conv2)
up2_conv = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up2)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2)
merge2_conv2 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2_conv1)
# Upsampling 3 & 4
up3 = UpSampling2D(size = (2,2))(merge2_conv2)
up3_conv1 = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3)
up3_conv2 = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3_conv1)
up4 = UpSampling2D(size = (2,2))(up3_conv2)
up4_conv = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up4)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer,up4_conv], axis = 3)
merge3_conv1 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3)
merge3_conv2 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3_conv1)
# Upsample 5
up5 = UpSampling2D(size = (2,2))(merge3_conv2)
up5_conv = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5)
merge5_conv1 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5_conv)
merge5_conv2 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge5_conv1)
# Activation and reshape for training
activation = Conv2D(num_classes, 1, activation = "softmax")(merge5_conv2)
output = Reshape((dim*dim, num_classes))(activation)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model
def ResNet_UNet_BN(dim=512, num_classes=6):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes 3 skip connections
from previous VGG layers.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
from keras.models import Model
from keras.layers import Conv2D, BatchNormalization
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.activations import relu
from keras.applications.resnet50 import ResNet50
# Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attach the U-Net at the second-to-last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1
up1 = UpSampling2D(size=(2, 2))(res_out)
up1_conv = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(up1)
up1_conv = BatchNormalization()(up1_conv)
#up1_conv = relu(up1_conv)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer,up1_conv], axis = 3)
merge1_conv1 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
merge1_conv1 = BatchNormalization()(merge1_conv1)
#merge1_conv1 = relu(merge1_conv1)
merge1_conv2 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer = 'he_normal')(merge1_conv1)
merge1_conv2 = BatchNormalization()(merge1_conv2)
#merge1_conv2 = relu(merge1_conv2)
# Upsampling 2
up2 = UpSampling2D(size=(2, 2))(merge1_conv2)
up2_conv = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(up2)
up2_conv = BatchNormalization()(up2_conv)
#up2_conv = relu(up2_conv)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge2)
merge2_conv1 = BatchNormalization()(merge2_conv1)
#merge2_conv1 = relu(merge2_conv1)
merge2_conv2 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge2_conv1)
merge2_conv2 = BatchNormalization()(merge2_conv2)
#merge2_conv2 = relu(merge2_conv2)
# Upsampling 3
up3 = UpSampling2D(size=(2,2))(merge2_conv2)
up3_conv1 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(up3)
up3_conv1 = BatchNormalization()(up3_conv1)
#up3_conv1 = relu(up3_conv1)
up3_conv2 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(up3_conv1)
up3_conv2 = BatchNormalization()(up3_conv2)
#up3_conv2 = relu(up3_conv2)
# Upsampling 4
up4 = UpSampling2D(size=(2,2))(up3_conv2)
up4_conv = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(up4)
up4_conv = BatchNormalization()(up4_conv)
#up4_conv = relu(up4_conv)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer, up4_conv], axis=3)
merge3_conv1 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
merge3_conv1 = BatchNormalization()(merge3_conv1)
#merge3_conv1 = relu(merge3_conv1)
merge3_conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge3_conv1)
merge3_conv2 = BatchNormalization()(merge3_conv2)
#merge3_conv2 = relu(merge3_conv2)
# Upsample 5
up5 = UpSampling2D(size=(2,2))(merge3_conv2)
up5_conv = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(up5)
up5_conv = BatchNormalization()(up5_conv)
#up5_conv = relu(up5_conv)
merge5_conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up5_conv)
merge5_conv1 = BatchNormalization()(merge5_conv1)
#merge5_conv1 = relu(merge5_conv1)
merge5_conv2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge5_conv1)
merge5_conv2 = BatchNormalization()(merge5_conv2)
#merge5_conv2 = relu(merge5_conv2)
# Activation and reshape for training
activation = Conv2D(num_classes, 1, activation="softmax")(merge5_conv2)
output = Reshape((dim*dim, num_classes))(activation)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model
def ResNet_UNet_Dropout(dim=512, num_classes=6, dropout=0.5, final_activation=True):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes skip connections
from previous ResNet50 layers.
Uses a SpatialDrop on the final layer as introduced
in https://arxiv.org/pdf/1411.4280.pdf, 2015.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
from keras.models import Model
from keras.layers import Conv2D, SpatialDropout2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.resnet50 import ResNet50
# Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attach the U-Net at the second-to-last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1 - 512
fs = 32
up1 = UpSampling2D(size=(2,2))(res_out)
up1_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up1)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer,up1_conv], axis = 3)
merge1_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1)
merge1_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1_conv1)
# Upsampling 2 - 256
fs = 32
up2 = UpSampling2D(size = (2,2))(merge1_conv2)
up2_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up2)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2)
merge2_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2_conv1)
# Upsampling 3 & 4 - 128
fs = 32
up3 = UpSampling2D(size = (2,2))(merge2_conv2)
up3_conv1 = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3)
up3_conv2 = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3_conv1)
up4 = UpSampling2D(size = (2,2))(up3_conv2)
up4_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up4)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer,up4_conv], axis = 3)
merge3_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3)
merge3_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3_conv1)
# Upsample 5 - 64
fs = 32
up5 = UpSampling2D(size=(2,2))(merge3_conv2)
up5_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5)
merge5_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5_conv)
merge5_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge5_conv1)
# Drop Out
do = SpatialDropout2D(dropout)(merge5_conv2)
# Activation and reshape for training
if final_activation:
activation = Conv2D(num_classes, 1, activation="softmax")(do)
else:
activation = Conv2D(num_classes, 1, activation=None)(do)
output = Reshape((dim*dim, num_classes))(activation)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model
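# Illustrative sketch (an assumption, not original code): SpatialDropout2D is
# only active in the training phase, so Monte Carlo dropout estimates can be
# drawn at inference by forcing the Keras learning phase to 1 and averaging
# repeated stochastic forward passes. This assumes the TF1-era keras backend
# used elsewhere in this file.
def example_mc_dropout_predict(model, image, n_samples=10):
    """Mean softmax over n_samples stochastic forward passes."""
    import numpy as np
    from keras import backend as K
    f = K.function([model.input, K.learning_phase()], [model.output])
    samples = [f([image[None, ...], 1])[0] for _ in range(n_samples)]
    return np.mean(samples, axis=0)  # (1, dim*dim, num_classes)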
def ResNet_UNet_Reg(dim=512, num_classes=6, reg=5e-4):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes skip connections
from previous ResNet50 layers.
Uses a SpatialDrop on the final layer as introduced
in https://arxiv.org/pdf/1411.4280.pdf, 2015.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
from keras.models import Model
from keras.layers import Conv2D, SpatialDropout2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.regularizers import l2
from keras.applications.resnet50 import ResNet50
# Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attach the U-Net at the second-to-last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1 - 512
fs = 32
up1 = UpSampling2D(size=(2,2))(res_out)
up1_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(up1)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer,up1_conv], axis = 3)
merge1_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(merge1)
merge1_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(merge1_conv1)
# Upsampling 2 - 256
fs = 32
up2 = UpSampling2D(size = (2,2))(merge1_conv2)
up2_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(up2)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(merge2)
merge2_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(merge2_conv1)
# Upsampling 3 & 4 - 128
fs = 32
up3 = UpSampling2D(size = (2,2))(merge2_conv2)
up3_conv1 = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(up3)
up3_conv2 = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(up3_conv1)
up4 = UpSampling2D(size = (2,2))(up3_conv2)
up4_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(up4)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer,up4_conv], axis = 3)
merge3_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(merge3)
merge3_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(merge3_conv1)
# Upsample 5 - 64
fs = 32
up5 = UpSampling2D(size=(2,2))(merge3_conv2)
up5_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(up5)
merge5_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(up5_conv)
merge5_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal', kernel_regularizer = l2(reg), bias_regularizer = l2(reg))(merge5_conv1)
# Activation and reshape for training
activation = Conv2D(num_classes, 1, activation = "softmax")(merge5_conv2)
output = Reshape((dim*dim, num_classes))(activation)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model
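# Illustrative note (an assumption, not original code): the l2() kernel and
# bias regularizers above register their penalties in model.losses, and Keras
# adds those penalties to the objective automatically at compile time, so a
# plain compile is all the extra wiring the model needs.
def example_compile_regularized(dim=256, num_classes=6, reg=5e-4):
    model = ResNet_UNet_Reg(dim=dim, num_classes=num_classes, reg=reg)
    print(len(model.losses))  # one L2 penalty per regularized weight tensor
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    return model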
def UNet(dim=512, num_classes=12):
"""
Standard U-Net architecture for segmentation
"""
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D, SpatialDropout2D, MaxPool2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.regularizers import l2
input = Input(shape=(None, None, 3))
# Down 1
fs = 64
conv1 = Conv2D(fs, 3, activation="relu", padding="same")(input)
conv2 = Conv2D(fs, 3, activation="relu", padding="same")(conv1)
pool1 = MaxPool2D((2, 2))(conv2)
# Down 2
fs = 128
conv3 = Conv2D(fs, 3, activation="relu", padding="same")(pool1)
conv4 = Conv2D(fs, 3, activation="relu", padding="same")(conv3)
pool2 = MaxPool2D((2, 2))(conv4)
# Down 3
fs = 256
conv5 = Conv2D(fs, 3, activation="relu", padding="same")(pool2)
conv6 = Conv2D(fs, 3, activation="relu", padding="same")(conv5)
pool3 = MaxPool2D((2, 2))(conv6)
# Down 4
fs = 512
conv7 = Conv2D(fs, 3, activation="relu", padding="same")(pool3)
conv8 = Conv2D(fs, 3, activation="relu", padding="same")(conv7)
pool4 = MaxPool2D((2, 2))(conv8)
# Bottom
fs = 1024
conv9 = Conv2D(fs, 3, activation="relu", padding="same")(pool4)
conv10 = Conv2D(fs, 3, activation="relu", padding="same")(conv9)
# Up 1
fs = 512
up1 = UpSampling2D(size=(2,2))(conv10)
up1_merge = concatenate([up1, conv8], axis=3)
up1_conv1 = Conv2D(fs, 3, activation="relu", padding="same")(up1_merge)
up1_conv2 = Conv2D(fs, 3, activation="relu", padding="same")(up1_conv1)
# Up 2
fs = 256
up2 = UpSampling2D(size=(2, 2))(up1_conv2)
up2_merge = concatenate([up2, conv6], axis=3)
up2_conv1 = Conv2D(fs, 3, activation="relu", padding="same")(up2_merge)
up2_conv2 = Conv2D(fs, 3, activation="relu", padding="same")(up2_conv1)
# Up 3
fs = 128
up3 = UpSampling2D(size=(2, 2))(up2_conv2)
up3_merge = concatenate([up3, conv4], axis=3)
up3_conv1 = Conv2D(fs, 3, activation="relu", padding="same")(up3_merge)
up3_conv2 = Conv2D(fs, 3, activation="relu", padding="same")(up3_conv1)
# Up 4
fs = 64
up4 = UpSampling2D(size=(2, 2))(up3_conv2)
up4_merge = concatenate([up4, conv2], axis=3)
up4_conv1 = Conv2D(fs, 3, activation="relu", padding="same")(up4_merge)
up4_conv2 = Conv2D(fs, 3, activation="relu", padding="same")(up4_conv1)
# Activation and reshape for training
if num_classes > 2:
activation = Conv2D(num_classes, 1, activation="softmax")(up4_conv2)
output = Reshape((dim * dim, num_classes))(activation)
else:
activation = Conv2D(1, 1, activation="sigmoid")(up4_conv2)
output = Reshape((dim * dim, 1))(activation)
# Build model
model = Model(inputs=[input], outputs=[output])
return model
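# Illustrative sketch (an assumption, not original code): UNet() switches
# between a softmax head (num_classes > 2) and a single sigmoid unit, so the
# loss has to switch with it.
def example_compile_unet(dim=256, num_classes=12):
    model = UNet(dim, num_classes)
    if num_classes > 2:
        model.compile(optimizer="adam", loss="categorical_crossentropy")
    else:
        model.compile(optimizer="adam", loss="binary_crossentropy")
    return model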
# ---------------------------------------------------------------------------- #
def ResNet_UNet_Generator(dim=512, num_classes=6):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes 3 skip connections
from previous VGG layers.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used to
determine the dimension of output map. i.e. model.predict()
returns array that can be reshaped to (dim, dim,
num_classes).
Output:
model - an uncompiled keras model. Check output shape before use.
"""
from keras.models import Model
from keras.layers import Conv2D, LeakyReLU, Softmax
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.resnet50 import ResNet50
# Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attach the U-Net at the second-to-last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1
up1 = UpSampling2D(size=(2,2))(res_out)
up1_conv = Conv2D(512, 2, activation=None, padding = 'same',
kernel_initializer='he_normal')(up1)
up1_conv = LeakyReLU()(up1_conv)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer, up1_conv], axis = 3)
merge1_conv1 = Conv2D(512, 3, activation=None, padding = 'same',
kernel_initializer='he_normal')(merge1)
merge1_conv1 = LeakyReLU()(merge1_conv1)
merge1_conv2 = Conv2D(512, 3, activation=None, padding = 'same',
kernel_initializer='he_normal')(merge1_conv1)
merge1_conv2 = LeakyReLU()(merge1_conv2)
# Upsampling 2
up2 = UpSampling2D(size = (2,2))(merge1_conv2)
up2_conv = Conv2D(256, 2, activation=None, padding = 'same',
kernel_initializer='he_normal')(up2)
up2_conv = LeakyReLU()(up2_conv)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(256, 3, activation=None, padding = 'same',
kernel_initializer='he_normal')(merge2)
merge2_conv1 = LeakyReLU()(merge2_conv1)
merge2_conv2 = Conv2D(256, 3, activation=None, padding = 'same',
kernel_initializer='he_normal')(merge2_conv1)
merge2_conv2 = LeakyReLU()(merge2_conv2)
# Upsampling 3 & 4
up3 = UpSampling2D(size=(2, 2))(merge2_conv2)
up3_conv1 = Conv2D(128, 2, activation=None, padding='same',
kernel_initializer='he_normal')(up3)
up3_conv1 = LeakyReLU()(up3_conv1)
up3_conv2 = Conv2D(128, 2, activation=None, padding='same',
kernel_initializer='he_normal')(up3_conv1)
up3_conv2 = LeakyReLU()(up3_conv2)
up4 = UpSampling2D(size=(2, 2))(up3_conv2)
up4_conv = Conv2D(128, 2, activation=None, padding='same',
kernel_initializer='he_normal')(up4)
up4_conv = LeakyReLU()(up4_conv)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer, up4_conv], axis = 3)
merge3_conv1 = Conv2D(128, 3, activation=None, padding = 'same',
kernel_initializer='he_normal')(merge3)
merge3_conv1 = LeakyReLU()(merge3_conv1)
merge3_conv2 = Conv2D(128, 3, activation=None, padding = 'same',
kernel_initializer='he_normal')(merge3_conv1)
merge3_conv2 = LeakyReLU()(merge3_conv2)
# Upsample 5
up5 = UpSampling2D(size=(2, 2))(merge3_conv2)
up5_conv = Conv2D(64, 2, activation=None, padding='same',
kernel_initializer='he_normal')(up5)
up5_conv = LeakyReLU()(up5_conv)
merge5_conv1 = Conv2D(64, 3, activation=None, padding='same',
kernel_initializer='he_normal')(up5_conv)
merge5_conv1 = LeakyReLU()(merge5_conv1)
merge5_conv2 = Conv2D(64, 3, activation=None, padding='same',
kernel_initializer='he_normal')(merge5_conv1)
merge5_conv2 = LeakyReLU()(merge5_conv2)
# Activation and reshape for training
activation = Conv2D(num_classes, 1, activation="softmax")(merge5_conv2)
# No final reshape - the generator keeps the full (dim, dim, num_classes) map
# Build model
model = Model(inputs=[resnet.input], outputs=[activation])
return model
def ResNet_UNet_Descriminator(dim=512, num_classes=12):
    """
    Discriminator for adversarial training.
    """
    from keras.models import Model
    from keras.layers import Conv2D, LeakyReLU, Dropout, Input, Flatten, Dense
    discriminator_input = Input(shape=(dim, dim, num_classes))
    x = Conv2D(128, 3)(discriminator_input)
    x = LeakyReLU()(x)
    x = Conv2D(128, 4, strides=2)(x)
    x = LeakyReLU()(x)
    x = Conv2D(128, 4, strides=2)(x)
    x = LeakyReLU()(x)
    x = Conv2D(128, 4, strides=2)(x)
    x = LeakyReLU()(x)
    x = Flatten()(x)
    # One dropout layer - an important trick
    x = Dropout(0.5)(x)
    # Classification layer
    x = Dense(1, activation='sigmoid')(x)
    model = Model(discriminator_input, x)
    return model
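# Illustrative sketch (an assumption, not the original adversarial-training
# code): one common wiring freezes the discriminator inside a combined model
# so that generator updates only move the generator's weights.
def example_build_gan(generator, discriminator, lr=1e-4):
    from keras.models import Model
    from keras.optimizers import Adam
    discriminator.compile(optimizer=Adam(lr), loss="binary_crossentropy")
    discriminator.trainable = False  # frozen only inside the combined model
    gan_output = discriminator(generator.output)
    gan = Model(generator.input, gan_output)
    gan.compile(optimizer=Adam(lr), loss="binary_crossentropy")
    return gan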
from keras.activations import softmax
from keras.initializers import Constant
from keras.layers import Layer, Reshape, Softmax
from keras.models import Model

class TemperatureScaling(Layer):
    def __init__(self, T=1, T_is_trainable=True, use_activation=False, **kwargs):
        self.T = T
        self.T_is_trainable = T_is_trainable
        self.use_activation = use_activation
        super(TemperatureScaling, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='T',
                                      shape=(1,),
                                      initializer=Constant(value=self.T),
                                      trainable=self.T_is_trainable)
        super(TemperatureScaling, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        if self.use_activation:
            return softmax(x / self.kernel)  # softmax over the temperature-scaled logits
        return x / self.kernel

    def compute_output_shape(self, input_shape):
        return input_shape

def generate_temperature_model(model, dim=256, T=1, trainable=True, extra_reshape=False):
    # Add Temperature Scaling
    inputs = model.get_input_at(0)
    x = model.layers[-2].output
    x = TemperatureScaling(T, trainable)(x)
    x = Reshape((dim*dim, 12))(x)  # note: the class count (12) is hard-coded here
    activation = Softmax(axis=-1)(x)
    if extra_reshape:
        activation = Reshape((dim, dim, 12))(activation)
    return Model(inputs=[inputs], outputs=[activation])
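# Illustrative sketch (an assumption, not original code): with every other
# layer frozen, the single temperature weight T can be fitted on held-out
# data by minimising the NLL of the scaled softmax.
def example_calibrate_temperature(model, x_val, y_val, dim=256):
    temp_model = generate_temperature_model(model, dim=dim, T=1, trainable=True)
    for layer in temp_model.layers:
        if not isinstance(layer, TemperatureScaling):
            layer.trainable = False  # only T may move
    temp_model.compile(optimizer="adam", loss="categorical_crossentropy")
    temp_model.fit(x_val, y_val, epochs=5, verbose=0)
    return temp_model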
if __name__ == "__main__":
dim = 256
num_classes = 2
model = UNet(dim, num_classes)
model.summary()
| 40.517304
| 125
| 0.63743
| 5,523
| 44,488
| 4.97266
| 0.055948
| 0.058477
| 0.100932
| 0.120157
| 0.839535
| 0.829267
| 0.821038
| 0.801158
| 0.767951
| 0.7567
| 0
| 0.067813
| 0.243257
| 44,488
| 1,097
| 126
| 40.554239
| 0.747965
| 0.202144
| 0
| 0.574576
| 0
| 0
| 0.078153
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025424
| false
| 0
| 0.079661
| 0.001695
| 0.130508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
264463f2c9f28464e736fbc0243cb833b7365ec5
| 169
|
py
|
Python
|
YOLO/Stronger-yolo-pytorch/models/__init__.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 12
|
2020-03-25T01:24:22.000Z
|
2021-09-18T06:40:16.000Z
|
YOLO/Stronger-yolo-pytorch/models/__init__.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 1
|
2020-04-22T07:52:36.000Z
|
2020-04-22T07:52:36.000Z
|
YOLO/Stronger-yolo-pytorch/models/__init__.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 4
|
2020-03-25T01:24:26.000Z
|
2020-09-20T11:29:09.000Z
|
from .strongerv1 import StrongerV1
from .strongerv3 import StrongerV3
from .strongerv3kl import StrongerV3KL
from .strongerv3_US import StrongerV3_US,StrongerV3_US_dummy
| 42.25
| 60
| 0.881657
| 21
| 169
| 6.904762
| 0.333333
| 0.248276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058442
| 0.088757
| 169
| 4
| 60
| 42.25
| 0.883117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd07aaf06d760891a4b6dbbe515606e131cbbe22
| 35
|
py
|
Python
|
je_editor/utils/file/__init__.py
|
JE-Chen/je_editor
|
2f18dedb6f0eb27c38668dc53f520739c8d5c6c6
|
[
"MIT"
] | 1
|
2021-12-10T14:57:15.000Z
|
2021-12-10T14:57:15.000Z
|
je_editor/utils/file/__init__.py
|
JE-Chen/je_editor
|
2f18dedb6f0eb27c38668dc53f520739c8d5c6c6
|
[
"MIT"
] | null | null | null |
je_editor/utils/file/__init__.py
|
JE-Chen/je_editor
|
2f18dedb6f0eb27c38668dc53f520739c8d5c6c6
|
[
"MIT"
] | null | null | null |
from je_editor.utils.file import *
| 17.5
| 34
| 0.8
| 6
| 35
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd1a35d1e4a4ce911c01edfff419353ef0aa1e85
| 111
|
py
|
Python
|
scvi/utils/__init__.py
|
SarahND97/scvi-tools
|
fbb4acf72b09cef6e4a9465255a7f95caf3f3eb5
|
[
"BSD-3-Clause"
] | 280
|
2020-09-18T06:26:28.000Z
|
2022-03-01T20:28:14.000Z
|
scvi/utils/__init__.py
|
SarahND97/scvi-tools
|
fbb4acf72b09cef6e4a9465255a7f95caf3f3eb5
|
[
"BSD-3-Clause"
] | 594
|
2020-09-17T00:03:34.000Z
|
2022-03-02T21:45:17.000Z
|
scvi/utils/__init__.py
|
SarahND97/scvi-tools
|
fbb4acf72b09cef6e4a9465255a7f95caf3f3eb5
|
[
"BSD-3-Clause"
] | 96
|
2020-09-19T21:26:00.000Z
|
2022-02-25T05:38:05.000Z
|
from ._docstrings import setup_anndata_dsp
from ._track import track
__all__ = ["track", "setup_anndata_dsp"]
| 22.2
| 42
| 0.792793
| 15
| 111
| 5.2
| 0.533333
| 0.307692
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117117
| 111
| 4
| 43
| 27.75
| 0.795918
| 0
| 0
| 0
| 0
| 0
| 0.198198
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd2d4d4356bb4a322d2d442d1e635e8d4f07528f
| 901
|
py
|
Python
|
DESAFIO-102.py
|
Lukones/Evolution-Projetos-Python
|
d979f3702f0e22ab5256b19fd957dba587c44f85
|
[
"MIT"
] | null | null | null |
DESAFIO-102.py
|
Lukones/Evolution-Projetos-Python
|
d979f3702f0e22ab5256b19fd957dba587c44f85
|
[
"MIT"
] | null | null | null |
DESAFIO-102.py
|
Lukones/Evolution-Projetos-Python
|
d979f3702f0e22ab5256b19fd957dba587c44f85
|
[
"MIT"
] | null | null | null |
def leiaInt(msg):
    while True:
        try:
            n = int(input(msg))
        except (ValueError, TypeError):
            print('\033[31mERROR: please enter a valid integer.\033[m')
            continue
        except KeyboardInterrupt:
            print('\033[31mInput interrupted by the user.\033[m')
            return 0
        else:
            return n
def leiaFloat(msg):
    while True:
        try:
            n = float(input(msg))
        except (ValueError, TypeError):
            print('\033[31mERROR: please enter a valid number.\033[m')
            continue
        except KeyboardInterrupt:
            print('\033[31mInput interrupted by the user.\033[m')
            return 0
        else:
            return n
n1 = leiaInt('Enter a value: ')
n2 = leiaFloat('Enter a value: ')
print(f'The values entered were {n1} and {n2}')
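# Illustrative refactor (an assumption, not part of the original exercise):
# leiaInt and leiaFloat differ only in the cast, so both could share one
# generic reader.
def leia(msg, cast=int):
    """Keep prompting until cast(input(msg)) succeeds; return 0 on Ctrl-C."""
    while True:
        try:
            return cast(input(msg))
        except (ValueError, TypeError):
            print('\033[31mERROR: please enter a valid value.\033[m')
        except KeyboardInterrupt:
            print('\033[31mInput interrupted by the user.\033[m')
            return 0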
| 26.5
| 84
| 0.552719
| 105
| 901
| 4.742857
| 0.419048
| 0.064257
| 0.048193
| 0.060241
| 0.791165
| 0.726908
| 0.726908
| 0.726908
| 0.726908
| 0.726908
| 0
| 0.063973
| 0.340733
| 901
| 33
| 85
| 27.30303
| 0.774411
| 0
| 0
| 0.740741
| 0
| 0
| 0.324805
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0
| 0
| 0.222222
| 0.185185
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cd35b8d033069cd1368eccd89ab68225e32e9907
| 18
|
py
|
Python
|
test_fixtures/plugins/d/__init__.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 11,433
|
2017-06-27T03:08:46.000Z
|
2022-03-31T18:14:33.000Z
|
test_fixtures/plugins/d/__init__.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 4,006
|
2017-06-26T21:45:43.000Z
|
2022-03-31T02:11:10.000Z
|
test_fixtures/plugins/d/__init__.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 2,560
|
2017-06-26T21:16:53.000Z
|
2022-03-30T07:55:46.000Z
|
from d.d import D
| 9
| 17
| 0.722222
| 5
| 18
| 2.6
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 1
| 18
| 18
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd521f75878b0338e71fbdada18d4b539f5520da
| 28
|
py
|
Python
|
src/jenova/config/__init__.py
|
inova-tecnologias/jenova
|
c975f0894b8663c6a9c9fdc7fa33590a219a6ad3
|
[
"Apache-2.0"
] | 2
|
2016-08-10T15:08:47.000Z
|
2016-10-25T14:27:51.000Z
|
src/jenova/config/__init__.py
|
inova-tecnologias/jenova
|
c975f0894b8663c6a9c9fdc7fa33590a219a6ad3
|
[
"Apache-2.0"
] | 41
|
2016-08-04T20:19:49.000Z
|
2017-03-07T20:05:53.000Z
|
src/jenova/config/__init__.py
|
inova-tecnologias/jenova
|
c975f0894b8663c6a9c9fdc7fa33590a219a6ad3
|
[
"Apache-2.0"
] | 3
|
2016-09-26T19:04:51.000Z
|
2017-10-26T22:13:45.000Z
|
from . import rq_dash_config
| 28
| 28
| 0.857143
| 5
| 28
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd530f2e85a07b9ef14ec22d99b91bdc501a87d6
| 72
|
py
|
Python
|
xpclr/__init__.py
|
hardingnj/xpclr
|
a555c442b6ce9deff5ecff6e3080a5bde0acb557
|
[
"MIT"
] | 61
|
2018-07-25T14:05:22.000Z
|
2022-02-02T19:38:51.000Z
|
xpclr/__init__.py
|
hardingnj/xpclr
|
a555c442b6ce9deff5ecff6e3080a5bde0acb557
|
[
"MIT"
] | 62
|
2016-09-21T10:01:02.000Z
|
2022-03-10T19:59:36.000Z
|
xpclr/__init__.py
|
hardingnj/xpclr
|
a555c442b6ce9deff5ecff6e3080a5bde0acb557
|
[
"MIT"
] | 20
|
2018-04-17T07:55:16.000Z
|
2022-03-25T09:12:52.000Z
|
__version__ = "1.1.2"
from xpclr import methods
from xpclr import util
| 14.4
| 25
| 0.763889
| 12
| 72
| 4.25
| 0.666667
| 0.352941
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.166667
| 72
| 4
| 26
| 18
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.069444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd54135d70c6235c98b4663857bb39edbb5bc9d4
| 194
|
py
|
Python
|
chat_wars_database/app/setup/help_conn.py
|
ricardochaves/chat-wars-database
|
597f192fb6ddf290c6c7477cf8c7d0ca654925f6
|
[
"MIT"
] | 1
|
2019-12-30T19:16:52.000Z
|
2019-12-30T19:16:52.000Z
|
chat_wars_database/app/setup/help_conn.py
|
ricardochaves/chat-wars-database
|
597f192fb6ddf290c6c7477cf8c7d0ca654925f6
|
[
"MIT"
] | null | null | null |
chat_wars_database/app/setup/help_conn.py
|
ricardochaves/chat-wars-database
|
597f192fb6ddf290c6c7477cf8c7d0ca654925f6
|
[
"MIT"
] | null | null | null |
from django import db
from chat_wars_database.settings import COMMAND_CLOSE_CONNECTIONS
def close_connections() -> None:
if COMMAND_CLOSE_CONNECTIONS:
db.connections.close_all()
| 19.4
| 65
| 0.783505
| 25
| 194
| 5.76
| 0.6
| 0.333333
| 0.319444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159794
| 194
| 9
| 66
| 21.555556
| 0.883436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
cd5d0efef4c59184db188d3ffe999fe346741aa1
| 2,890
|
py
|
Python
|
chess/lib/heuristics.py
|
SamMatzko/My-PyChess
|
4b1b30b03b85679a2480e86b649614917a156ab0
|
[
"MIT"
] | 64
|
2019-10-27T06:49:57.000Z
|
2022-03-29T11:07:07.000Z
|
chess/lib/heuristics.py
|
SamMatzko/My-PyChess
|
4b1b30b03b85679a2480e86b649614917a156ab0
|
[
"MIT"
] | 11
|
2020-01-28T08:16:25.000Z
|
2021-12-13T18:44:12.000Z
|
chess/lib/heuristics.py
|
SamMatzko/My-PyChess
|
4b1b30b03b85679a2480e86b649614917a156ab0
|
[
"MIT"
] | 30
|
2020-01-09T10:05:01.000Z
|
2022-03-18T18:16:30.000Z
|
"""
This file is a part of My-PyChess application.
In this file, we define heuristic constants required for the python chess
engine.
"""
pawnEvalWhite = (
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0),
(2.0, 2.0, 3.0, 5.0, 5.0, 3.0, 2.0, 2.0),
(0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5),
(0.0, 0.0, 0.5, 2.0, 2.0, 0.5, 0.0, 0.0),
(0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5),
(0.5, 1.0, 0.5, -2.0, -2.0, 0.5, 1.0, 0.5),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
)
pawnEvalBlack = tuple(reversed(pawnEvalWhite))
knightEval = (
(-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0),
(-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0),
(-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0),
(-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0),
(-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0),
(-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0),
(-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0),
(-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0),
)
bishopEvalWhite = (
(-2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0),
(-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0),
(-1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0),
(-1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0),
(-1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0),
(-1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0),
(-1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0),
(-2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0),
)
bishopEvalBlack = tuple(reversed(bishopEvalWhite))
rookEvalWhite = (
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5),
(-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5),
(-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5),
(-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5),
(-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5),
(-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5),
(0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0),
)
rookEvalBlack = tuple(reversed(rookEvalWhite))
queenEval = (
(-2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0),
(-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0),
(-1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0),
(-0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5),
(0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5),
(-1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0),
(-1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0),
(-2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0),
)
kingEvalWhite = (
(-3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0),
(-3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0),
(-3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0),
(-3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0),
(-2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0),
(-1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0),
(2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0),
(2.0, 3.0, 3.0, 0.0, 0.0, 1.0, 3.0, 2.0),
)
kingEvalBlack = tuple(reversed(kingEvalWhite))
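# Illustrative sketch (an assumption, not part of the original engine):
# piece-square tables like the ones above are typically summed over the
# occupied squares. Here `board` is assumed to be an 8x8 array of single
# characters with 'P' marking a white pawn.
def example_pawn_score(board):
    """Sum pawnEvalWhite over every square holding a white pawn ('P')."""
    return sum(
        pawnEvalWhite[r][c]
        for r in range(8)
        for c in range(8)
        if board[r][c] == 'P'
    )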
| 36.582278
| 74
| 0.382353
| 812
| 2,890
| 1.360837
| 0.04803
| 0.441629
| 0.507692
| 0.557466
| 0.691403
| 0.681448
| 0.668778
| 0.632579
| 0.61629
| 0.527602
| 0
| 0.354244
| 0.249827
| 2,890
| 79
| 75
| 36.582278
| 0.155443
| 0.044291
| 0
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2699ad10909fd9b03872f82a48498b3a9ea557ad
| 3,695
|
py
|
Python
|
tests/cloudio/common/test_utils_attribute_helpers.py
|
boozo-unlimited/cloudio-common-python
|
d612c2c0002cfdd85b8adf631f5f2a711d1316a7
|
[
"MIT"
] | null | null | null |
tests/cloudio/common/test_utils_attribute_helpers.py
|
boozo-unlimited/cloudio-common-python
|
d612c2c0002cfdd85b8adf631f5f2a711d1316a7
|
[
"MIT"
] | null | null | null |
tests/cloudio/common/test_utils_attribute_helpers.py
|
boozo-unlimited/cloudio-common-python
|
d612c2c0002cfdd85b8adf631f5f2a711d1316a7
|
[
"MIT"
] | 1
|
2021-06-24T12:04:04.000Z
|
2021-06-24T12:04:04.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from tests.cloudio.common.paths import update_working_directory
from cloudio.common.utils import attribute_helpers
update_working_directory() # Needed when: 'pipenv run python -m unittest tests/cloudio/common/{this_file}.py'
class TestCloudioCommonUtilsAttributeHelpers(unittest.TestCase):
"""Tests attribute_helpers module.
"""
def test_generate_attribute_names_by_name(self):
attribute_name = 'power'
attribute_names = attribute_helpers.generate_attribute_names_by_name(attribute_name)
self.assertTrue(isinstance(attribute_names, tuple))
self.assertTupleEqual(attribute_names, ('_power', 'power'))
attribute_name = 'powerForMoreFreedom'
attribute_names = attribute_helpers.generate_attribute_names_by_name(attribute_name)
self.assertTrue(isinstance(attribute_names, tuple))
self.assertTupleEqual(attribute_names, ('_power_for_more_freedom', 'power_for_more_freedom'))
attribute_name = '_powerForMoreFreedom'
attribute_names = attribute_helpers.generate_attribute_names_by_name(attribute_name)
self.assertTrue(isinstance(attribute_names, tuple))
self.assertTupleEqual(attribute_names, ('_power_for_more_freedom', 'power_for_more_freedom'))
attribute_name = 'flowers_and_bees'
attribute_names = attribute_helpers.generate_attribute_names_by_name(attribute_name)
self.assertTrue(isinstance(attribute_names, tuple))
self.assertTupleEqual(attribute_names, ('_flowers_and_bees', 'flowers_and_bees'))
attribute_name = 'trees-and-flowers'
attribute_names = attribute_helpers.generate_attribute_names_by_name(attribute_name)
self.assertTrue(isinstance(attribute_names, tuple))
self.assertTupleEqual(attribute_names, ('_trees_and_flowers', 'trees_and_flowers'))
def test_generate_attribute_names_by_name_coverage(self):
attribute_name = ''
attribute_names = attribute_helpers.generate_attribute_names_by_name(attribute_name)
self.assertTupleEqual(attribute_names, tuple())
def test_generate_setters_from_attribute_name(self):
attribute_name = 'power'
setter_method_names = attribute_helpers.generate_setters_from_attribute_name(attribute_name)
self.assertTrue(isinstance(setter_method_names, tuple))
self.assertTupleEqual(setter_method_names, ('set_power', 'setPower'))
attribute_name = 'powerForMoreFreedom'
setter_method_names = attribute_helpers.generate_setters_from_attribute_name(attribute_name)
self.assertTrue(isinstance(setter_method_names, tuple))
self.assertTupleEqual(setter_method_names, ('set_power_for_more_freedom', 'setPowerForMoreFreedom'))
attribute_name = 'power_for_more_freedom'
setter_method_names = attribute_helpers.generate_setters_from_attribute_name(attribute_name)
self.assertTrue(isinstance(setter_method_names, tuple))
self.assertTupleEqual(setter_method_names, ('set_power_for_more_freedom', 'setPowerForMoreFreedom'))
attribute_name = '_power_for_more_freedom'
setter_method_names = attribute_helpers.generate_setters_from_attribute_name(attribute_name)
self.assertTrue(isinstance(setter_method_names, tuple))
self.assertTupleEqual(setter_method_names, ('set_power_for_more_freedom', 'setPowerForMoreFreedom'))
def test_generate_setters_from_attribute_name_coverage(self):
attribute_name = ''
attribute_names = attribute_helpers.generate_setters_from_attribute_name(attribute_name)
self.assertTupleEqual(attribute_names, tuple())
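# Illustrative sketch (an assumption, not the library's actual source): the
# expectations above are consistent with a helper that strips a leading
# underscore, normalises hyphens, snake_cases the name, and returns the
# private/public pair.
import re

def sketch_generate_attribute_names_by_name(name):
    if not name:
        return tuple()
    name = name.lstrip('_').replace('-', '_')
    snake = re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
    return ('_' + snake, snake)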
| 50.616438
| 110
| 0.77456
| 408
| 3,695
| 6.531863
| 0.144608
| 0.141463
| 0.076548
| 0.1197
| 0.830019
| 0.813508
| 0.813508
| 0.760976
| 0.734709
| 0.734709
| 0
| 0.000316
| 0.14452
| 3,695
| 72
| 111
| 51.319444
| 0.842771
| 0.043302
| 0
| 0.627451
| 0
| 0
| 0.134959
| 0.079104
| 0
| 0
| 0
| 0
| 0.392157
| 1
| 0.078431
| false
| 0
| 0.058824
| 0
| 0.156863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
269d599b5fcf6ba59c256010e185a532f2ef9d03
| 46
|
py
|
Python
|
tardis/constants.py
|
GOLoDovkA-A/tardis
|
847b562022ccda2db2486549f739188ba48f172c
|
[
"BSD-3-Clause"
] | 1
|
2020-02-24T20:58:02.000Z
|
2020-02-24T20:58:02.000Z
|
tardis/constants.py
|
GOLoDovkA-A/tardis
|
847b562022ccda2db2486549f739188ba48f172c
|
[
"BSD-3-Clause"
] | 2
|
2019-06-10T11:24:50.000Z
|
2019-06-18T17:28:59.000Z
|
tardis/constants.py
|
GOLoDovkA-A/tardis
|
847b562022ccda2db2486549f739188ba48f172c
|
[
"BSD-3-Clause"
] | 1
|
2019-06-10T10:21:41.000Z
|
2019-06-10T10:21:41.000Z
|
from astropy.constants.astropyconst13 import *
| 46
| 46
| 0.869565
| 5
| 46
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 0.065217
| 46
| 1
| 46
| 46
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
26ccdccb260c1cb2b6a206f3a81ead02f2c6befd
| 178
|
py
|
Python
|
NotionDump/SQL/sql2notion.py
|
GatherStar/notion-dump-kernel
|
8ae9a53dfd8ad7beddbe53433ae1c44b58fdc606
|
[
"MIT"
] | 1
|
2022-02-10T15:35:22.000Z
|
2022-02-10T15:35:22.000Z
|
NotionDump/SQL/sql2notion.py
|
GatherStar/notion-dump-kernel
|
8ae9a53dfd8ad7beddbe53433ae1c44b58fdc606
|
[
"MIT"
] | null | null | null |
NotionDump/SQL/sql2notion.py
|
GatherStar/notion-dump-kernel
|
8ae9a53dfd8ad7beddbe53433ae1c44b58fdc606
|
[
"MIT"
] | null | null | null |
# author: delta1037
# Date: 2022/01/08
# mail:geniusrabbit@qq.com
# Convert database tables into Markdown and CSV files
class SQL2Notion:
def __init__(self, db_connect):
self.db_connect = db_connect
| 17.8
| 36
| 0.719101
| 23
| 178
| 5.26087
| 0.782609
| 0.223141
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089041
| 0.179775
| 178
| 9
| 37
| 19.777778
| 0.739726
| 0.426966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
f857ce4cd89975dea6bb11d9aab6635903891af1
| 65
|
py
|
Python
|
config/__init__.py
|
openregister/openregister-widgets
|
8d6978302cc579ae65f9188ba90fbb63b89fbabd
|
[
"MIT"
] | null | null | null |
config/__init__.py
|
openregister/openregister-widgets
|
8d6978302cc579ae65f9188ba90fbb63b89fbabd
|
[
"MIT"
] | 3
|
2015-10-08T09:03:05.000Z
|
2017-02-08T13:50:35.000Z
|
config/__init__.py
|
openregister/openregister-widgets
|
8d6978302cc579ae65f9188ba90fbb63b89fbabd
|
[
"MIT"
] | 1
|
2021-04-11T08:29:48.000Z
|
2021-04-11T08:29:48.000Z
|
from .config import Config
from .config import DevelopmentConfig
| 21.666667
| 37
| 0.846154
| 8
| 65
| 6.875
| 0.5
| 0.363636
| 0.581818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 65
| 2
| 38
| 32.5
| 0.964912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f858b0496b59140335342d22f9609288a1a05962
| 100
|
py
|
Python
|
example/tests/test_mylib.py
|
movermeyer/pytest-cover
|
fd26cfa8406b6e8eaae49e03dddad558f4b59380
|
[
"MIT"
] | null | null | null |
example/tests/test_mylib.py
|
movermeyer/pytest-cover
|
fd26cfa8406b6e8eaae49e03dddad558f4b59380
|
[
"MIT"
] | null | null | null |
example/tests/test_mylib.py
|
movermeyer/pytest-cover
|
fd26cfa8406b6e8eaae49e03dddad558f4b59380
|
[
"MIT"
] | null | null | null |
import mylib
def test_add():
assert mylib.add(1, 1) == 2
assert not mylib.add(0, 1) == 2
| 12.5
| 35
| 0.59
| 18
| 100
| 3.222222
| 0.555556
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 0.26
| 100
| 7
| 36
| 14.285714
| 0.702703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f862db57a8cad1efe0a69fe32f7a1e9a7824140e
| 9,983
|
py
|
Python
|
dataflake/fakeldap/tests/test_fakeldap_search.py
|
Addepar/dataflake.fakeldap
|
6ef1b3b9b8d7198a132b7dcce83d5a855db9a577
|
[
"ZPL-2.1"
] | null | null | null |
dataflake/fakeldap/tests/test_fakeldap_search.py
|
Addepar/dataflake.fakeldap
|
6ef1b3b9b8d7198a132b7dcce83d5a855db9a577
|
[
"ZPL-2.1"
] | 6
|
2017-12-12T00:52:22.000Z
|
2018-02-08T15:47:42.000Z
|
dataflake/fakeldap/tests/test_fakeldap_search.py
|
Addepar/dataflake.fakeldap
|
6ef1b3b9b8d7198a132b7dcce83d5a855db9a577
|
[
"ZPL-2.1"
] | 1
|
2020-05-13T11:29:59.000Z
|
2020-05-13T11:29:59.000Z
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008-2012 Jens Vagelpohl and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from dataflake.fakeldap.tests.base import FakeLDAPTests
from dataflake.fakeldap.utils import to_utf8
class FakeLDAPSearchTests(FakeLDAPTests):
def test_search_specific(self):
conn = self._makeOne()
self._addUser('foo')
self._addUser('footwo')
self._addUser('thirdfoo')
res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn=foo)')
dn_values = [dn for (dn, attr_dict) in res]
self.assertEqual(len(dn_values), 1)
self.assertEqual(dn_values, [b'cn=foo,ou=users,dc=localhost'])
def test_search_specific_leadingspace(self):
conn = self._makeOne()
self._addUser('foo')
self._addUser('footwo')
self._addUser('thirdfoo')
res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn= foo)')
dn_values = [dn for (dn, attr_dict) in res]
self.assertEqual(len(dn_values), 1)
self.assertEqual(dn_values, [b'cn=foo,ou=users,dc=localhost'])
def test_search_specific_trailingspace(self):
conn = self._makeOne()
self._addUser('foo')
self._addUser('footwo')
self._addUser('thirdfoo')
res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn=foo )')
dn_values = [dn for (dn, attr_dict) in res]
self.assertEqual(len(dn_values), 1)
self.assertEqual(dn_values, [b'cn=foo,ou=users,dc=localhost'])
def test_search_specific_leadingtrailingspace(self):
conn = self._makeOne()
self._addUser('foo')
self._addUser('footwo')
self._addUser('thirdfoo')
res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn= foo )')
dn_values = [dn for (dn, attr_dict) in res]
self.assertEqual(len(dn_values), 1)
self.assertEqual(dn_values, [b'cn=foo,ou=users,dc=localhost'])
def test_search_nonspecific(self):
conn = self._makeOne()
self._addUser('foo')
self._addUser('bar')
self._addUser('baz')
res = conn.search_s(b'ou=users,dc=localhost', query=b'(objectClass=*)')
dn_values = [dn for (dn, attr_dict) in res]
self.assertEqual(len(dn_values), 3)
# Note: searches that return all results and do not use scope BASE
# will return RDNs instead of full DNs
self.assertEqual(set(dn_values),
set([b'cn=foo', b'cn=bar', b'cn=baz']))
def test_search_nonspecific_scope_base(self):
import ldap
conn = self._makeOne()
user_dn, password = self._addUser('foo')
res = conn.search_s(user_dn, scope=ldap.SCOPE_BASE,
query=b'(objectClass=*)')
dn_values = [dn for (dn, attr_dict) in res]
self.assertEqual(len(dn_values), 1)
self.assertEqual(dn_values, [b'cn=foo,ou=users,dc=localhost'])
def test_search_specific_scope_base(self):
import ldap
conn = self._makeOne()
user_dn, password = self._addUser('foo')
res = conn.search_s(user_dn, scope=ldap.SCOPE_BASE,
query=b'(&(objectClass=person)(cn=foo))')
dn_values = [dn for (dn, attr_dict) in res]
self.assertEqual(len(dn_values), 1)
self.assertEqual(dn_values, [b'cn=foo,ou=users,dc=localhost'])

    def test_search_full_wildcard(self):
        conn = self._makeOne()
        self._addUser('foo')
        self._addUser('footwo')
        self._addUser('threefoo')
        res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn=*)')
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 3)
        self.assertEqual(set(dn_values),
                         set([b'cn=foo,ou=users,dc=localhost',
                              b'cn=footwo,ou=users,dc=localhost',
                              b'cn=threefoo,ou=users,dc=localhost']))

    def test_search_startswithendswith_wildcard(self):
        conn = self._makeOne()
        self._addUser('foo')
        self._addUser('onefootwo')
        self._addUser('threefoo')
        self._addUser('bar')
        res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn=*foo*)')
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 3)
        self.assertEqual(set(dn_values),
                         set([b'cn=foo,ou=users,dc=localhost',
                              b'cn=onefootwo,ou=users,dc=localhost',
                              b'cn=threefoo,ou=users,dc=localhost']))

    def test_search_endswith_wildcard(self):
        conn = self._makeOne()
        self._addUser('foo')
        self._addUser('footwo')
        self._addUser('threefoo')
        res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn=*foo)')
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 2)
        self.assertEqual(set(dn_values),
                         set([b'cn=foo,ou=users,dc=localhost',
                              b'cn=threefoo,ou=users,dc=localhost']))

    def test_search_startswith_wildcard(self):
        conn = self._makeOne()
        self._addUser('foo')
        self._addUser('footwo')
        self._addUser('threefoo')
        res = conn.search_s(b'ou=users,dc=localhost', query=b'(cn=foo*)')
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 2)
        self.assertEqual(set(dn_values),
                         set([b'cn=foo,ou=users,dc=localhost',
                              b'cn=footwo,ou=users,dc=localhost']))
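
    # Composite filters: b'(&...)' requires every sub-filter to match,
    # while b'(|...)' matches when any sub-filter does.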

    def test_search_anded_filter(self):
        conn = self._makeOne()
        self._addUser('foo')
        self._addUser('bar')
        self._addUser('baz')
        query_success = b'(&(cn=foo)(objectClass=person))'
        res = conn.search_s(b'ou=users,dc=localhost', query=query_success)
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 1)
        self.assertEqual(dn_values, [b'cn=foo,ou=users,dc=localhost'])
        query_failure = b'(&(cn=foo)(objectClass=inetOrgPerson))'
        self.assertFalse(conn.search_s(b'ou=users,dc=localhost',
                                       query=query_failure))

    def test_search_ored_filter(self):
        conn = self._makeOne()
        self._addUser('foo')
        self._addUser('bar')
        self._addUser('baz')
        res = conn.search_s(b'ou=users,dc=localhost',
                            query=b'(|(cn=foo)(cn=bar))')
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 2)
        self.assertEqual(set(dn_values),
                         set([b'cn=foo,ou=users,dc=localhost',
                              b'cn=bar,ou=users,dc=localhost']))
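
    # A base DN that does not exist in the fake directory tree raises
    # ldap.NO_SUCH_OBJECT, mirroring the behavior of a real server.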

    def test_search_invalid_base(self):
        import ldap
        conn = self._makeOne()
        self._addUser('foo')
        self.assertRaises(ldap.NO_SUCH_OBJECT, conn.search_s,
                          b'o=base', query=b'(objectClass=*)')

    def test_search_by_mail(self):
        conn = self._makeOne()
        self._addUser('foo', mail='foo@foo.com')
        self._addUser('bar', mail='bar@bar.com')
        self._addUser('baz', mail='baz@baz.com')
        res = conn.search_s(b'ou=users,dc=localhost',
                            query=b'(|(mail=foo@foo.com)(mail=bar@bar.com))')
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 2)
        self.assertEqual(set(dn_values),
                         set([b'cn=foo,ou=users,dc=localhost',
                              b'cn=bar,ou=users,dc=localhost']))
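
    # Queries are byte strings, so non-ASCII values must be UTF-8
    # encoded before they are interpolated into a filter expression.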

    def test_search_by_utf8(self):
        conn = self._makeOne()
        utf8_foo = to_utf8(u'f\xf8\xf8')
        utf8_bar = to_utf8(u'b\xe5r')
        self._addUser(utf8_foo)
        self._addUser(utf8_bar)
        self._addUser('baz')
        res = conn.search_s(b'ou=users,dc=localhost',
                            query=b'(|(cn=%s)(cn=%s))' % (utf8_foo, utf8_bar))
        dn_values = [dn for (dn, attr_dict) in res]
        self.assertEqual(len(dn_values), 2)
        self.assertEqual(set(dn_values),
                         set([b'cn=%s,ou=users,dc=localhost' % utf8_foo,
                              b'cn=%s,ou=users,dc=localhost' % utf8_bar]))
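
    # The attrs argument controls the returned attributes: None returns
    # every attribute of a matching entry, while a list of attribute
    # names restricts the result to exactly those keys.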

    def test_return_all_attributes(self):
        conn = self._makeOne()
        self._addUser('foo', mail='foo@foo.com')
        res = conn.search_s(b'ou=users,dc=localhost',
                            query=b'(cn=foo)', attrs=None)
        self.assertEqual(len(res), 1)
        dn, attr_dict = res[0]
        self.assertEqual(dn, b'cn=foo,ou=users,dc=localhost')
        self.assertIn(b'cn', attr_dict)
        self.assertIn(b'mail', attr_dict)
        self.assertIn(b'userPassword', attr_dict)
        self.assertIn(b'objectClass', attr_dict)

    def test_return_filtered_attributes(self):
        conn = self._makeOne()
        self._addUser('foo', mail='foo@foo.com')
        res = conn.search_s(b'ou=users,dc=localhost',
                            query=b'(cn=foo)', attrs=[b'cn', b'mail'])
        self.assertEqual(len(res), 1)
        dn, attr_dict = res[0]
        self.assertEqual(dn, b'cn=foo,ou=users,dc=localhost')
        self.assertIn(b'cn', attr_dict)
        self.assertIn(b'mail', attr_dict)
        self.assertNotIn(b'userPassword', attr_dict)
        self.assertNotIn(b'objectClass', attr_dict)
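

# A minimal runner sketch (an assumption, not part of the original
# module) so the test file can be executed directly with `python`:
if __name__ == '__main__':
    import unittest
    unittest.main()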