hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a3e2a0ac0503f5dc07ca496db5ccde98867c070b
| 93
|
py
|
Python
|
dlc/pose-tensorflow/net_factory.py
|
JaneliaSciComp/delectable
|
a8a1eb23b96f83c332d4b14593e0ae209bb062b2
|
[
"BSD-3-Clause"
] | null | null | null |
dlc/pose-tensorflow/net_factory.py
|
JaneliaSciComp/delectable
|
a8a1eb23b96f83c332d4b14593e0ae209bb062b2
|
[
"BSD-3-Clause"
] | 1
|
2020-03-09T07:32:01.000Z
|
2020-03-09T17:43:00.000Z
|
dlc/pose-tensorflow/nnet/net_factory.py
|
JaneliaSciComp/delectable
|
a8a1eb23b96f83c332d4b14593e0ae209bb062b2
|
[
"BSD-3-Clause"
] | 1
|
2020-06-16T04:12:58.000Z
|
2020-06-16T04:12:58.000Z
|
from nnet.pose_net import PoseNet
def pose_net(cfg):
cls = PoseNet
return cls(cfg)
| 13.285714
| 33
| 0.698925
| 15
| 93
| 4.2
| 0.666667
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 93
| 6
| 34
| 15.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
430402d4455b92d16eb05a544b90fc89eded6d1a
| 274
|
py
|
Python
|
kedro/framework/cli/__init__.py
|
hfwittmann/kedro
|
b0d4fcd8f19b49a7916d78fd09daeb6209a7b6c6
|
[
"Apache-2.0"
] | 1
|
2021-11-25T12:33:13.000Z
|
2021-11-25T12:33:13.000Z
|
kedro/framework/cli/__init__.py
|
MerelTheisenQB/kedro
|
1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827
|
[
"Apache-2.0"
] | null | null | null |
kedro/framework/cli/__init__.py
|
MerelTheisenQB/kedro
|
1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827
|
[
"Apache-2.0"
] | null | null | null |
"""``kedro.framework.cli`` implements commands available from Kedro's CLI.
"""
from .cli import get_project_context, main
from .utils import command_with_verbosity, load_entry_points
__all__ = ["get_project_context", "main", "command_with_verbosity", "load_entry_points"]
| 34.25
| 88
| 0.788321
| 37
| 274
| 5.405405
| 0.567568
| 0.1
| 0.17
| 0.21
| 0.35
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094891
| 274
| 7
| 89
| 39.142857
| 0.806452
| 0.259124
| 0
| 0
| 0
| 0
| 0.316327
| 0.112245
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
43078238db5cfaba6f3c4b017569cdaf0cf2dff0
| 303
|
py
|
Python
|
cvxlinreg/abs_value.py
|
alexshtf/inc_prox_pt
|
a826c7179a528757399e661c5619a68dad254711
|
[
"MIT"
] | null | null | null |
cvxlinreg/abs_value.py
|
alexshtf/inc_prox_pt
|
a826c7179a528757399e661c5619a68dad254711
|
[
"MIT"
] | null | null | null |
cvxlinreg/abs_value.py
|
alexshtf/inc_prox_pt
|
a826c7179a528757399e661c5619a68dad254711
|
[
"MIT"
] | null | null | null |
import math
class AbsValue:
def eval(self, z):
return abs(z)
def conjugate_has_compact_domain(self):
return True
def domain(self):
return (-1, 1)
def conjugate(self, s):
if -1 <= s <= 1:
return 0
else:
return math.inf
| 15.947368
| 43
| 0.518152
| 39
| 303
| 3.948718
| 0.538462
| 0.155844
| 0.207792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026882
| 0.386139
| 303
| 18
| 44
| 16.833333
| 0.801075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.076923
| 0.230769
| 0.846154
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
431052d72c290d057f695aebfccff80b6ecec3c2
| 32,126
|
py
|
Python
|
torchdrug/layers/conv.py
|
wconnell/torchdrug
|
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
|
[
"Apache-2.0"
] | 772
|
2021-08-10T05:03:46.000Z
|
2022-03-31T12:48:31.000Z
|
torchdrug/layers/conv.py
|
wconnell/torchdrug
|
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
|
[
"Apache-2.0"
] | 77
|
2021-08-12T16:19:15.000Z
|
2022-03-30T14:32:14.000Z
|
torchdrug/layers/conv.py
|
wconnell/torchdrug
|
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
|
[
"Apache-2.0"
] | 90
|
2021-08-11T16:27:13.000Z
|
2022-03-28T11:41:53.000Z
|
import functools
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import checkpoint
from torch_scatter import scatter_mean, scatter_add, scatter_max
from torchdrug import data, layers, utils
from torchdrug.layers import functional
class MessagePassingBase(nn.Module):
"""
Base module for message passing.
Any custom message passing module should be derived from this class.
"""
gradient_checkpoint = False
def message(self, graph, input):
"""
Compute edge messages for the graph.
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
Returns:
Tensor: edge messages of shape :math:`(|E|, ...)`
"""
raise NotImplementedError
def aggregate(self, graph, message):
"""
Aggregate edge messages to nodes.
Parameters:
graph (Graph): graph(s)
message (Tensor): edge messages of shape :math:`(|E|, ...)`
Returns:
Tensor: node updates of shape :math:`(|V|, ...)`
"""
raise NotImplementedError
def message_and_aggregate(self, graph, input):
"""
Fused computation of message and aggregation over the graph.
This may provide better time or memory complexity than separate calls of
:meth:`message <MessagePassingBase.message>` and :meth:`aggregate <MessagePassingBase.aggregate>`.
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
Returns:
Tensor: node updates of shape :math:`(|V|, ...)`
"""
message = self.message(graph, input)
update = self.aggregate(graph, message)
return update
def _message_and_aggregate(self, *tensors):
graph = data.Graph.from_tensors(tensors[:-1])
input = tensors[-1]
update = self.message_and_aggregate(graph, input)
return update
def combine(self, input, update):
"""
Combine node input and node update.
Parameters:
input (Tensor): node representations of shape :math:`(|V|, ...)`
update (Tensor): node updates of shape :math:`(|V|, ...)`
"""
raise NotImplementedError
def forward(self, graph, input):
"""
Perform message passing over the graph(s).
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
"""
if self.gradient_checkpoint:
update = checkpoint.checkpoint(self._message_and_aggregate, *graph.to_tensors(), input)
else:
update = self.message_and_aggregate(graph, input)
output = self.combine(input, update)
return output
class GraphConv(MessagePassingBase):
"""
Graph convolution operator from `Semi-Supervised Classification with Graph Convolutional Networks`_.
.. _Semi-Supervised Classification with Graph Convolutional Networks:
https://arxiv.org/pdf/1609.02907.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, batch_norm=False, activation="relu"):
super(GraphConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.linear = nn.Linear(input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
# add self loop
node_in = torch.cat([graph.edge_list[:, 0], torch.arange(graph.num_node, device=graph.device)])
degree_in = graph.degree_in.unsqueeze(-1) + 1
message = input[node_in]
if self.edge_linear:
edge_input = self.edge_linear(graph.edge_feature.float())
edge_input = torch.cat([edge_input, torch.zeros(graph.num_node, self.input_dim, device=graph.device)])
message += edge_input
message /= degree_in[node_in].sqrt()
return message
def aggregate(self, graph, message):
# add self loop
node_out = torch.cat([graph.edge_list[:, 1], torch.arange(graph.num_node, device=graph.device)])
edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
edge_weight = edge_weight.unsqueeze(-1)
degree_out = graph.degree_out.unsqueeze(-1) + 1
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
update = update / degree_out.sqrt()
return update
def message_and_aggregate(self, graph, input):
node_in, node_out = graph.edge_list.t()[:2]
node_in = torch.cat([node_in, torch.arange(graph.num_node, device=graph.device)])
node_out = torch.cat([node_out, torch.arange(graph.num_node, device=graph.device)])
edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
degree_in = graph.degree_in + 1
degree_out = graph.degree_out + 1
edge_weight = edge_weight / (degree_in[node_in] * degree_out[node_out]).sqrt()
adjacency = utils.sparse_coo_tensor(torch.stack([node_in, node_out]), edge_weight,
(graph.num_node, graph.num_node))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_weight = edge_weight.unsqueeze(-1)
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.linear(update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class GraphAttentionConv(MessagePassingBase):
"""
Graph attentional convolution operator from `Graph Attention Networks`_.
.. _Graph Attention Networks:
https://arxiv.org/pdf/1710.10903.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
num_head (int, optional): number of attention heads
negative_slope (float, optional): negative slope of leaky relu activation
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
eps = 1e-10
def __init__(self, input_dim, output_dim, edge_input_dim=None, num_head=1, negative_slope=0.2, concat=True,
batch_norm=False, activation="relu"):
super(GraphAttentionConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
self.num_head = num_head
self.concat = concat
self.leaky_relu = functools.partial(F.leaky_relu, negative_slope=negative_slope)
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
if output_dim % num_head != 0:
raise ValueError("Expect output_dim to be a multiplier of num_head, but found `%d` and `%d`"
% (output_dim, num_head))
self.linear = nn.Linear(input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, output_dim)
else:
self.edge_linear = None
self.query = nn.Parameter(torch.zeros(num_head, output_dim * 2 // num_head))
nn.init.kaiming_uniform_(self.query, negative_slope, mode="fan_in")
def message(self, graph, input):
# add self loop
node_in = torch.cat([graph.edge_list[:, 0], torch.arange(graph.num_node, device=graph.device)])
node_out = torch.cat([graph.edge_list[:, 1], torch.arange(graph.num_node, device=graph.device)])
edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
edge_weight = edge_weight.unsqueeze(-1)
hidden = self.linear(input)
key = torch.stack([hidden[node_in], hidden[node_out]], dim=-1)
if self.edge_linear:
edge_input = self.edge_linear(graph.edge_feature.float())
edge_input = torch.cat([edge_input, torch.zeros(graph.num_node, self.output_dim, device=graph.device)])
key += edge_input.unsqueeze(-1)
key = key.view(-1, *self.query.shape)
weight = torch.einsum("hd, nhd -> nh", self.query, key)
weight = self.leaky_relu(weight)
weight = weight - scatter_max(weight, node_out, dim=0, dim_size=graph.num_node)[0][node_out]
attention = weight.exp() * edge_weight
# why mean? because with mean we have normalized message scale across different node degrees
normalizer = scatter_mean(attention, node_out, dim=0, dim_size=graph.num_node)[node_out]
attention = attention / (normalizer + self.eps)
value = hidden[node_in].view(-1, self.num_head, self.query.shape[-1] // 2)
attention = attention.unsqueeze(-1).expand_as(value)
message = (attention * value).flatten(1)
return message
def aggregate(self, graph, message):
# add self loop
node_out = torch.cat([graph.edge_list[:, 1], torch.arange(graph.num_node, device=graph.device)])
update = scatter_mean(message, node_out, dim=0, dim_size=graph.num_node)
return update
def combine(self, input, update):
output = update
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class GraphIsomorphismConv(MessagePassingBase):
"""
Graph isomorphism convolution operator from `How Powerful are Graph Neural Networks?`_
.. _How Powerful are Graph Neural Networks?:
https://arxiv.org/pdf/1810.00826.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
hidden_dims (list of int, optional): hidden dimensions
eps (float, optional): initial epsilon
learn_eps (bool, optional): learn epsilon or not
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, hidden_dims=None, eps=0, learn_eps=False,
batch_norm=False, activation="relu"):
super(GraphIsomorphismConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
eps = torch.tensor([eps], dtype=torch.float32)
if learn_eps:
self.eps = nn.Parameter(eps)
else:
self.register_buffer("eps", eps)
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
if hidden_dims is None:
hidden_dims = []
self.mlp = layers.MLP(input_dim, list(hidden_dims) + [output_dim], activation)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
message = input[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def message_and_aggregate(self, graph, input):
adjacency = utils.sparse_coo_tensor(graph.edge_list.t()[:2], graph.edge_weight,
(graph.num_node, graph.num_node))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
edge_weight = graph.edge_weight.unsqueeze(-1)
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.mlp((1 + self.eps) * input + update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class RelationalGraphConv(MessagePassingBase):
"""
Relational graph convolution operator from `Modeling Relational Data with Graph Convolutional Networks`_.
.. _Modeling Relational Data with Graph Convolutional Networks:
https://arxiv.org/pdf/1703.06103.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
num_relation (int): number of relations
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
eps = 1e-10
def __init__(self, input_dim, output_dim, num_relation, edge_input_dim=None, batch_norm=False, activation="relu"):
super(RelationalGraphConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_relation = num_relation
self.edge_input_dim = edge_input_dim
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.self_loop = nn.Linear(input_dim, output_dim)
self.linear = nn.Linear(num_relation * input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
message = input[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
return message
def aggregate(self, graph, message):
assert graph.num_relation == self.num_relation
node_out = graph.edge_list[:, 1] * self.num_relation + graph.edge_list[:, 2]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node * self.num_relation) / \
(scatter_add(edge_weight, node_out, dim=0, dim_size=graph.num_node * self.num_relation) + self.eps)
return update.view(graph.num_node, self.num_relation * self.input_dim)
def message_and_aggregate(self, graph, input):
assert graph.num_relation == self.num_relation
node_in, node_out, relation = graph.edge_list.t()
node_out = node_out * self.num_relation + relation
degree_out = scatter_add(graph.edge_weight, node_out, dim_size=graph.num_node * graph.num_relation)
edge_weight = graph.edge_weight / degree_out[node_out]
adjacency = utils.sparse_coo_tensor(torch.stack([node_in, node_out]), edge_weight,
(graph.num_node, graph.num_node * graph.num_relation))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_weight = edge_weight.unsqueeze(-1)
edge_update = scatter_add(edge_input * edge_weight, node_out, dim=0,
dim_size=graph.num_node * graph.num_relation)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update.view(graph.num_node, self.num_relation * self.input_dim)
def combine(self, input, update):
output = self.linear(update) + self.self_loop(input)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class NeuralFingerprintConv(MessagePassingBase):
"""
Graph neural network operator from `Convolutional Networks on Graphs for Learning Molecular Fingerprints`_.
Note this operator doesn't include the sparsifying step of the original paper.
.. _Convolutional Networks on Graphs for Learning Molecular Fingerprints:
https://arxiv.org/pdf/1509.09292.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, batch_norm=False, activation="relu"):
super(NeuralFingerprintConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.linear = nn.Linear(input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
message = input[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def message_and_aggregate(self, graph, input):
adjacency = utils.sparse_coo_tensor(graph.edge_list.t()[:2], graph.edge_weight,
(graph.num_node, graph.num_node))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
edge_weight = graph.edge_weight.unsqueeze(-1)
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.linear(input + update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class ContinuousFilterConv(MessagePassingBase):
"""
Continuous filter operator from
`SchNet: A continuous-filter convolutional neural network for modeling quantum interactions`_.
.. _SchNet\: A continuous-filter convolutional neural network for modeling quantum interactions:
https://arxiv.org/pdf/1706.08566.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
hidden_dim (int, optional): hidden dimension. By default, same as :attr:`output_dim`
cutoff (float, optional): maximal scale for RBF kernels
num_gaussian (int, optional): number of RBF kernels
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, hidden_dim=None, cutoff=5, num_gaussian=100,
batch_norm=False, activation="shifted_softplus"):
super(ContinuousFilterConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
if hidden_dim is None:
hidden_dim = output_dim
self.hidden_dim = hidden_dim
self.rbf = layers.RBF(stop=cutoff, num_kernel=num_gaussian)
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if activation == "shifted_softplus":
self.activation = functional.shifted_softplus
elif isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.input_layer = nn.Linear(input_dim, hidden_dim)
self.rbf_layer = nn.Linear(num_gaussian, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in, node_out = graph.edge_list.t()[:2]
position = graph.node_position
message = self.input_layer(input)[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
weight = self.rbf_layer(self.rbf(position[node_in], position[node_out]))
message *= weight
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def message_and_aggregate(self, graph, input):
node_in, node_out = graph.edge_list.t()[:2]
position = graph.node_position
rbf_weight = self.rbf_layer(self.rbf(position[node_in], position[node_out]))
indices = torch.stack([node_out, node_in, torch.arange(graph.num_edge, device=graph.device)])
adjacency = utils.sparse_coo_tensor(indices, graph.edge_weight, (graph.num_node, graph.num_node, graph.num_edge))
update = functional.generalized_rspmm(adjacency, rbf_weight, self.input_layer(input))
if self.edge_linear:
edge_input = graph.edge_feature.float()
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_weight = graph.edge_weight.unsqueeze(-1) * rbf_weight
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.output_layer(update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class MessagePassing(MessagePassingBase):
"""
Message passing operator from `Neural Message Passing for Quantum Chemistry`_.
This implements the edge network variant in the original paper.
.. _Neural Message Passing for Quantum Chemistry:
https://arxiv.org/pdf/1704.01212.pdf
Parameters:
input_dim (int): input dimension
edge_input_dim (int): dimension of edge features
hidden_dims (list of int, optional): hidden dims of edge network
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, edge_input_dim, hidden_dims=None, batch_norm=False, activation="relu"):
super(MessagePassing, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim
self.edge_input_dim = edge_input_dim
if hidden_dims is None:
hidden_dims = []
if batch_norm:
self.batch_norm = nn.BatchNorm1d(input_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.edge_mlp = layers.MLP(edge_input_dim, list(hidden_dims) + [input_dim * input_dim], activation)
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
transform = self.edge_mlp(graph.edge_feature.float()).view(-1, self.input_dim, self.input_dim)
if graph.num_edge:
message = torch.einsum("bed, bd -> be", transform, input[node_in])
else:
message = torch.zeros(0, self.input_dim, device=graph.device)
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def combine(self, input, update):
output = update
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class ChebyshevConv(MessagePassingBase):
    """
    Chebyshev spectral graph convolution operator from
    `Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering`_.

    .. _Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering:
        https://arxiv.org/pdf/1606.09375.pdf

    Parameters:
        input_dim (int): input dimension
        output_dim (int): output dimension
        edge_input_dim (int, optional): dimension of edge features
        k (int, optional): number of Chebyshev polynomials.
            This also corresponds to the radius of the receptive field.
        batch_norm (bool, optional): apply batch normalization on nodes or not
        activation (str or function, optional): activation function
    """

    def __init__(self, input_dim, output_dim, edge_input_dim=None, k=1, batch_norm=False, activation="relu"):
        super(ChebyshevConv, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.k = k
        self.edge_input_dim = edge_input_dim
        if batch_norm:
            self.batch_norm = nn.BatchNorm1d(output_dim)
        else:
            self.batch_norm = None
        if isinstance(activation, str):
            self.activation = getattr(F, activation)
        else:
            self.activation = activation
        # Chebyshev bases T_0 .. T_k are concatenated before this projection.
        self.linear = nn.Linear((k + 1) * input_dim, output_dim)
        if edge_input_dim:
            self.edge_linear = nn.Linear(edge_input_dim, input_dim)
        else:
            self.edge_linear = None

    def message(self, graph, input):
        """Per-edge messages: degree-normalized source node features (+ edge features)."""
        node_in = graph.edge_list[:, 0]
        degree_in = graph.degree_in.unsqueeze(-1)
        # because self-loop messages have a different scale, they are processed in combine()
        message = input[node_in]
        if self.edge_linear:
            message += self.edge_linear(graph.edge_feature.float())
        message /= degree_in[node_in].sqrt()
        return message

    def aggregate(self, graph, message):
        """Aggregate messages with the (negated) symmetric normalization of the Laplacian."""
        node_out = graph.edge_list[:, 1]
        edge_weight = graph.edge_weight.unsqueeze(-1)
        degree_out = graph.degree_out.unsqueeze(-1)
        # because self-loop messages have a different scale, they are processed in combine()
        update = -scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
        update = update / degree_out.sqrt()
        return update

    def message_and_aggregate(self, graph, input):
        """Fused message + aggregation via a sparse matrix multiply."""
        node_in, node_out = graph.edge_list.t()[:2]
        edge_weight = -graph.edge_weight / (graph.degree_in[node_in] * graph.degree_out[node_out]).sqrt()
        adjacency = utils.sparse_coo_tensor(graph.edge_list.t()[:2], edge_weight, (graph.num_node, graph.num_node))
        update = torch.sparse.mm(adjacency.t(), input)
        if self.edge_linear:
            edge_input = graph.edge_feature.float()
            # project before or after scatter depending on which side is cheaper
            if self.edge_linear.in_features > self.edge_linear.out_features:
                edge_input = self.edge_linear(edge_input)
            edge_weight = edge_weight.unsqueeze(-1)
            edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
                                      dim_size=graph.num_node)
            if self.edge_linear.in_features <= self.edge_linear.out_features:
                edge_update = self.edge_linear(edge_update)
            update += edge_update
        return update

    def forward(self, graph, input):
        """Compute Chebyshev bases T_0..T_k by recurrence, concatenate, project."""
        # Chebyshev polynomial bases: T_{i+1} = 2 * L * T_i - T_{i-1}
        bases = [input]
        for i in range(self.k):
            x = super(ChebyshevConv, self).forward(graph, bases[-1])
            if i > 0:
                x = 2 * x - bases[-2]
            bases.append(x)
        bases = torch.cat(bases, dim=-1)
        output = self.linear(bases)
        if self.batch_norm:
            # fix: previously the normalized tensor was assigned to a dead
            # local `x`, so batch normalization was silently discarded
            output = self.batch_norm(output)
        if self.activation:
            output = self.activation(output)
        return output

    def combine(self, input, update):
        """Add the self-loop contribution (identity term of the Laplacian)."""
        output = input + update
        return output
| 41.081841
| 121
| 0.640976
| 3,995
| 32,126
| 4.927409
| 0.075094
| 0.041859
| 0.049784
| 0.02032
| 0.764084
| 0.745339
| 0.721514
| 0.690323
| 0.665481
| 0.661468
| 0
| 0.007907
| 0.263867
| 32,126
| 781
| 122
| 41.134443
| 0.824475
| 0.20183
| 0
| 0.725296
| 0
| 0
| 0.006754
| 0
| 0
| 0
| 0
| 0
| 0.003953
| 1
| 0.088933
| false
| 0.019763
| 0.01581
| 0
| 0.195652
| 0.003953
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
433e9e9e80a4303c7e141d6834d195f78e013a25
| 3,899
|
py
|
Python
|
metalibm_core/core/advanced_operations.py
|
kalray/metalibm
|
e331ee4a1b3df9ebdf581453852ac019d7c1b6da
|
[
"MIT"
] | 27
|
2018-03-12T16:49:36.000Z
|
2021-12-15T06:53:55.000Z
|
metalibm_core/core/advanced_operations.py
|
kalray/metalibm
|
e331ee4a1b3df9ebdf581453852ac019d7c1b6da
|
[
"MIT"
] | 57
|
2018-03-12T16:49:56.000Z
|
2021-03-04T15:25:39.000Z
|
metalibm_core/core/advanced_operations.py
|
kalray/metalibm
|
e331ee4a1b3df9ebdf581453852ac019d7c1b6da
|
[
"MIT"
] | 4
|
2018-03-12T15:40:22.000Z
|
2018-11-28T14:34:54.000Z
|
# -*- coding: utf-8 -*-
## @package advanced_operations
# Metalibm Description Language advanced Operations
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
###############################################################################
# This file is part of the new Metalibm tool
# created: Aug 9th, 2017
# last-modified: Mar 8th, 2018
#
# author(s): Nicolas Brunie (nbrunie@kalray.eu)
###############################################################################
from metalibm_core.core.ml_operations import (
SpecifierOperation, empty_range,
GeneralOperation,
ML_ArithmeticOperation,
)
class FixedPointPosition(ML_ArithmeticOperation):
    """ Dynamic FixedPointPosition evaluator node
        convert to a constant during code generation, once input
        format has been determined """
    name = "FixedPointPosition"

    def range_function(self, ops, ops_interval_getter=lambda op: op.get_interval()):
        # The value is only known at code generation time, so no interval
        # can be reported beforehand.
        return None

    class FromMSBToLSB:
        """ offset is given from MSB downward.
            The node returns the index of position (MSB - offset) from LSB
        """
        pass

    class FromLSBToLSB:
        """ offset is given from LSB upward.
            The node returns the position of index (LSB + offset) from LSB
            (i.e result = offset)
        """
        pass

    class FromPointToLSB:
        """ The offset is given from point position upward.
            The node returns the position of index (point + offset) from LSB
        """
        pass

    class FromPointToMSB:
        """ The offset is given from point position upward.
            The node returns the position of (point + offset) from MSB.
            The result is expected to be negative
        """
        pass

    def __init__(self, op, position, align=FromLSBToLSB, **kwords):
        # Fix: call the base constructor by name. The previous
        # self.__class__.__base__.__init__ resolved against the dynamic type,
        # which recurses infinitely for any subclass of FixedPointPosition.
        ML_ArithmeticOperation.__init__(self, op, position, **kwords)
        self.align = align

    def get_align(self):
        """ Return the alignment convention (one of the From* marker classes). """
        return self.align

    def finish_copy(self, new_copy, copy_map=None):
        # Propagate the alignment attribute when the node is copied.
        new_copy.align = self.align
class PlaceHolder(GeneralOperation):
    """Variadic node that evaluates to its first input.

    For all purposes the node is equivalent to its main (first) input,
    but it keeps references to every extra input it was built with.
    """
    name = "PlaceHolder"

    def __init__(self, *args, **kw):
        GeneralOperation.__init__(self, *args, **kw)

    def get_main_input(self):
        """Return the input this node stands for (input #0)."""
        return self.get_input(0)

    def get_precision(self):
        """Forward the precision of the main input."""
        main = self.get_main_input()
        return main.get_precision()
| 37.854369
| 84
| 0.620415
| 461
| 3,899
| 5.136659
| 0.444685
| 0.037162
| 0.021959
| 0.028716
| 0.105997
| 0.073902
| 0.073902
| 0.073902
| 0.055743
| 0.055743
| 0
| 0.00521
| 0.212362
| 3,899
| 102
| 85
| 38.22549
| 0.765874
| 0.542703
| 0
| 0.125
| 0
| 0
| 0.024555
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.21875
| false
| 0.125
| 0.03125
| 0.125
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
4a32263dc33654dd970020bbf524b4d9f6cff4b0
| 600
|
py
|
Python
|
task_manager/shell.py
|
natszp/Tasker
|
263ba508b75bca939ad5879db0ccc2a1ce1c39a4
|
[
"MIT"
] | null | null | null |
task_manager/shell.py
|
natszp/Tasker
|
263ba508b75bca939ad5879db0ccc2a1ce1c39a4
|
[
"MIT"
] | null | null | null |
task_manager/shell.py
|
natszp/Tasker
|
263ba508b75bca939ad5879db0ccc2a1ce1c39a4
|
[
"MIT"
] | null | null | null |
from manager.models import Task
from datetime import datetime
# Seed script: creates four sample Task rows via the Django ORM.
# Intended to be run inside a Django shell (e.g. `manage.py shell`);
# names/descriptions are Polish sample data and are left as-is.
task1 = Task.objects.create(name="zrobic koalcje", description="pożywna i staropolskia", date_created=datetime.now(), importance=False)
task2 = Task.objects.create(name="zrobic zakupy", description="warzywa w Lidlu i miesny", date_created=datetime.now(), importance=True)
task3 = Task.objects.create(name="posprzatać kuchnie", description="wymienic zapach w zmywarce", date_created=datetime.now(), importance=False)
task4 = Task.objects.create(name="odebrac poczte", description="na cito!", date_created=datetime.now(), importance=True)
| 75
| 143
| 0.791667
| 79
| 600
| 5.962025
| 0.493671
| 0.093418
| 0.144374
| 0.178344
| 0.424628
| 0.309979
| 0
| 0
| 0
| 0
| 0
| 0.007233
| 0.078333
| 600
| 7
| 144
| 85.714286
| 0.844485
| 0
| 0
| 0
| 0
| 0
| 0.231667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4a3b848bde2e2dc5e18626e7c84b91315bb787ee
| 351
|
py
|
Python
|
sst/tests/test_utils/logger_utils.py
|
Adamage/tutorials
|
b6600c052613909dbec378fea4a69deff46004dc
|
[
"MIT"
] | null | null | null |
sst/tests/test_utils/logger_utils.py
|
Adamage/tutorials
|
b6600c052613909dbec378fea4a69deff46004dc
|
[
"MIT"
] | 78
|
2021-09-20T11:48:08.000Z
|
2021-10-21T07:10:39.000Z
|
sst/tests/test_utils/logger_utils.py
|
Adamage/tutorials
|
b6600c052613909dbec378fea4a69deff46004dc
|
[
"MIT"
] | null | null | null |
import logging
def disable_logging():
    """Silence every log record at or below CRITICAL (i.e. all of them)."""
    threshold = logging.CRITICAL
    logging.disable(threshold)
def enable_logging():
    """Undo a previous logging.disable() call by resetting the global threshold."""
    logging.disable(level=logging.NOTSET)
class MockHandler(logging.Handler):
    """Test double handler that appends formatted messages to a caller-supplied list."""

    def __init__(self, stream):
        logging.Handler.__init__(self)
        # the caller keeps a reference to this list and inspects it later
        self.log_records = stream

    def emit(self, record):
        message = record.getMessage()
        self.log_records.append(message)
| 18.473684
| 52
| 0.692308
| 40
| 351
| 5.775
| 0.5
| 0.181818
| 0.181818
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193732
| 351
| 18
| 53
| 19.5
| 0.816254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
4a493da45c1fcd153742053cfc6c491cb42b91c8
| 31
|
py
|
Python
|
tests/__init__.py
|
toddrme2178/pandas_ext
|
6e5fd5aa3e567dec641014dfaee3c9f616f7e057
|
[
"MIT"
] | 4
|
2018-10-04T19:59:28.000Z
|
2020-09-12T01:47:40.000Z
|
tests/__init__.py
|
toddrme2178/pandas_ext
|
6e5fd5aa3e567dec641014dfaee3c9f616f7e057
|
[
"MIT"
] | 11
|
2019-01-09T17:32:24.000Z
|
2019-05-09T16:01:00.000Z
|
tests/__init__.py
|
toddrme2178/pandas_ext
|
6e5fd5aa3e567dec641014dfaee3c9f616f7e057
|
[
"MIT"
] | 1
|
2019-12-03T21:16:26.000Z
|
2019-12-03T21:16:26.000Z
|
"""Initialize test package."""
| 15.5
| 30
| 0.677419
| 3
| 31
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.75
| 0.774194
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a4ca06fd216c39984feab2385ed1e02123c4370
| 574
|
py
|
Python
|
special_process/process_flume.py
|
linlife/Nagios
|
edd60b218ffcc4569a2d07fb42f4e2752fac3f15
|
[
"Apache-2.0"
] | null | null | null |
special_process/process_flume.py
|
linlife/Nagios
|
edd60b218ffcc4569a2d07fb42f4e2752fac3f15
|
[
"Apache-2.0"
] | null | null | null |
special_process/process_flume.py
|
linlife/Nagios
|
edd60b218ffcc4569a2d07fb42f4e2752fac3f15
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2.7
# Nagios-style check (Python 2): verify the Flume agent JVM is running.
# Exit 0 = OK, exit 2 = CRITICAL (standard Nagios return codes).
import os
import sys
# Full java command line used to launch Flume, matched as a substring of
# `ps -ef` output.  NOTE(review): the string starts with 'usr/...' (no
# leading '/'); substring matching still succeeds against the absolute
# path, but confirm this is intentional.
str1='usr/lib/jvm/java-1.7.0/bin/java -Xmx1024m -cp /opt/app/logcollector/flume/conf:/opt/app/logcollector/flume/lib/*:/opt/app/logcollector/flume/plugins.d/kafka-sink/lib/*:/opt/app/logcollector/flume/plugins.d/kafka-sink/libext/* -Djava.library.path= org.apache.flume.node.Application'
with os.popen('ps -ef | grep flume ') as f:
    dt=f.readlines()
# Collapse all process lines into one haystack string for the substring test.
data=''.join(dt)
if str1 in data:
    # process found -> Nagios OK
    print 'Process flume is on working now !'
    sys.exit(0)
else:
    # process missing -> Nagios CRITICAL
    print 'Critical Process flume is stoped !!!'
    sys.exit(2)
| 31.888889
| 287
| 0.702091
| 96
| 574
| 4.197917
| 0.59375
| 0.059553
| 0.17866
| 0.228288
| 0.2134
| 0.2134
| 0.2134
| 0.2134
| 0.2134
| 0
| 0
| 0.025948
| 0.127178
| 574
| 17
| 288
| 33.764706
| 0.778443
| 0.04007
| 0
| 0
| 0
| 0.083333
| 0.670909
| 0.441818
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.166667
| null | null | 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a5972cfb6b36acac3107ef07a192ca15c0f57b1
| 8,001
|
py
|
Python
|
sdk/python/pulumi_aws/servicediscovery/_inputs.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/servicediscovery/_inputs.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/servicediscovery/_inputs.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ServiceDnsConfigArgs',
'ServiceDnsConfigDnsRecordArgs',
'ServiceHealthCheckConfigArgs',
'ServiceHealthCheckCustomConfigArgs',
]
@pulumi.input_type
class ServiceDnsConfigArgs:
    """Input type for the DNS configuration of a Service Discovery service.
    Generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__, *,
                 dns_records: pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]],
                 namespace_id: pulumi.Input[str],
                 routing_policy: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]] dns_records: An array that contains one DnsRecord object for each resource record set.
        :param pulumi.Input[str] namespace_id: The ID of the namespace to use for DNS configuration.
        :param pulumi.Input[str] routing_policy: The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
        """
        pulumi.set(__self__, "dns_records", dns_records)
        pulumi.set(__self__, "namespace_id", namespace_id)
        if routing_policy is not None:
            pulumi.set(__self__, "routing_policy", routing_policy)
    @property
    @pulumi.getter(name="dnsRecords")
    def dns_records(self) -> pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]]:
        """
        An array that contains one DnsRecord object for each resource record set.
        """
        return pulumi.get(self, "dns_records")
    @dns_records.setter
    def dns_records(self, value: pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]]):
        pulumi.set(self, "dns_records", value)
    @property
    @pulumi.getter(name="namespaceId")
    def namespace_id(self) -> pulumi.Input[str]:
        """
        The ID of the namespace to use for DNS configuration.
        """
        return pulumi.get(self, "namespace_id")
    @namespace_id.setter
    def namespace_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace_id", value)
    @property
    @pulumi.getter(name="routingPolicy")
    def routing_policy(self) -> Optional[pulumi.Input[str]]:
        """
        The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
        """
        return pulumi.get(self, "routing_policy")
    @routing_policy.setter
    def routing_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "routing_policy", value)
@pulumi.input_type
class ServiceDnsConfigDnsRecordArgs:
    """Input type for one DNS record of a Service Discovery service.
    Generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__, *,
                 ttl: pulumi.Input[int],
                 type: pulumi.Input[str]):
        """
        :param pulumi.Input[int] ttl: The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
        :param pulumi.Input[str] type: The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
        """
        pulumi.set(__self__, "ttl", ttl)
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def ttl(self) -> pulumi.Input[int]:
        """
        The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
        """
        return pulumi.get(self, "ttl")
    @ttl.setter
    def ttl(self, value: pulumi.Input[int]):
        pulumi.set(self, "ttl", value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ServiceHealthCheckConfigArgs:
    """Input type for the Route 53 health check settings of a service.
    Generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__, *,
                 failure_threshold: Optional[pulumi.Input[int]] = None,
                 resource_path: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] failure_threshold: The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
        :param pulumi.Input[str] resource_path: The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
        :param pulumi.Input[str] type: The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
        """
        if failure_threshold is not None:
            pulumi.set(__self__, "failure_threshold", failure_threshold)
        if resource_path is not None:
            pulumi.set(__self__, "resource_path", resource_path)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
        """
        return pulumi.get(self, "failure_threshold")
    @failure_threshold.setter
    def failure_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "failure_threshold", value)
    @property
    @pulumi.getter(name="resourcePath")
    def resource_path(self) -> Optional[pulumi.Input[str]]:
        """
        The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
        """
        return pulumi.get(self, "resource_path")
    @resource_path.setter
    def resource_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_path", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ServiceHealthCheckCustomConfigArgs:
    """Input type for the custom (third-party) health check settings of a service.
    Generated by the Pulumi Terraform Bridge (tfgen); do not edit by hand.
    """
    def __init__(__self__, *,
                 failure_threshold: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] failure_threshold: The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
        """
        if failure_threshold is not None:
            pulumi.set(__self__, "failure_threshold", failure_threshold)
    @property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
        """
        return pulumi.get(self, "failure_threshold")
    @failure_threshold.setter
    def failure_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "failure_threshold", value)
| 43.016129
| 233
| 0.67679
| 1,011
| 8,001
| 5.222552
| 0.148368
| 0.091667
| 0.05303
| 0.0375
| 0.80303
| 0.710985
| 0.641856
| 0.617424
| 0.586742
| 0.568561
| 0
| 0.005979
| 0.226597
| 8,001
| 185
| 234
| 43.248649
| 0.847285
| 0.373703
| 0
| 0.40367
| 1
| 0
| 0.120206
| 0.03814
| 0
| 0
| 0
| 0
| 0
| 1
| 0.201835
| false
| 0
| 0.045872
| 0
| 0.366972
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a7b0c76f22f7bd0ff38b3f5cc2d57868cd3ca54
| 159
|
py
|
Python
|
Image-Editor/src/Functions/Functions.py
|
TheCodingJungle/Python-Projects
|
eaec5b363b190fb013bcacbed48410c4b338fcc5
|
[
"MIT"
] | 5
|
2021-02-08T13:53:16.000Z
|
2021-09-20T05:14:19.000Z
|
Image-Editor/src/Functions/Functions.py
|
TheCodingJungle/Python-Projects
|
eaec5b363b190fb013bcacbed48410c4b338fcc5
|
[
"MIT"
] | 1
|
2021-07-29T20:00:34.000Z
|
2021-07-29T20:00:34.000Z
|
Image-Editor/src/Functions/Functions.py
|
TheCodingJungle/Python-Projects
|
eaec5b363b190fb013bcacbed48410c4b338fcc5
|
[
"MIT"
] | 1
|
2021-08-31T04:22:17.000Z
|
2021-08-31T04:22:17.000Z
|
# This file helps in importing the functions.
from Crop import crop
from Resize import resize
from writeImage import writeImage
from readImage import readImage
| 26.5
| 44
| 0.836478
| 23
| 159
| 5.782609
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150943
| 159
| 6
| 45
| 26.5
| 0.985185
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4a8ee5737d82130c4f63b558717f9be85ad0b4cb
| 70
|
py
|
Python
|
grortir/externals/__init__.py
|
wojtekPi/grortir
|
0ef8b495527a4f3861e5df5db756d0ee3ed4aa6f
|
[
"MIT"
] | null | null | null |
grortir/externals/__init__.py
|
wojtekPi/grortir
|
0ef8b495527a4f3861e5df5db756d0ee3ed4aa6f
|
[
"MIT"
] | null | null | null |
grortir/externals/__init__.py
|
wojtekPi/grortir
|
0ef8b495527a4f3861e5df5db756d0ee3ed4aa6f
|
[
"MIT"
] | null | null | null |
"""Package contains modified external modules."""
# pylint: skip-file
| 23.333333
| 49
| 0.742857
| 8
| 70
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 70
| 2
| 50
| 35
| 0.83871
| 0.885714
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4a97ad5e7eb4f1fb5b934b4567d0e119e4ec23a1
| 26,313
|
py
|
Python
|
tests/core/test_test.py
|
Racerinorbit/golem
|
b02c7acaed6e84ff565e34e8626e835ec451e2e4
|
[
"MIT"
] | null | null | null |
tests/core/test_test.py
|
Racerinorbit/golem
|
b02c7acaed6e84ff565e34e8626e835ec451e2e4
|
[
"MIT"
] | null | null | null |
tests/core/test_test.py
|
Racerinorbit/golem
|
b02c7acaed6e84ff565e34e8626e835ec451e2e4
|
[
"MIT"
] | null | null | null |
import os
import sys
import pytest
from golem.core import test as test_module, settings_manager
from golem.core.project import Project
from golem.core.test import Test
SAMPLE_TEST_CONTENT = """
description = 'some description'
tags = []
data = [{'a': 'b'}]
pages = ['page1', 'page2']
def setup(data):
page1.func1()
def test(data):
page2.func2('a', 'b')
click(page2.elem1)
def teardown(data):
pass
"""
NEW_TEST_CONTENT = """
description = ''
tags = []
pages = []
def setup(data):
pass
def test(data):
pass
def teardown(data):
pass
"""
EMPTY_STEPS = {'setup': [], 'test': [], 'teardown': []}
class TestCreateTest:
    """Tests for test_module.create_test (name validation, folder creation)."""
    def test_create_test(self, project_session, test_utils):
        # A fresh random name succeeds and the new file holds the default content.
        _, project = project_session.activate()
        test_name = test_utils.random_string()
        errors = test_module.create_test(project, test_name)
        test = Test(project, test_name)
        assert test.exists
        assert errors == []
        assert test.code == NEW_TEST_CONTENT
    def test_create_test_name_exists(self, project_session, test_utils):
        # Creating the same test twice reports a duplicate-name error.
        _, project = project_session.activate()
        test_name = test_utils.random_string()
        test_module.create_test(project, test_name)
        errors = test_module.create_test(project, test_name)
        assert errors == ['A test with that name already exists']
    def test_create_test_invalid_name(self, project_session):
        # Each class of invalid name yields its specific error message.
        _, project = project_session.activate()
        # invalid chars
        invalid_names = [
            'te-st',
            'te st',
            'te?st',
            'test. .test'
        ]
        for name in invalid_names:
            errors = test_module.create_test(project, name)
            assert errors == ['Only letters, numbers and underscores are allowed']
        # empty directory
        invalid_names = [
            '.test',
            'test..test',
        ]
        for name in invalid_names:
            errors = test_module.create_test(project, name)
            assert errors == ['Directory name cannot be empty']
        # empty file name
        invalid_names = [
            '',
            'test.',
        ]
        for name in invalid_names:
            errors = test_module.create_test(project, name)
            assert errors == ['File name cannot be empty']
    def test_create_test_into_folder(self, project_session, test_utils):
        # Dotted names create intermediate folders, each with an __init__.py.
        _, project = project_session.activate()
        random_dir = test_utils.random_string()
        # to folder
        test_name = '{}.test001'.format(random_dir)
        errors = test_module.create_test(project, test_name)
        assert errors == []
        # verify that each parent dir has __init__.py file
        init_path = os.path.join(Project(project).test_directory_path,
                                 random_dir, '__init__.py')
        assert test_name in Project(project).tests()
        assert os.path.isfile(init_path)
        # to sub-folder
        random_dir = test_utils.random_string()
        random_subdir = test_utils.random_string()
        test_name = '{}.{}.test001'.format(random_dir, random_subdir)
        errors = test_module.create_test(project, test_name)
        assert errors == []
        assert test_name in Project(project).tests()
        # verify that each parent dir has __init__.py file
        init_path = os.path.join(Project(project).test_directory_path,
                                 random_dir, '__init__.py')
        assert os.path.isfile(init_path)
        init_path = os.path.join(Project(project).test_directory_path,
                                 random_dir, random_subdir, '__init__.py')
        assert os.path.isfile(init_path)
class TestRenameTest:
    """Tests for test_module.rename_test (happy path, validation, data files)."""
    def test_rename_test(self, project_session, test_utils):
        # Renaming replaces the old name with the new one in the project listing.
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name not in tests
        assert new_test_name in tests
    def test_rename_test_in_folder(self, project_session, test_utils):
        # Renaming works within a folder and into a new, non-existent folder.
        _, project = project_session.activate()
        dir = test_utils.random_string()
        name = test_utils.random_string()
        test_name = '{}.{}'.format(dir, name)
        test_utils.create_test(project, test_name)
        # rename within same folder
        new_name = test_utils.random_string()
        new_test_name = '{}.{}'.format(dir, new_name)
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name not in tests
        assert new_test_name in tests
        # rename to another non existent folder
        test_name = new_test_name
        name = new_name
        new_dir = test_utils.random_string()
        new_test_name = '{}.{}'.format(new_dir, name)
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name not in tests
        assert new_test_name in tests
    def test_rename_test_invalid_name(self, project_session, test_utils):
        # Invalid destinations report an error and leave the test unrenamed.
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        # invalid chars
        new_test_name = 'new-name'
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['Only letters, numbers and underscores are allowed']
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name not in tests
        # empty filename
        new_test_name = 'test.'
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['File name cannot be empty']
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name not in tests
        # empty directory
        new_test_name = 'test..test'
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['Directory name cannot be empty']
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name not in tests
    def test_rename_test_src_does_not_exist(self, project_session, test_utils):
        # Renaming a non-existent test reports an error and creates nothing.
        _, project = project_session.activate()
        test_name = test_utils.random_string()
        new_test_name = test_utils.random_string()
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['Test {} does not exist'.format(test_name)]
        assert new_test_name not in Project(project).tests()
    def test_rename_test_with_data_file(self, project_session, test_utils):
        """Assert when a test has a data file the data file is renamed as well"""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        data_path = os.path.splitext(Test(project, test_name).path)[0] + '.csv'
        with open(data_path, 'w+') as f:
            f.write('')
        new_data_path = os.path.splitext(Test(project, new_test_name).path)[0] + '.csv'
        test_module.rename_test(project, test_name, new_test_name)
        assert not os.path.isfile(data_path)
        assert os.path.isfile(new_data_path)
    def test_rename_dest_exists(self, project_session, test_utils):
        # Renaming onto an existing name (or onto itself) reports an error.
        _, project = project_session.activate()
        dir = test_utils.random_string()
        name_one = test_utils.random_string()
        test_one = '{}.{}'.format(dir, name_one)
        name_two = test_utils.random_string()
        test_two = '{}.{}'.format(dir, name_two)
        test_utils.create_test(project, test_one)
        test_utils.create_test(project, test_two)
        # rename test to existing test name
        errors = test_module.rename_test(project, test_one, test_two)
        assert errors == ['A file with that name already exists']
        # rename test to same name
        errors = test_module.rename_test(project, test_one, test_one)
        assert errors == ['A file with that name already exists']
    @pytest.mark.skipif("os.name != 'nt'")
    def test_rename_test_test_is_open(self, project_session, test_utils):
        """Try to rename a test while it is open"""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        with open(Test(project, test_name).path) as f:
            errors = test_module.rename_test(project, test_name, new_test_name)
            assert errors == ['There was an error renaming file']
class TestDuplicateTest:
    """Tests for test_module.duplicate_test."""

    def test_duplicate_test(self, project_session, test_utils):
        """A test can be duplicated at the root and inside a folder."""
        _, project = project_session.activate()
        # duplicate a test located in the project root
        source = test_utils.create_random_test(project)
        copy = test_utils.random_string()
        errors = test_module.duplicate_test(project, source, copy)
        assert errors == []
        listed = Project(project).tests()
        assert source in listed
        assert copy in listed
        # duplicate a test located inside a folder
        folder = test_utils.random_string()
        source = '{}.{}'.format(folder, test_utils.random_string())
        test_utils.create_test(project, source)
        copy = '{}.{}'.format(folder, test_utils.random_string())
        errors = test_module.duplicate_test(project, source, copy)
        assert errors == []
        listed = Project(project).tests()
        assert source in listed
        assert copy in listed

    def test_duplicate_test_same_name(self, project_session, test_utils):
        """Duplicating a test onto its own name is rejected."""
        _, project = project_session.activate()
        name = test_utils.create_random_test(project)
        errors = test_module.duplicate_test(project, name, name)
        assert errors == ['New test name cannot be the same as the original']

    def test_duplicate_test_dest_exists(self, project_session, test_utils):
        """Duplicating onto an existing test name is rejected."""
        _, project = project_session.activate()
        # destination exists in the same folder
        source = test_utils.create_random_test(project)
        existing = test_utils.create_random_test(project)
        errors = test_module.duplicate_test(project, source, existing)
        assert errors == ['A test with that name already exists']
        # destination exists in another folder
        source = test_utils.create_random_test(project)
        existing = '{}.{}'.format(test_utils.random_string(), test_utils.random_string())
        test_utils.create_test(project, existing)
        errors = test_module.duplicate_test(project, source, existing)
        assert errors == ['A test with that name already exists']
        # destination is the source itself
        source = test_utils.create_random_test(project)
        # NOTE(review): this create_test call re-creates an already-existing
        # test and looks like a copy-paste leftover — confirm and remove
        test_utils.create_test(project, existing)
        errors = test_module.duplicate_test(project, source, source)
        assert errors == ['New test name cannot be the same as the original']

    def test_duplicate_test_invalid_name(self, project_session, test_utils):
        """Invalid destination names are rejected with a descriptive error."""
        _, project = project_session.activate()
        source = test_utils.create_random_test(project)
        # name contains a character outside [A-Za-z0-9_]
        errors = test_module.duplicate_test(project, source, 'new-name')
        assert errors == ['Only letters, numbers and underscores are allowed']
        # trailing dot leaves the file name component empty
        errors = test_module.duplicate_test(project, source, 'test.')
        assert errors == ['File name cannot be empty']
        # NOTE(review): this case repeats the previous input verbatim; it was
        # probably meant to exercise an empty *directory* component — confirm
        errors = test_module.duplicate_test(project, source, 'test.')
        assert errors == ['File name cannot be empty']

    def test_duplicate_test_with_data_file(self, project_session, test_utils):
        """Duplicating a test also copies its sibling CSV data file."""
        _, project = project_session.activate()
        source = test_utils.create_random_test(project)
        copy = test_utils.random_string()
        csv_source = os.path.splitext(Test(project, source).path)[0] + '.csv'
        with open(csv_source, 'w+') as data_file:
            data_file.write('')
        csv_copy = os.path.splitext(test_module.Test(project, copy).path)[0] + '.csv'
        test_module.duplicate_test(project, source, copy)
        # the original data file is kept and a copy is created
        assert os.path.isfile(csv_source)
        assert os.path.isfile(csv_copy)
class TestEditTest:
    """Tests for edit_test: regenerating a test file from its components."""
    def test_edit_test_data_infile(self, project_function, test_utils):
        """With test_data=infile the data list is embedded in the test file."""
        _, project = project_function.activate()
        test_name = test_utils.create_random_test(project)
        description = 'description'
        pages = ['page1', 'page2']
        test_steps = {
            'setup': [
                {'type': 'function-call', 'action': 'click', 'parameters': ['elem1']}
            ],
            'test': [
                {'type': 'function-call', 'action': 'send_keys', 'parameters': ['elem2', 'keys']}
            ],
            'teardown': []
        }
        data = [{
            'key': '\'value\''
        }]
        settings_manager.save_project_settings(project, '{"test_data": "infile"}')
        test_module.edit_test(project, test_name, description, pages, test_steps, data, [])
        # the generated file contains a module-level `data` variable; the
        # surrounding quotes of the data value are dropped when serialized
        expected = (
            '\n'
            'description = \'description\'\n'
            '\n'
            'tags = []\n'
            '\n'
            'pages = [\'page1\',\n'
            '         \'page2\']\n'
            '\n'
            'data = [\n'
            '    {\n'
            '        \'key\': \'value\',\n'
            '    },\n'
            ']\n'
            '\n\n'
            'def setup(data):\n'
            '    click(elem1)\n'
            '\n\n'
            'def test(data):\n'
            '    send_keys(elem2, keys)\n'
            '\n\n'
            'def teardown(data):\n'
            '    pass\n')
        with open(Test(project, test_name).path) as f:
            assert f.read() == expected
    def test_edit_test_data_csv(self, project_function, test_utils):
        """With test_data=csv the data goes to a sibling CSV file instead."""
        _, project = project_function.activate()
        test_name = test_utils.create_random_test(project)
        description = 'description'
        pages = []
        test_steps = {
            'setup': [],
            'test': [
                {'type': 'function-call', 'action': 'send_keys', 'parameters': ['elem2', 'keys']}
            ],
            'teardown': []
        }
        data = [{
            'key': '\'value\''
        }]
        settings_manager.save_project_settings(project, '{"test_data": "csv"}')
        test_module.edit_test(project, test_name, description, pages, test_steps, data, [])
        # no `data` variable is written into the test file in csv mode
        expected = (
            '\n'
            'description = \'description\'\n'
            '\n'
            'tags = []\n'
            '\n'
            'pages = []\n'
            '\n\n'
            'def setup(data):\n'
            '    pass\n'
            '\n\n'
            'def test(data):\n'
            '    send_keys(elem2, keys)\n'
            '\n\n'
            'def teardown(data):\n'
            '    pass\n')
        with open(Test(project, test_name).path) as f:
            assert f.read() == expected
        # the data ends up in a CSV file next to the test, quotes preserved
        data_path = os.path.join(Project(project).test_directory_path,
                                 '{}.csv'.format(test_name))
        expected = ('key\n'
                    '\'value\'\n')
        with open(data_path) as f:
            assert f.read() == expected
    def test_edit_test_explicit_page_import(self, project_function, test_utils):
        """With implicit_page_import=false pages are written as imports."""
        _, project = project_function.activate()
        test_name = test_utils.create_random_test(project)
        pages = ['page1', 'module.page2']
        settings_manager.save_project_settings(project, '{"implicit_page_import": false}')
        test_module.edit_test(project, test_name, description='', pages=pages,
                              steps=EMPTY_STEPS, test_data=[], tags=[])
        # dotted page paths become `from projects.<p>.pages.<module> import`
        # lines, and no `pages = [...]` list is written
        expected = ('from projects.{}.pages import page1\n'
                    'from projects.{}.pages.module import page2\n'
                    '\n\n'
                    'description = \'\'\n'
                    '\n'
                    'tags = []\n'
                    '\n\n'
                    'def setup(data):\n'
                    '    pass\n'
                    '\n\n'
                    'def test(data):\n'
                    '    pass\n'
                    '\n\n'
                    'def teardown(data):\n'
                    '    pass\n'.format(project, project))
        with open(Test(project, test_name).path) as f:
            assert f.read() == expected
    def test_edit_test_explicit_action_import(self, project_function, test_utils):
        """With implicit_actions_import=false golem actions are imported."""
        _, project = project_function.activate()
        test_name = test_utils.create_random_test(project)
        settings_manager.save_project_settings(project, '{"implicit_actions_import": false}')
        test_module.edit_test(project, test_name, description='', pages=[],
                              steps=EMPTY_STEPS, test_data=[], tags=[])
        expected = ('from golem import actions\n\n\n'
                    'description = \'\'\n\n'
                    'tags = []\n\n'
                    'pages = []\n\n\n'
                    'def setup(data):\n'
                    '    pass\n\n\n'
                    'def test(data):\n'
                    '    pass\n\n\n'
                    'def teardown(data):\n'
                    '    pass\n')
        with open(Test(project, test_name).path) as f:
            assert f.read() == expected
    def test_edit_test_skip(self, project_session, test_utils):
        """skip is written as a module-level variable (bool or string)."""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        test_module.edit_test(project, test_name, description='', pages=[],
                              steps=EMPTY_STEPS, test_data=[], tags=[], skip=True)
        path = Test(project, test_name).path
        expected = ('\n'
                    'description = \'\'\n\n'
                    'tags = []\n\n'
                    'pages = []\n\n'
                    'skip = True\n\n\n'
                    'def setup(data):\n'
                    '    pass\n\n\n'
                    'def test(data):\n'
                    '    pass\n\n\n'
                    'def teardown(data):\n'
                    '    pass\n')
        with open(path) as f:
            assert f.read() == expected
        # skip may also be a string holding a skip reason
        test_module.edit_test(project, test_name, description='', pages=[],
                              steps=EMPTY_STEPS, test_data=[], tags=[],
                              skip='please skip this')
        path = Test(project, test_name).path
        expected = ('\n'
                    'description = \'\'\n\n'
                    'tags = []\n\n'
                    'pages = []\n\n'
                    'skip = \'please skip this\'\n\n\n'
                    'def setup(data):\n'
                    '    pass\n\n\n'
                    'def test(data):\n'
                    '    pass\n\n\n'
                    'def teardown(data):\n'
                    '    pass\n')
        with open(path) as f:
            assert f.read() == expected
class TestEditTestCode:
    """Tests for edit_test_code: saving raw test code plus its data."""

    def test_edit_test_code_csv_data(self, project_session, test_utils):
        """Saving code with csv test data writes both the .py and .csv files."""
        _, project = project_session.activate()
        settings_manager.save_project_settings(project, '{"test_data": "csv"}')
        test_name = test_utils.create_random_test(project)
        test_module.edit_test_code(project, test_name, SAMPLE_TEST_CONTENT,
                                   [{'key': "'value'"}])
        # the code is stored verbatim
        with open(test_module.Test(project, test_name).path) as f:
            assert f.read() == SAMPLE_TEST_CONTENT
        # the data is stored as CSV next to the test, quotes preserved
        csv_path = os.path.join(Project(project).test_directory_path, test_name + '.csv')
        with open(csv_path) as f:
            assert f.read() == ('key\n'
                                '\'value\'\n')
class TestDeleteTest:
    """Tests for test_module.delete_test."""

    def test_delete_test(self, project_session, test_utils):
        """Tests in the root folder and in subfolders can both be deleted."""
        _, project = project_session.activate()
        root_test = test_utils.random_string()
        nested_test = '{}.{}'.format(test_utils.random_string(), test_utils.random_string())
        for name in (root_test, nested_test):
            test_utils.create_test(project, name)
        for name in (root_test, nested_test):
            assert test_module.delete_test(project, name) == []
            assert not os.path.isfile(Test(project, name).path)

    def test_delete_test_not_exist(self, project_session):
        """Deleting a nonexistent test reports an error."""
        _, project = project_session.activate()
        errors = test_module.delete_test(project, 'not-exist')
        assert errors == ['Test not-exist does not exist']

    def test_delete_test_with_data(self, project_session, test_utils):
        """Deleting a test deletes its CSV data file as well."""
        _, project = project_session.activate()
        name = test_utils.create_random_test(project)
        csv_path = os.path.splitext(test_module.Test(project, name).path)[0] + '.csv'
        # 'x' mode creates the empty data file, failing if it already exists
        open(csv_path, 'x').close()
        assert test_module.delete_test(project, name) == []
        assert not os.path.isfile(csv_path)
class TestTestExists:
    """Tests for the Test.exists property."""

    def test_test_exists(self, project_session, test_utils):
        """Test.exists is truthy only for tests present on disk."""
        _, project = project_session.activate()
        existing = test_utils.create_random_test(project)
        assert Test(project, existing).exists
        assert not Test(project, 'not_exists_test').exists
class TestTestCode:
    """Tests for the Test.code property."""

    def test_test_code(self, project_session, test_utils):
        """Test.code returns the raw contents of the test file."""
        _, project = project_session.activate()
        name = test_utils.create_random_test(project)
        subject = Test(project, name)
        with open(subject.path, 'w') as test_file:
            test_file.write(SAMPLE_TEST_CONTENT)
        assert subject.code == SAMPLE_TEST_CONTENT
class TestTestComponents:
    """Tests for Test.components (parsing a test file into its parts)."""

    def test_test_components(self, project_session, test_utils):
        """All components are parsed from a populated test file."""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        test = Test(project, test_name)
        with open(test.path, 'w') as f:
            f.write(SAMPLE_TEST_CONTENT)
        components = test.components
        assert components['description'] == 'some description'
        assert components['pages'] == ['page1', 'page2']
        assert components['tags'] == []
        assert components['skip'] is False
        assert components['steps']['setup'] == [{'code': 'page1.func1()',
                                                 'function_name': 'page1.func1',
                                                 'parameters': [],
                                                 'type': 'function-call'}]
        expected_test_steps = [{'code': "page2.func2('a', 'b')",
                                'function_name': 'page2.func2',
                                'parameters': ["'a'", "'b'"],
                                'type': 'function-call'},
                               {'code': 'click(page2.elem1)',
                                'function_name': 'click',
                                'parameters': ['page2.elem1'],
                                'type': 'function-call'}]
        assert components['steps']['test'] == expected_test_steps
        assert components['steps']['teardown'] == []

    def test_test_components_empty_test(self, project_session, test_utils):
        """An empty test yields empty components."""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        test_content = Test(project, test_name).components
        assert test_content['description'] == ''
        assert test_content['pages'] == []
        assert test_content['steps']['setup'] == []
        assert test_content['steps']['test'] == []
        assert test_content['steps']['teardown'] == []

    def test_test_components_pages(self, project_session, test_utils):
        """components['pages'] contains the imported pages and the pages
        defined in the list
        """
        testdir, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        test_utils.create_page(project, 'page1')
        test_utils.create_page(project, 'page2')
        test_utils.create_page(project, 'module.page3')
        sys.path.append(testdir)
        with open(Test(project, test_name).path, 'w') as f:
            test_content = ('from projects.{}.pages import page1, page2\n'
                            'from projects.{}.pages.module import page3\n'
                            '\n'
                            'pages = ["page4", "module2.page5"]\n'
                            '\n'
                            'def test(data):\n'
                            '    pass\n'.format(project, project))
            f.write(test_content)
        components = Test(project, test_name).components
        expected = ['page1', 'page2', 'module.page3', 'page4', 'module2.page5']
        # Bug fix: the original compared `list.sort()` results, which are
        # always None, so the assertion was vacuously true. sorted() returns
        # new lists, so the contents are actually compared (order-insensitive).
        assert sorted(components['pages']) == sorted(expected)

    def test_test_components_skip(self, project_session, test_utils):
        """The skip component reflects the module-level skip variable."""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        # default / empty skip is False
        assert Test(project, test_name).components['skip'] is False
        # skip is True
        test_module.edit_test(project, test_name, description='', pages=[],
                              steps=EMPTY_STEPS, test_data=[], tags=[], skip=True)
        assert Test(project, test_name).components['skip'] is True
        # skip is a string holding a skip reason
        test_module.edit_test(project, test_name, description='', pages=[],
                              steps=EMPTY_STEPS, test_data=[], tags=[], skip='please skip')
        assert Test(project, test_name).components['skip'] == 'please skip'
| 41.178404
| 99
| 0.588226
| 3,070
| 26,313
| 4.764821
| 0.059283
| 0.078206
| 0.078958
| 0.068841
| 0.802776
| 0.777276
| 0.738515
| 0.695515
| 0.669059
| 0.63939
| 0
| 0.003349
| 0.296431
| 26,313
| 638
| 100
| 41.242947
| 0.786798
| 0.031657
| 0
| 0.647834
| 0
| 0
| 0.13447
| 0.003898
| 0
| 0
| 0
| 0
| 0.167608
| 1
| 0.05838
| false
| 0.037665
| 0.028249
| 0
| 0.103578
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4ab2343efbef3578ec75b383a439660e519b94e0
| 122
|
py
|
Python
|
samples/migrateADCGen1/mappers/__init__.py
|
daniel-dqsdatalabs/pyapacheatlas
|
7fbc0ae3b3c661db07a443306995d4c416a01e1a
|
[
"MIT"
] | 104
|
2020-12-07T14:18:20.000Z
|
2022-03-16T12:11:21.000Z
|
samples/migrateADCGen1/mappers/__init__.py
|
daniel-dqsdatalabs/pyapacheatlas
|
7fbc0ae3b3c661db07a443306995d4c416a01e1a
|
[
"MIT"
] | 98
|
2020-12-23T20:27:02.000Z
|
2022-03-10T15:44:43.000Z
|
samples/migrateADCGen1/mappers/__init__.py
|
daniel-dqsdatalabs/pyapacheatlas
|
7fbc0ae3b3c661db07a443306995d4c416a01e1a
|
[
"MIT"
] | 47
|
2020-12-17T16:28:31.000Z
|
2022-02-22T03:12:19.000Z
|
from .assetmapper import AssetMapper
from .assetfactory import AssetFactory
from .sqlserver import SqlServerTableMapper
| 20.333333
| 43
| 0.860656
| 12
| 122
| 8.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 122
| 5
| 44
| 24.4
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
43531c2f54d2dd69fe9cc867d839f38ee386c401
| 58
|
py
|
Python
|
scripts/test_dist.py
|
xcgoner/gluon-exp
|
432a1aafc1466720b6169bb41caabb2a217b0797
|
[
"Apache-2.0"
] | null | null | null |
scripts/test_dist.py
|
xcgoner/gluon-exp
|
432a1aafc1466720b6169bb41caabb2a217b0797
|
[
"Apache-2.0"
] | null | null | null |
scripts/test_dist.py
|
xcgoner/gluon-exp
|
432a1aafc1466720b6169bb41caabb2a217b0797
|
[
"Apache-2.0"
] | null | null | null |
import mxnet as mx
from mxnet import nd, gluon, autograd
| 14.5
| 37
| 0.775862
| 10
| 58
| 4.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189655
| 58
| 3
| 38
| 19.333333
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
435de3efa8dda26cee5eb9106382f815219461b4
| 10,762
|
py
|
Python
|
model-optimizer/mo/front/common/partial_infer/split_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 3
|
2020-02-09T23:25:37.000Z
|
2021-01-19T09:44:12.000Z
|
model-optimizer/mo/front/common/partial_infer/split_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/front/common/partial_infer/split_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 2
|
2020-04-18T16:24:39.000Z
|
2021-01-19T09:42:19.000Z
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.common.partial_infer.split import tf_split_infer, tf_unpack_infer, tf_split_v_infer, split
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph, build_graph_with_edge_attrs
class TestTFSplitInfer(unittest.TestCase):
graph = None
def setUp(self):
self.graph = build_graph({'split_dim': {'value': None, 'kind': 'data'},
'data_to_split': {'value': None, 'shape': None, 'kind': 'data'},
'split_node': {'kind': 'op', 'op': 'Split', 'num_split': 3, 'axis': None},
'out_data_1': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_3': {'value': None, 'shape': None, 'kind': 'data'},
},
[('split_dim', 'split_node'),
('data_to_split', 'split_node'),
('split_node', 'out_data_1'),
('split_node', 'out_data_2'),
('split_node', 'out_data_3'),
])
def test_tf_split_infer(self):
split_node = Node(self.graph, 'split_node')
self.graph.node['split_dim']['value'] = np.array(1)
self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
tf_split_infer(split_node)
exp_shape = int64_array([2, 4, 25, 30])
for out_node in split_node.out_nodes().values():
self.assertTrue(np.all(exp_shape == out_node.shape))
self.assertEqual(1, split_node.input_port)
def test_tf_split_infer_negative_index(self):
split_node = Node(self.graph, 'split_node')
self.graph.node['split_dim']['value'] = np.array(-3)
self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
tf_split_infer(split_node)
exp_shape = int64_array([2, 4, 25, 30])
for out_node in split_node.out_nodes().values():
self.assertTrue(np.all(exp_shape == out_node.shape))
self.assertEqual(1, split_node.input_port)
def test_tf_split_infer_unknown_index(self):
split_node = Node(self.graph, 'split_node')
self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
tf_split_infer(split_node)
for out_node in split_node.out_nodes().values():
self.assertIsNone(out_node.shape)
def test_tf_split_infer_input_shape_is_None(self):
split_node = Node(self.graph, 'split_node')
self.graph.node['split_dim']['value'] = np.array(1)
tf_split_infer(split_node)
for out_node in split_node.out_nodes().values():
self.assertIsNone(out_node.shape)
def test_tf_split_infer_wrong_num_split(self):
split_node = Node(self.graph, 'split_node')
self.graph.node['split_dim']['value'] = np.array(0)
self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
tf_split_infer(split_node)
for out_node in split_node.out_nodes().values():
self.assertIsNone(out_node.shape)
class TestTFSplitVInfer(unittest.TestCase):
graph = None
def setUp(self):
self.graph = build_graph({'data_to_split': {'value': None, 'shape': None, 'kind': 'data'},
'size_splits': {'value': [3, 5, 4], 'kind': 'data'},
'split_dim': {'value': None, 'kind': 'data'},
'split_node': {'kind': 'op', 'op': 'Split', 'axis': None},
'out_data_1': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_3': {'value': None, 'shape': None, 'kind': 'data'},
},
[('data_to_split', 'split_node'),
('size_splits', 'split_node'),
('split_dim', 'split_node'),
('split_node', 'out_data_1'),
('split_node', 'out_data_2'),
('split_node', 'out_data_3'),
])
def test_tf_split_infer_three_inputs(self):
split_node = Node(self.graph, 'split_node')
self.graph.node['split_dim']['value'] = np.array(1)
self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
tf_split_v_infer(split_node)
exp_shape = [int64_array([2, 3, 25, 30]), int64_array([2, 5, 25, 30]), int64_array([2, 4, 25, 30])]
for ind, out_node in split_node.out_nodes().items():
self.assertTrue(np.all(exp_shape[ind] == out_node.shape))
def test_tf_split_infer_undef_size(self):
split_node = Node(self.graph, 'split_node')
self.graph.node['split_dim']['value'] = np.array(1)
self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
self.graph.node['size_splits']['value'] = np.array([3, 2, -1])
tf_split_v_infer(split_node)
exp_shape = [int64_array([2, 3, 25, 30]), int64_array([2, 2, 25, 30]), int64_array([2, 7, 25, 30])]
for ind, out_node in split_node.out_nodes().items():
self.assertTrue(np.all(exp_shape[ind] == out_node.shape))
class TestTFUnpack(unittest.TestCase):
graph = None
def setUp(self):
self.graph = build_graph({'data_to_split': {'value': None, 'shape': None, 'kind': 'data'},
'unpack': {'kind': 'op', 'op': 'Split', 'num_split': 3, 'axis': None},
'out_data_1': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_3': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_4': {'value': None, 'shape': None, 'kind': 'data'},
},
[('data_to_split', 'unpack'),
('unpack', 'out_data_1'),
('unpack', 'out_data_2'),
('unpack', 'out_data_3'),
])
def test_tf_unpack_infer(self):
unpack_node = Node(self.graph, 'unpack')
self.graph.node['unpack']['axis'] = np.array(1)
self.graph.node['data_to_split']['shape'] = int64_array([2, 3, 25, 30])
tf_unpack_infer(unpack_node)
exp_shape = int64_array([2, 1, 25, 30])
for out_node in unpack_node.out_nodes().values():
self.assertTrue(np.all(exp_shape == out_node.shape))
def test_tf_unpack_infer_default_number_of_pieces(self):
unpack_node = Node(self.graph, 'unpack')
self.graph.node['unpack']['axis'] = np.array(1)
self.graph.node['unpack']['num_split'] = None
self.graph.node['data_to_split']['shape'] = int64_array([2, 3, 25, 30])
tf_unpack_infer(unpack_node)
exp_shape = int64_array([2, 1, 25, 30])
for out_node in unpack_node.out_nodes().values():
self.assertTrue(np.all(exp_shape == out_node.shape))
def test_tf_unpack_infer_not_supported(self):
# the case when the size of the dimension being unpacked is not equal to number of pieces is not supported
unpack_node = Node(self.graph, 'unpack')
self.graph.node['unpack']['axis'] = np.array(1)
self.graph.node['data_to_split']['shape'] = int64_array([2, 6, 25, 30])
tf_unpack_infer(unpack_node)
for out_node in unpack_node.out_nodes().values():
self.assertIsNone(out_node.shape)
class TestSplitFunc(unittest.TestCase):
graph = None
def setUp(self):
self.graph = build_graph_with_edge_attrs(
{'data_to_split': {'value': None, 'shape': int64_array([2, 12, 25, 44]), 'kind': 'data'},
'split_node': {'kind': 'op', 'op': 'Split', 'axis': None},
'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_5': {'value': None, 'shape': None, 'kind': 'data'},
'out_data_7': {'value': None, 'shape': None, 'kind': 'data'},
},
[('data_to_split', 'split_node', {'in': 0}),
('split_node', 'out_data_2', {'out': 2}),
('split_node', 'out_data_5', {'out': 5}),
('split_node', 'out_data_7', {'out': 7}),
])
def test_split_non_sequential_output_port(self):
split(Node(self.graph, 'data_to_split'), Node(self.graph, 'split_node'), -1, [3, 2, 7, 5, 6, 4, 9, 8])
self.assertTrue(np.all(Node(self.graph, 'out_data_2').shape == [2, 12, 25, 7]))
self.assertTrue(np.all(Node(self.graph, 'out_data_5').shape == [2, 12, 25, 4]))
self.assertTrue(np.all(Node(self.graph, 'out_data_7').shape == [2, 12, 25, 8]))
def test_split_value_infer_non_sequential_output_port(self):
data_node = Node(self.graph, 'data_to_split')
value = np.array(range(2 * 12 * 25 * 44)).reshape(data_node.shape)
data_node.value = value.copy()
split(data_node, Node(self.graph, 'split_node'), -1, [3, 2, 7, 5, 6, 4, 9, 8])
self.assertTrue(np.all(Node(self.graph, 'out_data_2').shape == [2, 12, 25, 7]))
self.assertTrue(np.all(Node(self.graph, 'out_data_5').shape == [2, 12, 25, 4]))
self.assertTrue(np.all(Node(self.graph, 'out_data_7').shape == [2, 12, 25, 8]))
self.assertTrue(np.all(Node(self.graph, 'out_data_2').value == value[:, :, :, 5:12]))
self.assertTrue(np.all(Node(self.graph, 'out_data_5').value == value[:, :, :, 23:27]))
self.assertTrue(np.all(Node(self.graph, 'out_data_7').value == value[:, :, :, 36:]))
| 49.141553
| 114
| 0.556588
| 1,411
| 10,762
| 3.992913
| 0.11056
| 0.083067
| 0.069223
| 0.051118
| 0.78612
| 0.757011
| 0.713348
| 0.705538
| 0.695066
| 0.679624
| 0
| 0.039969
| 0.283962
| 10,762
| 218
| 115
| 49.366972
| 0.69115
| 0.062535
| 0
| 0.603659
| 0
| 0
| 0.158376
| 0
| 0
| 0
| 0
| 0
| 0.128049
| 1
| 0.097561
| false
| 0
| 0.036585
| 0
| 0.182927
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
43790ed6541807d42a3623ea81620f736075ba4f
| 214
|
py
|
Python
|
uitwerkingen/1h-importeren.py
|
harcel/PyDataScienceIntroNL
|
8041b55203b941d8c0f189cdd19bdfd96420225c
|
[
"MIT"
] | null | null | null |
uitwerkingen/1h-importeren.py
|
harcel/PyDataScienceIntroNL
|
8041b55203b941d8c0f189cdd19bdfd96420225c
|
[
"MIT"
] | null | null | null |
uitwerkingen/1h-importeren.py
|
harcel/PyDataScienceIntroNL
|
8041b55203b941d8c0f189cdd19bdfd96420225c
|
[
"MIT"
] | null | null | null |
import eigenlib as lib
lib.geefmeinfo()
# get the documentation (alt-tab werkt ook!)
print(lib.plus3.__doc__)
print()
print(lib.plus3(8.))
print(lib.plus3(np.array([2., 109.])))
print(lib.plus3('stykje tekst'))
| 17.833333
| 44
| 0.71028
| 34
| 214
| 4.352941
| 0.647059
| 0.216216
| 0.351351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 0.102804
| 214
| 11
| 45
| 19.454545
| 0.723958
| 0.196262
| 0
| 0
| 0
| 0
| 0.070588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.142857
| 0.714286
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
43a2580c665d4b19a10c859abedc4c4be97819ca
| 226
|
py
|
Python
|
bbridge_sdk/entity/request/nlp_data.py
|
bbridge-team/bbridge-sdk-python
|
10a46b9ff87a91fdebc65f74cfb30a05a10bb641
|
[
"MIT"
] | null | null | null |
bbridge_sdk/entity/request/nlp_data.py
|
bbridge-team/bbridge-sdk-python
|
10a46b9ff87a91fdebc65f74cfb30a05a10bb641
|
[
"MIT"
] | 1
|
2017-03-16T06:39:38.000Z
|
2017-03-16T09:50:27.000Z
|
bbridge_sdk/entity/request/nlp_data.py
|
bbridge-team/bbridge-sdk-python
|
10a46b9ff87a91fdebc65f74cfb30a05a10bb641
|
[
"MIT"
] | 1
|
2017-04-07T01:29:55.000Z
|
2017-04-07T01:29:55.000Z
|
class NLPData(object):
def __init__(self, sentences):
"""
:type sentences: list[str]
"""
self.__sentences = sentences
@property
def sentences(self):
return self.__sentences
| 20.545455
| 36
| 0.579646
| 21
| 226
| 5.857143
| 0.571429
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.314159
| 226
| 10
| 37
| 22.6
| 0.793548
| 0.115044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
43a65437ec6212e13e832d9123f0c69b28eecb73
| 3,516
|
py
|
Python
|
tests/test_config_validation.py
|
anderskaplan/pomosite
|
59bc8e670c9a765953bc0535cf17de82e6fdec65
|
[
"MIT"
] | null | null | null |
tests/test_config_validation.py
|
anderskaplan/pomosite
|
59bc8e670c9a765953bc0535cf17de82e6fdec65
|
[
"MIT"
] | null | null | null |
tests/test_config_validation.py
|
anderskaplan/pomosite
|
59bc8e670c9a765953bc0535cf17de82e6fdec65
|
[
"MIT"
] | null | null | null |
import unittest
from pathlib import Path
import shutil
from xml.etree import ElementTree
from pomosite import generate, ConfigurationError
content_path = str(Path(Path(__file__).parent, "data/test_templating"))
output_dir = "temp/test_validation"
class TestConfigValidation(unittest.TestCase):
@classmethod
def setUpClass(self):
p = Path(output_dir)
if p.exists():
print("removing " + output_dir)
shutil.rmtree(output_dir)
def test_should_fail_on_missing_leading_slash(self):
site_config = {
"item_config": {
"P1": {
"endpoint": "x/",
"template": "page.html",
},
},
"template_dir": content_path + "/templates",
}
with self.assertRaises(ConfigurationError):
generate(site_config, output_dir)
def test_should_not_accept_two_identical_page_endpoints(self):
site_config = {
"item_config": {
"P1": {
"endpoint": "/xyz",
"template": "page.html",
},
"P2": {
"endpoint": "/xyz",
"template": "page.html",
},
},
"template_dir": content_path + "/templates",
}
with self.assertRaises(ConfigurationError):
generate(site_config, output_dir)
def test_should_not_accept_two_identical_static_endpoints(self):
site_config = {
"item_config": {
"S1": {
"endpoint": "/xyz",
"source": Path(content_path, "templates/page.html"),
},
"S2": {
"endpoint": "/xyz",
"source": Path(content_path, "templates/page.html"),
},
},
"template_dir": content_path + "/templates",
}
with self.assertRaises(ConfigurationError):
generate(site_config, output_dir)
def test_should_not_accept_two_identical_page_and_static_endpoints(self):
site_config = {
"item_config": {
"P1": {
"endpoint": "/xyz",
"template": "page.html",
},
"S1": {
"endpoint": "/xyz",
"source": Path(content_path, "templates/page.html"),
},
},
"template_dir": content_path + "/templates",
}
with self.assertRaises(ConfigurationError):
generate(site_config, output_dir)
def test_should_not_accept_endpoints_with_invalid_characters(self):
site_config = {
"item_config": {
"P1": {
"endpoint": "/xyö",
"template": "page.html",
},
},
"template_dir": content_path + "/templates",
}
with self.assertRaises(ConfigurationError):
generate(site_config, output_dir)
def test_should_not_accept_endpoints_with_space(self):
site_config = {
"item_config": {
"P1": {
"endpoint": "/xy zz",
"template": "page.html",
},
},
"template_dir": content_path + "/templates",
}
with self.assertRaises(ConfigurationError):
generate(site_config, output_dir)
| 32.555556
| 77
| 0.498294
| 291
| 3,516
| 5.704467
| 0.247423
| 0.072289
| 0.108434
| 0.057831
| 0.768072
| 0.748193
| 0.748193
| 0.659639
| 0.659639
| 0.659639
| 0
| 0.004192
| 0.389363
| 3,516
| 107
| 78
| 32.859813
| 0.76898
| 0
| 0
| 0.530612
| 1
| 0
| 0.156428
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 1
| 0.071429
| false
| 0
| 0.05102
| 0
| 0.132653
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
43bc1a799a8508aff2a9dff6ef99dbc8ae4b1eeb
| 359
|
py
|
Python
|
tasks/EPAM/python_course/foundation-python/l2/m2-15.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | 2
|
2022-01-19T18:01:35.000Z
|
2022-02-06T06:54:38.000Z
|
tasks/EPAM/python_course/foundation-python/l2/m2-15.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | null | null | null |
tasks/EPAM/python_course/foundation-python/l2/m2-15.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | null | null | null |
# is Evaluates to true if the variables on either side of the operator point to the same object and false otherwise. x is y, here is results in 1 if id(x) equals id(y).
# is not Evaluates to false if the variables on either side of the operator point to the same object and true otherwise. x is not y, here is not results in 1 if id(x) is not equal to id(y).
| 119.666667
| 189
| 0.749304
| 76
| 359
| 3.539474
| 0.368421
| 0.074349
| 0.104089
| 0.118959
| 0.572491
| 0.572491
| 0.460967
| 0.460967
| 0.460967
| 0.460967
| 0
| 0.007018
| 0.206128
| 359
| 3
| 189
| 119.666667
| 0.936842
| 0.986072
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
78f6064317aa3269772fc5e1c4d7da5880a767bc
| 304
|
py
|
Python
|
csp/__init__.py
|
JacobChen258/AI-Constraints-Satisfaction
|
9b01cfce447e40678eb2e426413b4e2e437257f0
|
[
"MIT"
] | null | null | null |
csp/__init__.py
|
JacobChen258/AI-Constraints-Satisfaction
|
9b01cfce447e40678eb2e426413b4e2e437257f0
|
[
"MIT"
] | null | null | null |
csp/__init__.py
|
JacobChen258/AI-Constraints-Satisfaction
|
9b01cfce447e40678eb2e426413b4e2e437257f0
|
[
"MIT"
] | null | null | null |
from .csp import CSP
from .tetris_csp import TetrisCSP
from .csp_algorithms import CSPAlgorithms
from .variable import Variable
from .constraint import Constraint
from .tetromino_puzzle_constraint import TetrominoPuzzleConstraint
from .tetris_variable import TetrisVariable
from .csp_util import CSPUtil
| 33.777778
| 66
| 0.868421
| 38
| 304
| 6.789474
| 0.394737
| 0.081395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 304
| 8
| 67
| 38
| 0.948529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
601a4669ee0b03503a3661d1800f5462f100acbf
| 188
|
py
|
Python
|
froide/upload/serializers.py
|
kratz00/froide
|
f31e6dbe7f6d565058bde36461a6fa2d09e0388e
|
[
"MIT"
] | null | null | null |
froide/upload/serializers.py
|
kratz00/froide
|
f31e6dbe7f6d565058bde36461a6fa2d09e0388e
|
[
"MIT"
] | null | null | null |
froide/upload/serializers.py
|
kratz00/froide
|
f31e6dbe7f6d565058bde36461a6fa2d09e0388e
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Upload
class UploadSerializer(serializers.ModelSerializer):
class Meta:
model = Upload
fields = '__all__'
| 18.8
| 52
| 0.728723
| 19
| 188
| 6.947368
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218085
| 188
| 9
| 53
| 20.888889
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0.037234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
603a3febf50bf50252584143949326911364fed7
| 514
|
py
|
Python
|
ezidapp/models/datacite_queue.py
|
HEG-INCIPIT/ARKetype
|
bd2ced291292fa6cc2f101f59a9614698dae5102
|
[
"MIT"
] | 9
|
2020-02-26T00:45:09.000Z
|
2021-11-07T23:07:06.000Z
|
ezidapp/models/datacite_queue.py
|
HEG-INCIPIT/ARKetype
|
bd2ced291292fa6cc2f101f59a9614698dae5102
|
[
"MIT"
] | 213
|
2020-04-07T21:36:17.000Z
|
2022-03-29T21:26:04.000Z
|
ezidapp/models/datacite_queue.py
|
HEG-INCIPIT/ARKetype
|
bd2ced291292fa6cc2f101f59a9614698dae5102
|
[
"MIT"
] | 7
|
2020-04-07T20:04:51.000Z
|
2021-08-19T01:11:55.000Z
|
# =============================================================================
#
# EZID :: ezidapp/models/datacite_queue.py
#
# Database model for the DataCite queue.
#
# Author:
# Greg Janee <gjanee@ucop.edu>
#
# License:
# Copyright (c) 2015, Regents of the University of California
# http://creativecommons.org/licenses/BSD/
#
# -----------------------------------------------------------------------------
import registration_queue
class DataciteQueue(registration_queue.RegistrationQueue):
pass
| 24.47619
| 79
| 0.509728
| 42
| 514
| 6.166667
| 0.833333
| 0.100386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008772
| 0.11284
| 514
| 20
| 80
| 25.7
| 0.559211
| 0.754864
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
603e8d3db9a72dc84eeac73a6fd500ce2b340db7
| 274
|
py
|
Python
|
clientes/gateway.py
|
lauraziebarth/vendas-backend
|
026a86f1995af63f02738dd6166ebb3e145878a3
|
[
"MIT"
] | null | null | null |
clientes/gateway.py
|
lauraziebarth/vendas-backend
|
026a86f1995af63f02738dd6166ebb3e145878a3
|
[
"MIT"
] | null | null | null |
clientes/gateway.py
|
lauraziebarth/vendas-backend
|
026a86f1995af63f02738dd6166ebb3e145878a3
|
[
"MIT"
] | null | null | null |
from clientes.models import Cliente
def busca_um_cliente(cliente_id):
return Cliente.objects.get(id=cliente_id)
def busca_clientes_nao_excluidos():
return Cliente.objects.filter(excluido=False)
def busca_todos_os_clientes():
return Cliente.objects.all()
| 17.125
| 49
| 0.781022
| 38
| 274
| 5.368421
| 0.526316
| 0.117647
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131387
| 274
| 15
| 50
| 18.266667
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
6076ab4045f42373d8afc9df3315a7e7b053b69b
| 188
|
py
|
Python
|
F_Machine_learning/2_Supervised-Learning/solutions/ex0_4b.py
|
oercompbiomed/CBM101
|
20010dcb99fbf218c4789eb5918dcff8ceb94898
|
[
"MIT"
] | 7
|
2019-07-03T07:41:55.000Z
|
2022-02-06T20:25:37.000Z
|
Lab2-ML-tissue-classification/solutions/ex0_4b.py
|
computational-medicine/BMED360-2021
|
2c6052b9affedf1fee23c89d23941bf08eb2614c
|
[
"MIT"
] | 9
|
2019-03-14T15:15:09.000Z
|
2019-08-01T14:18:21.000Z
|
Lab2-ML-tissue-classification/solutions/ex0_4b.py
|
computational-medicine/BMED360-2021
|
2c6052b9affedf1fee23c89d23941bf08eb2614c
|
[
"MIT"
] | 11
|
2019-03-12T10:43:11.000Z
|
2021-10-05T12:15:00.000Z
|
def data_splitter(data, idxs):
subsample = data[idxs]
return subsample
## Note: matrices are indexed like mat[rows, cols]. If only one is provided, it is interpreted as mat[rows].
| 37.6
| 108
| 0.723404
| 29
| 188
| 4.655172
| 0.758621
| 0.118519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180851
| 188
| 5
| 108
| 37.6
| 0.876623
| 0.558511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
609011e393f39f606ec754ea21754db7263e684c
| 45
|
py
|
Python
|
run_tests.py
|
acud/py-swarm
|
50f4b0eaa8266c721453fb44f4e2312aa5de15f0
|
[
"BSD-3-Clause"
] | 1
|
2021-07-27T07:45:53.000Z
|
2021-07-27T07:45:53.000Z
|
run_tests.py
|
acud/py-swarm
|
50f4b0eaa8266c721453fb44f4e2312aa5de15f0
|
[
"BSD-3-Clause"
] | null | null | null |
run_tests.py
|
acud/py-swarm
|
50f4b0eaa8266c721453fb44f4e2312aa5de15f0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/bin/bash
python -m unittest test/*.py -v
| 11.25
| 31
| 0.644444
| 8
| 45
| 3.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 3
| 32
| 15
| 0.763158
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6097a63bab820b8fe6085ed3b1fc0834611b451b
| 169
|
py
|
Python
|
hamiltorch/__init__.py
|
kakodkar/hamiltorch
|
0ed85dacb28a77b27b9cb9c55ed178284ca7f195
|
[
"BSD-2-Clause"
] | 237
|
2019-10-06T02:41:50.000Z
|
2022-03-25T19:55:56.000Z
|
hamiltorch/__init__.py
|
leoduan/hamiltorch
|
ac8feb278df2abd238a3d50604645a247c9610fd
|
[
"BSD-2-Clause"
] | 15
|
2020-01-06T17:21:49.000Z
|
2022-03-10T07:35:02.000Z
|
hamiltorch/__init__.py
|
leoduan/hamiltorch
|
ac8feb278df2abd238a3d50604645a247c9610fd
|
[
"BSD-2-Clause"
] | 47
|
2019-12-20T20:05:34.000Z
|
2022-01-04T15:48:44.000Z
|
__version__ = '0.4.0.dev1'
from .samplers import sample, sample_model, predict_model, sample_split_model, Sampler, Integrator, Metric
from .util import set_random_seed
| 33.8
| 106
| 0.810651
| 25
| 169
| 5.08
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02649
| 0.106509
| 169
| 4
| 107
| 42.25
| 0.81457
| 0
| 0
| 0
| 0
| 0
| 0.059172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
609e4f40804e523cf72716523a36f877e41b594a
| 26,153
|
py
|
Python
|
code/k.py
|
davidbradway/klassysouthkarolina
|
0b4ccee55543bc759c7684d6326b5d3768eeb4f4
|
[
"Apache-2.0"
] | 2
|
2017-07-10T15:39:43.000Z
|
2018-12-16T20:04:47.000Z
|
code/k.py
|
davidbradway/klassysouthkarolina
|
0b4ccee55543bc759c7684d6326b5d3768eeb4f4
|
[
"Apache-2.0"
] | null | null | null |
code/k.py
|
davidbradway/klassysouthkarolina
|
0b4ccee55543bc759c7684d6326b5d3768eeb4f4
|
[
"Apache-2.0"
] | null | null | null |
kwords=['ka', 'kababish', 'kabaka', 'kabaragoya', 'kabard', 'kabardian', 'kabaya', 'kabbeljaws', 'kabel', 'kaberu', 'kabiet', 'kabirpanthi', 'kabistan', 'kabonga', 'kabuki', 'kabuli', 'kabyle', 'kachari', 'kachin', 'kachin', 'kadaga', 'kadarite', 'kadaya', 'kadayan', 'kaddish', 'kadein', 'kadikane', 'kadischi', 'kadmi', 'kados', 'kadu', 'kaempferol', 'kaf', 'kafa', 'kaferita', 'kaffir', 'kaffir', 'kaffiyeh', 'kaffraria', 'kaffrarian', 'kafir', 'kafir', 'kafiri', 'kafirin', 'kafiz', 'kafka', 'kafkaesque', 'kafta', 'kago', 'kagu', 'kaha', 'kahar', 'kahau', 'kahikatea', 'kahili', 'kahu', 'kahuna', 'kai', 'kaibab', 'kaibartha', 'kaid', 'kaik', 'kaikara', 'kaikawaka', 'kail', 'kailyard', 'kailyarder', 'kailyardism', 'kaimo', 'kainah', 'kainga', 'kainite', 'kainsi', 'kainyn', 'kairine', 'kairoline', 'kaiser', 'kaiserdom', 'kaiserism', 'kaisership', 'kaitaka', 'kaithi', 'kaiwhiria', 'kaiwi', 'kaj', 'kajar', 'kajawah', 'kajugaru', 'kaka', 'kakan', 'kakapo', 'kakar', 'kakarali', 'kakariki', 'kakatoe', 'kakatoidae', 'kakawahie', 'kaki', 'kakidrosis', 'kakistocracy', 'kakkak', 'kakke', 'kakortokite', 'kala', 'kaladana', 'kalamalo', 'kalamansanai', 'kalamian', 'kalanchoe', 'kalandariyah', 'kalang', 'kalapooian', 'kalashnikov', 'kalasie', 'kaldani', 'kale', 'kaleidophon', 'kaleidophone', 'kaleidoscope', 'kaleidoscopic', 'kaleidoscopical', 'kaleidoscopically', 'kalekah', 'kalema', 'kalendae', 'kalends', 'kalewife', 'kaleyard', 'kali', 'kalian', 'kaliana', 'kaliborite', 'kalidium', 'kaliform', 'kaligenous', 'kalinga', 'kalinite', 'kaliophilite', 'kalipaya', 'kalispel', 'kalium', 'kallah', 'kallege', 'kallilite', 'kallima', 'kallitype', 'kalmarian', 'kalmia', 'kalmuck', 'kalo', 'kalogeros', 'kalokagathia', 'kalon', 'kalong', 'kalpis', 'kalsomine', 'kalsominer', 'kalumpang', 'kalumpit', 'kalwar', 'kalymmaukion', 'kalymmocyte', 'kamachile', 'kamacite', 'kamahi', 'kamala', 'kamaloka', 'kamansi', 'kamao', 'kamares', 'kamarezite', 'kamarupa', 'kamarupic', 'kamas', 'kamasin', 'kamass', 
'kamassi', 'kamba', 'kambal', 'kamboh', 'kamchadal', 'kamchatkan', 'kame', 'kameeldoorn', 'kameelthorn', 'kamel', 'kamelaukion', 'kamerad', 'kamias', 'kamichi', 'kamik', 'kamikaze', 'kamiya', 'kammalan', 'kammererite', 'kamperite', 'kampong', 'kamptomorph', 'kan', 'kana', 'kanae', 'kanagi', 'kanaka', 'kanap', 'kanara', 'kanarese', 'kanari', 'kanat', 'kanauji', 'kanawari', 'kanawha', 'kanchil', 'kande', 'kandelia', 'kandol', 'kaneh', 'kanephore', 'kanephoros', 'kaneshite', 'kanesian', 'kang', 'kanga', 'kangani', 'kangaroo', 'kangarooer', 'kangli', 'kanji', 'kankanai', 'kankie', 'kannume', 'kanoon', 'kanred', 'kans', 'kansa', 'kansan', 'kantele', 'kanteletar', 'kanten', 'kanthan', 'kantian', 'kantianism', 'kantism', 'kantist', 'kanuri', 'kanwar', 'kaoliang', 'kaolin', 'kaolinate', 'kaolinic', 'kaolinite', 'kaolinization', 'kaolinize', 'kapa', 'kapai', 'kapeika', 'kapok', 'kapp', 'kappa', 'kappe', 'kappland', 'kapur', 'kaput', 'karabagh', 'karagan', 'karaism', 'karaite', 'karaitism', 'karaka', 'karakatchan', 'karakul', 'karakul', 'karamojo', 'karamu', 'karaoke', 'karatas', 'karate', 'karaya', 'karaya', 'karbi', 'karch', 'kareao', 'kareeta', 'karel', 'karela', 'karelian', 'karen', 'karharbari', 'kari', 'karite', 'karl', 'karling', 'karluk', 'karma', 'karmathian', 'karmic', 'karmouth', 'karo', 'kaross', 'karou', 'karree', 'karri', 'karroo', 'karroo', 'karrusel', 'karsha', 'karshuni', 'karst', 'karst', 'karstenite', 'karstic', 'kartel', 'karthli', 'kartometer', 'kartos', 'kartvel', 'kartvelian', 'karwar', 'karwinskia', 'karyaster', 'karyenchyma', 'karyochrome', 'karyochylema', 'karyogamic', 'karyogamy', 'karyokinesis', 'karyokinetic', 'karyologic', 'karyological', 'karyologically', 'karyology', 'karyolymph', 'karyolysidae', 'karyolysis', 'karyolysus', 'karyolytic', 'karyomere', 'karyomerite', 'karyomicrosome', 'karyomitoic', 'karyomitome', 'karyomiton', 'karyomitosis', 'karyomitotic', 'karyon', 'karyoplasm', 'karyoplasma', 'karyoplasmatic', 'karyoplasmic', 
'karyopyknosis', 'karyorrhexis', 'karyoschisis', 'karyosome', 'karyotin', 'karyotype', 'kasa', 'kasbah', 'kasbeke', 'kascamiol', 'kasha', 'kashan', 'kasher', 'kashga', 'kashi', 'kashima', 'kashmiri', 'kashmirian', 'kashoubish', 'kashruth', 'kashube', 'kashubian', 'kashyapa', 'kasida', 'kasikumuk', 'kaska', 'kaskaskia', 'kasm', 'kasolite', 'kassabah', 'kassak', 'kassite', 'kassu', 'kastura', 'kasubian', 'kat', 'katabanian', 'katabasis', 'katabatic', 'katabella', 'katabolic', 'katabolically', 'katabolism', 'katabolite', 'katabolize', 'katabothron', 'katachromasis', 'katacrotic', 'katacrotism', 'katagenesis', 'katagenetic', 'katakana', 'katakinesis', 'katakinetic', 'katakinetomer', 'katakinetomeric', 'katakiribori', 'katalase', 'katalysis', 'katalyst', 'katalytic', 'katalyze', 'katamorphism', 'kataphoresis', 'kataphoretic', 'kataphoric', 'kataphrenia', 'kataplasia', 'kataplectic', 'kataplexy', 'katar', 'katastate', 'katastatic', 'katathermometer', 'katatonia', 'katatonic', 'katatype', 'katchung', 'katcina', 'kate', 'kath', 'katha', 'katha', 'kathal', 'katharina', 'katharine', 'katharometer', 'katharsis', 'kathartic', 'kathemoglobin', 'kathenotheism', 'kathleen', 'kathodic', 'kathopanishad', 'kathryn', 'kathy', 'katie', 'katik', 'katinka', 'katipo', 'katipunan', 'katipuneros', 'katmon', 'katogle', 'katrine', 'katrinka', 'katsup', 'katsuwonidae', 'katuka', 'katukina', 'katun', 'katurai', 'katy', 'katydid', 'katzenjammer', 'kauravas', 'kauri', 'kava', 'kavaic', 'kavass', 'kavi', 'kaw', 'kawaka', 'kawchodinne', 'kawika', 'kay', 'kay', 'kayak', 'kayaker', 'kayan', 'kayasth', 'kayastha', 'kayles', 'kayo', 'kayvan', 'kazak', 'kazi', 'kazoo', 'kazuhiro', 'kea', 'keach', 'keacorn', 'keatsian', 'keawe', 'keb', 'kebab', 'kebbie', 'kebbuck', 'kechel', 'keck', 'keckle', 'keckling', 'kecksy', 'kecky', 'ked', 'kedar', 'kedarite', 'keddah', 'kedge', 'kedger', 'kedgeree', 'kedlock', 'kedushshah', 'kee', 'keech', 'keek', 'keeker', 'keel', 'keelage', 'keelbill', 'keelblock', 'keelboat', 
'keelboatman', 'keeled', 'keeler', 'keelfat', 'keelhale', 'keelhaul', 'keelie', 'keeling', 'keelivine', 'keelless', 'keelman', 'keelrake', 'keelson', 'keen', 'keena', 'keened', 'keener', 'keenly', 'keenness', 'keep', 'keepable', 'keeper', 'keeperess', 'keepering', 'keeperless', 'keepership', 'keeping', 'keepsake', 'keepsaky', 'keepworthy', 'keerogue', 'kees', 'keeshond', 'keest', 'keet', 'keeve', 'keewatin', 'kef', 'keffel', 'kefir', 'kefiric', 'kefti', 'keftian', 'keftiu', 'keg', 'kegler', 'kehaya', 'kehillah', 'kehoeite', 'keid', 'keilhauite', 'keita', 'keith', 'keitloa', 'kekchi', 'kekotene', 'kekuna', 'kelchin', 'keld', 'kele', 'kele', 'kelebe', 'kelectome', 'keleh', 'kelek', 'kelep', 'kelima', 'kelk', 'kell', 'kella', 'kellion', 'kellupweed', 'kelly', 'kelly', 'keloid', 'keloidal', 'kelp', 'kelper', 'kelpfish', 'kelpie', 'kelpware', 'kelpwort', 'kelpy', 'kelt', 'kelter', 'keltoi', 'kelty', 'kelvin', 'kelvin', 'kelyphite', 'kemal', 'kemalism', 'kemalist', 'kemb', 'kemp', 'kemperyman', 'kempite', 'kemple', 'kempster', 'kempt', 'kempy', 'ken', 'ken', 'kenaf', 'kenai', 'kenareh', 'kench', 'kend', 'kendir', 'kendyr', 'kenelm', 'kenipsim', 'kenlore', 'kenmark', 'kenn', 'kennebec', 'kennebecker', 'kennebunker', 'kennedya', 'kennel', 'kennelly', 'kennelman', 'kenner', 'kenneth', 'kenning', 'kenningwort', 'kenno', 'keno', 'kenogenesis', 'kenogenetic', 'kenogenetically', 'kenogeny', 'kenosis', 'kenotic', 'kenoticism', 'kenoticist', 'kenotism', 'kenotist', 'kenotoxin', 'kenotron', 'kenseikai', 'kensington', 'kensitite', 'kenspac', 'kenspeck', 'kenspeckle', 'kent', 'kent', 'kentallenite', 'kentia', 'kenticism', 'kentish', 'kentishman', 'kentledge', 'kenton', 'kentrogon', 'kentrolite', 'kentuckian', 'kentucky', 'kenyte', 'kep', 'kepi', 'keplerian', 'kept', 'ker', 'keracele', 'keralite', 'kerana', 'keraphyllocele', 'keraphyllous', 'kerasin', 'kerasine', 'kerat', 'keratalgia', 'keratectasia', 'keratectomy', 'keraterpeton', 'keratin', 'keratinization', 'keratinize', 
'keratinoid', 'keratinose', 'keratinous', 'keratitis', 'keratoangioma', 'keratocele', 'keratocentesis', 'keratoconjunctivitis', 'keratoconus', 'keratocricoid', 'keratode', 'keratodermia', 'keratogenic', 'keratogenous', 'keratoglobus', 'keratoglossus', 'keratohelcosis', 'keratohyal', 'keratoid', 'keratoidea', 'keratoiritis', 'keratol', 'keratoleukoma', 'keratolysis', 'keratolytic', 'keratoma', 'keratomalacia', 'keratome', 'keratometer', 'keratometry', 'keratomycosis', 'keratoncus', 'keratonosus', 'keratonyxis', 'keratophyre', 'keratoplastic', 'keratoplasty', 'keratorrhexis', 'keratoscope', 'keratoscopy', 'keratose', 'keratosis', 'keratotome', 'keratotomy', 'keratto', 'keraulophon', 'keraulophone', 'keraunia', 'keraunion', 'keraunograph', 'keraunographic', 'keraunography', 'keraunophone', 'keraunophonic', 'keraunoscopia', 'keraunoscopy', 'kerbstone', 'kerchief', 'kerchiefed', 'kerchoo', 'kerchug', 'kerchunk', 'kerectomy', 'kerel', 'keres', 'keresan', 'kerewa', 'kerf', 'kerflap', 'kerflop', 'kerflummox', 'kerite', 'kermanji', 'kermanshah', 'kermes', 'kermesic', 'kermesite', 'kermis', 'kern', 'kernel', 'kerneled', 'kernelless', 'kernelly', 'kerner', 'kernetty', 'kernish', 'kernite', 'kernos', 'kerogen', 'kerosene', 'kerplunk', 'kerri', 'kerria', 'kerrie', 'kerrikerri', 'kerril', 'kerrite', 'kerry', 'kerry', 'kersantite', 'kersey', 'kerseymere', 'kerslam', 'kerslosh', 'kersmash', 'kerugma', 'kerwham', 'kerygma', 'kerygmatic', 'kerykeion', 'kerystic', 'kerystics', 'keryx', 'kesslerman', 'kestrel', 'ket', 'keta', 'ketal', 'ketapang', 'ketazine', 'ketch', 'ketchcraft', 'ketchup', 'ketembilla', 'keten', 'ketene', 'ketimide', 'ketimine', 'ketipate', 'ketipic', 'keto', 'ketogen', 'ketogenesis', 'ketogenic', 'ketoheptose', 'ketohexose', 'ketoketene', 'ketol', 'ketole', 'ketolysis', 'ketolytic', 'ketone', 'ketonemia', 'ketonic', 'ketonimid', 'ketonimide', 'ketonimin', 'ketonimine', 'ketonization', 'ketonize', 'ketonuria', 'ketose', 'ketoside', 'ketosis', 'ketosuccinic', 
'ketoxime', 'kette', 'ketting', 'kettle', 'kettlecase', 'kettledrum', 'kettledrummer', 'kettleful', 'kettlemaker', 'kettlemaking', 'kettler', 'ketty', 'ketu', 'ketuba', 'ketupa', 'ketyl', 'keup', 'keuper', 'keurboom', 'kevalin', 'kevan', 'kevel', 'kevelhead', 'kevin', 'kevutzah', 'kevyn', 'keweenawan', 'keweenawite', 'kewpie', 'kex', 'kexy', 'key', 'keyage', 'keyboard', 'keyed', 'keyhole', 'keyless', 'keylet', 'keylock', 'keynesian', 'keynesianism', 'keynote', 'keynoter', 'keyseater', 'keyserlick', 'keysmith', 'keystone', 'keystoned', 'keystoner', 'keyway', 'kha', 'khaddar', 'khadi', 'khagiarite', 'khahoon', 'khaiki', 'khair', 'khaja', 'khajur', 'khakanship', 'khaki', 'khakied', 'khaldian', 'khalifa', 'khalifat', 'khalkha', 'khalsa', 'khami', 'khamsin', 'khamti', 'khan', 'khanate', 'khanda', 'khandait', 'khanjar', 'khanjee', 'khankah', 'khansamah', 'khanum', 'khar', 'kharaj', 'kharia', 'kharijite', 'kharoshthi', 'kharouba', 'kharroubah', 'khartoumer', 'kharua', 'kharwar', 'khasa', 'khasi', 'khass', 'khat', 'khatib', 'khatri', 'khatti', 'khattish', 'khaya', 'khazar', 'khazarian', 'khediva', 'khedival', 'khedivate', 'khedive', 'khediviah', 'khedivial', 'khediviate', 'khepesh', 'kherwari', 'kherwarian', 'khet', 'khevzur', 'khidmatgar', 'khila', 'khilat', 'khir', 'khirka', 'khitan', 'khivan', 'khlysti', 'khmer', 'khoja', 'khoja', 'khoka', 'khokani', 'khond', 'khorassan', 'khot', 'khotan', 'khotana', 'khowar', 'khu', 'khuai', 'khubber', 'khula', 'khuskhus', 'khussak', 'khutbah', 'khutuktu', 'khuzi', 'khvat', 'khwarazmian', 'kiack', 'kiaki', 'kialee', 'kiang', 'kiangan', 'kiaugh', 'kibber', 'kibble', 'kibbler', 'kibblerman', 'kibe', 'kibei', 'kibitka', 'kibitz', 'kibitzer', 'kiblah', 'kibosh', 'kiby', 'kick', 'kickable', 'kickapoo', 'kickback', 'kickee', 'kicker', 'kicking', 'kickish', 'kickless', 'kickoff', 'kickout', 'kickseys', 'kickshaw', 'kickup', 'kidder', 'kidder', 'kidderminster', 'kiddier', 'kiddish', 'kiddush', 'kiddushin', 'kiddy', 'kidhood', 'kidlet', 
'kidling', 'kidnap', 'kidnapee', 'kidnaper', 'kidney', 'kidneyroot', 'kidneywort', 'kids', 'kidskin', 'kidsman', 'kiefekil', 'kieffer', 'kiekie', 'kiel', 'kier', 'kieran', 'kieselguhr', 'kieserite', 'kiestless', 'kieye', 'kiho', 'kikar', 'kikatsik', 'kikawaeo', 'kike', 'kiki', 'kiki', 'kikki', 'kikongo', 'kiku', 'kikuel', 'kikumon', 'kikuyu', 'kil', 'kiladja', 'kilah', 'kilampere', 'kilan', 'kilbrickenite', 'kildee', 'kilderkin', 'kileh', 'kilerg', 'kiley', 'kilhamite', 'kilhig', 'kiliare', 'kilim', 'kill', 'killable', 'killadar', 'killarney', 'killas', 'killcalf', 'killcrop', 'killcu', 'killdeer', 'killeekillee', 'killeen', 'killer', 'killick', 'killifish', 'killing', 'killingly', 'killingness', 'killinite', 'killogie', 'killweed', 'killwort', 'killy', 'kilmarnock', 'kiln', 'kilneye', 'kilnhole', 'kilnman', 'kilnrib', 'kilo', 'kiloampere', 'kilobar', 'kilocalorie', 'kilocycle', 'kilodyne', 'kilogauss', 'kilogram', 'kilojoule', 'kiloliter', 'kilolumen', 'kilometer', 'kilometrage', 'kilometric', 'kilometrical', 'kiloparsec', 'kilostere', 'kiloton', 'kilovar', 'kilovolt', 'kilowatt', 'kilp', 'kilt', 'kilter', 'kiltie', 'kilting', 'kiluba', 'kim', 'kim', 'kimbang', 'kimberlin', 'kimberlite', 'kimberly', 'kimbundu', 'kimeridgian', 'kimigayo', 'kimmo', 'kimnel', 'kimono', 'kimonoed', 'kin', 'kina', 'kinaesthesia', 'kinaesthesis', 'kinah', 'kinase', 'kinbote', 'kinch', 'kinch', 'kinchin', 'kinchinmort', 'kincob', 'kind', 'kindergarten', 'kindergartener', 'kindergartening', 'kindergartner', 'kinderhook', 'kindheart', 'kindhearted', 'kindheartedly', 'kindheartedness', 'kindle', 'kindler', 'kindlesome', 'kindlily', 'kindliness', 'kindling', 'kindly', 'kindness', 'kindred', 'kindredless', 'kindredly', 'kindredness', 'kindredship', 'kinematic', 'kinematical', 'kinematically', 'kinematics', 'kinematograph', 'kinemometer', 'kineplasty', 'kinepox', 'kinesalgia', 'kinescope', 'kinesiatric', 'kinesiatrics', 'kinesic', 'kinesics', 'kinesimeter', 'kinesiologic', 'kinesiological', 
'kinesiology', 'kinesiometer', 'kinesis', 'kinesitherapy', 'kinesodic', 'kinesthesia', 'kinesthesis', 'kinesthetic', 'kinetic', 'kinetical', 'kinetically', 'kinetics', 'kinetochore', 'kinetogenesis', 'kinetogenetic', 'kinetogenetically', 'kinetogenic', 'kinetogram', 'kinetograph', 'kinetographer', 'kinetographic', 'kinetography', 'kinetomer', 'kinetomeric', 'kinetonema', 'kinetonucleus', 'kinetophone', 'kinetophonograph', 'kinetoplast', 'kinetoscope', 'kinetoscopic', 'king', 'king', 'kingbird', 'kingbolt', 'kingcob', 'kingcraft', 'kingcup', 'kingdom', 'kingdomed', 'kingdomful', 'kingdomless', 'kingdomship', 'kingfish', 'kingfisher', 'kinghead', 'kinghood', 'kinghunter', 'kingless', 'kinglessness', 'kinglet', 'kinglihood', 'kinglike', 'kinglily', 'kingliness', 'kingling', 'kingly', 'kingmaker', 'kingmaking', 'kingpiece', 'kingpin', 'kingrow', 'kingship', 'kingsman', 'kingu', 'kingweed', 'kingwood', 'kinipetu', 'kink', 'kinkable', 'kinkaider', 'kinkajou', 'kinkcough', 'kinkhab', 'kinkhost', 'kinkily', 'kinkiness', 'kinkle', 'kinkled', 'kinkly', 'kinksbush', 'kinky', 'kinless', 'kinnikinnick', 'kino', 'kinofluous', 'kinology', 'kinoplasm', 'kinoplasmic', 'kinorhyncha', 'kinospore', 'kinosternidae', 'kinosternon', 'kinotannic', 'kinsfolk', 'kinship', 'kinsman', 'kinsmanly', 'kinsmanship', 'kinspeople', 'kinswoman', 'kintar', 'kintyre', 'kioea', 'kioko', 'kiosk', 'kiotome', 'kiowa', 'kiowan', 'kioway', 'kip', 'kipage', 'kipchak', 'kipe', 'kiplingese', 'kiplingism', 'kippeen', 'kipper', 'kipperer', 'kippy', 'kipsey', 'kipskin', 'kiranti', 'kirghiz', 'kirghizean', 'kiri', 'kirillitsa', 'kirimon', 'kirk', 'kirk', 'kirker', 'kirkify', 'kirking', 'kirkinhead', 'kirklike', 'kirkman', 'kirktown', 'kirkward', 'kirkyard', 'kirman', 'kirmew', 'kirn', 'kirombo', 'kirsch', 'kirsten', 'kirsty', 'kirtle', 'kirtled', 'kirundi', 'kirve', 'kirver', 'kischen', 'kish', 'kishambala', 'kishen', 'kishon', 'kishy', 'kiskatom', 'kislev', 'kismet', 'kismetic', 'kisra', 'kiss', 'kissability', 
'kissable', 'kissableness', 'kissage', 'kissar', 'kisser', 'kissing', 'kissingly', 'kissproof', 'kisswise', 'kissy', 'kist', 'kistful', 'kiswa', 'kiswahili', 'kit', 'kit', 'kitab', 'kitabis', 'kitalpha', 'kitamat', 'kitan', 'kitar', 'kitcat', 'kitchen', 'kitchendom', 'kitchener', 'kitchenette', 'kitchenful', 'kitchenless', 'kitchenmaid', 'kitchenman', 'kitchenry', 'kitchenward', 'kitchenwards', 'kitchenware', 'kitchenwife', 'kitcheny', 'kite', 'kiteflier', 'kiteflying', 'kith', 'kithe', 'kithless', 'kitish', 'kitkahaxki', 'kitkehahki', 'kitling', 'kitlope', 'kittatinny', 'kittel', 'kitten', 'kittendom', 'kittenhearted', 'kittenhood', 'kittenish', 'kittenishly', 'kittenishness', 'kittenless', 'kittenship', 'kitter', 'kittereen', 'kitthoge', 'kittiwake', 'kittle', 'kittlepins', 'kittles', 'kittlish', 'kittly', 'kittock', 'kittul', 'kitty', 'kitty', 'kittysol', 'kitunahan', 'kiva', 'kiver', 'kivikivi', 'kivu', 'kiwai', 'kiwanian', 'kiwanis', 'kiwi', 'kiwikiwi', 'kiyas', 'kiyi', 'kizil', 'kizilbash', 'kjeldahl', 'kjeldahlization', 'kjeldahlize', 'klafter', 'klaftern', 'klam', 'klamath', 'klan', 'klanism', 'klansman', 'klanswoman', 'klaprotholite', 'klaskino', 'klaudia', 'klaus', 'klavern', 'klaxon', 'klaxon', 'klebsiella', 'kleeneboc', 'kleinian', 'kleistian', 'klendusic', 'klendusity', 'klendusive', 'klepht', 'klephtic', 'klephtism', 'kleptic', 'kleptistic', 'kleptomania', 'kleptomaniac', 'kleptomanist', 'kleptophobia', 'klicket', 'klikitat', 'kling', 'klingsor', 'klip', 'klipbok', 'klipdachs', 'klipdas', 'klipfish', 'klippe', 'klippen', 'klipspringer', 'klister', 'klockmannite', 'klom', 'klondike', 'klondiker', 'klootchman', 'klop', 'klops', 'klosh', 'kluxer', 'klystron', 'kmet', 'knab', 'knabble', 'knack', 'knackebrod', 'knacker', 'knackery', 'knacky', 'knag', 'knagged', 'knaggy', 'knap', 'knapbottle', 'knape', 'knappan', 'knapper', 'knapper', 'knappish', 'knappishly', 'knapsack', 'knapsacked', 'knapsacking', 'knapweed', 'knar', 'knark', 'knarred', 'knarry', 
'knautia', 'knave', 'knavery', 'knaveship', 'knavess', 'knavish', 'knavishly', 'knavishness', 'knawel', 'knead', 'kneadability', 'kneadable', 'kneader', 'kneading', 'kneadingly', 'knebelite', 'knee', 'kneebrush', 'kneecap', 'kneed', 'kneehole', 'kneel', 'kneeler', 'kneelet', 'kneeling', 'kneelingly', 'kneepad', 'kneepan', 'kneepiece', 'kneestone', 'kneiffia', 'kneippism', 'knell', 'knelt', 'knesset', 'knet', 'knew', 'knez', 'knezi', 'kniaz', 'kniazi', 'knick', 'knicker', 'knickerbocker', 'knickerbockered', 'knickerbockers', 'knickered', 'knickers', 'knickknack', 'knickknackatory', 'knickknacked', 'knickknackery', 'knickknacket', 'knickknackish', 'knickknacky', 'knickpoint', 'knife', 'knifeboard', 'knifeful', 'knifeless', 'knifelike', 'knifeman', 'knifeproof', 'knifer', 'knifesmith', 'knifeway', 'knight', 'knightage', 'knightess', 'knighthead', 'knighthood', 'knightia', 'knightless', 'knightlihood', 'knightlike', 'knightliness', 'knightling', 'knightly', 'knightship', 'knightswort', 'kniphofia', 'knisteneaux', 'knit', 'knitback', 'knitch', 'knitted', 'knitter', 'knitting', 'knittle', 'knitwear', 'knitweed', 'knitwork', 'knived', 'knivey', 'knob', 'knobbed', 'knobber', 'knobbiness', 'knobble', 'knobbler', 'knobbly', 'knobby', 'knobkerrie', 'knoblike', 'knobstick', 'knobstone', 'knobular', 'knobweed', 'knobwood', 'knock', 'knockabout', 'knockdown', 'knockemdown', 'knocker', 'knocking', 'knockless', 'knockoff', 'knockout', 'knockstone', 'knockup', 'knoll', 'knoller', 'knolly', 'knop', 'knopite', 'knopped', 'knopper', 'knoppy', 'knopweed', 'knorhaan', 'knorria', 'knosp', 'knosped', 'knossian', 'knot', 'knotberry', 'knotgrass', 'knothole', 'knothorn', 'knotless', 'knotlike', 'knotroot', 'knotted', 'knotter', 'knottily', 'knottiness', 'knotting', 'knotty', 'knotweed', 'knotwork', 'knotwort', 'knout', 'know', 'knowability', 'knowable', 'knowableness', 'knowe', 'knower', 'knowing', 'knowingly', 'knowingness', 'knowledge', 'knowledgeable', 'knowledgeableness', 
'knowledgeably', 'knowledged', 'knowledgeless', 'knowledgement', 'knowledging', 'known', 'knowperts', 'knoxian', 'knoxville', 'knoxvillite', 'knub', 'knubbly', 'knubby', 'knublet', 'knuckle', 'knucklebone', 'knuckled', 'knuckler', 'knuckling', 'knuckly', 'knuclesome', 'knudsen', 'knur', 'knurl', 'knurled', 'knurling', 'knurly', 'knut', 'knut', 'knute', 'knutty', 'knyaz', 'knyazi', 'ko', 'ko', 'koa', 'koae', 'koala', 'koali', 'koasati', 'kob', 'koban', 'kobellite', 'kobi', 'kobird', 'kobold', 'kobong', 'kobu', 'kobus', 'koch', 'kochab', 'kochia', 'kochliarion', 'koda', 'kodagu', 'kodak', 'kodak', 'kodaker', 'kodakist', 'kodakry', 'kodashim', 'kodro', 'kodurite', 'koeberlinia', 'koeberliniaceae', 'koeberliniaceous', 'koechlinite', 'koeksotenok', 'koel', 'koellia', 'koelreuteria', 'koenenite', 'koeri', 'koff', 'koft', 'koftgar', 'koftgari', 'koggelmannetje', 'kogia', 'kohathite', 'koheleth', 'kohemp', 'kohen', 'kohistani', 'kohl', 'kohl', 'kohlan', 'kohlrabi', 'kohua', 'koi', 'koiari', 'koibal', 'koil', 'koila', 'koilanaglyphic', 'koilon', 'koimesis', 'koine', 'koine', 'koinon', 'koinonia', 'koipato', 'koitapu', 'kojang', 'kojiki', 'kokako', 'kokam', 'kokan', 'kokerboom', 'kokil', 'kokio', 'koklas', 'koklass', 'koko', 'koko', 'kokoon', 'kokoona', 'kokoromiko', 'kokowai', 'kokra', 'koksaghyz', 'koku', 'kokum', 'kokumin', 'kokumingun', 'kol', 'kola', 'kolach', 'kolarian', 'koldaji', 'kolea', 'koleroga', 'kolhoz', 'koli', 'kolinski', 'kolinsky', 'kolis', 'kolkhos', 'kolkhoz', 'kolkka', 'kollast', 'kollaster', 'koller', 'kollergang', 'kolo', 'kolobion', 'kolobus', 'kolokolo', 'kolsun', 'koltunna', 'koltunnor', 'koluschan', 'kolush', 'komati', 'komatik', 'kombu', 'kome', 'komi', 'kominuter', 'kommetje', 'kommos', 'komondor', 'kompeni', 'komsomol', 'kon', 'kona', 'konak', 'konariot', 'konde', 'kongo', 'kongoese', 'kongolese', 'kongoni', 'kongsbergite', 'kongu', 'konia', 'koniaga', 'koniga', 'konimeter', 'koninckite', 'konini', 'koniology', 'koniscope', 'konjak', 'konkani', 
'konomihu', 'konrad', 'konstantin', 'konstantinos', 'kontakion', 'konyak', 'kooka', 'kookaburra', 'kookeree', 'kookery', 'kookri', 'koolah', 'kooletah', 'kooliman', 'koolokamba', 'koolooly', 'koombar', 'koomkie', 'koorg', 'kootcha', 'kootenay', 'kop', 'kopagmiut', 'kopeck', 'koph', 'kopi', 'koppa', 'koppen', 'koppite', 'koprino', 'kor', 'kora', 'kora', 'koradji', 'korah', 'korahite', 'korahitic', 'korait', 'korakan', 'koran', 'korana', 'koranic', 'koranist', 'korari', 'kore', 'kore', 'korean', 'korec', 'koreci', 'koreish', 'koreishite', 'korero', 'koreshan', 'koreshanity', 'kori', 'korimako', 'korin', 'kornephorus', 'kornerupine', 'kornskeppa', 'kornskeppur', 'korntonde', 'korntonder', 'korntunna', 'korntunnur', 'koroa', 'koromika', 'koromiko', 'korona', 'korova', 'korrel', 'korrigum', 'korumburra', 'koruna', 'korwa', 'kory', 'koryak', 'korymboi', 'korymbos', 'korzec', 'kos', 'kosalan', 'koschei', 'kosher', 'kosimo', 'kosin', 'kosmokrator', 'koso', 'kosong', 'kosotoxin', 'kossaean', 'kossean', 'kosteletzkya', 'koswite', 'kota', 'kotal', 'kotar', 'koto', 'kotoko', 'kotschubeite', 'kottigite', 'kotuku', 'kotukutuku', 'kotwal', 'kotwalee', 'kotyle', 'kotylos', 'kou', 'koulan', 'koungmiut', 'kouza', 'kovil', 'kowagmiut', 'kowhai', 'kowtow', 'koyan', 'kozo', 'kpuesi', 'kra', 'kra', 'kraal', 'kraft', 'krag', 'kragerite', 'krageroite', 'krait', 'kraken', 'krakowiak', 'kral', 'krama', 'krama', 'krameria', 'krameriaceae', 'krameriaceous', 'kran', 'krantzite', 'krapina', 'kras', 'krasis', 'kratogen', 'kratogenic', 'kraunhia', 'kraurite', 'kraurosis', 'kraurotic', 'krausen', 'krausite', 'kraut', 'kreis', 'kreistag', 'kreistle', 'kreittonite', 'krelos', 'kremersite', 'kremlin', 'krems', 'kreng', 'krennerite', 'krepi', 'kreplech', 'kreutzer', 'kriegspiel', 'krieker', 'krigia', 'krimmer', 'krina', 'kriophoros', 'kris', 'krishna', 'krishnaism', 'krishnaist', 'krishnaite', 'krishnaitic', 'kristen', 'kristi', 'kristian', 'kristin', 'kristinaux', 'krisuvigite', 'kritarchy', 
'krithia', 'kriton', 'kritrima', 'krobyloi', 'krobylos', 'krocket', 'krohnkite', 'krome', 'kromeski', 'kromogram', 'kromskop', 'krona', 'krone', 'kronen', 'kroner', 'kronion', 'kronor', 'kronur', 'kroo', 'kroon', 'krosa', 'krouchka', 'kroushka', 'kru', 'krugerism', 'krugerite', 'kruman', 'krummhorn', 'kryokonite', 'krypsis', 'kryptic', 'krypticism', 'kryptocyanine', 'kryptol', 'kryptomere', 'krypton', 'krzysztof', 'kshatriya', 'kshatriyahood', 'kua', 'kuan', 'kuan', 'kuar', 'kuba', 'kuba', 'kubachi', 'kubanka', 'kubba', 'kubera', 'kubuklion', 'kuchean', 'kuchen', 'kudize', 'kudos', 'kudrun', 'kudu', 'kudzu', 'kuehneola', 'kuei', 'kufic', 'kuge', 'kugel', 'kuhnia', 'kui', 'kuichua', 'kuki', 'kukoline', 'kukri', 'kuku', 'kukui', 'kukulcan', 'kukupa', 'kukuruku', 'kula', 'kulack', 'kulah', 'kulah', 'kulaite', 'kulak', 'kulakism', 'kulanapan', 'kulang', 'kuldip', 'kuli', 'kulimit', 'kulkarni', 'kullaite', 'kullani', 'kulm', 'kulmet', 'kulturkampf', 'kulturkreis', 'kuman', 'kumbi', 'kumhar', 'kumiss', 'kummel', 'kumni', 'kumquat', 'kumrah', 'kumyk', 'kunai', 'kunbi', 'kundry', 'kuneste', 'kung', 'kunk', 'kunkur', 'kunmiut', 'kunzite', 'kuomintang', 'kupfernickel', 'kupfferite', 'kuphar', 'kupper', 'kuranko', 'kurbash', 'kurchatovium', 'kurchicine', 'kurchine', 'kurd', 'kurdish', 'kurdistan', 'kurgan', 'kuri', 'kurilian', 'kurku', 'kurmburra', 'kurmi', 'kuroshio', 'kurrajong', 'kurt', 'kurtosis', 'kuruba', 'kurukh', 'kuruma', 'kurumaya', 'kurumba', 'kurung', 'kurus', 'kurvey', 'kurveyor', 'kusa', 'kusam', 'kusan', 'kusha', 'kushshu', 'kusimansel', 'kuskite', 'kuskos', 'kuskus', 'kuskwogmiut', 'kustenau', 'kusti', 'kusum', 'kusum', 'kutcha', 'kutchin', 'kutenai', 'kuttab', 'kuttar', 'kuttaur', 'kuvasz', 'kuvera', 'kvass', 'kvint', 'kvinter', 'kwakiutl', 'kwamme', 'kwan', 'kwannon', 'kwapa', 'kwarta', 'kwarterka', 'kwazoku', 'kyack', 'kyah', 'kyar', 'kyat', 'kyaung', 'kybele', 'kyklopes', 'kyklops', 'kyl', 'kyle', 'kyle', 'kylite', 'kylix', 'kylo', 'kymation', 
'kymatology', 'kymbalon', 'kymogram', 'kymograph', 'kymographic', 'kynurenic', 'kynurine', 'kyphoscoliosis', 'kyphoscoliotic', 'kyphosidae', 'kyphosis', 'kyphotic', 'kyrie', 'kyrine', 'kyschtymite', 'kyte', 'kyu', 'kyung', 'kyurin', 'kyurinish'];
| 13,076.5
| 26,152
| 0.651015
| 2,282
| 26,153
| 7.460999
| 0.982033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087218
| 26,153
| 1
| 26,153
| 26,153
| 0.713221
| 0
| 0
| 0
| 0
| 0
| 0.650786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
60e6008a0ea47071e572e2098c826d44ff862e6c
| 171
|
py
|
Python
|
mod_kvm/forms.py
|
T1duS/ccextractor-web
|
73e704640d13c9b5d84ae2e8bc5bdcf352caaa75
|
[
"MIT"
] | 19
|
2018-05-18T13:55:54.000Z
|
2019-10-26T10:08:45.000Z
|
mod_kvm/forms.py
|
T1duS/ccextractor-web
|
73e704640d13c9b5d84ae2e8bc5bdcf352caaa75
|
[
"MIT"
] | 23
|
2018-06-04T07:10:15.000Z
|
2019-10-27T18:45:21.000Z
|
mod_kvm/forms.py
|
T1duS/ccextractor-web
|
73e704640d13c9b5d84ae2e8bc5bdcf352caaa75
|
[
"MIT"
] | 21
|
2018-07-07T07:54:12.000Z
|
2020-11-24T14:35:27.000Z
|
"""
ccextractor-web | forms.py
Author : Saurabh Shrivastava
Email : saurabh.shrivastava54+ccextractorweb[at]gmail.com
Link : https://github.com/saurabhshri
"""
| 19
| 60
| 0.719298
| 19
| 171
| 6.473684
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.152047
| 171
| 8
| 61
| 21.375
| 0.834483
| 0.94152
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
60eab9810e9971ebc6e1640abf5bd26c0f7d8b50
| 54
|
py
|
Python
|
wflow-py/wflow/bmimodel.py
|
edwinkost/wflow
|
ee9291d4b556d7b61f7f13bcb972774be9a16cec
|
[
"MIT"
] | null | null | null |
wflow-py/wflow/bmimodel.py
|
edwinkost/wflow
|
ee9291d4b556d7b61f7f13bcb972774be9a16cec
|
[
"MIT"
] | null | null | null |
wflow-py/wflow/bmimodel.py
|
edwinkost/wflow
|
ee9291d4b556d7b61f7f13bcb972774be9a16cec
|
[
"MIT"
] | null | null | null |
# Simple script to link to python wflow bmi models
| 10.8
| 50
| 0.740741
| 9
| 54
| 4.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240741
| 54
| 4
| 51
| 13.5
| 0.97561
| 0.888889
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
60fc4076cf8a1b43ce9adc265eba2909a91456bf
| 33
|
py
|
Python
|
ox_herd/core/__init__.py
|
empower-capital/ox_herd
|
2aa77db945296c152dc8d420f42a6d6455d514fa
|
[
"BSD-2-Clause"
] | 1
|
2021-11-28T20:35:31.000Z
|
2021-11-28T20:35:31.000Z
|
ox_herd/core/__init__.py
|
empower-capital/ox_herd
|
2aa77db945296c152dc8d420f42a6d6455d514fa
|
[
"BSD-2-Clause"
] | 5
|
2017-11-21T00:21:13.000Z
|
2021-06-30T19:47:54.000Z
|
ox_herd/core/__init__.py
|
empower-capital/ox_herd
|
2aa77db945296c152dc8d420f42a6d6455d514fa
|
[
"BSD-2-Clause"
] | 4
|
2021-12-17T10:58:15.000Z
|
2021-12-23T14:38:40.000Z
|
"""Core modules for ox_herd.
"""
| 11
| 28
| 0.636364
| 5
| 33
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 33
| 2
| 29
| 16.5
| 0.714286
| 0.757576
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
71cd1afdacfa6b319174fec97a3ff7763260d03b
| 141
|
py
|
Python
|
annotateapp/apps.py
|
joerodd/POnSS
|
8f0317100dcd3515f24e321747e9e266760b19f0
|
[
"MIT"
] | 1
|
2020-09-02T00:57:02.000Z
|
2020-09-02T00:57:02.000Z
|
annotateapp/apps.py
|
joerodd/POnSS
|
8f0317100dcd3515f24e321747e9e266760b19f0
|
[
"MIT"
] | null | null | null |
annotateapp/apps.py
|
joerodd/POnSS
|
8f0317100dcd3515f24e321747e9e266760b19f0
|
[
"MIT"
] | 1
|
2021-04-01T17:39:28.000Z
|
2021-04-01T17:39:28.000Z
|
from __future__ import unicode_literals
from django.apps import AppConfig
class AnnotateApp2Config(AppConfig):
name = 'annotate_app2'
| 17.625
| 39
| 0.808511
| 16
| 141
| 6.75
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0.141844
| 141
| 7
| 40
| 20.142857
| 0.876033
| 0
| 0
| 0
| 0
| 0
| 0.092199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
e0973d9a0b11ebb30d69d34e6189a759ca72c96e
| 251
|
py
|
Python
|
hardware/opentrons_hardware/firmware_bindings/message.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | 235
|
2017-10-27T20:37:27.000Z
|
2022-03-30T14:09:49.000Z
|
hardware/opentrons_hardware/firmware_bindings/message.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | 8,425
|
2017-10-26T15:25:43.000Z
|
2022-03-31T23:54:26.000Z
|
hardware/opentrons_hardware/firmware_bindings/message.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | 130
|
2017-11-09T21:02:37.000Z
|
2022-03-15T18:01:24.000Z
|
"""Can message."""
from __future__ import annotations
from dataclasses import dataclass
from .arbitration_id import ArbitrationId
@dataclass(frozen=True)
class CanMessage:
"""A can message."""
arbitration_id: ArbitrationId
data: bytes
| 17.928571
| 41
| 0.752988
| 28
| 251
| 6.535714
| 0.642857
| 0.10929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159363
| 251
| 13
| 42
| 19.307692
| 0.867299
| 0.10757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.428571
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
e0c26560e36d59fb0f99a2f1310d2d64b6afc1bb
| 102
|
py
|
Python
|
Codewars/8kyu/find-the-position/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/8kyu/find-the-position/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/8kyu/find-the-position/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.6.0
position = lambda alphabet: f'Position of alphabet: {ord(alphabet) - ord("a") + 1}'
| 25.5
| 83
| 0.647059
| 16
| 102
| 4.125
| 0.75
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047059
| 0.166667
| 102
| 3
| 84
| 34
| 0.729412
| 0.137255
| 0
| 0
| 0
| 0
| 0.604651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e0c392565b189a84b1cb5ab883a9dc67a421970c
| 15,753
|
py
|
Python
|
tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py
|
quantummind/quantum
|
fd952d0362c5445eef0da4437fb3e5ebb16b7948
|
[
"Apache-2.0"
] | 2
|
2021-09-24T09:41:47.000Z
|
2021-10-04T20:55:09.000Z
|
tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py
|
quantummind/quantum
|
fd952d0362c5445eef0da4437fb3e5ebb16b7948
|
[
"Apache-2.0"
] | 1
|
2021-11-15T04:47:04.000Z
|
2021-11-15T04:47:04.000Z
|
tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py
|
quantummind/quantum
|
fd952d0362c5445eef0da4437fb3e5ebb16b7948
|
[
"Apache-2.0"
] | 1
|
2021-05-10T09:12:40.000Z
|
2021-05-10T09:12:40.000Z
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target noisy expectation calculation."""
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import cirq
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op
from tensorflow_quantum.python import util
class NoisyExpectationCalculationTest(tf.test.TestCase, parameterized.TestCase):
"""Tests tfq.noise.expectation."""
def test_noisy_expectation_inputs(self):
"""Make sure noisy expectation op fails gracefully on bad inputs."""
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size, include_channels=True)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)
num_samples = [[10]] * batch_size
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'programs must be rank 1'):
# Circuit tensor has too many dimensions.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor([circuit_batch]), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_names must be rank 1.'):
# symbol_names tensor has too many dimensions.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), np.array([symbol_names]),
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too many dimensions.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
np.array([symbol_values_array]),
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too few dimensions.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[0],
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too few dimensions.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
util.convert_to_tensor(list(pauli_sums)), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too many dimensions.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
[util.convert_to_tensor([[x] for x in pauli_sums])],
num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'num_samples must be rank 2'):
# num_samples tensor has the wrong shape.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
[num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'num_samples must be rank 2'):
# num_samples tensor has the wrong shape.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
num_samples[0])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# circuit tensor has the right type but invalid values.
noisy_sampled_expectation_op.sampled_expectation(
['junk'] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type but invalid values.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), ['junk'],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'qubits not found in circuit'):
# pauli_sums tensor has the right type but invalid values.
new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]
new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in new_pauli_sums]),
num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# pauli_sums tensor has the right type but invalid values 2.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [['junk']] * batch_size, num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# circuits tensor has the wrong type.
noisy_sampled_expectation_op.sampled_expectation(
[1.0] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# symbol_names tensor has the wrong type.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), [0.1234],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
# symbol_values tensor has the wrong type.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
[['junk']] * batch_size,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# pauli_sums tensor has the wrong type.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [[1.0]] * batch_size, num_samples)
with self.assertRaisesRegex(TypeError, 'missing'):
# we are missing an argument.
# pylint: disable=no-value-for-parameter
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, num_samples)
# pylint: enable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'positional arguments'):
# pylint: disable=too-many-function-args
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), [],
num_samples)
# pylint: enable=too-many-function-args
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong op size.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor([cirq.Circuit()]), symbol_names,
symbol_values_array.astype(np.float64),
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'greater than 0'):
# pylint: disable=too-many-function-args
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
[[-1]] * batch_size)
# pylint: enable=too-many-function-args
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong symbol_values size.
noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[:int(batch_size * 0.5)],
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
@parameterized.parameters([
{
'n_qubits': 13,
'batch_size': 1,
'noisy': False
}, # ComputeLarge.
{
'n_qubits': 6,
'batch_size': 25,
'noisy': False
}, # ComputeSmall.
{
'n_qubits': 6,
'batch_size': 10,
'noisy': True
}, # ComputeSmall.
{
'n_qubits': 8,
'batch_size': 1,
'noisy': True
} # ComputeLarge.
])
def test_simulate_consistency(self, batch_size, n_qubits, noisy):
"""Test consistency with batch_util.py simulation."""
symbol_names = ['alpha', 'beta']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size, include_channels=noisy)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums1 = util.random_pauli_sums(qubits, 3, batch_size)
pauli_sums2 = util.random_pauli_sums(qubits, 3, batch_size)
batch_pauli_sums = [[x, y] for x, y in zip(pauli_sums1, pauli_sums2)]
num_samples = [[10000] * 2] * batch_size
op_exps = noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
util.convert_to_tensor(batch_pauli_sums), num_samples)
cirq_exps = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, batch_pauli_sums,
cirq.DensityMatrixSimulator() if noisy else cirq.Simulator())
tol = 0.5
self.assertAllClose(cirq_exps, op_exps, atol=tol, rtol=tol)
@parameterized.parameters([{
'channel': x
} for x in util.get_supported_channels()])
def test_single_channel(self, channel):
"""Individually test adding just a single channel type to circuits."""
symbol_names = []
batch_size = 5
n_qubits = 6
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_circuit_resolver_batch(
qubits, batch_size, include_channels=False)
for i in range(batch_size):
circuit_batch[i] = circuit_batch[i] + channel.on_each(*qubits)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums1 = util.random_pauli_sums(qubits, 3, batch_size)
pauli_sums2 = util.random_pauli_sums(qubits, 3, batch_size)
batch_pauli_sums = [[x, y] for x, y in zip(pauli_sums1, pauli_sums2)]
num_samples = [[20000] * 2] * batch_size
op_exps = noisy_sampled_expectation_op.sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
util.convert_to_tensor(batch_pauli_sums), num_samples)
cirq_exps = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, batch_pauli_sums,
cirq.DensityMatrixSimulator())
self.assertAllClose(cirq_exps, op_exps, atol=0.35, rtol=0.35)
def test_correctness_empty(self):
"""Test the expectation for empty circuits."""
empty_circuit = util.convert_to_tensor([cirq.Circuit()])
empty_symbols = tf.convert_to_tensor([], dtype=tf.dtypes.string)
empty_values = tf.convert_to_tensor([[]])
empty_paulis = tf.convert_to_tensor([[]], dtype=tf.dtypes.string)
empty_n_samples = tf.convert_to_tensor([[]], dtype=tf.int32)
out = noisy_sampled_expectation_op.sampled_expectation(
empty_circuit, empty_symbols, empty_values, empty_paulis,
empty_n_samples)
expected = np.array([[]], dtype=np.complex64)
self.assertAllClose(out, expected)
def test_correctness_no_circuit(self):
"""Test the correctness with the empty tensor."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
empty_n_samples = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)
out = noisy_sampled_expectation_op.sampled_expectation(
empty_circuit, empty_symbols, empty_values, empty_paulis,
empty_n_samples)
self.assertShapeEqual(np.zeros((0, 0)), out)
if __name__ == "__main__":
tf.test.main()
| 46.606509
| 80
| 0.629213
| 1,798
| 15,753
| 5.214127
| 0.13515
| 0.09792
| 0.0736
| 0.08512
| 0.74592
| 0.727147
| 0.711893
| 0.704213
| 0.67872
| 0.66208
| 0
| 0.009739
| 0.282994
| 15,753
| 337
| 81
| 46.744807
| 0.820274
| 0.134197
| 0
| 0.599174
| 0
| 0
| 0.041
| 0
| 0
| 0
| 0
| 0
| 0.103306
| 1
| 0.020661
| false
| 0
| 0.028926
| 0
| 0.053719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e0f29fdf785e76fa0c82155c03ab6a73bc274610
| 7,530
|
py
|
Python
|
followers/tests/test_views.py
|
HanSaloZu/drf-blog-api
|
966776e59ed7699a9e94aeb85fdd6785c2532a3a
|
[
"MIT"
] | null | null | null |
followers/tests/test_views.py
|
HanSaloZu/drf-blog-api
|
966776e59ed7699a9e94aeb85fdd6785c2532a3a
|
[
"MIT"
] | null | null | null |
followers/tests/test_views.py
|
HanSaloZu/drf-blog-api
|
966776e59ed7699a9e94aeb85fdd6785c2532a3a
|
[
"MIT"
] | null | null | null |
from urllib.parse import urlencode
from django.urls import reverse
from utils.tests import APIViewTestCase, ListAPIViewTestCase
from ..services import follow, is_following
class FollowersListAPIViewTestCase(ListAPIViewTestCase):
def url(self, parameters={}):
url = reverse("followers_list")
if parameters:
url += "?" + urlencode(parameters)
return url
def setUp(self):
self.first_user = self.UserModel.objects.create_user(
login="FirstUser", email="first@gmail.com", password="pass")
auth_credentials = self.generate_jwt_auth_credentials(self.first_user)
self.client.credentials(HTTP_AUTHORIZATION=auth_credentials)
self.second_user = self.UserModel.objects.create_user(
login="SecondUser", email="second@gmail.com", password="pass")
self.third_user = self.UserModel.objects.create_user(
login="ThirdUser", email="third@gmail.com", password="pass")
follow(self.second_user, self.first_user)
follow(self.third_user, self.first_user)
def test_request_by_unauthenticated_client(self):
self.client.credentials()
response = self.client.get(self.url())
self.unauthorized_client_error_response_test(response)
def test_followers_list(self):
response = self.client.get(self.url())
self.check_common_details_of_list_view_response(
response,
total_items=2,
page_size=2
)
def test_followers_list_with_q_parameter(self):
response = self.client.get(self.url({"q": "SecondUser"}))
self.check_common_details_of_list_view_response(
response,
total_items=1,
page_size=1
)
self.assertEqual(response.data["items"]
[0]["id"], self.second_user.id)
class FollowingListAPIViewTestCase(ListAPIViewTestCase):
url = reverse("following_list")
def setUp(self):
self.first_user = self.UserModel.objects.create_user(
login="FirstUser", email="first@gmail.com", password="pass")
auth_credentials = self.generate_jwt_auth_credentials(self.first_user)
self.client.credentials(HTTP_AUTHORIZATION=auth_credentials)
self.second_user = self.UserModel.objects.create_user(
login="SecondUser", email="second@gmail.com", password="pass")
follow(self.first_user, self.second_user)
def test_request_by_unauthenticated_client(self):
self.client.credentials()
response = self.client.get(self.url)
self.unauthorized_client_error_response_test(response)
def test_following_list(self):
response = self.client.get(self.url)
self.check_common_details_of_list_view_response(
response,
total_items=1,
page_size=1
)
self.assertEqual(response.data["items"]
[0]["id"], self.second_user.id)
class FollowingAPIViewTestCase(APIViewTestCase):
def url(self, kwargs):
return reverse("following", kwargs=kwargs)
def setUp(self):
self.first_user = self.UserModel.objects.create_user(
login="FirstUser", email="first_user_@gmail.com", password="pass")
auth_credentials = self.generate_jwt_auth_credentials(self.first_user)
self.client.credentials(HTTP_AUTHORIZATION=auth_credentials)
self.second_user = self.UserModel.objects.create_user(
login="SecondUser", email="second_user_@gmail.com", password="pass")
def test_request_by_unauthenticated_client(self):
self.client.credentials()
response = self.client.get(self.url({"login": self.second_user.login}))
self.unauthorized_client_error_response_test(response)
def test_follow(self):
"""
A valid follow request should return isFollowed: True
"""
response = self.client.put(self.url({"login": self.second_user.login}))
self.assertEqual(response.status_code,
self.http_status.HTTP_200_OK)
self.assertIs(is_following(self.first_user, self.second_user), True)
self.assertIs(response.data["isFollowed"], True)
def test_self_follow(self):
"""
Self follow should return a 400 error
"""
response = self.client.put(self.url({"login": self.first_user.login}))
self.client_error_response_test(
response,
messages=["You cannot follow yourself"]
)
self.assertIs(is_following(self.first_user, self.first_user), False)
def test_double_follow(self):
"""
Duplicate follow should return isFollowed: True
"""
follow(self.first_user, self.second_user)
response = self.client.put(self.url({"login": self.second_user.login}))
self.assertEqual(response.status_code,
self.http_status.HTTP_200_OK)
self.assertIs(is_following(self.first_user, self.second_user), True)
self.assertIs(response.data["isFollowed"], True)
def test_follow_with_invalid_login(self):
"""
A follow request with an invalid login should return a 404 error
"""
response = self.client.put(self.url({"login": "login"}))
self.client_error_response_test(
response,
code="notFound",
status=self.http_status.HTTP_404_NOT_FOUND,
messages=["Invalid login, user is not found"]
)
def test_unfollow(self):
"""
A valid unfollow request should return isFollowed: False
"""
follow(self.first_user, self.second_user)
response = self.client.delete(
self.url({"login": self.second_user.login}))
self.assertEqual(response.status_code,
self.http_status.HTTP_200_OK)
self.assertIs(is_following(self.first_user, self.second_user), False)
self.assertIs(response.data["isFollowed"], False)
def test_unfollow_not_followed_user(self):
"""
Unfollowing from an unfollowed user should return a 404 error
"""
response = self.client.delete(
self.url({"login": self.second_user.login}))
self.client_error_response_test(
response,
code="notFound",
status=self.http_status.HTTP_404_NOT_FOUND,
messages=["You are not yet followed this user"]
)
self.assertIs(is_following(self.first_user, self.second_user), False)
def test_is_following(self):
"""
If the user is being followed, isFollowed should be True,
otherwise False
"""
response = self.client.get(self.url({"login": self.second_user.login}))
self.assertEqual(response.status_code, self.http_status.HTTP_200_OK)
self.assertIs(response.data["isFollowed"], False)
follow(self.first_user, self.second_user)
response = self.client.get(self.url({"login": self.second_user.login}))
self.assertEqual(response.status_code, self.http_status.HTTP_200_OK)
self.assertIs(response.data["isFollowed"], True)
def test_is_following_with_invalid_login(self):
response = self.client.get(self.url({"login": "invalid"}))
self.client_error_response_test(
response,
code="notFound",
status=self.http_status.HTTP_404_NOT_FOUND,
messages=["Invalid login, user is not found"]
)
| 35.518868
| 80
| 0.652324
| 876
| 7,530
| 5.377854
| 0.13242
| 0.053067
| 0.062407
| 0.054129
| 0.761622
| 0.755678
| 0.741032
| 0.711951
| 0.66695
| 0.656336
| 0
| 0.00717
| 0.240637
| 7,530
| 211
| 81
| 35.687204
| 0.816719
| 0.052722
| 0
| 0.657143
| 0
| 0
| 0.077122
| 0.006176
| 0
| 0
| 0
| 0
| 0.121429
| 1
| 0.135714
| false
| 0.05
| 0.028571
| 0.007143
| 0.207143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e0f7e58b1ba501b8d6abb6593275dfb88668af75
| 7,682
|
py
|
Python
|
ht_web_service/apps/ht/migrations/0004_auto_20180330_1138.py
|
Kit-Angelov/ht-web-service
|
9eabc0696634c2e5eba2c9789cc32f548d84cccb
|
[
"MIT"
] | 1
|
2018-11-09T07:31:41.000Z
|
2018-11-09T07:31:41.000Z
|
ht_web_service/apps/ht/migrations/0004_auto_20180330_1138.py
|
Kit-Angelov/ht-web-service
|
9eabc0696634c2e5eba2c9789cc32f548d84cccb
|
[
"MIT"
] | 5
|
2020-06-05T17:15:40.000Z
|
2021-09-07T23:39:19.000Z
|
ht_web_service/apps/ht/migrations/0004_auto_20180330_1138.py
|
Kit-Angelov/ht-web-service
|
9eabc0696634c2e5eba2c9789cc32f548d84cccb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-30 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ht', '0003_auto_20180329_1022'),
]
operations = [
migrations.AddField(
model_name='feature',
name='prov_doc_FDA',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Предоставление документов для подготовки распоряжения ФДА о предоставлении в аренду '),
),
migrations.AlterField(
model_name='feature',
name='cadastral_num_formed',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Кадастровый номер образованного объекта недвижимости'),
),
migrations.AlterField(
model_name='feature',
name='cadastral_num_origin_14',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Кадастровый номер исходного объекта недвижимости ПМТ 2014'),
),
migrations.AlterField(
model_name='feature',
name='cadastral_num_origin_17',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Кадастровый номер исходного объкта недвижимости ПМТ 2017'),
),
migrations.AlterField(
model_name='feature',
name='category_origin',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Категория исходного объекта недвижимости'),
),
migrations.AlterField(
model_name='feature',
name='comments',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Комментарий'),
),
migrations.AlterField(
model_name='feature',
name='contacts_holder',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Контакты правообладателя'),
),
migrations.AlterField(
model_name='feature',
name='district',
field=models.CharField(blank=True, max_length=56, null=True, verbose_name='Район'),
),
migrations.AlterField(
model_name='feature',
name='event',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Мероприятие'),
),
migrations.AlterField(
model_name='feature',
name='form_area',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Формирование земельных участков (кадастровый учет)'),
),
migrations.AlterField(
model_name='feature',
name='obj_costat',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Стоимость объектов недвижимости'),
),
migrations.AlterField(
model_name='feature',
name='obj_type_origin',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='ВРИ исходного объекта недвижимости'),
),
migrations.AlterField(
model_name='feature',
name='offer_to_holdering',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Направление оферты правообладателю'),
),
migrations.AlterField(
model_name='feature',
name='order',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='№ п/п'),
),
migrations.AlterField(
model_name='feature',
name='origin_area_17',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Площадь исходного объекта недвижимости, кв.м. ПМТ 2017'),
),
migrations.AlterField(
model_name='feature',
name='piquetu',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Пикет'),
),
migrations.AlterField(
model_name='feature',
name='plot',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='№ зу ПМТ'),
),
migrations.AlterField(
model_name='feature',
name='pre_doc_transfer_type',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Подготовка документов для перевода (отнесения) в категорию земель транспорта и/или изменение (установление) ВРИ'),
),
migrations.AlterField(
model_name='feature',
name='pre_lang_plan',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Подготовка межевого плана и передача его на кадастровый учет'),
),
migrations.AlterField(
model_name='feature',
name='provision_doc',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Предоставление документов для подготовки распоряжения ФДА об изъятии'),
),
migrations.AlterField(
model_name='feature',
name='requisites_agree_vac',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Реквизиты соглашения об изъятии '),
),
migrations.AlterField(
model_name='feature',
name='requisites_assess',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Реквизиты отчета об оценке'),
),
migrations.AlterField(
model_name='feature',
name='requisites_dir_vac',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Реквизиты распоряжения об изъятии'),
),
migrations.AlterField(
model_name='feature',
name='requisites_lease',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Реквизиты распоряжения о предоставлении в аренду'),
),
migrations.AlterField(
model_name='feature',
name='requisites_lease_agree',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Реквизиты договора аренды'),
),
migrations.AlterField(
model_name='feature',
name='rights_14',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Права ПМТ 2014'),
),
migrations.AlterField(
model_name='feature',
name='rights_17',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Права ПМТ 2017'),
),
migrations.AlterField(
model_name='feature',
name='rights_august_14',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Право после августа 2014'),
),
migrations.AlterField(
model_name='feature',
name='status_area',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Статус участка'),
),
migrations.AlterField(
model_name='feature',
name='vac_area_14',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Площадь подлежащая изъятию, кв.м. ПМТ 2014'),
),
migrations.AlterField(
model_name='feature',
name='vac_area_17',
field=models.CharField(blank=True, max_length=256, null=True, verbose_name='Площадь подлежащая изъятию, кв.м. ПМТ 2017'),
),
]
| 44.923977
| 202
| 0.623145
| 815
| 7,682
| 5.70184
| 0.196319
| 0.060039
| 0.106736
| 0.133419
| 0.819238
| 0.819238
| 0.758984
| 0.68711
| 0.591564
| 0.499247
| 0
| 0.030813
| 0.264905
| 7,682
| 170
| 203
| 45.188235
| 0.791394
| 0.008982
| 0
| 0.564417
| 1
| 0
| 0.233114
| 0.014717
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.01227
| 0
| 0.030675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
46045a2f93ad27b884b5a038fb3cb447429aa6cd
| 92
|
py
|
Python
|
prometheus_flask/__init__.py
|
lishulong16/mokitou
|
96c5af6f86b2786ffda203f023b4a5f11015853f
|
[
"MIT"
] | 1
|
2018-04-25T01:32:15.000Z
|
2018-04-25T01:32:15.000Z
|
prometheus_flask/__init__.py
|
lishulong16/mokitou
|
96c5af6f86b2786ffda203f023b4a5f11015853f
|
[
"MIT"
] | null | null | null |
prometheus_flask/__init__.py
|
lishulong16/mokitou
|
96c5af6f86b2786ffda203f023b4a5f11015853f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@contact: lishulong.never@gmail.com
@time: 2018/5/8 上午10:17
"""
| 15.333333
| 35
| 0.597826
| 14
| 92
| 3.928571
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 0.130435
| 92
| 5
| 36
| 18.4
| 0.55
| 0.891304
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
46061ef6e20ced89d91a0e256bf1797c6a0bb0d4
| 109
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/schedules/exceptions.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/schedules/exceptions.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/schedules/exceptions.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
# lint-amnesty, pylint: disable=missing-module-docstring
class CourseUpdateDoesNotExist(Exception):
pass
| 27.25
| 56
| 0.807339
| 11
| 109
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100917
| 109
| 3
| 57
| 36.333333
| 0.897959
| 0.495413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
461a580983356f9d90d59948b5dc6cbbf3373922
| 113
|
py
|
Python
|
decloud/__init__.py
|
CNES/decloud
|
6b06ae98bfe68821b4ebd0e7ba06723809cb9b42
|
[
"Apache-2.0"
] | 8
|
2022-02-25T13:15:07.000Z
|
2022-03-20T18:29:49.000Z
|
decloud/__init__.py
|
CNES/decloud
|
6b06ae98bfe68821b4ebd0e7ba06723809cb9b42
|
[
"Apache-2.0"
] | 1
|
2022-02-25T13:21:33.000Z
|
2022-02-25T13:21:33.000Z
|
decloud/__init__.py
|
CNES/decloud
|
6b06ae98bfe68821b4ebd0e7ba06723809cb9b42
|
[
"Apache-2.0"
] | 1
|
2022-03-31T23:43:12.000Z
|
2022-03-31T23:43:12.000Z
|
"""
Decloud provides all the things to perform experiments with Sentinel-1/2, and targets image de-clouding.
"""
| 28.25
| 104
| 0.761062
| 17
| 113
| 5.058824
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020619
| 0.141593
| 113
| 3
| 105
| 37.666667
| 0.865979
| 0.920354
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1ca0f9cc3279f0794201d0d2445a81f22a943387
| 62
|
py
|
Python
|
muttdown/__main__.py
|
guygma/muttdown
|
cd596cfdd976cd719cb16e247c574b2778050f6f
|
[
"ISC"
] | 50
|
2015-07-02T17:55:53.000Z
|
2021-11-20T18:53:55.000Z
|
muttdown/__main__.py
|
guygma/muttdown
|
cd596cfdd976cd719cb16e247c574b2778050f6f
|
[
"ISC"
] | 19
|
2015-06-03T17:08:04.000Z
|
2020-09-16T21:19:56.000Z
|
muttdown/__main__.py
|
guygma/muttdown
|
cd596cfdd976cd719cb16e247c574b2778050f6f
|
[
"ISC"
] | 11
|
2015-06-01T15:27:58.000Z
|
2019-12-03T20:11:00.000Z
|
import sys
from muttdown.main import main
sys.exit(main())
| 8.857143
| 30
| 0.741935
| 10
| 62
| 4.6
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 62
| 6
| 31
| 10.333333
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1ca3e17a607ab3b8327c02023ef54e15e792ed45
| 91
|
py
|
Python
|
Accessible_Campus-master/Geodjango/firstgis/apps.py
|
zzrose/Campus_Locator
|
9262968165c198c15cffd0b3165c97b26bdafed2
|
[
"Apache-2.0"
] | 1
|
2019-02-25T23:17:29.000Z
|
2019-02-25T23:17:29.000Z
|
Geodjango/firstgis/apps.py
|
Harrymissi/Accessible_Campus
|
e20c14a18809e86e90b4aff528d2966a5b36f416
|
[
"Apache-2.0"
] | null | null | null |
Geodjango/firstgis/apps.py
|
Harrymissi/Accessible_Campus
|
e20c14a18809e86e90b4aff528d2966a5b36f416
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class FirstgisConfig(AppConfig):
name = 'firstgis'
| 15.166667
| 33
| 0.758242
| 10
| 91
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164835
| 91
| 5
| 34
| 18.2
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1cba67972eae70c38a5f159726a88bc4b37b549e
| 458
|
py
|
Python
|
network/serializers.py
|
pawangeek/PollsChain
|
6059796c671d3250f2cd8bb36171bf54013d176e
|
[
"MIT"
] | null | null | null |
network/serializers.py
|
pawangeek/PollsChain
|
6059796c671d3250f2cd8bb36171bf54013d176e
|
[
"MIT"
] | null | null | null |
network/serializers.py
|
pawangeek/PollsChain
|
6059796c671d3250f2cd8bb36171bf54013d176e
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Transaction, Block, Peer
class TransactionSerializer(serializers.ModelSerializer):
class Meta:
model = Transaction
fields='__all__'
class BlockSerializer(serializers.ModelSerializer):
class Meta:
model=Block
fields='__all__'
class PeerSerializer(serializers.ModelSerializer):
class Meta:
model = Peer
fields = ('name', 'address',)
| 19.913043
| 57
| 0.696507
| 42
| 458
| 7.380952
| 0.47619
| 0.251613
| 0.3
| 0.33871
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224891
| 458
| 22
| 58
| 20.818182
| 0.873239
| 0
| 0
| 0.357143
| 0
| 0
| 0.054585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
1cf675b9c4f0372a456cb39d55b19295e1898797
| 276
|
py
|
Python
|
ShopperMiles/vendor/admin_export/urls.py
|
juansahe/shoppy
|
265e6e5d3cfc0bc05df3c793e9b4f5921ce78ae5
|
[
"MIT"
] | null | null | null |
ShopperMiles/vendor/admin_export/urls.py
|
juansahe/shoppy
|
265e6e5d3cfc0bc05df3c793e9b4f5921ce78ae5
|
[
"MIT"
] | null | null | null |
ShopperMiles/vendor/admin_export/urls.py
|
juansahe/shoppy
|
265e6e5d3cfc0bc05df3c793e9b4f5921ce78ae5
|
[
"MIT"
] | null | null | null |
# from django.conf.urls import url, patterns
# from django.contrib.admin.views.decorators import staff_member_required
# from .views import AdminExport
# view = staff_member_required(AdminExport.as_view())
# urlpatterns = [
# url(r'^export/$', view, name="export"),
# ]
| 27.6
| 73
| 0.735507
| 35
| 276
| 5.657143
| 0.6
| 0.10101
| 0.191919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 276
| 9
| 74
| 30.666667
| 0.825
| 0.923913
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e81fe97b06635861c4434a4d7f1849781b8f46a5
| 425
|
py
|
Python
|
flex/void.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
flex/void.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
flex/void.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
import builtins
class VoidType(object):
__slots__ = ()
def __new__(cls):
if not hasattr(builtins, 'Void'):
builtins.Void = super(VoidType, cls).__new__(cls)
return builtins.Void
def __len__(self):
return 0
def __bool__(self):
return False
__nonzero__ = __bool__
# def __getnewargs__(self):
# return ()
def __str__(self):
return 'Void'
def __repr__(self):
return 'Void'
__VOID = VoidType()
| 13.709677
| 52
| 0.684706
| 52
| 425
| 4.788462
| 0.461538
| 0.200803
| 0.11245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002924
| 0.195294
| 425
| 30
| 53
| 14.166667
| 0.725146
| 0.084706
| 0
| 0.117647
| 0
| 0
| 0.031088
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0
| 0.058824
| 0.235294
| 0.823529
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
e820d6a0b3d0db228100e64bfb41667b2ef6fb5f
| 1,693
|
py
|
Python
|
tests/test_160-security_policies.py
|
britive/python-api
|
2daa7693f1d4adf03626abd78598e30f62b6e2e6
|
[
"MIT"
] | null | null | null |
tests/test_160-security_policies.py
|
britive/python-api
|
2daa7693f1d4adf03626abd78598e30f62b6e2e6
|
[
"MIT"
] | null | null | null |
tests/test_160-security_policies.py
|
britive/python-api
|
2daa7693f1d4adf03626abd78598e30f62b6e2e6
|
[
"MIT"
] | null | null | null |
from .cache import * # will also import some globals like `britive`
def test_create(cached_security_policy):
assert isinstance(cached_security_policy, dict)
def test_list(cached_security_policy):
policies = britive.security_policies.list()
assert isinstance(policies, list)
assert cached_security_policy['id'] in [p['id'] for p in policies]
def test_get(cached_security_policy):
policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
assert isinstance(policy, dict)
def test_disable(cached_security_policy):
response = britive.security_policies.disable(security_policy_id=cached_security_policy['id'])
assert response is None
policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
assert policy['status'] == 'Inactive'
def test_enable(cached_security_policy):
response = britive.security_policies.enable(security_policy_id=cached_security_policy['id'])
assert response is None
policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
assert policy['status'] == 'Active'
def test_update(cached_security_policy):
response = britive.security_policies.update(
security_policy_id=cached_security_policy['id'],
ips=['2.2.2.2']
)
assert response is None
policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
assert policy['conditions'][0]['values'] == ['2.2.2.2']
def test_delete(cached_security_policy):
response = britive.security_policies.delete(security_policy_id=cached_security_policy['id'])
assert response is None
cleanup('security-policy')
| 35.270833
| 97
| 0.763142
| 220
| 1,693
| 5.572727
| 0.186364
| 0.2969
| 0.277325
| 0.161501
| 0.628059
| 0.628059
| 0.628059
| 0.430669
| 0.430669
| 0.430669
| 0
| 0.006118
| 0.131128
| 1,693
| 47
| 98
| 36.021277
| 0.827328
| 0.025989
| 0
| 0.25
| 0
| 0
| 0.055252
| 0
| 0
| 0
| 0
| 0
| 0.34375
| 1
| 0.21875
| false
| 0
| 0.03125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1c1576961bfaf65770fd47940e1dc49e91254e5d
| 209
|
py
|
Python
|
pacote-download/atividades-python/fatorial.py
|
bigmaster112/matheus-python
|
865a5420c9d5f5f38f7bba67aea0841c23896cee
|
[
"MIT"
] | null | null | null |
pacote-download/atividades-python/fatorial.py
|
bigmaster112/matheus-python
|
865a5420c9d5f5f38f7bba67aea0841c23896cee
|
[
"MIT"
] | null | null | null |
pacote-download/atividades-python/fatorial.py
|
bigmaster112/matheus-python
|
865a5420c9d5f5f38f7bba67aea0841c23896cee
|
[
"MIT"
] | null | null | null |
numero = int(input('Digite um numero'))
fatorial = numero
contador = 1
while (numero - contador) > 1:
fatorial = fatorial * (numero-contador)
contador +=1
print('O fatorial de ', numero,'é', fatorial)
| 26.125
| 45
| 0.674641
| 27
| 209
| 5.222222
| 0.481481
| 0.297872
| 0.312057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.181818
| 209
| 8
| 45
| 26.125
| 0.807018
| 0
| 0
| 0
| 0
| 0
| 0.147619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1c264ac43a5f389b625f961b872cdca85ac616a3
| 412
|
py
|
Python
|
pystreamable/exceptions.py
|
jernejovc/pystreamable
|
78e342dd502bedac6214781bf4f4f85f5a444471
|
[
"MIT"
] | 9
|
2017-08-09T19:29:16.000Z
|
2021-11-29T02:50:24.000Z
|
pystreamable/exceptions.py
|
jernejovc/pystreamable
|
78e342dd502bedac6214781bf4f4f85f5a444471
|
[
"MIT"
] | 3
|
2017-10-11T18:41:05.000Z
|
2018-10-22T09:04:12.000Z
|
pystreamable/exceptions.py
|
jernejovc/pystreamable
|
78e342dd502bedac6214781bf4f4f85f5a444471
|
[
"MIT"
] | 3
|
2019-08-25T13:17:25.000Z
|
2021-01-22T18:11:10.000Z
|
from __future__ import print_function
class StreamableApiException(Exception):
"""
Base class for all Streamable API wrapper exceptions.
"""
pass
class StreamableApiServerException(StreamableApiException):
"""
Streamable API server exception.
"""
pass
class StreamableApiClientException(StreamableApiException):
"""
Streamable API client exception.
"""
pass
| 17.913043
| 59
| 0.711165
| 33
| 412
| 8.727273
| 0.606061
| 0.135417
| 0.243056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216019
| 412
| 22
| 60
| 18.727273
| 0.891641
| 0.288835
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.428571
| 0.142857
| 0
| 0.571429
| 0.142857
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
1c3a864ae8eabfe2c5b0f38f8dad8915a9ad63fe
| 918
|
py
|
Python
|
fbdplc/wires.py
|
Jmeyer1292/block_diagram_z3
|
b7180d2dedc33ccb86aa3c58c898dd7adb9653fe
|
[
"Apache-2.0"
] | 4
|
2021-09-18T13:32:57.000Z
|
2022-03-15T22:13:56.000Z
|
fbdplc/wires.py
|
Jmeyer1292/block_diagram_z3
|
b7180d2dedc33ccb86aa3c58c898dd7adb9653fe
|
[
"Apache-2.0"
] | null | null | null |
fbdplc/wires.py
|
Jmeyer1292/block_diagram_z3
|
b7180d2dedc33ccb86aa3c58c898dd7adb9653fe
|
[
"Apache-2.0"
] | 2
|
2021-12-06T20:19:04.000Z
|
2022-03-15T22:13:58.000Z
|
'''
Edges in a block diagram computational graph. The edges themselves don't have direction,
but the ports that they attach to may.
'''
class WireConnection:
pass
class NamedConnection(WireConnection):
def __init__(self, target_uid: int, target_port: str):
self.target_uid = target_uid
self.target_port = target_port
def __str__(self):
return f'NamedConnection(id={self.target_uid}, port={self.target_port})'
class IdentConnection(WireConnection):
def __init__(self, target_uid: int):
self.target_uid = target_uid
def __str__(self):
return f'IdentConnection(id={self.target_uid})'
class Wire:
'''
Wires in TIA's S7 XML format can have more than two terminals, but we always decompose them
into a series of two terminal blocks.
'''
def __init__(self, a: WireConnection, b: WireConnection):
self.a = a
self.b = b
| 24.810811
| 95
| 0.686275
| 125
| 918
| 4.784
| 0.464
| 0.133779
| 0.130435
| 0.083612
| 0.254181
| 0.123746
| 0.123746
| 0
| 0
| 0
| 0
| 0.001403
| 0.223312
| 918
| 36
| 96
| 25.5
| 0.837307
| 0.279956
| 0
| 0.235294
| 0
| 0
| 0.156151
| 0.154574
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0.058824
| 0
| 0.117647
| 0.647059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
1c470f3b148dd13ad815f7979d810003cd90888e
| 1,031
|
py
|
Python
|
tests/datastructures/test_shuffle.py
|
TristenSeth/campy
|
9e726c342d682239e1c19e6f5645c0b2167d7fab
|
[
"MIT"
] | 5
|
2018-12-03T19:18:50.000Z
|
2021-05-31T07:17:06.000Z
|
tests/datastructures/test_shuffle.py
|
TristenSeth/campy
|
9e726c342d682239e1c19e6f5645c0b2167d7fab
|
[
"MIT"
] | 1
|
2017-06-07T04:33:46.000Z
|
2017-06-07T04:33:46.000Z
|
tests/datastructures/test_shuffle.py
|
TristenSeth/campy
|
9e726c342d682239e1c19e6f5645c0b2167d7fab
|
[
"MIT"
] | 1
|
2017-06-06T07:29:07.000Z
|
2017-06-06T07:29:07.000Z
|
"""Tests for the :mod:`campy.datastructures.shuffle` module."""
# These reference shuffled values are being generated by Python running
# 3.7.2 (default, Dec 27 2018, 07:35:06) \n[Clang 10.0.0 (clang-1000.11.45.5)]
# on macOS 10.14.2
from campy.datastructures.shuffle import shuffle
import random
def test_shuffle_list():
random.seed(41)
assert shuffle([3, 1, 4, 1, 5, 9]) == [5, 9, 3, 1, 4, 1]
def test_shuffle_tuple():
random.seed(41)
assert shuffle((3, 1, 4, 1, 5, 9)) == (5, 9, 3, 1, 4, 1)
def test_shuffle_string():
random.seed(41)
assert shuffle('abcdefg') == 'afgebcd'
def test_shuffle_bytes():
random.seed(41)
assert shuffle(b'abcdefg') == b'afgebcd'
def test_shuffle_bytearray():
random.seed(41)
assert shuffle(bytearray(b'abcdefg')) == bytearray(b'afgebcd')
# def test_shuffle_dict():
# def test_shuffle_set():
# def test_shuffle_frozenset():
# Other types to test: namedtuple? defaultdict? counter? collections abc subclasses?
# def test_shuffle_noniterable():
| 22.413043
| 84
| 0.681862
| 158
| 1,031
| 4.335443
| 0.449367
| 0.091971
| 0.183942
| 0.131387
| 0.322628
| 0.148905
| 0.148905
| 0.148905
| 0.148905
| 0.148905
| 0
| 0.078089
| 0.167798
| 1,031
| 45
| 85
| 22.911111
| 0.72028
| 0.403492
| 0
| 0.294118
| 1
| 0
| 0.069884
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 1
| 0.294118
| true
| 0
| 0.117647
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1c5c01689b84baac6ad976451aff4b8f71dcb9ea
| 191
|
py
|
Python
|
psi/templates/io/__init__.py
|
NCRAR/psiexperiment
|
c3f8580b2b155ce42ebb936019d862c4343b545c
|
[
"MIT"
] | 2
|
2020-07-10T07:49:52.000Z
|
2020-11-15T13:20:52.000Z
|
psi/templates/io/__init__.py
|
NCRAR/psiexperiment
|
c3f8580b2b155ce42ebb936019d862c4343b545c
|
[
"MIT"
] | 1
|
2020-04-20T20:37:48.000Z
|
2020-04-20T20:37:48.000Z
|
psi/templates/io/__init__.py
|
NCRAR/psiexperiment
|
c3f8580b2b155ce42ebb936019d862c4343b545c
|
[
"MIT"
] | 3
|
2020-04-17T15:03:36.000Z
|
2022-01-14T23:19:29.000Z
|
# Configurations in this directory prefixed with _ are meant to be copied using
# `psi-config creaate-io` and modified. Configurations without the underscore
# prefix can be loaded directly.
| 47.75
| 79
| 0.801047
| 27
| 191
| 5.62963
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151832
| 191
| 3
| 80
| 63.666667
| 0.938272
| 0.963351
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1c65c8302cb402e20550924e947dda41320f571a
| 559
|
py
|
Python
|
email_manager/admin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
email_manager/admin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
email_manager/admin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
from django.contrib import admin
from email_manager.models import EmailLog
@admin.register(EmailLog)
class EmailLogAdmin(admin.ModelAdmin):
list_per_page = 500
ordering = ('-created',)
list_display = ('created', 'subject', 'recipient_list', 'from_email', 'probably_sent',)
search_fields = ('recipient_list',)
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
| 26.619048
| 91
| 0.708408
| 68
| 559
| 5.602941
| 0.558824
| 0.047244
| 0.165354
| 0.188976
| 0.338583
| 0.338583
| 0.338583
| 0.23622
| 0.23622
| 0
| 0
| 0.006579
| 0.184258
| 559
| 20
| 92
| 27.95
| 0.828947
| 0
| 0
| 0.214286
| 0
| 0
| 0.13059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.142857
| 0.214286
| 0.928571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
1c75d6064875274b31b97ad8cae1f13bfb2d5de5
| 2,379
|
py
|
Python
|
tardis/default_settings/__init__.py
|
keithschulze/mytardis
|
8ed3562574ce990d42bfe96133185a82c31c27d4
|
[
"Apache-2.0"
] | null | null | null |
tardis/default_settings/__init__.py
|
keithschulze/mytardis
|
8ed3562574ce990d42bfe96133185a82c31c27d4
|
[
"Apache-2.0"
] | null | null | null |
tardis/default_settings/__init__.py
|
keithschulze/mytardis
|
8ed3562574ce990d42bfe96133185a82c31c27d4
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=wildcard-import
# first apps, so other files can add to INSTALLED_APPS
from tardis.default_settings.apps import *
from tardis.default_settings.admins import *
from tardis.default_settings.analytics import *
from tardis.default_settings.auth import *
from tardis.default_settings.caches import *
from tardis.default_settings.celery import *
from tardis.default_settings.custom_views import *
from tardis.default_settings.database import *
from tardis.default_settings.debug import *
from tardis.default_settings.downloads import *
from tardis.default_settings.email import *
from tardis.default_settings.filters import *
from tardis.default_settings.frontend import *
from tardis.default_settings.i18n import *
from tardis.default_settings.localisation import *
from tardis.default_settings.logging import *
from tardis.default_settings.middlewares import *
from tardis.default_settings.publication import *
from tardis.default_settings.search import *
from tardis.default_settings.sftp import *
from tardis.default_settings.sharing import *
from tardis.default_settings.site_customisations import *
from tardis.default_settings.staging import *
from tardis.default_settings.static_files import *
from tardis.default_settings.storage import *
from tardis.default_settings.templates import *
from tardis.default_settings.uploads import *
from tardis.default_settings.urls import *
# Get version from git to be displayed on About page.
def get_git_version():
repo_dir = path.dirname(path.dirname(path.abspath(__file__)))
def run_git(args):
import subprocess
process = subprocess.Popen('git %s' % args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
cwd=repo_dir,
universal_newlines=True)
return process.communicate()[0]
try:
info = {
'commit_id': run_git("log -1 --format='%H'").strip(),
'date': run_git("log -1 --format='%cd' --date=rfc").strip(),
'branch': run_git("rev-parse --abbrev-ref HEAD").strip(),
'tag': run_git("describe --abbrev=0 --tags").strip(),
}
except Exception:
return ["unavailable"]
return info
MYTARDIS_VERSION = get_git_version()
| 39
| 72
| 0.704918
| 287
| 2,379
| 5.66899
| 0.358885
| 0.172096
| 0.292563
| 0.43024
| 0.534112
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003171
| 0.204708
| 2,379
| 60
| 73
| 39.65
| 0.856765
| 0.057167
| 0
| 0
| 0
| 0
| 0.064314
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.58
| 0
| 0.68
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1c778325f7fa92db35a01d5d91f9f2732c6f24e7
| 2,158
|
py
|
Python
|
DailyProgrammer/DP20150410W.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/DP20150410W.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/DP20150410W.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
[Weekly #22] Machine Learning
https://www.reddit.com/r/dailyprogrammer/comments/3206mk/weekly_22_machine_learning/
# [](#WeeklyIcon) Asimov would be proud!
[Machine learning](http://en.wikipedia.org/wiki/Machine_learning) is a diverse field spanning from optimization and
data classification, to computer vision and pattern recognition. Modern algorithms for detecting spam email use machine
learning to react to developing types of spam and spot them quicker than people could!
Techniques include evolutionary programming and genetic algorithms, and models such as [artificial neural
networks](http://en.wikipedia.org/wiki/Artificial_neural_network). Do you work in any of these fields, or study them in
academics? Do you know something about them that's interesting, or have any cool resources or videos to share? Show
them to the world!
Libraries like [OpenCV](http://en.wikipedia.org/wiki/OpenCV) (available [here](http://opencv.org/)) use machine
learning to some extent, in order to adapt to new situations. The United Kingdom makes extensive use of [automatic
number plate recognition](http://en.wikipedia.org/wiki/Police-enforced_ANPR_in_the_UK) on speed cameras, which is a
subset of optical character recognition that needs to work in high speeds and poor visibility.
Of course, there's also /r/MachineLearning if you want to check out even more. They have a [simple questions
thread](http://www.reddit.com/r/MachineLearning/comments/2xopnm/mondays_simple_questions_thread_20150302/) if you want
some reading material!
*This post was inspired by [this challenge
submission](http://www.reddit.com/r/dailyprogrammer_ideas/comments/31wpzp/intermediate_hello_world_genetic_or_evolutionary/).
Check out /r/DailyProgrammer_Ideas to submit your own challenges to the subreddit!*
### IRC
We have an [IRC channel on Freenode](http://www.reddit.com/r/dailyprogrammer/comments/2dtqr7/), at
**#reddit-dailyprogrammer**. Join the channel and lurk with us!
### Previously...
The previous weekly thread was [**Recap and Updates**](http://www.reddit.com/r/dailyprogrammer/comments/2sx7nn/).
"""
def main():
pass
if __name__ == "__main__":
main()
| 56.789474
| 125
| 0.7924
| 323
| 2,158
| 5.204334
| 0.55418
| 0.05354
| 0.035693
| 0.038667
| 0.150506
| 0.088043
| 0.047591
| 0
| 0
| 0
| 0
| 0.012036
| 0.114458
| 2,158
| 37
| 126
| 58.324324
| 0.867609
| 0.966636
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
1c96ecd4e43bb877ddf87f71d953cf922f10d8ed
| 247
|
py
|
Python
|
openpds/core/management/commands/flumoji_firebase.py
|
eschloss/FluFuture
|
385506f5d12201b3909d08e42fb1ebb5c0cb323f
|
[
"MIT"
] | null | null | null |
openpds/core/management/commands/flumoji_firebase.py
|
eschloss/FluFuture
|
385506f5d12201b3909d08e42fb1ebb5c0cb323f
|
[
"MIT"
] | null | null | null |
openpds/core/management/commands/flumoji_firebase.py
|
eschloss/FluFuture
|
385506f5d12201b3909d08e42fb1ebb5c0cb323f
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
import logging
from openpds.questions.tasks import howAreYouFeelingTodayAllUsers
class Command(BaseCommand):
def handle(self, *args, **kwargs):
howAreYouFeelingTodayAllUsers.delay()
| 30.875
| 65
| 0.801619
| 25
| 247
| 7.92
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121457
| 247
| 7
| 66
| 35.285714
| 0.912442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
98cd4bedbf051d709881b2f5aecbf5d8bd582be8
| 508
|
py
|
Python
|
oops_fhir/r4/value_set/loinccodes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/loinccodes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/loinccodes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
__all__ = ["LOINCCodes"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class LOINCCodes(ValueSet):
"""
LOINC Codes
This value set includes all LOINC codes
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/observation-codes
"""
# TODO: fix this template issue1
pass
class Meta:
resource = _resource
| 17.517241
| 69
| 0.698819
| 64
| 508
| 5.3125
| 0.640625
| 0.082353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012469
| 0.21063
| 508
| 28
| 70
| 18.142857
| 0.835411
| 0.322835
| 0
| 0
| 0
| 0
| 0.047468
| 0
| 0
| 0
| 0
| 0.035714
| 0
| 1
| 0
| false
| 0.111111
| 0.333333
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
98e783cd42569d2e132137c2f145ad0161ef41c3
| 201
|
py
|
Python
|
tests/conftest.py
|
araneto/foodelivery
|
9c8c587307286d9f0b79206bf8464d8fff9073fa
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
araneto/foodelivery
|
9c8c587307286d9f0b79206bf8464d8fff9073fa
|
[
"MIT"
] | 1
|
2020-09-14T22:09:03.000Z
|
2020-09-14T22:09:03.000Z
|
tests/conftest.py
|
araneto/foodelivery
|
9c8c587307286d9f0b79206bf8464d8fff9073fa
|
[
"MIT"
] | null | null | null |
"""
Run make install before run tests
"""
import pytest
from foodelivery.app import create_app
@pytest.fixture(scope="module")
def app():
"""Instance of Main flask app"""
return create_app()
| 16.75
| 38
| 0.706468
| 28
| 201
| 5
| 0.714286
| 0.128571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169154
| 201
| 11
| 39
| 18.272727
| 0.838323
| 0.298507
| 0
| 0
| 0
| 0
| 0.046875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
98f753d1c4e5758d90059e57e232a2987db844a3
| 123
|
py
|
Python
|
lectures/code/dict_print.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 4
|
2015-08-10T17:46:55.000Z
|
2020-04-18T21:09:03.000Z
|
lectures/code/dict_print.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | null | null | null |
lectures/code/dict_print.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 2
|
2019-04-24T03:31:02.000Z
|
2019-05-13T07:36:06.000Z
|
>>> d = {1: 'one', 2: 'two', 3: 'three'}
>>> for key, value in d.items():
... print key, value
...
1 one
2 two
3 three
| 15.375
| 40
| 0.495935
| 22
| 123
| 2.772727
| 0.590909
| 0.131148
| 0.163934
| 0.262295
| 0.459016
| 0.459016
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 0.243902
| 123
| 7
| 41
| 17.571429
| 0.591398
| 0
| 0
| 0
| 0
| 0
| 0.089431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.142857
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
98fe2100e443a72cbbb6f3d74c7b430f232ca6d3
| 239
|
py
|
Python
|
textToBraille/admin.py
|
JoelVG/text-to-braille
|
9e6f5c7337d2d402378cd96f9476eb39d7b82328
|
[
"MIT"
] | null | null | null |
textToBraille/admin.py
|
JoelVG/text-to-braille
|
9e6f5c7337d2d402378cd96f9476eb39d7b82328
|
[
"MIT"
] | 1
|
2022-02-06T21:12:48.000Z
|
2022-02-06T21:12:48.000Z
|
textToBraille/admin.py
|
JoelVG/text-to-braille
|
9e6f5c7337d2d402378cd96f9476eb39d7b82328
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Translation
# Register your models here.
class TranslationAdmin(admin.ModelAdmin):
list_display = ('text', 'braille_translation')
admin.site.register(Translation, TranslationAdmin)
| 29.875
| 50
| 0.803347
| 27
| 239
| 7.037037
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108787
| 239
| 8
| 51
| 29.875
| 0.892019
| 0.108787
| 0
| 0
| 0
| 0
| 0.108491
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c7086ad17884ddb1f62e33bef40213c6c10d2055
| 883
|
py
|
Python
|
baselines/EMNLP2019/uri_features.py
|
ParikhKadam/knowledge-net
|
abc4ed3ebb88bfde8c1f02709371324ae6347ba0
|
[
"MIT"
] | 240
|
2019-09-13T21:33:24.000Z
|
2022-03-28T02:35:00.000Z
|
baselines/EMNLP2019/uri_features.py
|
ParikhKadam/knowledge-net
|
abc4ed3ebb88bfde8c1f02709371324ae6347ba0
|
[
"MIT"
] | 8
|
2020-01-28T23:04:59.000Z
|
2021-05-21T16:01:28.000Z
|
baselines/EMNLP2019/uri_features.py
|
ParikhKadam/knowledge-net
|
abc4ed3ebb88bfde8c1f02709371324ae6347ba0
|
[
"MIT"
] | 34
|
2019-09-21T00:19:37.000Z
|
2022-02-04T19:59:23.000Z
|
import itertools
import numpy as np
import networkx as nx
import vocab
def coref_score(instance, property_id):
return [ instance.subject_entity["coref_score"], instance.object_entity["coref_score"] ]
def el_score(instance, property_id):
    """Entity-linking scores of the instance's subject and object mentions."""
    return [entity["el_score"]
            for entity in (instance.subject_entity, instance.object_entity)]
def _entity_linker_types_from_mention(entity):
    """One-hot float32 vector over vocab.types marking the mention's types."""
    return np.array([1.0 if t in entity["types"] else 0.0 for t in vocab.types],
                    np.float32)
def entity_linker_types(instance, property_id):
    """Concatenation of the subject's and object's one-hot type vectors."""
    subject_vec = _entity_linker_types_from_mention(instance.subject_entity)
    object_vec = _entity_linker_types_from_mention(instance.object_entity)
    return np.concatenate([subject_vec, object_vec])
def wikidata_predicates(instance, property_id):
    """Placeholder feature: no Wikidata-predicate features are produced."""
    return None
def text_score(instance, property_id):
    """Text-model score for the given property id, wrapped in a list."""
    score = instance.text_instance.scores[property_id]
    return [score]
| 31.535714
| 90
| 0.775764
| 126
| 883
| 5.134921
| 0.325397
| 0.092736
| 0.139104
| 0.185471
| 0.366306
| 0.323029
| 0.15456
| 0.15456
| 0
| 0
| 0
| 0.005161
| 0.12231
| 883
| 28
| 91
| 31.535714
| 0.829677
| 0
| 0
| 0
| 0
| 0
| 0.048643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.26087
| false
| 0
| 0.173913
| 0.217391
| 0.695652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
c71c65dfc54a49c0ce16a8b9dccc0de2db33751d
| 881
|
py
|
Python
|
python/dgl/_dataloading/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
python/dgl/_dataloading/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
python/dgl/_dataloading/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
"""The ``dgl.dataloading`` package contains:
* Data loader classes for iterating over a set of nodes or edges in a graph and generates
computation dependency via neighborhood sampling methods.
* Various sampler classes that perform neighborhood sampling for multi-layer GNNs.
* Negative samplers for link prediction.
For a holistic explanation of how the different components work together,
read the user guide :ref:`guide-minibatch`.
.. note::
This package is experimental and the interfaces may be subject
to changes in future releases. It currently only has implementations in PyTorch.
"""
from .neighbor import *
from .dataloader import *
from .cluster_gcn import *
from .shadow import *
from . import negative_sampler
from .async_transferer import AsyncTransferer
from .. import backend as F
if F.get_preferred_backend() == 'pytorch':
from .pytorch import *
| 30.37931
| 89
| 0.77412
| 120
| 881
| 5.641667
| 0.708333
| 0.059084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164586
| 881
| 28
| 90
| 31.464286
| 0.919837
| 0.676504
| 0
| 0
| 1
| 0
| 0.02518
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.888889
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c738c4999ed8cdf2e8966d52f4e0656d6ad275a7
| 866
|
py
|
Python
|
bat/report/report.py
|
XUANLANcognition/Bat
|
4ee2e81aa4e41b8a355701fa6d24a1e00115c3a4
|
[
"BSD-2-Clause"
] | null | null | null |
bat/report/report.py
|
XUANLANcognition/Bat
|
4ee2e81aa4e41b8a355701fa6d24a1e00115c3a4
|
[
"BSD-2-Clause"
] | null | null | null |
bat/report/report.py
|
XUANLANcognition/Bat
|
4ee2e81aa4e41b8a355701fa6d24a1e00115c3a4
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'help'
class Report(object):
    """Render a report into an HTML file.

    The title is prefixed with "Bat's"; create() converts the lightweight
    markup in `content` to HTML and writes it to ./<headtitle>.html.
    """
    def __init__(self, headtitle, content):
        # Fix: the original "'Bat''s'" was implicit string concatenation and
        # evaluated to "Bats"; the intended prefix is "Bat's".
        self.headtitle = "Bat's" + headtitle
        self.content = content

    def replace(self):
        """Convert the report's lightweight markup to HTML in place."""
        self.content = self.content.replace('\n', '<br>')
        self.content = self.content.replace('###', '')
        self.content = self.content.replace('[', '<h3>')
        self.content = self.content.replace(']', '</h3>')

    def create(self):
        """Render the content and write it to ./<headtitle>.html."""
        self.replace()
        self.content = '<h1>' + self.headtitle + ' Report</h1>' + self.content
        with open('./' + self.headtitle + '.html', 'w') as f:
            f.write(self.content)
class ScanReport(Report):
    """Report specialization for scan results."""

    def __init__(self, headtitle, content):
        # Fix: the original body was `pass`, which discarded both arguments and
        # left the instance without headtitle/content, breaking replace()/create().
        super().__init__(headtitle, content)
| 25.470588
| 78
| 0.553118
| 99
| 866
| 4.757576
| 0.444444
| 0.280255
| 0.127389
| 0.186837
| 0.369427
| 0.131635
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 0.266744
| 866
| 33
| 79
| 26.242424
| 0.733858
| 0.12933
| 0
| 0.111111
| 0
| 0
| 0.071527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.055556
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
c75374fcb3ef0a237f3413fc6e0302c93e408590
| 19,165
|
py
|
Python
|
epg/epgcpmg.py
|
jtamir/mri-sim-py
|
f606b835412bfc6a84dc0a8124807ea0979f663c
|
[
"MIT"
] | 5
|
2019-09-06T18:51:56.000Z
|
2020-05-26T10:17:29.000Z
|
epg/epgcpmg.py
|
jtamir/mri-sim-py
|
f606b835412bfc6a84dc0a8124807ea0979f663c
|
[
"MIT"
] | null | null | null |
epg/epgcpmg.py
|
jtamir/mri-sim-py
|
f606b835412bfc6a84dc0a8124807ea0979f663c
|
[
"MIT"
] | 5
|
2017-02-19T14:28:43.000Z
|
2020-06-10T07:42:54.000Z
|
#!/usr/bin/python
# EPG CPMG simulation code, based off of Matlab scripts from Brian Hargreaves <bah@stanford.edu>
# 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu>
import numpy as np
from warnings import warn
def rf(FpFmZ, alpha):
    """Same as rf2, but only returns FpFmZ."""
    return rf2(FpFmZ, alpha)[0]
def rf2(FpFmZ, alpha):
    """Propagate EPG states through a CPMG RF rotation of alpha (radians).

    Assumes the CPMG condition (magnetization on the real x axis).

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        alpha: RF pulse flip angle in radians.
    Returns:
        (updated FpFmZ, 3x3 RF rotation matrix RR).
    """
    # From Weigel et al, JMRI 41(2015)266-295, Eq. 21.
    if abs(alpha) > 2 * np.pi:
        warn('rf2: Flip angle should be in radians! alpha=%f' % alpha)
    half_cos_sq = np.cos(alpha / 2.) ** 2
    half_sin_sq = np.sin(alpha / 2.) ** 2
    full_cos = np.cos(alpha)
    full_sin = np.sin(alpha)
    RR = np.array([[half_cos_sq, half_sin_sq, full_sin],
                   [half_sin_sq, half_cos_sq, -full_sin],
                   [-0.5 * full_sin, 0.5 * full_sin, full_cos]])
    return np.dot(RR, FpFmZ), RR
def rf_ex(FpFmZ, alpha):
    """Same as rf2_ex, but only returns FpFmZ."""
    return rf2_ex(FpFmZ, alpha)[0]
def rf2_ex(FpFmZ, alpha):
    """Propagate EPG states through an RF excitation of
    alpha (radians) along the y direction, i.e. phase of pi/2.

    INPUT:
        FpFmZ = 3xN vector of F+, F- and Z states.
        alpha = RF pulse flip angle in radians (scalar or length-1 sequence)

    OUTPUT:
        FpFmZ = Updated FpFmZ state.
        RR = RF rotation matrix (3x3).
    """
    # Accept a length-1 sequence/array for alpha as well as a plain scalar.
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt etc.
    try:
        alpha = alpha[0]
    except (TypeError, IndexError):
        pass  # alpha is already a scalar
    if abs(alpha) > 2 * np.pi:
        warn('rf2_ex: Flip angle should be in radians! alpha=%f' % alpha)
    cosa2 = np.cos(alpha/2.)**2
    sina2 = np.sin(alpha/2.)**2
    cosa = np.cos(alpha)
    sina = np.sin(alpha)
    RR = np.array([ [cosa2, -sina2, sina],
                    [-sina2, cosa2, sina],
                    [-0.5 * sina, -0.5 * sina, cosa] ])
    FpFmZ = np.dot(RR, FpFmZ)
    return FpFmZ, RR
def rf_prime(FpFmZ, alpha):
    """Wrapper around rf_prime2 that discards the RR matrix."""
    updated_state, _ = rf_prime2(FpFmZ, alpha)
    return updated_state
def rf_prime2(FpFmZ, alpha):
    """Derivative of the CPMG RF rotation operator w.r.t. alpha (radians).

    Assumes the CPMG condition (magnetization on the real x axis).

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        alpha: RF pulse flip angle in radians.
    Returns:
        (derivative applied to FpFmZ, derivative matrix RR).
    """
    if abs(alpha) > 2 * np.pi:
        warn('rf_prime2: Flip angle should be in radians! alpha=%f' % alpha)
    cs = np.cos(alpha / 2.) * np.sin(alpha / 2.)
    ca = np.cos(alpha)
    sa = np.sin(alpha)
    RR = np.array([[-cs, cs, ca],
                   [cs, -cs, -ca],
                   [-0.5 * ca, 0.5 * ca, -sa]])
    return np.dot(RR, FpFmZ), RR
def rf_B1_prime(FpFmZ, alpha, B1):
    """Wrapper around rf_B1_prime2 that discards the RR matrix."""
    updated_state, _ = rf_B1_prime2(FpFmZ, alpha, B1)
    return updated_state
def rf_B1_prime2(FpFmZ, alpha, B1):
    """Derivative of the B1-scaled CPMG refocusing operator w.r.t. B1.

    Assumes the CPMG condition (magnetization on the real x axis).

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        alpha: nominal RF flip angle in radians.
        B1: B1 homogeneity fraction, expected in (0, 2); 1 is homogeneous.
    Returns:
        (derivative applied to FpFmZ, derivative matrix RR).
    """
    if abs(alpha) > 2 * np.pi:
        warn('rf_B1_prime2: Flip angle should be in radians! alpha=%f' % alpha)
    if B1 < 0 or B1 > 2:
        warn('rf_B1_prime2: B1 Homogeneity should be a percentage between (0, 2)')
    cs = alpha * np.cos(B1 * alpha / 2.) * np.sin(B1 * alpha / 2.)
    ca = alpha * np.cos(B1 * alpha)
    sa = alpha * np.sin(B1 * alpha)
    RR = np.array([[-cs, cs, ca],
                   [cs, -cs, -ca],
                   [-0.5 * ca, 0.5 * ca, -sa]])
    return np.dot(RR, FpFmZ), RR
def rf_ex_B1_prime(FpFmZ, alpha, B1):
    """Derivative of the B1-scaled y-axis RF excitation operator w.r.t. B1.

    Assumes the CPMG condition (excitation along y).

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        alpha: nominal RF flip angle in radians.
        B1: B1 homogeneity fraction, expected in (0, 2); 1 is homogeneous.
    Returns:
        The derivative operator applied to FpFmZ.
    """
    if abs(alpha) > 2 * np.pi:
        warn('rf_ex_B1_prime2: Flip angle should be in radians! alpha=%f' % alpha)
    if B1 < 0 or B1 > 2:
        warn('rf_ex_B1_prime: B1 Homogeneity should be a percentage between (0, 2)')
    cs = alpha * np.cos(B1 * alpha / 2.) * np.sin(B1 * alpha / 2.)
    ca = alpha * np.cos(B1 * alpha)
    sa = alpha * np.sin(B1 * alpha)
    # NOTE: sign pattern differs from rf_B1_prime2 (excitation vs refocusing).
    RR = np.array([[-cs, cs, ca],
                   [cs, -cs, ca],
                   [-0.5 * ca, -0.5 * ca, -sa]])
    return np.dot(RR, FpFmZ)
def relax_mat(T, T1, T2):
    """Diagonal decay matrix diag([E2, E2, E1]) for relaxation over time T."""
    decay_t2 = np.exp(-T / T2)
    decay_t1 = np.exp(-T / T1)
    return np.diag([decay_t2, decay_t2, decay_t1])
def relax_mat_prime_T1(T, T1, T2):
    """Derivative of relax_mat w.r.t. T1; only the Z entry is nonzero."""
    dE1 = T * np.exp(-T / T1) / T1 ** 2
    return np.diag([0, 0, dE1])
def relax_mat_prime_T2(T, T1, T2):
    """Derivative of relax_mat w.r.t. T2; only the transverse entries are nonzero."""
    dE2 = T * np.exp(-T / T2) / T2 ** 2
    return np.diag([dE2, dE2, 0])
def relax_prime_T1(FpFmZ, T, T1, T2):
    """Apply the T1-derivative operator: returns E'(T1) FpFmZ + E0'(T1)."""
    dEE = relax_mat_prime_T1(T, T1, T2)
    result = np.dot(dEE, FpFmZ)
    # The recovery term's T1-derivative enters only the Z0 state.
    result[2, 0] -= dEE[2, 2]
    return result
def relax_prime_T2(FpFmZ, T, T1, T2):
    """Apply the T2-derivative decay matrix: returns E'(T2) FpFmZ."""
    return np.dot(relax_mat_prime_T2(T, T1, T2), FpFmZ)
def relax(FpFmZ, T, T1, T2):
    """Wrapper around relax2 that discards the EE matrix."""
    updated_state, _ = relax2(FpFmZ, T, T1, T2)
    return updated_state
def relax2(FpFmZ, T, T1, T2):
    """Propagate EPG states through relaxation over an interval T.

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        T: time interval (same units as T1, T2).
        T1, T2: relaxation times.
    Returns:
        (updated FpFmZ, decay matrix EE = diag([E2, E2, E1])).
    """
    E2 = np.exp(-T / T2)
    E1 = np.exp(-T / T1)
    EE = np.diag([E2, E2, E1])   # decay of states due to relaxation alone
    recovered = 1 - E1           # Mz recovery affects only the Z0 state
    FpFmZ = np.dot(EE, FpFmZ)
    FpFmZ[2, 0] += recovered
    return FpFmZ, EE
def grad(FpFmZ, noadd=False):
    """Propagate CPMG EPG states through a "unit" gradient (real-valued states).

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        noadd: True skips appending a new higher-order state column
            (faster, but may compromise accuracy).
    Returns:
        Updated FpFmZ state; Z states are unaffected.
    """
    if not noadd:
        FpFmZ = np.hstack((FpFmZ, [[0], [0], [0]]))  # room for a higher dephased state
    FpFmZ[0, :] = np.roll(FpFmZ[0, :], 1)    # shift F+ states up
    FpFmZ[1, :] = np.roll(FpFmZ[1, :], -1)   # shift F- states down
    FpFmZ[1, -1] = 0                         # zero the highest F- state
    FpFmZ[0, 0] = FpFmZ[1, 0]                # fill the lowest F+ state from F-
    return FpFmZ
def FSE_TE(FpFmZ, alpha, TE, T1, T2, noadd=False, recovery=True):
    """Propagate EPG states through one full CPMG echo interval.

    Sequence: relax -> grad -> rf -> grad -> relax, with real-valued states.

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        alpha: RF pulse flip angle in radians.
        TE: echo time interval (same units as T1, T2).
        T1, T2: relaxation times.
        noadd: True skips adding higher-order states in each gradient.
        recovery: False applies pure decay (no T1 regrowth) on both halves.
    Returns:
        Updated FpFmZ state.
    """
    half = TE / 2.
    EE = relax_mat(half, T1, T2)

    def _half_interval(state):
        # Full relaxation (with recovery) or pure decay for half the echo time.
        if recovery:
            return relax(state, half, T1, T2)
        return np.dot(EE, state)

    state = _half_interval(FpFmZ)
    state = grad(state, noadd)
    state = rf(state, alpha)
    state = grad(state, noadd)
    return _half_interval(state)
def FSE_TE_prime_alpha(FpFmZ, alpha, TE, T1, T2, noadd=False, recovery=True):
    """Gradient of one echo interval w.r.t. the flip angle alpha.

    Sequence: relax -> grad -> rf_prime -> grad -> decay-only relax,
    where rf_prime is the derivative of the RF pulse matrix w.r.t. alpha.

    NOTE(review): the `recovery` parameter is accepted but never used here —
    the trailing half-interval always applies pure decay; confirm intent.
    """
    state, EE = relax2(FpFmZ, TE / 2., T1, T2)
    state = grad(state, noadd)
    state = rf_prime(state, alpha)
    state = grad(state, noadd)
    return np.dot(EE, state)
def FSE_TE_prime1_T2(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E(T2) G R G E'(T2) FpFmZ."""
    half = TE / 2.
    state = np.dot(relax_mat_prime_T2(half, T1, T2), FpFmZ)
    state = grad(state, noadd)
    state = rf(state, alpha)
    state = grad(state, noadd)
    return np.dot(relax_mat(half, T1, T2), state)
def FSE_TE_prime2_T2(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E'(T2) G R G (E(T2) FpFmZ + E0)."""
    half = TE / 2.
    dEE = relax_mat_prime_T2(half, T1, T2)
    state = relax(FpFmZ, half, T1, T2)
    state = grad(state, noadd)
    state = rf(state, alpha)
    state = grad(state, noadd)
    return np.dot(dEE, state)
def FSE_TE_prime1_T1(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E(T1) G R G (E'(T1) FpFmZ + E0'(T1))."""
    half = TE / 2.
    EE = relax_mat(half, T1, T2)
    state = relax_prime_T1(FpFmZ, half, T1, T2)  # E'(T1) FpFmZ + E0'(T1)
    state = grad(state, noadd)
    state = rf(state, alpha)
    state = grad(state, noadd)
    return np.dot(EE, state)
def FSE_TE_prime2_T1(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E'(T1) G R G E(T1) FpFmZ + E0'(T1)."""
    half = TE / 2.
    state = np.dot(relax_mat(half, T1, T2), FpFmZ)
    state = grad(state, noadd)
    state = rf(state, alpha)
    state = grad(state, noadd)
    return relax_prime_T1(state, half, T1, T2)  # E'(T1)(...) + E0'(T1)
def FSE_TE_prime_B1(FpFmZ, alpha, TE, T1, T2, B1, noadd=False):
    """Gradient of one echo interval w.r.t. the B1 homogeneity fraction.

    Sequence: relax -> grad -> rf_B1_prime -> grad -> decay-only relax,
    where rf_B1_prime is the derivative of the RF pulse matrix w.r.t. B1.

    Args:
        FpFmZ: 3xN array of F+, F- and Z states.
        alpha: RF pulse flip angle in radians.
        TE: echo time interval (same units as T1, T2).
        T1, T2: relaxation times.
        B1: B1 homogeneity fraction (1 is fully homogeneous).
        noadd: True skips adding higher-order states in each gradient.
    Returns:
        Updated FpFmZ state.
    """
    state, EE = relax2(FpFmZ, TE / 2., T1, T2)
    state = grad(state, noadd)
    state = rf_B1_prime(state, alpha, B1)
    state = grad(state, noadd)
    return np.dot(EE, state)
### Gradients of full FSE EPG function across T time points
def FSE_signal_prime_alpha_idx(angles_rad, TE, T1, T2, idx):
    """Gradient of the echo-train signal w.r.t. the single pulse angles_rad[idx]."""
    T = len(angles_rad)
    zi = np.hstack((np.array([[1], [1], [0]]), np.zeros((3, T))))
    z_prime = np.zeros((T, 1))
    for i, alpha in enumerate(angles_rad):
        if i < idx:
            # Before the differentiated pulse: propagate the state normally.
            zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
            z_prime[i] = 0
        elif i == idx:
            # Differentiate at pulse idx.
            wi = FSE_TE_prime_alpha(zi, alpha, TE, T1, T2, noadd=True)
            z_prime[i] = wi[0, 0]
        else:
            # After it: propagate the derivative state without T1 recovery.
            wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)
            z_prime[i] = wi[0, 0]
    return z_prime
def FSE_signal_prime_T1(angles_rad, TE, T1, T2):
    """T1 gradient of the echo train, assuming a 90-degree excitation."""
    return FSE_signal_ex_prime_T1(np.pi / 2, angles_rad, TE, T1, T2)
def FSE_signal_ex_prime_T1(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Gradient of the EPG echo-train signal at each time point w.r.t. T1.

    B1 may be a scalar or a length-1 sequence; flip angles are pre-scaled
    by it before simulation.
    """
    T = len(angles_rad)
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt etc.
    try:
        B1 = B1[0]
    except (TypeError, IndexError):
        pass  # B1 is already a scalar
    # since the grad doesn't depend on B1 inhomog, can just pre-scale flip angles
    angle_ex_rad = B1 * np.copy(angle_ex_rad)
    angles_rad = B1 * np.copy(angles_rad)
    zi = np.hstack((rf_ex(np.array([[0], [0], [1]]), angle_ex_rad), np.zeros((3, T))))
    z_prime = np.zeros((T, 1))
    for i in range(T):
        alpha = angles_rad[i]
        if i == 0:
            wi = np.zeros((3, T + 1))
        else:
            # Propagate the accumulated derivative without T1 recovery.
            wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)
        # Product rule: derivative of each half-interval of the current echo.
        wi += FSE_TE_prime1_T1(zi, alpha, TE, T1, T2, noadd=True)
        wi += FSE_TE_prime2_T1(zi, alpha, TE, T1, T2, noadd=True)
        zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
        z_prime[i] = wi[0, 0]
    return z_prime
def FSE_signal_prime_T2(angles_rad, TE, T1, T2):
    """T2 gradient of the echo train, assuming a 90-degree excitation."""
    return FSE_signal_ex_prime_T2(np.pi / 2, angles_rad, TE, T1, T2)
def FSE_signal_ex_prime_T2(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Gradient of the EPG echo-train signal at each time point w.r.t. T2.

    B1 may be a scalar or a length-1 sequence; flip angles are pre-scaled
    by it before simulation.
    """
    T = len(angles_rad)
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt etc.
    try:
        B1 = B1[0]
    except (TypeError, IndexError):
        pass  # B1 is already a scalar
    # since the grad doesn't depend on B1 inhomog, can just pre-scale flip angles
    angle_ex_rad = B1 * np.copy(angle_ex_rad)
    angles_rad = B1 * np.copy(angles_rad)
    zi = np.hstack((rf_ex(np.array([[0], [0], [1]]), angle_ex_rad), np.zeros((3, T))))
    z_prime = np.zeros((T, 1))
    for i in range(T):
        alpha = angles_rad[i]
        if i == 0:
            wi = np.zeros((3, T + 1))
        else:
            # Propagate the accumulated derivative without T1 recovery.
            wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)
        # Product rule: derivative of each half-interval of the current echo.
        wi += FSE_TE_prime1_T2(zi, alpha, TE, T1, T2, noadd=True)
        wi += FSE_TE_prime2_T2(zi, alpha, TE, T1, T2, noadd=True)
        zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
        z_prime[i] = wi[0, 0]
    return z_prime
def FSE_signal_ex_prime_B1(angle_ex_rad, angles_rad, TE, T1, T2, B1):
    """Gradient of the echo train w.r.t. B1 homogeneity, including the
    excitation flip angle."""
    T = len(angles_rad)
    zi = np.hstack((np.array([[0], [0], [1]]), np.zeros((3, T + 1))))
    z_prime = np.zeros((T, 1))
    wi = rf_ex_B1_prime(zi, angle_ex_rad, B1)  # d/dB1 of the excitation
    zi = rf_ex(zi, angle_ex_rad * B1)
    for i, alpha in enumerate(angles_rad):
        if i == 0:
            xi = FSE_TE(wi, alpha * B1, TE, T1, T2, noadd=True, recovery=False)
        else:
            xi = FSE_TE(wi, alpha * B1, TE, T1, T2, noadd=True)
        # Product rule: propagated derivative plus derivative of this pulse.
        wi = FSE_TE_prime_B1(zi, alpha, TE, T1, T2, B1, noadd=True) + xi
        zi = FSE_TE(zi, alpha * B1, TE, T1, T2, noadd=True)
        z_prime[i] = wi[0, 0]
    return z_prime
### Full FSE EPG function across T time points
def FSE_signal_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Same as FSE_signal2_ex, but only returns Mxy."""
    Mxy, _ = FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1)
    return Mxy
def FSE_signal(angles_rad, TE, T1, T2):
    """Same as FSE_signal2, but only returns Mxy."""
    Mxy, _ = FSE_signal2(angles_rad, TE, T1, T2)
    return Mxy
def FSE_signal2(angles_rad, TE, T1, T2):
    """FSE_signal2_ex with the excitation pulse fixed at 90 degrees."""
    return FSE_signal2_ex(np.pi / 2., angles_rad, TE, T1, T2)
def FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Simulate Fast Spin-Echo CPMG sequence with specific flip angle train.

    Prior to the flip angle train, an excitation pulse of angle_ex_rad
    radians is applied in the Y direction. The flip angle train is then
    applied in the X direction.

    INPUT:
        angles_rad = array of flip angles in radians equal to echo train length
        TE = echo time/spacing
        T1 = T1 value in seconds
        T2 = T2 value in seconds
        B1 = B1 homogeneity fraction (scalar or length-1 sequence)

    OUTPUT:
        Mxy = Transverse magnetization at each echo time
        Mz = Longitudinal magnetization at each echo time
    """
    T = len(angles_rad)
    Mxy = np.zeros((T, 1))
    Mz = np.zeros((T, 1))
    P = np.array([[0], [0], [1]])  # initially on Mz
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt etc.
    try:
        B1 = B1[0]
    except (TypeError, IndexError):
        pass  # B1 is already a scalar
    # pre-scale by B1 homogeneity
    angle_ex_rad = B1 * np.copy(angle_ex_rad)
    angles_rad = B1 * np.copy(angles_rad)
    P = rf_ex(P, angle_ex_rad)  # initial tip
    for i in range(T):
        alpha = angles_rad[i]
        P = FSE_TE(P, alpha, TE, T1, T2)
        Mxy[i] = P[0, 0]
        Mz[i] = P[2, 0]
    return Mxy, Mz
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Demo: constant 120-degree CPMG echo train.
    T1 = 1000e-3
    T2 = 200e-3
    TE = 5e-3
    N = 100
    angles_rad = 120 * np.ones((N,)) * np.pi / 180.
    signal = abs(FSE_signal(angles_rad, TE, T1, T2))
    plt.plot(TE * 1000 * np.arange(1, N + 1), signal)
    plt.xlabel('time (ms)')
    plt.ylabel('signal')
    plt.title('T1 = %.2f ms, T2 = %.2f ms' % (T1 * 1000, T2 * 1000))
    plt.show()
| 29.081942
| 145
| 0.58899
| 3,117
| 19,165
| 3.52743
| 0.091434
| 0.025466
| 0.021282
| 0.020009
| 0.801273
| 0.753342
| 0.716417
| 0.677035
| 0.65075
| 0.622556
| 0
| 0.04724
| 0.279833
| 19,165
| 658
| 146
| 29.12614
| 0.749384
| 0.365249
| 0
| 0.550877
| 0
| 0
| 0.044641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119298
| false
| 0.014035
| 0.010526
| 0.007018
| 0.249123
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c75f9945d45f38861b45531d76bfde2007d0d0d4
| 827
|
py
|
Python
|
website/migrations/0006_auto_20181006_2147.py
|
Lewes/ecssweb
|
62c332757c24d7edac52a04121d8b77eced783a1
|
[
"MIT"
] | 4
|
2021-03-17T21:09:18.000Z
|
2022-03-03T17:10:51.000Z
|
website/migrations/0006_auto_20181006_2147.py
|
Lewes/ecssweb
|
62c332757c24d7edac52a04121d8b77eced783a1
|
[
"MIT"
] | 15
|
2018-08-21T19:01:06.000Z
|
2022-03-11T23:29:26.000Z
|
website/migrations/0006_auto_20181006_2147.py
|
Lewes/ecssweb
|
62c332757c24d7edac52a04121d8b77eced783a1
|
[
"MIT"
] | 2
|
2018-08-21T18:46:36.000Z
|
2021-11-13T16:23:53.000Z
|
# Generated by Django 2.1.2 on 2018-10-06 20:47
from django.db import migrations
class Migration(migrations.Migration):
    # Admin-display-only migration: AlterModelOptions updates the
    # verbose_name_plural Meta option for four models so the Django admin
    # shows grammatically correct plural labels; no schema change is made.

    dependencies = [
        ('website', '0005_committeerolemember_role_short_name'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='committeerolemember',
            options={'verbose_name_plural': 'committee roles members'},
        ),
        migrations.AlterModelOptions(
            name='society',
            options={'verbose_name_plural': 'societies'},
        ),
        migrations.AlterModelOptions(
            name='societylink',
            options={'verbose_name_plural': 'societies links'},
        ),
        migrations.AlterModelOptions(
            name='sponsorlink',
            options={'verbose_name_plural': 'sponsors links'},
        ),
    ]
| 27.566667
| 71
| 0.600967
| 69
| 827
| 7.028986
| 0.550725
| 0.22268
| 0.25567
| 0.197938
| 0.136082
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032203
| 0.286578
| 827
| 29
| 72
| 28.517241
| 0.789831
| 0.054414
| 0
| 0.347826
| 1
| 0
| 0.297436
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043478
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c771695886348bb3e9603b85ca5837f2f876019e
| 148
|
py
|
Python
|
plnx_demo_parameterize/plnx_sanity/conf.py
|
Xilinx/roast-examples
|
2d39194b6c8bc6e2efc793f1256c530d40c898d2
|
[
"MIT"
] | null | null | null |
plnx_demo_parameterize/plnx_sanity/conf.py
|
Xilinx/roast-examples
|
2d39194b6c8bc6e2efc793f1256c530d40c898d2
|
[
"MIT"
] | null | null | null |
plnx_demo_parameterize/plnx_sanity/conf.py
|
Xilinx/roast-examples
|
2d39194b6c8bc6e2efc793f1256c530d40c898d2
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
plnx_package_boot = True # Generate Package Boot Images
| 21.142857
| 56
| 0.743243
| 20
| 148
| 5.4
| 0.9
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.162162
| 148
| 6
| 57
| 24.666667
| 0.83871
| 0.743243
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c78ee4155340a40daab30f067efa7855099612dd
| 83
|
py
|
Python
|
myweb/pub_form/form_upload.py
|
marktiu7/Web
|
206876df425699e2e345aea8afc4efd27362f519
|
[
"Apache-2.0"
] | null | null | null |
myweb/pub_form/form_upload.py
|
marktiu7/Web
|
206876df425699e2e345aea8afc4efd27362f519
|
[
"Apache-2.0"
] | null | null | null |
myweb/pub_form/form_upload.py
|
marktiu7/Web
|
206876df425699e2e345aea8afc4efd27362f519
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
class UploadFile(forms.Form):
    """Single-field form for uploading one file."""

    file = forms.FileField()
| 16.6
| 29
| 0.759036
| 11
| 83
| 5.727273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144578
| 83
| 4
| 30
| 20.75
| 0.887324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c79fe6b29df0aed29daf66fe94c63ca18e713983
| 198
|
py
|
Python
|
scripts/arcrest/agol/__init__.py
|
datastark/crime-analysis-toolbox
|
af45e4ba59284d78b1c7d3e208a05e5001d024dd
|
[
"Apache-2.0"
] | 5
|
2019-01-12T13:57:52.000Z
|
2021-05-04T01:24:53.000Z
|
scripts/arcrest/agol/__init__.py
|
datastark/crime-analysis-toolbox
|
af45e4ba59284d78b1c7d3e208a05e5001d024dd
|
[
"Apache-2.0"
] | null | null | null |
scripts/arcrest/agol/__init__.py
|
datastark/crime-analysis-toolbox
|
af45e4ba59284d78b1c7d3e208a05e5001d024dd
|
[
"Apache-2.0"
] | 1
|
2018-08-11T19:09:57.000Z
|
2018-08-11T19:09:57.000Z
|
from __future__ import absolute_import
from .services import FeatureService, FeatureLayer, TableLayer, TiledService
from . import helperservices
from ._uploads import Uploads
__version__ = "3.5.3"
| 28.285714
| 76
| 0.828283
| 23
| 198
| 6.695652
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017143
| 0.116162
| 198
| 6
| 77
| 33
| 0.862857
| 0
| 0
| 0
| 0
| 0
| 0.025253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c7b02e3aa2da4eb60a526c0d6e2fa5915fcfb1af
| 40,072
|
py
|
Python
|
game/gameplay/level.py
|
chadfraser/CluGame
|
090f3749e102c5331136298356d543c8b4e8a9a5
|
[
"MIT"
] | 2
|
2018-05-17T11:14:19.000Z
|
2018-05-24T21:16:07.000Z
|
game/gameplay/level.py
|
chadfraser/CluGame
|
090f3749e102c5331136298356d543c8b4e8a9a5
|
[
"MIT"
] | null | null | null |
game/gameplay/level.py
|
chadfraser/CluGame
|
090f3749e102c5331136298356d543c8b4e8a9a5
|
[
"MIT"
] | null | null | null |
import pygame
import random
from game.tools.asset_cache import getImage
import game.tools.constants as c
class Level:
    """Base class for game levels; instantiate subclasses, not Level itself.

    Holds the tile layout — rubber traps and gold sprites, each split into
    horizontal and vertical lists of (column, row) tuples — plus the player
    start positions, black hole positions, item-spawn tiles, level boundary
    rects, and the flashing display state.
    """

    def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
        """Store the tile layout and reset all per-level state.

        Subclasses overwrite image/standardImage/lightImage with Surface
        objects and fill in backgroundColor, start positions, item tiles,
        and border rects.
        NOTE(review): the class docstrings elsewhere say "playerStartPositions"
        but the attribute is `playerStartPosition` — confirm before renaming.
        """
        self.image = None          # current image drawn for the level
        self.standardImage = None  # image used during standard play
        self.lightImage = None     # lighter variant used while flashing
        self.backgroundColor = c.BLACK
        self.rubberTilesHorizontal = rubberTilesHorizontal
        self.rubberTilesVertical = rubberTilesVertical
        self.goldTilesHorizontal = goldTilesHorizontal
        self.goldTilesVertical = goldTilesVertical
        self.activeRubberTraps = []
        self.playerStartPosition = [(0, 0), (0, 0), (0, 0), (0, 0)]
        self.blackHolePositions = []
        self.itemTiles = []
        self.levelBorderRects = []
        self.isFlashing = False
        self.frameCount = 0

    def initialize(self):
        """Reset the level to its non-flashing initial display state."""
        self.isFlashing = False
        self.image = self.standardImage
        self.frameCount = 0

    def flashBoard(self):
        """While flashing, alternate standardImage/lightImage every 6 frames."""
        if not self.isFlashing:
            return
        self.frameCount += 1
        self.image = self.standardImage if self.frameCount % 12 < 6 else self.lightImage
class BoardOneLevel(Level):
    """First level variant: board-1 artwork plus its fixed tile layout."""

    def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
        """Load the board-1 images and set start/black-hole/item tiles and borders."""
        super().__init__(rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical)
        self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_1A.png")
        self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_1B.png")
        self.image = self.standardImage
        self.backgroundColor = c.DARK_RED
        self.playerStartPosition = [(1, 1), (9, 1), (2, 7), (8, 7)]
        self.blackHolePositions = [(5, 4)]
        # Items may spawn on any tile in the grid except player starts, the
        # black hole, and the four excluded corner tiles.
        excluded = set(self.playerStartPosition) | set(self.blackHolePositions) | \
            {(1, 0), (9, 0), (1, 7), (9, 7)}
        self.itemTiles = [(x, y) for x in range(1, 10) for y in range(0, 8)
                          if (x, y) not in excluded]
        self.levelBorderRects = [
            pygame.Rect(0, 0, 80, 84), pygame.Rect(0, 0, 512, 36), pygame.Rect(0, 0, 39, 448),
            pygame.Rect(432, 0, 80, 84), pygame.Rect(477, 0, 39, 448),
            pygame.Rect(0, 380, 80, 84), pygame.Rect(432, 380, 80, 84),
            pygame.Rect(0, 426, 512, 36),
        ]
class BoardTwoLevel(Level):
    """A level that uses the second board layout.

    Attributes:
        rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal
            rubber traps.
        rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber
            traps.
        goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold
            sprites.
        goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold
            sprites.
    """

    def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
        """Init BoardTwoLevel using the lists of tuples rubberTilesHorizontal, rubberTilesVertical,
        goldTilesHorizontal, and goldTilesVertical.

        Instance variables:
            standardImage: The image to be drawn for the level during standard gameplay.
            lightImage: A lighter variant of standardImage, used when an ItemClock object is active or to
                give the illusion of the level flashing.
            image: The current image to be drawn for the level. Defaults to standardImage.
            backgroundColor: A tuple indicating the color of the level's background.
            playerStartPosition: A list of four tuples indicating which column and row each player starts
                on. (Singular name, matching the Level base class attribute.)
            blackHolePositions: A list of tuples indicating which columns and rows black hole sprites
                start on (two black holes on this board).
            itemTiles: A list of tuples indicating which columns and rows can have items spawned on them:
                every reachable tile except player starts, black holes, and the four corner tiles.
            levelBorderRects: A list of rect objects that form the boundaries of the level.
        """
        super().__init__(rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical)
        self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_2A.png")
        self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_2B.png")
        self.image = self.standardImage
        self.backgroundColor = c.DARK_GREEN
        self.playerStartPosition = [(4, 0), (6, 0), (1, 5), (9, 5)]
        self.blackHolePositions = [(2, 6), (8, 6)]
        # Build the exclusion set once so every membership test in the
        # comprehension is O(1); the resulting tile order is unchanged.
        excluded = set(self.playerStartPosition) | set(self.blackHolePositions) | {(1, 0), (9, 0), (1, 7), (9, 7)}
        self.itemTiles = [(x, y) for x in range(1, 10) for y in range(0, 8) if (x, y) not in excluded]
        self.levelBorderRects = [pygame.Rect(0, 0, 80, 84), pygame.Rect(0, 0, 512, 36), pygame.Rect(0, 0, 39, 448),
                                 pygame.Rect(432, 0, 80, 84), pygame.Rect(477, 0, 39, 448),
                                 pygame.Rect(0, 380, 80, 84), pygame.Rect(432, 380, 80, 84),
                                 pygame.Rect(0, 426, 512, 36)]
class BoardThreeLevel(Level):
    """A level that uses the third board layout.

    Attributes:
        rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal
            rubber traps.
        rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber
            traps.
        goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold
            sprites.
        goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold
            sprites.
    """

    def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
        """Init BoardThreeLevel using the lists of tuples rubberTilesHorizontal, rubberTilesVertical,
        goldTilesHorizontal, and goldTilesVertical.

        Instance variables:
            standardImage: The image to be drawn for the level during standard gameplay.
            lightImage: A lighter variant of standardImage, used when an ItemClock object is active or to
                give the illusion of the level flashing.
            image: The current image to be drawn for the level. Defaults to standardImage.
            backgroundColor: A tuple indicating the color of the level's background.
            playerStartPosition: A list of four tuples indicating which column and row each player starts
                on. (Singular name, matching the Level base class attribute.)
            blackHolePositions: A list of tuples indicating which columns and rows black hole sprites
                start on (two black holes on this board).
            itemTiles: A list of tuples indicating which columns and rows can have items spawned on them:
                every reachable tile except player starts, black holes, and the unreachable edge tiles.
            levelBorderRects: A list of rect objects that form the boundaries of the level.
        """
        super().__init__(rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical)
        self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_3A.png")
        self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_3B.png")
        self.image = self.standardImage
        self.backgroundColor = c.DARK_BLUE
        self.playerStartPosition = [(5, 1), (5, 6), (1, 3), (9, 3)]
        self.blackHolePositions = [(4, 4), (6, 4)]
        # Build the exclusion set once so every membership test in the
        # comprehension is O(1); the resulting tile order is unchanged.
        excluded = set(self.playerStartPosition) | set(self.blackHolePositions) | {(4, 0), (5, 0), (6, 0),
                                                                                   (4, 7), (5, 7), (6, 7)}
        self.itemTiles = [(x, y) for x in range(1, 10) for y in range(0, 8) if (x, y) not in excluded]
        self.levelBorderRects = [pygame.Rect(0, 0, 512, 36), pygame.Rect(0, 0, 39, 448), pygame.Rect(477, 0, 39, 448),
                                 pygame.Rect(0, 426, 512, 36), pygame.Rect(190, 0, 134, 84),
                                 pygame.Rect(190, 380, 134, 84)]
class BoardFourLevel(Level):
    """A level that uses the fourth board layout.

    Attributes:
        rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal
            rubber traps.
        rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber
            traps.
        goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold
            sprites.
        goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold
            sprites.
    """

    def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
        """Init BoardFourLevel using the lists of tuples rubberTilesHorizontal, rubberTilesVertical,
        goldTilesHorizontal, and goldTilesVertical.

        Instance variables:
            standardImage: The image to be drawn for the level during standard gameplay.
            lightImage: A lighter variant of standardImage, used when an ItemClock object is active or to
                give the illusion of the level flashing.
            image: The current image to be drawn for the level. Defaults to standardImage.
            backgroundColor: A tuple indicating the color of the level's background.
            playerStartPosition: A list of four tuples indicating which column and row each player starts
                on. (Singular name, matching the Level base class attribute.)
            blackHolePositions: A list of tuples indicating which columns and rows black hole sprites
                start on (four black holes on this board).
            itemTiles: A list of tuples indicating which columns and rows can have items spawned on them:
                every reachable tile except player starts, black holes, and the unreachable edge tiles.
            levelBorderRects: A list of rect objects that form the boundaries of the level.
        """
        super().__init__(rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical)
        self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_4A.png")
        self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_4B.png")
        self.image = self.standardImage
        self.backgroundColor = c.PURPLE
        self.playerStartPosition = [(4, 0), (6, 0), (1, 7), (9, 7)]
        self.blackHolePositions = [(2, 2), (8, 2), (4, 6), (6, 6)]
        # Build the exclusion set once so every membership test in the
        # comprehension is O(1); the resulting tile order is unchanged.
        excluded = set(self.playerStartPosition) | set(self.blackHolePositions) | {(5, 0), (0, 1), (5, 1), (10, 1),
                                                                                   (0, 2), (10, 2), (0, 3),
                                                                                   (10, 3), (0, 4), (10, 4),
                                                                                   (0, 5), (10, 5), (0, 6), (5, 6),
                                                                                   (10, 6), (5, 7)}
        self.itemTiles = [(x, y) for x in range(0, 11) for y in range(0, 8) if (x, y) not in excluded]
        self.levelBorderRects = [pygame.Rect(0, 0, 512, 36), pygame.Rect(238, 0, 36, 132),
                                 pygame.Rect(238, 346, 36, 132), pygame.Rect(0, 426, 512, 36),
                                 pygame.Rect(0, 92, 38, 280), pygame.Rect(476, 92, 38, 280)]
class BoardFiveLevel(Level):
    """A level that uses the fifth board layout.

    Attributes:
        rubberTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal
            rubber traps.
        rubberTilesVertical: A list of tuples indicating which columns and rows to place vertical rubber
            traps.
        goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal gold
            sprites.
        goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold
            sprites.
    """

    def __init__(self, rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical):
        """Init BoardFiveLevel using the lists of tuples rubberTilesHorizontal, rubberTilesVertical,
        goldTilesHorizontal, and goldTilesVertical.

        Instance variables:
            standardImage: The image to be drawn for the level during standard gameplay.
            lightImage: A lighter variant of standardImage, used when an ItemClock object is active or to
                give the illusion of the level flashing.
            image: The current image to be drawn for the level. Defaults to standardImage.
            backgroundColor: A tuple indicating the color of the level's background.
            activeRubberTraps: A list of tuples indicating which columns and rows have horizontal rubber
                traps which begin the game in an active state.
            playerStartPosition: A list of four tuples indicating which column and row each player starts
                on. (Singular name, matching the Level base class attribute.)
            blackHolePositions: A list of tuples indicating which columns and rows black hole sprites
                start on (four black holes on this board).
            itemTiles: A list of tuples indicating which columns and rows can have items spawned on them:
                every reachable tile except player starts, black holes, and the unreachable edge tiles.
            levelBorderRects: A list of rect objects that form the boundaries of the level.
        """
        super().__init__(rubberTilesHorizontal, rubberTilesVertical, goldTilesHorizontal, goldTilesVertical)
        self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_5A.png")
        self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_5B.png")
        self.image = self.standardImage
        self.backgroundColor = c.DARK_ORANGE
        self.activeRubberTraps = [(1, 4), (9, 4)]
        self.playerStartPosition = [(1, 0), (9, 0), (4, 7), (6, 7)]
        self.blackHolePositions = [(2, 4), (4, 4), (6, 4), (8, 4)]
        # Build the exclusion set once so every membership test in the
        # comprehension is O(1); the resulting tile order is unchanged.
        excluded = set(self.playerStartPosition) | set(self.blackHolePositions) | {(0, 0), (5, 0), (10, 0),
                                                                                   (0, 7), (5, 7), (10, 7)}
        self.itemTiles = [(x, y) for x in range(0, 11) for y in range(0, 8) if (x, y) not in excluded]
        self.levelBorderRects = [pygame.Rect(0, 0, 512, 36), pygame.Rect(238, 0, 40, 84), pygame.Rect(0, 426, 512, 36),
                                 pygame.Rect(238, 380, 40, 84), pygame.Rect(0, 0, 36, 84), pygame.Rect(478, 0, 36, 84),
                                 pygame.Rect(0, 380, 36, 84), pygame.Rect(478, 380, 36, 84)]
class BonusLevel(Level):
    """A level that uses the sixth, bonus board layout.

    Note that since the bonus level layout is unique, the gold-tile arguments that are usually passed
    to the other level variants are instead built as constant local lists inside __init__ before being
    forwarded to the Level base class (with empty rubber-trap lists).
    """
    def __init__(self):
        """Init BonusLevel.

        Instance variables:
            goldTilesHorizontal: A list of tuples indicating which columns and rows to place horizontal
                gold sprites.
            goldTilesVertical: A list of tuples indicating which columns and rows to place vertical gold
                sprites.
            standardImage: The image to be drawn for the level during standard gameplay.
            lightImage: A lighter variant of standardImage, used when an ItemClock object is active or to
                give the illusion of the level flashing.
            image: The current image to be drawn for the level. Defaults to standardImage.
            backgroundColor: A tuple indicating the color of the level's background.
            playerStartPosition: A list of four tuples indicating which column and row each player starts
                on. (Singular name, matching the Level base class attribute.)
            levelBorderRects: A list of rect objects that form the boundaries of the level.
        """
        goldTilesHorizontal = [(2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (2, 2), (3, 2), (4, 2), (5, 2),
                               (6, 2), (7, 2), (8, 2), (2, 3), (8, 3), (2, 4), (8, 4), (2, 5), (8, 5), (2, 6), (3, 6),
                               (4, 6), (5, 6), (6, 6), (7, 6), (8, 6), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7),
                               (8, 7)]
        goldTilesVertical = [(2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (2, 2), (3, 2), (8, 2),
                             (9, 2), (2, 3), (3, 3), (8, 3), (9, 3), (2, 4), (3, 4), (8, 4), (9, 4), (2, 5), (3, 5),
                             (8, 5), (9, 5), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6), (7, 6), (8, 6), (9, 6)]
        # The bonus board has no rubber traps, so the trap lists are empty.
        super().__init__([], [], goldTilesHorizontal, goldTilesVertical)
        self.standardImage = getImage(c.BACKGROUND_FOLDER, "background_6A.png")
        self.lightImage = getImage(c.BACKGROUND_FOLDER, "background_6B.png")
        self.image = self.standardImage
        self.backgroundColor = c.DARK_RED
        self.playerStartPosition = [(4, 1), (6, 1), (3, 6), (7, 6)]
        self.levelBorderRects = [pygame.Rect(0, 0, 512, 36), pygame.Rect(188, 186, 136, 94),
                                 pygame.Rect(0, 426, 512, 36), pygame.Rect(0, 0, 39, 448),
                                 pygame.Rect(477, 0, 39, 448)]
# Create an instance of each of the 41 different level patterns. This ensures that there is exactly one copy of each
# level pattern at all times, with the gold tiles and rubber trap tiles in the proper locations.
# Constructor arguments, in order: rubberTilesHorizontal, rubberTilesVertical,
# goldTilesHorizontal, goldTilesVertical.
# -- Patterns for the first board layout --
HEART = BoardOneLevel([], [(4, 3), (7, 3)],
                      [(3, 1), (4, 1), (6, 1), (7, 1), (2, 2), (5, 2), (8, 2), (2, 4), (8, 4), (3, 5), (7, 5), (4, 6),
                       (6, 6), (5, 7)],
                      [(3, 1), (5, 1), (6, 1), (8, 1), (2, 2), (9, 2), (2, 3), (9, 3), (3, 4), (8, 4), (4, 5), (7, 5),
                       (5, 6), (6, 6)])
HOUSE = BoardOneLevel([], [(4, 5), (7, 5)],
                      [(4, 1), (5, 1), (6, 1), (3, 2), (7, 2), (2, 3), (8, 3), (2, 4), (3, 4), (4, 4), (5, 4), (6, 4),
                       (7, 4), (8, 4), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)],
                      [(4, 1), (7, 1), (3, 2), (8, 2), (2, 3), (9, 3), (3, 4), (8, 4), (3, 5), (8, 5), (3, 6), (8, 6)])
FACE = BoardOneLevel([(2, 4), (8, 4)], [],
                     [(4, 1), (6, 1), (4, 3), (6, 3), (2, 5), (8, 5), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6), (7, 6),
                      (8, 6), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)],
                     [(4, 1), (5, 1), (6, 1), (7, 1), (4, 2), (5, 2), (6, 2), (7, 2), (2, 5), (3, 5), (8, 5), (9, 5),
                      (3, 6), (8, 6)])
HUMAN = BoardOneLevel([(5, 3)], [],
                      [(5, 1), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2), (2, 3), (3, 3), (7, 3), (8, 3), (2, 4), (8, 4),
                       (5, 6), (4, 7), (6, 7)],
                      [(5, 1), (6, 1), (3, 2), (8, 2), (2, 3), (3, 3), (4, 3), (7, 3), (8, 3), (9, 3), (4, 4), (7, 4),
                       (4, 5), (7, 5), (4, 6), (5, 6), (6, 6), (7, 6)])
BUBBLES = BoardOneLevel([], [],
                        [(3, 1), (4, 1), (5, 1), (2, 2), (6, 2), (8, 3), (7, 4), (8, 4), (2, 5), (6, 5), (2, 6),
                         (3, 6), (4, 6), (5, 6), (6, 6)],
                        [(3, 1), (6, 1), (2, 2), (4, 2), (5, 2), (7, 2), (2, 3), (7, 3), (8, 3), (9, 3), (2, 4),
                         (7, 4), (3, 5), (6, 5), (4, 6), (5, 6)])
LETTER_KE = BoardOneLevel([], [(5, 6)],
                          [(3, 1), (7, 1), (6, 2), (8, 2), (6, 3), (8, 3), (6, 6), (3, 7), (6, 7), (7, 7)],
                          [(3, 1), (4, 1), (7, 1), (8, 1), (3, 2), (4, 2), (6, 2), (9, 2), (3, 3), (4, 3), (7, 3),
                           (8, 3), (3, 4), (4, 4), (7, 4), (8, 4), (3, 5), (4, 5), (7, 5), (8, 5), (3, 6), (4, 6),
                           (6, 6), (8, 6)])
TELEVISION = BoardOneLevel([], [(2, 4), (9, 4)],
                           [(4, 1), (6, 1), (4, 2), (5, 2), (6, 2), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3), (4, 4),
                            (5, 4), (6, 4), (4, 6), (5, 6), (6, 6), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)],
                           [(5, 1), (6, 1), (4, 2), (7, 2), (3, 3), (8, 3), (3, 4), (4, 4), (7, 4), (8, 4), (3, 5),
                            (4, 5), (7, 5), (8, 5), (3, 6), (8, 6)])
KOOPA = BoardOneLevel([], [(7, 4)],
                      [(3, 1), (2, 2), (5, 2), (6, 2), (2, 3), (4, 3), (7, 3), (3, 5), (3, 6), (4, 6), (5, 6), (6, 6),
                       (7, 6), (3, 7), (7, 7)],
                      [(3, 1), (4, 1), (2, 2), (4, 2), (5, 2), (7, 2), (3, 3), (4, 3), (8, 3), (3, 4), (4, 4), (8, 4),
                       (3, 5), (8, 5), (4, 6), (7, 6)])
# -- Patterns for the second board layout --
CLOWN = BoardTwoLevel([(5, 2)], [(4, 6), (7, 6)],
                      [(3, 2), (7, 2), (2, 3), (4, 3), (6, 3), (8, 3), (2, 4), (4, 4), (6, 4), (8, 4), (3, 5), (5, 5),
                       (7, 5), (5, 7)],
                      [(3, 2), (4, 2), (7, 2), (8, 2), (2, 3), (5, 3), (6, 3), (9, 3), (3, 4), (4, 4), (7, 4), (8, 4),
                       (5, 5), (6, 5), (5, 6), (6, 6)])
SPADE = BoardTwoLevel([(5, 3)], [],
                      [(5, 1), (4, 2), (6, 2), (3, 3), (7, 3), (5, 4), (3, 5), (4, 5), (5, 5), (6, 5), (7, 5), (4, 6),
                       (6, 6), (4, 7), (5, 7), (6, 7)],
                      [(5, 1), (6, 1), (4, 2), (7, 2), (3, 3), (8, 3), (3, 4), (5, 4), (6, 4), (8, 4), (5, 5), (6, 5),
                       (4, 6), (7, 6)])
MOUSE = BoardTwoLevel([], [(5, 3), (6, 3)],
                      [(3, 1), (7, 1), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2), (3, 3), (7, 3), (3, 5), (5, 5), (7, 5),
                       (4, 6), (5, 6), (6, 6), (5, 7)],
                      [(3, 1), (4, 1), (7, 1), (8, 1), (4, 2), (7, 2), (3, 3), (8, 3), (3, 4), (8, 4), (4, 5), (5, 5),
                       (6, 5), (7, 5), (5, 6), (6, 6)])
EAGLE = BoardTwoLevel([(4, 4), (6, 4)], [],
                      [(5, 1), (6, 1), (6, 2), (2, 3), (3, 3), (4, 3), (6, 3), (7, 3), (8, 3), (2, 4), (8, 4), (3, 5),
                       (4, 5), (6, 5), (7, 5), (4, 6), (5, 6), (6, 6), (4, 7), (6, 7)],
                      [(5, 1), (7, 1), (5, 2), (6, 2), (2, 3), (9, 3), (3, 4), (8, 4), (5, 5), (6, 5), (4, 6), (5, 6),
                       (6, 6), (7, 6)])
RAIN = BoardTwoLevel([(5, 2)], [],
                     [(4, 1), (5, 1), (6, 1), (3, 2), (7, 2), (2, 3), (8, 3), (2, 4), (3, 4), (4, 4), (5, 4), (6, 4),
                      (7, 4), (8, 4)],
                     [(4, 1), (7, 1), (3, 2), (8, 2), (2, 3), (9, 3), (4, 4), (6, 4), (8, 4), (3, 5), (5, 5), (7, 5)])
CAR = BoardTwoLevel([(3, 5), (7, 5)], [],
                    [(4, 2), (5, 2), (6, 2), (7, 2), (3, 3), (8, 3), (2, 4), (5, 4), (7, 4), (8, 4), (2, 6), (3, 6),
                     (4, 6), (5, 6), (6, 6), (7, 6), (8, 6)],
                    [(4, 2), (7, 2), (8, 2), (3, 3), (7, 3), (9, 3), (2, 4), (5, 4), (6, 4), (9, 4), (2, 5), (5, 5),
                     (6, 5), (9, 5)])
MUSHROOM = BoardTwoLevel([(5, 4)], [],
                         [(4, 1), (5, 1), (6, 1), (3, 2), (7, 2), (2, 3), (8, 3), (2, 5), (3, 5), (4, 5), (5, 5),
                          (6, 5), (7, 5), (8, 5), (4, 7), (5, 7), (6, 7)],
                         [(4, 1), (7, 1), (3, 2), (8, 2), (2, 3), (9, 3), (2, 4), (9, 4), (4, 5), (5, 5), (6, 5),
                          (7, 5), (4, 6), (7, 6)])
SKULL = BoardTwoLevel([(5, 7)], [],
                      [(3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (4, 2), (6, 2), (3, 4), (4, 4), (6, 4), (7, 4), (3, 5),
                       (5, 5), (7, 5), (4, 6), (5, 6), (6, 6), (3, 7), (7, 7)],
                      [(3, 1), (8, 1), (3, 2), (5, 2), (6, 2), (8, 2), (3, 3), (8, 3), (5, 4), (6, 4), (4, 5), (7, 5),
                       (4, 6), (7, 6)])
# -- Patterns for the third board layout --
SUBMARINE = BoardThreeLevel([], [(3, 1), (8, 1)],
                            [(4, 3), (5, 3), (8, 3), (2, 4), (3, 4), (4, 4), (5, 4), (6, 4), (7, 4), (8, 4), (7, 5),
                             (8, 5), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6), (8, 6)],
                            [(5, 2), (4, 3), (6, 3), (8, 3), (9, 3), (2, 4), (8, 4), (2, 5), (7, 5), (8, 5), (9, 5)])
GLASSES = BoardThreeLevel([(3, 2), (7, 2)], [],
                          [(3, 3), (4, 3), (6, 3), (7, 3), (2, 4), (5, 4), (8, 4), (3, 6), (4, 6), (6, 6), (7, 6)],
                          [(2, 1), (9, 1), (2, 2), (9, 2), (2, 3), (3, 3), (5, 3), (6, 3), (8, 3), (9, 3), (3, 4),
                           (5, 4), (6, 4), (8, 4), (3, 5), (5, 5), (6, 5), (8, 5)])
KOALA = BoardThreeLevel([(4, 3), (6, 3), (2, 6), (8, 6)], [],
                        [(2, 1), (8, 1), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2), (2, 3), (8, 3), (3, 5), (5, 5),
                         (7, 5), (4, 6), (5, 6), (6, 6)],
                        [(2, 1), (3, 1), (8, 1), (9, 1), (2, 2), (9, 2), (3, 3), (8, 3), (3, 4), (8, 4), (4, 5),
                         (5, 5), (6, 5), (7, 5)])
BUTTERFLY = BoardThreeLevel([], [(5, 2), (6, 2)],
                            [(2, 2), (8, 2), (3, 3), (7, 3), (4, 4), (5, 4), (6, 4), (4, 5), (6, 5), (2, 6), (3, 6),
                             (5, 6), (7, 6), (8, 6)],
                            [(2, 2), (3, 2), (8, 2), (9, 2), (2, 3), (4, 3), (7, 3), (9, 3), (2, 4), (5, 4), (6, 4),
                             (9, 4), (2, 5), (4, 5), (5, 5), (6, 5), (7, 5), (9, 5)])
FISH = BoardThreeLevel([(2, 1), (8, 1), (7, 5)], [],
                       [(2, 2), (6, 2), (7, 2), (8, 2), (3, 3), (4, 4), (5, 4), (4, 5), (5, 5), (3, 6), (6, 6), (7, 6),
                        (8, 6), (2, 7)],
                       [(2, 2), (3, 2), (6, 2), (9, 2), (2, 3), (4, 3), (6, 3), (8, 3), (9, 3), (2, 4), (5, 4), (9, 4),
                        (2, 5), (4, 5), (6, 5), (9, 5), (2, 6), (3, 6)])
CLU_CLU = BoardThreeLevel([], [(8, 3), (3, 4)],
                          [(2, 1), (8, 1), (8, 2), (2, 3), (4, 3), (6, 3), (4, 4), (2, 6), (4, 6), (6, 6), (8, 6),
                           (2, 7)],
                          [(2, 1), (8, 1), (9, 1), (2, 2), (4, 2), (6, 2), (7, 2), (4, 4), (4, 5), (6, 5), (8, 5),
                           (9, 5), (2, 6), (3, 6)])
CROWN = BoardThreeLevel([(2, 7), (8, 7)], [(4, 2), (7, 2)],
                        [(2, 1), (8, 1), (2, 2), (5, 2), (8, 2), (5, 3), (3, 4), (4, 4), (6, 4), (7, 4), (2, 6),
                         (3, 6), (4, 6), (5, 6), (6, 6), (7, 6), (8, 6)],
                        [(2, 1), (3, 1), (8, 1), (9, 1), (2, 2), (3, 2), (5, 2), (6, 2), (8, 2), (9, 2), (2, 3),
                         (3, 3), (5, 3), (6, 3), (8, 3), (9, 3), (2, 4), (9, 4), (2, 5), (9, 5)])
SWORD_SHIELD = BoardThreeLevel([(7, 4), (2, 7), (8, 7)], [(2, 1)],
                               [(3, 2), (6, 3), (7, 3), (8, 3), (2, 5), (3, 5), (4, 5), (6, 5), (8, 5), (3, 6),
                                (7, 6)],
                               [(3, 2), (4, 2), (3, 3), (4, 3), (6, 3), (9, 3), (3, 4), (4, 4), (6, 4), (9, 4), (3, 5),
                                (4, 5), (7, 5), (8, 5)])
# -- Patterns for the fourth board layout --
HOLE = BoardFourLevel([(3, 3), (7, 3)], [(4, 4), (7, 4)],
                      [(3, 1), (7, 1), (3, 2), (7, 2), (5, 3), (2, 4), (5, 4), (8, 4), (2, 5), (5, 5), (8, 5), (3, 6),
                       (7, 6), (3, 7), (7, 7)],
                      [(3, 1), (4, 1), (7, 1), (8, 1), (5, 3), (6, 3), (2, 4), (3, 4), (5, 4), (6, 4), (8, 4), (9, 4),
                       (3, 6), (4, 6), (7, 6), (8, 6)])
KEY = BoardFourLevel([(2, 4), (4, 4), (6, 4), (8, 4)], [],
                     [(2, 1), (8, 1), (3, 2), (7, 2), (2, 3), (3, 3), (7, 3), (8, 3), (2, 5), (3, 5), (7, 5), (8, 5),
                      (2, 6), (8, 6), (3, 7), (7, 7)],
                     [(2, 1), (3, 1), (8, 1), (9, 1), (2, 2), (4, 2), (7, 2), (9, 2), (2, 5), (4, 5), (7, 5), (9, 5),
                      (3, 6), (4, 6), (7, 6), (8, 6)])
RIBBON = BoardFourLevel([(2, 4), (5, 4), (8, 4)], [],
                        [(2, 2), (3, 2), (7, 2), (8, 2), (2, 3), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3), (8, 3),
                         (2, 5), (3, 5), (4, 5), (5, 5), (6, 5), (7, 5), (8, 5), (2, 6), (3, 6), (7, 6), (8, 6)],
                        [(2, 2), (4, 2), (7, 2), (9, 2), (4, 3), (7, 3), (4, 4), (7, 4), (2, 5), (4, 5), (7, 5),
                         (9, 5)])
LETTER_H = BoardFourLevel([(4, 4), (6, 4)], [(3, 3), (8, 3), (3, 5), (8, 5)],
                          [(2, 1), (3, 1), (7, 1), (8, 1), (4, 3), (5, 3), (6, 3), (4, 5), (5, 5), (6, 5), (2, 7),
                           (3, 7), (7, 7), (8, 7)],
                          [(2, 1), (4, 1), (7, 1), (9, 1), (2, 2), (4, 2), (7, 2), (9, 2), (2, 3), (9, 3), (2, 4),
                           (9, 4), (2, 5), (4, 5), (7, 5), (9, 5), (2, 6), (4, 6), (7, 6), (9, 6)])
PUNCTUATION = BoardFourLevel([], [(6, 3), (5, 4), (2, 6), (9, 6)],
                             [(3, 1), (7, 1), (8, 1), (7, 2), (7, 3), (8, 4), (3, 5), (7, 5), (3, 6), (7, 6), (3, 7),
                              (7, 7)],
                             [(3, 1), (4, 1), (7, 1), (9, 1), (3, 2), (4, 2), (8, 2), (9, 2), (3, 3), (4, 3), (7, 3),
                              (9, 3), (3, 4), (4, 4), (7, 4), (8, 4), (3, 6), (4, 6), (7, 6), (8, 6)])
FROWN = BoardFourLevel([], [(2, 3), (9, 3)],
                       [(3, 1), (7, 1), (3, 3), (7, 3), (3, 4), (4, 4), (5, 4), (6, 4), (7, 4), (4, 5), (5, 5), (6, 5),
                        (3, 7), (7, 7)],
                       [(3, 1), (4, 1), (7, 1), (8, 1), (3, 2), (4, 2), (7, 2), (8, 2), (3, 4), (8, 4), (3, 5), (4, 5),
                        (7, 5), (8, 5), (3, 6), (4, 6), (7, 6), (8, 6)])
PYTHON = BoardFourLevel([(2, 1), (8, 7)], [],
                        [(8, 1), (7, 2), (3, 3), (4, 3), (5, 3), (6, 3), (2, 4), (4, 4), (5, 4), (6, 4), (8, 4),
                         (4, 5), (5, 5), (6, 5), (7, 5), (3, 6), (2, 7)],
                        [(8, 1), (9, 1), (7, 2), (8, 2), (9, 2), (3, 3), (7, 3), (9, 3), (2, 4), (4, 4), (8, 4),
                         (2, 5), (3, 5), (4, 5), (2, 6), (3, 6)])
FLIP = BoardFourLevel([(7, 2), (5, 3), (3, 6)], [],
                      [(2, 1), (2, 2), (3, 2), (7, 3), (5, 4), (3, 5), (5, 5), (7, 6), (8, 6), (8, 7)],
                      [(2, 1), (3, 1), (9, 1), (3, 2), (4, 2), (9, 2), (3, 3), (4, 3), (7, 3), (8, 3), (3, 4), (4, 4),
                       (5, 4), (6, 4), (7, 4), (8, 4), (2, 5), (7, 5), (8, 5), (2, 6), (8, 6), (9, 6)])
# -- Patterns for the fifth board layout --
SPIDER = BoardFiveLevel([(3, 4), (7, 4)], [(1, 2), (4, 2), (7, 2), (4, 5), (7, 5), (10, 5)],
                        [(2, 1), (8, 1), (5, 2), (2, 3), (8, 3), (2, 5), (8, 5), (5, 6), (2, 7), (8, 7)],
                        [(3, 1), (8, 1), (2, 2), (9, 2), (5, 3), (6, 3), (5, 4), (6, 4), (2, 5), (9, 5), (3, 6),
                         (8, 6)])
LETTER_X = BoardFiveLevel([(5, 3), (5, 5)], [(3, 1), (8, 1), (10, 2), (1, 5), (3, 6), (8, 6)],
                          [(4, 2), (5, 2), (6, 2), (2, 3), (8, 3), (5, 4), (2, 5), (8, 5), (4, 6), (5, 6), (6, 6)],
                          [(4, 1), (7, 1), (2, 2), (9, 2), (3, 3), (8, 3), (3, 4), (8, 4), (2, 5), (9, 5), (4, 6),
                           (7, 6)])
BOX = BoardFiveLevel([(5, 3), (3, 4), (7, 4), (5, 5)], [(3, 2), (8, 2), (3, 5), (8, 5)],
                     [(2, 1), (3, 1), (7, 1), (8, 1), (4, 2), (6, 2), (4, 6), (6, 6), (2, 7), (3, 7), (7, 7), (8, 7)],
                     [(1, 2), (2, 2), (4, 2), (7, 2), (9, 2), (10, 2), (1, 5), (2, 5), (4, 5), (7, 5), (9, 5),
                      (10, 5)])
DIAMOND = BoardFiveLevel([(3, 4), (5, 4), (7, 4)], [(4, 1), (7, 1), (4, 6), (7, 6)],
                         [(2, 2), (5, 2), (8, 2), (1, 3), (3, 3), (5, 3), (7, 3), (9, 3), (1, 5), (3, 5), (5, 5),
                          (7, 5), (9, 5), (2, 6), (5, 6), (8, 6)],
                         [(3, 1), (8, 1), (2, 2), (9, 2), (2, 5), (9, 5), (3, 6), (8, 6)])
INVERTED_DIAMOND = BoardFiveLevel([(3, 6), (7, 6)], [(3, 2), (8, 2)],
                                  [(2, 1), (8, 1), (1, 2), (9, 2), (5, 3), (3, 5), (5, 5), (7, 5), (1, 6), (9, 6),
                                   (2, 7), (8, 7)],
                                  [(2, 1), (9, 1), (1, 2), (10, 2), (3, 4), (8, 4), (1, 5), (10, 5), (2, 6), (9, 6)])
BOX_PLUS = BoardFiveLevel([], [(2, 2), (9, 2), (4, 5), (7, 5)],
                          [(2, 1), (8, 1), (5, 2), (5, 3), (5, 5), (5, 6), (3, 7), (7, 7)],
                          [(1, 2), (3, 2), (5, 2), (6, 2), (8, 2), (10, 2), (5, 3), (6, 3), (5, 4), (6, 4), (1, 5),
                           (2, 5), (3, 5), (5, 5), (6, 5), (8, 5), (9, 5), (10, 5)])
CRUSHER = BoardFiveLevel([(4, 2), (6, 6)], [],
                         [(2, 1), (8, 1), (1, 3), (9, 3), (5, 4), (1, 5), (9, 5), (2, 7), (8, 7)],
                         [(2, 1), (3, 1), (8, 1), (9, 1), (2, 2), (3, 2), (8, 2), (9, 2), (3, 3), (8, 3), (3, 4),
                          (8, 4), (2, 5), (3, 5), (8, 5), (9, 5), (2, 6), (3, 6), (8, 6), (9, 6)])
KEY_PLUS = BoardFiveLevel([(3, 4), (5, 4), (7, 4)], [],
                          [(1, 2), (5, 2), (9, 2), (1, 3), (2, 3), (4, 3), (6, 3), (8, 3), (9, 3),
                           (1, 5), (2, 5), (4, 5), (6, 5), (8, 5), (9, 5), (1, 6), (5, 6), (9, 6)],
                          [(2, 1), (3, 1), (8, 1), (9, 1), (3, 2), (5, 2), (6, 2), (8, 2),
                           (3, 5), (5, 5), (6, 5), (8, 5), (2, 6), (3, 6), (8, 6), (9, 6)])
# The single bonus level instance; it is reused after every round of four boards.
BONUS_LEVEL = BonusLevel()
# Pools of level patterns grouped by board layout. getLevelOrder shuffles these
# lists in place when building a playthrough order.
boardOneLevels = [HEART, HOUSE, FACE, HUMAN, BUBBLES, LETTER_KE, TELEVISION, KOOPA]
boardTwoLevels = [CLOWN, SPADE, MOUSE, EAGLE, RAIN, CAR, MUSHROOM, SKULL]
boardThreeLevels = [SUBMARINE, GLASSES, KOALA, BUTTERFLY, FISH, CLU_CLU, CROWN, SWORD_SHIELD]
boardFourLevels = [HOLE, KEY, RIBBON, LETTER_H, PUNCTUATION, FROWN, PYTHON, FLIP]
boardFiveLevels = [SPIDER, LETTER_X, BOX, DIAMOND, INVERTED_DIAMOND, BOX_PLUS, CRUSHER, KEY_PLUS]
listOfAllBoardsPastOne = [boardTwoLevels, boardThreeLevels, boardFourLevels, boardFiveLevels]
def getLevelOrder():
    """Build a randomized play order of 21 levels.

    The order always follows this repeating pattern, with the specific level
    instance filling each slot chosen by shuffling the pattern pools:

        boardOneLevel, boardTwoLevel, boardThreeLevel, boardFourLevel, boardFiveLevel, BonusLevel
        boardTwoLevel, boardThreeLevel, boardFourLevel, boardFiveLevel, BonusLevel
        boardTwoLevel, boardThreeLevel, boardFourLevel, boardFiveLevel, BonusLevel
        boardTwoLevel, boardThreeLevel, boardFourLevel, boardFiveLevel, BonusLevel

    One board-one level is used, then four rounds each drawing one level from
    every later board pool followed by the single bonus level.

    Returns:
        newLevelOrder: A list of Level objects in the order to be played.
    """
    # Shuffle every pool in place; the first four entries of each later pool
    # become the picks for the four rounds.
    random.shuffle(boardOneLevels)
    for levelPool in listOfAllBoardsPastOne:
        random.shuffle(levelPool)
    newLevelOrder = [boardOneLevels[0]]
    for roundIndex in range(4):
        newLevelOrder.extend(levelPool[roundIndex] for levelPool in listOfAllBoardsPastOne)
        newLevelOrder.append(BONUS_LEVEL)
    return newLevelOrder
| 69.089655
| 119
| 0.474147
| 5,741
| 40,072
| 3.291413
| 0.056262
| 0.007621
| 0.020375
| 0.069644
| 0.7817
| 0.757462
| 0.748254
| 0.712214
| 0.67935
| 0.642252
| 0
| 0.125959
| 0.336095
| 40,072
| 579
| 120
| 69.208981
| 0.584311
| 0.318751
| 0
| 0.155689
| 0
| 0
| 0.00784
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02994
| false
| 0
| 0.011976
| 0
| 0.065868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c7b3f98bfbcc3eb8db3d0f88535ff41566e41fcd
| 185
|
py
|
Python
|
budgetbuddy/stocks/apps.py
|
michaelqknguyen/Budget-Buddy
|
d1d25648d29f9b398b399e63b187b54daf3be521
|
[
"MIT"
] | null | null | null |
budgetbuddy/stocks/apps.py
|
michaelqknguyen/Budget-Buddy
|
d1d25648d29f9b398b399e63b187b54daf3be521
|
[
"MIT"
] | null | null | null |
budgetbuddy/stocks/apps.py
|
michaelqknguyen/Budget-Buddy
|
d1d25648d29f9b398b399e63b187b54daf3be521
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class StocksConfig(AppConfig):
    """Django application configuration for the budgetbuddy stocks app."""

    name = 'budgetbuddy.stocks'
    verbose_name = _('Stocks')
| 23.125
| 54
| 0.767568
| 22
| 185
| 6.272727
| 0.727273
| 0.144928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151351
| 185
| 7
| 55
| 26.428571
| 0.878981
| 0
| 0
| 0
| 0
| 0
| 0.12973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c7bbd120d8b2d28d027984c87205f445484d5dfd
| 759
|
py
|
Python
|
stage/factory/base.py
|
daveshed/linearstage
|
79d34d02482cd6c34102d07f29f6d2c7b7088c08
|
[
"MIT"
] | null | null | null |
stage/factory/base.py
|
daveshed/linearstage
|
79d34d02482cd6c34102d07f29f6d2c7b7088c08
|
[
"MIT"
] | null | null | null |
stage/factory/base.py
|
daveshed/linearstage
|
79d34d02482cd6c34102d07f29f6d2c7b7088c08
|
[
"MIT"
] | null | null | null |
#pylint: disable=missing-docstring
#pylint: enable=missing-docstring
import abc
class StageFactoryBase(abc.ABC):
    """
    The factory base class that provides necessary objects required to
    instantiate a Stage object.

    Concrete subclasses must supply the travel limits, the motor and the
    end-stop sensor from which a Stage can be assembled.
    """
    # abc.abstractproperty has been deprecated since Python 3.3; stacking
    # @property over @abc.abstractmethod is the supported equivalent and
    # preserves the abstract read-only property contract.
    @property
    @abc.abstractmethod
    def minimum_position(self):
        """The stage minimum position"""

    @property
    @abc.abstractmethod
    def maximum_position(self):
        """The stage maximum position"""

    @property
    @abc.abstractmethod
    def motor(self):
        """The motor to be used by the stage"""

    @property
    @abc.abstractmethod
    def end_stop(self):
        """
        End stop object that is triggered when the stage reaches the end of
        its travel
        """
| 23
| 75
| 0.635046
| 86
| 759
| 5.569767
| 0.5
| 0.158664
| 0.183716
| 0.175365
| 0.150313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28195
| 759
| 32
| 76
| 23.71875
| 0.878899
| 0.43083
| 0
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.071429
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
c7e9561ef48f0b334718cd86a746e2aeb7d60039
| 532
|
py
|
Python
|
wagtail_editor_extensions/utils/feature.py
|
mattdood/wagtail-editor-extensions
|
ecd60ffa6adaa77470f8b1081f4a6a2d994e7f7c
|
[
"MIT"
] | null | null | null |
wagtail_editor_extensions/utils/feature.py
|
mattdood/wagtail-editor-extensions
|
ecd60ffa6adaa77470f8b1081f4a6a2d994e7f7c
|
[
"MIT"
] | null | null | null |
wagtail_editor_extensions/utils/feature.py
|
mattdood/wagtail-editor-extensions
|
ecd60ffa6adaa77470f8b1081f4a6a2d994e7f7c
|
[
"MIT"
] | null | null | null |
from wagtail_editor_extensions.conf import get_setting
def get_feature_choices(feature_setting):
    """Return the configured feature mapping as a tuple of (key, value) pairs."""
    configured = get_setting(feature_setting)
    return tuple(configured.items())
def get_feature_name(feature_name, name):
    """Join a feature family name and a feature key with an underscore."""
    return '{0}_{1}'.format(feature_name, name)
def get_feature_name_upper(feature_name, name):
    """Upper-cased variant of get_feature_name()."""
    combined = get_feature_name(feature_name, name)
    return combined.upper()
def get_feature_name_list(feature_setting, feature_name):
    """Build the list of upper-cased feature names, one per configured key."""
    result = []
    for key in get_setting(feature_setting):
        result.append(get_feature_name_upper(feature_name, key))
    return result
| 28
| 103
| 0.791353
| 77
| 532
| 5.064935
| 0.272727
| 0.310256
| 0.179487
| 0.130769
| 0.364103
| 0.323077
| 0.174359
| 0
| 0
| 0
| 0
| 0
| 0.118421
| 532
| 18
| 104
| 29.555556
| 0.831557
| 0
| 0
| 0
| 0
| 0
| 0.009399
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.1
| 0.3
| 0.9
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
c7f524dc0b5952d8ed6eeb0a6f5f053697b986c6
| 3,507
|
py
|
Python
|
COLORFUL-Python/colorful-server/run_examples.py
|
FFFFOX/COLORFUL
|
3a6fab6184f94b0a3a5b3e56a649cca076ddfc91
|
[
"MIT"
] | 77
|
2021-12-09T03:14:31.000Z
|
2022-03-06T06:06:38.000Z
|
COLORFUL-Python/colorful-server/run_examples.py
|
FFFFOX/COLORFUL
|
3a6fab6184f94b0a3a5b3e56a649cca076ddfc91
|
[
"MIT"
] | 1
|
2021-12-09T03:22:23.000Z
|
2021-12-09T03:22:23.000Z
|
COLORFUL-Python/colorful-server/run_examples.py
|
FFFFOX/COLORFUL
|
3a6fab6184f94b0a3a5b3e56a649cca076ddfc91
|
[
"MIT"
] | 9
|
2021-12-09T07:02:41.000Z
|
2021-12-17T07:51:04.000Z
|
from recolor import Core
def main():
    """Run the recolor.Core example pipeline.

    Simulates protanopia / deuteranopia / tritanopia / hybrid color vision on
    an example image, then produces corrected images and re-simulates them to
    show the difference; every result is saved under Examples_Check/.
    """
    # Simulating Protanopia with diagnosed degree of 0.9 and saving the image to file.
    Core.simulate(input_path='Examples_Check/ex_original.jpg',
                  return_type='save',
                  save_path='Examples_Check/ex_simulate_protanopia.png',
                  simulate_type='protanopia',
                  simulate_degree_primary=0.9)
    # Simulating deuteranopia with diagnosed degree of 0.9 and saving the image to file.
    Core.simulate(input_path='Examples_Check/ex_original.jpg',
                  return_type='save',
                  save_path='Examples_Check/ex_simulate_deuteranopia.png',
                  simulate_type='deuteranopia',
                  simulate_degree_primary=0.9)
    # Simulating Tritanopia with diagnosed degree of 0.9 and saving the image to file.
    Core.simulate(input_path='Examples_Check/ex_original.jpg',
                  return_type='save',
                  save_path='Examples_Check/ex_simulate_tritanopia.png',
                  simulate_type='tritanopia',
                  simulate_degree_primary=0.9)
    # Simulating Hybrid (Protanomaly + Deutranomaly) with diagnosed degrees of
    # 0.5 (primary) and 0.5 (secondary) and saving the image to file.
    Core.simulate(input_path='Examples_Check/ex_original.jpg',
                  return_type='save',
                  save_path='Examples_Check/ex_simulate_hybrid.png',
                  simulate_type='hybrid',
                  simulate_degree_primary=0.5,
                  simulate_degree_sec=0.5)
    # Correcting Image for Protanopia with diagnosed degree of 0.9 and saving the image to file.
    Core.correct(input_path='Examples_Check/ex_original.jpg',
                 return_type='save',
                 save_path='Examples_Check/ex_corrected_protanopia.png',
                 protanopia_degree=0.9,
                 deuteranopia_degree=0.0)
    # Also simulate the corrected image to see difference.
    Core.simulate(input_path='Examples_Check/ex_corrected_protanopia.png',
                  return_type='save',
                  save_path='Examples_Check/ex_simulate_corrected_protanopia.png',
                  simulate_type='protanopia',
                  simulate_degree_primary=0.9)
    # Correcting Image for deuteranopia with diagnosed degree of 1.0 and saving the image to file.
    Core.correct(input_path='Examples_Check/ex_original.jpg',
                 return_type='save',
                 save_path='Examples_Check/ex_corrected_deuteranopia.png',
                 protanopia_degree=0.0,
                 deuteranopia_degree=1.0)
    # Also simulate the corrected image to see difference.
    Core.simulate(input_path='Examples_Check/ex_corrected_deuteranopia.png',
                  return_type='save',
                  save_path='Examples_Check/ex_simulate_corrected_deuteranopia.png',
                  simulate_type='deuteranopia',
                  simulate_degree_primary=0.9)
    # Correcting Image for Hybrid with diagnosed degree of 0.5 for both protanopia and
    # deuteranopia and saving the image to file.
    Core.correct(input_path='Examples_Check/ex_original.jpg',
                 return_type='save',
                 save_path='Examples_Check/ex_corrected_hybrid.png',
                 protanopia_degree=0.5,
                 deuteranopia_degree=0.5)
    # You can also use different return types and get numpy array or PIL.Image for further processing.
    # See recolor.py
    return
# Script entry point: run all examples when executed directly.
if __name__ == '__main__':
    main()
| 45.545455
| 119
| 0.646421
| 422
| 3,507
| 5.111374
| 0.151659
| 0.100139
| 0.141864
| 0.158554
| 0.782568
| 0.76217
| 0.726936
| 0.691701
| 0.675012
| 0.674084
| 0
| 0.016555
| 0.27659
| 3,507
| 76
| 120
| 46.144737
| 0.833662
| 0.252067
| 0
| 0.490196
| 0
| 0
| 0.302682
| 0.262835
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| true
| 0
| 0.019608
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1bec560cd25b3932b83ccc67b2a7c9975b2c145c
| 4,371
|
py
|
Python
|
imf/tests/test_imf.py
|
segasai/imf
|
9a33b9e68b0af677dab86511e343d6099d9ea530
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
imf/tests/test_imf.py
|
segasai/imf
|
9a33b9e68b0af677dab86511e343d6099d9ea530
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
imf/tests/test_imf.py
|
segasai/imf
|
9a33b9e68b0af677dab86511e343d6099d9ea530
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
import pytest
import numpy as np
import itertools
from .. import imf
from ..imf import kroupa, chabrier2005
@pytest.mark.parametrize(('inp', 'out', 'rtol', 'atol'),
                         [(0.05, 5.6159, 1e-3, 1e-3),
                          (1.5, 0.0359, 1e-4, 1e-4),
                          (1.0, 0.0914, 1e-4, 1e-4),
                          (3.0, 0.0073, 1e-4, 1e-4),
                          (1, 0.0914, 1e-4, 1e-4),
                          (3, 0.0073, 1e-4, 1e-4)])
def test_kroupa_val(inp, out, rtol, atol):
    """Kroupa IMF evaluated at known masses matches reference values, both via
    a fresh imf.Kroupa() instance and via the module-level imf.kroupa alias."""
    # NOTE: this local deliberately shadows the module-level `kroupa` import.
    kroupa = imf.Kroupa()
    np.testing.assert_allclose(kroupa(inp), out, rtol=rtol, atol=atol)
    np.testing.assert_allclose(imf.kroupa(inp), out, rtol=rtol, atol=atol)
@pytest.mark.parametrize('massfunc', imf.massfunctions.keys())
def test_mmax(massfunc):
    """
    Regression test for issue #4
    """
    # Mass functions without an mmin attribute cannot honor the bounds below.
    if (not hasattr(imf.get_massfunc(massfunc), 'mmin')):
        pytest.skip("{0} doesn't have mmin defined".format(massfunc))
    c = imf.make_cluster(10000, mmax=1, mmin=0.01, massfunc=massfunc)
    # No sampled stellar mass may exceed the requested mmax.
    assert c.max() <= 1
@pytest.mark.parametrize(('mlow', 'mhigh'),
                         itertools.product((0.01, 0.08, 0.1, 0.5, 1.0, 0.03),
                                           (0.02, 0.08, 0.4, 0.5, 1.0, 120)))
def test_kroupa_integral(mlow, mhigh):
    """Numerical and analytic integrals of the Kroupa IMF must agree over
    many (mlow, mhigh) mass ranges spanning the IMF's break points."""
    if mlow >= mhigh:
        pytest.skip("mmin >= mmax")
    num = kroupa.integrate(mlow, mhigh, numerical=True)[0]
    anl = kroupa.integrate(mlow, mhigh, numerical=False)[0]
    np.testing.assert_almost_equal(num, anl)
    # Guard against both branches silently degenerating to zero.
    if num != 0:
        assert anl != 0
@pytest.mark.parametrize(('mlow', 'mhigh'),
                         itertools.product((0.01, 0.08, 0.1, 0.5, 1.0, 0.03),
                                           (0.02, 0.08, 0.4, 0.5, 1.0, 120)))
def test_kroupa_mintegral(mlow, mhigh):
    """Numerical and analytic mass-weighted integrals (m_integrate) of the
    Kroupa IMF must agree over many (mlow, mhigh) ranges."""
    if mlow >= mhigh:
        pytest.skip("mmin >= mmax")
    num = kroupa.m_integrate(mlow, mhigh, numerical=True)[0]
    anl = kroupa.m_integrate(mlow, mhigh, numerical=False)[0]
    # Printed values show up in pytest output on failure, aiding diagnosis.
    print("{0} {1} {2:0.3f} {3:0.3f}".format(mlow, mhigh, num, anl))
    np.testing.assert_almost_equal(num, anl)
    # Guard against both branches silently degenerating to zero.
    if num != 0:
        assert anl != 0
@pytest.mark.parametrize(('mlow', 'mhigh'),
                         itertools.product((0.033, 0.01, 0.08, 0.1, 0.5, 1.0, 0.03),
                                           (0.02, 0.05, 0.08, 0.4, 0.5, 1.0, 120)))
def test_chabrier_integral(mlow, mhigh):
    """Numerical and analytic integrals of the Chabrier (2005) IMF must agree
    over many (mlow, mhigh) mass ranges."""
    if mlow >= mhigh:
        pytest.skip("mmin >= mmax")
    num = chabrier2005.integrate(mlow, mhigh, numerical=True)[0]
    anl = chabrier2005.integrate(mlow, mhigh, numerical=False)[0]
    # Printed values show up in pytest output on failure, aiding diagnosis.
    print("{0} {1} {2:0.3f} {3:0.3f}".format(mlow, mhigh, num, anl))
    np.testing.assert_almost_equal(num, anl)
# for mlow in (0.01, 0.08, 0.1, 0.5, 1.0):
# for mhigh in (0.02, 0.08, 0.4, 0.5, 1.0):
# try:
# num = chabrier2005.m_integrate(mlow, mhigh, numerical=True)[0]
# anl = chabrier2005.m_integrate(mlow, mhigh, numerical=False)[0]
# except ValueError:
# continue
# print("{0} {1} {2:0.3f} {3:0.3f}".format(mlow, mhigh, num, anl))
# np.testing.assert_almost_equal(num, anl)
def test_make_cluster():
    """make_cluster(1000) should yield a cluster whose total mass is within
    100 of the requested 1000 (sampling cannot hit the target exactly)."""
    cluster = imf.make_cluster(1000)
    # Bug fix: the original wrote `np.abs(sum(cluster) - 1000 < 100)`, which
    # applies abs() to a *boolean* -- the tolerance must wrap only the
    # difference so the check bounds the deviation symmetrically.
    assert np.abs(sum(cluster) - 1000) < 100
def test_kroupa_inverses():
    """inverse_imf at the CDF extremes (0 and 1) must return mmin and mmax,
    whether the bound is passed to inverse_imf or to the Kroupa constructor."""
    assert np.abs(imf.inverse_imf(0, massfunc=imf.Kroupa(), mmin=0.01) - 0.01) < 2e-3
    assert np.abs(imf.inverse_imf(0, massfunc=imf.Kroupa(mmin=0.01)) - 0.01) < 2e-3
    assert np.abs(imf.inverse_imf(1, massfunc=imf.Kroupa(), mmax=200) - 200) < 1
    assert np.abs(imf.inverse_imf(1, massfunc=imf.Kroupa(mmax=200)) - 200) < 1
@pytest.mark.parametrize(('inp', 'out', 'rtol', 'atol'),
                         [(0.05, 5.6159, 1e-3, 1e-3),
                          (1.5, 0.0359, 1e-4, 1e-4),
                          (1.0, 0.0914, 1e-4, 1e-4),
                          (3.0, 0.0073, 1e-4, 1e-4),
                          (1, 0.0914, 1e-4, 1e-4),
                          (3, 0.0073, 1e-4, 1e-4)])
def test_kroupa_val_unchanged(inp, out, rtol, atol):
    """Same reference values as test_kroupa_val, plus an equality check that
    the instance and the module-level alias produce identical results."""
    # regression: make sure that imf.kroupa = imf.Kroupa
    kroupa = imf.Kroupa()
    np.testing.assert_allclose(kroupa(inp), out, rtol=rtol, atol=atol)
    np.testing.assert_allclose(imf.kroupa(inp), out, rtol=rtol, atol=atol)
    np.testing.assert_allclose(kroupa(inp), imf.kroupa(inp))
| 38.342105
| 85
| 0.548616
| 653
| 4,371
| 3.611026
| 0.147014
| 0.025445
| 0.021204
| 0.025445
| 0.757422
| 0.752332
| 0.729432
| 0.714589
| 0.639101
| 0.639101
| 0
| 0.115336
| 0.277968
| 4,371
| 113
| 86
| 38.681416
| 0.631812
| 0.114619
| 0
| 0.56
| 0
| 0
| 0.047334
| 0
| 0
| 0
| 0
| 0
| 0.213333
| 1
| 0.106667
| false
| 0
| 0.066667
| 0
| 0.173333
| 0.026667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1bf80f3b3752e5b287d0b8e6c0b588d768bd9936
| 391
|
py
|
Python
|
RawArchiver/TimedTriggers/TriggerManage.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
RawArchiver/TimedTriggers/TriggerManage.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
RawArchiver/TimedTriggers/TriggerManage.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
import logging
import abc
import datetime
import traceback
import urllib.parse
import sqlalchemy.exc
import common.database as db
# import RawArchiver.TimedTriggers.RawRollingRewalkTrigger
# def exposed_raw_rewalk_old():
# '''
# Trigger the rewalking system on the rawarchiver
# '''
# run = RawArchiver.TimedTriggers.RawRollingRewalkTrigger.RollingRawRewalkTrigger()
# run.go()
| 17
| 84
| 0.787724
| 42
| 391
| 7.261905
| 0.690476
| 0.157377
| 0.308197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13555
| 391
| 22
| 85
| 17.772727
| 0.902367
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
400e8fd638e662ffd2e604a84ee3770ee327d2d0
| 142
|
py
|
Python
|
run.py
|
arryaaas/Met-Num
|
4975efdd98b7d69dd3684eb2eb6a138134b51ce3
|
[
"MIT"
] | null | null | null |
run.py
|
arryaaas/Met-Num
|
4975efdd98b7d69dd3684eb2eb6a138134b51ce3
|
[
"MIT"
] | null | null | null |
run.py
|
arryaaas/Met-Num
|
4975efdd98b7d69dd3684eb2eb6a138134b51ce3
|
[
"MIT"
] | null | null | null |
from app import app
import os
if __name__ == "__main__":
    # Honor the PORT environment variable (default 5000) so deployment
    # platforms can pick the listening port; run Flask in debug mode.
    listen_port = int(os.environ.get("PORT", 5000))
    app.run(debug=True, port=listen_port)
| 23.666667
| 45
| 0.647887
| 22
| 142
| 3.818182
| 0.681818
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035398
| 0.204225
| 142
| 6
| 46
| 23.666667
| 0.707965
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4038b1b4ae008c4e4a97fb17709b6104f7b17487
| 204
|
py
|
Python
|
mellon/path.py
|
LaudateCorpus1/mellon
|
a7a9f6d8abf1dd03b63a94ddb4439c6cc6c2e272
|
[
"MIT"
] | 5
|
2016-12-20T19:39:01.000Z
|
2021-01-08T16:19:17.000Z
|
mellon/path.py
|
CrowdStrike/mellon
|
7216f255d397a41b1c2777a1b02f1c085d07ddfe
|
[
"MIT"
] | 1
|
2018-03-21T17:05:13.000Z
|
2018-03-21T17:05:13.000Z
|
mellon/path.py
|
LaudateCorpus1/mellon
|
a7a9f6d8abf1dd03b63a94ddb4439c6cc6c2e272
|
[
"MIT"
] | 2
|
2017-11-01T15:03:27.000Z
|
2018-11-13T03:04:44.000Z
|
from zope.component.factory import Factory
from zope import interface
from . import IPath
# A plain str subclass marked (via zope.interface) as providing IPath.
@interface.implementer(IPath)
class FilesystemPath(str):
    pass
# zope.component Factory used to create FilesystemPath instances.
filesystemPathFactory = Factory(FilesystemPath)
| 25.5
| 47
| 0.823529
| 23
| 204
| 7.304348
| 0.565217
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112745
| 204
| 8
| 47
| 25.5
| 0.928177
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.142857
| 0.428571
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
403e5248774c44cfc57d17b7d058b3105c020f24
| 203
|
py
|
Python
|
overholt/users/__init__.py
|
prdonahue/overholt
|
e03209f7a059d165a9154355d090738af3159028
|
[
"MIT"
] | 1,152
|
2015-01-04T16:30:17.000Z
|
2022-03-27T19:50:52.000Z
|
overholt/users/__init__.py
|
prdonahue/overholt
|
e03209f7a059d165a9154355d090738af3159028
|
[
"MIT"
] | 25
|
2020-07-06T08:49:08.000Z
|
2021-07-27T06:15:43.000Z
|
overholt/users/__init__.py
|
prdonahue/overholt
|
e03209f7a059d165a9154355d090738af3159028
|
[
"MIT"
] | 218
|
2015-01-06T20:41:52.000Z
|
2022-03-25T19:07:53.000Z
|
# -*- coding: utf-8 -*-
"""
overholt.users
~~~~~~~~~~~~~~
overholt users package
"""
from ..core import Service
from .models import User
class UsersService(Service):
    """Service-layer wrapper exposing the base Service API for the User model."""
    # __model__ tells the base Service class which model this service manages.
    __model__ = User
| 13.533333
| 28
| 0.596059
| 21
| 203
| 5.571429
| 0.714286
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.221675
| 203
| 14
| 29
| 14.5
| 0.734177
| 0.374384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4087934eb49184e801ab0929fdb930bd0267d5b1
| 902
|
py
|
Python
|
python/src/aoc/year2017/day1.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | 1
|
2021-02-16T21:30:04.000Z
|
2021-02-16T21:30:04.000Z
|
python/src/aoc/year2017/day1.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | null | null | null |
python/src/aoc/year2017/day1.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | null | null | null |
from aoc.util import load_input
def solve_captcha(line):
    """Sum each digit that equals the digit immediately before it; the
    sequence is circular, so index 0 is compared against the last digit
    (Python's line[-1] handles the wrap-around naturally).

    >>> solve_captcha('1122')
    3
    >>> solve_captcha('1111')
    4
    >>> solve_captcha('1234')
    0
    >>> solve_captcha('91212129')
    9
    """
    total = 0
    for index, digit in enumerate(line):
        if line[index - 1] == digit:
            total += int(digit)
    return total
def part1(lines):
    """Solve part 1: apply solve_captcha to the first (stripped) input line."""
    first_line = lines[0].strip()
    return solve_captcha(first_line)
def solve_new_captcha(line):
    """Sum each digit that equals the digit halfway around the circular
    sequence, implemented by comparing the line against a half-rotation
    of itself.

    >>> solve_new_captcha('1212')
    6
    >>> solve_new_captcha('1221')
    0
    >>> solve_new_captcha('123425')
    4
    >>> solve_new_captcha('123123')
    12
    >>> solve_new_captcha('12131415')
    4
    """
    half = len(line) // 2
    rotated = line[half:] + line[:half]
    return sum(int(a) for a, b in zip(line, rotated) if a == b)
def part2(lines):
    """Solve part 2: apply solve_new_captcha to the first (stripped) input line."""
    first_line = lines[0].strip()
    return solve_new_captcha(first_line)
# Script entry point: load the 2017 day-1 puzzle input and print both answers.
if __name__ == "__main__":
    data = load_input(__file__, 2017, "1")
    print(part1(data))
    print(part2(data))
| 19.608696
| 103
| 0.583149
| 126
| 902
| 3.904762
| 0.404762
| 0.113821
| 0.213415
| 0.073171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105109
| 0.240577
| 902
| 45
| 104
| 20.044444
| 0.613139
| 0.314856
| 0
| 0
| 0
| 0
| 0.017208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.076923
| 0.153846
| 0.692308
| 0.153846
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
40880c5e38e35d28917caa19394f49c8079c8309
| 470
|
py
|
Python
|
tests/__init__.py
|
pauliacomi/pyGAPS
|
c4d45b710e171c937471686437e382e05aec4ed5
|
[
"MIT"
] | 35
|
2018-01-24T14:59:08.000Z
|
2022-03-10T02:47:58.000Z
|
tests/__init__.py
|
pauliacomi/pyGAPS
|
c4d45b710e171c937471686437e382e05aec4ed5
|
[
"MIT"
] | 29
|
2018-01-06T12:08:08.000Z
|
2022-03-11T20:26:53.000Z
|
tests/__init__.py
|
pauliacomi/pyGAPS
|
c4d45b710e171c937471686437e382e05aec4ed5
|
[
"MIT"
] | 20
|
2019-06-12T19:20:29.000Z
|
2022-03-02T09:57:02.000Z
|
# TODO saturation_pressure for each point
# TODO isotherm excess / absolute
# TODO implement AIF
# TODO osmotic ensemble
# TODO universal adsorption isotherm model UAIM
# TODO double check all model solvers
# TODO volume_liquid loading mode
# TODO universal adsorption isotherm model (UAIM)
# TODO deeper integration with NIST isodb
# TODO code copy behaviour
# TODO check adsorbate lists from VOC
# TODO tplot psd
# TODO General Adsorbent Library and Evaluation (GALE)
| 33.571429
| 54
| 0.797872
| 65
| 470
| 5.738462
| 0.692308
| 0.069705
| 0.123324
| 0.16622
| 0.235925
| 0.235925
| 0.235925
| 0
| 0
| 0
| 0
| 0
| 0.16383
| 470
| 13
| 55
| 36.153846
| 0.949109
| 0.942553
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.076923
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
409ae6195baf24502a85c3bcc544eefd1d198fcc
| 140
|
py
|
Python
|
tests/myapp/urls.py
|
clover-es/django-mptt
|
6c78234659e10b15e9a102bc19f8bd26a6bf3b58
|
[
"MIT"
] | 5
|
2018-09-08T18:31:49.000Z
|
2021-07-17T02:05:06.000Z
|
tests/myapp/urls.py
|
clover-es/django-mptt
|
6c78234659e10b15e9a102bc19f8bd26a6bf3b58
|
[
"MIT"
] | null | null | null |
tests/myapp/urls.py
|
clover-es/django-mptt
|
6c78234659e10b15e9a102bc19f8bd26a6bf3b58
|
[
"MIT"
] | 1
|
2019-06-25T17:13:02.000Z
|
2019-06-25T17:13:02.000Z
|
import django
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [url(r'^admin/', admin.site.urls)]
| 20
| 48
| 0.764286
| 21
| 140
| 5.095238
| 0.571429
| 0.186916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121429
| 140
| 6
| 49
| 23.333333
| 0.869919
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
40a4597b17358399dcfd29900eb9a636becacfa2
| 3,105
|
py
|
Python
|
tests/test_api.py
|
aidanmcdonagh22/shorty
|
2a7a79bc35941eda4a535f25807a56c3bcf587c1
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
aidanmcdonagh22/shorty
|
2a7a79bc35941eda4a535f25807a56c3bcf587c1
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
aidanmcdonagh22/shorty
|
2a7a79bc35941eda4a535f25807a56c3bcf587c1
|
[
"MIT"
] | null | null | null |
from .utils import check_api_response
from requests_mock import Mocker
# Integration Testing
class TestShortlinkView:
    """Integration tests for the POST /shortlinks endpoint.

    Each test posts JSON to /shortlinks via the test client; provider HTTP
    calls (bitly, tinyurl) are intercepted with requests_mock.Mocker so no
    real network traffic occurs.
    """

    def test_no_url(self, client):
        """Missing 'url' parameter must yield a 400 with a descriptive error."""
        # perform client post to endpoint
        response = client.post('/shortlinks', json={ "provider": "bitly" })
        # check response
        check_api_response(response, 400, { "error": "parameter url must be provided and as a string" })

    def test_bad_provider(self, client):
        """An unknown provider name must yield a 400 with a descriptive error."""
        # perform client post to endpoint
        response = client.post('/shortlinks', json={
            "url": "https://google.com", "provider": "somethingrandom"
        })
        # check response
        check_api_response(response, 400, { "error": "provider must be 'bitly' or 'tinyurl'" })

    def test_successful_response_bitly(self, client):
        """A successful bitly shorten call returns 200 with url + link."""
        url = "https://google.com"
        shortlink = "http://short.com"
        with Mocker() as m:
            # mock response
            m.post("https://api-ssl.bitly.com/v4/shorten", json={ "link": shortlink })
            # perform client post to endpoint
            response = client.post('/shortlinks', json={
                "url": url, "provider": "bitly"
            })
            # check response
            check_api_response(response, 200, { "url": url, "link": shortlink })

    def test_unsuccessful_response_bitly(self, client):
        """A bitly API failure (400) propagates as a 400 with bitly's message."""
        url = "https://google.com"
        errorMsg = "Error: we could not provide you a link"
        with Mocker() as m:
            # mock response
            m.post("https://api-ssl.bitly.com/v4/shorten", json={ "message": errorMsg }, status_code=400)
            # perform client post to endpoint
            response = client.post('/shortlinks', json={
                "url": url, "provider": "bitly"
            })
            # check response
            check_api_response(response, 400, { "error": errorMsg })

    def test_successful_response_tinyurl(self, client):
        """A successful tinyurl call returns 200 with url + link."""
        url = "https://facebook.com"
        shortlink = "http://short.com"
        with Mocker() as m:
            # mock response
            m.get(f"http://tinyurl.com/api-create.php?url={url}", text=shortlink)
            # perform client post to endpoint
            response = client.post('/shortlinks', json={
                "url": url, "provider": "tinyurl"
            })
            # check response
            check_api_response(response, 200, { "url": url, "link": shortlink })

    def test_unsuccessful_response_tinyurl(self, client):
        """A tinyurl API failure (400) propagates as a 400 with the error text."""
        url = "https://facebook.com"
        error = "error creating tinyurl link"
        with Mocker() as m:
            # mock response
            m.get(f"http://tinyurl.com/api-create.php?url={url}", text=error, status_code=400)
            # perform client post to endpoint
            response = client.post('/shortlinks', json={
                "url": url, "provider": "tinyurl"
            })
            # check response
            check_api_response(response, 400, { "error": error })
| 37.409639
| 105
| 0.553623
| 328
| 3,105
| 5.140244
| 0.219512
| 0.071174
| 0.066429
| 0.067616
| 0.785291
| 0.785291
| 0.785291
| 0.785291
| 0.662515
| 0.662515
| 0
| 0.012411
| 0.325282
| 3,105
| 82
| 106
| 37.865854
| 0.792363
| 0.114976
| 0
| 0.553191
| 0
| 0
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12766
| false
| 0
| 0.042553
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
40bc4a36759cf1fb735daafeaf1a9c7fe1a66580
| 504
|
py
|
Python
|
db_context/Context.py
|
crippledfaith/shop
|
fb6a520170968e9f90d4d70c3f6a4e793b105e84
|
[
"Apache-2.0"
] | null | null | null |
db_context/Context.py
|
crippledfaith/shop
|
fb6a520170968e9f90d4d70c3f6a4e793b105e84
|
[
"Apache-2.0"
] | null | null | null |
db_context/Context.py
|
crippledfaith/shop
|
fb6a520170968e9f90d4d70c3f6a4e793b105e84
|
[
"Apache-2.0"
] | null | null | null |
from pymongo import MongoClient
class Context:
    """Thin persistence wrapper around a local MongoDB 'shop' database."""

    def __init__(self):
        # Connect to MongoDB on the default local host at port 27017.
        self.client = MongoClient(port=27017)
        self.db = self.client.shop

    def save(self, collection_name, obj):
        """Insert obj as a new document into the named collection."""
        collection = self.db[collection_name]
        collection.insert_one(obj)

    def update(self, collection_name, obj):
        """Replace the fields of the document sharing obj's _id; upsert if absent."""
        collection = self.db[collection_name]
        id_filter = {"_id": obj["_id"]}
        collection.find_one_and_update(id_filter, {"$set": obj}, upsert=True)

    def delete(self, collection_name, obj):
        """Delete the single document matching obj's _id."""
        collection = self.db[collection_name]
        collection.delete_one({"_id": obj["_id"]})
| 28
| 101
| 0.664683
| 68
| 504
| 4.647059
| 0.411765
| 0.265823
| 0.170886
| 0.199367
| 0.389241
| 0.389241
| 0.389241
| 0.389241
| 0
| 0
| 0
| 0.012255
| 0.190476
| 504
| 17
| 102
| 29.647059
| 0.762255
| 0
| 0
| 0
| 0
| 0
| 0.031809
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
40be4052d0b2e6de09ff0edfe724246d49c56e0c
| 97
|
py
|
Python
|
src/api/jp/kanjivg.py
|
Xifax/suzu-web
|
ebe6b87093f73bf8a100d7b78b1d4a83cf203315
|
[
"BSD-2-Clause"
] | null | null | null |
src/api/jp/kanjivg.py
|
Xifax/suzu-web
|
ebe6b87093f73bf8a100d7b78b1d4a83cf203315
|
[
"BSD-2-Clause"
] | null | null | null |
src/api/jp/kanjivg.py
|
Xifax/suzu-web
|
ebe6b87093f73bf8a100d7b78b1d4a83cf203315
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
KanjiVG stub
"""
class KanjiVG:
    """Placeholder stub for the KanjiVG API; no behavior implemented yet."""
| 9.7
| 23
| 0.536082
| 12
| 97
| 4.333333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.247423
| 97
| 9
| 24
| 10.777778
| 0.69863
| 0.56701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
40dc02ef4eafdaa22394a1b06dc6ab6f7ccb8ddb
| 175
|
py
|
Python
|
src/natasy/neural_network/layers/hiddenlayer.py
|
disooqi/DNN
|
f87a10afba0810778ab3669f30e20128779f9da0
|
[
"AFL-3.0"
] | 3
|
2019-03-03T11:01:26.000Z
|
2022-02-01T15:53:47.000Z
|
src/natasy/neural_network/layers/hiddenlayer.py
|
disooqi/DNN
|
f87a10afba0810778ab3669f30e20128779f9da0
|
[
"AFL-3.0"
] | 20
|
2018-10-31T16:54:21.000Z
|
2021-08-28T06:05:56.000Z
|
src/natasy/neural_network/layers/hiddenlayer.py
|
disooqi/Natasy
|
f87a10afba0810778ab3669f30e20128779f9da0
|
[
"AFL-3.0"
] | null | null | null |
from . import NeuralNetworkLayer
class HiddenLayer(NeuralNetworkLayer):
def __init__(self, *args, **kwargs):
super(HiddenLayer, self).__init__(*args, **kwargs)
| 21.875
| 58
| 0.714286
| 17
| 175
| 6.882353
| 0.647059
| 0.17094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 175
| 7
| 59
| 25
| 0.795918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
40f8f66a4d55e39902ce1951d1b7a771e6affc06
| 42
|
py
|
Python
|
jupyterhub/etc/jupyterhub/jupyterhub_config.py
|
nevdullcode/vagrant-jupyterhub
|
41a361a64dd38c07abb863da0e0a1585acad1bff
|
[
"MIT"
] | 1
|
2021-04-18T19:56:28.000Z
|
2021-04-18T19:56:28.000Z
|
jupyterhub/etc/jupyterhub/jupyterhub_config.py
|
nevdullcode/vagrant-jupyterhub
|
41a361a64dd38c07abb863da0e0a1585acad1bff
|
[
"MIT"
] | null | null | null |
jupyterhub/etc/jupyterhub/jupyterhub_config.py
|
nevdullcode/vagrant-jupyterhub
|
41a361a64dd38c07abb863da0e0a1585acad1bff
|
[
"MIT"
] | null | null | null |
c.Authenticator.admin_users = {'vagrant'}
| 21
| 41
| 0.761905
| 5
| 42
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.794872
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
40fcc69cdab9b5733a6cbbf5c796e97d633fde88
| 2,187
|
py
|
Python
|
pychip8/operations/__init__.py
|
edwin-jones/pychip8
|
da8c850cdba90cbb6d1244de3b6118be8119c67d
|
[
"MIT"
] | null | null | null |
pychip8/operations/__init__.py
|
edwin-jones/pychip8
|
da8c850cdba90cbb6d1244de3b6118be8119c67d
|
[
"MIT"
] | 4
|
2018-09-02T20:37:47.000Z
|
2018-09-08T19:11:08.000Z
|
pychip8/operations/__init__.py
|
edwin-jones/pychip8
|
da8c850cdba90cbb6d1244de3b6118be8119c67d
|
[
"MIT"
] | null | null | null |
from pychip8.operations.set_x_to_y import SetXToY
from pychip8.operations.set_x import SetX
from pychip8.operations.set_i import SetI
from pychip8.operations.random import Random
from pychip8.operations.save_registers_zero_to_x import SaveRegistersZeroToX
from pychip8.operations.load_registers_zero_to_x import LoadRegistersZeroToX
from pychip8.operations.save_x_as_bcd import SaveXAsBcd
from pychip8.operations.load_character_address import LoadCharacterAddress
from pychip8.operations.graphics.clear_display import ClearDisplay
from pychip8.operations.graphics.draw_sprite import DrawSprite
from pychip8.operations.jumps.goto import Goto
from pychip8.operations.jumps.goto_plus import GotoPlus
from pychip8.operations.jumps.skip_if_equal import SkipIfEqual
from pychip8.operations.jumps.skip_if_not_equal import SkipIfNotEqual
from pychip8.operations.jumps.skip_if_x_y_equal import SkipIfXyEqual
from pychip8.operations.jumps.skip_if_x_y_not_equal import SkipIfXyNotEqual
from pychip8.operations.jumps.return_from_function import ReturnFromFunction
from pychip8.operations.jumps.call_function import CallFunction
from pychip8.operations.arithmetic.add_to_x import AddToX
from pychip8.operations.arithmetic.add_y_to_x import AddYToX
from pychip8.operations.arithmetic.take_y_from_x import TakeYFromX
from pychip8.operations.arithmetic.take_x_from_y import TakeXFromY
from pychip8.operations.arithmetic.add_x_to_i import AddXToI
from pychip8.operations.bitwise.shift_x_left import ShiftXLeft
from pychip8.operations.bitwise.shift_x_right import ShiftXRight
from pychip8.operations.bitwise.bitwise_and import BitwiseAnd
from pychip8.operations.bitwise.bitwise_or import BitwiseOr
from pychip8.operations.bitwise.bitwise_xor import BitwiseXor
from pychip8.operations.timers.set_x_to_delay_timer import SetXToDelayTimer
from pychip8.operations.timers.set_sound_timer import SetSoundTimer
from pychip8.operations.timers.set_delay_timer import SetDelayTimer
from pychip8.operations.input.skip_if_key_pressed import SkipIfKeyPressed
from pychip8.operations.input.skip_if_key_not_pressed import SkipIfKeyNotPressed
from pychip8.operations.input.wait_for_key_press import WaitForKeyPress
| 53.341463
| 80
| 0.89209
| 307
| 2,187
| 6.104235
| 0.276873
| 0.199573
| 0.381003
| 0.110993
| 0.422092
| 0.144077
| 0.073639
| 0.036286
| 0
| 0
| 0
| 0.016626
| 0.064929
| 2,187
| 40
| 81
| 54.675
| 0.899756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
dc0ece8ea8024b7639e8aef0ff9259fc64b7906d
| 210
|
py
|
Python
|
crypto/large case/secret.py
|
n0trix/SUSCTF2022_official_wp
|
51224930c59cd732b9d80cc63d95dd1a06ebc308
|
[
"MIT"
] | 32
|
2022-03-01T06:57:42.000Z
|
2022-03-27T09:23:07.000Z
|
crypto/large case/secret.py
|
n0trix/SUSCTF2022_official_wp
|
51224930c59cd732b9d80cc63d95dd1a06ebc308
|
[
"MIT"
] | null | null | null |
crypto/large case/secret.py
|
n0trix/SUSCTF2022_official_wp
|
51224930c59cd732b9d80cc63d95dd1a06ebc308
|
[
"MIT"
] | 6
|
2022-03-01T06:49:09.000Z
|
2022-03-21T13:21:26.000Z
|
# RSA public exponent used by the challenge.
e=259776235785533
# Plaintext the solver must recover; the flag text itself hints that e is
# not coprime with phi(n), which is the intended vulnerability.
message=b'For RSA, the wrong key generation method can also reveal information. You recover my secret message, and here is the flag:SUSCTF{N0n_c0prime_RSA_c1pher_cAn_a1s0_recover_me33age!!!}'
| 105
| 191
| 0.833333
| 34
| 210
| 4.941176
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 0.104762
| 210
| 2
| 191
| 105
| 0.776596
| 0
| 0
| 0
| 0
| 0.5
| 0.857143
| 0.3
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9088f7e6a1eb4276bc6e187d21405ff6eb5f15a2
| 3,735
|
py
|
Python
|
web/sufee/views.py
|
shalevy1/Flask-Sufee-Admin
|
3a663c2a49ef07772de646317eab350d795e0e5e
|
[
"MIT"
] | 1
|
2020-07-13T03:55:56.000Z
|
2020-07-13T03:55:56.000Z
|
web/sufee/views.py
|
shalevy1/Flask-Sufee-Admin
|
3a663c2a49ef07772de646317eab350d795e0e5e
|
[
"MIT"
] | null | null | null |
web/sufee/views.py
|
shalevy1/Flask-Sufee-Admin
|
3a663c2a49ef07772de646317eab350d795e0e5e
|
[
"MIT"
] | 1
|
2020-09-11T17:30:37.000Z
|
2020-09-11T17:30:37.000Z
|
from flask import Flask
from flask import render_template
from flask import abort
from sufee import app
@app.route("/")
@app.route("/home")
def index():
    """Render the dashboard landing page."""
    return render_template('index.html')


@app.route("/about")
def aboutindex():
    """Render the about page."""
    return render_template('about.html')


@app.route("/page-login")
def login():
    """Render the login page."""
    return render_template("page-login.html")


@app.route("/page-register")
def register():
    """Render the registration page."""
    return render_template("page-register.html")


@app.route("/pages-forget")
def page_forget():
    """Render the forgotten-password page."""
    return render_template('pages-forget.html')


@app.route("/ui-buttons")
def ui_buttons():
    """Render the UI buttons demo page."""
    return render_template('ui-buttons.html')


@app.route("/ui-badges")
def ui_badges():
    """Render the UI badges demo page."""
    return render_template('ui-badges.html')


@app.route("/ui-tabs")
def ui_tabs():
    """Render the UI tabs demo page."""
    return render_template('ui-tabs.html')


@app.route("/charts-chartjs")
def charts_chartjs():
    """Render the Chart.js charts demo page."""
    return render_template('charts-chartjs.html')


@app.route("/charts-flot")
def charts_flot():
    """Render the Flot charts demo page."""
    return render_template('charts-flot.html')
@app.route("/charts-peity")
def charts_peity():
    """Peity charts demo page.

    The template name previously carried a leading '/'
    ('/charts-peity.html'), unlike every other route in this module.
    Jinja's loader skips the empty leading path segment, so both spellings
    resolve to the same file — this is a pure consistency fix.
    """
    template = 'charts-peity.html'
    return render_template(template)
@app.route("/font-fontawesome")
def font_fontawesome():
    """Font Awesome demo page."""
    return render_template('font-fontawesome.html')

@app.route("/font-themify")
def font_themify():
    """Themify icons demo page."""
    return render_template('font-themify.html')

@app.route("/font-advanced")
def font_advanced():
    """Advanced fonts demo page."""
    return render_template('font-advanced.html')

@app.route("/forms-basic")
def forms_basic():
    """Basic forms demo page."""
    return render_template('forms-basic.html')

@app.route("/forms-advanced")
def forms_advanced():
    """Advanced forms demo page."""
    return render_template('forms-advanced.html')

@app.route("/frame")
def frame():
    """Frame demo page."""
    return render_template('frame.html')

@app.route("/maps-gmap")
def maps_gmap():
    """Google Maps demo page."""
    return render_template('maps-gmap.html')

@app.route("/maps-vector")
def maps_vector():
    """Vector maps demo page."""
    return render_template('maps-vector.html')

@app.route("/tables-basic")
def tables_basic():
    """Basic tables demo page."""
    return render_template('tables-basic.html')

@app.route("/tables-data")
def tables_data():
    """Data tables demo page."""
    return render_template('tables-data.html')

@app.route("/ui-alerts")
def ui_alerts():
    """Alerts UI demo."""
    return render_template('ui-alerts.html')

@app.route("/ui-cards")
def ui_cards():
    """Cards UI demo."""
    return render_template('ui-cards.html')

@app.route("/ui-grids")
def ui_grids():
    """Grids UI demo."""
    return render_template('ui-grids.html')

@app.route("/ui-modals")
def ui_modals():
    """Modals UI demo."""
    return render_template('ui-modals.html')

@app.route("/ui-progressbar")
def ui_progressbar():
    """Progress bar UI demo."""
    return render_template('ui-progressbar.html')

@app.route("/ui-social-buttons")
def ui_social_buttons():
    """Social buttons UI demo."""
    return render_template('ui-social-buttons.html')

@app.route("/ui-switches")
def ui_switches():
    """Switches UI demo."""
    return render_template('ui-switches.html')
# Removed a commented-out duplicate of ui_tabs that used to live here; the
# live definition is registered earlier in this module.
@app.route("/ui-typography")
def ui_typography():
    """Typography demo page.

    Dropped the leading '/' from the template name ('/ui-typography.html')
    for consistency with the rest of the module; Jinja's loader skips the
    empty leading segment, so lookup behavior is unchanged.
    """
    template = 'ui-typography.html'
    return render_template(template)

@app.route("/widgets")
def widgets():
    """Widgets page."""
    template = 'widgets.html'
    return render_template(template)
| 22.91411
| 39
| 0.701473
| 473
| 3,735
| 5.41649
| 0.105708
| 0.174863
| 0.193599
| 0.290398
| 0.553474
| 0.540984
| 0.540984
| 0.494145
| 0.123341
| 0.08509
| 0
| 0
| 0.147256
| 3,735
| 162
| 40
| 23.055556
| 0.804396
| 0.027845
| 0
| 0.24
| 0
| 0
| 0.225655
| 0.011862
| 0
| 0
| 0
| 0
| 0
| 1
| 0.24
| false
| 0
| 0.032
| 0
| 0.512
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
9090469fa96e932346e141d16f47b247dde38a8c
| 230
|
py
|
Python
|
src/sage/version.py
|
haiyashah/sage
|
55a711e3d6251f2ff4f3bcccc4c6a8b7a2a8d1b2
|
[
"BSL-1.0"
] | null | null | null |
src/sage/version.py
|
haiyashah/sage
|
55a711e3d6251f2ff4f3bcccc4c6a8b7a2a8d1b2
|
[
"BSL-1.0"
] | null | null | null |
src/sage/version.py
|
haiyashah/sage
|
55a711e3d6251f2ff4f3bcccc4c6a8b7a2a8d1b2
|
[
"BSL-1.0"
] | null | null | null |
# Sage version information for Python scripts
# This file is auto-generated by the sage-update-version script, do not edit!
version = '9.6.beta5'
date = '2022-03-12'
# NOTE: 'banner' repeats the values of 'version' and 'date' above; the
# generator script keeps all three literals in sync, so none should be
# edited by hand.
banner = 'SageMath version 9.6.beta5, Release Date: 2022-03-12'
| 38.333333
| 77
| 0.743478
| 39
| 230
| 4.384615
| 0.717949
| 0.093567
| 0.105263
| 0.163743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111675
| 0.143478
| 230
| 5
| 78
| 46
| 0.756345
| 0.517391
| 0
| 0
| 1
| 0
| 0.657407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
90b254aa4492a406e7bdbaae580cc675d6f89529
| 381
|
py
|
Python
|
apps/staff/templatetags/staff_template_filters.py
|
mrtaalebi/sitigo
|
cce8b4f5299b58d7365789ead416d4568b443743
|
[
"Apache-2.0"
] | null | null | null |
apps/staff/templatetags/staff_template_filters.py
|
mrtaalebi/sitigo
|
cce8b4f5299b58d7365789ead416d4568b443743
|
[
"Apache-2.0"
] | 8
|
2020-02-12T01:02:15.000Z
|
2022-03-11T23:53:39.000Z
|
apps/staff/templatetags/staff_template_filters.py
|
mrtaalebi/sitigo
|
cce8b4f5299b58d7365789ead416d4568b443743
|
[
"Apache-2.0"
] | null | null | null |
from django import template
register = template.Library()
@register.filter
def len(a):
    # Template filter exposing object length as {{ x|len }}.
    # NOTE: the function is deliberately named 'len' (that name becomes the
    # filter name), which shadows the builtin inside this function's scope —
    # calling len(a) here would recurse forever.  The dunder call is
    # therefore intentional, not a style slip.
    return a.__len__()
@register.filter
def modulo(a, b):
    """Template filter: remainder of ``a`` divided by ``b`` ({{ a|modulo:b }})."""
    remainder = a % b
    return remainder
@register.filter
def name(staff, lang):
    """Return the staff member's full name localized for ``lang``.

    "fa" selects the Persian first/last name fields; anything else falls
    back to the English fields.
    """
    if lang == "fa":
        first, last = staff.persian_firstname, staff.persian_lastname
    else:
        first, last = staff.english_firstname, staff.english_lastname
    return first + " " + last
| 16.565217
| 69
| 0.669291
| 48
| 381
| 5.145833
| 0.479167
| 0.17004
| 0.206478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217848
| 381
| 22
| 70
| 17.318182
| 0.828859
| 0
| 0
| 0.214286
| 0
| 0
| 0.010526
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.071429
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
90d9b9a8576d5d62c0259b9fe249745ff6afe877
| 1,380
|
py
|
Python
|
benchmarks/cinderella.py
|
stanlysamuel/gensys
|
87e924235dcda592bd37e694d54ccc0b62e4a34a
|
[
"MIT"
] | null | null | null |
benchmarks/cinderella.py
|
stanlysamuel/gensys
|
87e924235dcda592bd37e694d54ccc0b62e4a34a
|
[
"MIT"
] | null | null | null |
benchmarks/cinderella.py
|
stanlysamuel/gensys
|
87e924235dcda592bd37e694d54ccc0b62e4a34a
|
[
"MIT"
] | null | null | null |
from gensys.helper import *
from gensys.fixpoints import *
from z3 import *
#Cinderella-Stepmother game of 5 buckets with bucket size of C.
# 1. Define Environment moves
def environment(b1, b2, b3, b4, b5, b1_, b2_, b3_, b4_, b5_):
    """Stepmother move: pour exactly one unit of water into the buckets.

    Primed arguments are the post-move levels.  The total rises by one and
    no individual bucket may lose water.
    """
    before = b1 + b2 + b3 + b4 + b5
    after = b1_ + b2_ + b3_ + b4_ + b5_
    no_loss = (b1_ >= b1, b2_ >= b2, b3_ >= b3, b4_ >= b4, b5_ >= b5)
    return And(after == before + 1, *no_loss)
#2. Define Controller moves
def move1(b1, b2, b3, b4, b5, b1_, b2_, b3_, b4_, b5_):
    """Cinderella empties adjacent buckets 1 and 2; the rest are unchanged."""
    emptied = (b1_ == 0.0, b2_ == 0.0)
    kept = (b3_ == b3, b4_ == b4, b5_ == b5)
    return And(*emptied, *kept)

def move2(b1, b2, b3, b4, b5, b1_, b2_, b3_, b4_, b5_):
    """Cinderella empties adjacent buckets 2 and 3; the rest are unchanged."""
    emptied = (b2_ == 0.0, b3_ == 0.0)
    kept = (b4_ == b4, b5_ == b5, b1_ == b1)
    return And(*emptied, *kept)

def move3(b1, b2, b3, b4, b5, b1_, b2_, b3_, b4_, b5_):
    """Cinderella empties adjacent buckets 3 and 4; the rest are unchanged."""
    emptied = (b3_ == 0.0, b4_ == 0.0)
    kept = (b5_ == b5, b1_ == b1, b2_ == b2)
    return And(*emptied, *kept)

def move4(b1, b2, b3, b4, b5, b1_, b2_, b3_, b4_, b5_):
    """Cinderella empties adjacent buckets 4 and 5; the rest are unchanged."""
    emptied = (b4_ == 0.0, b5_ == 0.0)
    kept = (b1_ == b1, b2_ == b2, b3_ == b3)
    return And(*emptied, *kept)

def move5(b1, b2, b3, b4, b5, b1_, b2_, b3_, b4_, b5_):
    """Cinderella empties adjacent buckets 5 and 1 (wrap-around); the rest are unchanged."""
    emptied = (b5_ == 0.0, b1_ == 0.0)
    kept = (b2_ == b2, b3_ == b3, b4_ == b4)
    return And(*emptied, *kept)
controller_moves = [move1, move2, move3, move4, move5]

# Command-line arguments arrive as strings.  The bucket capacity C is used in
# z3 arithmetic comparisons inside guarantee(), which cannot coerce a Python
# str — convert it to a numeric value here.  (mode is converted with
# int(mode) at the safety_fixedpoint call site, so it stays a string here.)
# NOTE(review): assumes the capacity argument is a decimal number — confirm
# against how the benchmark is invoked.
C = float(sys.argv[1])
mode = sys.argv[2]
# 3. Define Guarantee
def guarantee(b1, b2, b3, b4, b5):
    """Safety condition: every bucket level stays within [0, C]."""
    upper = (b1 <= C, b2 <= C, b3 <= C, b4 <= C, b5 <= C)
    lower = (b1 >= 0.0, b2 >= 0.0, b3 >= 0.0, b4 >= 0.0, b5 >= 0.0)
    return And(*upper, *lower)
safety_fixedpoint(controller_moves, environment, guarantee, int(mode))
| 37.297297
| 119
| 0.574638
| 255
| 1,380
| 2.843137
| 0.172549
| 0.09931
| 0.124138
| 0.165517
| 0.451034
| 0.451034
| 0.362759
| 0.29931
| 0.270345
| 0.270345
| 0
| 0.172316
| 0.230435
| 1,380
| 37
| 120
| 37.297297
| 0.510358
| 0.098551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.142857
| 0.333333
| 0.809524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
29563b18f2ee7efac2a242cf2a303898b0505e3f
| 24
|
py
|
Python
|
data/studio21_generated/introductory/3172/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3172/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3172/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
def parse_fen(string):
| 12
| 22
| 0.75
| 4
| 24
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 2
| 23
| 12
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
296f5eabbb26a7428b7eb5ce12e1c70bb2c5c798
| 199
|
py
|
Python
|
python/simple_keyword_arguments.py
|
idrougge/ple
|
fcf645e23fec68fea93a068eb3bb78a88ca8af46
|
[
"MIT"
] | 59
|
2016-10-27T03:33:17.000Z
|
2022-03-03T06:10:10.000Z
|
python/simple_keyword_arguments.py
|
idrougge/ple
|
fcf645e23fec68fea93a068eb3bb78a88ca8af46
|
[
"MIT"
] | 6
|
2017-01-07T19:27:33.000Z
|
2019-11-10T21:29:59.000Z
|
python/simple_keyword_arguments.py
|
idrougge/ple
|
fcf645e23fec68fea93a068eb3bb78a88ca8af46
|
[
"MIT"
] | 27
|
2016-09-20T16:22:03.000Z
|
2022-01-15T09:28:06.000Z
|
def set_fill_color(red, green, blue):
    """Stub: accept an RGB fill colour and do nothing."""

def draw_rectangle(corner, other_corner):
    """Stub: accept two opposite corners and do nothing."""

# Demonstrate keyword-argument call syntax.
set_fill_color(red=161, green=219, blue=114)
draw_rectangle(corner=(105, 20), other_corner=(60, 60))
| 22.111111
| 53
| 0.743719
| 33
| 199
| 4.242424
| 0.545455
| 0.1
| 0.171429
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102857
| 0.120603
| 199
| 8
| 54
| 24.875
| 0.697143
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
29845881af93bb75270c761046387972658af4e3
| 518
|
py
|
Python
|
skift/__init__.py
|
dimidd/skift
|
48ce16ff07710cde956ff3a7071d83cbbd0f9977
|
[
"MIT"
] | 244
|
2018-02-04T09:33:32.000Z
|
2022-03-06T05:26:36.000Z
|
skift/__init__.py
|
dimidd/skift
|
48ce16ff07710cde956ff3a7071d83cbbd0f9977
|
[
"MIT"
] | 19
|
2018-02-16T03:23:45.000Z
|
2022-02-14T13:06:41.000Z
|
skift/__init__.py
|
dimidd/skift
|
48ce16ff07710cde956ff3a7071d83cbbd0f9977
|
[
"MIT"
] | 28
|
2018-02-05T06:54:46.000Z
|
2022-02-03T14:47:14.000Z
|
"""Utilities for pandas."""
from .core import FirstColFtClassifier # noqa: F401
from .core import IdxBasedFtClassifier # noqa: F401
from .core import FirstObjFtClassifier # noqa: F401
from .core import ColLblBasedFtClassifier # noqa: F401
from .core import SeriesFtClassifier # noqa: F401
from ._version import get_versions
# Resolve the package version once, then scrub helper names from the module
# namespace so only the public API remains.
__version__ = get_versions()['version']
del get_versions
# 'get_versions' was already deleted above, and '_version' / 'core' may never
# have been bound here (from-imports do not bind the submodule name), so
# missing keys are expected and silently skipped.  The final entry, 'name',
# removes the loop variable itself on the last iteration.
for name in ['get_versions', '_version', 'core', 'name']:
    try:
        globals().pop(name)
    except KeyError:
        pass
| 28.777778
| 57
| 0.725869
| 60
| 518
| 6.1
| 0.416667
| 0.10929
| 0.191257
| 0.174863
| 0.240437
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035129
| 0.175676
| 518
| 17
| 58
| 30.470588
| 0.822014
| 0.148649
| 0
| 0
| 0
| 0
| 0.081207
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.076923
| 0.461538
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
461e11134327f36ac57873ad2fa3db8c7445fa13
| 218
|
py
|
Python
|
mentor_mentee/mentor_mentee/doctype/relation/relation.py
|
sehjal408/SK
|
9796fa2d7754a8583db246abdb09341824f254c2
|
[
"MIT"
] | null | null | null |
mentor_mentee/mentor_mentee/doctype/relation/relation.py
|
sehjal408/SK
|
9796fa2d7754a8583db246abdb09341824f254c2
|
[
"MIT"
] | null | null | null |
mentor_mentee/mentor_mentee/doctype/relation/relation.py
|
sehjal408/SK
|
9796fa2d7754a8583db246abdb09341824f254c2
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2022, SK and contributors
# For license information, please see license.txt
import frappe
from frappe.website.website_generator import WebsiteGenerator
class Relation(WebsiteGenerator):
    """Frappe doctype controller for 'Relation'.

    No custom behavior: document lifecycle and website page rendering are
    inherited entirely from WebsiteGenerator.
    """
    pass
| 15.571429
| 61
| 0.788991
| 26
| 218
| 6.576923
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.155963
| 218
| 13
| 62
| 16.769231
| 0.907609
| 0.399083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
46595df494e81791f7fca742cdfdd7448f09a9ff
| 831
|
py
|
Python
|
matchbook/tests/test_apiclient.py
|
jackrhunt13/matchbook
|
a12ac26e272ddc004f2590b4f4ad8f4715f1df66
|
[
"MIT"
] | 11
|
2017-07-11T10:08:19.000Z
|
2021-01-22T17:08:44.000Z
|
matchbook/tests/test_apiclient.py
|
oddoneuk/matchbook
|
eb37817c4f6604097be406edf2df7f711586dcf6
|
[
"MIT"
] | 10
|
2017-07-14T23:43:25.000Z
|
2021-08-19T17:21:10.000Z
|
matchbook/tests/test_apiclient.py
|
oddoneuk/matchbook
|
eb37817c4f6604097be406edf2df7f711586dcf6
|
[
"MIT"
] | 9
|
2017-12-13T13:25:42.000Z
|
2021-07-16T18:24:23.000Z
|
import unittest
from matchbook.apiclient import APIClient
from matchbook.endpoints import Betting, Account, KeepAlive, Login, Logout, MarketData, ReferenceData, Reporting
class APIClientTest(unittest.TestCase):
    """Smoke tests for APIClient construction and endpoint wiring."""

    def test_apiclient_init(self):
        """A fresh client exposes every endpoint with the expected type."""
        client = APIClient('username', 'password')
        assert str(client) == 'APIClient'
        assert repr(client) == '<APIClient [username]>'
        wiring = [
            ('account', Account),
            ('betting', Betting),
            ('keep_alive', KeepAlive),
            ('login', Login),
            ('logout', Logout),
            ('market_data', MarketData),
            ('reference_data', ReferenceData),
            ('reporting', Reporting),
        ]
        for attr, endpoint_cls in wiring:
            assert isinstance(getattr(client, attr), endpoint_cls)
| 37.772727
| 112
| 0.726835
| 84
| 831
| 7.130952
| 0.392857
| 0.213689
| 0.293823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186522
| 831
| 21
| 113
| 39.571429
| 0.886095
| 0
| 0
| 0
| 0
| 0
| 0.056627
| 0
| 0
| 0
| 0
| 0
| 0.625
| 1
| 0.0625
| false
| 0.0625
| 0.1875
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
465ea013fb24a2d6820687e62ebe6b5f80c99a82
| 342
|
py
|
Python
|
ooer/models/user.py
|
williammck/ooer
|
d6dc07627acfa41de824b86addf4eb70ecc8fc76
|
[
"MIT"
] | null | null | null |
ooer/models/user.py
|
williammck/ooer
|
d6dc07627acfa41de824b86addf4eb70ecc8fc76
|
[
"MIT"
] | null | null | null |
ooer/models/user.py
|
williammck/ooer
|
d6dc07627acfa41de824b86addf4eb70ecc8fc76
|
[
"MIT"
] | null | null | null |
from mongoengine import *
from flask_login import UserMixin
class User(Document, UserMixin, object):
    """Application user persisted via mongoengine, usable with Flask-Login."""

    # Unique login handle; presence is enforced at save time.
    username = StringField(required=True, unique=True)
    email = StringField()

    def get_id(self):
        """Flask-Login hook: the document id as a string."""
        document_id = self.id
        return str(document_id)

    def __repr__(self):
        return self.username

    def __str__(self):
        return self.username
| 20.117647
| 54
| 0.681287
| 41
| 342
| 5.439024
| 0.560976
| 0.134529
| 0.125561
| 0.197309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233918
| 342
| 16
| 55
| 21.375
| 0.851145
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.181818
| 0.272727
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
467196265c0667547d23bb24cd17be53c71cb8f3
| 137
|
py
|
Python
|
lender_books/apps.py
|
joyliao07/django_lender
|
332bae3be31842fbf4e43443bd04a9467fce5a3d
|
[
"MIT"
] | 1
|
2019-02-27T01:51:30.000Z
|
2019-02-27T01:51:30.000Z
|
lender_books/apps.py
|
joyliao07/django_lender
|
332bae3be31842fbf4e43443bd04a9467fce5a3d
|
[
"MIT"
] | 4
|
2019-01-08T00:56:19.000Z
|
2019-01-11T03:14:21.000Z
|
lender_books/apps.py
|
joyliao07/django_lender
|
332bae3be31842fbf4e43443bd04a9467fce5a3d
|
[
"MIT"
] | null | null | null |
"""To configurate app lender_books."""
from django.apps import AppConfig
class LenderBooksConfig(AppConfig):
    """Django application configuration for the lender_books app."""
    name = 'lender_books'
| 19.571429
| 38
| 0.759124
| 16
| 137
| 6.375
| 0.8125
| 0.215686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138686
| 137
| 6
| 39
| 22.833333
| 0.864407
| 0.233577
| 0
| 0
| 0
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
467f5b347cbaa534e381c0d1b0b13ac58ef48616
| 117
|
py
|
Python
|
gputools/denoise/__init__.py
|
gmazzamuto/gputools
|
73a4dee76a119f94d8163781a85b691fd080d506
|
[
"BSD-3-Clause"
] | 89
|
2015-08-28T14:17:33.000Z
|
2022-01-20T16:19:34.000Z
|
gputools/denoise/__init__.py
|
gmazzamuto/gputools
|
73a4dee76a119f94d8163781a85b691fd080d506
|
[
"BSD-3-Clause"
] | 24
|
2015-08-28T19:06:22.000Z
|
2022-02-21T21:10:13.000Z
|
gputools/denoise/__init__.py
|
gmazzamuto/gputools
|
73a4dee76a119f94d8163781a85b691fd080d506
|
[
"BSD-3-Clause"
] | 17
|
2015-08-28T18:56:43.000Z
|
2021-09-15T23:15:36.000Z
|
from .nlm3 import nlm3
from .nlm2 import nlm2
from .bilateral2 import bilateral2
from .bilateral3 import bilateral3
| 19.5
| 34
| 0.820513
| 16
| 117
| 6
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.145299
| 117
| 5
| 35
| 23.4
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4686aca7a7aa468b4230731e1687c9dfcfee1f59
| 58
|
py
|
Python
|
src/Basket.py
|
MattBlue92/progettoGiocattolo
|
8d4c7f924d9ebc3358e1d575b968e1c695e3d312
|
[
"MIT"
] | null | null | null |
src/Basket.py
|
MattBlue92/progettoGiocattolo
|
8d4c7f924d9ebc3358e1d575b968e1c695e3d312
|
[
"MIT"
] | 3
|
2020-05-12T09:21:20.000Z
|
2020-05-12T20:27:55.000Z
|
src/Basket.py
|
MattBlue92/progettoGiocattolo
|
8d4c7f924d9ebc3358e1d575b968e1c695e3d312
|
[
"MIT"
] | null | null | null |
class Basket:
    """Toy shopping basket with a fixed total price."""

    def getPrice(self):
        """Return the basket's total price (hard-coded to 100).

        The camelCase name is kept for caller compatibility.
        """
        # Dropped the stray trailing semicolon (a no-op in Python).
        return 100
| 14.5
| 23
| 0.603448
| 7
| 58
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0.310345
| 58
| 4
| 24
| 14.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.