hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
942dc9b440e4ffa9fdc514a9cb6d21b6ea806211 | 398 | py | Python | Solutions/PAT/Basic/1004.py | Kahsolt/OJ-Notes | 6623ab7d61e305ce0467d6220f49134044b67c9e | [
"WTFPL"
] | null | null | null | Solutions/PAT/Basic/1004.py | Kahsolt/OJ-Notes | 6623ab7d61e305ce0467d6220f49134044b67c9e | [
"WTFPL"
] | null | null | null | Solutions/PAT/Basic/1004.py | Kahsolt/OJ-Notes | 6623ab7d61e305ce0467d6220f49134044b67c9e | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python3
# Score Ranking (成绩排名) (20)
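# Illustrative sample session (editor's addition, recalled from the PAT B1004
# statement; lines follow the problem's "name id score" format):
#   stdin:  3
#           Joe Math990112 89
#           Mike CS991301 100
#           Mary EE990830 95
#   stdout: Mike CS991301
#           Joe Math990112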
n = int(input())
l = input()
maxRecord = l
minRecord = l
maxScore = int(l.split()[2])
minScore = int(l.split()[2])
for i in range(n-1):
    l = input()
    s = int(l.split()[2])
    if s < minScore:
        minScore = s
        minRecord = l
    if s > maxScore:
        maxScore = s
        maxRecord = l
print(' '.join(maxRecord.split()[:-1]))
print(' '.join(minRecord.split()[:-1])) | 18.952381 | 39 | 0.580402 | 62 | 398 | 3.725806 | 0.403226 | 0.051948 | 0.116883 | 0.12987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028481 | 0.20603 | 398 | 21 | 40 | 18.952381 | 0.702532 | 0.077889 | 0 | 0.352941 | 0 | 0 | 0.005464 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
942ecae3009ce2581679881569b9f0511a4f09f7 | 9,057 | py | Python | thseq/modules/attention.py | DeepLearnXMU/ABDNMT-RNMT | c3b20e4afdbfee5741e95a42bbd31329bb9bb93d | [
"MIT"
] | 12 | 2019-08-17T15:40:11.000Z | 2022-02-04T16:22:18.000Z | thseq/modules/attention.py | DeepLearnXMU/ABDNMT-RNMT | c3b20e4afdbfee5741e95a42bbd31329bb9bb93d | [
"MIT"
] | null | null | null | thseq/modules/attention.py | DeepLearnXMU/ABDNMT-RNMT | c3b20e4afdbfee5741e95a42bbd31329bb9bb93d | [
"MIT"
] | 3 | 2019-06-04T08:39:56.000Z | 2020-01-10T06:52:04.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, query_size, attention_size):
super(Attention, self).__init__()
self.query_size = query_size
self.key_size = attention_size
self.map_query = nn.Linear(query_size, attention_size)
self.v = nn.Linear(attention_size, 1)
def forward(self, query, keys, values, mask):
"""
alpha = v^T * (tanh(W * K + U * Q))
Args:
query: B x D
keys: B x T x D
values: B x T x D
mask: B x T
        Returns:
            output: B x D tensor, the attention-weighted sum of values
            x: B x T tensor of attention weights
"""
# B x T x D
x = keys + self.map_query(query).unsqueeze(1)
# B x T
x = self.v(torch.tanh(x)).squeeze(-1)
        x = x.masked_fill(mask, -float('inf'))  # masking through .data would silently bypass autograd
x = F.softmax(x, -1)
output = torch.bmm(x.unsqueeze(1), values).squeeze(1)
return output, x
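# A minimal usage sketch of Attention (editor's illustration; sizes are
# hypothetical and keys/values are assumed pre-projected to attention_size,
# as forward() expects):
#
#   attn = Attention(query_size=512, attention_size=256)
#   query = torch.randn(4, 512)                    # B x D_q
#   keys = torch.randn(4, 10, 256)                 # B x T x attention_size
#   values = torch.randn(4, 10, 256)
#   mask = torch.zeros(4, 10, dtype=torch.bool)    # True marks padded positions
#   context, weights = attn(query, keys, values, mask)  # 4 x 256, 4 x 10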
def scaled_dot_attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
# d_k = query.size(-1) mistake!
d_k = key.size(-1)
scaling = d_k ** -0.5
scores = torch.matmul(query, key.transpose(-2, -1)) * scaling
if mask is not None:
scores = scores.masked_fill(mask, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
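# A minimal usage sketch of scaled_dot_attention (editor's illustration with
# hypothetical shapes, not from the original file):
#
#   q = torch.randn(2, 8, 5, 64)   # (batch, heads, query_len, d_k)
#   k = torch.randn(2, 8, 7, 64)   # (batch, heads, key_len, d_k)
#   v = torch.randn(2, 8, 7, 64)
#   out, attn = scaled_dot_attention(q, k, v)  # out (2, 8, 5, 64), attn (2, 8, 5, 7)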
def additive_attention(query, key, value, V, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
b, h, t, d = key.size()
# V.size() : h,d,1
scores = torch.tanh(key + query) # b,h,t,d
scores = scores.transpose(1, 0).contiguous().view(h, b * t, d) # h,b*t,d
scores = torch.bmm(scores, V) # h,b*t,1
    scores = scores.view(h, b, t, 1).transpose(1, 0)  # b,h,t,1 (note: the trailing singleton
    # dim makes the softmax below normalize over size 1; squeeze(-1) here to attend over t)
if mask is not None:
scores = scores.masked_fill(mask, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadedAttention(nn.Module):
r"""Multi-head Attention
    Supports reusing and dynamically extending projected keys and values to reduce computation overhead.
    The behavior is determined by the combination of inputs fed to the forward function.
1. kv=None and kv_memory=None:
This can be used in trivial self-attention, where the whole input sequence is known
and each input tensor serves as query, key and value.
2. kv=(tensor, tensor) and kv_memory=None:
Assume the keys and values are not projected and do projections.
3. kv=None and kv_memory=(tensor,tensor):
Reuse the projected keys and values.
For example, this is used in decoder-encoder cross-attention.
4. kv=(tensor,tensor) and kv_memory=(tensor,tensor) and expand_memory:
Reuse the projected keys and values and extend them by new keys and values.
This is used in masked self-attention, where only the partial input sequence is exposed to current query.
Args:
num_head: number of heads
input_sizes: a tuple representing (query_size, key_size, value_size).
In different cases, keys are sometimes used as values and queries may be used as both keys and values.
We can use this known condition to simplify the calculation.
1) When input_sizes=(query_size, None, None), query will be used as key and value;
2) When input_sizes=(query_size, key_size, None), key will be used as value.
The calculation is reduced to 1 linear projection in case 1) and 2 projections in case 2).
Otherwise, there will be 3 linear projections.
hidden_size: projection size for query and key.
output_value_size: projection size for value
dropout:
attention_type: 'scaled_dot' or 'additive'.
Inputs:
query: a 3-d tensor of shape (batch, length_q, hidden_size) to be transformed
kv: a tuple of key and value tensor to be transformed, (key, value)
kv_memory: a tuple of key and value tensor already transformed, (key, value)
mask: a 3-d tensor of shape (batch, length_q, length_k)
        expand_memory: when kv_memory is present, extend it with the newly transformed kv
outputs:
out: a tensor of weighted average of values.
kv_memory: current keys and values.
"""
def __init__(self, num_head, input_sizes, hidden_size, output_value_size, dropout=None, mode='scaled_dot'):
"""
"""
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert hidden_size % num_head == 0
assert isinstance(input_sizes, (tuple, list)) and len(input_sizes) == 3
self.num_head = num_head
query_size, key_size, value_size = input_sizes
def add_nonlinear(module):
if mode == 'additive':
module.add_module('1', nn.Tanh())
if key_size is None:
self.map_qkv = nn.Sequential(nn.Linear(query_size, 2 * hidden_size + output_value_size))
add_nonlinear(self.map_qkv)
elif value_size is None:
self.map_q = nn.Linear(query_size, hidden_size)
self.map_kv = nn.Linear(key_size, hidden_size + output_value_size)
add_nonlinear(self.map_q)
add_nonlinear(self.map_kv)
else:
self.map_q = nn.Linear(query_size, hidden_size)
self.map_k = nn.Linear(key_size, hidden_size)
            # project values to output_value_size (was hidden_size), matching the
            # other branches and the reshape/linear_out assumptions below
            self.map_v = nn.Linear(value_size, output_value_size)
add_nonlinear(self.map_q)
add_nonlinear(self.map_k)
add_nonlinear(self.map_v)
self.linear_out = nn.Linear(output_value_size, output_value_size)
self.scores = None
self.dropout = nn.Dropout(dropout) if dropout else None
self.V = None
if mode == 'additive':
self.V = nn.Parameter(torch.rand(num_head, hidden_size // num_head, 1))
self.attention_type = mode
self.query_size = query_size
self.key_size = key_size
self.value_size = value_size
self.hidden_size = hidden_size
self.output_value_size = output_value_size
def forward(self, query, kv, kv_memory, mask=None, expand_memory=False):
if kv is None:
kv = (None, None)
if kv_memory is None:
kv_memory = (None, None)
if not isinstance(kv, (tuple, list)):
kv = (kv,)
if not isinstance(kv_memory, (tuple, list)):
kv_memory = (kv_memory,)
squeeze = query.dim() == 2
if squeeze:
query = query.unsqueeze(1)
if mask is not None:
mask = mask.unsqueeze(1) # broadcast to num_head dim
k, v = kv if kv is not None else (None, None)
batch_size = query.size(0)
# 1) Do all the linear projections
if self.key_size is None:
qkv = self.map_qkv(query) # (batch_size, length, dim)
q, k, v = qkv.split([self.hidden_size, self.hidden_size, self.output_value_size], -1)
elif self.value_size is None:
q = self.map_q(query) # (batch_size, length, dim)
if k is not None:
kv = self.map_kv(kv[0]) # (batch_size, length, dim)
k, v = kv.split([self.hidden_size, self.output_value_size], -1)
else:
k, v = kv_memory
else:
q = self.map_q(query) # (batch_size, length, dim)
if k is not None:
k = self.map_k(kv[0]) # (batch_size, length, dim)
v = self.map_v(kv[1]) # (batch_size, length, dim)
else:
k, v = kv_memory
if expand_memory:
k_memory, v_memory = kv_memory
if k_memory is not None:
k = torch.cat([k_memory, k], 1) # concatenate on length dimension
v = torch.cat([v_memory, v], 1)
kv_memory = k, v
# split into heads
# (batch, num_head, length, dim)
q, k, v = [x.view(x.size(0), x.size(1), self.num_head, x.size(-1) // self.num_head).transpose(1, 2)
for x in (q, k, v)]
# 2) Apply attention on all the projected vectors in batch.
if self.attention_type == 'scaled_dot':
x, self.scores = scaled_dot_attention(q, k, v, mask=mask,
dropout=self.dropout)
elif self.attention_type == 'additive':
x, self.scores = additive_attention(q, k, v, self.V, mask=mask,
dropout=self.dropout, )
else:
raise NotImplementedError
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(batch_size, -1, self.output_value_size)
out = self.linear_out(x)
if squeeze:
out = out.squeeze(1)
return out, kv_memory
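if __name__ == "__main__":
    # Editor's smoke test with hypothetical sizes (not part of the original
    # file): project and cache keys/values once, then reuse them.
    mha = MultiHeadedAttention(8, (512, 512, 512), 512, 512)
    q = torch.randn(2, 5, 512)
    k = torch.randn(2, 7, 512)
    v = torch.randn(2, 7, 512)
    out, kv_mem = mha(q, (k, v), None, expand_memory=True)  # cache projected k/v
    out2, _ = mha(q, None, kv_mem)                          # reuse cached k/v
    print(out.shape, out2.shape)  # torch.Size([2, 5, 512]) twice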
| 38.540426 | 114 | 0.602738 | 1,311 | 9,057 | 4.007628 | 0.154081 | 0.026646 | 0.031405 | 0.021698 | 0.308337 | 0.254473 | 0.177579 | 0.168062 | 0.142558 | 0.100114 | 0 | 0.010408 | 0.299879 | 9,057 | 234 | 115 | 38.705128 | 0.818167 | 0.320857 | 0 | 0.266667 | 0 | 0 | 0.027276 | 0 | 0 | 0 | 0 | 0 | 0.014815 | 1 | 0.051852 | false | 0 | 0.022222 | 0 | 0.118519 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9432f1baae90553cebe69202cb4904909bae73e4 | 3,737 | py | Python | examples/infersent.py | goel96vibhor/AdvSentEval | c23684c5f9da905517071361fdb40acf194cd608 | [
"BSD-3-Clause"
] | 2 | 2018-12-19T22:06:22.000Z | 2019-01-29T16:59:31.000Z | examples/infersent.py | goel96vibhor/AdvSentEval | c23684c5f9da905517071361fdb40acf194cd608 | [
"BSD-3-Clause"
] | null | null | null | examples/infersent.py | goel96vibhor/AdvSentEval | c23684c5f9da905517071361fdb40acf194cd608 | [
"BSD-3-Clause"
] | 2 | 2019-02-10T22:40:43.000Z | 2019-04-03T06:16:33.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
InferSent models. See https://github.com/facebookresearch/InferSent.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import os
import torch
import logging
import numpy as np
# get models.py from InferSent repo
from models import InferSent
from AdversarialModels import WordNetSynonym
# Set PATHs
PATH_SENTEVAL = '../'
PATH_TO_DATA = '../data'
PATH_TO_W2V = 'fasttext/glove.840B.300d.txt' # or crawl-300d-2M.vec for V2
MODEL_PATH = 'infersent1.pkl'
V = 1 # version of InferSent
assert os.path.isfile(MODEL_PATH) and os.path.isfile(PATH_TO_W2V), \
'Set MODEL and GloVe PATHs'
# import senteval
sys.path.insert(0, PATH_SENTEVAL)
import senteval
def dim(a):
if not type(a) == list:
return []
return [len(a)] + dim(a[0])
def prepare(params, samples):
params.infersent.build_vocab([' '.join(s) for s in samples], tokenize=False)
def batcher(params, batch):
sentences = [' '.join(s) for s in batch]
embeddings = params.infersent.encode(sentences, bsize=params.batch_size, tokenize=False)
return embeddings
def adversarialFunc(params, batch_sentences, batch_labels, embeddings = None):
# sentvec = np.multiply(sentvec, params.wvec_dim)
adv_batch_sentences, adv_labels = params.infersent.prepare_adversarial_samples(batch_sentences, batch_labels)
# print("adv samples size %d",len(adv_batch_sentences))
total_count = sum(len(x) for x in adv_batch_sentences)
# print("sum of sentences called %d, batch_size %d" %(total_count, params.batch_size))
adv_embeddings = []
for sent_adversaries, i in zip(adv_batch_sentences, range(len(adv_batch_sentences))):
sentences = [' '.join(s) for s in sent_adversaries]
sent_adv_embeddings = params.infersent.encode_without_shuffle(sentences, bsize=params.batch_size, tokenize=False)
adv_embeddings.append(sent_adv_embeddings)
if i%10 == 0:
print("%d sentences done"%(i))
# print("Adv embeddings shape: %s, adv_labels shape", len(sent_adv_embeddings), dim(adv_labels[i]))
# print("Adv embeddings shape: %s, adv_labels shape",dim(adv_embeddings),dim(adv_labels))
# for i in range(0,len(adv_embeddings),10):
# print("Adv embeddings shape: %s, adv_labels shape", len(adv_embeddings[i]), len(adv_labels[i]))
return adv_embeddings, adv_labels, adv_batch_sentences
"""
Evaluation of trained model on Transfer Tasks (SentEval)
"""
# define senteval params
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5, 'model_name': 'infersent','batch_size': 128, 'train': False}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
# Load InferSent model
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
model.set_w2v_path(PATH_TO_W2V)
print("model created for infersent")
# params_senteval['infersent'] = model
params_senteval['infersent'] = model.cuda()
se = senteval.engine.SE(params_senteval, batcher, prepare, adversarialFunc=adversarialFunc)
# transfer_tasks = ['SST2']
transfer_tasks = ['STSBenchmark']
results = se.eval(transfer_tasks)
# print(results)
| 33.366071 | 138 | 0.704843 | 509 | 3,737 | 4.97053 | 0.357564 | 0.06166 | 0.040316 | 0.010672 | 0.121344 | 0.097233 | 0.081423 | 0.048221 | 0.048221 | 0 | 0 | 0.016468 | 0.17126 | 3,737 | 111 | 139 | 33.666667 | 0.800452 | 0.270003 | 0 | 0 | 0 | 0 | 0.132548 | 0.010634 | 0 | 0 | 0 | 0 | 0.018868 | 1 | 0.075472 | false | 0 | 0.169811 | 0 | 0.320755 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
943370e1bfc6784163e96c08d443c0404b7a159d | 799 | py | Python | pymeteo/constants.py | Matze77/pyMeteo | 8cf234e996c2f294da175ce89e22c8f80ce0df15 | [
"BSD-3-Clause"
] | 51 | 2015-09-02T16:48:36.000Z | 2022-03-03T04:01:09.000Z | pymeteo/constants.py | Matze77/pyMeteo | 8cf234e996c2f294da175ce89e22c8f80ce0df15 | [
"BSD-3-Clause"
] | 26 | 2015-06-16T21:30:21.000Z | 2019-12-12T12:53:30.000Z | pymeteo/constants.py | Matze77/pyMeteo | 8cf234e996c2f294da175ce89e22c8f80ce0df15 | [
"BSD-3-Clause"
] | 20 | 2015-12-25T05:57:37.000Z | 2022-01-06T06:44:55.000Z | """This module provides constants used in the rest of the package
"""
# some constants
missingval = -99999999.
m2km = 0.001
km2m = 1000.
gravity = 9.81
maxparcels = 99999
L = 2.501e6 # latent heat of vaporization
Rd = 287.04 # gas constant dry air
Rv = 461.5 # gas constant water vapor
epsilon = Rd/Rv
cp = 1005.7 # what about cpd vs cpv
cpd = 1005.7 # what about cpd vs cpv
cpv = 1875.0
cpl = 4190.0
cpi = 2118.636
cv = 718.
g = 9.81
p00 = 100000. # reference pressure
T00 = 273.15
xlv = L
xls = 2836017.0
# Derived values
lv1 = xlv+(cpl-cpv)*T00
lv2 = cpl - cpv
ls1 = xls+(cpi-cpv)*T00
ls2 = cpi - cpv
kappa = (cp-cv)/cp
kappa_d = Rd/cp
rp00 = 1./p00
reps = Rv/Rd
eps = epsilon
rddcp = kappa_d
cpdrd = cp/Rd
cpdg = cp/g
converge = 0.0002
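if __name__ == "__main__":
    # Editor's example with hypothetical state values (not part of the original
    # module): potential temperature theta = T * (p00 / p) ** (Rd / cp).
    T = 290.0       # temperature [K]
    p = 85000.0     # pressure [Pa]
    print(T * (p00 / p) ** kappa_d)  # ~303.8 K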
| 17.755556 | 65 | 0.627034 | 137 | 799 | 3.642336 | 0.627737 | 0.012024 | 0.036072 | 0.056112 | 0.088176 | 0.088176 | 0.088176 | 0 | 0 | 0 | 0 | 0.19322 | 0.261577 | 799 | 44 | 66 | 18.159091 | 0.652542 | 0.289111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
943405ec2b5893fc8c55f18e23a5611334471ceb | 23,161 | py | Python | baikal/_core/model.py | awesome-archive/baikal | c44b4f1b0f7a01ca5d41762043723aeeb5a130c9 | [
"BSD-3-Clause"
] | 1 | 2019-11-18T23:12:22.000Z | 2019-11-18T23:12:22.000Z | baikal/_core/model.py | awesome-archive/baikal | c44b4f1b0f7a01ca5d41762043723aeeb5a130c9 | [
"BSD-3-Clause"
] | null | null | null | baikal/_core/model.py | awesome-archive/baikal | c44b4f1b0f7a01ca5d41762043723aeeb5a130c9 | [
"BSD-3-Clause"
] | 1 | 2020-04-19T12:16:15.000Z | 2020-04-19T12:16:15.000Z | from collections import defaultdict
from typing import Union, List, Dict, Set, Iterable, Optional
from baikal._core.data_placeholder import is_data_placeholder_list, DataPlaceholder
from baikal._core.digraph import DiGraph
from baikal._core.step import Step, InputStep
from baikal._core.typing import ArrayLike
from baikal._core.utils import find_duplicated_items, listify, safezip2, SimpleCache
# Just to avoid function signatures painful to the eye
DataPlaceHolders = Union[DataPlaceholder, List[DataPlaceholder]]
ArrayLikes = Union[ArrayLike, List[ArrayLike]]
DataDict = Dict[Union[DataPlaceholder, str], ArrayLike]
# TODO: Update docstrings
class Model(Step):
"""A Model is a network (more precisely, a directed acyclic graph) of Steps,
and it is defined from the input/output specification of the pipeline.
    Models have fit and predict routines that, together with a graph-based engine,
allow the automatic (feed-forward) computation of each of the pipeline steps
when fed with data.
Parameters
----------
inputs
Inputs to the model.
outputs
Outputs of the model.
targets
Targets of the model.
name
Name of the model (optional). If no name is passed, a name will be
automatically generated.
trainable
Whether the model is trainable (True) or not (False). Setting
`trainable=False` freezes the model. This flag is only meaningful when
using the model as a step in a bigger model.
Attributes
----------
graph
The graph associated to the model built from the input/output
specification.
Methods
-------
fit
Trains the model on the given input and target data.
predict
Generates predictions from the input data. It can also be used to
query intermediate outputs.
get_step
Get a step (graph node) in the model by name.
get_data_placeholder
Get a data placeholder (graph half-edge) in the model by name.
get_params
Get parameters of the model.
set_params
Set the parameters of the model.
"""
def __init__(
self,
inputs: DataPlaceHolders,
outputs: DataPlaceHolders,
targets: Optional[DataPlaceHolders] = None,
name: Optional[str] = None,
trainable: bool = True,
):
super().__init__(name=name, trainable=trainable)
def check(this: DataPlaceHolders, what: str) -> List:
this = listify(this)
if not is_data_placeholder_list(this):
raise ValueError("{} must be of type DataPlaceholder.".format(what))
if len(set(this)) != len(this):
raise ValueError("{} must be unique.".format(what))
return this
inputs = check(inputs, "inputs")
outputs = check(outputs, "outputs")
if targets is not None:
targets = check(targets, "targets")
else:
targets = []
self._n_outputs = len(outputs)
self._internal_inputs = inputs
self._internal_outputs = outputs
self._internal_targets = targets
self._build()
def _build(self):
# Model uses the DiGraph data structure to store and operate on its DataPlaceholder and Steps.
self._graph = build_graph_from_outputs(self._internal_outputs)
# Collect data placeholders
self._data_placeholders = {}
for step in self._graph:
for output in step.outputs:
self._data_placeholders[output.name] = output
# Collect steps
self._steps = {step.name: step for step in self._graph}
self._all_steps_sorted = (
self._graph.topological_sort()
        )  # fail early if the graph has a cycle
self._steps_cache = SimpleCache()
self._get_required_steps(
self._internal_inputs, self._internal_targets, self._internal_outputs
)
def _get_required_steps(
self,
given_inputs: Iterable[DataPlaceholder],
given_targets: Iterable[DataPlaceholder],
desired_outputs: Iterable[DataPlaceholder],
*,
allow_unused_inputs=False,
allow_unused_targets=False,
follow_targets=True,
ignore_trainable_false=True
) -> List[Step]:
"""Backtrack from the desired outputs until the given inputs and targets
to get the required steps. That is, find the ancestors of the nodes that
provide the desired outputs. Raise an error if there is an ancestor whose
input/target is not in the given inputs/targets. We assume a DAG (guaranteed
by the success of topological_sort) and unique given_inputs, given_targets
and required_outputs (guaranteed by the callers of this function).
inputs and targets are handled separately to allow ignoring the targets
when getting the required steps at predict time.
Unused inputs might be allowed. This is the case in predict.
Unused targets might be allowed. This is the case in fit.
"""
trainable_flags = tuple(
(step_name, self.get_step(step_name).trainable)
for step_name in sorted(self._steps)
)
# We use as keys all the information that affects the
# computation of the required steps
cache_key = (
tuple(sorted(given_inputs)),
tuple(sorted(given_targets)),
tuple(sorted(desired_outputs)),
allow_unused_inputs,
allow_unused_targets,
follow_targets,
ignore_trainable_false,
trainable_flags,
)
if cache_key in self._steps_cache:
return self._steps_cache[cache_key]
given_inputs = set(given_inputs)
given_targets = set(given_targets)
desired_outputs = set(desired_outputs)
given_inputs_found = set() # type: Set[DataPlaceholder]
given_targets_found = set() # type: Set[DataPlaceholder]
required_steps = set() # type: Set[Step]
# Depth-first search
# backtracking stops if any of the following happen:
# - found a given input or target
# - found a known required step
# - hit an InputStep
def backtrack(output):
steps_required_by_output = set()
if output in given_inputs:
given_inputs_found.add(output)
return steps_required_by_output
if output in given_targets:
given_targets_found.add(output)
return steps_required_by_output
parent_step = output.step
if parent_step in required_steps:
return steps_required_by_output
steps_required_by_output = {parent_step}
for input in parent_step.inputs:
steps_required_by_output |= backtrack(input)
if follow_targets and (parent_step.trainable or ignore_trainable_false):
for target in parent_step.targets:
steps_required_by_output |= backtrack(target)
return steps_required_by_output
for output in desired_outputs:
required_steps |= backtrack(output)
# Check for missing inputs/targets
# InputSteps that were reached by backtracking are missing inputs/targets.
# We *do not* compare given_inputs_found/given_targets_found with
# self._internal_inputs/self._internal_targets because we allow giving
# intermediate inputs directly.
missing_inputs_or_targets = set() # type: Set[DataPlaceholder]
for step in required_steps:
if isinstance(step, InputStep):
missing_inputs_or_targets |= set(step.outputs)
if missing_inputs_or_targets:
raise ValueError(
"The following inputs or targets are required but were not given:\n"
"{}".format(
",".join([input.name for input in missing_inputs_or_targets])
)
)
# Check for any unused inputs/targets
unused_inputs = given_inputs - given_inputs_found
if unused_inputs and not allow_unused_inputs:
raise ValueError(
"The following inputs were given but are not required:\n"
"{}".format(",".join([input.name for input in unused_inputs]))
)
unused_targets = given_targets - given_targets_found
if unused_targets and not allow_unused_targets:
raise ValueError(
"The following targets were given but are not required:\n"
"{}".format(",".join([target.name for target in unused_targets]))
)
required_steps_sorted = [
step for step in self._all_steps_sorted if step in required_steps
]
self._steps_cache[cache_key] = required_steps_sorted
return required_steps_sorted
def _normalize_data(
self,
data: Union[ArrayLikes, DataDict],
data_placeholders: List[DataPlaceholder],
) -> Dict[DataPlaceholder, ArrayLike]:
if isinstance(data, dict):
return self._normalize_dict(data)
else:
return self._normalize_list(data, data_placeholders)
def _normalize_dict(self, data: DataDict) -> Dict[DataPlaceholder, ArrayLike]:
data_norm = {}
for key, value in data.items():
key = self.get_data_placeholder(
key.name if isinstance(key, DataPlaceholder) else key
)
data_norm[key] = value
return data_norm
@staticmethod
def _normalize_list(
data: ArrayLikes, data_placeholders: List[DataPlaceholder]
) -> Dict[DataPlaceholder, ArrayLike]:
data = listify(data)
try:
data_norm = dict(safezip2(data_placeholders, data))
except ValueError as e:
# TODO: Improve this message
message = (
"When passing inputs/outputs as a list or a single array, "
"the number of arrays must match the number of inputs/outputs "
"specified at instantiation. "
"Got {}, expected: {}.".format(len(data), len(data_placeholders))
)
raise ValueError(message) from e
return data_norm
def get_step(self, name: str) -> Step:
"""Get a step (graph node) in the model by name.
Parameters
----------
name
Name of the step.
Returns
-------
The step.
"""
        # Steps are assumed to have unique names (guaranteed when building the graph)
if name in self._steps.keys():
return self._steps[name]
raise ValueError("{} was not found in the model.".format(name))
def get_data_placeholder(self, name: str) -> DataPlaceholder:
"""Get a data placeholder (graph half-edge) in the model by name.
Parameters
----------
name
Name of the data placeholder.
Returns
-------
The data placeholder.
"""
# If the step names are unique, so are the data_placeholder names
if name in self._data_placeholders.keys():
return self._data_placeholders[name]
raise ValueError("{} was not found in the model.".format(name))
def fit(
self,
X: Union[ArrayLikes, DataDict],
y: Optional[Union[ArrayLikes, DataDict]] = None,
**fit_params
):
"""Trains the model on the given input and target data.
The model will automatically propagate the data through the pipeline and
fit any internal steps that require training.
Parameters
----------
X
Input data (independent variables). It can be either of:
- A single array-like object (in the case of a single input)
- A list of array-like objects (in the case of multiple inputs)
- A dictionary mapping DataPlaceholders (or their names) to
array-like objects. The keys must be among the inputs passed
at instantiation.
y
Target data (dependent variables) (optional). It can be either of:
- None (in the case all steps are either non-trainable and/or
unsupervised learning steps)
- A single array-like object (in the case of a single target)
- A list of array-like objects(in the case of multiple targets)
- A dictionary mapping target DataPlaceholders (or their names) to
array-like objects. The keys must be among the targets passed
at instantiation.
Targets required by steps that were set as non-trainable might
be omitted.
fit_params
Parameters passed to the fit method of each model step, where each
parameter name has the form ``<step-name>__<parameter-name>``.
Returns
-------
"""
# TODO: Add better error message to know which step failed in case of any error
# TODO: Consider using joblib's Parallel and Memory classes to parallelize and cache computations
# In graph parlance, the 'parallelizable' paths of a graph are called 'disjoint paths'
# https://stackoverflow.com/questions/37633941/get-list-of-parallel-paths-in-a-directed-graph
# TODO: How to behave when fit was called on a Model (and Step) that is trainable=False?
# input/output normalization
X_norm = self._normalize_data(X, self._internal_inputs)
for input in self._internal_inputs:
if input not in X_norm:
raise ValueError("Missing input {}.".format(input))
if y is not None:
y_norm = self._normalize_data(y, self._internal_targets)
for target in self._internal_targets:
if target not in y_norm:
raise ValueError("Missing target {}.".format(target))
else:
y_norm = {}
# Get steps and their fit_params
# We allow unused targets to allow modifying the trainable flags
# without having to change the targets accordingly.
steps = self._get_required_steps(
X_norm,
y_norm,
self._internal_outputs,
allow_unused_targets=True,
ignore_trainable_false=False,
)
fit_params_steps = defaultdict(dict) # type: Dict[Step, Dict]
for param_key, param_value in fit_params.items():
# TODO: Add check for __. Add error message if step was not found
step_name, _, param_name = param_key.partition("__")
step = self.get_step(step_name)
fit_params_steps[step][param_name] = param_value
# Intermediate results are stored here
# keys: DataPlaceholder instances, values: actual data (e.g. numpy arrays)
results_cache = dict()
results_cache.update(X_norm)
results_cache.update(y_norm)
for step in steps:
Xs = [results_cache[i] for i in step.inputs]
# TODO: Use fit_transform if step has it
# 1) Fit phase
if hasattr(step, "fit") and step.trainable:
ys = [results_cache[t] for t in step.targets]
fit_params = fit_params_steps.get(step, {})
# TODO: Add a try/except to catch missing output data errors (e.g. when forgot ensemble outputs)
step.fit(*Xs, *ys, **fit_params) # type: ignore # (it's a mixin)
# 2) predict/transform phase
successors = [s for s in self.graph.successors(step)]
if successors:
self._compute_step(step, Xs, results_cache)
return self
def predict(
self,
X: Union[ArrayLikes, DataDict],
output_names: Optional[Union[str, List[str]]] = None,
) -> ArrayLikes:
"""
        **Models are query-able**. That is, you can request outputs other
        than those specified at model instantiation. This allows querying
        intermediate outputs and eases debugging.
Parameters
----------
X
Input data. It follows the same format as in the fit function.
output_names
Names of required outputs (optional). You can specify any final or
intermediate output by passing the name of its associated data
placeholder. If not specified, it will return the outputs specified
at instantiation.
Returns
-------
The computed outputs.
"""
# Intermediate results are stored here
results_cache = dict() # type: Dict[DataPlaceholder, ArrayLike]
# Normalize inputs
X_norm = self._normalize_data(X, self._internal_inputs)
# Get required outputs
if output_names is None:
outputs = self._internal_outputs
else:
output_names = listify(output_names)
if len(set(output_names)) != len(output_names):
raise ValueError("output_names must be unique.")
outputs = [self.get_data_placeholder(output) for output in output_names]
# We allow unused inputs to allow debugging different outputs
# without having to change the inputs accordingly.
steps = self._get_required_steps(
X_norm, [], outputs, allow_unused_inputs=True, follow_targets=False
)
# Compute
results_cache.update(X_norm)
for step in steps:
Xs = [results_cache[i] for i in step.inputs]
self._compute_step(step, Xs, results_cache)
output_data = [results_cache[o] for o in outputs]
if len(output_data) == 1:
return output_data[0]
else:
return output_data
@staticmethod
def _compute_step(step, Xs, cache):
# TODO: Raise warning if computed output is already in cache.
# This happens when recomputing a step that had a subset of its outputs already passed in the inputs.
# TODO: Some regressors have extra options in their predict method, and they return a tuple of arrays.
# https://scikit-learn.org/stable/glossary.html#term-predict
output_data = step.compute(*Xs)
output_data = listify(output_data)
try:
cache.update(safezip2(step.outputs, output_data))
except ValueError as e:
message = (
"The number of output data elements ({}) does not match "
"the number of {} outputs ({}).".format(
len(output_data), step.name, len(step.outputs)
)
)
raise RuntimeError(message) from e
def get_params(self, deep=True):
"""Get the parameters of the model.
Parameters
----------
deep
Get the parameters of any nested models.
Returns
-------
params
Parameter names mapped to their values.
"""
# InputSteps are excluded
params = {}
for step in self._steps.values():
if isinstance(step, InputStep):
continue
params[step.name] = step
if hasattr(step, "get_params"):
for param_name, value in step.get_params(deep).items():
params["{}__{}".format(step.name, param_name)] = value
return params
def set_params(self, **params):
"""Set the parameters of the model.
Parameters
----------
params
            Dictionary mapping parameter names to their values. Valid parameter
            keys are of the form ``<step-name>__<parameter-name>``. Entire steps
            can be replaced with ``<step-name>`` keys.
Valid parameter keys can be listed with get_params().
Returns
-------
self
"""
# ----- 1. Replace steps
for key in list(params.keys()):
if key in self._steps:
self._replace_step(key, params.pop(key))
# ----- 2. Replace each step params
# Collect params by step
step_params = defaultdict(dict)
for key, value in params.items():
step_name, _, param_name = key.partition("__")
step_params[step_name][param_name] = value
# Set params for each step
for step_name, params in step_params.items():
step = self.get_step(step_name)
step.set_params(**params)
return self
def _replace_step(self, step_key, new_step):
# Transfer connectivity configuration from old step
# to new step and replace old with new
# TODO: Add check for isinstance(new_step, Step) to fail early before messing things up
transfer_attrs = ["_name", "trainable", "_inputs", "_outputs", "_targets"]
old_step = self._steps[step_key]
for attr in transfer_attrs:
setattr(new_step, attr, getattr(old_step, attr))
# Update outputs of old step to point to the new step
# TODO: The output dataplaceholders should be replaced too
for output in old_step.outputs:
output._step = new_step
# Rebuild model
self._build()
@property
def graph(self):
return self._graph
def build_graph_from_outputs(outputs: Iterable[DataPlaceholder]) -> DiGraph:
"""Builds a graph by backtracking from a sets of outputs.
It does so by backtracking recursively in depth-first fashion, jumping
from outputs to steps in tandem until hitting a step with no inputs (an
InputStep).
It builds the graph including the targets, i.e. the graph at fit time.
Parameters
----------
outputs
Outputs (data placeholders) from where the backtrack to build the
graph starts.
Returns
-------
graph
The built graph.
"""
graph = DiGraph()
# Add nodes (steps)
def collect_steps_from(output):
parent_step = output.step
if parent_step in graph:
return
graph.add_node(parent_step)
for input in parent_step.inputs:
collect_steps_from(input)
for target in parent_step.targets:
collect_steps_from(target)
for output in outputs:
collect_steps_from(output)
# Add edges (data)
for step in graph:
for input in step.inputs:
graph.add_edge(input.step, step, input)
for target in step.targets:
graph.add_edge(target.step, step, target)
# Check for any nodes (steps) with duplicated names
duplicated_names = find_duplicated_items([step.name for step in graph])
if duplicated_names:
raise RuntimeError(
"A graph cannot contain steps with duplicated names. "
"Found the following duplicates:\n"
"{}".format(duplicated_names)
)
return graph
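# A hedged usage sketch of Model (editor's illustration; `Input` and
# `make_step` live elsewhere in baikal, and the sklearn estimator here is
# illustrative, not taken from this file):
#
#   LogisticRegressionStep = make_step(sklearn.linear_model.LogisticRegression)
#   x = Input()
#   y_t = Input()
#   y_p = LogisticRegressionStep()(x, y_t)   # wire the step: inputs and targets
#   model = Model(x, y_p, y_t)
#   model.fit(X_train, y_train)       # the topological sort drives each step's fit
#   y_pred = model.predict(X_test)    # also accepts intermediate output names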
| 35.964286 | 112 | 0.614265 | 2,764 | 23,161 | 4.992041 | 0.149783 | 0.012176 | 0.00587 | 0.012176 | 0.218655 | 0.140093 | 0.118206 | 0.099652 | 0.075228 | 0.058124 | 0 | 0.001066 | 0.311731 | 23,161 | 643 | 113 | 36.020218 | 0.8645 | 0.354734 | 0 | 0.2 | 0 | 0 | 0.056264 | 0 | 0 | 0 | 0 | 0.007776 | 0 | 1 | 0.060317 | false | 0.003175 | 0.022222 | 0.003175 | 0.152381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
943464b9a939d5e26362c02446f6afb5de549747 | 1,127 | py | Python | elmo-chainer/bilm_encode_sentenses.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
] | 112 | 2018-04-18T07:13:03.000Z | 2022-03-11T03:36:34.000Z | elmo-chainer/bilm_encode_sentenses.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
] | 16 | 2018-05-11T11:41:08.000Z | 2021-04-24T03:50:54.000Z | elmo-chainer/bilm_encode_sentenses.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
] | 45 | 2018-04-18T07:13:06.000Z | 2021-12-22T03:46:18.000Z | '''
Encode dataset as biLM embeddings to a file.
'''
import argparse
import json
from bilm import dump_bilm_embeddings
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--batchsize', '-b', type=int, default=32,
help='Minibatch size of computation')
parser.add_argument('--input', '-in', '-i', required=True,
help='Path of input text file')
parser.add_argument('--output', '-out', '-o', required=True,
help='Path of output file to be written')
args = parser.parse_args()
print(json.dumps(args.__dict__, indent=2))
# Location of pretrained LM.
vocab_file = 'vocab-2016-09-10.txt'
options_file = 'elmo_2x4096_512_2048cnn_2xhighway_options.json'
weight_file = 'elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5'
dataset_file = args.input
embedding_file = args.output
assert args.input != args.output
dump_bilm_embeddings(
vocab_file, dataset_file, options_file, weight_file, embedding_file,
gpu=args.gpu, batchsize=args.batchsize
)
| 33.147059 | 72 | 0.704525 | 153 | 1,127 | 4.973856 | 0.48366 | 0.047306 | 0.089356 | 0.052562 | 0.144547 | 0.086728 | 0 | 0 | 0 | 0 | 0 | 0.041622 | 0.168589 | 1,127 | 33 | 73 | 34.151515 | 0.770544 | 0.063886 | 0 | 0 | 0 | 0 | 0.267431 | 0.08787 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94352c18896ac52cb3e3688b9085e562ded5443b | 7,023 | py | Python | Autonomous/libs/ARTracker.py | ROMANT21/SoonerRoverTeamV | 7d09b3d5afec8e3d0ac112d3bf28bcaabce28b50 | [
"MIT"
] | null | null | null | Autonomous/libs/ARTracker.py | ROMANT21/SoonerRoverTeamV | 7d09b3d5afec8e3d0ac112d3bf28bcaabce28b50 | [
"MIT"
] | null | null | null | Autonomous/libs/ARTracker.py | ROMANT21/SoonerRoverTeamV | 7d09b3d5afec8e3d0ac112d3bf28bcaabce28b50 | [
"MIT"
] | null | null | null | import cv2
import cv2.aruco as aruco
import numpy as np
import configparser
import os
class ARTracker:
# Constructor
def __init__(self, cameras, write=False):
self.write=write
self.distanceToMarker = -1
self.distanceToMarker1 = 0
self.distanceToMarker2 = 0
self.widthOfMarker = 0.0
self.widthOfMarker1 = 0
self.widthOfMarker2 = 0
self.centerXMarker = 0
self.angleToMarker = -999.9
#self.cameras = np.empty(3, dtype=str)
self.cameras = cameras
# Open the config file
config = configparser.ConfigParser(allow_no_value=True)
config.read(os.path.dirname(__file__) + '/../config.ini')
# Set variables from the config file
self.degreesPerPixel = float(config['ARTRACKER']['DEGREES_PER_PIXEL'])
self.focalLength = float(config['ARTRACKER']['FOCAL_LENGTH'])
self.knownMarkerWidth = float(config['ARTRACKER']['KNOWN_TAG_WIDTH'])
self.format = config['ARTRACKER']['FORMAT']
self.frameWidth = int(config['ARTRACKER']['FRAME_WIDTH'])
self.frameHeight = int(config['ARTRACKER']['FRAME_HEIGHT'])
# Initialize video writer, fps is set
if self.write:
self.videoWriter = cv2.VideoWriter("autonomous.avi", cv2.VideoWriter_fourcc(
self.format[0], self.format[1], self.format[2], self.format[3]), 5, (self.frameWidth, self.frameHeight), False)
# Set the ar marker dictionary
self.markerDict = aruco.Dictionary_get(aruco.DICT_4X4_50)
# Initialize cameras
self.caps=[]
for i in range(0, len(self.cameras)):
self.caps.append(cv2.VideoCapture(self.cameras[i]))
if not self.caps[i].isOpened():
print(f"!!!!!!!!!!!!!!!!!!!!!!!!!!Camera ", i, " did not open!!!!!!!!!!!!!!!!!!!!!!!!!!")
self.caps[i].set(cv2.CAP_PROP_FRAME_WIDTH, self.frameWidth)
self.caps[i].set(cv2.CAP_PROP_FRAME_HEIGHT, self.frameHeight)
self.caps[i].set(cv2.CAP_PROP_BUFFERSIZE, 1) # greatly speeds up the program but the writer is a bit wack because of this
self.caps[i].set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(self.format[0], self.format[1], self.format[2], self.format[3]))
def markerFound(self, id1, image, id2=-1):
# converts to grayscale
cv2.cvtColor(image, cv2.COLOR_RGB2GRAY, image)
index1 = -1
index2 = -1
bw = image #will hold the black and white image
# tries converting to b&w using different different cutoffs to find the perfect one for the current lighting
for i in range(40, 221, 60):
bw = cv2.threshold(image,i,255, cv2.THRESH_BINARY)[1]
(self.corners, self.markerIDs, self.rejected) = aruco.detectMarkers(bw, self.markerDict)
if not (self.markerIDs is None):
if id2==-1:
index1 = -1
# this just checks to make sure that it found the right marker
for i in range(len(self.markerIDs)):
if self.markerIDs[i] == id1:
index1 = i
break
if index1 != -1:
print("Found the correct marker!")
if self.write:
self.videoWriter.write(bw) #purely for debug
cv2.waitKey(1)
break
else:
print("Found a marker but was not the correct one")
else:
index1 = -1
index2 = -1
if len(self.markerIDs) == 1:
print('Only found marker ', self.markerIDs[0])
else:
for i in range(len(self.markerIDs) - 1, -1,-1): #I trust the biggest markers the most
if self.markerIDs[i] == id1:
index1 = i
elif self.markerIDs[i] == id2:
index2 = i
if index1 != -1 and index2 != -1:
print('Found both markers!')
if self.write:
self.videoWriter.write(bw) #purely for debug
cv2.waitKey(1)
break
if i == 220: #did not find any AR markers with any b&w cutoff
if self.write:
self.videoWriter.write(image)
cv2.waitKey(1)
self.distanceToMarker = -1
self.angleToMarker = -999
return False
if id2 == -1:
self.widthOfMarker = self.corners[index1][0][1][0] - self.corners[index1][0][0][0]
self.distanceToMarker = (self.knownMarkerWidth * self.focalLength) / self.widthOfMarker
self.centerXMarker = (self.corners[index1][0][1][0] + self.corners[index1][0][0][0]) / 2
# takes the pixels from the marker to the center of the image and multiplies it by the degrees per pixel
self.angleToMarker = self.degreesPerPixel * (self.centerXMarker - self.frameWidth/2)
else:
self.widthOfMarker1 = self.corners[index1][0][1][0] - self.corners[index1][0][0][0]
self.widthOfMarker2 = self.corners[index2][0][1][0] - self.corners[index2][0][0][0]
#distanceToAR = (knownWidthOfMarker(20cm) * focalLengthOfCamera) / pixelWidthOfMarker
self.distanceToMarker1 = (self.knownMarkerWidth * self.focalLength) / self.widthOfMarker1
self.distanceToMarker2 = (self.knownMarkerWidth * self.focalLength) / self.widthOfMarker2
print(f"1: {self.distanceToMarker1} \n2: {self.distanceToMarker2}")
self.distanceToMarker = (self.distanceToMarker1 + self.distanceToMarker2) / 2
self.centerXMarker = (self.corners[index1][0][1][0] + self.corners[index2][0][0][0]) / 2
#takes the pixels from the marker to the center of the image and multiplies it by the degrees per pixel
self.angleToMarker = self.degreesPerPixel * (self.centerXMarker - self.frameWidth/2)
return True
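    # Worked example of the pinhole math above (editor's illustration with
    # made-up calibration numbers): with knownMarkerWidth = 20 cm,
    # focalLength = 1000 px and a marker 50 px wide in the frame,
    # distanceToMarker = (20 * 1000) / 50 = 400 cm. If the marker center sits
    # 160 px right of frame center and degreesPerPixel = 0.1, then
    # angleToMarker = 0.1 * 160 = 16 degrees to the right.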
#id1 is the marker you want to look for
#specify id2 if you want to look for a gate
#set write to true to write out images to disk
#cameras=number of cameras to check. -1 for all of them
def findMarker(self, id1, id2=-1, cameras=-1):
if cameras == -1:
cameras=len(self.caps)
for i in range(cameras):
ret, frame = self.caps[i].read()
if self.markerFound(id1, frame, id2=id2):
return True
return False
| 46.82 | 137 | 0.544212 | 791 | 7,023 | 4.788875 | 0.262958 | 0.019799 | 0.031415 | 0.033263 | 0.324974 | 0.278775 | 0.26056 | 0.220961 | 0.206705 | 0.19905 | 0 | 0.038318 | 0.349708 | 7,023 | 149 | 138 | 47.134228 | 0.79111 | 0.153496 | 0 | 0.324074 | 0 | 0 | 0.065529 | 0.018578 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.046296 | 0 | 0.12037 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94354260b7f3c6fef0f04ea5b99624f3fede367a | 1,850 | py | Python | fb_api.py | ianchen06/bus_bot | d8808deda07e7455f82497f6cb4efa65bb46732b | [
"MIT"
] | null | null | null | fb_api.py | ianchen06/bus_bot | d8808deda07e7455f82497f6cb4efa65bb46732b | [
"MIT"
] | null | null | null | fb_api.py | ianchen06/bus_bot | d8808deda07e7455f82497f6cb4efa65bb46732b | [
"MIT"
] | null | null | null | import os
import requests
PAGE_ACCESS_TOKEN = os.getenv('PAGE_ACCESS_TOKEN')
def send_msg(data):
res = requests.post("https://graph.facebook.com/v2.6/me/messages",
params={'access_token': PAGE_ACCESS_TOKEN},
json=data)
if res.status_code == 200:
pass
#app.logger.info("Successfully send msg id %s to user %s"%(res.json().get('message_id'), res.json().get('recipient_id')))
else:
#app.logger.info(res.text)
pass
def send_text_msg(recipient_id, message_text):
    chunk_size = 640  # split long texts into chunks of at most 640 characters
    r = ''
    dd = []
    # greedily pack newline-separated rows into chunks, flushing the current
    # chunk whenever adding the next row would exceed chunk_size
    for row in message_text.split('\n'):
        if len(r + row + '\n') > chunk_size:
            dd.append(r)
            r = ''
        r = r + row + '\n'
    dd.append(r)
for chunk in dd:
data = {
"recipient": {"id": recipient_id},
"message": {"text": chunk}
}
send_msg(data)
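# Example (hypothetical recipient id; editor's addition): a long multi-line
# text is delivered as several messages, each at most the 640-character chunk
# size assumed by send_text_msg above.
#   send_text_msg('1234567890', '\n'.join('line %d' % i for i in range(100)))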
def send_quick_reply(recipient_id, message_text):
data = {
"recipient": {"id": recipient_id},
"message": {"text": message_text,"quick_replies":[
{
"content_type":"text",
"title":"再次查詢",
"payload":"requery",
"image_url":"https://cdn4.iconfinder.com/data/icons/ionicons/512/icon-refresh-128.png"
},
{
"content_type":"text",
"title":"到站提醒",
"payload":"subscription",
"image_url":"https://d30y9cdsu7xlg0.cloudfront.net/png/31771-200.png"
},
{
"content_type":"text",
"title":"加到常用公車",
"payload":"add_to_favorites",
"image_url":"https://cdn4.iconfinder.com/data/icons/small-n-flat/24/star-128.png"
},
{
"content_type":"text",
"title":"取消",
"payload":"reset"
},
#{
# "content_type":"location"
#}
]}
}
send_msg(data)
| 27.205882 | 129 | 0.535676 | 216 | 1,850 | 4.412037 | 0.412037 | 0.080797 | 0.075551 | 0.09234 | 0.271773 | 0.214061 | 0.159496 | 0.081847 | 0 | 0 | 0 | 0.026054 | 0.294595 | 1,850 | 67 | 130 | 27.61194 | 0.704215 | 0.094054 | 0 | 0.280702 | 0 | 0.035088 | 0.313585 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.035088 | 0.035088 | 0 | 0.087719 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9436f350e7d7acc2d2cbaf063890d3cd75d3b805 | 5,015 | py | Python | tuinwolk/tests/algorithm_test.py | TuinfeesT/TuinWolk | 0af0321948f4f573d8eb5ad1b87ea42bfa6644e1 | [
"MIT"
] | 1 | 2017-09-08T02:34:22.000Z | 2017-09-08T02:34:22.000Z | tuinwolk/tests/algorithm_test.py | TuinfeesT/TuinWolk | 0af0321948f4f573d8eb5ad1b87ea42bfa6644e1 | [
"MIT"
] | null | null | null | tuinwolk/tests/algorithm_test.py | TuinfeesT/TuinWolk | 0af0321948f4f573d8eb5ad1b87ea42bfa6644e1 | [
"MIT"
] | null | null | null | import random, string, sys
GROWTH_RATE = 2 # we only place repos in locations that support at least GROWTH_RATE times the size of the repo
CHIP_RATE = 0.9 # if a repo has no locations that support GROWTH_RATE * repo.local_size, we chip a bit off the desired size to see if we can place it anyway
MAX_SERVERS = 10
MAX_LOCATIONS = 10
MAX_REPOS = 10
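# Worked sizing example (illustrative numbers, editor's addition): a repo with
# local_size 100MB commits GROWTH_RATE * 100 = 200MB at every location, so only
# locations with at least 200MB of uncommitted space qualify for it.
# (CHIP_RATE describes the intended shrink-and-retry fallback, which this test
# does not implement yet.)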
__test__ = False
class GeoLoc:
def __init__(self, coords=("", "")):
self.coords = coords
class Server:
	def __init__(self, ip, port, user, geoloc=GeoLoc()):  #, locations=[]
		self.geoloc = geoloc
		self.ip = ip
self.port = port
self.user = user
self.geoloc = geoloc
self.locations = []
def __str__(self):
return repr(self)
def __repr__(self):
s = '{ip}:{port:5}\n'.format(ip=self.ip, port=self.port)
for loc in self.locations:
s += '\t{loc}\n'.format(loc=loc)
for repo in loc.repos:
s += '\t\t{repo}\n'.format(repo=repo)
return s
class Repo:
def __init__(self, name, safe_mode, local_size, min_locations, locations, base_location=None):
self.name = name
self.safe_mode = safe_mode
self.local_size = local_size
self.min_locations = min_locations
self.locations = locations
self.base_location = base_location
def __str__(self):
return repr(self) #'%7s (%3dMB : %s)' %(self.name, self.local_size, self.min_locations)
def __repr__(self):
return '{0} ({1:5d}MB : {2} sites : based in {3})'.format(self.name, self.commit_size(), self.min_locations, self.base_location)
def commit_size(self):
return GROWTH_RATE * self.local_size
class Location:
def __init__(self, max_up, max_down, max_size, path, server):
self.max_up = max_up
self.max_down = max_down
self.max_size = max_size
self.path = path
self.server = server
self.server.locations.append(self)
self.repos = []
def __str__(self):
return repr(self) #'%s@%s:%d(%dMB)' % (self.user, self.ip, self.port, self.max_size)
def __repr__(self):
return '{path} @ {ip}({size_left:5}MB of {max_size:5}MB left)'.format(path=self.path, ip=self.server.ip, size_left=self.max_size - self.committed_size(), max_size=self.max_size)
def committed_size(self):
return sum([r.commit_size() for r in self.repos])
def main():
(servers, repos, locations) = init_test_data()
run = 0
while distribute(servers, repos, locations, run):
print_state(servers)
run += 1
def distribute(servers, repos, locations, run=0):
#below is version 0.2 of the distribution protocol.
placed = False
#set the base locations
for repo in repos:
if repo not in repo.base_location.repos:
repo.base_location.repos.append(repo)
if repo.base_location not in repo.locations:
repo.locations.append(repo.base_location)
repos = sorted(repos, key=lambda r : r.local_size, reverse=True)
locations = sorted(locations, key=lambda l : l.max_size - l.committed_size(), reverse=True)
for repo in repos:
possible_locations = find_possible_locations(repo, locations)
print("Will try to place {repo} in {min_loc} of {pos}".format(repo=repo, min_loc=repo.min_locations - len(repo.locations) if run == 0 else 1, pos=possible_locations))
for placing in (range(repo.min_locations - 1) if run == 0 else [0]):
if len(possible_locations) > 0:
print("Placing {repo} in {loc}".format(repo=repo, loc=possible_locations[-1]))
placed = True
repo.locations.append(possible_locations[-1])
possible_locations[-1].repos.append(repo)
possible_locations = possible_locations[:-1]
elif run == 0:
print('wine!')
break
return placed
def init_test_data():
servers = []
for i in range(MAX_SERVERS):
s = Server(ip='ip_' + str(i), port=random.randint(1024,65535), user=random_str(5))
servers.append(s)
locations = []
for i in range(MAX_LOCATIONS):
locations.append(Location(max_up=random.randint(0,500), max_down=random.randint(0,500), max_size=random.randint(1000,100000), path="/" + random_str(4), server=random.choice(servers)))
repos = []
for i in range(MAX_REPOS):
repos.append(Repo(name='repo_' + str(i), safe_mode=i % 3 == 0, local_size=int(1.02 ** random.randint(100,550)), min_locations=i % 3 + 1, locations=[]))
for repo in repos:
locs = find_possible_locations(repo, locations)
if len(locs) > 0:
loc = random.choice(locs)
repo.base_location=loc
repo.locations.append(repo.base_location)
loc.repos.append(repo)
else:
print("testset broken")
sys.exit(-1)
print_state(servers)
return (servers, repos, locations)
def random_str(number):
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(number))
def find_possible_locations(repo, locations):
result = []
for loc in locations:
serv_repos = []
for l in loc.server.locations:
serv_repos += l.repos
if repo not in serv_repos and (loc.max_size - loc.committed_size()) > repo.commit_size():
result.append(loc)
return result
def print_state(s):
print('Servers:')
if type(s) == list:
for serv in s:
print(serv)
else:
print(s)
print("*"*200)
if __name__ == '__main__':
main()
| 31.149068 | 185 | 0.705085 | 792 | 5,015 | 4.270202 | 0.189394 | 0.022768 | 0.028386 | 0.014193 | 0.152868 | 0.041987 | 0 | 0 | 0 | 0 | 0 | 0.01939 | 0.15673 | 5,015 | 160 | 186 | 31.34375 | 0.780326 | 0.098903 | 0 | 0.141732 | 0 | 0.007874 | 0.054102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.141732 | false | 0 | 0.007874 | 0.062992 | 0.275591 | 0.086614 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94370721e69f25f233cc1d50ab8e488f9896e6fc | 881 | py | Python | Basic Data Structures/string/StringToInteger.py | rush2catch/algorithms-leetcode | 38a5e6aa33d48fa14fe09c50c28a2eaabd736e55 | [
"MIT"
] | null | null | null | Basic Data Structures/string/StringToInteger.py | rush2catch/algorithms-leetcode | 38a5e6aa33d48fa14fe09c50c28a2eaabd736e55 | [
"MIT"
] | null | null | null | Basic Data Structures/string/StringToInteger.py | rush2catch/algorithms-leetcode | 38a5e6aa33d48fa14fe09c50c28a2eaabd736e55 | [
"MIT"
] | null | null | null | class Solution(object):
    def compareVersion(self, version1, version2):
        v1 = version1.split('.')
        v2 = version2.split('.')
        # Compare component by component; a missing component counts as 0,
        # so e.g. '1.0' == '1' and '1.0.1' > '1'. (The original only compared
        # the first two components, which misjudged longer versions.)
        for i in range(max(len(v1), len(v2))):
            num_v1 = self.convert(v1[i]) if i < len(v1) else 0
            num_v2 = self.convert(v2[i]) if i < len(v2) else 0
            if num_v1 < num_v2:
                return -1
            elif num_v1 > num_v2:
                return 1
        return 0

    def convert(self, s):
        # Manual string-to-integer conversion, digit by digit.
        if not s:
            return 0
        ans = 0
        n = len(s)
        table = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                 '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
        for i in range(n - 1, -1, -1):
            ans += table[s[i]] * (10 ** (n - 1 - i))
        return ans


obj = Solution()
s1 = '0.1'
s2 = '1.23'
s3 = '1.22'
print(obj.compareVersion(s2, s3))
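# A few extra checks (expected results noted alongside):
print(obj.compareVersion(s1, s2))        # 0.1 < 1.23      -> -1
print(obj.compareVersion('1.0', '1'))    # trailing zero   ->  0
print(obj.compareVersion('1.0.1', '1'))  # extra component ->  1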
| 17.62 | 80 | 0.553916 | 163 | 881 | 2.895706 | 0.288344 | 0.042373 | 0.076271 | 0.033898 | 0.161017 | 0.161017 | 0 | 0 | 0 | 0 | 0 | 0.129376 | 0.254257 | 881 | 49 | 81 | 17.979592 | 0.589041 | 0 | 0 | 0.205128 | 0 | 0 | 0.026107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0 | 0 | 0.25641 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
943809361a17063e9ad22cf431e9277484379aec | 1,003 | py | Python | src/day3.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | src/day3.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | src/day3.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | #!/bin/python3
# Copyright (C) 2020 Matheus Fernandes Bigolin <mfrdrbigolin@disroot.org>
# SPDX-License-Identifier: MIT
"""Day Three, Toboggan Trajectory."""
from sys import argv
from utils import open_file, arrange, usage_and_exit, product
def solve(terrain, slopes):
    """Return the product of the number of trees for each <slopes>,
    according to the <terrain>. """
    return product(map(lambda s: tree(terrain, s), slopes))


def tree(terrain, slope):
    """Calculate the number of trees for a <terrain> and a particular
    <slope>. """
    trees = 0
    for i, _ in enumerate(terrain):
        # Rows advance by slope[0]; columns wrap around the grid width.
        # The bound must be strict (<), otherwise the row index can run
        # one past the end of the terrain.
        if (slope[0] * i < len(terrain)
                and terrain[slope[0] * i][slope[1] * i % len(terrain[0])] == "#"):
            trees += 1
    return trees


SLOPES = [(1, 1), (1, 3), (1, 5), (1, 7), (2, 1)]

if __name__ == "__main__":
    usage_and_exit(len(argv) != 2)
    input_data = arrange(open_file(argv[1]))
    print(solve(input_data, [SLOPES[1]]))
    print(solve(input_data, SLOPES))
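    # Toy self-check (hypothetical 3x3 grid, not real puzzle input): with
    # slope (1, 1) the cells visited are (0, 0), (1, 1) and (2, 2), with
    # columns wrapping modulo the width, so exactly one tree is counted:
    # tree(["..#", "#..", "..#"], (1, 1)) == 1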
| 22.288889 | 74 | 0.63011 | 143 | 1,003 | 4.293706 | 0.475524 | 0.043974 | 0.039088 | 0.052117 | 0.14658 | 0.084691 | 0 | 0 | 0 | 0 | 0 | 0.030457 | 0.214357 | 1,003 | 44 | 75 | 22.795455 | 0.748731 | 0.31007 | 0 | 0 | 0 | 0 | 0.013514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.352941 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
943c68bb009981d99005672e8d3f04e12a35c5f4 | 1,753 | py | Python | utils/large_pickle.py | t-aritake/ancestral_atom_learning | 1af3451058f31dfdd28289bb05e90bb2ec1d9e5d | [
"MIT"
] | null | null | null | utils/large_pickle.py | t-aritake/ancestral_atom_learning | 1af3451058f31dfdd28289bb05e90bb2ec1d9e5d | [
"MIT"
] | null | null | null | utils/large_pickle.py | t-aritake/ancestral_atom_learning | 1af3451058f31dfdd28289bb05e90bb2ec1d9e5d | [
"MIT"
] | null | null | null | import pickle
# Wrap an opened file's read/write functions so that pickle never moves
# more than (2**31 - 1) bytes in a single call.
class LargeObject(object):
    def __init__(self, f):
        self.f = f

    def __getattr__(self, item):
        # When attribute 'item' is looked up, return f.'item'
        # (item is an arbitrary attribute name).
        # In short: everything except read/write is simply delegated
        # to the underlying file object.
        return getattr(self.f, item)

    def write(self, obj):
        # obj already arrives here converted to bytes, so len() works
        # even when the pickled object is a numpy array.
        n = len(obj)
        print("writing total_bytes={0}...".format(n), flush=True)
        idx = 0
        while idx < n:
            # Writing 2**32 bytes at once fails when pickling on macOS,
            # so stop at the largest length representable in 31 bits.
            batch_size = min(n - idx, (1 << 31) - 1)
            print("writing bytes [{0}, {1})... ".format(idx, idx + batch_size), end="", flush=True)
            self.f.write(obj[idx:idx + batch_size])
            print("done.", flush=True)
            idx += batch_size

    def read(self, n):
        if n >= (1 << 31):
            obj = bytearray(n)
            idx = 0
            while idx < n:
                batch_size = min(n - idx, (1 << 31) - 1)
                print("loading bytes [{0}, {1})... ".format(idx, idx + batch_size), end="", flush=True)
                obj[idx:idx + batch_size] = self.f.read(batch_size)
                print("done.", flush=True)
                idx += batch_size
            return obj
        return self.f.read(n)


def pickle_dump(obj, file_path):
    with open(file_path, 'wb') as f:
        return pickle.dump(obj, LargeObject(f), protocol=pickle.HIGHEST_PROTOCOL)


def pickle_load(file_path):
    with open(file_path, 'rb') as f:
        return pickle.load(LargeObject(f))


if __name__ == '__main__':
    import numpy
    data = numpy.random.normal(size=(2 ** 14, 2 ** 15))
    pickle_dump(data, './test.pkl')
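    # Hypothetical round-trip check for the dump above (same file path):
    restored = pickle_load('./test.pkl')
    assert restored.shape == data.shape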
| 31.303571 | 101 | 0.560753 | 215 | 1,753 | 4.413953 | 0.339535 | 0.085353 | 0.075869 | 0.063224 | 0.325606 | 0.269758 | 0.219178 | 0.219178 | 0.219178 | 0.084299 | 0 | 0.025224 | 0.298916 | 1,753 | 55 | 102 | 31.872727 | 0.746949 | 0.124929 | 0 | 0.263158 | 0 | 0 | 0.074705 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.052632 | 0.026316 | 0.368421 | 0.131579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
943f4b96e47fad94ad779b8d7570c1b93a4ab158 | 4,373 | py | Python | RpiServer/main.py | MaxThom/LightYourHearth | bdd6b871ce076d6fa7ab7ad721d345c6f7e5fc5c | [
"MIT"
] | null | null | null | RpiServer/main.py | MaxThom/LightYourHearth | bdd6b871ce076d6fa7ab7ad721d345c6f7e5fc5c | [
"MIT"
] | 4 | 2021-06-08T22:21:15.000Z | 2022-03-12T00:47:34.000Z | RpiServer/main.py | MaxThom/LightYourHearth-rpi | e8b01569e583d7aa3343b9bdcd8b0e7d4834ae5f | [
"MIT"
] | null | null | null | from blue_connect import BluetoothComm
from ledstrip import Ledstrip
from button_manager import ButtonManager
import subprocess
import constants
import pprint
import time
import datetime
# ps -ef | grep python
# sudo kill -9 [pid]
# pip freeze > requirements.txt
# pip install -r requirements.txt
# sudo systemctl status lightyourhearth.service
# sudo systemctl enable lightyourhearth.service
# sudo systemctl disable lightyourhearth.service
# sudo systemctl start lightyourhearth.service
# sudo systemctl stop lightyourhearth.service
def main():
    global blue_comm
    try:
        launch_bluetooth_server("")
    except Exception as e:
        log_message(e)
        btn_manager.close()
        if blue_comm:
            blue_comm.close()


def button_callback(channel):
    global is_off
    global last_command
    if is_off and last_command[0] != constants.LED_OFF:
        is_off = False
        commandAction[last_command[0]](last_command[1])
    else:
        is_off = True
        pixels.pixel_off(None)


def launch_bluetooth_server(args):
    global blue_comm
    blue_comm = BluetoothComm()
    blue_comm.accept_comm()
    blue_comm.read_comm_async(on_bluetooth_message_received)


def on_bluetooth_message_received(msg):
    global blue_comm
    global last_command
    global is_off
    print(msg)
    all_commands = msg.split('&')
    if len(all_commands) > 1:
        all_commands.pop(0)
    for cmd in all_commands:
        arr = cmd.split(":")
        name = arr[0]
        args = {}
        if len(arr) > 1 and arr[1]:
            arg_str = arr[1]
            for val in arg_str.split(","):
                key_value = val.split("=")
                args[key_value[0]] = key_value[1]
        print(name)
        pprint.pprint(args)
        if name in commandAction:
            # Stateless commands are not remembered as the "last" command,
            # so the power button restores the last visible animation.
            if (name != constants.LED_SET_BRIGHTNESS
                    and name != constants.LED_SETTINGS
                    and name != constants.LED_ANIMATION_CAPABILITIES
                    and name != constants.BLUETOOTH_DISCONNECT):
                last_command = (name, args)
                is_off = False
            commandAction[name](args)
        else:
            print("Unknown command")


def get_server_capabilities(args):
    global blue_comm
    for anim in constants.SERVER_CAPABILITIES:
        print("Cap:" + anim + ":" + pprint.pformat(constants.SERVER_CAPABILITIES[anim]))
        time.sleep(0.075)
        blue_comm.send_comm("Cap:" + anim + ":" + pprint.pformat(constants.SERVER_CAPABILITIES[anim]))


def log_message(msg):
    print(msg)
    with open("/home/pi/Desktop/LightYourHearth-rpi/Logs/LightYourHeath_Logs.txt", "a") as f:
        f.write("[%s] -> %s.\n" % (datetime.datetime.now(), msg))
btn_manager = ButtonManager(button_callback)
pixels = Ledstrip()
blue_comm = None
is_off = False
last_command = (constants.LED_OFF, None)
commandAction = {
    constants.BLUETOOTH_DISCONNECT: launch_bluetooth_server,
    constants.LED_OFF: pixels.pixel_off,
    constants.LED_RAINBOW_COLOR: pixels.pixel_rainbow_colors,
    constants.LED_RAINBOW_CYCLE: pixels.pixel_rainbow_cycle,
    constants.LED_RAINBOW_CYCLE_SUCCESSIVE: pixels.pixel_rainbow_cycle_successive,
    constants.LED_BRIGHTNESS_DECREASE: pixels.pixel_brightness_decrease,
    constants.LED_BLINK_COLOR: pixels.pixel_blink_color,
    constants.LED_APPEAR_FROM_BACK: pixels.pixel_appear_from_back,
    constants.LED_COLOR_WIPE: pixels.pixel_color_wipe,
    constants.LED_COLOR_PAIR: pixels.pixel_color_pair,
    constants.LED_COLOR_WIPE_CYCLE: pixels.pixel_color_wipe_cycle,
    constants.LED_COLOR_WIPE_RAINBOW: pixels.pixel_color_wipe_rainbow,
    constants.LED_THEATER_CHASE: pixels.pixel_theater_chase,
    constants.LED_BREATHING: pixels.pixel_breathing,
    constants.LED_BREATHING_LERP: pixels.pixel_breathing_lerp,
    constants.LED_BREATHING_RAINBOW: pixels.pixel_breathing_rainbow,
    constants.LED_FIREWORKS: pixels.pixel_fireworks,
    constants.LED_LABYRINTH: pixels.pixel_labyrinth,
    constants.LED_SET_BRIGHTNESS: pixels.set_brightness,
    constants.LED_SETTINGS: pixels.set_settings,
    constants.LED_ANIMATION_CAPABILITIES: get_server_capabilities
}
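# Expected wire format, inferred from the parser in
# on_bluetooth_message_received (the exact constant string values are
# assumptions, not confirmed): each command looks like
# "&<name>:<key>=<value>,<key>=<value>" and several commands may be
# concatenated, e.g.
# "&LED_BLINK_COLOR:r=255,g=0,b=0&LED_SET_BRIGHTNESS:brightness=50"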
if __name__ == "__main__":
    pprint.pprint(constants.SERVER_CAPABILITIES)
    main()
| 34.706349 | 102 | 0.686943 | 524 | 4,373 | 5.429389 | 0.26145 | 0.105448 | 0.036555 | 0.049209 | 0.035852 | 0.035852 | 0.035852 | 0.035852 | 0 | 0 | 0 | 0.004734 | 0.227075 | 4,373 | 125 | 103 | 34.984 | 0.836982 | 0.075234 | 0 | 0.148515 | 0 | 0 | 0.028763 | 0.016117 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059406 | false | 0 | 0.079208 | 0 | 0.138614 | 0.089109 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9440dc39866b4ed3cdff0a21be7c14bc4667a7e9 | 837 | py | Python | tutorials/10-days-of-statistics/s10-normal-distribution-2.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 41 | 2018-05-11T07:54:34.000Z | 2022-03-29T19:02:32.000Z | tutorials/10-days-of-statistics/s10-normal-distribution-2.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 2 | 2021-09-13T10:03:26.000Z | 2021-10-04T10:21:05.000Z | tutorials/10-days-of-statistics/s10-normal-distribution-2.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 21 | 2019-01-23T19:06:59.000Z | 2021-12-23T16:03:47.000Z | # Tutorials > 10 Days of Statistics > Day 5: Normal Distribution II
# Problems based on basic statistical distributions.
#
# https://www.hackerrank.com/challenges/s10-normal-distribution-2/problem
# challenge id: 21230
#
import math
def N(x, µ, σ):
    """ Normal Distribution """
    π = math.pi
    return math.exp(- (x - µ) ** 2 / (2 * σ * σ)) / (σ * math.sqrt(2 * π))


def Φ(x, µ, σ):
    """ Cumulative Probability """
    return 1 / 2 * (1 + math.erf((x - µ) / σ / math.sqrt(2)))


µ, σ = map(float, input().split())
q1 = float(input())
q2 = float(input())

# percentage of students having grade > q1
print("{:.2f}".format(100 - Φ(q1, µ, σ) * 100))

# percentage of students having grade ≥ q2
print("{:.2f}".format(100 - Φ(q2, µ, σ) * 100))

# percentage of students having grade < q2
print("{:.2f}".format(Φ(q2, µ, σ) * 100))
| 24.617647 | 74 | 0.610514 | 131 | 837 | 3.908397 | 0.450382 | 0.027344 | 0.017578 | 0.152344 | 0.287109 | 0.140625 | 0.140625 | 0.140625 | 0 | 0 | 0 | 0.065574 | 0.198327 | 837 | 33 | 75 | 25.363636 | 0.695976 | 0.450418 | 0 | 0 | 0 | 0 | 0.041002 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.416667 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9441d12538aebf4fa87c06330fdaa81538b0282c | 2,214 | py | Python | cloud.py | ArkinSolomon/Cuby | c0957efee8f2f29cc85fb155a4f8fd5658482881 | [
"MIT"
] | null | null | null | cloud.py | ArkinSolomon/Cuby | c0957efee8f2f29cc85fb155a4f8fd5658482881 | [
"MIT"
] | null | null | null | cloud.py | ArkinSolomon/Cuby | c0957efee8f2f29cc85fb155a4f8fd5658482881 | [
"MIT"
] | null | null | null | import pygame
from random import randrange
from cloud_part import Cloud_Part
import math
'''
This class creates clouds at random locations on the screen.
'''


class Cloud(object):

    # Class initializer
    def __init__(self, screen, width, height, horizontal_constraints, vertical_constraints, VERBOSE):
        super(Cloud, self).__init__()
        self.clouds = pygame.sprite.Group()
        self.part_size = 40

        # All possible clouds, encoded as 6-bit patterns (two rows of three parts)
        POSSIBLE_CLOUDS = [30, 15, 39, 38, 57, 60, 58, 23]

        # Print a random amount of clouds
        curr_x = horizontal_constraints[0]
        if VERBOSE: print('Creating cloud set')
        x_screens = int(math.ceil((horizontal_constraints[1] - horizontal_constraints[0]) / width))
        y_screens = int(math.ceil((vertical_constraints[1] - vertical_constraints[0]) / height))
        for _ in range(x_screens):
            curr_y = vertical_constraints[0]
            for _ in range(y_screens):
                for _ in range(randrange(8, 12, 1)):
                    # randrange's stop is exclusive; use the full length so
                    # the last pattern can also be picked.
                    CLOUD = POSSIBLE_CLOUDS[randrange(0, len(POSSIBLE_CLOUDS))]
                    current_cloud = list(bin(CLOUD)[2:])

                    # Make sure cloud has 6 parts
                    if len(current_cloud) < 6:
                        for _ in range(6 - len(current_cloud)):
                            current_cloud.insert(0, '0')

                    x = randrange(curr_x, curr_x + (width - (self.part_size * 3)), self.part_size)
                    y = randrange(curr_y, curr_y + (height - (self.part_size * 3)), self.part_size)
                    y_inc_count = 0
                    if VERBOSE: print('Creating cloud at (%d, %d) [C#%d]' % (x, y, CLOUD))

                    # Create cloud
                    for cloud_part in current_cloud:
                        if cloud_part == '1':
                            self.clouds.add(Cloud_Part(screen, x, y, self.part_size))
                        x += self.part_size
                        y_inc_count += 1
                        if y_inc_count == 3:
                            y += self.part_size
                            x -= self.part_size * 3
                curr_y += height
            curr_x += width
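# Hypothetical usage sketch (window size and scroll constraints are made-up
# values; pygame must be initialised and Cloud_Part handles the drawing):
# screen = pygame.display.set_mode((800, 600))
# sky = Cloud(screen, 800, 600, (0, 1600), (0, 1200), VERBOSE=True)
# sky.clouds.draw(screen)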
| 38.842105 | 101 | 0.533875 | 264 | 2,214 | 4.25 | 0.30303 | 0.064171 | 0.096257 | 0.034759 | 0.157754 | 0.157754 | 0.092692 | 0.092692 | 0 | 0 | 0 | 0.031496 | 0.369015 | 2,214 | 56 | 102 | 39.535714 | 0.771654 | 0.049684 | 0 | 0 | 0 | 0 | 0.025616 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.108108 | 0 | 0.162162 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9441f8090083e12e5068f348f98fdd3f2fe20640 | 2,070 | py | Python | backend/audio_analysis/audio_emotions.py | shan-memery/political-atlas | ecfcd7185592943c34487ec9fae65916d5f2f212 | [
"MIT"
] | 3 | 2021-02-22T00:56:48.000Z | 2021-03-25T06:15:38.000Z | backend/audio_analysis/audio_emotions.py | shan-memery/political-atlas | ecfcd7185592943c34487ec9fae65916d5f2f212 | [
"MIT"
] | 3 | 2021-05-03T18:42:30.000Z | 2022-02-27T12:23:09.000Z | backend/audio_analysis/audio_emotions.py | shan-memery/political-atlas | ecfcd7185592943c34487ec9fae65916d5f2f212 | [
"MIT"
] | null | null | null | import base64
import json
import pathlib
import requests
import sys
BASE_DIR = pathlib.Path(__file__).parent.absolute()
with open(BASE_DIR.joinpath('config.json')) as config_file:
    config = json.load(config_file)

WEB_HOOK = "https://pabe.vanillacre.me/deepwebhook"

querystring: dict = {
    "apikey": config['API_KEY'],
    "webhook": WEB_HOOK
}

payload: dict = {
    "encoding": "mp3",
    "languageCode": "en-US"
}

headers: dict = {
    'Content-Type': "application/json",
}
# Takes a local audio file and processes emotions
# Default: async response -> response sent to Webhook
# Can pass 2nd param of "sync" to get synchronous response & immediately print response.
# Synchronous response limits audio file to 1 minute in duration
def audio_process_emotions_file(audio_file_name: str, response_type: str = "async") -> None:
    file_ending_index = audio_file_name.rfind('.')
    # Skip past the dot so the encoding is e.g. "mp3" rather than ".mp3"
    audio_type: str = audio_file_name[file_ending_index + 1:]
    payload["encoding"] = audio_type
    with open(audio_file_name, 'rb') as fin:
        audio_content = fin.read()
    payload["content"] = base64.b64encode(audio_content).decode('utf-8')
    url: str = f"https://proxy.api.deepaffects.com/audio/generic/api/v2/{response_type}/recognise_emotion"
    response = requests.post(url, json=payload, headers=headers, params=querystring)
    if response_type == "sync":
        print(response.text)
# Takes a remote audio file and processes emotions
# async response -> response sent to Webhook
def audio_process_emotions(audio_url: str):
    audio_type: str = audio_url[audio_url.rfind('.') + 1:]
    if audio_type not in ('wav', 'mp3', 'pcm', 'aac', 'mulaw', 'mp4', 'm4a', 'mov', 'wmv'):
        print("not valid audio_url")
        return
    payload["encoding"] = audio_type
    payload["url"] = audio_url
    url: str = "https://proxy.api.deepaffects.com/audio/generic/api/v2/async/recognise_emotion"
    response = requests.post(url, json=payload, headers=headers, params=querystring)
    print(response.json())
    return response.json().get('request_id')
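# Hypothetical usage (file names and URL are placeholders):
# audio_process_emotions_file('speech.mp3')        # async; result posted to WEB_HOOK
# audio_process_emotions_file('clip.wav', 'sync')  # <= 1 minute of audio; prints result
# request_id = audio_process_emotions('https://example.com/talk.mp3')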
| 33.934426 | 106 | 0.702415 | 278 | 2,070 | 5.064748 | 0.395683 | 0.044744 | 0.036932 | 0.02983 | 0.267045 | 0.225852 | 0.177557 | 0.177557 | 0.177557 | 0.115057 | 0 | 0.009227 | 0.162319 | 2,070 | 60 | 107 | 34.5 | 0.802768 | 0.165217 | 0 | 0.095238 | 0 | 0.047619 | 0.228356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.119048 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
944340e0a695126f125615f284c92afb1ac4bd00 | 4,587 | py | Python | gravy/sndfile/__init__.py | greenbender/django-gravy | cbdf33db31c73797ca704a89707b6bba48fb3cb9 | [
"BSD-3-Clause"
] | 6 | 2018-04-02T22:00:57.000Z | 2021-12-17T00:33:12.000Z | gravy/sndfile/__init__.py | greenbender/django-gravy | cbdf33db31c73797ca704a89707b6bba48fb3cb9 | [
"BSD-3-Clause"
] | null | null | null | gravy/sndfile/__init__.py | greenbender/django-gravy | cbdf33db31c73797ca704a89707b6bba48fb3cb9 | [
"BSD-3-Clause"
] | null | null | null | from _binding import *
class SndFileError(Exception):
    pass


class VirtualIO(SF_VIRTUAL_IO):

    def __init__(self, fd):
        # references
        self._fd = fd
        self._c_char = c_char
        self._string_at = string_at

        def get_filelen(userdata):
            where = self._fd.tell()
            self._fd.seek(0, SEEK_END)
            length = self._fd.tell()
            self._fd.seek(where, SEEK_SET)
            return length

        def seek(count, whence, userdata):
            self._fd.seek(count, whence)
            return self._fd.tell()

        def read(data, count, userdata):
            dst = (self._c_char * count).from_address(data)
            buf = self._fd.read(count)
            length = len(buf)
            dst[:length] = buf
            return length

        def write(data, count, userdata):
            self._fd.write(self._string_at(data, count))
            return count

        def tell(userdata):
            return self._fd.tell()

        return super(VirtualIO, self).__init__(
            get_filelen=sf_vio_get_filelen(get_filelen),
            seek=sf_vio_seek(seek),
            read=sf_vio_read(read),
            write=sf_vio_write(write),
            tell=sf_vio_tell(tell)
        )


class SndFile(object):

    default_frames = 1024

    def __init__(self, obj, mode='r', major=None, subtype=None, samplerate=None, channels=None):
        self._sndfile = None

        # references
        self._obj = obj
        self._sf_close = sf_close

        # create sfinfo
        self._sfinfo = SF_INFO()
        if samplerate:
            self._sfinfo.samplerate = samplerate
        if channels:
            self._sfinfo.channels = channels
        if major:
            self._sfinfo.format = self._parse_format(major, subtype)

        # open sndfile
        mode = self._parse_mode(mode)
        if isinstance(self._obj, basestring):
            self._sndfile = sf_open(self._obj, mode, byref(self._sfinfo))
        elif hasattr(self._obj, 'fileno'):
            self._sndfile = sf_open_fd(self._obj.fileno(), mode, byref(self._sfinfo), 0)
        else:
            self._vio = VirtualIO(self._obj)
            self._sndfile = sf_open_virtual(byref(self._vio), mode, byref(self._sfinfo), None)
        if self._sndfile is None:
            raise SndFileError(sf_strerror(self._sndfile))

    def __getattr__(self, name):
        return getattr(self._sfinfo, name)

    def __del__(self):
        self.close()

    def __iter__(self):
        return self

    @staticmethod
    def _parse_format(major, subtype=None):
        fmt = 0
        if major == 'WAV':
            fmt |= SF_FORMAT_WAV
            subtype = subtype or 'PCM_32'
        elif major == 'OGG':
            fmt |= SF_FORMAT_OGG
            subtype = subtype or 'VORBIS'
        if subtype == 'PCM_32':
            fmt |= SF_FORMAT_PCM_32
        elif subtype == 'GSM610':
            fmt |= SF_FORMAT_GSM610
        elif subtype == 'VORBIS':
            fmt |= SF_FORMAT_VORBIS
        return fmt

    @staticmethod
    def _parse_mode(mode):
        if mode == 'r':
            return SFM_READ
        if mode == 'r+':
            return SFM_RDWR
        if mode == 'w':
            return SFM_WRITE
        if mode == 'w+':
            return SFM_WRITE
        raise ValueError('Bad mode %s' % mode)

    def _get_remaining_frames(self):
        return self._sfinfo.frames - sf_seek(self._sndfile, 0, SEEK_CUR)

    def _get_frame_buffer(self, frames=None):
        frames = frames or self.default_frames
        count = self._sfinfo.channels * frames
        if not hasattr(self, '_buf') or len(self._buf) < count:
            self._buf = (c_int * count)()
        return self._buf

    def _get_item_buffer(self, count):
        if not hasattr(self, '_buf') or len(self._buf) < count:
            self._buf = (c_int * count)()
        return self._buf

    def read(self, frames=None):
        frames = frames or self._get_remaining_frames()
        buf = self._get_frame_buffer(frames)
        count = sf_read_int(self._sndfile, buf, len(buf))
        return buf[:count]

    def write(self, items):
        count = len(items)
        buf = self._get_item_buffer(count)
        buf[:count] = items
        count = sf_write_int(self._sndfile, buf, count)
        return count

    def next(self):
        items = self.read(self.default_frames)
        if not items:
            raise StopIteration
        return items

    def close(self):
        if self._sndfile:
            self._sf_close(self._sndfile)
            self._sndfile = None
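# Hypothetical usage sketch (file names are placeholders; note this is
# Python 2 style code, as `basestring` and `next()` above suggest):
# src = SndFile('in.wav')
# dst = SndFile('out.ogg', 'w', major='OGG',
#               samplerate=src.samplerate, channels=src.channels)
# for items in src:
#     dst.write(items)
# dst.close()
# src.close()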
| 29.216561 | 96 | 0.571179 | 554 | 4,587 | 4.425993 | 0.184116 | 0.053834 | 0.022431 | 0.020799 | 0.128059 | 0.115008 | 0.081566 | 0.055465 | 0.055465 | 0.055465 | 0 | 0.006485 | 0.327665 | 4,587 | 156 | 97 | 29.403846 | 0.788586 | 0.010464 | 0 | 0.146341 | 0 | 0 | 0.014998 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.154472 | false | 0.00813 | 0.00813 | 0.03252 | 0.349594 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9449a2b0d6c1565d42d9cb5e3842bc3035d92db9 | 833 | py | Python | basics/testTkinter1.py | lostFox/autoRunSomething | 519f2ebca6e2c78aa3caeed2e88b8f92403a8b46 | [
"BSD-3-Clause"
] | null | null | null | basics/testTkinter1.py | lostFox/autoRunSomething | 519f2ebca6e2c78aa3caeed2e88b8f92403a8b46 | [
"BSD-3-Clause"
] | 1 | 2020-04-28T07:54:19.000Z | 2020-05-23T17:57:43.000Z | basics/testTkinter1.py | lostFox/autoRunSomething | 519f2ebca6e2c78aa3caeed2e88b8f92403a8b46 | [
"BSD-3-Clause"
] | null | null | null | """tkimage.pyw 简单的图片查看器
"""
import tkinter as tk
import tkinter.filedialog as fd
def openimage(canvas):
"""事件处理函数:使用文件对话框打开图片
"""
filename = fd.askopenfilename(filetypes=[("PNG图片", "*.png"),
("GIF图片", "*.gif")])
global image # 注意这个需要定义为全局变量
image = tk.PhotoImage(file=filename)
canvas.create_image((0, 0), image=image, anchor="nw")
def main():
"""主函数:设置窗口部件,指定按钮点击事件处理函数
"""
window = tk.Tk()
window.geometry("600x480")
window.title("简单的图片查看器")
canvas = tk.Canvas(window, width=600, height=440)
canvas.pack(side="bottom")
button = tk.Button(window, text="打开图片",
command=lambda: openimage(canvas))
button.pack()
tk.mainloop()
if __name__ == "__main__":
main()
| 25.242424 | 66 | 0.565426 | 86 | 833 | 5.372093 | 0.616279 | 0.056277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023411 | 0.282113 | 833 | 32 | 67 | 26.03125 | 0.749164 | 0.105642 | 0 | 0 | 0 | 0 | 0.079023 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9449bc4a88c928fcca8b119b92228696c1d3f838 | 1,027 | py | Python | tests/test_data.py | eyaler/alt-reality-cam | 317c84239b64dc761f05f38c2d0024dc82237fa4 | [
"MIT"
] | 3 | 2019-05-04T14:04:41.000Z | 2020-12-10T22:05:03.000Z | tests/test_data.py | eyaler/alt-reality-cam | 317c84239b64dc761f05f38c2d0024dc82237fa4 | [
"MIT"
] | null | null | null | tests/test_data.py | eyaler/alt-reality-cam | 317c84239b64dc761f05f38c2d0024dc82237fa4 | [
"MIT"
] | null | null | null | import joblib
import pandas as pd
from util import draw_boxes, display_image, get_image_from_s3, get_image_boxes
import os
objects = joblib.load(os.path.join('..','data','data.joblib'))
id2url = joblib.load(os.path.join('..','data','id2url.joblib'))
meta = pd.read_csv(os.path.join('..','open_images_', 'validation-images-with-rotation.csv'))
meta.set_index('ImageID', inplace=True)
print('data loaded')
for index, row in meta.iterrows():
    if index not in id2url:
        continue
    # if not np.isnan(row.Rotation):
    if row.Rotation != 270:
        continue
    '''
    image_url = id2url[index]
    try:
        image = get_image(image_url, rotate=id2rot[index])
    except:
        print('error downloading: ' + image_url)
        continue
    '''
    image = get_image_from_s3(index)
    result = get_image_boxes(objects, index)
    print(row.Rotation)
    print(result)
    image_with_boxes = draw_boxes(image, result["detection_boxes"], result["detection_class_names"])
    display_image(image_with_boxes)
| 26.333333 | 100 | 0.682571 | 141 | 1,027 | 4.765957 | 0.397163 | 0.059524 | 0.058036 | 0.050595 | 0.127976 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0.011876 | 0.180136 | 1,027 | 38 | 101 | 27.026316 | 0.786223 | 0.029211 | 0 | 0.1 | 0 | 0 | 0.171817 | 0.069221 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
944e55182faf9a6527a5868bbcdc772b013e1b6d | 1,221 | py | Python | bcipy/signal/model/inference.py | theGreenJedi/BciPy | 222ac9b79f1ab3374e888be9e9b8d86f88d1bc82 | [
"MIT"
] | null | null | null | bcipy/signal/model/inference.py | theGreenJedi/BciPy | 222ac9b79f1ab3374e888be9e9b8d86f88d1bc82 | [
"MIT"
] | 5 | 2021-06-08T23:56:19.000Z | 2022-03-12T00:57:00.000Z | bcipy/signal/model/inference.py | theGreenJedi/BciPy | 222ac9b79f1ab3374e888be9e9b8d86f88d1bc82 | [
"MIT"
] | null | null | null | import numpy as np
def inference(x, targets, model, alphabet):
    """Evaluate the log likelihood ratios given the model and input,
    then map the distribution over the alphabet.

    Args:
        x(ndarray(float)): 3-dimensional np array; first dimension is
            channels, second dimension is trials and third dimension is
            time samples.
        targets(ndarray[str]): flashed symbols in order.
        model(pipeline): trained likelihood model.
        alphabet(list[str]): symbol set. Letters in the alphabet, all uppercase.
    Return:
        lik_r(ndarray[float]): likelihood array.
    """
    # Evaluates the likelihood probabilities for p(e|l=1) and p(e|l=0)
    scores = np.exp(model.transform(x))
    # Evaluates the likelihood ratios; the 1e-10 terms guard against
    # division by zero
    scores = scores[:, 1] / (scores[:, 0] + np.power(.1, 10)) + np.power(.1, 10)
    # print("These values should be between 0 and 1: ", scores)

    # This maps the likelihood distribution over the alphabet.
    # A letter that never appears in the target string keeps a ratio of 1.
    lik_r = np.ones(len(alphabet))
    for idx in range(len(scores)):
        lik_r[alphabet.index(targets[idx])] *= scores[idx]
    return lik_r
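# Shape-only illustration (hypothetical objects; `model` is assumed to be a
# fitted pipeline exposing .transform): x has shape (channels, trials,
# samples), targets lists the flashed symbols in order, and each trial's
# ratio multiplies into the matching letter of the alphabet:
# lik_r = inference(x, ['A', 'B'], model, list(string.ascii_uppercase))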
| 37 | 80 | 0.657658 | 173 | 1,221 | 4.618497 | 0.50289 | 0.055069 | 0.047559 | 0.067584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015184 | 0.244881 | 1,221 | 32 | 81 | 38.15625 | 0.85141 | 0.651106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
944f4e07ae76e0175a6fe84669287a44fcfa23a9 | 1,231 | py | Python | test/CheckManpage.py | sanel/ledger | 81e1b4ff3c78cd18e5d8c3844eb8e406dc3c8836 | [
"BSD-3-Clause"
] | 3,509 | 2015-01-01T11:47:51.000Z | 2022-03-30T09:22:43.000Z | test/CheckManpage.py | sanel/ledger | 81e1b4ff3c78cd18e5d8c3844eb8e406dc3c8836 | [
"BSD-3-Clause"
] | 651 | 2015-01-09T16:18:10.000Z | 2022-03-26T23:52:00.000Z | test/CheckManpage.py | sanel/ledger | 81e1b4ff3c78cd18e5d8c3844eb8e406dc3c8836 | [
"BSD-3-Clause"
] | 440 | 2015-01-02T21:28:11.000Z | 2022-03-25T05:38:08.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
import os
import argparse
from os.path import *
from subprocess import Popen, PIPE
from CheckOptions import CheckOptions
class CheckManpage (CheckOptions):
    def __init__(self, args):
        CheckOptions.__init__(self, args)
        self.option_pattern = r'\.It Fl \\-([-A-Za-z]+)'
        self.function_pattern = r'\.It Fn ([-A-Za-z_]+)'
        self.source_file = join(self.source, 'doc', 'ledger.1')
        self.source_type = 'manpage'


if __name__ == "__main__":
    def getargs():
        parser = argparse.ArgumentParser(prog='CheckManpage',
            description='Check that ledger options are documented in the manpage')
        parser.add_argument('-l', '--ledger',
            dest='ledger',
            type=str,
            action='store',
            required=True,
            help='the path to the ledger executable to test with')
        parser.add_argument('-s', '--source',
            dest='source',
            type=str,
            action='store',
            required=True,
            help='the path to the top level ledger source directory')
        return parser.parse_args()

    args = getargs()
    script = CheckManpage(args)
    status = script.main()
    sys.exit(status)
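# Typical invocation (paths are examples):
#   python test/CheckManpage.py --ledger ./ledger --source .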
| 26.76087 | 82 | 0.649878 | 154 | 1,231 | 5.006494 | 0.525974 | 0.038911 | 0.031128 | 0.020752 | 0.119326 | 0.119326 | 0.119326 | 0.119326 | 0.119326 | 0.119326 | 0 | 0.00207 | 0.215272 | 1,231 | 45 | 83 | 27.355556 | 0.796066 | 0.034119 | 0 | 0.166667 | 0 | 0 | 0.232519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.333333 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9450618219812524336f36a73f4f2432207e9704 | 1,789 | py | Python | examples/somebenchmark.py | dunnkers/fseval | 49a11a63e09e65b1f14389b6ba3a9ae3aeae086d | [
"MIT"
] | 5 | 2020-07-08T11:58:46.000Z | 2022-01-26T13:58:00.000Z | examples/somebenchmark.py | dunnkers/fseval | 49a11a63e09e65b1f14389b6ba3a9ae3aeae086d | [
"MIT"
] | 63 | 2021-05-09T06:18:24.000Z | 2022-03-27T18:05:58.000Z | examples/somebenchmark.py | dunnkers/fseval | 49a11a63e09e65b1f14389b6ba3a9ae3aeae086d | [
"MIT"
] | 1 | 2022-02-11T03:24:14.000Z | 2022-02-11T03:24:14.000Z | import hydra
from fseval.adapters.openml import OpenMLDataset
from fseval.config import DatasetConfig, EstimatorConfig, PipelineConfig
from fseval.main import run_pipeline
from fseval.types import Task
from hydra.core.config_store import ConfigStore
from sklearn.base import BaseEstimator
from sklearn.feature_selection import f_classif
cs = ConfigStore.instance()
### 📈 Define Feature Ranker
class ANOVAFValueClassifier(BaseEstimator):
    def fit(self, X, y):
        scores, _ = f_classif(X, y)
        self.feature_importances_ = scores
        return self  # returning self follows the sklearn fit() convention


anova_ranker = EstimatorConfig(
    name="Anova F-Value",
    estimator={"_target_": "somebenchmark.ANOVAFValueClassifier"},
    _estimator_type="classifier",
    estimates_feature_importances=True,
)
cs.store(group="ranker", name="anova_f_value", node=anova_ranker)

### 🧾 Define validator
knn_estimator = EstimatorConfig(
    name="k-NN",
    estimator={"_target_": "sklearn.neighbors.KNeighborsClassifier"},
    _estimator_type="classifier",
    estimates_target=True,
)
cs.store(group="validator", name="knn", node=knn_estimator)

### 💾 Define datasets
cs.store(
    group="dataset",
    name="iris",
    node=DatasetConfig(
        name="iris",
        task=Task.classification,
        adapter=OpenMLDataset(dataset_id=61, target_column="class"),
    ),
)
cs.store(
    group="dataset",
    name="ozone",
    node=DatasetConfig(
        name="Ozone Levels",
        task=Task.classification,
        adapter=OpenMLDataset(dataset_id=1487, target_column="Class"),
    ),
)

### ⚙️ Define pipeline config
cs.store(name="my_config", node=PipelineConfig())


### 🚀 Run fseval
@hydra.main(config_path=None, config_name="my_config")
def main(cfg: PipelineConfig) -> None:
    run_pipeline(cfg)


if __name__ == "__main__":
    main()
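# Typical launch, using Hydra's override syntax with the group/option names
# registered above (a sketch, not the only possible invocation):
#   python somebenchmark.py dataset=iris ranker=anova_f_value validator=knn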
| 24.506849 | 72 | 0.713248 | 208 | 1,789 | 5.961538 | 0.375 | 0.028226 | 0.03871 | 0.024194 | 0.119355 | 0.082258 | 0.082258 | 0 | 0 | 0 | 0 | 0.004011 | 0.163779 | 1,789 | 72 | 73 | 24.847222 | 0.820856 | 0.058133 | 0 | 0.27451 | 0 | 0 | 0.139005 | 0.043739 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.196078 | 0 | 0.254902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94509be309d1816f01aaf5c9843c5c4efa72acc0 | 2,776 | py | Python | onadata/apps/remote_app/models.py | awemulya/fieldsight-kobocat | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 38 | 2017-02-28T05:39:40.000Z | 2019-01-16T04:39:04.000Z | onadata/apps/remote_app/models.py | awemulya/fieldsightt | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 20 | 2017-04-27T09:14:27.000Z | 2019-01-17T06:35:52.000Z | onadata/apps/remote_app/models.py | awemulya/fieldsightt | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 5 | 2017-02-22T12:25:19.000Z | 2019-01-15T11:16:40.000Z | from random import choice
from string import ascii_lowercase, digits
from django.utils import timezone
from django.db import models
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
import jwt
from onadata.apps.fieldsight.models import Project, Site
def generate_random_username():
    chars = ascii_lowercase + digits
    length = 16
    username = ''.join([choice(chars) for _ in range(length)])
    User = get_user_model()
    try:
        User.objects.get(username=username)
        # The name is already taken; retry with a fresh one.
        return generate_random_username()
    except User.DoesNotExist:
        return username


class RemoteApp(models.Model):
    title = models.CharField(max_length=255)
    projects = models.ManyToManyField(
        Project,
        through='ConnectedProject',
        blank=True
    )
    auth_user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    token = models.TextField(blank=True)

    def __str__(self):
        return self.title

    def clean(self):
        if not hasattr(self, 'auth_user') or not self.auth_user:
            User = get_user_model()
            user = User.objects.create_user(
                username=generate_random_username()
            )
            self.auth_user = user
        if not hasattr(self, 'token') or not self.token:
            payload = {'userId': self.auth_user.id}
            self.token = jwt.encode(
                payload,
                settings.SECRET_KEY,
                algorithm='HS256',
            )


class ConnectedProject(models.Model):
    key = models.CharField(max_length=255)
    project = models.ForeignKey(Project)
    app = models.ForeignKey(RemoteApp)
    updated_at = models.DateTimeField(default=None, blank=True, null=True)

    def __str__(self):
        return str(self.project)


class ConnectedDomain(models.Model):
    app = models.ForeignKey(RemoteApp)
    domain = models.CharField(max_length=255)

    def __str__(self):
        return self.domain


@receiver(post_delete, sender=RemoteApp)
def handle_remote_app_delete(sender, instance, **kwargs):
    user = instance.auth_user
    user.delete()


@receiver(post_save, sender=Project)
def handle_project_save(sender, instance, **kwargs):
    project = ConnectedProject.objects.filter(
        project=instance
    ).first()
    if project:
        project.updated_at = timezone.now()
        project.save()


@receiver(post_save, sender=Site)
def handle_site_save(sender, instance, **kwargs):
    project = ConnectedProject.objects.filter(
        project=instance.project
    ).first()
    if project:
        project.updated_at = timezone.now()
        project.save()
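# The stored token can be verified the same way it was produced, e.g.:
# jwt.decode(remote_app.token, settings.SECRET_KEY, algorithms=['HS256'])
# -> {'userId': <auth_user id>}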
| 26.438095 | 74 | 0.67147 | 323 | 2,776 | 5.597523 | 0.303406 | 0.030973 | 0.026549 | 0.039823 | 0.220686 | 0.140487 | 0.140487 | 0.140487 | 0.140487 | 0.140487 | 0 | 0.006591 | 0.23487 | 2,776 | 104 | 75 | 26.692308 | 0.844633 | 0 | 0 | 0.2125 | 0 | 0 | 0.014769 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.125 | 0.0375 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9451d8920aafd4d93813d4d8a6d863e885a6a62c | 2,276 | py | Python | example_code/HABET_tracker.py | M2I-HABET/ahac2020workshop | 7ebc6e2ba3bf10ef18881e96523adc5d54f8d62e | [
"MIT"
] | null | null | null | example_code/HABET_tracker.py | M2I-HABET/ahac2020workshop | 7ebc6e2ba3bf10ef18881e96523adc5d54f8d62e | [
"MIT"
] | null | null | null | example_code/HABET_tracker.py | M2I-HABET/ahac2020workshop | 7ebc6e2ba3bf10ef18881e96523adc5d54f8d62e | [
"MIT"
] | null | null | null | # HABET Basic Tracking Payload CircuitPython Script
# Receives GPS data and transmits it via LoRa radio module
# Utilizes Adafruit Feather M4 Express, Ultimate GPS FeatherWing, RFM95W 433 MHz FeatherWing
# Last updated 9/2/2020 by Austin Trask
import time
import board
import busio
import digitalio
import analogio
import adafruit_rfm9x
# Device ID
FEATHER_ID = b'1'
print("startup")
# For monitoring battery voltage
vbat_voltage = analogio.AnalogIn(board.VOLTAGE_MONITOR)
# Define CS pin for RFM95W LoRa
CS = digitalio.DigitalInOut(board.D10)
# Initialize SPI bus for RFM95W LoRa
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
# Define radio frequency
RADIO_FREQ_MHZ = 433.0
#RADIO_FREQ_MHZ = 915.0
# Define RESET pin
RESET = digitalio.DigitalInOut(board.D11)
# Initialize LoRa Radio
rfm9x = adafruit_rfm9x.RFM9x(spi, CS, RESET, RADIO_FREQ_MHZ)
# Set transmit power (to maximum)
rfm9x.tx_power = 23
# Define RX and TX pins for the GPS
RX = board.RX
TX = board.TX
# Set the UART serial connection for the GPS module
uart = busio.UART(TX, RX, baudrate=9600, timeout=1)
# Define the send message function
def sendMessage(message):
    try:
        # Measures battery voltage and formats it to transmit
        battery_voltage = get_voltage(vbat_voltage)
        v = "{:.2f}".format(battery_voltage)
        # Concatenate as bytes; mixing bytes and str would raise a TypeError
        rfm9x.send(FEATHER_ID + b',' + str(message).encode() + b'V,' + v.encode())
    except Exception:
        print("Message failed to send")


# Define the get voltage function
def get_voltage(pin):
    return (pin.value * 3.3) / 65536 * 2


current = time.monotonic()
old = current
newGPS = False

while True:
    current = time.monotonic()

    # Checks for new GPS data
    if uart.in_waiting > 0:
        gps_string = uart.readline()
        print(gps_string)
        if gps_string and b"GPGGA" in gps_string:
            gps_str = gps_string.decode()
            newGPS = True

    # Sends GPS data every 5 seconds
    if current - old > 5:
        old = current
        if newGPS:
            print(gps_str)
            sendMessage(gps_str)
            newGPS = False
        else:
            # Message for if there is no GPS
            sendMessage("No GPS")
            print("No GPS")

    # Pauses for 0.1 seconds before restarting the cycle
    time.sleep(.1)
| 25.288889 | 92 | 0.685413 | 326 | 2,276 | 4.705521 | 0.429448 | 0.036506 | 0.023468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032683 | 0.233743 | 2,276 | 89 | 93 | 25.573034 | 0.846904 | 0.368629 | 0 | 0.125 | 0 | 0 | 0.039604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0.020833 | 0.1875 | 0.104167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9453b14b9e14bb2251d140e523b7352f31df8836 | 1,058 | py | Python | message_broker/exchange.py | nicolads87/mnemosine | b8057b3bb6482586c663bba2c392eb2c81795a2a | [
"MIT"
] | null | null | null | message_broker/exchange.py | nicolads87/mnemosine | b8057b3bb6482586c663bba2c392eb2c81795a2a | [
"MIT"
] | null | null | null | message_broker/exchange.py | nicolads87/mnemosine | b8057b3bb6482586c663bba2c392eb2c81795a2a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#===============================
# MNEMOSINE PY EXCHANGE
#===============================
from threading import Thread
import queue
import logging
import uuid
from message_broker.broker.broker import Broker
logging.basicConfig(level=logging.INFO,
                    format='[%(threadName)-10s] %(message)s',
                    )


class Exchange(object):
    def __init__(self):
        self.brokers = {}

    def publish(self, queue_name, message, connection):
        # Lazily start a broker thread for this queue on first use
        if self.brokers.get(queue_name) is None:
            broker = Broker(queue_name)
            broker.start()
            self.brokers[queue_name] = broker
        self.brokers[queue_name].publish(message)
        connection.close()

    def subscribe(self, queue_name, connection):
        if self.brokers.get(queue_name) is None:
            broker = Broker(queue_name)
            broker.start()
            self.brokers[queue_name] = broker
        self.brokers[queue_name].subscribe(connection)
| 25.804878 | 62 | 0.559546 | 107 | 1,058 | 5.392523 | 0.364486 | 0.155979 | 0.103986 | 0.138648 | 0.412478 | 0.412478 | 0.412478 | 0.412478 | 0.412478 | 0.412478 | 0 | 0.00267 | 0.29206 | 1,058 | 40 | 63 | 26.45 | 0.76769 | 0.10397 | 0 | 0.333333 | 0 | 0 | 0.034292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.208333 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9454a7596d445e08a15c5b3eac352a2c7dc0a75a | 1,293 | py | Python | functionality_cores/jokes.py | SergeJohanns/aboc-bot | 542866c5dc2df2d93821fc9c9d5190c6e3089117 | [
"MIT"
] | null | null | null | functionality_cores/jokes.py | SergeJohanns/aboc-bot | 542866c5dc2df2d93821fc9c9d5190c6e3089117 | [
"MIT"
] | null | null | null | functionality_cores/jokes.py | SergeJohanns/aboc-bot | 542866c5dc2df2d93821fc9c9d5190c6e3089117 | [
"MIT"
] | null | null | null | import time
from functionality_core import FCore
from functionality_cores.utilcore import asynced
from functionality_cores.kerberos import require_ring
class jokes(FCore):
    """Collection of joke commands."""

    def get_commands(self):
        return {"recursion": self.recursion, "bee": self.bee}

    @asynced
    def recursion(self, update, context):
        for _ in range(3):
            context.bot.send_message(chat_id=update.effective_chat.id, text="/recursion")
            time.sleep(1)
        context.bot.send_message(chat_id=update.effective_chat.id, text="jk")

    @require_ring(2)
    @asynced
    def bee(self, update, context):
        DELAY = 1 if update.effective_chat.type == "private" else 3

        def blocks(sentences: list):
            # Greedily join sentences into chunks below the message size cap.
            out = []
            i, j = 0, 0
            while j < len(sentences):
                while j < len(sentences) and len(". ".join(sentences[i:j]) + ".") < 2048:
                    j += 1
                out.append(". ".join(sentences[i:j - 1]) + ".")
                i = j
            return out

        with open("Data/bee.txt", 'r') as bee:
            for block in blocks(bee.read().split(". ")):
                context.bot.send_message(chat_id=update.effective_chat.id, text=block)
                time.sleep(DELAY)
 | 36.942857 | 89 | 0.581593 | 161 | 1,293 | 4.565217 | 0.42236 | 0.04898 | 0.103401 | 0.085714 | 0.212245 | 0.212245 | 0.212245 | 0.212245 | 0.212245 | 0.212245 | 0 | 0.014177 | 0.290797 | 1,293 | 35 | 90 | 36.942857 | 0.78735 | 0.021655 | 0 | 0.066667 | 0 | 0 | 0.04127 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0.033333 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
945713b531ab76dc304c8cb885e6156fb03e8b00 | 1,403 | py | Python | 02_Optimization/nightRun.py | NaoPagamosEstatistica/mlclass | d47a003edda368959a78cf207e66e77a80e6f5b3 | [
"MIT"
] | null | null | null | 02_Optimization/nightRun.py | NaoPagamosEstatistica/mlclass | d47a003edda368959a78cf207e66e77a80e6f5b3 | [
"MIT"
] | null | null | null | 02_Optimization/nightRun.py | NaoPagamosEstatistica/mlclass | d47a003edda368959a78cf207e66e77a80e6f5b3 | [
"MIT"
] | null | null | null | import os
import subprocess
from random import randint
os.system("g++ hillClimb.cpp -o test -std=c++11")
inf = 1<<20
radious = [90, 45, 30, 360, 10]
best = []
# Truncate the result files and seed `best` with -inf for each radius.
for i in range(len(radious)):
    f = open("%d" % (radious[i]), "w")
    b = open("best%d" % (radious[i]), "w")
    best += [[-inf]]
    f.close()
    b.close()


def treatRes(res):
    # Strip the b'...\n' wrapper from the subprocess output's repr.
    res = str(res)
    s = res[2:len(res) - 3]
    return str(s)


def runHill(args, radio):
    result = subprocess.check_output(["./test", *args, "0", "1000", str(radio), "20"])
    result = treatRes(result)
    lol = result.split(' ')
    # print(lol)
    print(result)
    return (result, lol)


while True:
    f = []
    b = []
    for i in range(len(radious)):
        f += [open("%d" % (radious[i]), "a")]
        b += [open("best%d" % (radious[i]), "r")]
        lines = b[i].readlines()
        if len(lines):
            best[i] = lines[0].split()
        else:
            best[i] = [-inf]
    args = []
    for i in range(6):
        args += [randint(0, 359)]
    args = list(map(str, args))
    for i in range(len(radious)):
        result, ans = runHill(args, radious[i])
        print(result, file=f[i])
        if float(ans[0]) > float(best[i][0]):
            b[i].close()
            b[i] = open("best%d" % (radious[i]), "w")
            print(result, file=b[i])
    for i in range(len(radious)):
        f[i].close()
        b[i].close()
 | 25.509091 | 86 | 0.498931 | 203 | 1,403 | 3.44335 | 0.344828 | 0.06867 | 0.042918 | 0.078684 | 0.267525 | 0.240343 | 0.131617 | 0.100143 | 0.100143 | 0.100143 | 0 | 0.032967 | 0.286529 | 1,403 | 55 | 87 | 25.509091 | 0.665335 | 0.007128 | 0 | 0.125 | 0 | 0 | 0.055276 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.0625 | 0 | 0.104167 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9459564910a505c210ee0c2d7c209c21ba0721f1 | 5,243 | py | Python | httpserver/tcpserver.py | ansal/http-server | 6020b235039bf1332de613a7ffad12c668594cdc | [
"MIT"
] | 1 | 2022-03-26T18:08:54.000Z | 2022-03-26T18:08:54.000Z | httpserver/tcpserver.py | ansal/http-server | 6020b235039bf1332de613a7ffad12c668594cdc | [
"MIT"
] | null | null | null | httpserver/tcpserver.py | ansal/http-server | 6020b235039bf1332de613a7ffad12c668594cdc | [
"MIT"
] | null | null | null | # Simple TCP server
import socket
import queue
import select
class TCPServer:

    def __init__(self, ip, port, callback=None, max_connections=5, bytes_count=1024):
        # The IP address this server binds to
        self.ip = ip
        # The port this server binds to
        self.port = port
        # Callback to run after a request is served
        self.callback = callback
        # Number of bytes to be read from the socket
        self.bytes_count = bytes_count
        # Maximum number of connections
        self._max_connections = max_connections
        # Create an INET, STREAMing socket
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Create a list of readers and writers of sockets for reading and writing data.
        self._readers = [self._socket, ]  # Our server socket will be the first.
        self._writers = []
        # We also need a dictionary of queues for data to be sent from the
        # callback.
        self.queue = {}

    def start(self):
        """ Binds the socket and starts listening """
        # Set it as a non-blocking one
        self._socket.setblocking(0)
        # Bind the socket to the specified ip and port
        self._socket.bind((self.ip, self.port))
        # And finally, start listening
        self._socket.listen(self._max_connections)

    def run(self):
        # Create the main server loop that reads and writes data from the
        # sockets.
        while True:
            # Call select.select() to get notified when the sockets
            # are ready for processing.
            # It will actually call the OS system call select() which monitors
            # sockets, open files, pipes etc for any communication/error
            # happening on them. See
            # https://docs.python.org/3/library/select.html for more
            # information.
            read, write, err = select.select(
                self._readers,
                self._writers,
                self._readers
            )

            # Process the sockets that need to be read from
            # (the loop variable is named `sock` so it does not shadow the
            # imported socket module)
            for sock in read:
                # If the socket is our server socket, then it means that there
                # is a client waiting at the other end to connect
                if sock is self._socket:
                    # If you want the IP, you can get it from the second value
                    # of the tuple below _
                    client_socket, _ = self._socket.accept()
                    client_socket.setblocking(0)  # Make it non-blocking.
                    # Add the socket to the list of readers
                    self._readers.append(client_socket)
                    # And give it a queue for any callback to put data
                    self.queue[client_socket] = queue.Queue()
                else:
                    # This is some other client trying to send us data. So read
                    # that data.
                    try:
                        data = sock.recv(self.bytes_count)
                    except Exception as e:
                        raise e
                    # If there is data from the socket, call the callback and
                    # put the socket in the writer list in case the callback
                    # decided to put some data in the queue and we have to send
                    # it to the client later.
                    if data:
                        if self.callback is not None:
                            self.callback(self.queue[sock], data)
                        if sock not in self._writers:
                            self._writers.append(sock)
                    else:
                        # We have received no data, i.e. zero bytes. So close the
                        # connection and remove the socket.
                        self.remove_socket(sock)

            # Process the sockets that need to be written to
            for sock in write:
                # Get the data from the queue
                try:
                    data = self.queue[sock].get_nowait()
                except queue.Empty:
                    # The queue is empty. The callback probably didn't put any
                    # data in it. Hence remove it from the writer list
                    self._writers.remove(sock)
                else:
                    # Callback has put some data in the queue. So send it back
                    # to the client.
                    sock.send(data)
                    # Once the data is sent, remove the socket from everywhere,
                    # destroy the queue and close it.
                    self.remove_socket(sock)

            # Process the sockets that have errors
            for sock in err:
                # Remove and close the socket
                self.remove_socket(sock)

    def remove_socket(self, sock):
        # Remove the socket from both lists, destroy the queue and close the
        # socket.
        if sock in self._readers:
            self._readers.remove(sock)
        if sock in self._writers:
            self._writers.remove(sock)
        del self.queue[sock]
        sock.close()
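# Hypothetical echo-style usage (the callback receives the per-client queue
# and the received bytes, matching the call in run() above):
# def echo(out_queue, data):
#     out_queue.put(data)
#
# server = TCPServer('127.0.0.1', 8080, callback=echo)
# server.start()
# server.run()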
| 33.825806 | 87 | 0.54034 | 627 | 5,243 | 4.443381 | 0.296651 | 0.038765 | 0.013999 | 0.02369 | 0.121321 | 0.060302 | 0.030869 | 0.030869 | 0 | 0 | 0 | 0.002567 | 0.405684 | 5,243 | 154 | 88 | 34.045455 | 0.891528 | 0.403014 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.05 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
945dd0d9b70abc4ba2a33b9db34923ccdcde4e40 | 2,225 | py | Python | src/resource/spear_of_Adun.py | Thrimbda/my-life-for-Aiur | 5e385e1b6e49c4b7f75024572d21a6621ad550d9 | [
"MIT"
] | null | null | null | src/resource/spear_of_Adun.py | Thrimbda/my-life-for-Aiur | 5e385e1b6e49c4b7f75024572d21a6621ad550d9 | [
"MIT"
] | null | null | null | src/resource/spear_of_Adun.py | Thrimbda/my-life-for-Aiur | 5e385e1b6e49c4b7f75024572d21a6621ad550d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Macsnow
# @Date: 2017-05-11 10:53:57
# @Last Modified by: Macsnow
# @Last Modified time: 2017-05-11 15:36:00
from flask.ext.restful import Resource, reqparse
from flask import session
from src.common.util import abortIfSubjectUnauthenticated
from src.common.util import abortInvalideSubject
from src.common.util import abortInvalideRole
from src.common.util import checkRole
from src.common.util import checkPermission
from src.common.subject_role import subject_role
from src.common.role_permission import role_permission
from src.common.nexus import nexus
class SpearOfAdun(Resource):
    def __init__(self):
        self.putparser = reqparse.RequestParser()
        self.putparser.add_argument('subject',
                                    type=str,
                                    location='json')
        self.putparser.add_argument('role',
                                    type=str,
                                    location='json')
        super(SpearOfAdun, self).__init__()

    def get(self):
        permission = 'get_status'
        abortIfSubjectUnauthenticated(session)
        checkPermission(session['role'], permission, role_permission)
        return nexus.getStatus(session['role']), 200

    def post(self):
        args = self.putparser.parse_args()
        if args['subject'] is not None:
            abortInvalideSubject(args['subject'])
        if args['role'] is not None:
            abortInvalideRole(args['role'])
        checkRole(args['subject'], args['role'], subject_role)
        session['subject'] = args['subject']
        session['role'] = args['role']
        return {'message': 'login as %s using %s' % (session['subject'], session['role'])}, 201

    def put(self):
        args = self.putparser.parse_args()
        if args['role'] is not None:
            abortInvalideRole(args['role'])
        session['role'] = args['role']
        # Flask's session is dict-like, so use item access, not attributes
        return {'message': 'you-%s change role to %s' % (session['subject'], session['role'])}, 200

    def delete(self):
        abortIfSubjectUnauthenticated(session)
        session.pop('subject')
        session.pop('role')
        return '', 204
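# Hypothetical client flow (HTTP verbs map onto the methods above; the URL
# depends on where this Resource is registered):
#   POST   {"subject": ..., "role": ...} -> start a session, 201
#   PUT    {"role": ...}                 -> switch role, 200
#   GET                                  -> status for the current role, 200
#   DELETE                               -> clear the session, 204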
| 37.711864 | 96 | 0.60809 | 240 | 2,225 | 5.558333 | 0.325 | 0.041979 | 0.077961 | 0.063718 | 0.288606 | 0.163418 | 0.115442 | 0.115442 | 0.065967 | 0 | 0 | 0.02545 | 0.275955 | 2,225 | 58 | 97 | 38.362069 | 0.802607 | 0.061573 | 0 | 0.304348 | 0 | 0 | 0.092885 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.217391 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9464fd14b792b482c66ebf4c89048f0fbdc126f7 | 449 | py | Python | vmreflect/tests/test_util.py | andrewdotn/vmreflect | f6dcab5df747df7bb8b53d9637f3f48d66910ca5 | [
"BSD-2-Clause"
] | 1 | 2015-10-20T15:09:08.000Z | 2015-10-20T15:09:08.000Z | vmreflect/tests/test_util.py | andrewdotn/vmreflect | f6dcab5df747df7bb8b53d9637f3f48d66910ca5 | [
"BSD-2-Clause"
] | null | null | null | vmreflect/tests/test_util.py | andrewdotn/vmreflect | f6dcab5df747df7bb8b53d9637f3f48d66910ca5 | [
"BSD-2-Clause"
] | null | null | null | import string
import unittest
class TestUtil(unittest.TestCase):
    def test_random_string(self):
        from vmreflect.utils import get_random_string
        for l in range(20):
            self.assertEqual(l, len(get_random_string(length=l)))
        for alphabet in ['1234', string.letters, '!$(AJ$)AF(A@F']:
            self.assertTrue(
                all(c in alphabet
                    for c in get_random_string(alphabet=alphabet)))
| 29.933333 | 67 | 0.625835 | 58 | 449 | 4.706897 | 0.551724 | 0.175824 | 0.164835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018405 | 0.273942 | 449 | 14 | 68 | 32.071429 | 0.819018 | 0 | 0 | 0 | 0 | 0 | 0.037946 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9465ea6e8a3e28767df80dc2e1f77115e0fcf158 | 2,788 | py | Python | test/test_model/test_annotation.py | alexandermendes/libanno | bc504498ef330fab46f2334f96631457d520ec90 | [
"MIT"
] | 7 | 2018-05-22T04:54:08.000Z | 2020-05-28T20:27:39.000Z | test/test_model/test_annotation.py | alexandermendes/libanno | bc504498ef330fab46f2334f96631457d520ec90 | [
"MIT"
] | 9 | 2018-07-20T12:54:24.000Z | 2020-11-05T17:44:19.000Z | test/test_model/test_annotation.py | alexandermendes/libanno | bc504498ef330fab46f2334f96631457d520ec90 | [
"MIT"
] | 1 | 2019-06-12T14:19:33.000Z | 2019-06-12T14:19:33.000Z | # -*- coding: utf8 -*-
from flask import url_for, current_app
from nose.tools import *

from base import Test, db, with_context
from explicates.model.collection import Collection
from explicates.model.annotation import Annotation


class TestModelAnnotation(Test):

    def setUp(self):
        super(TestModelAnnotation, self).setUp()

    @with_context
    def test_iri(self):
        """Test Annotation IRI generated correctly."""
        collection_data = {
            'type': [
                'AnnotationCollection',
                'BasicContainer'
            ]
        }
        annotation_data = {
            'body': 'foo',
            'target': 'bar'
        }
        collection = Collection(data=collection_data)
        annotation = Annotation(collection=collection, data=annotation_data)
        db.session.add(collection)
        db.session.add(annotation)
        db.session.commit()
        expected = url_for('api.annotations',
                           collection_id=collection.id,
                           annotation_id=annotation.id,
                           _external=True)
        assert_equal(annotation.iri, expected)

    @with_context
    def test_default_language_set(self):
        """Test default language is set."""
        default_lang = current_app.config.get('FTS_DEFAULT')
        annotation_data = {
            'body': 'foo',
            'target': 'bar'
        }
        collection = Collection()
        annotation = Annotation(collection=collection, data=annotation_data)
        db.session.add(annotation)
        db.session.commit()
        assert_equal(annotation.language, default_lang)

    @with_context
    def test_alternative_language_set_from_single_body(self):
        """Test alternative language is set from a single body."""
        annotation_data = {
            'body': {
                'language': 'fr'
            },
            'target': 'bar'
        }
        collection = Collection()
        annotation = Annotation(collection=collection, data=annotation_data)
        db.session.add(annotation)
        db.session.commit()
        assert_equal(annotation.language, 'french')

    @with_context
    def test_alternative_language_set_from_multiple_bodies(self):
        """Test alternative language is set from multiple bodies."""
        annotation_data = {
            'body': [
                {
                    'language': ['de', 'fr']
                },
                {
                    'language': 'ru'
                }
            ],
            'target': 'bar'
        }
        collection = Collection()
        annotation = Annotation(collection=collection, data=annotation_data)
        db.session.add(annotation)
        db.session.commit()
        assert_equal(annotation.language, 'german')
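
# A hedged sketch of the language resolution the tests above imply: ISO
# codes from the first annotation body map to PostgreSQL full-text-search
# names ('fr' -> 'french', 'de' -> 'german'), taking the first language of
# the first body. The real explicates code may differ; illustrative only.
_ISO_TO_FTS = {'fr': 'french', 'de': 'german', 'ru': 'russian'}


def _example_resolve_language(data, default='english'):
    bodies = data.get('body')
    if not isinstance(bodies, list):
        bodies = [bodies] if bodies else []
    for body in bodies:
        if isinstance(body, dict) and 'language' in body:
            langs = body['language']
            code = langs[0] if isinstance(langs, list) else langs
            return _ISO_TO_FTS.get(code, default)
    return default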
| 31.681818 | 76 | 0.573888 | 254 | 2,788 | 6.122047 | 0.259843 | 0.05209 | 0.07717 | 0.046302 | 0.51254 | 0.51254 | 0.51254 | 0.442444 | 0.340193 | 0.340193 | 0 | 0.000532 | 0.325323 | 2,788 | 87 | 77 | 32.045977 | 0.826156 | 0.071736 | 0 | 0.39726 | 0 | 0 | 0.064667 | 0 | 0 | 0 | 0 | 0 | 0.054795 | 1 | 0.068493 | false | 0 | 0.068493 | 0 | 0.150685 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9466a0890aee0dc1575d353661a33b3cadbde55e | 6,641 | py | Python | src/cropbox_search/cropbox_record_sm.py | asr-ros/asr_state_machine | bdba6f34275a83dade4cc40d2bd21a18dbcef4c3 | [
"BSD-3-Clause"
] | 1 | 2019-10-29T13:37:26.000Z | 2019-10-29T13:37:26.000Z | src/cropbox_search/cropbox_record_sm.py | asr-ros/asr_state_machine | bdba6f34275a83dade4cc40d2bd21a18dbcef4c3 | [
"BSD-3-Clause"
] | null | null | null | src/cropbox_search/cropbox_record_sm.py | asr-ros/asr_state_machine | bdba6f34275a83dade4cc40d2bd21a18dbcef4c3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Copyright (c) 2016, Allgeyer Tobias, Aumann Florian, Borella Jocelyn, Hutmacher Robin, Karrenbauer Oliver, Marek Felix, Meissner Pascal, Trautmann Jeremias, Wittenbeck Valerij
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import roslib
import rospy
import smach
import smach_ros

from common.common_sm import GetMoveRobotSateMachine  # (sic) upstream name
from common.init import SearchInit
from common.move import PTUPoseCorrection
from indirect_search.nbv import NBVSetPointCloud, NextBestView, NextBestViewUpdate
from cropbox_search_states import CropBoxGeneration
from record_states import CropboxStateRecording
from common.visualize_waypoints import VisualizeWaypoints


class CropBoxRecordStateMachine():

    # Inner state machine: initialise the search, generate the crop box and
    # hand the resulting point cloud to the next-best-view component.
    sm_init = smach.StateMachine(outcomes=['succeeded',
                                           'aborted'])

    with sm_init:
        smach.StateMachine.add('SEARCH_INIT',
                               SearchInit(),
                               transitions={'succeeded':'CROPBOX_GENERATION',
                                            'aborted':'aborted'},
                               remapping={'searched_object_types':'searched_object_types'})

        smach.StateMachine.add('CROPBOX_GENERATION',
                               CropBoxGeneration(),
                               transitions={'succeeded':'NBV_SET_POINT_CLOUD',
                                            'aborted':'aborted'},
                               remapping={'object_pointcloud':'object_pointcloud'})

        smach.StateMachine.add('NBV_SET_POINT_CLOUD',
                               NBVSetPointCloud(),
                               transitions={'succeeded':'succeeded',
                                            'aborted':'aborted',
                                            'too_many_deactivated_normals':'aborted'},
                               remapping={'object_pointcloud':'object_pointcloud'})

    # Outer state machine: loop between next-best-view calculation, robot
    # motion and state recording until no further view is found.
    sm_cropbox_record = smach.StateMachine(outcomes=['aborted',
                                                     'finished'])

    with sm_cropbox_record:
        smach.StateMachine.add('CROPBOX_RECORD_INIT',
                               sm_init,
                               transitions={'succeeded':'NBV_CALCULATION',
                                            'aborted':'aborted'})

        smach.StateMachine.add('NBV_CALCULATION',
                               NextBestView(),
                               transitions={'found_next_best_view':'MOVE_ROBOT_TO_VIEW',
                                            'aborted':'aborted',
                                            'no_nbv_found':'finished',
                                            'nbv_update_point_cloud':'MOVE_ROBOT_TO_VIEW'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position',
                                          'searched_object_types':'searched_object_types'})

        smach.StateMachine.add('MOVE_ROBOT_TO_VIEW',
                               GetMoveRobotSateMachine(),
                               transitions={'succeeded':'PTU_POSE_CORRECTION',
                                            'aborted':'aborted'},
                               remapping={'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position'})

        smach.StateMachine.add('PTU_POSE_CORRECTION',
                               PTUPoseCorrection(),
                               transitions={'succeeded':'VISUALIZE_WAYPOINT',
                                            'aborted':'VISUALIZE_WAYPOINT'},
                               remapping={'goal_camera_pose':'goal_camera_pose'})

        smach.StateMachine.add('VISUALIZE_WAYPOINT',
                               VisualizeWaypoints(),
                               transitions={'succeeded':'NBV_UPDATE_POINT_CLOUD'})

        smach.StateMachine.add('NBV_UPDATE_POINT_CLOUD',
                               NextBestViewUpdate(),
                               transitions={'succeeded':'STATE_RECORDING',
                                            'aborted':'aborted',
                                            'no_nbv_found':'finished'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'searched_object_types':'searched_object_types',
                                          'deactivated_object_normals_count':'deactivated_object_normals_count'})

        smach.StateMachine.add('STATE_RECORDING',
                               CropboxStateRecording(),
                               transitions={'succeeded':'NBV_CALCULATION',
                                            'aborted':'aborted'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position',
                                          'deactivated_object_normals_count':'deactivated_object_normals_count'})
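
# A hedged usage sketch (not in the original file): SMACH containers built
# at class-body level like the ones above are typically executed from a
# ROS node, optionally with an introspection server for smach_viewer.
if __name__ == '__main__':
    rospy.init_node('cropbox_record')
    sm = CropBoxRecordStateMachine.sm_cropbox_record
    sis = smach_ros.IntrospectionServer('cropbox_record', sm, '/SM_ROOT')
    sis.start()
    outcome = sm.execute()
    rospy.loginfo('Crop box recording finished with outcome: %s', outcome)
    sis.stop()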
| 58.254386 | 755 | 0.568288 | 587 | 6,641 | 6.202726 | 0.357751 | 0.056029 | 0.05493 | 0.029662 | 0.299368 | 0.268608 | 0.18786 | 0.167536 | 0.135677 | 0.103818 | 0 | 0.001646 | 0.359584 | 6,641 | 113 | 756 | 58.769912 | 0.854456 | 0.248005 | 0 | 0.25641 | 0 | 0 | 0.274478 | 0.069823 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.141026 | 0 | 0.179487 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94674f843dab73643aa11740f5acb4a209e24514 | 58,296 | py | Python | tests/unit/test_paragraphs.py | IanVermes/recompose | e675af59930ae2d45ec054dfa0d0ebc85383c669 | [
"MIT"
] | null | null | null | tests/unit/test_paragraphs.py | IanVermes/recompose | e675af59930ae2d45ec054dfa0d0ebc85383c669 | [
"MIT"
] | null | null | null | tests/unit/test_paragraphs.py | IanVermes/recompose | e675af59930ae2d45ec054dfa0d0ebc85383c669 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""Unit test of paragraph processing classes: PreProcessed and PostProcessed
as well as Processor subclasses.
Copyright: Ian Vermes 2019
"""
from tests.base_testcases import ParagraphsTestCase, BaseTestCase, ProcessorTestCase_Genuine
from tests.special_testcases import ProcessorTestCase_Abstract
import helpers.logging as pkg_logging
from helpers import paragraphs
import helpers.paragraphs  # for targeted mocking
import exceptions
import testfixtures
from lxml import etree
from unittest.mock import patch, MagicMock
from collections import defaultdict
import random
import unittest
import functools
import itertools
import os
PREPROCESSED_CONFIG = {
"pre_italic": "Berthelot, Katell, Michaël Langlois and Thierry Legrand,",
"italic": ("La Bibliothèque de Qumran 3b: Torah Deutéronome et "
"Pantateque dans son ensemble."),
"post_italic": ("Les Éditions du Cerf, Paris, 2017. xxi, 730 pp. €75.00. "
"ISBN 978 2 20411 147 8.")
}
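# The three keys above mirror how PreProcessed splits a bibliographic
# paragraph: the author block before the italic span, the italic title,
# and the publication metadata after it.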
class Test_Processor_Classes(BaseTestCase):
@classmethod
def setUpClass(cls):
cls.mock_config = PREPROCESSED_CONFIG
cls.patcher = patch("helpers.paragraphs.PreProcessed", autospec=True)
cls.MockPreProcessed = cls.patcher.start()
cls.processor_classes = {"authors": paragraphs.ProcessorAuthors,
"title": paragraphs.ProcessorTitle,
"meta": paragraphs.ProcessorMeta}
expected_attrs = {"authors": "authors editors",
"title": "title series",
"meta": ("illustrator translator "
"publisher pubplace year "
"pages price isbn")}
for attr_group, attr in expected_attrs.items():
expected_attrs[attr_group] = attr.split()
cls.expected_attrs = expected_attrs
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
def setUp(self):
self.pre = self.MockPreProcessed("Some XML paragraph <w:p>")
self.pre.configure_mock(**self.mock_config)
def test_mocked_PreProcessed(self):
pre = self.MockPreProcessed("Some XML paragraph <w:p>")
pre.configure_mock(**self.mock_config)
for i, attr in enumerate(("pre_italic", "post_italic", "italic")):
with self.subTest(attr=attr):
self.assertHasAttr(pre, attr)
attr_value = getattr(pre, attr)
dict_value = self.mock_config[attr]
self.assertEqual(attr_value, dict_value)
def test_PostProcessed_attr_by_group(self):
pre = self.pre
post = paragraphs.PostProcessed(pre)
for group in self.expected_attrs:
with self.subTest(criteria=f"{group} - hasAttr"):
for attr in self.expected_attrs[group]:
with self.subTest(attr=attr):
self.assertHasAttr(post, attr)
def test_Processor_attrs_by_group(self):
for group in self.processor_classes.keys():
with self.subTest(group=group):
self.check_Processor_attrs_by_group(group)
def check_Processor_attrs_by_group(self, group):
pre = self.pre
Processor = self.processor_classes[group]
self.assertIn(group.lower(), Processor.__name__.lower(),
msg="Precondition - class name is sensible!")
expected_attrs = self.expected_attrs[group]
processor_obj = Processor(pre)
for attr in expected_attrs:
with self.subTest(attr=attr):
self.assertHasAttr(processor_obj, attr)
class Test_ProcessorAuthor_Class(ProcessorTestCase_Abstract, ProcessorTestCase_Genuine):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.group = "pre_italic"
cls.strings = cls._strings[cls.group]
cls.Processor = paragraphs.ProcessorAuthors
cls.mock_config = PREPROCESSED_CONFIG
cls.MockPreProcessed = MagicMock(autospec=helpers.paragraphs.PreProcessed)
cls.strucural_arg["good"] = ("Hockey, Katherine M., and David G. "
"Horrell (eds),")
cls.strucural_arg["bad"] = ("Berthelot, Katell, Michaël Langlois and "
"Thierry Legrand,")
cls.first_author = "del Olmo Lete, Gregorio,"
cls.editorial_arg = {}
cls.editorial_arg["(eds),"] = cls.strucural_arg["good"]
cls.editorial_arg["(ed.),"] = "Alexandru, Florian (ed.),"
cls.editorial_arg[""] = cls.first_author
def test_assignment_to_editor_or_author_attr(self):
for editor_substring, raw_string in self.editorial_arg.items():
with self.subTest(criteria="specific", substring=editor_substring):
processor_obj = self.Processor(raw_string)
self.assertEqual(processor_obj.isEditor(),
bool(editor_substring),
msg="Precondtion")
# Test1: either attribute list is populated
is_populated = any([len(processor_obj.authors),
len(processor_obj.editors)])
self.assertTrue(is_populated, msg=f"raw_string = {raw_string}")
# Test2: the correct list is populated:
self.check_author_editor_attr_assignment(processor_obj)
with self.subTest(criteria="generic"):
for raw_string in self.strings:
processor_obj = self.Processor(raw_string)
if processor_obj.isValid():
self.check_author_editor_attr_assignment(processor_obj)
else:
continue # Ignore badly structured raw strings.
def check_author_editor_attr_assignment(self, processor_obj):
if processor_obj.isEditor():
self.assertGreaterEqual(len(processor_obj.editors), 1)
self.assertEqual(len(processor_obj.authors), 0)
else:
self.assertGreaterEqual(len(processor_obj.authors), 1)
self.assertEqual(len(processor_obj.editors), 0)
def test_method_isEditor(self):
for editor_suffix in ["(ed.),", "(eds),"]:
msg = f"isEditor - positive specific {editor_suffix}"
with self.subTest(criteria=msg):
string = self.editorial_arg[editor_suffix]
processor_obj = self.Processor(string)
flag = processor_obj.isEditor()
self.assertTrue(flag)
self.assertIs(flag, True)
no_editor_suffix = ""
with self.subTest(criteria=f"strip - negative specific"):
string = self.editorial_arg[no_editor_suffix]
processor_obj = self.Processor(string)
flag = processor_obj.isEditor()
self.assertFalse(flag)
self.assertIs(flag, False)
def test_cls_method_strip_editor(self):
cls_method = self.Processor.strip_editor
naughty = "(eds),"
with self.subTest(criteria=f"strip - positive specific {naughty}"):
string = self.editorial_arg[naughty]
self.assertIn(naughty, string, msg="Precondtion")
# Expect string should have a trailing space.
expect = "Hockey, Katherine M., and David G. Horrell "
result = cls_method(string)
self.assertNotEqual(string, result)
self.assertEqual(result, expect)
naughty = "(ed.),"
with self.subTest(criteria=f"strip - positive specific {naughty}"):
string = self.editorial_arg[naughty]
self.assertIn(naughty, string, msg="Precondtion")
# Expect string should have a trailing space.
expect = "Alexandru, Florian "
result = cls_method(string)
self.assertNotEqual(string, result)
self.assertEqual(result, expect)
with self.subTest(criteria="strip - negative specific"):
string = self.editorial_arg[""]
expect = string
result = cls_method(string)
self.assertEqual(result, expect)
with self.subTest(criteria="strip - generic"):
missing1, missing2 = "(eds),", "(ed.),"
counter = 0
for string in self.strings:
if missing1 in string:
counter += 1
result = cls_method(string)
self.assertNotIn(missing1, result)
elif missing2 in string:
counter += 1
result = cls_method(string)
self.assertNotIn(missing2, result)
else:
result = cls_method(string)
self.assertEqual(string, result)
assertmsg = "Postcondition: nothing was actually tested!"
self.assertGreater(counter, 0, msg=assertmsg)
def test_cls_method_split(self):
cls_method = self.Processor.split
with self.subTest(criteria="split - multi auth"):
string = self.strucural_arg["good"]
expected = ["Katherine M. Hockey", "David G. Horrell"]
result = cls_method(string)
self.assertListEqual(expected, result)
with self.subTest(criteria="split - multi auth, bad struct"):
string = self.strucural_arg["bad"]
expected = ["Katell Berthelot",
"Michaël Langlois and Thierry Legrand"]
result = cls_method(string)
self.assertListEqual(expected, result)
with self.subTest(criteria="split - single author"):
string = self.first_author
expected = ["Gregorio del Olmo Lete"]
result = cls_method(string)
self.assertListEqual(expected, result)
class Test_ProcessorTitle_Class(ProcessorTestCase_Abstract, ProcessorTestCase_Genuine):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.group = "italic"
cls.strings = cls._strings[cls.group]
cls.Processor = paragraphs.ProcessorTitle
cls.mock_config = PREPROCESSED_CONFIG
cls.MockPreProcessed = MagicMock(autospec=helpers.paragraphs.PreProcessed)
cls.strucural_arg["good"] = ("New Approaches to an Integrated History "
"of the Holocaust: Social History, "
"Representation, Theory. Lessons and "
"Legacies: Volume XIII.")
cls.strucural_arg["bad"] = ("Lessons and Legacies, Volume XIII: New "
"Approaches to an Integrated History of "
"the Holocaust: Social History, "
"Representation, Theory.")
cls.series_arg = {}
cls.series_arg[True] = (cls.strucural_arg["good"])
cls.series_arg[False] = ("An Early History of Compassion: Emotion "
"and Imagination in Hellenistic Judaism.")
def test_assignment_to_title_and_series(self):
for series_flag, raw_string in self.series_arg.items():
with self.subTest(criteria="specific", has_series=series_flag):
processor_obj = self.Processor(raw_string)
self.assertEqual(processor_obj.isSeries(),
series_flag,
msg="Precondtion")
# Test1: both attributes is a string even if empty
is_string = all([isinstance(processor_obj.title, str),
isinstance(processor_obj.series, str)])
self.assertTrue(is_string)
# Test2: either attribute is populated
is_populated = any([len(processor_obj.title),
len(processor_obj.series)])
self.assertTrue(is_populated, msg=f"raw_string = {raw_string}")
# Test3: the correct list is populated:
self.check_title_series_attr_assignment(processor_obj)
with self.subTest(criteria="generic"):
for raw_string in self.strings:
processor_obj = self.Processor(raw_string)
if processor_obj.isValid():
self.check_title_series_attr_assignment(processor_obj)
else:
continue # Ignore badly structured raw strings.
def check_title_series_attr_assignment(self, processor_obj):
if processor_obj.isSeries():
self.assertGreaterEqual(len(processor_obj.title), 1)
self.assertGreaterEqual(len(processor_obj.series), 1)
else:
self.assertGreaterEqual(len(processor_obj.title), 1)
self.assertEqual(len(processor_obj.series), 0)
def test_method_isSeries_specific(self):
for expected_bool, raw_string in self.series_arg.items():
processor_obj = self.Processor(raw_string)
self.assertEqual(processor_obj.isSeries(), expected_bool)
def test_method_isSeries_general(self):
tally = defaultdict(int)
expect_series_count = 2  # Expect 2 raw strings with legit series info
for raw_string in self.strings:
processor_obj = self.Processor(raw_string)
tally[processor_obj.isSeries()] += 1
self.assertEqual(tally[True], expect_series_count)
self.assertEqual(tally[False], len(self.strings) - expect_series_count)
def test_cls_method_split(self):
cls_method = self.Processor.split
with self.subTest(criteria="split - title + series"):
string = self.strucural_arg["good"]
expected = [("New Approaches to an Integrated History of the "
"Holocaust: Social History, Representation, Theory"),
"Lessons and Legacies: Volume XIII"]
result = cls_method(string)
self.assertListEqual(expected, result)
with self.subTest(criteria="split - title + series, bad structure"):
string = self.strucural_arg["bad"]
expected = [string.strip().strip(".").strip(), ""]
result = cls_method(string)
self.assertListEqual(expected, result)
with self.subTest(criteria="split - title only"):
string = self.series_arg[False]
expected = [string.strip().strip(".").strip(), ""]
result = cls_method(string)
self.assertListEqual(expected, result)
class Test_ProcessorMeta_Class(ProcessorTestCase_Abstract, ProcessorTestCase_Genuine):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.group = "post_italic"
cls.strings = cls._strings[cls.group]
cls.Processor = paragraphs.ProcessorMeta
cls.mock_config = PREPROCESSED_CONFIG
cls.MockPreProcessed = MagicMock(autospec=helpers.paragraphs.PreProcessed)
cls.strucural_arg["good"] = ("Translated by Michaela Lang. Indiana "
"University Press, Bloomington IN, 2018. "
"ix, 161 pp. $65.00. ISBN 978 0 25303 "
"835 7.")
cls.strucural_arg["bad"] = ("The Jewish Museum of Greece, Athens, "
"2018. 312 pp. ISBN 978 9 60888 539 4.")
cls.extra_arg = {}
cls.extra_arg[True] = cls.strucural_arg["good"]
cls.extra_arg[False] = ("Brill, Leiden, 2018. xiii, 240 pp. €94.00. "
"ISBN 978 9 00434 447 1.")
cls.illustrator_translator_arg = {}
cls.illustrator_translator_arg["illustrator"] = ("Illustrated by Kristine A. Thorsen. Northwestern University Press, Evanston IL, 2018. xii, 642 pp. $45.00. ISBN 978 0 81012 607 7.")
cls.illustrator_translator_arg["translator"] = cls.strucural_arg["good"]
cls.issn_isbn_arg = {}
cls.issn_isbn_arg["issn"] = ("The Hebrew University of Jerusalem, "
"Jerusalem, 2018. x, 660 pp. $120.00. "
"ISSN 0793 4289.")
cls.issn_isbn_arg["isbn"] = cls.strucural_arg["good"]
@staticmethod
def get_strings_by_attr(obj, attrs):
"Helper func to retrieve object strings from a list of attribute names."
for attr in attrs:
yield getattr(obj, attr)
def test_assignment_of_extra_attributes_specific(self):
extra_attrs = set("extra translator illustrator".split())
non_extra_attrs = self.Processor._data_attrs - extra_attrs
for extra_flag, raw_string in self.extra_arg.items():
processor_obj = self.Processor(raw_string)
self.assertEqual(processor_obj.isExtra(),
extra_flag,
msg="Precondtion")
# Test1: extra attributes are a string even if empty
query = self.get_strings_by_attr(processor_obj, extra_attrs)
is_string = all([isinstance(q, str) for q in query])
self.assertTrue(is_string)
# Test2a: non-extra attr have populated strings
query = self.get_strings_by_attr(processor_obj, non_extra_attrs)
is_notempty = all([len(q) > 0 for q in query])
self.assertTrue(is_notempty)
# Test2b: extra attrs are partially populated with strings
self.assertTrue(processor_obj.isValid(), msg="Precondition")
self.check_extra_attr_assignment(processor_obj)
def test_assignment_of_extra_attributes_generic(self):
for raw_string in self.strings:
processor_obj = self.Processor(raw_string)
if processor_obj.isValid():
self.check_extra_attr_assignment(processor_obj)
else:
continue # Ignore badly structured raw strings.
def check_extra_attr_assignment(self, processor_obj):
attrs = processor_obj._extra_attrs
query = self.get_strings_by_attr(processor_obj, attrs)
if processor_obj.isExtra():
self.assertGreaterEqual(len(processor_obj.extra), 1)
any_notempty = any([len(q) > 0 for q in query])
self.assertTrue(any_notempty)
else:
self.assertEqual(len(processor_obj.extra), 0)
all_empty = all([len(q) == 0 for q in query])
self.assertTrue(all_empty)
def test_assignment_of_translator_illustrator_attributes(self):
for key_string, raw_string in self.illustrator_translator_arg.items():
obj = self.Processor(raw_string)
self.asserTrue(obj.isExtra(), msg="Precondtion")
self.asserTrue(obj.isValid(), msg="Precondtion")
self.assertIn(key_string, obj._extra_attrs, msg="Precondtion")
# Test1: Assignment is to the correct attribute
value = getattr(obj, key_string) # key_string == an attr of obj
self.assertGreater(len(value), 0)
# Test2: Other extra attrs are left empty
other_attrs = obj._extra_attrs - set([key_string])
query = self.get_strings_by_attr(obj, other_attrs)
all_empty = all([len(q) == 0 for q in query])
self.assertTrue(all_empty)
def test_method_isExtra(self):
for key in ["illustrator", "translator"]:
msg = f"Positive method result for '{key}'"
with self.subTest(criteria=msg):
string = self.illustrator_translator_arg[key]
processor_obj = self.Processor(string)
flag = processor_obj.isExtra()
self.assertTrue(flag)
self.assertIs(flag, True)
with self.subTest(criteria="Negative method result"):
string = self.extra_arg[False]
processor_obj = self.Processor(string)
flag = processor_obj.isExtra()
self.assertFalse(flag)
self.assertIs(flag, False)
def test_cls_method_split(self):
cls_method = self.Processor.split
with self.subTest(criteria="split - good, no extra"):
expected = {"publisher": "Brill",
"pubplace": "Leiden",
"year": "2018",
"pages": "xiii, 240 pp",
"price": "€94.00",
"isbn": "ISBN 978 9 00434 447 1",
"issn": "",
"extra": "",
"translator": "",
"illustrator": "",
}
result = cls_method(self.extra_arg[False])
self.assertDictEqual(expected, result)
with self.subTest(criteria="split - good, with extra"):
expected = {"publisher": "Indiana University Press",
"pubplace": "Bloomington IN",
"year": "2018",
"pages": "ix, 161 pp",
"price": "$65.00",
"isbn": "ISBN 978 0 25303 835 7",
"issn": "",
"extra": "Translated by Michaela Lang",
"translator": "Michaela Lang",
"illustrator": "",
}
result = cls_method(self.illustrator_translator_arg["translator"])
self.assertDictEqual(expected, result)
with self.subTest(criteria="split - bad"):
expected = {"publisher": "The Jewish Museum of Greece",
"pubplace": "Athens",
"year": "2018",
"pages": "312 pp",
"price": "",
"isbn": "ISBN 978 9 60888 539 4",
"issn": "",
"extra": "",
"translator": "",
"illustrator": "",
}
result = cls_method(self.strucural_arg["bad"])
self.assertDictEqual(expected, result)
def test_cls_method_count_fullstop(self):
self.assertEqual(self.Processor.count_fullstop(""), 0)
self.assertEqual(self.Processor.count_fullstop("."), 1)
self.assertEqual(self.Processor.count_fullstop("." * 5), 5)
string = self.strucural_arg["good"]
self.assertEqual(self.Processor.count_fullstop(string), 6)
def test_cls_search_isbn(self):
method = self.Processor._search_isbn
setup = [("good", "ISBN 978 0 25303 835 7"),
("bad", "ISBN 978 9 60888 539 4")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.strucural_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_issn(self):
method = self.Processor._search_issn
setup = [("issn", "ISSN 0793 4289"),
("isbn", "")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.issn_isbn_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_price(self):
method = self.Processor._search_price
setup = [("good", "$65.00"),
("bad", "") # Deliberately empty.
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.strucural_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_pages(self):
method = self.Processor._search_pages
setup = [("good", "ix, 161 pp"),
("bad", "312 pp")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.strucural_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_year(self):
method = self.Processor._search_year
setup = [("good", "2018"),
("bad", "2018")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.strucural_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_publisher(self):
method = self.Processor._search_publisher
setup = [("good", "Indiana University Press"),
("bad", "The Jewish Museum of Greece")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.strucural_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_pubplace(self):
method = self.Processor._search_pubplace
setup = [("good", "Bloomington IN"),
("bad", "Athens")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.strucural_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_extra(self):
method = self.Processor._search_extra
setup = [("good", "Translated by Michaela Lang"),
("bad", "")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.strucural_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_illustrator(self):
method = self.Processor._search_illustrator
setup = [("illustrator", "Kristine A. Thorsen"),
("translator", "")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.illustrator_translator_arg[key]
result = method(string)
self.assertEqual(expected, result)
def test_cls_search_translator(self):
method = self.Processor._search_translator
setup = [("illustrator", ""),
("translator", "Michaela Lang")
]
for key, expected in setup:
with self.subTest(criteria=f"structure is {key}"):
string = self.illustrator_translator_arg[key]
result = method(string)
self.assertEqual(expected, result)
class Test_PreProcessed(ParagraphsTestCase):
def setUp(self):
paragraphs.PreProcessed._reset_xpaths()
def test_instantiation_good_arg_PARA(self):
query = ("//w:p[(count(descendant::w:i) > 0) and "
"(count(descendant::w:t) > 0)]")
find = self.input.xpaths.get(query)
para = find(self.input.tree)[0]
# Para with text and italic
_ = paragraphs.PreProcessed(para)
def test_instantiation_wrong_arg_PARA(self):
expected_exception = exceptions.RecomposeError
expected_substrings = "paragraph no italic text tags".split()
# Xpath queries for various paras
queries = {}
queries["neither"] = ("//w:p[(count(descendant::w:i) = 0) and "
"(count(descendant::w:t) = 0)]")
queries["no_italic"] = "//w:p[count(descendant::w:i) = 0]"
queries["no_text"] = "//w:p[count(descendant::w:t) = 0]"
# Para without text and italic
for key, query in queries.items():
with self.subTest(query=key):
find = self.input.xpaths.get(query)
para = find(self.input.tree)[0]
with self.assertRaises(expected_exception) as fail:
_ = paragraphs.PreProcessed(para)
self.assertSubstringsInString(expected_substrings,
str(fail.exception))
def test_instantiation_wrong_arg_OTHER(self):
expected_exception = exceptions.RecomposeError
expected_substrings = "element is not a paragraph w:p".split()
# Wrong element
query = "//w:rPr"
find = self.input.xpaths.get(query)
para = find(self.input.tree)[0]
expected_substrings.append(para.xpath("name()"))
with self.assertRaises(expected_exception) as fail:
_ = paragraphs.PreProcessed(para)
self.assertSubstringsInString(expected_substrings,
str(fail.exception))
def test_instantiation_wrong_arg_PATTERN(self):
# While memoizing at a class level can be controlled with class methods,
# the class is generally only going to be instanced by elements of the
# same nsmap. In our tests some w:p nodes have different URIs: one set
# by Microsoft in their DOCX-derived XML and one by me for the XML
# stubs. So when instancing from source elements with differing nsmaps,
# we lobotomize the memoized xpaths. It's only for testing anyway, right?
try:
old_xpaths = paragraphs.PreProcessed._xpaths
paragraphs.PreProcessed._xpaths = None
self.assertIsNone(paragraphs.PreProcessed._xpaths, msg="Precondition!")
expected_exception = exceptions.ParagraphItalicPatternWarning
# Bad italic pattern
para = self.italic_interrupted_sequence_raises()
with self.assertRaises(expected_exception):
_ = paragraphs.PreProcessed(para)
finally:
paragraphs.PreProcessed._xpaths = old_xpaths
def test_has_attrs(self):
iter_para = self.input.iter_paragraphs()
para = next(iter_para)
attrs = ["pre_italic", "italic", "post_italic", "xpaths"]
pre = paragraphs.PreProcessed(para)
for attr in attrs:
with self.subTest(attr_name=attr):
self.assertHasAttr(pre, attr)
def test_xpath_attr_type(self):
from helpers import xml
iter_para = self.input.iter_paragraphs()
para = next(iter_para)
pre = paragraphs.PreProcessed(para)
self.assertIsInstance(pre.xpaths, xml.XPaths)
def test_xpath_attr_identity(self):
paras = itertools.islice(self.input.iter_paragraphs(), 2)
pre0, pre1, *_ = [paragraphs.PreProcessed(p) for p in paras]
self.assertIsNot(pre0.xpaths, self.input.xpaths)
self.assertIs(pre0.xpaths, pre1.xpaths)
def test_xpaths_attr_shared_by_instances(self):
iter_para = self.input.iter_paragraphs()
pre0 = paragraphs.PreProcessed(next(iter_para))
pre1 = paragraphs.PreProcessed(next(iter_para))
some_query = "string()" # May have been used by other test?
# Precondition
self.assertNotIn(some_query, pre0.xpaths)
self.assertNotIn(some_query, pre1.xpaths)
this_finder = pre0.xpaths.get(some_query)
self.assertIn(some_query, pre0.xpaths)
self.assertIn(some_query, pre1.xpaths)
other_finder = pre1.xpaths.get(some_query)
self.assertIs(this_finder, other_finder)
def test_attrs_substrings(self):
iter_para = self.input.iter_paragraphs()
para = next(iter_para)
pre = paragraphs.PreProcessed(para)
with self.subTest(attr_name="pre_italic"):
self.assertIsInstance(pre.pre_italic, str)
self.assertGreaterEqual(len(pre.pre_italic), 1)
self.assertGreaterEqual(pre.pre_italic.count(","), 1)
self.assertTrue(pre.pre_italic.endswith(","))
self.assertEqual(len(pre.pre_italic), len(pre.pre_italic.strip()))
with self.subTest(attr_name="post_italic"):
self.assertIsInstance(pre.post_italic, str)
self.assertGreaterEqual(len(pre.post_italic), 1)
self.assertEqual(pre.post_italic.lower().count("isbn"), 1,
msg=pre.post_italic)
self.assertGreaterEqual(pre.post_italic.count("."), 4)
self.assertTrue(pre.post_italic.endswith("."))
self.assertEqual(len(pre.post_italic), len(pre.post_italic.strip()))
with self.subTest(attr_name="italic"):
self.assertIsInstance(pre.italic, str)
self.assertGreaterEqual(len(pre.italic), 1)
self.assertGreaterEqual(pre.italic.count("."), 1)
self.assertTrue(pre.italic.endswith("."))
self.assertEqual(len(pre.italic), len(pre.italic.strip()))
def test_str_dunder(self):
text_file = self.text_filename
with open(text_file) as handle:
lines = handle.read().splitlines()
lines = [l.strip() for l in lines if not l.isspace() if l]
paras = list(self.input.iter_paragraphs())
self.assertEqual(len(paras), len(lines)) # Precondition
for i, (para, line) in enumerate(zip(paras, lines), start=1):
with self.subTest(para_number=i):
pre = paragraphs.PreProcessed(para)
pre_str = str(pre)
with self.subTest(property="length"):
self.assertEqual(len(pre_str), len(line))
with self.subTest(property="case_insensitive"):
self.assertEqual(pre_str.lower(), line.lower())
def test_generate_italic_pattern(self):
data = {(False, True, False): self.italic_correct_sequence,
(False, True, False, True, False): self.italic_interrupted_sequence_raises
}
method = paragraphs.PreProcessed._get_italic_pattern
for expected, xmlfunc in data.items():
with self.subTest(pattern=xmlfunc.__name__):
result = method(xmlfunc(), _memoize=False)
self.assertEqual(result, expected)
def test_validate_method_passes_correct_italic_pattern(self):
funcs = [self.italic_correct_sequence,
self.italic_correct_sequence_with_small_caps,
self.italic_correct_sequence_longer]
method = paragraphs.PreProcessed._is_valid_italic_pattern
funcs = {f: f.__name__ for f in funcs}
for xml_func, name in funcs.items():
with self.subTest(xml_type=name):
flag = method(xml_func(), _memoize=False)
self.assertTrue(flag)
def test_validate_method_fails_incorrect_italic_patterns(self):
funcs = [self.italic_interrupted_sequence_raises,
self.italic_interrupted_sequence_longer_raises,
self.italic_inverted_sequence_raises,
self.italic_NO_PRE_sequence_raises,
self.italic_NO_POST_sequence_raises,
self.italic_NO_PRE_NO_POST_sequence_raises]
funcs = {f: f.__name__ for f in funcs}
method = paragraphs.PreProcessed._is_valid_italic_pattern
expected_exception = exceptions.ParagraphItalicPatternWarning
expected_substrings = ["paragraph", "has", "Pattern",
"found", "one italic section",
"two non-italic sections"]
for xml_func, name in funcs.items():
xml = xml_func()
with self.subTest(xml_type=name, fatal=True):
with self.assertRaises(expected_exception) as fail:
method(xml, fatal=True, _memoize=False)
errmsg = str(fail.exception)
self.assertSubstringsInString(expected_substrings, errmsg)
with self.subTest(xml_type=name, fatal=False):
flag = method(xml, fatal=False, _memoize=False)
self.assertFalse(flag)
def test_validate_method_raises_exception_with_detail(self):
funcs = [self.italic_inverted_sequence_raises,
self.italic_interrupted_sequence_longer_raises]
funcs = {f: f.__name__ for f in funcs}
expected_detail = "italic, non-italic, italic"
method = paragraphs.PreProcessed._is_valid_italic_pattern
expected_exception = exceptions.ParagraphItalicPatternWarning
for xml_func, name in funcs.items():
xml = xml_func()
with self.subTest(xml_type=name, fatal=True):
with self.assertRaises(expected_exception) as fail:
method(xml, fatal=True, _memoize=False)
self.assertIn(expected_detail, str(fail.exception))
def test_validate_method_exception_detail_includes_offending_text(self):
funcs = [self.italic_correct_sequence_longer,
self.italic_interrupted_sequence_longer_raises,
self.italic_interrupted_sequence_with_whitespace_raises]
expected = [((False, 'Pre Text 1Pre Text 2Pre Text 3'),
(True, 'Italic Text 1Italic Text 2'),
(False, 'Post Text 1Post Text 2Post Text 3')),
# func1 result
((False, 'Pre Text 1Pre Text 2Pre Text 3'),
(True, 'First Italic Text 1First Italic Text 2'),
(False, 'Interupted Not Italic 1Interupted Not Italic 2'),
(True, 'Second Italic Text 1Second Italic Text 2Second Italic Text 3'),
(False, 'Post Text 1Post Text 2')),
# func2 result
((False, 'Pre Text 1Pre Text 2Pre Text 3'),
(True, 'First Italic Text 1First Italic Text 2'),
(False, 'Interupted Not Italic 1Interupted Not Italic 2'),
(True, ' '),
(False, 'Post Text 1Post Text 2'))
# func3 result
]
details = [tuple((f"italic: {s}" for b, s in t if b)) for t in expected]
funcs = {f: (f.__name__, exp, d) for f, exp, d in zip(funcs, expected, details)}
method_grouper = paragraphs.PreProcessed._group_contiguous_text_by_font
method_is_valid = paragraphs.PreProcessed._is_valid_italic_pattern
expected_exception = exceptions.ParagraphItalicPatternWarning
whitespace_generic = "# SPACE! "
whitespace_repl = chr(9251) # OPEN BOX symbol
whitespace_specific = f"...Not Italic 2{whitespace_repl}Post Text..."
substrings = [whitespace_generic,
whitespace_repl,
whitespace_specific]
for xml_func, (name, expect_res, detail) in funcs.items():
xml = xml_func()
with self.subTest(xml_type=name, fatal=True):
actual_res = method_grouper(xml, _memoize=False)
# Test1 : Verify grouping works.
self.check_grouped_italic_strings(actual_res, expect_res)
# Test2 : Verify exceptions raised better detail.
try:
method_is_valid(xml, fatal=True, _memoize=False)
except expected_exception as err:
error = str(err)
else:
error = ""
if error:
msg = f"{name} should not have raised an exception!"
self.assertIn("raise", name, msg=msg)
self.assertSubstringsInString(detail, error)
else:
msg = f"{name} should not have passed!"
self.assertIn("correct", name, msg=msg)
continue
# Test3 : Verify whitespace annotated.
if error and "whitespace" in name:
self.assertSubstringsInString(substrings, error)
else:
msg = f"{name} skipped a test!"
self.assertNotIn("whitespace", name, msg=msg)
def check_grouped_italic_strings(self, actual_res, expect_res):
self.assertEqual(len(actual_res), len(expect_res), msg="Postcondition")
for pair in zip(actual_res, expect_res):
actual_tup, expect_tup = pair
self.assertTupleEqual(actual_tup, expect_tup)
def test_identify_substrings_method(self):
xml = self.italic_correct_sequence()
get_text = etree.XPath("//w:t/text()", namespaces=xml.nsmap)
exp_pre, exp_ital, exp_post = get_text(xml)
method = paragraphs.PreProcessed._identify_substrings
res_pre, res_ital, res_post = method(xml, _memoize=False)
with self.subTest(section="pre"):
self.assertEqual(exp_pre, res_pre)
with self.subTest(section="italic"):
self.assertEqual(exp_ital, res_ital)
with self.subTest(section="post"):
self.assertEqual(exp_post, res_post)
def test_identify_substrings_method_interprets_smallCaps_tag(self):
xml = self.italic_correct_sequence_with_small_caps()
get_text = etree.XPath("//w:t/text()", namespaces=xml.nsmap)
*_, exp_post = get_text(xml)
method = paragraphs.PreProcessed._identify_substrings
self.assertFalse(exp_post.isupper(), msg="Precondition")
*_, res_post = method(xml, _memoize=False)
self.assertTrue(res_post.isupper())
self.assertEqual(exp_post.upper(), res_post)
def italic_correct_sequence(self):
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_correct_sequence_longer(self):
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 2</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 3</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text 1</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text 2</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text 2</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text 3</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_correct_sequence_with_small_caps(self):
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text</w:t>
</w:r>
<w:r>
<w:rPr><w:smallCaps/></w:rPr>
<w:t>isbn</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_interrupted_sequence_raises(self):
# XML deliberately has an interrupted italic subsequence.
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Interupted Not Italic</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>More Italic Text</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_interrupted_sequence_longer_raises(self):
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 2</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 3</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>First Italic Text 1</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>First Italic Text 2</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Interupted Not Italic 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Interupted Not Italic 2</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Second Italic Text 1</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Second Italic Text 2</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Second Italic Text 3</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text 2</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_interrupted_sequence_with_whitespace_raises(self):
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 2</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text 3</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>First Italic Text 1</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>First Italic Text 2</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Interupted Not Italic 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Interupted Not Italic 2</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t> </w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text 1</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text 2</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_inverted_sequence_raises(self):
# XML deliberately has italic then no italic then italic again.
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Pre Text</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Italic Text</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Post Text</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_NO_PRE_sequence_raises(self):
# XML deliberately too short
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text</w:t>
</w:r>
<w:r>
<w:rPr></w:rPr>
<w:t>Post Text</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_NO_POST_sequence_raises(self):
# XML deliberately too short
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr></w:rPr>
<w:t>Pre Text</w:t>
</w:r>
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
def italic_NO_PRE_NO_POST_sequence_raises(self):
# XML deliberately too short
xml_str = """<w:p xmlns:w="http://google.com">
<w:r>
<w:rPr><w:i/></w:rPr>
<w:t>Italic Text</w:t>
</w:r></w:p>
"""
root = etree.fromstring(xml_str)
return root
class Test_Paragraph_ShorteningFunc(ParagraphsTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
paras_xml = list(cls.input.iter_paragraphs())[:5]
paras_preproc = list(map(helpers.paragraphs.PreProcessed, paras_xml))
cls.args = ["Lorem ipsum dolor sit amet, consectetur adipisicing elit"]
cls.args.extend(paras_xml)
cls.args.extend(paras_preproc)
def test_func_with_different_args(self):
length = 30
func = helpers.paragraphs.get_paragraph_head
for arg in self.args:
with self.subTest(type=type(arg)):
result = func(arg, length)
self.assertIsInstance(result, str)
def test_func_shortens_string(self):
desired_length = 30
func = helpers.paragraphs.get_paragraph_head
for arg in self.args:
with self.subTest(type=type(arg)):
result = func(arg, desired_length)
self.assertLengthInRange(result,
min=1,
max=desired_length)
def test_func_kwargs_bullet(self):
func = helpers.paragraphs.get_paragraph_head
desired_length = 30
expected_bullet_star = "* )"
expected_bullet_number = "{i:02d})"
for i, arg in enumerate(self.args):
with self.subTest(type=type(arg), bullet="*"):
result = func(arg, desired_length, bullet=True)
self.assertIn(expected_bullet_star, result)
with self.subTest(type=type(arg), bullet="int"):
result = func(arg, desired_length, bullet_num=i)
self.assertIn(str(i), result)
self.assertIn(expected_bullet_number.format(i=i), result)
def test_adds_ellipsis(self):
desired_length = 30
string = ("Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
"sed do eiusmod tempor incididunt")
ellipsis = "..."
func = helpers.paragraphs.get_paragraph_head
self.assertGreater(len(string), desired_length, msg="Precondition")
result = func(string, desired_length)
self.assertIn(ellipsis, result)
self.assertTrue(result.endswith(ellipsis))
def test_func_wrapped_as_partial(self):
desired_length = 30
bullet_number = 7
expected_bullet = f"{bullet_number:02d})"
ellipsis = "..."
string = ("Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
"sed do eiusmod tempor incididunt")
self.assertGreater(len(string), desired_length, msg="Precondition")
func = helpers.paragraphs.get_paragraph_head
curried_func = functools.partial(func, string, desired_length,
bullet_num=bullet_number)
# Curried is callable?
try:
result = curried_func()
except TypeError as err:
error = err
else:
error = None
# Curried result is as expected
self.assertIsNone(error)
self.assertIsInstance(result, str)
self.assertLengthInRange(result, min=1, max=desired_length)
self.assertTrue(result.startswith(expected_bullet))
self.assertTrue(result.endswith(ellipsis))
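
# For orientation, a hedged sketch of the helper pinned down by the tests
# above. This is an assumption, not the real helpers.paragraphs code: the
# genuine helper also extracts text from XML elements and PreProcessed
# objects, whereas this sketch just stringifies its argument.
def _example_get_paragraph_head(source, maxlength, bullet=False, bullet_num=None):
    text = str(source)
    prefix = ''
    if bullet_num is not None:
        prefix = '%02d) ' % bullet_num  # numbered bullet, e.g. '07) '
    elif bullet:
        prefix = '* ) '
    budget = maxlength - len(prefix)
    if len(text) > budget:
        text = text[:budget - 3] + '...'  # truncate and mark the cut
    return prefix + text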
@patch("helpers.paragraphs.PreProcessed.is_valid_italic_pattern")
class Test_ProcessParagraphs_Function(ParagraphsTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.preprocess_exc = exceptions.ParagraphItalicPatternWarning
cls.iter_paragraphs = list(cls.input.iter_paragraphs())
cls.default_log_filename = pkg_logging.default_log_filename()
cls.error_mock_detail = "*** mocked detail ***".upper()
def tearDown(self):
pkg_logging.finish_logging()
if os.path.exists(self.default_log_filename):
os.remove(self.default_log_filename)
def random_fail(self, *args, **kwargs):
fatal = kwargs["fatal"]
choice = random.choice([True, False])
return self._failing_func(choice, fatal)
def must_fail(self, *args, **kwargs):
fatal = kwargs["fatal"]
choice = False
return self._failing_func(choice, fatal)
def _failing_func(self, choice, fatal):
if choice:
return choice
else:
if fatal:
detail = self.error_mock_detail
raise self.preprocess_exc(detail=detail)
else:
return choice
def test_mocked_PreProcessed_mock_side_effect1(self, mock_preprocessed_method=None):
mock_preprocessed_method.side_effect = self.random_fail
para_elements = self.iter_paragraphs
count = 0
exceptions = []
for para_elem in para_elements:
try:
helpers.paragraphs.PreProcessed(para_elem)
except self.preprocess_exc:
count += 1
except Exception as err:
exceptions.append(err.__class__.__name__)
else:
count += 1
self.assertFalse(len(exceptions), msg=", ".join(set(exceptions)))
self.assertEqual(len(para_elements), count)
def test_mocked_PreProcessed_mock_side_effect2(self, mock_preprocessed_method=None):
mock_preprocessed_method.side_effect = self.must_fail
para_elements = self.iter_paragraphs
err_count = 0
success_count = 0
exceptions = []
for para_elem in para_elements:
try:
helpers.paragraphs.PreProcessed(para_elem)
except self.preprocess_exc:
err_count += 1
except Exception as err:
exceptions.append(err.__class__.__name__)
else:
success_count += 1
self.assertFalse(len(exceptions), msg=", ".join(set(exceptions)))
self.assertEqual(err_count, len(para_elements))
self.assertEqual(success_count, 0)
def test_process_paragraphs_handles_warnings(self, mock_preprocessed_method):
mock_preprocessed_method.side_effect = self.must_fail
para_elements = self.iter_paragraphs
handled_exceptions = (exceptions.RecomposeWarning, )
pkg_logging.setup_logging()
isHandled = False
hasUnexpectedError = None
try:
helpers.paragraphs.process_paragraphs(para_elements)
except handled_exceptions:
isHandled = False
except Exception as err:
isHandled = False
hasUnexpectedError = err
else:
isHandled = True
self.assertIsNone(hasUnexpectedError)
self.assertTrue(isHandled,
msg=(f"Exception of type {handled_exceptions} "
"should have been handled."))
def test_process_paragraphs_logs(self, mock_preprocessed_method):
mock_preprocessed_method.side_effect = self.must_fail
para_elements = self.iter_paragraphs
pkg_logging.setup_logging()
with self.subTest(logging="in general"):
pkg_logger = pkg_logging.getLogger()
with self.assertLogs(logger=pkg_logger.logger):
helpers.paragraphs.process_paragraphs(para_elements)
with self.subTest(logging="quantative"):
# Capture logging to stream and file
with testfixtures.OutputCapture() as stream:
helpers.paragraphs.process_paragraphs(para_elements)
stream_contents = stream.captured.strip()
with open(self.default_log_filename) as handle:
logfile_contents = handle.read().splitlines()
# Test logging to stdout/stderr: expect nothing
self.assertEqual(stream_contents, "")
# Test logging to logfile: expect n warning lines
filtered_contents = [l for l in logfile_contents if "WARNING" in l]
self.assertLengthInRange(filtered_contents,
min=len(para_elements),
max=len(para_elements) + 1)
# Test logging to logfile: expect things - prelog + warning
self.assertLengthInRange(logfile_contents,
min=len(para_elements) * 2,
max=len(para_elements) * 2 + 1)
def test_process_paragraphs_log_messages_as_expected(self, mock_preprocessed_method):
mock_preprocessed_method.side_effect = self.must_fail
para_elems_zerothonly = list(itertools.islice(self.iter_paragraphs, 1))
para = para_elems_zerothonly[0]
helpers.paragraphs.process_paragraphs(para_elems_zerothonly)
with open(self.default_log_filename) as handle:
logfile_contents = handle.read().splitlines()
self.assertLengthInRange(logfile_contents, min=2, max=2)
line_0, line_1 = logfile_contents
with self.subTest(line="prelog line"):
level = "INFO"
max_length = 30
raw_detail = para.xpath("string()")[:max_length]
func = helpers.paragraphs.get_paragraph_head
expected_detail = func(para, max_length, bullet_num=1)
expected = [expected_detail, level]
self.assertSubstringsInString(expected, line_0,
msg=f"line='{line_0}'")
self.assertStringsSimilar(raw_detail, expected_detail, 0.5)
line_0_substring = line_0.split(level)[1]
self.assertStringsSimilar(raw_detail, line_0_substring, 0.3)
with self.subTest(line="error line"):
level = "WARNING"
expected = [self.error_mock_detail, level, "italic"]
self.assertSubstringsInString(expected, line_1,
msg=f"line='{line_1}'")
if __name__ == '__main__':
unittest.main()
| 38.403162 | 190 | 0.571206 | 6,501 | 58,296 | 4.945239 | 0.102138 | 0.006408 | 0.009145 | 0.009145 | 0.574668 | 0.50944 | 0.447728 | 0.394102 | 0.356901 | 0.344023 | 0 | 0.011594 | 0.320914 | 58,296 | 1,517 | 191 | 38.428477 | 0.800419 | 0.037481 | 0 | 0.48562 | 0 | 0.000822 | 0.189355 | 0.012402 | 0 | 0 | 0 | 0 | 0.132293 | 1 | 0.0682 | false | 0.002465 | 0.013147 | 0 | 0.098603 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
946b8d6d8025c10201a0e997dc4dab9b8be0198b | 2,911 | py | Python | geode/utility/json_conversion.py | jjqcat/geode | 157cc904c113cc5e29a1ffe7c091a83b8ec2cf8e | [
"BSD-3-Clause"
] | 75 | 2015-02-08T22:04:31.000Z | 2022-02-26T14:31:43.000Z | geode/utility/json_conversion.py | girving/geode | 4267bd8d0c1a768ee0b6f57a40a551d8c0965178 | [
"BSD-3-Clause"
] | 15 | 2015-01-08T15:11:38.000Z | 2021-09-05T13:27:22.000Z | geode/utility/json_conversion.py | girving/geode | 4267bd8d0c1a768ee0b6f57a40a551d8c0965178 | [
"BSD-3-Clause"
] | 22 | 2015-03-11T16:43:13.000Z | 2021-02-15T09:37:51.000Z | import json
from numpy import *
from geode import *


def from_ndarray(v, typ=float):
    # List comprehensions (rather than bare map) keep the result
    # JSON-serializable on Python 3 as well.
    return [typ(x) for x in v.flatten()]


def from_array(v, typ=float):
    return [typ(x) for x in v]


to_json_fn = {}
from_json_fn = {}

from_json_fn['int'] = lambda v: int(v)
from_json_fn['real'] = lambda v: real(v)
from_json_fn['float'] = lambda v: float(v)
from_json_fn['string'] = lambda v: str(v)
from_json_fn['bool'] = lambda v: bool(v)
from_json_fn['ndarray'] = lambda v: array(v)
from_json_fn['mat22'] = lambda v: Matrix(array(v).reshape(2, 2))
from_json_fn['mat33'] = lambda v: Matrix(array(v).reshape(3, 3))
from_json_fn['mat44'] = lambda v: Matrix(array(v).reshape(4, 4))
from_json_fn['frame2'] = lambda v: Frames(v['t'], Rotation.from_sv(array(v['r'])))
from_json_fn['frame3'] = lambda v: Frames(v['t'], Rotation.from_sv(array(v['r'])))
from_json_fn['box2'] = from_json_fn['box3'] = lambda v: Box(v['min'], v['max'])
from_json_fn['TriangleSoup'] = from_json_fn['SegmentSoup'] = lambda v: v
from_json_fn['dict'] = lambda v: v

to_json_fn[dict] = lambda v: {'t': 'dict', 'v': v}
to_json_fn[int] = lambda v: {'t': 'int', 'v': v}
to_json_fn[real] = lambda v: {'t': 'real', 'v': v}
to_json_fn[float] = lambda v: {'t': 'float', 'v': v}
to_json_fn[str] = lambda v: {'t': 'string', 'v': v}
to_json_fn[bool] = lambda v: {'t': 'bool', 'v': v}
to_json_fn[Box2d] = to_json_fn[Box3d] = lambda v: {
    't': 'box%s' % len(v.min),
    'v': {
        'min': from_array(v.min),
        'max': from_array(v.max)
    }
}
to_json_fn[list] = lambda v: {
    't': 'list',
    'v': v  # let's hope this works on the client...
}
to_json_fn[ndarray] = lambda v: {
    't': 'ndarray',
    'v': {
        'shape': v.shape,
        'data': from_ndarray(v)
    }
}
to_json_fn[Matrix] = lambda v: {
    't': 'mat%s%s' % (len(v), len(v[0])),
    'v': from_ndarray(v)
}
to_json_fn[Frames] = lambda v: {
    't': 'frame%s' % (len(v.t)),
    'v': {
        't': [float(x) for x in v.t],
        'r': [float(x) for x in v.r.sv]
    }
}
to_json_fn[TriangleSoup] = lambda v: {
    't': 'TriangleSoup',
    'v': from_ndarray(v.elements, int)
}
to_json_fn[SegmentSoup] = lambda v: {
    't': 'SegmentSoup',
    'v': from_ndarray(v.elements, int)
}
to_json_fn[MutableTriangleTopology] = lambda v: {
    't': 'TriangleTopology',
    'v': {
        'vertices': from_ndarray(v.vertex_field(vertex_position_id)),
        'elements': from_ndarray(v.elements(), int)
    }
}


def to_json(v):
    fn = to_json_fn.get(type(v), None)
    if callable(fn):
        return fn(v)
    else:
        raise TypeError("Don't know how to transcribe type %s to json." % type(v))


def to_json_string(v):
    return json.dumps(to_json(v), allow_nan=False, separators=(',', ':'))


def from_json(d):
    fn = from_json_fn.get(d['t'])
    return fn(d['v']) if callable(fn) else None


def from_json_string(s):
    return from_json(json.loads(s))


def register(typ, name, to_fn, from_fn):
    to_json_fn[typ] = to_fn
    from_json_fn[name] = from_fn
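
# A hedged usage sketch of the registry above (not part of the original
# module); the wire format is inferred from to_json/to_json_string:
if __name__ == '__main__':
    payload = to_json_string(42)   # e.g. '{"t":"int","v":42}'
    assert from_json_string(payload) == 42
    # Custom types plug into the same registry:
    # register(MyType, 'MyType', my_to_fn, my_from_fn)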
| 25.761062 | 82 | 0.618344 | 497 | 2,911 | 3.416499 | 0.183099 | 0.130742 | 0.111896 | 0.053004 | 0.383981 | 0.187279 | 0.117786 | 0.091873 | 0.091873 | 0.054181 | 0 | 0.007887 | 0.172449 | 2,911 | 112 | 83 | 25.991071 | 0.69697 | 0.013054 | 0 | 0.067416 | 0 | 0 | 0.106931 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078652 | false | 0 | 0.033708 | 0.044944 | 0.179775 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
946e1bad96b68d54c90ade3c0e3ea528c85c1987 | 2,350 | py | Python | tibanna_4dn/core.py | 4dn-dcic/tibanna_ff | 6fcfc056b832c14500e525207afeb5722f366a26 | [
"MIT"
] | 2 | 2019-10-08T17:36:02.000Z | 2019-10-08T18:42:05.000Z | tibanna_4dn/core.py | 4dn-dcic/tibanna_ff | 6fcfc056b832c14500e525207afeb5722f366a26 | [
"MIT"
] | null | null | null | tibanna_4dn/core.py | 4dn-dcic/tibanna_ff | 6fcfc056b832c14500e525207afeb5722f366a26 | [
"MIT"
] | null | null | null | from tibanna_ffcommon.core import API as _API
from .stepfunction import StepFunctionPony
from .stepfunction_cost_updater import StepFunctionCostUpdater
from .vars import (
TIBANNA_DEFAULT_STEP_FUNCTION_NAME,
LAMBDA_TYPE,
IAM_BUCKETS,
DEV_ENV,
PROD_ENV
)
class API(_API):
# This one cannot be imported in advance, because it causes circular import.
# lambdas run_workflow / validate_md5_s3_initiator needs to import this API
# to call run_workflow
@property
def lambdas_module(self):
from . import lambdas as pony_lambdas
return pony_lambdas
@property
def tibanna_packages(self):
import tibanna
import tibanna_ffcommon
import tibanna_4dn
return [tibanna, tibanna_ffcommon, tibanna_4dn]
StepFunction = StepFunctionPony
StepFunctionCU = StepFunctionCostUpdater
default_stepfunction_name = TIBANNA_DEFAULT_STEP_FUNCTION_NAME
default_env = DEV_ENV
sfn_type = LAMBDA_TYPE
lambda_type = LAMBDA_TYPE
@property
def IAM(self):
from .iam_utils import IAM
return IAM
def __init__(self):
pass
def deploy_core(self, name, suffix=None, usergroup='', subnets=None, security_groups=None,
env=None, quiet=False):
if env:
usergroup = env + '_' + usergroup if usergroup else env
else:
if usergroup:
env = DEV_ENV
else:
env = PROD_ENV
super().deploy_core(name=name, suffix=suffix, usergroup=usergroup, subnets=subnets,
security_groups=security_groups, quiet=quiet)
def deploy_pony(self, suffix=None, usergroup='', subnets=None, security_groups=None, env=None, deploy_costupdater=True):
if env:
usergroup = env + '_' + usergroup if usergroup else env
else:
if usergroup:
env = DEV_ENV
else:
env = PROD_ENV
self.deploy_tibanna(suffix=suffix, usergroup=usergroup, setup=True, default_usergroup_tag='',
do_not_delete_public_access_block=True, no_randomize=True,
buckets=','.join(IAM_BUCKETS(env)), deploy_costupdater=deploy_costupdater,
subnets=subnets, security_groups=security_groups)
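# Illustrative usage (an assumption, not part of the original module; it also
# presupposes AWS credentials and tibanna configuration being in place):
#
#   api = API()
#   api.deploy_pony(usergroup='mygroup', env='fourfront-webdev')
#   # -> effective usergroup becomes 'fourfront-webdev_mygroup'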
| 34.558824 | 124 | 0.648936 | 263 | 2,350 | 5.528517 | 0.319392 | 0.057772 | 0.020633 | 0.035763 | 0.310867 | 0.246217 | 0.188446 | 0.188446 | 0.188446 | 0.188446 | 0 | 0.002387 | 0.286809 | 2,350 | 67 | 125 | 35.074627 | 0.865155 | 0.071915 | 0 | 0.303571 | 0 | 0 | 0.001378 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0.017857 | 0.160714 | 0 | 0.446429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
946e744db067216acca7174cc9413bd1c9de27ef | 1,262 | py | Python | motor.py | fehlfarbe/vorpal-hexabot-micropython | 129eac0018f291545efa304f9202dded315fba93 | [
"MIT"
] | null | null | null | motor.py | fehlfarbe/vorpal-hexabot-micropython | 129eac0018f291545efa304f9202dded315fba93 | [
"MIT"
] | null | null | null | motor.py | fehlfarbe/vorpal-hexabot-micropython | 129eac0018f291545efa304f9202dded315fba93 | [
"MIT"
] | null | null | null | import pca9685
_DC_MOTORS = ((8, 9, 10), (13, 12, 11), (2, 3, 4), (7, 6, 5))
class DCMotors:
def __init__(self, i2c, address=0x40, freq=1600):
self.pca9685 = pca9685.PCA9685(i2c, address)
self.pca9685.freq(freq)
def _pin(self, pin, value=None):
if value is None:
return bool(self.pca9685.pwm(pin)[0])
if value:
self.pca9685.pwm(pin, 4096, 0)
else:
self.pca9685.pwm(pin, 0, 0)
def speed(self, index, value=None):
pwm, in2, in1 = _DC_MOTORS[index]
if value is None:
value = self.pca9685.duty(pwm)
if self._pin(in2) and not self._pin(in1):
value = -value
return value
if value > 0:
# Forward
self._pin(in2, False)
self._pin(in1, True)
elif value < 0:
# Backward
self._pin(in1, False)
self._pin(in2, True)
else:
# Release
self._pin(in1, False)
self._pin(in2, False)
self.pca9685.duty(pwm, abs(value))
def brake(self, index):
pwm, in2, in1 = _DC_MOTORS[index]
self._pin(in1, True)
self._pin(in2, True)
self.pca9685.duty(pwm, 0)
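# Minimal usage sketch (illustrative only; the I2C pin numbers are
# board-specific assumptions):
#
#   from machine import I2C, Pin
#   i2c = I2C(scl=Pin(22), sda=Pin(21))
#   motors = DCMotors(i2c)
#   motors.speed(0, 2000)    # positive duty -> forward
#   motors.speed(0, -2000)   # negative duty -> backward
#   motors.brake(0)          # short-brake channel 0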
| 27.434783 | 61 | 0.515055 | 165 | 1,262 | 3.812121 | 0.315152 | 0.122417 | 0.079491 | 0.081081 | 0.251192 | 0.149444 | 0.079491 | 0 | 0 | 0 | 0 | 0.116481 | 0.360539 | 1,262 | 45 | 62 | 28.044444 | 0.662949 | 0.019017 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003241 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.028571 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
946fcddeae45422d84791902026dee2a591bd902 | 978 | py | Python | chargen.py | celestian/chargen | 7e7be23ea10ae5734c5d6d3a30ff66afdc7a6845 | [
"MIT"
] | 1 | 2018-04-23T00:45:24.000Z | 2018-04-23T00:45:24.000Z | chargen.py | celestian/chargen | 7e7be23ea10ae5734c5d6d3a30ff66afdc7a6845 | [
"MIT"
] | null | null | null | chargen.py | celestian/chargen | 7e7be23ea10ae5734c5d6d3a30ff66afdc7a6845 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*-coding:utf-8-*-
"""chargen
Usage:
chargen.py start project <project>
chargen.py add template <template_file> into <project>
chargen.py (-h | --help)
chargen.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import core.projects
def start_project(project_name):
recent_projects = core.projects.Projects()
recent_projects.add_project(project_name)
def add_template_into_project(project_name, template_file):
recent_projects = core.projects.Projects()
recent_projects.add_template_into_project(project_name, template_file)
def main(args):
if args['start'] and args['project']:
start_project(args['<project>'])
if args['add'] and args['template'] and args['into']:
add_template_into_project(args['<project>'], args['<template_file>'])
if __name__ == '__main__':
args = docopt(__doc__, version='chargen 0.0.1')
main(args)
| 23.285714 | 77 | 0.701431 | 129 | 978 | 5.03876 | 0.294574 | 0.107692 | 0.110769 | 0.101538 | 0.290769 | 0.290769 | 0.290769 | 0.290769 | 0 | 0 | 0 | 0.006083 | 0.159509 | 978 | 41 | 78 | 23.853659 | 0.784672 | 0.278119 | 0 | 0.125 | 0 | 0 | 0.116046 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9472f1d45c073a911013e85819b6ce785223389f | 2,092 | py | Python | orangecontrib/recommendation/rating/base_rating.py | robertcv/orange3-recommendation | db421c32f85f123b1f3058865438df1b996772cd | [
"BSD-2-Clause"
] | 22 | 2016-09-11T11:40:17.000Z | 2019-07-27T21:45:21.000Z | orangecontrib/recommendation/rating/base_rating.py | robertcv/orange3-recommendation | db421c32f85f123b1f3058865438df1b996772cd | [
"BSD-2-Clause"
] | 14 | 2016-08-16T22:19:31.000Z | 2020-12-17T00:03:34.000Z | orangecontrib/recommendation/rating/base_rating.py | robertcv/orange3-recommendation | db421c32f85f123b1f3058865438df1b996772cd | [
"BSD-2-Clause"
] | 19 | 2016-08-16T20:06:57.000Z | 2021-09-16T11:42:11.000Z | from orangecontrib.recommendation import Learner, Model
import numpy as np
__all__ = ["LearnerRecommendation", "ModelRecommendation"]
class ModelRecommendation(Model):
def predict_on_range(self, predictions):
# Just for modeling ratings with latent factors
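        # note: the finally-return below also swallows any exception raised in
        # the try block, so predictions is returned unchanged in that case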
try:
if self.min_rating is not None:
predictions[predictions < self.min_rating] = self.min_rating
if self.max_rating is not None:
predictions[predictions > self.max_rating] = self.max_rating
finally:
return predictions
def fix_predictions(self, X, predictions, bias):
idxs_users_missing, idxs_items_missing = self.indices_missing
# Set average when neither the user nor the item exist
g_avg = bias['globalAvg']
common_indices = np.intersect1d(idxs_users_missing, idxs_items_missing)
predictions[common_indices] = g_avg
# Only users exist (return average + {dUser})
if 'dUsers' in bias:
missing_users = np.setdiff1d(idxs_users_missing, common_indices)
if len(missing_users) > 0:
user_idxs = X[missing_users, self.order[0]]
predictions[missing_users] = g_avg + bias['dUsers'][user_idxs]
# Only items exist (return average + {dItem})
if 'dItems' in bias:
missing_items = np.setdiff1d(idxs_items_missing, common_indices)
if len(missing_items) > 0:
item_idxs = X[missing_items, self.order[1]]
predictions[missing_items] = g_avg + bias['dItems'][item_idxs]
return predictions
class LearnerRecommendation(Learner):
def __init__(self, preprocessors=None, verbose=False, min_rating=None,
max_rating=None):
self.min_rating = min_rating
self.max_rating = max_rating
super().__init__(preprocessors=preprocessors, verbose=verbose)
def prepare_model(self, model):
model.min_rating = self.min_rating
model.max_rating = self.max_rating
return super().prepare_model(model) | 36.701754 | 79 | 0.657744 | 246 | 2,092 | 5.325203 | 0.304878 | 0.054962 | 0.049618 | 0.043511 | 0.227481 | 0.160305 | 0.062595 | 0 | 0 | 0 | 0 | 0.004528 | 0.260994 | 2,092 | 57 | 80 | 36.701754 | 0.84282 | 0.08891 | 0 | 0.052632 | 0 | 0 | 0.038381 | 0.011041 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9473f79c4819abb3294b7e94567ee6d53a6be9c6 | 14,480 | py | Python | analyze-pictures/app.py | realnetworks-safr/python-examples | afa4ca89fb39ef4c19511565fcfe46f8dbf42dbf | [
"MIT"
] | 1 | 2020-09-15T15:36:07.000Z | 2020-09-15T15:36:07.000Z | analyze-pictures/app.py | realnetworks-safr/python-examples | afa4ca89fb39ef4c19511565fcfe46f8dbf42dbf | [
"MIT"
] | 12 | 2020-03-03T17:48:22.000Z | 2022-03-12T00:02:51.000Z | analyze-pictures/app.py | realnetworks-safr/python-examples | afa4ca89fb39ef4c19511565fcfe46f8dbf42dbf | [
"MIT"
] | 2 | 2019-08-16T13:01:05.000Z | 2020-06-14T15:04:57.000Z | #!/usr/bin/python3
from datetime import datetime
import os
import shutil
import logging
import base64
import requests
import json
import cv2 as cv
from PIL import Image, ImageEnhance
import pandas as pd
import numpy as np
import math
logging.basicConfig(filename='app.log', filemode='w',level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logging.getLogger().addHandler(logging.StreamHandler())
#change required
user_id = 'userid'
passwd = 'passwd'
BASE_URL = 'https://covi.int2.real.com{0}'
#change optional
DIRECTORY = 'python-test'
SITE='python-test'
SOURCE = 'python-test'
#folders - change only if necessary
SOURCE_PATH = 'source/'
NOK_PATH = 'nok/'
OK_PATH= 'ok/'
#Default values
DEFAULT_MIN_CENTER_POSE_QUALITY = 0.76
DEFAULT_MIN_SHARPNESS = 0.62
DEFAULT_MIN_CONTRAST = 0.63
DEFAULT_MIN_FACE_WIDTH = 210
DEFAULT_MIN_FACE_HEIGHT = 260
URL_RECOGNITION = BASE_URL.format('/people?')
#disabled registering/updating, only recognition is used
URL_RECOGNITION = URL_RECOGNITION + 'insert=false&update=false&merge=false®roup=false&insert-profile=false'
URL_RECOGNITION = URL_RECOGNITION + '&provide-face-id=false&differentiate=false'
#disabled detectors, decrease memory/cpu usage
URL_RECOGNITION = URL_RECOGNITION + '&detect-age=false&detect-gender=false&detect-sentiment=false'
URL_RECOGNITION = URL_RECOGNITION + '&detect-occlusion=true&min-size=0'
#disabled filter, allow anything
URL_RECOGNITION = URL_RECOGNITION + '&min-cpq=0&min-fsq=0&min-fcq=0&max-occlusion=0&type=person&include-expired=false'
URL_RECOGNITION = URL_RECOGNITION + '&site={}&source={}'.format(SITE, SOURCE)
count_success = 0
count_errors = 0
total = 0
def createHeader(user_id, password, directory):
enconding = 'utf-8'
encode_password = base64.b64encode(bytes(password, enconding)).decode(enconding)
header_auth = "{0}:{1}".format(user_id, encode_password)
return {
'Content-Type': 'application/octet-stream',
'X-RPC-AUTHORIZATION' : header_auth,
'X-RPC-DIRECTORY' : directory
}
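# Example of the resulting header (values are illustrative):
#   createHeader('user', 'pass', 'dir')
#   # -> {'Content-Type': 'application/octet-stream',
#   #     'X-RPC-AUTHORIZATION': 'user:cGFzcw==',
#   #     'X-RPC-DIRECTORY': 'dir'}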
def get_quality_params(personObj):
response = {}
attributes = personObj['attributes']
if 'centerPoseQuality' in attributes.keys():
response.update({'centerPoseQuality' : attributes['centerPoseQuality']})
if 'sharpnessQuality' in attributes.keys():
response.update({'sharpnessQuality' : attributes['sharpnessQuality']})
if 'contrastQuality' in attributes.keys():
response.update({'contrastQuality' : attributes['contrastQuality']})
return response
def get_dimension(personObj):
response = {}
attributes = personObj['attributes']
if 'dimension' in attributes.keys():
dimension = attributes['dimension']
response.update( {'height' : dimension['height']} )
response.update( {'width' : dimension['width']} )
return response
def get_image_points(points):
    #2D image points. If you change the image, you need to change this vector;
    #the chin point below is hard-coded rather than taken from the detected landmarks
return np.array([
(points['nose-tip-x'], points['nose-tip-y']), # Nose tip
(399, 561), # Chin
(points['left-eye-center-x'], points['left-eye-center-y']), # Left eye left corner
(points['right-eye-center-x'], points['right-eye-center-y']), # Right eye right corne
(points['left-mouth-corner-x'], points['left-mouth-corner-y']), # Left Mouth corner
(points['right-mouth-corner-x'], points['right-mouth-corner-y']) # Right mouth corner
], dtype="double")
def get_model_points():
# 3D model points.
return np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corne
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
def get_attributes(personObj):
response = {}
attributes = personObj['attributes']
response.update( {'occlusion' : None} )
if 'landmarks' in attributes.keys():
logging.debug('attributes {}'.format(attributes))
attributes = attributes['landmarks']
logging.debug('attributes {}'.format(attributes))
if 'right-eye-center' in attributes.keys():
attr = attributes['right-eye-center']
response.update( {'right-eye-center-x' : attr['x']} )
response.update( {'right-eye-center-y' : attr['y']} )
if 'left-eye-center' in attributes.keys():
attr = attributes['left-eye-center']
response.update( {'left-eye-center-x' : attr['x']} )
response.update( {'left-eye-center-y' : attr['y']} )
if 'nose-tip' in attributes.keys():
attr = attributes['nose-tip']
response.update( {'nose-tip-x' : attr['x']} )
response.update( {'nose-tip-y' : attr['y']} )
if 'right-mouth-corner' in attributes.keys():
attr = attributes['right-mouth-corner']
response.update( {'right-mouth-corner-x' : attr['x']} )
response.update( {'right-mouth-corner-y' : attr['y']} )
if 'left-mouth-corner' in attributes.keys():
attr = attributes['left-mouth-corner']
response.update( {'left-mouth-corner-x' : attr['x']} )
response.update( {'left-mouth-corner-y' : attr['y']} )
if 'occlusion' in attributes.keys():
response.update( {'occlusion' : attributes['occlusion']} )
return response
def get_roll_pitch_yaw(model_points, image_points, size):
focal_length = size[1]
center = (size[1]/2, size[0]/2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype = "double")
axis = np.float32([[500,0,0],
[0,500,0],
[0,0,500]])
dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, cv.SOLVEPNP_ITERATIVE)
imgpts, jac = cv.projectPoints(axis, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
modelpts, jac2 = cv.projectPoints(model_points, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
rvec_matrix = cv.Rodrigues(rotation_vector)[0]
proj_matrix = np.hstack((rvec_matrix, translation_vector))
eulerAngles = cv.decomposeProjectionMatrix(proj_matrix)[6]
(pitch, yaw, roll) = [math.radians(_) for _ in eulerAngles]
pitch = math.degrees(math.asin(math.sin(pitch)))
roll = -math.degrees(math.asin(math.sin(roll)))
yaw = math.degrees(math.asin(math.sin(yaw)))
return (pitch, yaw, roll)
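# Illustrative call (the `attributes` dict is an assumption here; in this
# script it comes from get_attributes() on a SAFR response):
#
#   image_points = get_image_points(attributes)
#   pitch, yaw, roll = get_roll_pitch_yaw(get_model_points(), image_points, im.shape)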
def submit_photo(sess, header, relative_file_path):
global count_errors
try:
with open(relative_file_path, 'rb') as upload_file:
with sess.post(URL_RECOGNITION.format(SITE, SOURCE), headers=header, data=upload_file) as response:
if response.status_code == 401:
raise Exception('Could not connect, check the credentials: {}:{} and the URL: {}'.format(user_id, passwd, URL_RECOGNITION))
if response.status_code == requests.codes.created:
logging.debug('response {}'.format(response))
response = response.json()['identifiedFaces']
logging.debug('json object body {}'.format(response))
if (bool(response) and len(response) != 0):
response = response[0]
                    if (len(response) == 0):  # nothing was detected in the file
                        count_errors = count_errors +1
                        logging.error('No face has been detected in file {}. Response as JSON object: {}. Moving file to {}'
                            .format(relative_file_path, response, NOK_PATH))
move_file(relative_file_path, NOK_PATH)
return response
except FileNotFoundError:
logging.error('Missing file {}'.format(relative_file_path))
return None
def move_file(target_file, new_path):
#create folder if it does not exist
if not os.path.exists(new_path):
os.mkdir(new_path)
moved_path = os.path.realpath(target_file)
new_file_path = os.path.realpath(target_file).replace(SOURCE_PATH, new_path)
shutil.move(moved_path, new_file_path)
def verify_params(relative_file_path, dimension, quality_params, occlusion):
global count_success
global count_errors
center_pose_quality = quality_params['centerPoseQuality']
sharpness = quality_params['sharpnessQuality']
contrast = quality_params['contrastQuality']
face_width= dimension['width']
face_height = dimension['height']
if (center_pose_quality < DEFAULT_MIN_CENTER_POSE_QUALITY or sharpness < DEFAULT_MIN_SHARPNESS or contrast < DEFAULT_MIN_CONTRAST
or face_width < DEFAULT_MIN_FACE_WIDTH or face_height < DEFAULT_MIN_FACE_HEIGHT):
#move to NOK folder
count_errors = count_errors +1
move_file(relative_file_path, NOK_PATH)
logging.info('File: {} is {}. Moving to {} folder. dimension: {}, quality_params{}, occlusion: {}'.format(relative_file_path, 'Not Ok', NOK_PATH, dimension, quality_params, occlusion))
return 'NOK'
#move to OK folder
count_success = count_success +1
move_file(relative_file_path, OK_PATH)
logging.info('File: {} is {}. Moving to {} folder.'.format(relative_file_path, 'Ok', OK_PATH))
return 'OK'
def process(path):
global total
list_sources = []
list_face_height = []
list_face_width = []
list_rolls = []
list_pitchs = []
list_yaws = []
list_quality_params_sharpness = []
list_quality_params_contrast = []
list_quality_params_center_pose_quality = []
list_status = []
list_occlusion = []
a_session = requests.Session()
a_header = createHeader(user_id, passwd, DIRECTORY)
cont = 0
for r, d, f in os.walk(path):
for file_item in f:
num_files = len(f)
if '.jpg' in file_item.lower():
cont = cont + 1
relative_file_path = os.path.join(r, file_item)
logging.info('Validating file: {} - {}/{}'.format(relative_file_path, cont, num_files))
im = cv.imread(relative_file_path)
size = im.shape
logging.debug('Size {}'.format(str(size)))
dimension = {}
attributes = {}
quality_params = {}
pitch = {}
yaw = {}
roll = {}
occlusion = {}
person_obj = submit_photo(a_session, a_header, relative_file_path)
logging.debug('Person as JSON object {}'.format(person_obj))
if (bool(person_obj)):
dimension = get_dimension(person_obj)
attributes = get_attributes(person_obj)
quality_params = get_quality_params(person_obj)
image_points = get_image_points(attributes)
model_points = get_model_points()
occlusion = attributes['occlusion']
(pitch, yaw, roll) = get_roll_pitch_yaw(model_points, image_points, size)
is_ok = verify_params(relative_file_path, dimension, quality_params, occlusion)
list_occlusion.append(occlusion)
list_sources.append(relative_file_path)
list_face_height.append(dimension['height'])
list_face_width.append(dimension['width'])
list_rolls.append(roll)
list_pitchs.append(pitch)
list_yaws.append(yaw)
list_quality_params_sharpness.append(quality_params['sharpnessQuality'])
list_quality_params_contrast.append(quality_params['contrastQuality'])
list_quality_params_center_pose_quality.append(quality_params['centerPoseQuality'])
list_status.append(is_ok)
logging.debug('attributes: {}'.format(str(attributes)))
logging.debug('image_points: {}'.format(str(image_points)))
logging.debug('model_points: {}'.format(str(model_points)))
else:
list_sources.append(relative_file_path)
list_face_height.append(None)
list_face_width.append(None)
list_rolls.append(None)
list_pitchs.append(None)
list_yaws.append(None)
list_quality_params_sharpness.append(None)
list_quality_params_contrast.append(None)
list_quality_params_center_pose_quality.append(None)
list_status.append("NOK")
list_occlusion.append(None)
                TEMPLATE_MSG = 'Image: {} \n Size: {} \n Roll: {} \n Pitch: {} \n Yaw: {} \n Quality Params: {} \n Occlusion: {} \n\n '
                logging.debug(TEMPLATE_MSG.format(relative_file_path, dimension, roll, pitch, yaw, quality_params, occlusion))
df = pd.DataFrame({
'Source':list_sources,
'Status':list_status,
'Height':list_face_height,
'Width':list_face_width,
'Roll':list_rolls,
'Pitch':list_pitchs,
'Yaw':list_yaws,
'Sharpness':list_quality_params_sharpness,
'Contrast':list_quality_params_contrast,
'Center Pose Quality':list_quality_params_center_pose_quality,
'Occlusion':list_occlusion
})
df.to_csv('result.csv', encoding='utf-8', index=False, sep=";")
total = cont
if __name__ == '__main__':
logging.info("Starting process...")
start_time = datetime.now()
try:
process(SOURCE_PATH)
except Exception as e:
        logging.error('An error has occurred. \n {}'.format(e))
finally:
        logging.info('...ending process. Time elapsed {}. Success: {}, Errors: {}, Total: {}.'.format((datetime.now() - start_time), count_success, count_errors, total))
| 43.223881 | 193 | 0.617541 | 1,675 | 14,480 | 5.138507 | 0.201791 | 0.042291 | 0.033461 | 0.019519 | 0.260718 | 0.160335 | 0.104566 | 0.056001 | 0.035552 | 0.012316 | 0 | 0.014272 | 0.259669 | 14,480 | 334 | 194 | 43.353293 | 0.788619 | 0.043785 | 0 | 0.086331 | 0 | 0.010791 | 0.16207 | 0.022512 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039568 | false | 0.021583 | 0.043165 | 0.007194 | 0.122302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94748c13b48c015ed744c4825ce449b56551be58 | 3,445 | py | Python | 2015-09-09-calculator/arti.py | EIK-LUG/CodeClubPython | fb0660ad85a7b0a17d33d37d18f8b41ae597e022 | [
"WTFPL"
] | 2 | 2015-09-12T10:11:38.000Z | 2015-09-13T13:18:25.000Z | 2015-09-09-calculator/arti.py | EIK-LUG/CodeClubPython | fb0660ad85a7b0a17d33d37d18f8b41ae597e022 | [
"WTFPL"
] | 2 | 2015-09-12T07:18:15.000Z | 2015-10-07T06:01:56.000Z | 2015-09-09-calculator/arti.py | EIK-LUG/CodeClubPython | fb0660ad85a7b0a17d33d37d18f8b41ae597e022 | [
"WTFPL"
] | 5 | 2015-09-11T11:19:51.000Z | 2018-02-08T18:17:44.000Z | #!/usr/bin/env python3
# This is a simple 4 banger calculator
LICENSE = """DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2015 Arti Zirk <arti.zirk@gmail.com>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
"""
"""
# Calculator
This program can calculate numbers
# Examples
>>> c = Calculator()
>>> print(c.calculate("1+1"))
2
>>>
# Tests
$ python3 -m unittest main.py
"""
import re
import unittest
class EvalCalculator():
"""Calculator class implementing a simple calculator using python built in
`eval` function to make my life easier"""
    input_filter = re.compile(r"^[0-9+\-*/().]*$")  # digits, the four operators, parentheses, decimal point
def calculate(self, input_string):
if not self.input_filter.match(input_string):
raise SyntaxError("Input string contains invalid characters")
result = eval(input_string) # CHEATING
return result
class Calculator(EvalCalculator):
"""This is a propper calculator.
It will in theory convert string to a list of numbers and operands and then
use reverse polish notation to calculate everything"""
def parse_to_list(self, input_string):
"""Converts input_string to list of numbers and """
the_list = []
a_number = []
for char in input_string:
if char.isdigit():
a_number.append(char)
elif char == ".":
a_number.append(char)
elif char in ("()*/+-"):
if a_number:
number = "".join(a_number)
if "." in number:
number = float(number)
else:
number = int(number)
the_list.append(number)
                    a_number = []  # reset the digit buffer for the next number
                the_list.append(char)
        if a_number:  # flush a trailing number at the end of the input
            number = "".join(a_number)
            the_list.append(float(number) if "." in number else int(number))
        return the_list
def convert_to_rpn(self, input_list):
"""Converts list generated by `parse_to_list` to
reverse polish notation (lisp?)"""
raise NotImplemented("http://andreinc.net/2010/10/05/converting-infix-to-rpn-shunting-yard-algorithm/")
def calculate(self, input_string):
if not self.input_filter.match(input_string):
raise SyntaxError("Input string contains invalid characters")
the_list = self.parse_to_list(input_string)
rpn_list = self.convert_to_rpn(the_list)
raise NotImplemented("Can't yet calculate this")
class TestEvalCalculator(unittest.TestCase):
"""Tests for calculator class using `eval`"""
def setUp(self):
self.calculator = EvalCalculator()
def calc(self, input_string, output_val):
self.assertEqual(self.calculator.calculate(input_string), output_val)
def test_1p1(self):
self.calc("1+1", 2)
def test_2t2(self):
self.calc("2*2", 4)
@unittest.expectedFailure
def test_syntax(self):
self.calc("1+1a", 2)
class TestCalculator(TestEvalCalculator):
"""Tests for calculator class using real parsing"""
def setUp(self):
self.calculator = Calculator()
if __name__ == "__main__":
calc = EvalCalculator()
result = calc.calculate("1+1")
print(result)
| 28.708333 | 111 | 0.628447 | 429 | 3,445 | 4.932401 | 0.375291 | 0.06758 | 0.028355 | 0.018431 | 0.226843 | 0.175803 | 0.152174 | 0.141777 | 0.141777 | 0.108696 | 0 | 0.016315 | 0.270537 | 3,445 | 119 | 112 | 28.94958 | 0.825706 | 0.158781 | 0 | 0.153846 | 0 | 0.015385 | 0.264398 | 0.016081 | 0 | 0 | 0 | 0 | 0.015385 | 1 | 0.153846 | false | 0 | 0.030769 | 0 | 0.276923 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94796da60c973686297c10aaaafe4c00df0e9db5 | 2,444 | py | Python | runtime/components/Data_Quality/apply_substitution.py | ulise/hetida-designer | a6be8eb45abf950d5498e3ca756ea1d2e46b5c00 | [
"MIT"
] | 41 | 2020-11-18T10:12:29.000Z | 2022-03-28T21:46:41.000Z | runtime/components/Data_Quality/apply_substitution.py | ulise/hetida-designer | a6be8eb45abf950d5498e3ca756ea1d2e46b5c00 | [
"MIT"
] | 4 | 2020-12-08T15:28:15.000Z | 2022-02-01T11:40:17.000Z | runtime/components/Data_Quality/apply_substitution.py | ulise/hetida-designer | a6be8eb45abf950d5498e3ca756ea1d2e46b5c00 | [
"MIT"
] | 14 | 2020-11-18T11:39:17.000Z | 2022-03-21T15:05:11.000Z | from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType # add your own imports here
from hetdesrun.utils import plotly_fig_to_json_dict
import pandas as pd
def handle_substitutions(original_series, substitution_series):
"""Applies substituion series on raw values
The substitution series can contain
    * replacement values (at indices occurring in original)
* new values (values at indices not in original)
* null values at indices in original marking values for invalidation (ignoring)
Returns a tuple of pandas Series objects
(completely_handled, replaced_values, replacements, new_values, ignored_values) """
new = original_series.copy()
deleted = new.loc[substitution_series.isnull().reindex(new.index, fill_value=False)]
kept_before_replacing = new.loc[
substitution_series.notnull().reindex(new.index, fill_value=True)
]
replaced_originals = new.loc[
substitution_series.notnull().reindex(new.index, fill_value=False)
]
replacements = substitution_series.reindex(original_series.index).dropna()
new_values = substitution_series.loc[
~substitution_series.index.isin(original_series.index)
]
completely_handled_series = new.copy()
completely_handled_series = completely_handled_series.loc[
substitution_series.notnull().reindex(
completely_handled_series.index, fill_value=True
)
]
completely_handled_series.update(substitution_series)
completely_handled_series = pd.concat([completely_handled_series, new_values])
return (
completely_handled_series.sort_index(),
replaced_originals,
replacements,
new_values,
deleted,
)
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(
inputs={"raw_values": DataType.Series, "substitution_series": DataType.Series},
outputs={"substituted_ts_plot": DataType.PlotlyJson},
)
def main(*, raw_values, substitution_series):
"""entrypoint function for this component"""
# ***** DO NOT EDIT LINES ABOVE *****
# write your function code here.
s1 = raw_values.sort_index()
s1 = s1.loc[~s1.index.duplicated(keep="first")]
s2 = substitution_series.sort_index()
s2 = s2.loc[~s2.index.duplicated(keep="first")]
return {"substituted_ts": handle_substitutions(s1, s2)[0]}
| 34.422535 | 94 | 0.720949 | 288 | 2,444 | 5.902778 | 0.381944 | 0.137647 | 0.108235 | 0.042353 | 0.105294 | 0.084706 | 0.064706 | 0.064706 | 0.064706 | 0.064706 | 0 | 0.005514 | 0.183715 | 2,444 | 70 | 95 | 34.914286 | 0.846617 | 0.252046 | 0 | 0 | 0 | 0 | 0.040427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
947efdbc985976ac04f87ceecfcc075b0bbb1d41 | 16,366 | py | Python | src/main.py | elifesciences/update-iam-human | c00a8824f13584bb0f3456252dd9478b6e21c3e7 | [
"MIT"
] | null | null | null | src/main.py | elifesciences/update-iam-human | c00a8824f13584bb0f3456252dd9478b6e21c3e7 | [
"MIT"
] | 1 | 2018-09-18T00:41:53.000Z | 2018-09-19T00:28:33.000Z | src/main.py | elifesciences/update-iam-human | c00a8824f13584bb0f3456252dd9478b6e21c3e7 | [
"MIT"
] | null | null | null | import getpass
import argparse
import boto3
import sys, os, csv
from github import Github
from github.InputFileContent import InputFileContent
import json
from datetime import timedelta
from collections import OrderedDict
from . import utils
from .utils import ensure, ymd, splitfilter, vals, lmap, lfilter, utcnow
MAX_KEY_AGE_DAYS, GRACE_PERIOD_DAYS = 180, 7
# states
UNKNOWN = '?'
USER_NOT_FOUND = 'user-not-found'
IDEAL = 'ideal'
GRACE_PERIOD = 'in-grace-period'
ALL_CREDENTIALS_ACTIVE = 'all-credentials-active'
NO_CREDENTIALS_ACTIVE = 'no-credentials-active'
OLD_CREDENTIALS = 'old-credentials'
NO_CREDENTIALS = 'no-credentials'
MANY_CREDENTIALS = 'many-credentials'
STATE_DESCRIPTIONS = {
IDEAL: "1 active set of credentials younger than max age of credentials",
GRACE_PERIOD: "two active sets of credentials, one set created in the last $grace-period days",
ALL_CREDENTIALS_ACTIVE: "two active sets of credentials, both sets older than $grace-period days",
NO_CREDENTIALS_ACTIVE: "credentials present but none are active",
OLD_CREDENTIALS: "credentials are old and will be rotated",
NO_CREDENTIALS: "no credentials exist",
# bad states
USER_NOT_FOUND: "user not found",
MANY_CREDENTIALS: "more than 2 sets of credentials exist (program error)",
UNKNOWN: "credentials are in an unhandled state (program error)"
}
def current_user():
# username => signature
mapping = {
'luke': 'Luke Skibinski',
'giorgio': 'Giorgio Sironi'
}
return mapping[getpass.getuser()]
#
# aws IAM
#
INPUT_HEADER = ['name', 'email', 'iam-username']
def validate_row(row):
ensure(isinstance(row, dict), "row must be a dictionary: %s" % (type(row),))
name, email, username = vals(row, 'name', 'email', 'iam-username')
ensure(name and email and username, "bad-value: all values in a row must be present: %s" % (row,))
ensure('@' in email and '.' in email, "bad-value: email doesn't look like an email to me: %s" % (email,))
return True
def read_input(user_csvpath):
ensure(os.path.exists(user_csvpath), "path not found: %s" % user_csvpath)
ensure(os.path.isfile(user_csvpath), "path is not a file: %s" % user_csvpath)
with open(user_csvpath) as fh:
rows = list(csv.DictReader(fh, fieldnames=INPUT_HEADER))
ensure(len(rows) > 1, "csv file is empty")
header = list(rows.pop(0).keys()) # skip the header
ensure(header == INPUT_HEADER, "csv file has incorrect header: %s" % header)
lmap(validate_row, rows)
return rows
def coerce_key(kp):
return {
'access_key_id': kp.access_key_id,
'create_date': kp.create_date,
'status': kp.status,
'-obj': kp,
}
def _get_user(iam_username):
try:
iam = boto3.resource('iam')
iamuser = iam.User(iam_username)
iamuser.load()
return iamuser
except Exception as err:
print('warning: %s' % str(err))
return None
def key_list(iam_username):
_user = _get_user(iam_username)
return lmap(coerce_key, _user.access_keys.all()) if _user else None
def get_key(iam_username, key_id):
keys = lfilter(lambda kp: kp['access_key_id'] == key_id, key_list(iam_username))
if len(keys) == 1:
return keys[0]
def user_report(user_csvrow, max_key_age, grace_period_days):
"given a row, returns the same row with a list of action"
try:
today = utcnow()
state = UNKNOWN
actions = []
access_keys = key_list(user_csvrow['iam-username'])
ensure(access_keys is not None, USER_NOT_FOUND)
#ensure(len(access_keys) > 0, NO_CREDENTIALS)
ensure(len(access_keys) < 3, MANY_CREDENTIALS) # there must only ever be 0, 1 or 2 keys
# carry some state around with us for future ops
user_csvrow.update({
'grace-period-days': grace_period_days,
'max-key-age': max_key_age
})
active_keys, inactive_keys = splitfilter(lambda key: key['status'] != 'Inactive', access_keys)
# always prune inactive keys
[actions.append(('delete', key['access_key_id'])) for key in inactive_keys]
if len(access_keys) == 0:
# users with no credentials should have been filtered out in generate_csv.py
# this is useful for when we're targeting those who didn't update or are brand new
state = NO_CREDENTIALS
actions += [
('create', 'new')
]
elif len(active_keys) > 1:
# we have two active keys
# * user is possibly using both sets, which is no longer supported, or
# * user was granted a new set of credentials by this script
oldest_key, newest_key = sorted(active_keys, key=lambda kp: kp['create_date']) # ASC
if (today - newest_key['create_date']).days > grace_period_days:
state = ALL_CREDENTIALS_ACTIVE
# grace period is over. mark the oldest of the two active keys as inactive.
# it will be deleted on the next turn
actions.append(('disable', oldest_key['access_key_id']))
else:
# we're in the grace period, nothing to do until it ends
state = GRACE_PERIOD
elif len(active_keys) == 1:
active_key = active_keys[0]
# if max_key_age <= 1, you get a 'create' action on every call
if (today - active_key['create_date']).days <= max_key_age:
state = IDEAL
else:
# remaining key is too old
state = OLD_CREDENTIALS
actions += [
('create', 'new')
]
else:
state = NO_CREDENTIALS_ACTIVE
user_csvrow.update({
'success?': True,
'state': state,
'reason': STATE_DESCRIPTIONS[state],
'actions': actions,
})
return user_csvrow
except AssertionError as err:
state = str(err)
user_csvrow.update({
'success?': False,
'state': state,
'reason': STATE_DESCRIPTIONS[state],
'actions': []
})
return user_csvrow
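# Illustrative shape of a processed row (values are assumptions):
#
#   row = {'name': 'Jane Doe', 'email': 'jane@example.org', 'iam-username': 'jdoe'}
#   user_report(row, max_key_age=180, grace_period_days=7)
#   # -> row gains 'state', 'reason' and an 'actions' list such as
#   #    [('delete', 'AKIA...'), ('create', 'new')]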
def delete_key(iam_username, key_id):
print('deleting key for', iam_username)
key = get_key(iam_username, key_id)
if key:
key['-obj'].delete()
return True
return False
def disable_key(iam_username, key_id):
print('disabling key for', iam_username)
key = get_key(iam_username, key_id)
if key:
key['-obj'].deactivate()
return True
return False
def create_key(iam_username, _):
print('creating key for', iam_username)
iamuser = _get_user(iam_username)
key = iamuser.create_access_key_pair()
return {'aws-access-key': key.access_key_id,
'aws-secret-key': key.secret_access_key}
def execute_user_report(user_report_data):
ensure(isinstance(user_report_data, dict), "user-report must be a dict")
dispatch = {
'delete': delete_key,
'disable': disable_key,
'create': create_key,
}
iam_username = user_report_data['iam-username']
actions = user_report_data['actions']
results = [(fnkey, dispatch[fnkey](iam_username, val)) for fnkey, val in actions]
# weakness: no more than one type of action per execution else results get squashed
# for example, can't do two 'disables' or two 'deletes'. not a problem right now
user_report_data['results'] = OrderedDict(results)
return user_report_data
def execute_report(report_data):
"executes the list of actions against each user in the given report data."
ensure(isinstance(report_data, list), "report data must be a list of user-report dicts")
return lmap(execute_user_report, report_data)
#
# github gist
#
# ll: {'key': 'github-api-token'}
GH_CREDENTIALS_FILE = os.path.abspath("private.json")
def gh_credentials():
return json.load(open(GH_CREDENTIALS_FILE, 'r'))
def gh_user():
"returns a user that can create gists"
credentials = gh_credentials()
gh = Github(credentials['key'])
return gh.get_user()
def create_gist(description, content):
public = False
authenticated_user = gh_user()
content = InputFileContent(content)
gist = authenticated_user.create_gist(public, {'content': content}, description)
return {
'gist-html-url': gist.html_url,
'gist-id': gist.id,
'gist-created-at': gist.created_at
}
def gh_create_user_gist(user_csvrow):
ensure('results' in user_csvrow, "`gh_create_user_gist` requires the results of calling `execute_user_report`")
content = '''Hello, {insert-name-of-human}
Your new AWS credentials are:
aws_access_key_id={insert-access-key}
aws_secret_access_key={insert-secret-key}
Your old credentials (if any) and this message will expire on {insert-expiry-date}.'''
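    # note: str.format_map treats the hyphenated placeholders above as plain
    # mapping keys, so no identifier-style names are needed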
new_key = user_csvrow['results']['create']
content = content.format_map({
'insert-name-of-human': user_csvrow['name'],
'insert-access-key': new_key['aws-access-key'],
'insert-secret-key': new_key['aws-secret-key'],
'insert-expiry-date': ymd(utcnow() + timedelta(days=user_csvrow['grace-period-days'])),
})
print('creating gist for', user_csvrow['name'])
gist = create_gist("new AWS API credentials", content)
user_csvrow.update(gist)
# nullify the secret key, we no longer need it
user_csvrow['results']['create']['aws-secret-key'] = '[redacted]'
return user_csvrow
#
# email
#
EMAIL_FROM = 'it-admin@elifesciences.org' # verified SES address
EMAIL_DEV_ADDR = 'tech-team@elifesciences.org'
def send_email(to_addr, subject, content):
# https://boto3.readthedocs.io/en/latest/reference/services/ses.html?highlight=ses#client
ses = boto3.client('ses', region_name='us-east-1')
# https://boto3.readthedocs.io/en/latest/reference/services/ses.html?highlight=ses#SES.Client.send_email
kwargs = {
'Source': EMAIL_FROM,
'Destination': {'ToAddresses': [to_addr]},
'Message': {
'Subject': {'Charset': 'UTF-8', 'Data': subject},
'Body': {'Text': {'Charset': 'UTF-8', 'Data': content}}
},
'ReplyToAddresses': [EMAIL_FROM],
'ReturnPath': EMAIL_DEV_ADDR,
}
return ses.send_email(**kwargs)
def email_user__old_credentials_disabled(user_csvrow):
ensure('results' in user_csvrow, "`email_user__old_credentials_disabled` requires the results of calling `execute_user_report`")
ensure('disable' in user_csvrow['results'] and user_csvrow['results']['disable'],
"`email_user__old_credentials_disabled` requires a key was successfully disabled")
disabled_credential_key = dict(user_csvrow['actions'])['disable']
name, to_addr = vals(user_csvrow, 'name', 'email')
subject = 're: Replacement AWS credentials'
content = '''Hello {insert-name-of-human},
The grace period for updating your AWS credentials is over and "{insert-disabled-credential-key}" has been disabled.
You can find your new credentials linked to in our previous email.
Please contact it-admin@elifesciences.org if you have any problems.
---
This email is not spam, is not a scam and if you have *any* doubts whatsoever about it's authenticity,
please contact someone in the IT team first.
This email was generated {todays-date} by {author} using this program:
https://github.com/elifesciences/update-iam-human'''
content = content.format_map({
'insert-name-of-human': user_csvrow['name'],
'insert-disabled-credential-key': disabled_credential_key,
'insert-grace-period': user_csvrow['grace-period-days'],
'todays-date': ymd(utcnow()),
'author': current_user()
})
print('sending email %r to %s (%s)' % (subject, user_csvrow['name'], to_addr))
result = send_email(to_addr, subject, content)
user_csvrow.update({
'disabled-email-id': result['MessageId'], # probably not at all useful
'disabled-email-sent': utcnow(),
})
return user_csvrow
def email_user__new_credentials(user_csvrow):
ensure('gist-html-url' in user_csvrow, "`email_user__new_credentials` requires the results of calling `gh_create_user_gist`")
name, to_addr = vals(user_csvrow, 'name', 'email')
subject = 'Replacement AWS credentials'
content = '''Hello {insert-name-of-human},
Your AWS credentials are being rotated.
This means a new set of credentials has been created for you and any
old credentials will be removed after the grace period ({insert-expiry-date}).
Your new set of credentials can be found here:
{insert-gist-url}
Please contact it-admin@elifesciences.org if you have any problems.
---
This email is not spam, is not a scam and if you have *any* doubts whatsoever about it's authenticity,
please contact someone in the IT team first.
This email was generated {todays-date} by {author} using this program:
https://github.com/elifesciences/update-iam-human'''
content = content.format_map({
'insert-name-of-human': user_csvrow['name'],
'insert-expiry-date': ymd(utcnow() + timedelta(days=user_csvrow['grace-period-days'])),
'insert-gist-url': user_csvrow['gist-html-url'],
'todays-date': ymd(utcnow()),
'author': current_user()
})
print('sending email to %s (%s)' % (user_csvrow['name'], to_addr))
result = send_email(to_addr, subject, content)
user_csvrow.update({
'email-id': result['MessageId'], # probably not at all useful
'email-sent': utcnow(),
})
# nullify the gist html url, it contains the secret key
user_csvrow['gist-html-url'] = '[redacted]'
return user_csvrow
#
# report wrangling
#
def notify(report_results):
"notifies users after executing actions in report"
# TODO: should user be notified if credentials have been disabled after a grace period?
# create a gist for those users with new credentials
users_w_new_credentials, unnotified = splitfilter(lambda row: 'create' in row['results'], report_results)
users_w_gists = lmap(gh_create_user_gist, users_w_new_credentials)
results = lmap(email_user__new_credentials, users_w_gists)
return {'notified': results, 'unnotified': unnotified}
def write_report(user_csvpath, passes, fails, executed):
report = {'passes': passes, 'fails': fails}
type_of_content = 'results' if executed else 'report'
path = os.path.splitext(os.path.basename(user_csvpath))[0]
path = '%s-%s-%s.json' % (path, type_of_content, ymd(utcnow())) # "humans-results-2019-01-01.json"
data = utils.lossy_json_dumps(report, indent=4)
print(data)
with open(path, 'w') as fh:
fh.write(data)
return path
def main(user_csvpath, max_key_age=MAX_KEY_AGE_DAYS, grace_period_days=GRACE_PERIOD_DAYS, execute=False):
csv_contents = read_input(user_csvpath)
max_key_age, grace_period_days = lmap(int, [max_key_age, grace_period_days])
print('querying %s users ...' % len(csv_contents))
results = [user_report(row, max_key_age, grace_period_days) for row in csv_contents]
pass_rows, fail_rows = splitfilter(lambda row: row['success?'], results)
if not pass_rows:
# nothing to do
return len(fail_rows)
if execute:
results = execute_report(pass_rows)
results = notify(results)
print('wrote: ', write_report(user_csvpath, results, fail_rows, execute))
else:
print('wrote: ', write_report(user_csvpath, pass_rows, fail_rows, execute))
return 0
if __name__ == '__main__':
try:
ensure(os.path.exists(GH_CREDENTIALS_FILE), "no github credentials found: %s" % GH_CREDENTIALS_FILE)
parser = argparse.ArgumentParser()
parser.add_argument('user_csvpath')
parser.add_argument('--execute', default=False, action='store_true')
parser.add_argument('--max-key-age', default=MAX_KEY_AGE_DAYS)
parser.add_argument('--grace-period-days', default=GRACE_PERIOD_DAYS)
kwargs = parser.parse_args().__dict__ # {'user_csvpath': 'example.csv', 'execute': False, 'max_key_age': 180, 'grace_period_days': 7}
sys.exit(main(**kwargs))
except AssertionError as err:
print('err:', err)
retcode = getattr(err, 'retcode', 1)
sys.exit(retcode)
| 37.280182 | 141 | 0.668459 | 2,199 | 16,366 | 4.789905 | 0.18281 | 0.035128 | 0.025634 | 0.009684 | 0.273521 | 0.226716 | 0.184088 | 0.160068 | 0.151714 | 0.12551 | 0 | 0.003428 | 0.215813 | 16,366 | 438 | 142 | 37.365297 | 0.817282 | 0.114139 | 0 | 0.233129 | 0 | 0.009202 | 0.303716 | 0.0403 | 0 | 0 | 0 | 0.002283 | 0.006135 | 1 | 0.070552 | false | 0.02454 | 0.033742 | 0.006135 | 0.190184 | 0.03681 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94837f054bfa14530b650e254c7da3d29ae1ed3b | 10,958 | py | Python | examples-master/python_examples/md_poly_lj_module.py | JungHoonJung/2021MD | 29bfae7a750217d50654e4973a2be6fb0d968bdf | [
"MIT"
] | 186 | 2017-04-30T18:11:12.000Z | 2022-03-31T18:35:09.000Z | examples-master/python_examples/md_poly_lj_module.py | JungHoonJung/2021MD | 29bfae7a750217d50654e4973a2be6fb0d968bdf | [
"MIT"
] | 17 | 2017-02-09T11:08:49.000Z | 2022-01-29T14:40:09.000Z | examples-master/python_examples/md_poly_lj_module.py | JungHoonJung/2021MD | 29bfae7a750217d50654e4973a2be6fb0d968bdf | [
"MIT"
] | 92 | 2017-09-13T05:20:40.000Z | 2022-03-19T16:17:02.000Z | #!/usr/bin/env python3
# md_poly_lj_module.py
#------------------------------------------------------------------------------------------------#
# This software was written in 2016/17 #
# by Michael P. Allen <m.p.allen@warwick.ac.uk>/<m.p.allen@bristol.ac.uk> #
# and Dominic J. Tildesley <d.tildesley7@gmail.com> ("the authors"), #
# to accompany the book "Computer Simulation of Liquids", second edition, 2017 ("the text"), #
# published by Oxford University Press ("the publishers"). #
# #
# LICENCE #
# Creative Commons CC0 Public Domain Dedication. #
# To the extent possible under law, the authors have dedicated all copyright and related #
# and neighboring rights to this software to the PUBLIC domain worldwide. #
# This software is distributed without any warranty. #
# You should have received a copy of the CC0 Public Domain Dedication along with this software. #
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #
# #
# DISCLAIMER #
# The authors and publishers make no warranties about the software, and disclaim liability #
# for all uses of the software, to the fullest extent permitted by applicable law. #
# The authors and publishers do not recommend use of this software for any purpose. #
# It is made freely available, solely to clarify points made in the text. When using or citing #
# the software, you should not imply endorsement by the authors or publishers. #
#------------------------------------------------------------------------------------------------#
"""Force routine for MD simulation, polyatomic molecules, LJ atoms."""
import numpy as np
fast = True # Change this to replace NumPy force evaluation with slower Python
# Bond vectors in body-fixed frame
# Isosceles triangle, 3 sites, with unit bond length and bond angle alpha
# which we set to 75 degrees here
alpha = 75.0 * np.pi / 180.0
alpha2 = alpha / 2.0
na = 3
db = np.array([[-np.sin(alpha2), 0.0, -np.cos(alpha2)/3.0],
[0.0, 0.0, 2.0*np.cos(alpha2)/3.0],
[np.sin(alpha2), 0.0, -np.cos(alpha2)/3.0]], dtype=np.float_)
diameter = 2.0 * np.sqrt ( np.max ( np.sum(db**2,axis=1) ) ) # Molecular diameter
# Cutoff distance and force-shift parameters (all private) chosen as per the reference:
# S Mossa, E La Nave, HE Stanley, C Donati, F Sciortino, P Tartaglia, Phys Rev E, 65, 041205 (2002)
r_cut = 2.612 # in sigma=1 units, where r_cut = 1.2616 nm, sigma = 0.483 nm
sr_cut = 1.0/r_cut
sr_cut6 = sr_cut**6
sr_cut12 = sr_cut6**2
lambda1 = 4.0*(7.0*sr_cut6-13.0*sr_cut12)
lambda2 = -24.0*(sr_cut6-2.0*sr_cut12)*sr_cut
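# With these constants the pair potential used below is
#   v(r) = 4*(r**(-12) - r**(-6)) + lambda1 + lambda2*r   for r < r_cut,
# chosen so that both v(r_cut) = 0 and v'(r_cut) = 0 (cut-and-force-shifted).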
m = np.array([ 1.0/3.0, 1.0/3.0, 1.0/3.0 ],dtype=np.float_) # Masses add up to 1.0
# The following section sets the diagonal moments of inertia "realistically"
# based on the values of atomic masses and bond vectors (above), with some checking
# that the total mass is 1, the COM is at the origin, and the inertia tensor is diagonal.
# However, there is nothing to stop the user replacing this section with a statement just
# setting the values of inertia[:]. The masses m are not used by the calling program.
# It might be advantageous, for instance, to artificially increase the values in inertia.
# Ensure that the db bonds, xyz molecular axes, and masses are chosen such that
# the total mass is 1 and the centre-of-mass is at the origin
assert np.isclose(np.sum(m),1.0), 'M is not 1.0 {}'.format(np.sum(m))
com = np.sum ( m[:,np.newaxis]*db, axis = 0 )
assert np.all ( np.isclose(com,0.0) ), 'COM error {} {} {}'.format(*com)
# Ensure that the db bonds, xyz molecular axes, and masses are chosen such that
# the off-diagonal elements of the inertia tensor are zero
inertia = -np.einsum('ij,ik->jk',m[:,np.newaxis]*db,db)
offdiag = inertia[np.triu_indices(3,1)]
assert np.all ( np.isclose(offdiag,0.0) ), 'Inertia not diagonal {} {} {}'.format(*offdiag)
# Calculate the diagonal elements of the inertia tensor
inertia = np.sum(m[:,np.newaxis]*db**2) + np.diagonal ( inertia )
class PotentialType:
"""A composite variable for interactions."""
def __init__(self, pot, vir, ovr):
self.pot = pot # the potential energy
self.vir = vir # the virial
self.ovr = ovr # a flag indicating overlap (i.e. pot too high to use)
def __add__(self, other):
pot = self.pot + other.pot
vir = self.vir + other.vir
ovr = self.ovr or other.ovr
return PotentialType(pot,vir,ovr)
def introduction():
"""Prints out introductory statements at start of run."""
print('Lennard-Jones potential')
print('Cut-and-force-shifted')
print('Diameter, sigma = 1')
print('Well depth, epsilon = 1')
if fast:
print('Fast NumPy force routine')
else:
print('Slow Python force routine')
print( "{:40}{:15d}".format('Number of atoms per molecule', na) )
for i, b in enumerate(db):
print( "{}{:2d}{:15.6f}{:15.6f}{:15.6f}".format('Body-fixed atom vector',i,*b))
print( "{:40}{:15.6f}".format('Molecular diameter', diameter) )
print( "{:40}{:15.6f}".format('r_cut', r_cut) )
print( "{:40}{:15.6f}".format('Force-shift lambda1', lambda1) )
print( "{:40}{:15.6f}".format('Force-shift lambda2', lambda2) )
print( "{:40}{:15.6f}{:15.6f}{:15.6f}".format('Inertia Ixx, Iyy, Izz', *inertia) )
def conclusion():
"""Prints out concluding statements at end of run."""
print('Program ends')
def force ( box, r, d ):
"""Takes in box, and r & d arrays, and calculates forces, torques and potentials etc."""
import numpy as np
# It is assumed that positions are in units where box = 1
# Forces are calculated in units where sigma = 1 and epsilon = 1
# Note that this is the force-shifted LJ potential with a linear smoothing term
# S Mossa, E La Nave, HE Stanley, C Donati, F Sciortino, P Tartaglia, Phys Rev E, 65, 041205 (2002)
n, ndim = r.shape
assert ndim==3, 'Dimension error for r'
nn, nna, ndim = d.shape
assert nna==na and ndim==3, 'Dimension error for d'
assert n==nn, 'Dimension mismatch for r and d'
sr2_ovr = 1.77 # Overlap threshold (pot > 100)
rm_cut_box = ( r_cut + diameter ) / box # Molecular cutoff in box=1 units
rm_cut_box_sq = rm_cut_box**2 # squared
assert rm_cut_box<0.5, 'rm_cut/box too large'
r_cut_sq = r_cut ** 2
# Initialize
f = np.zeros_like(r)
tau = np.zeros_like(r)
total = PotentialType ( pot=0.0, vir=0.0, ovr=False )
if fast:
for i in range(n-1):
rij = r[i,:]-r[i+1:,:] # Separation vectors for j>i
rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units
rij = rij * box # Now in sigma=1 units
for a in range(na):
for b in range(na):
rab = rij + d[i,a,:] - d[i+1:,b,:] # All atom-atom vectors for given a and b
rab_sq = np.sum(rab**2,axis=1) # Squared separations
in_range = rab_sq < r_cut_sq # Set flags for within cutoff
sr2 = 1.0 / rab_sq # (sigma/rab)**2
ovr = sr2 > sr2_ovr # Set flags for any overlaps
rmag = np.sqrt(rab_sq)
sr6 = sr2 ** 3
sr12 = sr6 ** 2
pot = np.where ( in_range,
4.0*(sr12-sr6) + lambda1 + lambda2*rmag, 0.0 ) # force-shifted pair potentials
virab = np.where ( in_range,
24.0*(2.0*sr12-sr6) - lambda2*rmag, 0.0 ) # pair virials
fab = virab * sr2
fab = rab * fab[:,np.newaxis] # atom-atom pair forces
total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(rij*fab), ovr=np.any(ovr) )
fia = np.sum(fab,axis=0)
f[i,:] = f[i,:] + fia
f[i+1:,:] = f[i+1:,:] - fab
tau[i,:] = tau[i,:] + np.cross ( d[i,a,:], fia )
tau[i+1:,:] = tau[i+1:,:] - np.cross ( d[i+1:,b,:], fab )
else:
for i in range(n-1): # Outer loop
for j in range(i+1,n): # Inner loop
rij = r[i,:]-r[j,:] # Separation vector
rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units
rij_sq = np.sum(rij**2) # Squared separation
if rij_sq < rm_cut_box_sq: # Check within cutoff
rij = rij * box # Now in sigma=1 units
for a in range(na):
for b in range(na):
rab = rij + d[i,a,:] - d[j,b,:] # Atom-atom vector for given a and b
rab_sq = np.sum(rab**2) # Squared separation
if rab_sq < r_cut_sq: # Test within potential cutoff
sr2 = 1.0 / rab_sq # (sigma/rab)**2
ovr = sr2 > sr2_ovr # Set flag for overlap
rmag = np.sqrt(rab_sq)
sr6 = sr2 ** 3
sr12 = sr6 ** 2
pot = 4.0*(sr12-sr6) + lambda1 + lambda2*rmag # force-shifted pair potential
virab = 24.0*(2.0*sr12-sr6) - lambda2*rmag # pair virial
fab = virab * sr2
fab = rab * fab # atom-atom pair force
total = total + PotentialType ( pot=pot, vir=np.sum(rij*fab), ovr=ovr )
f[i,:] = f[i,:] + fab
f[j,:] = f[j,:] - fab
tau[i,:] = tau[i,:] + np.cross ( d[i,a,:], fab )
tau[j,:] = tau[j,:] - np.cross ( d[j,b,:], fab )
# Multiply results by numerical factors
total.vir = total.vir / 3.0 # Divide virial by 3
return total, f, tau
| 52.180952 | 113 | 0.515788 | 1,477 | 10,958 | 3.780636 | 0.274204 | 0.004298 | 0.010745 | 0.00985 | 0.252507 | 0.223138 | 0.184993 | 0.152042 | 0.141655 | 0.141655 | 0 | 0.043411 | 0.354627 | 10,958 | 209 | 114 | 52.430622 | 0.746182 | 0.446158 | 0 | 0.227642 | 0 | 0 | 0.095134 | 0.013639 | 0 | 0 | 0 | 0 | 0.056911 | 1 | 0.04065 | false | 0 | 0.01626 | 0 | 0.081301 | 0.113821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
948c6619b9b82445ad34ff17a5f8f5eb8c8c2d63 | 1,609 | py | Python | code9/Magic Methods.py | python3-7/python- | 73fa1cf161c5e985da062c50907e03399f79ee2d | [
"MIT"
] | null | null | null | code9/Magic Methods.py | python3-7/python- | 73fa1cf161c5e985da062c50907e03399f79ee2d | [
"MIT"
] | null | null | null | code9/Magic Methods.py | python3-7/python- | 73fa1cf161c5e985da062c50907e03399f79ee2d | [
"MIT"
] | null | null | null | from win32com import client as wc
import os, time
class Pdf_docx_md(object):
def load_files(self, path):
'''
        Load the required files and yield every file name
'''
os.chdir(path)
dir = os.getcwd()
files = os.listdir(dir)
for file in files:
yield file
def do_exchange(self, file, file_path, change_path):
'''
        Convert files (pdf -> docx -> md)
'''
word = wc.Dispatch('Word.Application')
if file[-4:] == '.pdf':
            # convert pdf to docx
doc = word.Documents.Open(file_path + file)
doc.SaveAs('%s\\%s.docx' % (change_path, 'pdf2docx'), 16)
doc.Close()
time.sleep(5)
command = 'C:/pandoc.exe -s {PATH}\\pdf2docx.docx -o {PATH}\\docx2md.md'
command = command.format(PATH=change_path)
os.popen(command, 'r')
def find_mass(self, change_path):
'''
        Parse the content (business-requirement hook)
'''
total_path = change_path + '\\docx2md.md'
        # parse the md file
str_html = open(total_path, 'rb')
return str_html
def main(self, file_path, change_path):
files = self.load_files(file_path)
for file in files:
            print('Converting:', file)
            self.do_exchange(file, file_path, change_path)
            print('Conversion succeeded!')
self.find_mass(change_path)
if __name__ == '__main__':
pdf_docx_md = Pdf_docx_md()
file_path = 'D:\\Users\\23292\\Documents\\pdf\\课堂pdf\\python\\面向对象'
change_path = 'D:\\Users\\23292\\Documents\\pdf\\课堂pdf\\python\\md'
pdf_docx_md.main(file_path, change_path)
| 27.741379 | 84 | 0.550653 | 198 | 1,609 | 4.257576 | 0.40404 | 0.118624 | 0.099644 | 0.085409 | 0.142349 | 0.090154 | 0.090154 | 0.090154 | 0 | 0 | 0 | 0.019856 | 0.311374 | 1,609 | 57 | 85 | 28.22807 | 0.740975 | 0.040398 | 0 | 0.057143 | 0 | 0.028571 | 0.161533 | 0.085558 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.057143 | 0 | 0.228571 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
948d70917a8ff7f0374b0985e3aa182bab5e84b4 | 649 | py | Python | hackerrank/set_operations.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
] | 3 | 2017-05-22T03:14:21.000Z | 2019-05-24T11:44:15.000Z | hackerrank/set_operations.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
] | null | null | null | hackerrank/set_operations.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/domains/python?filters%5Bsubdomains%5D%5B%5D=py-sets
# Set Union
input()
first = set(map(int, input().split()))
input()
second = set(map(int, input().split()))
print(len(first.union(second)))
# Set Intersection
input()
first = set(input().split())
input()
second = set(input().split())
print(len(first.intersection(second)))
# Set Difference
input()
english = set(input().split())
input()
french = set(input().split())
print(len(english.difference(french)))
# Set Symmetric Difference
input()
english = set(input().split())
input()
french = set(input().split())
print(len(english.symmetric_difference(french)))
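# Example stdin for each block (illustrative): a count line, then the
# space-separated elements, e.g.
#   9
#   1 2 3 4 5 6 7 8 9
#   9
#   10 1 2 3 11 21 55 6 8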
| 18.542857 | 81 | 0.699538 | 87 | 649 | 5.206897 | 0.310345 | 0.1766 | 0.172185 | 0.15894 | 0.567329 | 0.326711 | 0.326711 | 0.326711 | 0.326711 | 0.326711 | 0 | 0.006814 | 0.095532 | 649 | 34 | 82 | 19.088235 | 0.764906 | 0.224961 | 0 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
846725d310463621e9c429dbf9bb3ad4bef3a529 | 1,146 | py | Python | parse_api/nlp/entity_extraction.py | dennysem/seq2sql_api | a6922fba53f684848578c6da104e312e5b29299f | [
"MIT"
] | null | null | null | parse_api/nlp/entity_extraction.py | dennysem/seq2sql_api | a6922fba53f684848578c6da104e312e5b29299f | [
"MIT"
] | null | null | null | parse_api/nlp/entity_extraction.py | dennysem/seq2sql_api | a6922fba53f684848578c6da104e312e5b29299f | [
"MIT"
] | null | null | null | class EntityExtractor:
    def __init__(self, data_source):
        self.data_source = data_source

    def extract(self, query):
        query = query.lower()
        aggregation = self._extract_aggregation(query)
        dimensions = self._extract_dimensions(query)
        filters = self._extract_filters(query)
        return aggregation, dimensions, filters

    def _extract_dimensions(self, query):
        dimensions = set()
        dimension_aliases = self.data_source.dimension_aliases
        for alias, dimension in dimension_aliases.items():
            if alias in query:
                dimensions.add(dimension)
        return list(dimensions)

    def _extract_aggregation(self, query):
        if query == 'all apps':
            return 'all'
        elif 'rating' in query:
            return 'mean'
        else:
            return 'count'

    def _extract_filters(self, query):
        filters = set()
        filter_aliases = self.data_source.filter_aliases
        for alias, dimension in filter_aliases.items():
            if alias in query:
                filters.add((dimension, alias))
        return list(filters)
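
# Editor's usage sketch (added): the data source below is a hypothetical
# stand-in — the real one only needs `dimension_aliases` and `filter_aliases`
# mapping dicts for the extractor to work.
class _DummyDataSource:
    dimension_aliases = {'category': 'Category'}
    filter_aliases = {'free': 'Price'}


def _demo_extract():
    extractor = EntityExtractor(_DummyDataSource())
    # returns ('count', ['Category'], [('Price', 'free')])
    return extractor.extract('free apps by category')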
| 31.833333 | 62 | 0.620419 | 121 | 1,146 | 5.652893 | 0.264463 | 0.073099 | 0.081871 | 0.061404 | 0.152047 | 0.076023 | 0 | 0 | 0 | 0 | 0 | 0 | 0.298429 | 1,146 | 35 | 63 | 32.742857 | 0.850746 | 0 | 0 | 0.066667 | 0 | 0 | 0.022688 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8468ee72e6d83b1a43c0ba9a4f645f8b97bb8a4a | 5,938 | py | Python | WebMain.py | q792602257/XiguaLiveDanmakuHelper | b1f45ee90dc31ada27ce17eb6b062735d358d608 | [
"WTFPL"
] | 5 | 2019-05-10T04:16:57.000Z | 2021-06-12T03:43:11.000Z | WebMain.py | q792602257/XiguaLiveDanmakuHelper | b1f45ee90dc31ada27ce17eb6b062735d358d608 | [
"WTFPL"
] | null | null | null | WebMain.py | q792602257/XiguaLiveDanmakuHelper | b1f45ee90dc31ada27ce17eb6b062735d358d608 | [
"WTFPL"
] | 4 | 2019-05-10T04:33:55.000Z | 2021-09-09T03:05:00.000Z | import os
from glob import glob
from time import sleep
from flask_cors import CORS
from flask import Flask, jsonify, request, render_template, Response, send_file

import Common
import threading
from liveDownloader import run as RUN

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
CORS(app, supports_credentials=True)


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/config", methods=["GET"])
def readConfig():
    config = Common.config.copy()
    config.pop("b_p")
    config.pop("mv")
    return jsonify(config)


@app.route("/config", methods=["POST"])
def writeConfig():
    # TODO: finish implementing this
    Common.appendOperation("Updated configuration")
    Common.reloadConfig()
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": request.form})


@app.route("/force/not/upload", methods=["POST"])
def toggleForceNotUpload():
    Common.forceNotUpload = not Common.forceNotUpload
    Common.appendOperation("Set forceNotUpload to: {}".format(Common.forceNotUpload))
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
        "forceNotUpload": Common.forceNotUpload,
    }})


@app.route("/force/not/encode", methods=["POST"])
def toggleForceNotEncode():
    Common.forceNotEncode = not Common.forceNotEncode
    Common.appendOperation("Set forceNotEncode to: {}".format(Common.forceNotEncode))
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
        "forceNotEncode": Common.forceNotEncode,
    }})


@app.route("/force/not/download", methods=["POST"])
def toggleForceNotDownload():
    Common.forceNotDownload = not Common.forceNotDownload
    Common.appendOperation("Set forceNotDownload to: {}".format(Common.forceNotDownload))
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
        "forceNotDownload": Common.forceNotDownload,
    }})


@app.route("/force/not/broadcast", methods=["POST"])
def toggleForceNotBroadcast():
    Common.forceNotBroadcasting = not Common.forceNotBroadcasting
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
        "forceNotBroadcasting": Common.forceNotBroadcasting,
    }})


@app.route("/force/start/encode", methods=["POST"])
def toggleForceStartEncodeThread():
    Common.forceStartEncodeThread = True
    Common.appendOperation("Force-started the encode thread")
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
    }})


@app.route("/force/start/upload", methods=["POST"])
def toggleForceStartUploadThread():
    Common.forceStartUploadThread = True
    Common.appendOperation("Force-started the upload thread")
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
    }})


@app.route("/force/start/clean", methods=["POST"])
def startForceCleanDisk():
    Common.doClean(True)
    Common.appendOperation("Force-ran the cleanup routine")
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
    }})


@app.route("/encode/insert", methods=["POST"])
def insertEncode():
    if "filename" in request.form and os.path.exists(request.form["filename"]):
        Common.appendOperation("Queued file for encoding: {}".format(request.form["filename"]))
        Common.encodeQueue.put(request.form["filename"])
        return jsonify({"message": "ok", "code": 200, "status": 0})
    else:
        return jsonify({"message": "no filename specified", "code": 400, "status": 1})


@app.route("/upload/insert", methods=["POST"])
def insertUpload():
    if "filename" in request.form and os.path.exists(request.form["filename"]):
        Common.appendOperation("Queued file for upload: {}".format(request.form["filename"]))
        Common.uploadQueue.put(request.form["filename"])
        return jsonify({"message": "ok", "code": 200, "status": 0})
    else:
        return jsonify({"message": "no filename specified", "code": 400, "status": 1})


@app.route("/upload/finish", methods=["POST"])
def finishUpload():
    Common.appendOperation("Marked the current upload as finished")
    Common.uploadQueue.put(True)
    return jsonify({"message": "ok", "code": 200, "status": 0})


@app.route("/stats", methods=["GET"])
def getAllStats():
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": Common.collectInfomation()})


@app.route("/stats/device", methods=["GET"])
def getDeviceStatus():
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
        "status": Common.getCurrentStatus(),
    }})


@app.route("/stats/config", methods=["GET"])
def getConfigStats():
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {
        "config": {
            "forceNotBroadcasting": Common.forceNotBroadcasting,
            "forceNotDownload": Common.forceNotDownload,
            "forceNotUpload": Common.forceNotUpload,
            "forceNotEncode": Common.forceNotEncode,
            "downloadOnly": Common.config['dlO'],
        }
    }})


@app.route("/account/reLogin", methods=["POST"])
def accountRelogin():
    res = Common.loginBilibili(True)
    return jsonify({"message": "ok", "code": 200, "status": 0, "data": {"result": res}})


@app.route("/files/", methods=["GET"])
def fileIndex():
    a = []
    for i in (glob("*.mp4") + glob("*.flv")):
        a.append({
            "name": i,
            "size": Common.parseSize(os.path.getsize(i))
        })
    return render_template("files.html", files=a)


@app.route("/files/download/<path>", methods=["GET"])
def fileDownload(path):
    if not (".mp4" in path or ".flv" in path):
        return Response(status=404)
    if os.path.exists(path):
        return send_file(path, as_attachment=True)
    else:
        return Response(status=404)


def SubThread():
    t = threading.Thread(target=RUN, args=())
    t.setDaemon(True)
    t.start()
    while True:
        if t.is_alive():
            sleep(240)
        else:
            t = threading.Thread(target=RUN, args=())
            t.setDaemon(True)
            t.start()


if not app.debug:
    p = threading.Thread(target=SubThread)
    p.setDaemon(True)
    p.start()

if __name__ == "__main__":
    app.run()
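
# Editor's testing sketch (added, not part of the original module): Flask's
# built-in test client can exercise the JSON endpoints without a server.
def _smoke_test():
    with app.test_client() as client:
        resp = client.get("/stats/config")
        assert resp.status_code == 200
        return resp.get_json()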
| 30.608247 | 99 | 0.640957 | 646 | 5,938 | 5.854489 | 0.232198 | 0.04019 | 0.0899 | 0.087255 | 0.301428 | 0.285034 | 0.285034 | 0.285034 | 0.285034 | 0.178213 | 0 | 0.016132 | 0.175312 | 5,938 | 193 | 100 | 30.766839 | 0.756177 | 0.001516 | 0 | 0.304054 | 0 | 0 | 0.190147 | 0.003712 | 0 | 0 | 0 | 0.005181 | 0 | 1 | 0.135135 | false | 0 | 0.054054 | 0.027027 | 0.344595 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8469fabb25cbc09922ff10f7b4374ba8210bc37e | 512 | py | Python | fun/fun/asgi.py | larryw3i/fun | e753ce6d448f7f6ec3169a4d1fa7e1c7bff70a27 | [
"MIT"
] | 1 | 2022-01-01T11:14:58.000Z | 2022-01-01T11:14:58.000Z | fun/fun/asgi.py | larryw3i/osp | d9526a179876053a6b93e5a110d2de730376f511 | [
"MIT"
] | 4 | 2021-06-12T06:05:44.000Z | 2021-06-13T06:20:00.000Z | fun/fun/asgi.py | larryw3i/osp | d9526a179876053a6b93e5a110d2de730376f511 | [
"MIT"
] | null | null | null | """
ASGI config for the fun project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
settings_exists = os.path.exists(
    os.path.join('.', 'fun', 'settings.py'))

os.environ.setdefault(
    'DJANGO_SETTINGS_MODULE',
    'fun.settings' if settings_exists else 'fun.settings_')

application = get_asgi_application()
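
# Editor's note (added): the conditional above falls back to a module
# literally named `fun.settings_` (trailing underscore) when
# `fun/settings.py` is missing; both are expected to live in the `fun`
# package.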
| 24.380952 | 78 | 0.748047 | 73 | 512 | 5.123288 | 0.643836 | 0.088235 | 0.096257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004484 | 0.128906 | 512 | 20 | 79 | 25.6 | 0.834081 | 0.410156 | 0 | 0 | 0 | 0 | 0.210884 | 0.07483 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
846af959bf6097940802093edacbaf3bdcb9cb94 | 1,300 | py | Python | aim_target/predict.py | maloyan/army2021 | b54d1d9a89fd40e3cb08fb0318dfed735e051f29 | [
"MIT"
] | 2 | 2021-08-31T19:54:53.000Z | 2021-09-06T10:47:19.000Z | aim_target/predict.py | maloyan/army2021 | b54d1d9a89fd40e3cb08fb0318dfed735e051f29 | [
"MIT"
] | null | null | null | aim_target/predict.py | maloyan/army2021 | b54d1d9a89fd40e3cb08fb0318dfed735e051f29 | [
"MIT"
] | null | null | null | import json
import os
import sys
import cv2
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from aim_target.utils import compute_accuracy
with open(sys.argv[1], "r") as f:
    config = json.load(f)

model = torch.load(
    f"{config['checkpoints']}/{config['image_size']}_{config['model']}.pt"
)
model.to("cuda")
model.eval()

meta_info = pd.read_csv(config["data_csv"])
meta_info = meta_info[meta_info["split"] == "test"]

test_data = [os.path.join(config["data_path"], i) for i in meta_info.path.values]
test_target = meta_info.target.values

fin_outputs = []
with torch.no_grad():
    for i in tqdm(test_data, total=len(test_data)):
        image = cv2.imread(i)
        image = cv2.resize(image, (config["image_size"], config["image_size"]))
        image = torch.tensor(np.moveaxis(image, -1, 0), dtype=torch.float).unsqueeze(0)
        image = image.to(config["device"])

        outputs = model(image)
        outputs = outputs.squeeze(1)
        outputs = np.argmax(outputs.detach().cpu().numpy(), axis=1)
        fin_outputs.extend(outputs.tolist())

compute_accuracy(test_target, fin_outputs)

# meta_info.target = fin_outputs
# meta_info.path = meta_info.path.apply(lambda x: x.split("/")[1])
# meta_info[["target", "path"]].to_csv("submission.csv", index=None)
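
# Editor's sketch (added): `compute_accuracy` is imported from
# aim_target.utils (not shown here); a plain-numpy equivalent, assuming it
# simply reports the fraction of matching labels, would be:
def _accuracy(y_true, y_pred):
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return float((y_true == y_pred).mean())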
| 29.545455 | 87 | 0.686154 | 198 | 1,300 | 4.348485 | 0.378788 | 0.092915 | 0.052265 | 0.04878 | 0.055749 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009074 | 0.152308 | 1,300 | 43 | 88 | 30.232558 | 0.772232 | 0.124615 | 0 | 0 | 0 | 0 | 0.109347 | 0.059083 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.28125 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
846c44254933e76179e17c4bb021953ba3b99f10 | 11,471 | py | Python | src/genomehubs/lib/_assembly_metadata.py | genomehubs/genomehubs | 5cf09ea0c13a76e6bd2dc629ee58da70ca7894f5 | [
"MIT"
] | 7 | 2017-06-08T13:44:16.000Z | 2021-01-26T07:57:53.000Z | src/genomehubs/lib/_assembly_metadata.py | genomehubs/genomehubs | 5cf09ea0c13a76e6bd2dc629ee58da70ca7894f5 | [
"MIT"
] | 76 | 2017-03-21T14:51:47.000Z | 2022-02-16T11:53:38.000Z | src/genomehubs/lib/_assembly_metadata.py | genomehubs/genomehubs | 5cf09ea0c13a76e6bd2dc629ee58da70ca7894f5 | [
"MIT"
] | 3 | 2019-04-24T10:10:54.000Z | 2021-01-26T07:57:54.000Z | #!/usr/bin/env python3
"""Assembly metadata methods."""
import sys
from collections import defaultdict
from tolkein import toinsdc
from tolkein import tolog
from .es_functions import index_stream
from .es_functions import query_keyword_value_template
from .es_functions import query_value_template
from .hub import add_attributes
from .hub import index_templator
from .taxon import add_taxonomy_info_to_meta
from .taxon import index_template as taxon_index_template
from .taxonomy import index_template as taxonomy_index_template
LOGGER = tolog.logger(__name__)
def index_template(taxonomy_name, opts):
    """Index template (includes name, mapping and types)."""
    parts = ["assembly", taxonomy_name, opts["hub-name"], opts["hub-version"]]
    template = index_templator(parts, opts)
    return template


def parse_insdc_metadata(opts):
    """Prepare INSDC metadata for indexing."""
    roots = opts.get("insdc-root", [1])
    if not isinstance(roots, list):
        roots = [roots]
    for root in roots:
        root = str(root)
        count = 0
        if root.startswith("GCA_"):
            LOGGER.warning(
                "Indexing assembly metadata GCA accession not yet implemented"
            )
            break
        elif root.isdigit():
            LOGGER.info("Indexing assembly metadata for taxid %s", root)
            count = toinsdc.count_taxon_assembly_meta(root)
        else:
            LOGGER.warning("%s is not a valid value for `insdc-root`", root)
        if count > 0:
            LOGGER.info("Indexing metadata for %d assemblies", count)
            assemblies = toinsdc.stream_taxon_assembly_meta(root)
            for assembly in assemblies:
                if assembly["genome_representation"] != "full":
                    continue
                assembly["assembly_id"] = "%s.%s" % (
                    assembly["gca_accession"],
                    assembly["assembly_version"],
                )
                yield assembly


def get_list_entries_by_dict_value(values, list_of_dicts, *, key="key"):
    """Get entries from a list of dicts by key."""
    entries = []
    for entry in list_of_dicts:
        if key in entry and entry[key] in values:
            entries.append(entry)
    return entries


def get_list_indices_by_dict_value(values, list_of_dicts, *, key="key"):
    """Get indices from a list of dicts by key."""
    indices = []
    for idx, entry in enumerate(list_of_dicts):
        if key in entry and entry[key] in values:
            indices.append(idx)
    return indices


def add_assembly_attributes_to_taxon(
    batch, asm_by_taxon_id, taxa, *, index_name, shared_attributes
):
    """Add assembly attributes to taxon."""
    for taxon_id, assemblies in asm_by_taxon_id.items():
        if taxon_id in taxa:
            taxon_attributes = defaultdict(list)
            if "attributes" not in taxa[taxon_id]:
                taxa[taxon_id]["attributes"] = []
            for idx in assemblies:
                asm_meta = batch[idx]
                attributes = get_list_entries_by_dict_value(
                    shared_attributes, asm_meta["attributes"]
                )
                for attr in attributes:
                    taxon_attr = {**attr}
                    del taxon_attr["key"]
                    taxon_attr["source_index"] = index_name
                    taxon_attr["source_doc_id"] = (
                        "assembly-%s" % asm_meta["assembly_id"]
                    )
                    taxon_attributes[attr["key"]].append(taxon_attr)
            for key, values in taxon_attributes.items():
                indices = get_list_indices_by_dict_value(
                    {key}, taxa[taxon_id]["attributes"]
                )
                if len(indices) == 1:
                    idx = indices[0]
                    # TODO: test if values are already present
                    taxa[taxon_id]["attributes"][idx]["values"] += values
                else:
                    taxa[taxon_id]["attributes"].append({"key": key, "values": values})


def stream_taxa(taxa):
    """Stream dict of taxa for indexing."""
    for taxon_id, value in taxa.items():
        yield "taxon_id-%s" % taxon_id, value


def collate_unique_key_value_indices(key, list_of_dicts):
    """Collate indices of unique key values in a list of dicts."""
    unique_key_values = set()
    entry_indices_by_key_value = defaultdict(list)
    for idx, entry in enumerate(list_of_dicts):
        if entry[key] not in unique_key_values:
            unique_key_values.add(entry[key])
        # record every entry's index under its key value
        entry_indices_by_key_value[entry[key]].append(idx)
    return list(unique_key_values), entry_indices_by_key_value


def get_taxa_to_create(
    es,
    batch,
    opts,
    *,
    taxonomy_name="ncbi",
    taxon_ids=None,
    asm_by_taxon_id=None,
    template=None,
    shared_attributes=None
):
    """Create a dict of taxa to create."""
    taxa_to_create = {}
    if not taxon_ids:
        return {}
    taxonomy_template = taxonomy_index_template(taxonomy_name, opts)
    taxonomy_res = query_value_template(
        es,
        "taxonomy_node_by_taxon_id",
        taxon_ids,
        taxonomy_template["index_name"],
    )
    if taxonomy_res is None:
        LOGGER.error(
            "Could not connect to taxonomy index '%s'",
            taxonomy_template["index_name"],
        )
        sys.exit(1)
    ancestors = set()
    for taxonomy_result in taxonomy_res["responses"]:
        if taxonomy_result["hits"]["total"]["value"] == 1:
            source = taxonomy_result["hits"]["hits"][0]["_source"]
            taxa_to_create[source["taxon_id"]] = source
            for ancestor in source["lineage"]:
                ancestors.add(ancestor["taxon_id"])
            for idx in asm_by_taxon_id[source["taxon_id"]]:
                add_taxonomy_info_to_meta(batch[idx], source)
    add_assembly_attributes_to_taxon(
        batch,
        asm_by_taxon_id,
        taxa_to_create,
        index_name=template["index_name"],
        shared_attributes=shared_attributes,
    )
    taxonomy_res = query_value_template(
        es,
        "taxonomy_node_by_taxon_id",
        list(ancestors),
        taxonomy_template["index_name"],
    )
    if taxonomy_res and "responses" in taxonomy_res:
        for taxonomy_result in taxonomy_res["responses"]:
            if taxonomy_result["hits"]["total"]["value"] == 1:
                source = taxonomy_result["hits"]["hits"][0]["_source"]
                taxa_to_create[source["taxon_id"]] = source
    return taxa_to_create


def preprocess_batch(es, batch, opts, *, taxonomy_name="ncbi"):
    """Preprocess a batch of assembly metadata to add/update taxonomy information."""
    template = index_template(taxonomy_name, opts)
    taxon_template = taxon_index_template(taxonomy_name, opts)
    # TODO: find shared attributes programmatically
    shared_attributes = {
        "assembly_span",
        "host_scientific_name",
        "sample_location",
        "sample_sex",
    }
    taxon_ids, asm_by_taxon_id = collate_unique_key_value_indices("taxon_id", batch)
    taxon_res = query_keyword_value_template(
        es,
        "attributes_by_keyword_value",
        "taxon_id",
        taxon_ids,
        taxon_template["index_name"],
    )
    taxa = {}
    if taxon_res is not None:
        for taxon_result in taxon_res["responses"]:
            if (
                "error" not in taxon_result
                and taxon_result["hits"]["total"]["value"] == 1
            ):
                source = taxon_result["hits"]["hits"][0]["_source"]
                taxa[source["taxon_id"]] = source
                for idx in asm_by_taxon_id[source["taxon_id"]]:
                    add_taxonomy_info_to_meta(batch[idx], source)
    taxa_to_update = {}
    taxon_ids = []
    for taxon_id, assemblies in asm_by_taxon_id.items():
        if taxon_id in taxa:
            taxa_to_update[taxon_id] = {}
            if "attributes" in taxa[taxon_id]:
                taxa_to_update[taxon_id]["attributes"] = taxa[taxon_id]["attributes"]
        else:
            taxon_ids.append(taxon_id)
    add_assembly_attributes_to_taxon(
        batch,
        asm_by_taxon_id,
        taxa_to_update,
        index_name=template["index_name"],
        shared_attributes=shared_attributes,
    )
    taxa_to_create = get_taxa_to_create(
        es,
        batch,
        opts,
        taxonomy_name=taxonomy_name,
        taxon_ids=taxon_ids,
        asm_by_taxon_id=asm_by_taxon_id,
        template=template,
        shared_attributes=shared_attributes,
    )
    to_create = len(taxa_to_create.keys())
    to_update = len(taxa_to_update.keys())
    LOGGER.info(
        "%d taxa to create, %d to update",
        to_create,
        to_update,
    )
    if to_create > 0:
        index_stream(es, taxon_template["index_name"], stream_taxa(taxa_to_create))
    if to_update > 0:
        index_stream(
            es,
            taxon_template["index_name"],
            stream_taxa(taxa_to_update),
            _op_type="update",
        )
    return to_create, to_update


def stream_assemblies(assemblies):
    """Stream list of assemblies for indexing."""
    for asm in assemblies:
        yield "assembly-%s" % asm["assembly_id"], asm


def set_top_level_meta(raw_meta):
    """Set top level assembly metadata."""
    top_level = {
        "assembly_id": raw_meta["assembly_id"],
        "taxon_id": raw_meta["taxon_id"],
    }
    return top_level


def index(es, opts, *, metadata_name="insdc", taxonomy_name="ncbi"):
    """Index all assemblies descended from root."""
    parsers = {"insdc": parse_insdc_metadata}
    sources = {"insdc": "INSDC"}
    parser = parsers.get(metadata_name.lower(), None)
    if parser is None:
        LOGGER.warning("No parser available for %s metadata", metadata_name)
        return None
    LOGGER.info("Indexing %s metadata", metadata_name)
    template = index_template(taxonomy_name, opts)
    stream = parser(opts)
    batch = []
    iteration = 1
    while True:
        stop = False
        try:
            raw_meta = next(stream)
            attributes = add_attributes(
                raw_meta,
                template["types"]["attributes"],
                attributes=[],
                source=sources.get(metadata_name, metadata_name),
            )
            identifiers = add_attributes(
                raw_meta,
                template["types"]["identifiers"],
                attributes=[],
                source=sources.get(metadata_name, metadata_name),
                attr_type="identifiers",
            )
            top_level = set_top_level_meta(raw_meta)
            asm_meta = {
                **top_level,
                "attributes": attributes,
                "identifiers": identifiers,
            }
            batch.append(asm_meta)
        except StopIteration:
            stop = True
        if len(batch) == opts["es-batch"] or stop:
            # TODO: set attributes
            LOGGER.info("Processing batch %d with %d assemblies", iteration, len(batch))
            created, updated = preprocess_batch(
                es, batch, opts, taxonomy_name=taxonomy_name
            )
            LOGGER.info("Indexing %d assemblies", len(batch))
            assembly_stream = stream_assemblies(batch)
            index_stream(es, template["index_name"], assembly_stream)
            batch = []
            iteration += 1
        if stop:
            break
    return
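
# Editor's illustration (added, hypothetical data): the batching helper above
# groups entry indices by a shared key, e.g.
#   collate_unique_key_value_indices(
#       "taxon_id",
#       [{"taxon_id": "9606"}, {"taxon_id": "10090"}, {"taxon_id": "9606"}])
# returns the unique ids plus {"9606": [0, 2], "10090": [1]} (the order of
# the unique-id list is unspecified because it is built from a set).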
| 34.447447 | 88 | 0.601866 | 1,347 | 11,471 | 4.835189 | 0.126949 | 0.042991 | 0.017964 | 0.020267 | 0.363581 | 0.301244 | 0.245663 | 0.212191 | 0.196837 | 0.158606 | 0 | 0.002109 | 0.297359 | 11,471 | 332 | 89 | 34.551205 | 0.805955 | 0.058757 | 0 | 0.246479 | 0 | 0 | 0.115492 | 0.009135 | 0 | 0 | 0 | 0.003012 | 0 | 1 | 0.042254 | false | 0 | 0.042254 | 0 | 0.119718 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8470cc594037f191b3b43b6897fa69a25d35dce1 | 2,890 | py | Python | tests/test_time_res_method.py | GispoCoding/datacube-ows | 4ffe14ccf9f382bff8f4a33fc54729d3e18b670e | [
"Apache-2.0"
] | null | null | null | tests/test_time_res_method.py | GispoCoding/datacube-ows | 4ffe14ccf9f382bff8f4a33fc54729d3e18b670e | [
"Apache-2.0"
] | null | null | null | tests/test_time_res_method.py | GispoCoding/datacube-ows | 4ffe14ccf9f382bff8f4a33fc54729d3e18b670e | [
"Apache-2.0"
] | null | null | null | import pytest
from datetime import datetime

import pytz

from datacube_ows.ows_configuration import OWSProductLayer, TIMERES_RAW, TIMERES_MON, TIMERES_YR


def dummy_timeres_layer(time_res):
    prod = product_layer = OWSProductLayer.__new__(OWSProductLayer)
    prod.time_resolution = time_res
    return prod


class Thing:
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)


@pytest.fixture
def dummy_raw_layer():
    return dummy_timeres_layer(TIMERES_RAW)


@pytest.fixture
def dummy_monthly_layer():
    return dummy_timeres_layer(TIMERES_MON)


@pytest.fixture
def dummy_yearly_layer():
    return dummy_timeres_layer(TIMERES_YR)


@pytest.fixture
def simple_geobox():
    from datacube_ows.wms_utils import _get_geobox
    from affine import Affine
    from datacube.utils import geometry

    aff = Affine.translation(145.0, -35.0) * Affine.scale(
        1.0 / 256, 2.0 / 256
    )
    return geometry.GeoBox(256, 256, aff, 'EPSG:4326')


def test_raw_timeres(dummy_raw_layer, simple_geobox):
    assert dummy_raw_layer.is_raw_time_res
    assert not dummy_raw_layer.is_month_time_res
    assert not dummy_raw_layer.is_year_time_res
    assert dummy_raw_layer.dataset_groupby() == "solar_day"
    assert dummy_raw_layer.search_times(
        datetime(2020, 6, 7, 20, 20, 0, tzinfo=pytz.utc),
        simple_geobox,
    ) == (
        datetime(2020, 6, 6, 13, 55, tzinfo=pytz.utc),
        datetime(2020, 6, 7, 13, 54, 59, tzinfo=pytz.utc),
    )


def test_mon_timeres(dummy_monthly_layer, simple_geobox):
    assert not dummy_monthly_layer.is_raw_time_res
    assert dummy_monthly_layer.is_month_time_res
    assert not dummy_monthly_layer.is_year_time_res
    gby = dummy_monthly_layer.dataset_groupby()
    assert gby.dimension == 'time'
    t = Thing(begin="ABC")
    ds = Thing(time=t)
    assert gby.group_by_func(ds) == "ABC"
    assert gby.units == 'seconds since 1970-01-01 00:00:00'
    assert gby.sort_key(ds) == "ABC"
    assert dummy_monthly_layer.search_times(
        datetime(2020, 6, 1, tzinfo=pytz.utc),
        simple_geobox,
    ) == (
        datetime(2020, 6, 1),
        datetime(2020, 6, 30),
    )


def test_year_timeres(dummy_yearly_layer):
    assert not dummy_yearly_layer.is_raw_time_res
    assert not dummy_yearly_layer.is_month_time_res
    assert dummy_yearly_layer.is_year_time_res
    gby = dummy_yearly_layer.dataset_groupby()
    assert gby.dimension == 'time'
    t = Thing(begin="ABC")
    ds = Thing(time=t)
    assert gby.group_by_func(ds) == "ABC"
    assert gby.units == 'seconds since 1970-01-01 00:00:00'
    assert gby.sort_key(ds) == "ABC"
    assert dummy_yearly_layer.search_times(
        datetime(2020, 6, 1, tzinfo=pytz.utc),
        simple_geobox,
    ) == (
        datetime(2020, 1, 1),
        datetime(2020, 12, 31, 23, 59, 59),
    )
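
# Editor's note (added): `Thing` above is a minimal attribute bag used to
# fake datasets, e.g. Thing(time=Thing(begin="ABC")).time.begin == "ABC",
# which is exactly what the group-by assertions rely on.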
| 30.104167 | 96 | 0.689619 | 418 | 2,890 | 4.466507 | 0.23445 | 0.041243 | 0.048741 | 0.03428 | 0.504017 | 0.498125 | 0.371719 | 0.343867 | 0.247456 | 0.247456 | 0 | 0.058257 | 0.210035 | 2,890 | 95 | 97 | 30.421053 | 0.759527 | 0 | 0 | 0.307692 | 0 | 0 | 0.038062 | 0 | 0 | 0 | 0 | 0 | 0.269231 | 1 | 0.115385 | false | 0 | 0.089744 | 0.038462 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8472be5ff4c0eb90b45ae9ec3a6f3d498c42634d | 8,846 | py | Python | ini/types.py | ClementJ18/ini_parser | 68bee79af6319cc967c3fa13e4ba7b5ef0485fa0 | [
"MIT"
] | null | null | null | ini/types.py | ClementJ18/ini_parser | 68bee79af6319cc967c3fa13e4ba7b5ef0485fa0 | [
"MIT"
] | null | null | null | ini/types.py | ClementJ18/ini_parser | 68bee79af6319cc967c3fa13e4ba7b5ef0485fa0 | [
"MIT"
] | null | null | null | from .utils import to_float
from .enums import *

import regex
import sys
from itertools import zip_longest
from collections import defaultdict


class Bool:
    @classmethod
    def convert(cls, parser, value):
        value = parser.get_macro(value)
        if isinstance(value, bool):
            return value
        if not value.lower() in ["yes", "no"]:
            raise ValueError(f"Expected a MACRO or a BOOL but found {value}")
        return value.lower() == "yes"


class Number:
    operation_mapping = {
        "MULTIPLY": lambda x, y: x * y,
        "DIVIDE": lambda x, y: x / y
    }
    pattern = regex.compile(r"#(MULTIPLY|DIVIDE)\(\s*((?R)|[0-9]*)\s*((?R)|[0-9]*)\s*\)", regex.VERSION1)

    @classmethod
    def do_operation(cls, parser, value):
        match = cls.pattern.match(value)
        if match is None:
            return cls.get_value(parser, value)
        return cls.operation_mapping[match.group(1)](
            cls.do_operation(parser, match.group(2)),
            cls.do_operation(parser, match.group(3))
        )

    @classmethod
    def convert(cls, parser, value):
        return cls.get_value(parser, cls.do_operation(parser, value))

    @classmethod
    def get_value(cls, parser, values):
        raise NotImplementedError


class Float(Number):
    @classmethod
    def get_value(cls, parser, value):
        value = parser.get_macro(value)
        try:
            return to_float(value)
        except ValueError:
            raise ValueError(f"Expected a MACRO or FLOAT but found {value}")


class Int(Number):
    @classmethod
    def get_value(cls, parser, value):
        value = parser.get_macro(value)
        try:
            return int(value)
        except ValueError:
            raise ValueError(f"Expected a MACRO or INT but found {value}")


class Degrees(Number):
    @classmethod
    def get_value(cls, parser, value):
        num = Int.get_value(parser, value)
        if 180 >= num >= -180:
            return num
        raise ValueError(f"Expected a MACRO or DEGREES but found {num}")


class Complex:
    @classmethod
    def convert(cls, parser, value):
        values = [0, 0, 0]  # x y z
        for em in value.split():
            ref, code = em.split(":")
            values[cls.indexes.index(ref.strip())] = float(code.strip())
        return values


class Coords(Complex):
    indexes = ["X", "Y", "Z"]


class RGB(Complex):
    indexes = ["R", "G", "B"]


class ContainerConverter:
    @staticmethod
    def get_annotation(annotation):
        if isinstance(annotation, str):
            return getattr(sys.modules["ini"], annotation)
        return annotation


class _List(ContainerConverter):
    def __init__(self, element_type=None, index=None):
        self.element_type = element_type
        self.index = index

    def convert(self, parser, value):
        if isinstance(value, str):
            value = value.split()
        annotation = self.get_annotation(self.element_type)
        return [annotation.convert(parser, x) for x in value[self.index:]]

    def __getitem__(self, params):
        if not isinstance(params, tuple):
            params = (params, 0)
        return self.__class__(params[0], params[1])


List = _List()


class _Tuple(ContainerConverter):
    def __init__(self, *element_types):
        self.element_types = element_types

    def convert(self, parser, value):
        annotations = [self.get_annotation(e_type) for e_type in self.element_types]
        if isinstance(value, str):
            value = value.split(maxsplit=len(self.element_types) - 1)
        em = []
        for t, e in zip_longest(annotations, value, fillvalue=annotations[-1]):
            em.append(t.convert(parser, e))
        return tuple(em)

    def __getitem__(self, params):
        return self.__class__(*params)


Tuple = _Tuple()


class _Union(ContainerConverter):
    def __init__(self, *types):
        self.types = types

    def convert(self, parser, value):
        for t_type in self.types:
            t = self.get_annotation(t_type)
            try:
                return t.convert(parser, value)
            except:
                pass
        raise ValueError(f"Failed to convert to any of {self.types}")

    def __getitem__(self, params):
        return self.__class__(*params)


Union = _Union()


class _KeyValuePair(ContainerConverter):
    def __init__(self, *values):
        self.values = values

    def convert(self, parser, value):
        pairs = defaultdict(list)
        for value_type, pair in zip_longest(self.values, value.split(), fillvalue=self.values[-1]):
            key, raw_v = pair.split(":")
            annotation = self.get_annotation(value_type)
            if isinstance(annotation, _List):
                value = self.get_annotation(annotation.element_type).convert(parser, raw_v)
                pairs[key].append(value)
            else:
                value = annotation.convert(parser, raw_v)
                pairs[key] = value
        return pairs

    def __getitem__(self, *params):
        return self.__class__(*params)


KeyValuePair = _KeyValuePair()


class String:
    """
    type : str
    name : str
    value : str
    shortcut : Optional[str]
    """

    def __init__(self, name, value):
        self.type, self.name = name.rsplit(":", 1)
        self.value = value

        match = regex.search(r"&([A-Za-z])", value)
        self.shortcut = match.group(1) if match else None

    def __str__(self):
        return self.value

    def __repr__(self):
        return f"<String {self.name}>"

    @classmethod
    def convert(cls, parser, value):
        return parser.strings[value]

    @property
    def full_name(self):
        return f"{self.type}:{self.name}"

    def string_comparator(self, original, changed):
        deleted = [x for x in original if x not in changed]
        new = [x for x in changed if x not in original]
        changed = [x for x in original if original.get(x).value != changed.get(x, String("NULL:NULL", "NULL")).value and changed.get(x) is not None]
        return changed, deleted, new


Moment = Tuple[MomentEnum, Union["Weapon", "OCL"]]


class FilterList:
    members = []

    def __init__(self, values, parser):
        self.descriptor = None
        self.relations = []
        self.inclusion = []
        self.exclusion = []
        for value in values.split():
            if value in Descriptors.__members__:
                self.descriptor = Descriptors[value]
            elif value in Relations.__members__:
                self.relations.append(Relations[value])
            elif value.startswith(('-', '+')):
                for member in self.members:
                    if isinstance(member, str):
                        member = getattr(sys.modules["ini"], member)
                    try:
                        converted = member.convert(parser, value[1:])
                        break
                    except KeyError:
                        pass
                else:
                    # converted = value[1:]
                    raise ValueError(f"Expected any of {self.members} but got {value[1:]}")
                if value[0] == "-":
                    self.exclusion.append(converted)
                else:
                    self.inclusion.append(converted)

    def __repr__(self):
        return f"<{self.__class__.__name__}>"

    def is_in(self, obj, relation=None):
        exclusions = (x == obj.name or x in obj.kindof for x in self.exclusion)
        if any(exclusions):
            return False
        if relation not in self.relations and relation is not None:
            return False
        inclusions = (x == obj.name or x in obj.kindof for x in self.inclusion)
        if self.descriptor == Descriptors.ALL:
            return all(inclusions)
        if self.descriptor == Descriptors.ANY:
            return any(inclusions)
        if self.descriptor == Descriptors.NONE:
            return not any(inclusions)
        return False

    @classmethod
    def convert(cls, parser, value):
        return cls(value, parser)


class ObjectFilter(FilterList):
    members = [KindOf, "Object"]


class DeathTypeFilter(FilterList):
    members = [DeathType]


class DamageTypeFilter(FilterList):
    members = [DamageType]
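
# Editor's usage sketch (added; the tokens below are hypothetical examples):
# given a filter string such as "ANY +INFANTRY -AIRCRAFT",
# FilterList.__init__ would set descriptor=Descriptors.ANY, put the
# converted INFANTRY entry in `inclusion` and the converted AIRCRAFT entry
# in `exclusion`.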
| 30.191126 | 144 | 0.553697 | 970 | 8,846 | 4.903093 | 0.174227 | 0.041632 | 0.026493 | 0.025231 | 0.301514 | 0.226451 | 0.165265 | 0.128469 | 0.067283 | 0.067283 | 0 | 0.004987 | 0.342641 | 8,846 | 292 | 145 | 30.294521 | 0.812898 | 0.009835 | 0 | 0.232227 | 0 | 0.004739 | 0.053555 | 0.012271 | 0 | 0 | 0 | 0 | 0 | 1 | 0.146919 | false | 0.009479 | 0.028436 | 0.047393 | 0.450237 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8473a16363e9a185f76eb2bc8c324f1926385713 | 8,299 | py | Python | ina233.py | scottvr/pi-ina233 | 906d4349fffbe06a14c9fd9bf9dc2f32c50ccdc2 | [
"MIT"
] | 1 | 2021-02-21T09:18:47.000Z | 2021-02-21T09:18:47.000Z | ina233.py | scottvr/pi-ina233 | 906d4349fffbe06a14c9fd9bf9dc2f32c50ccdc2 | [
"MIT"
] | null | null | null | ina233.py | scottvr/pi-ina233 | 906d4349fffbe06a14c9fd9bf9dc2f32c50ccdc2 | [
"MIT"
] | 1 | 2021-07-27T06:08:34.000Z | 2021-07-27T06:08:34.000Z | """ This library supports the TI INA233 current and power monitor
with a Raspberry PI using SMBus/I2C.
By scottvr for 1SimplePhone.com
"""
from smbus2 import SMBus
class INA233:
    CLEAR_FAULTS = 0x03
    RESTORE_DEFAULT_ALL = 0x12
    CAPABILITY = 0x19
    IOUT_OC_WARN_LIMIT = 0x4A
    VIN_OV_WARN_LIMIT = 0x57
    VIN_UV_WARN_LIMIT = 0x58
    PIN_OP_WARN_LIMIT = 0x6B
    STATUS_BYTE = 0x78
    STATUS_WORD = 0x79
    STATUS_IOUT = 0x79  # duplicate of STATUS_WORD; overridden by the next line
    STATUS_IOUT = 0x7B
    STATUS_INPUT = 0x7C
    STATUS_CML = 0x7E
    STATUS_MFR_SPECIFIC = 0x80
    READ_EIN = 0x86
    READ_VIN = 0x88
    READ_IIN = 0x89
    READ_VOUT = 0x8B
    READ_IOUT = 0x8C
    READ_POUT = 0x96
    READ_PIN = 0x97
    MFR_ID = 0x99
    MFR_MODEL = 0x9A
    MFR_REVISION = 0x9B
    MFR_ADC_CONFIG = 0xD0
    MFR_READ_VSHUNT = 0xD1
    MFR_ALERT_MASK = 0xD2
    MFR_CALIBRATION = 0xD4
    MFR_DEVICE_CONFIG = 0xD5
    CLEAR_EIN = 0xD6
    TI_MFR_ID = 0xE0
    TI_MFR_MODEL = 0xE1
    TI_MFR_REVISION = 0xE2

    # from table 1 p 17 of INA233 documentation pdf
    # SHUNT VOLTAGE TELEMETRY & WARNING COEFFICIENTS
    _m_vs = 4
    _R_vs = 5
    _b_vs = 0
    _m_c = 0
    _R_c = 0
    _m_p = 0
    _R_p = 0
    # BUS VOLTAGE TELEMETRY & WARNING COEFFICIENTS
    _R_vb = 2
    _b_vb = 0
    _m_vb = 8
    # CURRENT & POWER CONSTANT TELEMETRY & WARNING COEFFICIENTS
    _b_c = 0
    _b_p = 0

    _BUS_MILLIVOLTS_LSB = 0.00125
    _SHUNT_MILLIVOLTS_LSB = 0.0000025

    _accumulator_24 = 0
    _sample_count = 0

    def __init__(self, bus, address):
        self._bus = SMBus(bus)
        self._address = address

    def calibrate(self, R_shunt, I_max):
        """ Calibration and scaling values per section 7.5.2
        of TI INA233 datasheet
        """
        self._R_shunt = R_shunt
        self._I_max = I_max
        self._Current_LSB = 0
        self._Power_LSB = 0
        self._CAL = 0
        tmp = 0
        round_done = False
        ERROR = 0
        self._Current_LSB = self._I_max / (pow(2, 15))
        self._Power_LSB = 25 * self._Current_LSB
        self._CAL = 0.00512 / (self._R_shunt * self._Current_LSB)
        # check if CAL is in the uint16 range
        if self._CAL > 0xFFFF:
            ERROR = 1
        else:
            self._bus.write_word_data(self._address, self.MFR_CALIBRATION, int(self._CAL))
        self._m_c = 1 / self._Current_LSB
        self._m_p = 1 / self._Power_LSB
        # Calculate m and R for maximum accuracy in current measurement
        tmp = int(self._m_c)
        while ((tmp > 32768) or (tmp < -32768)):
            self._m_c = self._m_c / 10
            self._R_c = self._R_c + 1
            tmp = int(self._m_c)
        while round_done == False:
            tmp = int(self._m_c)
            if tmp == self._m_c:
                round_done = True
            else:
                tmp = int(self._m_c * 10)  # shift decimal to the right
                if ((tmp > 32768) or (tmp < -32768)):  # m_c is out of int16 (-32768 to 32768)
                    round_done = True
                else:
                    self._m_c = self._m_c * 10
                    self._R_c = self._R_c - 1
        round_done = False
        # Calculate m and R for maximum accuracy in power measurement
        tmp = int(self._m_p)
        while tmp > 32768 or tmp < -32768:
            self._m_p = self._m_p / 10
            self._R_p = self._R_p + 1
            tmp = int(self._m_p)
        while round_done == False:
            tmp = int(self._m_p)
            if tmp == self._m_p:
                round_done = True
            else:
                tmp = int(self._m_p * 10)  # shift decimal to the right
                if tmp > 32768 or tmp < -32768:  # m_p is out of int16 (-32768 to 32768)
                    round_done = True
                else:
                    self._m_p = self._m_p * 10
                    self._R_p = self._R_p - 1
        self._m_c = int(self._m_c)
        self._m_p = int(self._m_p)

    def _getBusVoltageIn_raw(self):
        raw_read = self._bus.read_word_data(self._address, self.READ_VIN)
        return int(raw_read)

    def _getBusVoltageOut_raw(self):
        raw_read = self._bus.read_word_data(self._address, self.READ_VOUT)
        return int(raw_read)

    def _getShuntVoltage_raw(self):
        raw_read = self._bus.read_word_data(self._address, self.MFR_READ_VSHUNT)
        return int(raw_read)

    def _getCurrentOut_raw(self):
        raw_read = self._bus.read_i2c_block_data(self._address, self.READ_IOUT, 2)
        return raw_read[0] * 256 + raw_read[1]

    def _getCurrentIn_raw(self):
        raw_read = self._bus.read_i2c_block_data(self._address, self.READ_IIN, 2)
        return raw_read[0] * 256 + raw_read[1]

    def getShuntVoltage_mV(self):
        raw_read = self._getShuntVoltage_raw()
        # return ((raw_read*pow(10,-self._R_vs)-self._b_vs)/self._m_vs)
        return raw_read * self._SHUNT_MILLIVOLTS_LSB * 1000

    def getBusVoltageIn_V(self):
        raw_read = self._getBusVoltageIn_raw()
        # return ((raw_read*pow(10,-self._R_vb)-self._b_vb)/self._m_vb)
        return raw_read * self._BUS_MILLIVOLTS_LSB

    def getBusVoltageOut_V(self):
        raw_read = self._getBusVoltageOut_raw()
        # return ((raw_read*pow(10,-self._R_vb)-self._b_vb)/self._m_vb)
        return raw_read * self._BUS_MILLIVOLTS_LSB

    def getCurrentIn_mA(self):
        word_rdata = self._getCurrentIn_raw()
        current_twos_compliment = word_rdata
        current_sign_bit = current_twos_compliment >> 15
        if (current_sign_bit == 1):
            current = float(self._twos_compliment_to_int(current_twos_compliment, 16)) * self._Current_LSB
        else:
            # current = (value*(pow(10,-self._R_c))-self._b_c)/self._m_c
            current = float(current_twos_compliment) * self._Current_LSB
        return current

    def getCurrentOut_mA(self):
        word_rdata = self._getCurrentOut_raw()
        current_twos_compliment = word_rdata
        current_sign_bit = current_twos_compliment >> 15
        if (current_sign_bit == 1):
            current = float(self._twos_compliment_to_int(current_twos_compliment, 16)) * self._Current_LSB
        else:
            # current = (value*(pow(10,-self._R_c))-self._b_c)/self._m_c
            current = float(current_twos_compliment) * self._Current_LSB
        return current

    def _getPower_raw(self):
        raw_read = self._bus.read_word_data(self._address, self.READ_PIN)
        return int(raw_read)

    def _getEnergy_raw(self):
        raw_read = self._bus.read_i2c_block_data(self._address, self.READ_EIN, 6)
        self._accumulator = (raw_read[0] << 8) | raw_read[1]
        self._roll_over = raw_read[2]
        self._sample_count = raw_read[5] << 16
        self._sample_count = (raw_read[4] << 8) | self._sample_count
        self._sample_count = (raw_read[3] | self._sample_count)

    def getAv_Power_mW(self):
        raw_av_power = 0
        av_power = 0
        # prev_accumulator_24 = self._accumulator_24
        # prev_sample_count = self._sample_count
        self._getEnergy_raw()
        # Total Accumulated Unscaled Power (Accumulator_24) = (rollover_count × 2^16) + Accumulator
        self._accumulator_24 = int(self._roll_over) * 65536 + int(self._accumulator)
        # raw_av_power=(self._accumulator_24-prev_accumulator_24)/(self._sample_count-prev_sample_count)
        # doing it this way may be less accurate, but it avoids the divide by zero in the first reading
        raw_av_power = (self._accumulator_24) / (self._sample_count)
        # av_power=(raw_av_power*pow(10,-self._R_p)-self._b_p)/self._m_p
        av_power = raw_av_power * self._Power_LSB
        return av_power * 1000

    def getPower_mW(self):
        raw_read = self._getPower_raw()
        # power =(raw_read*pow(10,-self._R_p)-self._b_p)/self._m_p
        power = raw_read * self._Power_LSB
        return power

    def _twos_compliment_to_int(self, val, bits):
        if (val & (1 << (bits - 1))) != 0:
            val = val - (1 << bits)
        return val
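
# Editor's usage sketch (added; the bus number, address and shunt values are
# assumptions, not part of the original library):
# ina = INA233(1, 0x40)
# ina.calibrate(R_shunt=0.002, I_max=10)  # 2 mOhm shunt, 10 A full scale
# print(ina.getBusVoltageIn_V(), ina.getCurrentIn_mA())
# Worked example for the helper above: 0xFFFF interpreted as a signed 16-bit
# value is 0xFFFF - (1 << 16) = -1, i.e. one negative Current_LSB step.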
| 35.314894 | 106 | 0.599108 | 1,152 | 8,299 | 3.913194 | 0.198785 | 0.051242 | 0.036602 | 0.036602 | 0.486025 | 0.405501 | 0.374889 | 0.374889 | 0.318101 | 0.318101 | 0 | 0.056239 | 0.314375 | 8,299 | 234 | 107 | 35.465812 | 0.735852 | 0.177732 | 0 | 0.234286 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020381 | 0 | 0 | 1 | 0.097143 | false | 0 | 0.005714 | 0 | 0.468571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8474415733d26503f0e0b41a1a2b80c15eaec5b3 | 530 | py | Python | but/trades/serializers/api_sell_base.py | yevgnenll/but | 2cb3d7b8fd4b898440f9a74ee4b6b8fbdff32bb1 | [
"MIT"
] | 4 | 2017-02-25T04:46:41.000Z | 2021-03-16T21:41:51.000Z | but/trades/serializers/api_sell_base.py | yevgnenll/but | 2cb3d7b8fd4b898440f9a74ee4b6b8fbdff32bb1 | [
"MIT"
] | 18 | 2016-04-09T07:29:33.000Z | 2017-04-06T04:39:54.000Z | but/trades/serializers/api_sell_base.py | yevgnenll/but | 2cb3d7b8fd4b898440f9a74ee4b6b8fbdff32bb1 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from trades.models import Sell


class SellBaseModelSerializer(serializers.ModelSerializer):
    username = serializers.CharField(
        source="user.username",
    )

    class Meta:
        model = Sell
        fields = (
            'pk',
            'title',
            'goods_name',
            'username',
            'sub_title',
            'stock',
            'sold_count',
            'price',
            'welcome_image',
        )
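
# Editor's note (added, illustrative field values): a serialized Sell
# instance comes out flat, e.g. {"pk": 1, "title": "...", "username": "..."};
# the `source="user.username"` declaration above is what pulls the related
# user's name into that flat representation.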
| 19.62963 | 59 | 0.475472 | 38 | 530 | 6.5 | 0.763158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.437736 | 530 | 26 | 60 | 20.384615 | 0.828859 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8478a40f6283841cbeef90271865092b9e8ed207 | 3,484 | py | Python | bootstrap.py | ptweir/pyresampling | 7d81e2b3b2af57e43e24dddd2e04918f73f9f290 | [
"MIT"
] | 4 | 2015-11-18T18:27:38.000Z | 2017-01-18T18:20:22.000Z | bootstrap.py | ptweir/pyresampling | 7d81e2b3b2af57e43e24dddd2e04918f73f9f290 | [
"MIT"
] | null | null | null | bootstrap.py | ptweir/pyresampling | 7d81e2b3b2af57e43e24dddd2e04918f73f9f290 | [
"MIT"
] | null | null | null | import numpy as np
import collections.abc  # Iterable lives in collections.abc on Python 3.3+

try:
    from scipy.stats import scoreatpercentile
except:
    # in case no scipy
    scoreatpercentile = False


def _confidence_interval_1d(A, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    """Calculates bootstrap confidence interval along one dimensional array"""
    if not isinstance(alpha, collections.abc.Iterable):
        alpha = np.array([alpha])
    N = len(A)
    resampleInds = np.random.randint(0, N, (numResamples, N))
    metricOfResampled = metric(A[resampleInds], axis=-1)
    confidenceInterval = np.zeros(2 * len(alpha), dtype='float')
    if interpolate:
        for thisAlphaInd, thisAlpha in enumerate(alpha):
            confidenceInterval[2 * thisAlphaInd] = scoreatpercentile(metricOfResampled, thisAlpha * 100 / 2.0)
            confidenceInterval[2 * thisAlphaInd + 1] = scoreatpercentile(metricOfResampled, 100 - thisAlpha * 100 / 2.0)
    else:
        sortedMetricOfResampled = np.sort(metricOfResampled)
        for thisAlphaInd, thisAlpha in enumerate(alpha):
            confidenceInterval[2 * thisAlphaInd] = sortedMetricOfResampled[int(round(thisAlpha * numResamples / 2.0))]
            confidenceInterval[2 * thisAlphaInd + 1] = sortedMetricOfResampled[int(round(numResamples - thisAlpha * numResamples / 2.0))]
    return confidenceInterval


def _ma_confidence_interval_1d(A, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    A = np.ma.masked_invalid(A, copy=True)
    A = A.compressed()
    confidenceInterval = _confidence_interval_1d(A, alpha, metric, numResamples, interpolate)
    return confidenceInterval


def confidence_interval(A, axis=None, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    """Return the bootstrap confidence interval of an array or along an axis ignoring NaNs and masked elements.

    Parameters
    ----------
    A : array_like
        Array containing numbers whose confidence interval is desired.
    axis : int, optional
        Axis along which the confidence interval is computed.
        The default is to compute the confidence interval of the flattened array.
    alpha: float or array, optional
        confidence level of confidence interval. 100.0*(1-alpha) percent confidence interval will be returned.
        If length-n array, n confidence intervals will be computed
        The default is .05
    metric : numpy function, optional
        metric to calculate confidence interval for.
        The default is numpy.mean
    numResamples : int, optional
        number of bootstrap samples. The default is 10000.
    interpolate: bool, optional
        uses scipy.stats.scoreatpercentile to interpolate between bootstrap samples if alpha*numResamples/2.0 is not integer.
        The default is True

    Returns
    -------
    confidenceInterval : ndarray
        An array with the same shape as `A`, with the specified axis replaced by one twice the length of the alpha
        If `A` is a 0-d array, or if axis is None, a length-2 ndarray is returned.
    """
    if interpolate is True and scoreatpercentile is False:
        print("need scipy to interpolate between values")
        interpolate = False
    A = A.copy()
    if axis is None:
        A = A.ravel()
        outA = _ma_confidence_interval_1d(A, alpha, metric, numResamples, interpolate)
    else:
        outA = np.apply_along_axis(_ma_confidence_interval_1d, axis, A, alpha, metric, numResamples, interpolate)
    return outA
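
# Editor's usage sketch (added): a 95% bootstrap CI of the column means of
# random data; the reduced axis is replaced by a (low, high) pair.
def _ci_demo():
    A = np.random.randn(100, 3)
    return confidence_interval(A, axis=0, alpha=0.05, numResamples=1000)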
| 43.55 | 129 | 0.705511 | 424 | 3,484 | 5.742925 | 0.287736 | 0.103491 | 0.041068 | 0.034497 | 0.243121 | 0.232444 | 0.183573 | 0.183573 | 0.138398 | 0.059138 | 0 | 0.024845 | 0.214409 | 3,484 | 79 | 130 | 44.101266 | 0.864815 | 0.367968 | 0 | 0.153846 | 0 | 0 | 0.021614 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84791502a9e42427caeb5f730a6711482d65741e | 806 | py | Python | suasor/suasor/auxilium.py | campovski/suasor | 11bd745b8e023d9cf79ce23bec992e23a6427613 | [
"MIT"
] | null | null | null | suasor/suasor/auxilium.py | campovski/suasor | 11bd745b8e023d9cf79ce23bec992e23a6427613 | [
"MIT"
] | 14 | 2018-04-09T15:51:18.000Z | 2018-04-16T13:44:22.000Z | suasor/suasor/auxilium.py | campovski/suasor | 11bd745b8e023d9cf79ce23bec992e23a6427613 | [
"MIT"
] | null | null | null | import datetime
import os

from .models import Log, LogType
from .settings import DEBUG, DIR_DATA, DIR_DATA_DEBUG, DIR_DATA_IMAGES, DIR_DATA_PEOPLE, DIR_DATA_LOG

"""
Writes information about an error that occurred (e.g. a name or an image could not be extracted).
@param at: The name of the function that this error occurred in
@param desc: Description of the error
"""


def _log(log_type, package, at, desc):
    log_time = datetime.datetime.now()
    if DEBUG:
        with open(os.path.join(DIR_DATA_LOG, '{}.log'.format(package)), 'a') as f:
            f.write('[{0}] @ {1} @ {2}: {3}\n'.format(log_type, log_time, at, desc))
    log = Log()
    log.at_time = log_time
    log.type = LogType.objects.get(name=log_type)
    log.in_package = package
    log.in_function = at
    log.description = desc
    log.save()
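
# Editor's usage sketch (added; the argument values are hypothetical and the
# named LogType row must already exist in the database):
# _log('ERROR', 'suasor', 'extract_name', 'name could not be extracted')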
| 32.24 | 101 | 0.684864 | 127 | 806 | 4.181102 | 0.464567 | 0.079096 | 0.045198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006154 | 0.193548 | 806 | 24 | 102 | 33.583333 | 0.810769 | 0 | 0 | 0 | 0 | 0 | 0.050489 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.25 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
847cbacea93949610c39623fec080a8c96e35146 | 2,091 | py | Python | twitchchat_display/main.py | shughes-uk/twitchchat_display | b81157ead7007703f4f795ea4c0d4f1cd3d553ce | [
"MIT"
] | 1 | 2015-12-17T08:12:14.000Z | 2015-12-17T08:12:14.000Z | twitchchat_display/main.py | shughes-uk/twitchchat_display | b81157ead7007703f4f795ea4c0d4f1cd3d553ce | [
"MIT"
] | null | null | null | twitchchat_display/main.py | shughes-uk/twitchchat_display | b81157ead7007703f4f795ea4c0d4f1cd3d553ce | [
"MIT"
] | 2 | 2015-09-22T00:46:44.000Z | 2015-11-04T01:29:15.000Z | #!/usr/bin/env python
# This Python file uses the following encoding: utf-8
import logging.config
import signal
import sys
import time
from pathlib import Path

import click
import pygame
from twitchchat import twitch_chat

from .config import get_config, logging_config
from .display import TwitchChatDisplay

logger = logging.getLogger("twitch_monitor")


def signal_term_handler(signal, frame):
    logger.critical("SIGTERM received, exiting")
    sys.exit(0)


@click.command()
@click.option("-v", "--verbosity", count=True)
@click.option(
    "-c",
    "--config",
    "config_fp",
    default="config.yaml",
    type=click.Path(exists=True, dir_okay=False, readable=True, writable=True),
)
def main(verbosity, config_fp):
    logging.config.dictConfig(logging_config(verbosity))
    config_fp = Path(config_fp)
    logger.info(f"Loading {config_fp}")
    config = get_config(config_fp)
    signal.signal(signal.SIGTERM, signal_term_handler)
    try:
        logger.info("Loading TwitchChatDisplay")
        console = TwitchChatDisplay(
            config["screen_width"], config["screen_height"], config["client_id"]
        )
        console.display_message("Loading twitch_api manager")
        console.display_message("Loading twitch_message handler")
        tirc = twitch_chat(
            config["twitch_username"],
            config["twitch_oauth"],
            config["twitch_channels"],
            config["client_id"],
        )
        tirc.subscribeChatMessage(console.new_twitchmessage)
        if "ignored_users" in config:
            for user in config["ignored_users"]:
                console.ignore_user(user)
        try:
            logger.info("Loaded TwitchChatDisplay")
            console.display_message("Loading complete, awaiting messages")
            console.start()
            tirc.start()
            while True:
                time.sleep(0.1)
                if pygame.display.get_init():
                    pygame.event.pump()
        finally:
            console.stop()
    finally:
        pygame.quit()


if __name__ == "__main__":
    main()
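
# Editor's invocation sketch (added; the module path is an assumption based
# on the repo layout): `python -m twitchchat_display.main -vv -c config.yaml`
# bumps log verbosity twice and points at an explicit YAML config file.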
| 28.256757 | 80 | 0.645146 | 233 | 2,091 | 5.60515 | 0.44206 | 0.036753 | 0.048239 | 0.064319 | 0.052067 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002536 | 0.245815 | 2,091 | 73 | 81 | 28.643836 | 0.825618 | 0.034433 | 0 | 0.065574 | 0 | 0 | 0.178483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.163934 | 0 | 0.196721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
847e369447b3cdbbad166cace6b991fbfe990141 | 19,440 | py | Python | confclass.py | eladn/confclass | 5008d1ee5a18b02135a4a981b4eeab7ce9a62ab6 | [
"Apache-2.0"
] | null | null | null | confclass.py | eladn/confclass | 5008d1ee5a18b02135a4a981b4eeab7ce9a62ab6 | [
"Apache-2.0"
] | null | null | null | confclass.py | eladn/confclass | 5008d1ee5a18b02135a4a981b4eeab7ce9a62ab6 | [
"Apache-2.0"
] | null | null | null | import typing
from typing import Optional, Callable, Iterable, Iterator, Dict, Union
import dataclasses
import argparse
from functools import partial
import yaml
import os

__all__ = [
    'confclass',
    'confparam'
]


# A sentinel object to detect if a parameter is supplied or not.
# Use an empty class to give it a unique representation.
class _UNSET_TYPE:
    def __copy__(self):
        return self

    def __deepcopy__(self, memodict={}):
        return self


_UNSET = _UNSET_TYPE()


class _CONFCLASS_MARK_TYPE:
    def __copy__(self):
        return self

    def __deepcopy__(self, memodict={}):
        return self


_CONFCLASS_MARK = _CONFCLASS_MARK_TYPE()


class DefaultBoolean:
    """
    Used to distinguish between an explicitly set boolean value and an unset default fallback boolean value.
    """

    def __init__(self, val: bool = False):
        self.val = val

    def __bool__(self):
        return self.val

    @classmethod
    def is_default(cls, val):
        return isinstance(val, DefaultBoolean)


def _add_arg_prefix_to_arg_name(arg_name: str, arg_prefix: Optional[str] = None):
    if arg_prefix is None or len(arg_prefix) < 1:
        return arg_name
    arg_name_wo_preceding_dashes = arg_name.lstrip('-')
    nr_preceding_dashes = len(arg_name) - len(arg_name_wo_preceding_dashes)
    preceding_dashes = "-" * nr_preceding_dashes
    return f'{preceding_dashes}{arg_prefix}--{arg_name_wo_preceding_dashes}'


def _is_confclass(cls_or_instance):
    return hasattr(cls_or_instance, '__is_confclass') and \
        getattr(cls_or_instance, '__is_confclass') is _CONFCLASS_MARK
def _union_type_check(possible_types: Iterable[type], value: object) -> object:
    last_error = None
    for _type_candidate in possible_types:
        try:
            casted = _type_candidate(value)
            return casted
        except Exception as e:
            # remember the failure but keep trying the remaining candidate
            # types; raising immediately would make the loop pointless
            last_error = e
    raise argparse.ArgumentTypeError(last_error)
_collection_types = {list, tuple, set, frozenset} # dict, OrderedDict
def _collection_type_check(_collection_type: type, _items_types, value: object) -> object:
# TODO: fully implement and check this function.
assert _collection_type in _collection_types
return _collection_type(value)
def _typing_type_to_argparse_add_argument_kwargs(_type: type) -> Dict:
# TODO: fully implement and check this function.
kwargs = {}
if isinstance(_type, typing._GenericAlias):
if _type.__origin__ is typing.Union:
if type(None) in _type.__args__:
kwargs['required'] = False
kwargs['default'] = None
possible_types = [tp for tp in _type.__args__ if tp is not type(None)]
assert len(possible_types) > 0
if len(possible_types) == 1:
kwargs.update(_typing_type_to_argparse_add_argument_kwargs(possible_types[0]))
return kwargs
kwargs['type'] = partial(_union_type_check, possible_types)
return kwargs
if _type.__origin__ in _collection_types:
# FIXME: how does `argparse` expect to get list?
# maybe we just should set `type` to be the item type and set `nargs` to "?" or "+"?
return {'type': partial(_collection_type_check, _type.__origin__, _type.__args__)}
raise ValueError(f'Type `{_type}` is not supported by `confclass`.')
elif _type is bool:
return {'action': 'store_true'}
else:
return {'type': _type}
class ConfParam(dataclasses.Field):
description: Optional[str] = None
default_as_other_field: Optional[str] = None
default_factory_with_self_access: Optional[Callable] = None
choices: Optional[Iterable] = None
def __init__(self,
default=dataclasses.MISSING,
default_factory=dataclasses.MISSING,
init=True,
repr=True,
hash=None,
compare=True,
metadata=None,
description: Optional[str] = None,
default_as_other_field: Optional[str] = None,
default_factory_with_self_access: Optional[Callable] = None,
default_description: Optional[str] = None,
init_from_arg: Union[DefaultBoolean, bool] = DefaultBoolean(False),
arg_names: Optional[Iterable[str]] = None,
arg_prefix: Optional[str] = None,
choices: Optional[Iterable] = None):
self.description = description
self.default_as_other_field = default_as_other_field
self.default_factory_with_self_access = default_factory_with_self_access
self.default_description = default_description
self._arg_names = tuple(arg_names) if arg_names is not None else None
self.arg_prefix = arg_prefix
# Notice: In the line below it is important that `init_from_arg` is the last, so it would stay DefaultBoolean
# when everything else is False.
self.init_from_arg = bool(self._arg_names) or bool(self.arg_prefix) or init_from_arg
self.choices = list(choices) if choices is not None else None
if default_as_other_field is not None and default_factory_with_self_access is not None:
raise ValueError('Cannot set both `default_as_other_field` and `default_factory_with_self_access`.')
if default_as_other_field is not None or default_factory_with_self_access is not None:
if default is not dataclasses.MISSING or default_factory is not dataclasses.MISSING:
raise ValueError(
'Cannot set both `default` nor `default_factory` together with `default_as_other_field`'
'or with `default_factory_with_self_access`.')
# We initially set the `field.default` to an unique `_UNSET` value, which we later detect
# as the field value and re-assign a new value to this field.
default = _UNSET
super(ConfParam, self).__init__(
default=default, default_factory=default_factory, init=init,
repr=repr, hash=hash, compare=compare, metadata=metadata)
def get_arg_names(self, argname_prefix: Optional[str] = None):
arg_names = self._arg_names
if not arg_names:
arg_names = (f"--{self.name.replace('_', '-')}",)
if argname_prefix is None or len(argname_prefix) == 0:
return arg_names
return tuple(_add_arg_prefix_to_arg_name(arg_name, argname_prefix) for arg_name in arg_names)
def add_to_argparser(self, argparser: argparse.ArgumentParser, argname_prefix: Optional[str] = None):
if _is_confclass(self.type):
confclass = self.type
total_argname_prefix = None
if argname_prefix and self.arg_prefix:
total_argname_prefix = argname_prefix + '-' + self.arg_prefix
elif argname_prefix and not self.arg_prefix:
total_argname_prefix = argname_prefix
elif not argname_prefix and self.arg_prefix:
total_argname_prefix = self.arg_prefix
confclass.add_args_to_argparser(argparser, total_argname_prefix)
else:
arg_kwargs = {}
arg_names = self.get_arg_names(argname_prefix)
if self.is_arg_positional:
arg_kwargs['dest'] = self.get_arg_dest(argname_prefix)
arg_kwargs['required'] = self.is_required_as_arg
if self.description:
arg_kwargs['help'] = self.description
if self.default_description:
arg_kwargs['help'] += f' (default: {self.default_description})'
elif self.default_as_other_field is not None:
arg_kwargs['help'] += f' (default: value of `{self.default_as_other_field}`)'
elif self.default is not dataclasses.MISSING and self.default is not _UNSET:
arg_kwargs['help'] += f' (default: {self.default})'
elif self.default_factory is not dataclasses.MISSING:
arg_kwargs['help'] += f' (default: {self.default_factory()})'
if self.choices is not None:
arg_kwargs['choices'] = self.choices
# TODO: complete the rest of the possible parameters that we should pass here to `add_argument()`
# TODO: for a boolean parameter add `store_true` and `store_false` arguments.
arg_kwargs.update(_typing_type_to_argparse_add_argument_kwargs(self.type))
argparser.add_argument(
*arg_names,
**arg_kwargs)
def load_from_args(self, args: argparse.Namespace, argname_prefix: Optional[str] = None) -> object:
if _is_confclass(self.type):
confclass = self.type
total_argname_prefix = None
if argname_prefix and self.arg_prefix:
total_argname_prefix = argname_prefix + '-' + self.arg_prefix
elif argname_prefix and not self.arg_prefix:
total_argname_prefix = argname_prefix
elif not argname_prefix and self.arg_prefix:
total_argname_prefix = self.arg_prefix
return confclass.load_from_args(args, total_argname_prefix)
else:
            arg_dest = self.get_arg_dest(argname_prefix)
            return getattr(args, arg_dest, None)
def get_arg_dest(self, argname_prefix: Optional[str] = None) -> str:
return self.get_arg_names(argname_prefix)[0].strip('-').replace('-', '_')
@property
def has_default(self):
return self.default is not dataclasses.MISSING or \
self.default_factory is not dataclasses.MISSING or \
self.default_factory_with_self_access is not None or \
self.default_as_other_field is not None
    @property
    def is_arg_optional(self):
        # True exactly when every arg name is dash-prefixed, i.e. this is an argparse
        # optional argument rather than a positional one.
        return all(arg_name[0] == '-' for arg_name in self.get_arg_names())
@property
def is_required_as_arg(self):
return self.init_from_arg and not self.has_default
confparam = ConfParam
def confclass(_cls,
frozen: bool = True,
init_all_from_arg_by_default: bool = True,
load_from_yaml_via_arg: bool = True):
def __set_unset_fields(_self):
for fld in dataclasses.fields(_self):
if not isinstance(fld, ConfParam):
continue
if getattr(_self, fld.name) is not _UNSET:
continue
if fld.default_factory_with_self_access is not None:
new_value = fld.default_factory_with_self_access(_self)
object.__setattr__(_self, fld.name, new_value)
elif fld.default_as_other_field is not None:
assert hasattr(_self, fld.default_as_other_field)
value = getattr(_self, fld.default_as_other_field)
assert value is not _UNSET
object.__setattr__(_self, fld.name, value)
def __verify_fields_values(_self):
pass # TODO: implement!
orig_post_init = getattr(_cls, '__post_init__', None)
def __post_init__(self):
__set_unset_fields(self)
if orig_post_init is not None:
orig_post_init(self)
__verify_fields_values(self)
setattr(_cls, '__post_init__', __post_init__)
    # Create a `dataclass()` out of `_cls`.
    # Make sure the auto-created fields are of type `ConfParam` rather than
    # `dataclasses.Field` by temporarily monkey-patching `dataclasses.field`;
    # the try/finally restores it even if the decoration raises.
    orig_dataclasses_field_fn = dataclasses.field
    dataclasses.field = confparam
    try:
        _cls = dataclasses.dataclass(_cls, frozen=frozen)
    finally:
        dataclasses.field = orig_dataclasses_field_fn
if init_all_from_arg_by_default:
for fld in dataclasses.fields(_cls):
if not isinstance(fld, ConfParam):
continue
if DefaultBoolean.is_default(fld.init_from_arg):
fld.init_from_arg = True
def _iter_fields_with_args(cls) -> Iterator[ConfParam]:
for fld in dataclasses.fields(cls):
if not isinstance(fld, ConfParam):
continue
if not fld.init_from_arg:
continue
yield fld
setattr(_cls, '_iter_fields_with_args', classmethod(_iter_fields_with_args))
def add_args_to_argparser(cls,
argparser: argparse.ArgumentParser,
argname_prefix: Optional[str] = None):
for fld in _iter_fields_with_args(cls):
fld.add_to_argparser(argparser, argname_prefix)
setattr(_cls, 'add_args_to_argparser', classmethod(add_args_to_argparser))
def _load_from_args(cls,
args: Optional[argparse.Namespace] = None,
argname_prefix: Optional[str] = None) -> dict:
if args is None:
argparser = argparse.ArgumentParser()
cls.add_args_to_argparser(argparser)
args = argparser.parse_args()
kwargs_to_ctor = {}
for fld in _iter_fields_with_args(cls):
value = fld.load_from_args(args, argname_prefix)
if value is not None:
kwargs_to_ctor[fld.name] = value
return kwargs_to_ctor
def load_from_args(cls,
args: Optional[argparse.Namespace] = None,
argname_prefix: Optional[str] = None):
kwargs_to_ctor = _load_from_args(cls, args, argname_prefix)
return cls(**kwargs_to_ctor)
setattr(_cls, 'load_from_args', classmethod(load_from_args))
    # Ordered lowest-priority first: the loop in `factory` applies these in order,
    # so later origins override earlier ones ('args' wins). A tuple (not a frozenset)
    # is used because the iteration order matters.
    default_hierarchy_fallback_order = ('yaml', 'kwargs', 'args')
def factory(cls,
load_from_args: Union[argparse.Namespace, bool] = False,
load_from_yaml: Union[str, bool] = False,
argname_prefix: Optional[str] = None,
verify_confclass: bool = True,
hierarchy_fallback_order=default_hierarchy_fallback_order,
**explicit_params_to_set):
"""
Default params setting hierarchy fallback:
1. From argument
2. Explicit given as kwargs
3. From yaml
4. Default value
"""
assert set(hierarchy_fallback_order).issubset(default_hierarchy_fallback_order)
# TODO: handle differently confparams which are inner confclasses!
# they should be created iff they (or one of its inner confclasses)
# got a non-default value (from arg, explicit or yaml) for one of its params.
# TODO: allow getting dict as explicit value for an inner confclass.
# because you want to set some of its params and maybe load the rest
# from arg or from the yaml.
kwargs_to_ctor = {}
for origin in hierarchy_fallback_order:
if origin == 'yaml' and load_from_yaml:
if load_from_yaml is True:
load_from_yaml = f'{cls.__name__}.yaml'
if os.path.isfile(load_from_yaml):
with open(load_from_yaml, 'r') as yaml_file:
# TODO: handle inner confclasses
loaded_params_dict = yaml.safe_load(yaml_file)
kwargs_to_ctor.update(loaded_params_dict)
if origin == 'kwargs':
kwargs_to_ctor.update(explicit_params_to_set)
if origin == 'args' and load_from_args:
args = load_from_args if isinstance(load_from_args, argparse.Namespace) else None
kwargs_to_ctor.update(_load_from_args(cls, args, argname_prefix))
obj = cls(**kwargs_to_ctor)
if verify_confclass and hasattr(obj, '__verify_conf__'):
obj.__verify_conf__()
return obj
setattr(_cls, 'factory', classmethod(factory))
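    # Hypothetical example of the fallback (`MyConf` is an illustrative confclass):
    #     conf = MyConf.factory(load_from_args=True, load_from_yaml='conf.yaml', epochs=5)
    # `epochs` comes from the explicit kwarg unless `--epochs` was also given on the
    # command line; values from conf.yaml fill in only what neither of those set.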
    def save_to_yaml(_self,
                     dest_yaml_path: Optional[str] = None,
                     export_only_explicitly_set_params: bool = False) -> None:
dict_to_export = dataclasses.asdict(_self)
if export_only_explicitly_set_params:
# TODO: fix for sub confclasses (which are dicts here and they also
# may recursively include inner confclasses)
dict_to_export = {key: val
for key, val in dict_to_export.items()
if key in _self.__explicitly_set_params__}
if dest_yaml_path is None:
dest_yaml_path = f'{_self.__class__.__name__}.yaml'
with open(dest_yaml_path, 'w') as yaml_file:
yaml.dump(dict_to_export, yaml_file, default_flow_style=False)
setattr(_cls, 'save_to_yaml', save_to_yaml)
    def pprint(_self, print_fn: Callable[[str], None] = print, min_param_name_col_len: int = 0):
        def _longest_param_name_len(obj) -> int:
            return max(max(len(param_name),
                           (4 + _longest_param_name_len(getattr(obj, param_name)))
                           if _is_confclass(getattr(obj, param_name)) else 0)
                       for param_name, _ in dataclasses.asdict(obj).items())
        # Compute the column width once instead of recomputing it for every row.
        param_name_col_len = max(_longest_param_name_len(_self) + 2, min_param_name_col_len)
        for param_name, param_val in dataclasses.asdict(_self).items():
            if _is_confclass(getattr(_self, param_name)):
                param_val = ''
            print_fn(f'{param_name: <{param_name_col_len}}{param_val}')
            if _is_confclass(getattr(_self, param_name)):
                # Indent nested confclass params by 4 spaces to match the `- 4` width adjustment.
                getattr(_self, param_name).pprint(
                    print_fn=lambda x: print_fn(f'    {x}'), min_param_name_col_len=param_name_col_len - 4)
setattr(_cls, 'pprint', pprint)
orig_ctor = getattr(_cls, '__init__')
def __init__(_self, **kwargs):
orig_ctor(_self, **kwargs)
        explicitly_set_params: typing.Set[str] = set(getattr(_self, '__explicitly_set_params__', set()))
        explicitly_set_params.update(kwargs)  # iterating a dict yields its keys
object.__setattr__(_self, '__explicitly_set_params__', frozenset(explicitly_set_params))
setattr(_cls, '__init__', __init__)
setattr(_cls, '__is_confclass', _CONFCLASS_MARK)
return _cls
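# Minimal usage sketch (illustrative only; `TrainConf` and its fields are
# hypothetical names, not part of this module):
#     @confclass
#     class TrainConf:
#         learning_rate: float = confparam(default=0.01, description='Optimizer step size')
#         epochs: int = confparam(default=10, arg_names=('--epochs', '-e'))
#
#     conf = TrainConf.factory(load_from_args=True)  # CLI args > kwargs > yaml > field defaults
#     conf.pprint()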
| 43.882619 | 117 | 0.641358 | 2,362 | 19,440 | 4.832345 | 0.121084 | 0.044419 | 0.021027 | 0.023305 | 0.373927 | 0.283336 | 0.220343 | 0.160505 | 0.11004 | 0.101454 | 0 | 0.001135 | 0.274846 | 19,440 | 442 | 118 | 43.9819 | 0.808541 | 0.140278 | 0 | 0.1625 | 0 | 0 | 0.058632 | 0.026876 | 0 | 0 | 0 | 0.004525 | 0.015625 | 1 | 0.103125 | false | 0.003125 | 0.021875 | 0.0375 | 0.24375 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
847fd9cb96497848a266a48adad5af45d252833f | 4,594 | py | Python | client/views.py | adwalkz/DjangoDataAnalysis | 1a888fc3a5dc34651a3b25d476e985ff39d8e305 | [
"MIT"
] | 4 | 2021-08-04T17:02:56.000Z | 2021-09-30T07:07:04.000Z | client/views.py | adwalkz/DjangoDataAnalysis | 1a888fc3a5dc34651a3b25d476e985ff39d8e305 | [
"MIT"
] | null | null | null | client/views.py | adwalkz/DjangoDataAnalysis | 1a888fc3a5dc34651a3b25d476e985ff39d8e305 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.db.models import Count, Avg, Max, Min
from ts.models import *
from .models import SurveyQuestion, SurveyQuestionChoice, SurveyResponse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from fpdf import FPDF
class PDF(FPDF):
def __init__(self, client_name):
super().__init__()
self.WIDTH = 210
self.HEIGHT = 297
self.name = client_name
def header(self):
self.image('ts/static/ts/index.png', 10, 3, 33)
self.set_font('Arial', 'B', 11)
self.cell(self.WIDTH - 80)
self.cell(60, 1, self.name.upper() + ' SURVEY REPORT', 0, 0, 'R')
self.ln(20)
def footer(self):
self.set_y(-15)
self.set_font('Arial', 'I', 8)
self.set_text_color(128)
self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')
def page_body(self, images):
self.image(images, 15, 35, self.WIDTH)
def print_page(self, images):
self.add_page()
self.page_body(images)
def index(request, pk):
"""
This view is for
creating dynamic
and personalized
client home page
"""
ques_obj = SurveyQuestion.objects.filter(company_id=pk)
res_obj = SurveyResponse.objects.filter(company_id=pk)
total_survey_question = ques_obj.count()
total_survey_response = res_obj.count()//total_survey_question
stats_list = []
charts = []
for q in ques_obj:
if q.ques_type == 1:
res_obj = SurveyResponse.objects.filter(ques_id_id = q.ques_id)
stat_dic = res_obj.aggregate( \
Avg('user_response'), \
Max('user_response'), \
Min('user_response'))
stat_dic['ques'] = q.ques_text
stats_list.append(stat_dic)
elif q.ques_type == 2:
            res_obj = SurveyResponse.objects.filter(ques_id_id = q.ques_id)
            choice_obj = SurveyQuestionChoice.objects.filter(ques_id_id = q.ques_id)
            choice_list = [c.choice_text for c in choice_obj]
            res_list = [r.user_response for r in res_obj]
            # Fix the category order once so counts and pie labels stay aligned
            # (building `set(res_list)` twice relies on identical iteration order).
            unique_responses = sorted(set(res_list))
            stats = [res_list.count(i) for i in unique_responses]
            name = str(q.company_id_id) + '_' + str(q.ques_id)
            plt.pie(stats, labels=unique_responses, autopct='%1.0f%%')
plt.title(q.ques_text)
plt.legend()
plt.savefig('client/static/client/charts/' + name + '.png', \
transparent=True)
plt.close()
charts.append(name)
context = {
'client': get_object_or_404(Clients, pk=pk),
'total_ques': total_survey_question,
'total_response': total_survey_response,
'charts': charts,
'stats': stats_list
}
pdf = PDF(context['client'].client_name)
for i in charts:
pdf.print_page('client/static/client/charts/' + i + '.png')
pdf.output('client/static/client/' + str(pk) +'.pdf', 'F')
return render(request, "client/index.html", context)
def survey(request, pk):
"""
This view is for
creating dynamic
and personalized
client survey page
"""
context = {
'client': get_object_or_404(Clients, pk=pk),
'categories': SurveyQuestion.objects.filter(company_id=pk).values('ques_category').annotate(Count('ques_category')).order_by(),
'ques_set': SurveyQuestion.objects.filter(company_id=pk),
'choice_set': SurveyQuestionChoice.objects.filter(company_id=pk)
}
return render(request, "client/survey.html", context)
def save(request, pk):
"""
This view is for
saving participant
response into db
"""
context = {
'client': get_object_or_404(Clients, pk=pk)
}
for res in request.POST:
if res == "csrfmiddlewaretoken" or res == "submit":
continue
response = SurveyResponse(
company_id = Clients.objects.get(pk=pk),
ques_id = SurveyQuestion.objects.get(pk=int(res)),
user_response=request.POST[res]
)
response.save()
return render(request, "client/thanks.html", context)
def feedback(request, pk):
"""
This view is for
saving participant
feedback regarding
survey
"""
context = {'client': get_object_or_404(Clients, pk=pk)}
return render(request, "client/feedback.html", context)
| 28.534161 | 139 | 0.597736 | 575 | 4,594 | 4.582609 | 0.25913 | 0.039469 | 0.020873 | 0.026565 | 0.270968 | 0.226186 | 0.182922 | 0.182922 | 0.153321 | 0.080455 | 0 | 0.017841 | 0.280148 | 4,594 | 160 | 140 | 28.7125 | 0.778954 | 0.054854 | 0 | 0.07 | 0 | 0 | 0.098206 | 0.023371 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09 | false | 0 | 0.07 | 0 | 0.21 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84863eae140817ec0c54b39184cf63b3eb2511d1 | 15,051 | py | Python | Structures/Interceptor.py | saisua/Scraper | ae9f93fbb0b3350cd5c8ba8e87b7ad63461d9a30 | [
"MIT"
] | null | null | null | Structures/Interceptor.py | saisua/Scraper | ae9f93fbb0b3350cd5c8ba8e87b7ad63461d9a30 | [
"MIT"
] | 13 | 2020-06-19T13:31:16.000Z | 2020-06-19T14:05:44.000Z | Structures/Interceptor.py | saisua/Scraper2 | ae9f93fbb0b3350cd5c8ba8e87b7ad63461d9a30 | [
"MIT"
] | null | null | null | print("Starting Interceptor imports... ")
import sys
import socket
from multiprocessing import Manager, Process
from re import search, sub
from os import getcwd
import io
import pyshark
import pem
from pyshark.capture.capture import Capture
from pyshark.capture.live_capture import LiveCapture
from pyshark.packet.packet import Packet
from mitmproxy import proxy, options, http
from mitmproxy.tools.dump import DumpMaster
from mitmproxy.addons import core
from click import echo
from PIL import Image
from ctypes import c_bool
#from cryptography.hazmat.backends import default_backend
#from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key
from Structures.Async_generator import AGenerator
print("Done (Interceptor)")
class Interceptor():
stdout = sys.stdout
async_gen:AGenerator
non_realtime:Process=None
__open:bool
_manager:Manager
ip:str
port:int
__capture:LiveCapture
def __init__(self, manager:Manager=None, *, ip:str='127.0.0.1', port:int=None,
process_non_realtime:bool=False, non_realtime_capture_filter:str='',
non_realtime_interface:str='any'):
self._manager = manager or Manager()
self.async_gen = AGenerator()
        self.__open = self._manager.Value(c_bool, True)
self.ip = ip
self.port = port or Interceptor.__get_open_port(ip)
#self.key = generate_private_key(65537, 2048, default_backend())
if(process_non_realtime):
self.start_pyshark_capture(non_realtime_capture_filter, non_realtime_interface)
self.non_realtime = Process(target=self.non_realtime_process)
self.non_realtime.start()
opts = options.Options(
listen_host=self.ip, mode="socks5",
listen_port=self.port,
confdir=f"{getcwd()}//Structures//.mitmproxy"
)
opts.add_option("body_size_limit", int, 0, "")
opts.add_option("keep_host_header", bool, True, "")
opts.add_option("showhost", bool, True, "")
#opts.add_option("keep_host_header")
pconf = proxy.config.ProxyConfig(opts)
self.proxy = DumpMaster(opts, with_termlog=False, with_dumper=False)
self.proxy.server = proxy.server.ProxyServer(pconf)
self.proxy.addons.add(self)
self.proxy_process = Process(target=self.proxy.run, daemon=True)
self.proxy_process.start()
def close(self):
self.__open.value = False
self.proxy.shutdown()
self.proxy_process.terminate()
self.proxy_process.join(3)
if(not self.non_realtime is None):
self.__capture.close()
# No terminate to ensure tshark is closed
self.non_realtime.join(5)
self.async_gen(all=True)
def running(self):
echo(f"The interceptor is up and running in {self.ip}:{self.port}", file=self.stdout)
def request(self, flow:http.HTTPFlow):
#echo(f"request {flow.client_conn.address} -> {flow.server_conn.address}", file=self.stdout)
pass
def response(self, flow:http.HTTPFlow):
#echo(f"response {flow.server_conn.address} -> {flow.client_conn.address}", file=self.stdout)
pass
#print(f"{flow.server_conn.address[0]} : {flow.response.headers.get('content-type')}")
#if(not flow.server_conn.address[0] in self.logged_data):
# self.logged_data[flow.server_conn.address[0]] = []
#self.logged_data[flow.server_conn.address[0]].append(flow.response.headers.get("content-type"))
#content = (dict(map(Sniffer_event.keys_lower, flow.response.headers.items()))
# .get("content-type",""))
#echo(f"\n\nRESPONSE {content}", file=self.stdout)
#if(content.startswith("image")):
#self.images+=1
#self.async_gen.append(Sniffer_event.save_image(flow.response.raw_content, self.images, content))
def start_pyshark_capture(self, capture_filter:str='', interface:str='any'):
with open(f"{'//'.join(__file__.split('/')[:-1])}//.mitmproxy//ssl.key", "wb") as file:
file.write(pem.parse_file(f"{'//'.join(__file__.split('/')[:-1])}//.mitmproxy//mitmproxy-ca.pem")[0].as_bytes())
# Custom parameters got from https://github.com/eaufavor/pyshark-ssl
self.__capture = Closable_LCapture(interface="lo",#interface,
#bpf_filter=capture_filter,#sub(r"port \d*", capture_filter, f"port {self.port}"),
display_filter="ssl",
custom_parameters= ['-o', 'ssl.desegment_ssl_records:TRUE',
'-o', 'ssl.desegment_ssl_application_data:TRUE',
'-o','tcp.desegment_tcp_streams:TRUE',
'-o', f"ssl.keylog_file:{'//'.join(__file__.split('/')[:-1])}//.mitmproxy//ssl.key"]
#decryption_key=(pem.parse_file())[0].as_text()
)
def non_realtime_process(self):
print("Enabled non-realtime packet process in Interceptor")
def print_packet(packet:Packet):
pass
#echo(type(packet.ssl), file=self.stdout)
#echo(dir(packet.ssl), file=self.stdout)
#echo(f"{packet.ip.src} -> {packet.ip.dst}", file=self.stdout)
#capture.set_debug(True)
self.__capture.apply_on_packets(print_packet)
# Properties
@property
def address(self) -> str:
return f"{self.ip}:{self.port}"
# Static methods
@staticmethod
async def save_image(data:bytes, image_num:int, content:str):
image = Image.open(io.BytesIO(data))
extension = search(r'[\/](.+)[;?]',content).group(1)
#echo(f"image{image_num}.{f}", file=Sniffer_event.stdout)
image.save(f"./Results/Images/image{image_num}.{extension}")
@staticmethod
def keys_lower(data:tuple) -> tuple:
return (data[0].lower(), data[1])
@staticmethod
def __get_open_port(ip:str) -> int:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((ip, 0))
p = s.getsockname()[1]
s.close()
return p
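# Minimal usage sketch (illustrative values; any SOCKS5-capable HTTP client works):
#     interceptor = Interceptor(ip='127.0.0.1', port=9050)  # starts the mitm SOCKS5 proxy
#     print(interceptor.address)                            # e.g. '127.0.0.1:9050'
#     # ... point a client at socks5://127.0.0.1:9050 and browse ...
#     interceptor.close()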
class Closable_LCapture(LiveCapture):
from asyncio.subprocess import Process as AProcess
import asyncio
lcapture_tshark:AProcess=None
def __init__(self, *args, **kwargs):
self.is_open = Manager().Value(c_bool, True)
super().__init__(*args, **kwargs)
def sniff_continuously(self, packet_count=None):
"""
Captures from the set interface, returning a generator which returns packets continuously.
Can be used as follows:
for packet in capture.sniff_continuously():
print 'Woo, another packet:', packet
Note: you can also call capture.apply_on_packets(packet_callback) which should have a slight performance boost.
:param packet_count: an amount of packets to capture, then stop.
"""
self.lcapture_tshark = (self.lcapture_tshark or
self.eventloop.run_until_complete(self._get_tshark_process()))
self._running_processes.add(self.lcapture_tshark)
# Retained for backwards compatibility and to add documentation.
return self._packets_from_tshark_sync(packet_count=packet_count,
tshark_process=self.lcapture_tshark)
def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0,
max_data_length:int=10000):
"""
Returns a generator of packets.
This is the sync version of packets_from_tshark. It wait for the completion of each coroutine and
reimplements reading packets in a sync way, yielding each packet as it arrives.
:param packet_count: If given, stops after this amount of packets is captured.
"""
# NOTE: This has code duplication with the async version, think about how to solve this
psml_structure, data = self.eventloop.run_until_complete(self._get_psml_struct(tshark_process.stdout))
packets_captured = 0
data = b""
try:
while self.is_open.value:
try:
packet, data = self.eventloop.run_until_complete(
self._get_packet_from_stream(tshark_process.stdout,
data,
psml_structure=psml_structure,
got_first_packet=packets_captured > 0,
timeout=timeout))
except EOFError:
echo("Caught EOF", file=Interceptor.stdout)
self._log.debug("EOF reached (sync)")
break
if(packet is False): continue
if packet:
packets_captured += 1
yield packet
if packet_count and packets_captured >= packet_count:
break
if len(data) > max_data_length:
data = b''
finally:
if tshark_process in self._running_processes:
self.eventloop.run_until_complete(self._cleanup_subprocess(tshark_process))
async def _get_packet_from_stream(self, stream, existing_data,
got_first_packet=True,
psml_structure=None,
timeout:float=3.0):
"""A coroutine which returns a single packet if it can be read from the given StreamReader.
:return a tuple of (packet, remaining_data). The packet will be None if there was not enough XML data to create
a packet. remaining_data is the leftover data which was not enough to create a packet from.
:raises EOFError if EOF was reached.
"""
import asyncio
from pyshark.tshark.tshark_json import packet_from_json_packet
from pyshark.tshark.tshark_xml import packet_from_xml_packet, psml_structure_from_xml
# yield each packet in existing_data
if self.use_json:
packet, existing_data = self._extract_packet_json_from_data(existing_data,
got_first_packet=got_first_packet)
else:
packet, existing_data = self._extract_tag_from_data(existing_data)
if packet:
if self.use_json:
packet = packet_from_json_packet(packet)
else:
packet = packet_from_xml_packet(packet, psml_structure=psml_structure)
return packet, existing_data
if(not self.is_open.value):
raise EOFError()
future = asyncio.create_task(stream.read(self.DEFAULT_BATCH_SIZE))
try:
await asyncio.wait_for(future, timeout)
except asyncio.TimeoutError:
return False, existing_data
new_data = future.result()
existing_data += new_data
if not new_data:
# Reached EOF
raise EOFError()
return None, existing_data
def close(self):
print("Closing pyshark live capture")
self.is_open.value = False
super().close()
print("Successfully closed pyshark live capture")
"""
Response:
['__abstractmethods__', '__annotations__', '__class__', '__delattr__', '__dict__',
'__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__',
'__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__',
'__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__', '_abc_impl', '_get_content_type_charset', '_get_cookies',
'_guess_encoding', '_set_cookies', 'content', 'cookies', 'copy', 'data', 'decode', 'encode',
'from_state', 'get_content', 'get_state', 'get_text', 'headers', 'http_version', 'is_replay',
'make', 'raw_content', 'reason', 'refresh', 'replace', 'set_content', 'set_state', 'set_text',
'status_code', 'stream', 'text', 'timestamp_end', 'timestamp_start', 'wrap']
ResponseData:
['__abstractmethods__', '__annotations__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__',
'__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__',
'__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__', '_abc_impl', 'content', 'copy', 'from_state', 'get_state', 'headers', 'http_version',
'reason', 'set_state', 'status_code', 'timestamp_end', 'timestamp_start']
"""
"""
http.HTTPFLOW:
['__abstractmethods__', '__annotations__', '__class__', '__delattr__',
'__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__',
'__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__',
'__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__',
'__str__', '__subclasshook__', '__weakref__', '_abc_impl', '_backup', '_stateobject_attributes',
'backup', 'client_conn', 'copy', 'error', 'from_state', 'get_state', 'id', 'intercept',
'intercepted', 'kill', 'killable', 'live', 'marked', 'metadata', 'mode', 'modified',
'replace', 'reply', 'request', 'response', 'resume', 'revert', 'server_conn', 'set_state', 'type']
Request:
['__abstractmethods__', '__annotations__', '__class__', '__delattr__', '__dict__', '__dir__',
'__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__',
'__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__',
'__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__', '_abc_impl', '_get_content_type_charset', '_get_cookies',
'_get_multipart_form', '_get_query', '_get_urlencoded_form', '_guess_encoding',
'_parse_host_header', '_set_cookies', '_set_multipart_form', '_set_query', '_set_urlencoded_form',
'anticache', 'anticomp', 'constrain_encoding', 'content', 'cookies', 'copy', 'data',
'decode', 'encode', 'first_line_format', 'from_state', 'get_content', 'get_state',
'get_text', 'headers', 'host', 'host_header', 'http_version', 'is_replay', 'make',
'method', 'multipart_form', 'path', 'path_components', 'port', 'pretty_host', 'pretty_url',
'query', 'raw_content', 'replace', 'scheme', 'set_content', 'set_state', 'set_text',
'stream', 'text', 'timestamp_end', 'timestamp_start', 'url', 'urlencoded_form', 'wrap']
""" | 42.27809 | 124 | 0.626071 | 1,704 | 15,051 | 5.031103 | 0.240023 | 0.017963 | 0.011431 | 0.014697 | 0.224775 | 0.185233 | 0.123411 | 0.112446 | 0.094716 | 0.085151 | 0 | 0.004065 | 0.248223 | 15,051 | 356 | 125 | 42.27809 | 0.753601 | 0.157332 | 0 | 0.130435 | 0 | 0 | 0.084678 | 0.047946 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081522 | false | 0.016304 | 0.130435 | 0.01087 | 0.309783 | 0.038043 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8489217661ac1eae3afe68e1e0d39ce20002a5f3 | 538 | py | Python | sources/rnt/mediane/normalizations/enumeration.py | bryan-brancotte/rank-aggregation-with-ties | 15fffb0b1bee3d6cef7090486a7c910e5f51195d | [
"Apache-2.0"
] | null | null | null | sources/rnt/mediane/normalizations/enumeration.py | bryan-brancotte/rank-aggregation-with-ties | 15fffb0b1bee3d6cef7090486a7c910e5f51195d | [
"Apache-2.0"
] | 11 | 2018-04-04T08:24:30.000Z | 2021-03-19T21:45:04.000Z | sources/rnt/mediane/normalizations/enumeration.py | bryan-brancotte/rank-aggregation-with-ties | 15fffb0b1bee3d6cef7090486a7c910e5f51195d | [
"Apache-2.0"
] | 1 | 2018-10-25T09:13:41.000Z | 2018-10-25T09:13:41.000Z | from django.utils.translation import ugettext_lazy as _
NONE = 'None'
UNIFICATION = 'UNIF'
PROJECTION = 'PROJ'
__tuple_list = ((UNIFICATION, _(UNIFICATION)), (PROJECTION, _(PROJECTION)), (NONE, _(NONE)),)
def as_tuple_list():
return __tuple_list
def get_from(id_enum):
for k, v in __tuple_list:
if str(k) == str(id_enum):
return v
return None
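# Example: get_from(UNIFICATION) returns the lazily-translated 'UNIF' label,
# while get_from('unknown') returns None.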
def __dummy_method_to_have_translations():
_('None_name')
_('UNIF_name')
_('PROJ_name')
_('None_desc')
_('UNIF_desc')
_('PROJ_desc') | 20.692308 | 93 | 0.654275 | 68 | 538 | 4.632353 | 0.485294 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208178 | 538 | 26 | 94 | 20.692308 | 0.739437 | 0 | 0 | 0 | 0 | 0 | 0.122449 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.052632 | 0.052632 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
848a70bcaf3c6665cc367d128e79aa6e018703cf | 1,932 | py | Python | src/datasets.py | dmilios/dirichletGPC | 7e460ca07005a5aed97937d2bf2a8a47b6f8051e | [
"Apache-2.0"
] | 12 | 2018-12-19T15:38:20.000Z | 2022-02-14T14:58:40.000Z | src/datasets.py | dmilios/dirichletGPC | 7e460ca07005a5aed97937d2bf2a8a47b6f8051e | [
"Apache-2.0"
] | null | null | null | src/datasets.py | dmilios/dirichletGPC | 7e460ca07005a5aed97937d2bf2a8a47b6f8051e | [
"Apache-2.0"
] | 1 | 2019-12-25T01:51:02.000Z | 2019-12-25T01:51:02.000Z | # Copyright 2018 Dimitrios Milios, Raffaello Camoriano,
# Pietro Michiardi,Lorenzo Rosasco, Maurizio Filippone
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
def normalise_unitvar(X, Xtest):
    meanX = np.mean(X, 0)
    stdX = np.std(X, 0)
    stdX[stdX == 0] = 1  # to avoid NaN
    X = (X - meanX) / stdX
    Xtest = (Xtest - meanX) / stdX
    return X, Xtest
def normalise_oneminusone(X, Xtest):
minx = np.min(X, 0)
maxx = np.max(X, 0)
ranges = maxx - minx
ranges[ranges == 0] = 1 # to avoid NaN
X = (X - minx) / ranges
Xtest = (Xtest - minx) / ranges
X = X * 2 - 1
Xtest = Xtest * 2 - 1
return X, Xtest
def load_split(path, split_idx):
'''
Assumptions: The 'path' corresponds to a particular dataset
which contains the subdirectory 'splits' with contents:
train01.txt, test01.txt
train02.txt, test02.txt
...
'''
    if not isinstance(split_idx, str):
split_idx = '{:02d}'.format(split_idx)
path_train = os.path.join(path, 'splits', 'train' + split_idx + '.txt')
data = np.loadtxt(path_train, delimiter=',')
X = data[:, :-1]
y = data[:, -1]
path_test = os.path.join(path, 'splits', 'test' + split_idx + '.txt')
data = np.loadtxt(path_test, delimiter=',')
Xtest = data[:, :-1]
ytest = data[:, -1]
return X, y, Xtest, ytest
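# Example usage (the dataset path below is hypothetical):
#     X, y, Xtest, ytest = load_split('data/banana', 1)  # reads splits/train01.txt and splits/test01.txt
#     X, Xtest = normalise_unitvar(X, Xtest)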
| 32.2 | 75 | 0.624741 | 274 | 1,932 | 4.357664 | 0.456204 | 0.050251 | 0.021776 | 0.026801 | 0.103853 | 0.070352 | 0.070352 | 0 | 0 | 0 | 0 | 0.023726 | 0.258282 | 1,932 | 59 | 76 | 32.745763 | 0.809491 | 0.46118 | 0 | 0.064516 | 0 | 0 | 0.037149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.064516 | 0 | 0.258065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
848b436aa24ac308d60e90b617dd1c05c1c87340 | 4,559 | py | Python | cleanup_timescale.py | djhedges/exit_speed | 86c8a36dd7c53d9f67157c359625b8d33715f917 | [
"Apache-2.0"
] | 10 | 2020-09-25T19:48:50.000Z | 2021-10-13T13:42:56.000Z | cleanup_timescale.py | djhedges/exit_speed | 86c8a36dd7c53d9f67157c359625b8d33715f917 | [
"Apache-2.0"
] | null | null | null | cleanup_timescale.py | djhedges/exit_speed | 86c8a36dd7c53d9f67157c359625b8d33715f917 | [
"Apache-2.0"
] | 1 | 2021-02-08T14:50:48.000Z | 2021-02-08T14:50:48.000Z | #!/usr/bin/python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used to cleanup database entries created during testing."""
from absl import app
from absl import flags
from absl import logging
import timescale
FLAGS = flags.FLAGS
flags.DEFINE_integer('min_lap_duration_ms', 60 * 1000,
                     'Nukes laps with a duration shorter than this value.')
flags.DEFINE_integer('max_lap_duration_ms', 60 * 1000 * 3,  # 3 mins.
                     'Nukes laps with a duration longer than this value.')
def NukeNonLiveData(conn):
"""Delete non live data which usually generated during testing."""
with conn.cursor() as cursor:
nuke_statement = """
DELETE FROM points
WHERE session_id IN (SELECT id FROM sessions WHERE live_data = False);
DELETE FROM laps
WHERE session_id IN (SELECT id FROM sessions WHERE live_data = False);
DELETE FROM sessions
WHERE id IN (SELECT id FROM sessions WHERE live_data = False);
"""
logging.info(nuke_statement)
cursor.execute(nuke_statement)
def NukeLapsWithNoDuration(conn):
"""Delete any laps without a duration.
These are usually points logged post session in the paddock.
Args:
conn: A connection to the timescale backend.
"""
with conn.cursor() as cursor:
nuke_statement = """
DELETE FROM points
WHERE lap_id IN (SELECT id FROM laps WHERE duration_ms is NULL);
"""
logging.info(nuke_statement)
cursor.execute(nuke_statement)
nuke_statement = """
DELETE FROM laps
WHERE duration_ms is NULL;
"""
logging.info(nuke_statement)
cursor.execute(nuke_statement)
def NukeLapsByDuration(conn):
"""Delete laps based on time. These are usually traffic or the out lap."""
with conn.cursor() as cursor:
nuke_statement = """
DELETE FROM points
WHERE lap_id IN (SELECT id FROM laps
WHERE duration_ms < %s or duration_ms > %s);
DELETE FROM laps
WHERE duration_ms < %s OR duration_ms > %s;
"""
logging.info(nuke_statement)
args = (FLAGS.min_lap_duration_ms, FLAGS.max_lap_duration_ms,
FLAGS.min_lap_duration_ms, FLAGS.max_lap_duration_ms)
cursor.execute(nuke_statement, args)
def NukeHangingLaps(conn):
"""Based on prior deletes these cleans up any laps without points."""
with conn.cursor() as cursor:
select_statement = """
SELECT DISTINCT(lap_id) FROM points
"""
cursor.execute(select_statement)
laps_to_keep = set(cursor.fetchall())
select_statement = """
SELECT id FROM laps
"""
cursor.execute(select_statement)
all_lap_ids = set(cursor.fetchall())
hanging_lap_ids = all_lap_ids.difference(laps_to_keep)
nuke_statement = """
DELETE FROM laps
WHERE id IN %s
"""
logging.info(nuke_statement)
if hanging_lap_ids:
args = (tuple(hanging_lap_ids),)
cursor.execute(nuke_statement, args)
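# Note: the `WHERE id IN %s` statements above pass a Python tuple as the
# parameter, relying on the driver's tuple adaptation (psycopg2 expands a
# tuple to a parenthesized value list); this assumes `timescale.ConnectToDB()`
# returns a psycopg2-style connection.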
def NukeHangingSessions(conn):
"""Based on prior deletes these cleans up any sessions without laps."""
with conn.cursor() as cursor:
select_statement = """
SELECT DISTINCT(session_id) FROM laps
"""
cursor.execute(select_statement)
sessions_to_keep = set(cursor.fetchall())
select_statement = """
SELECT id FROM sessions
"""
cursor.execute(select_statement)
all_session_ids = set(cursor.fetchall())
hanging_session_ids = all_session_ids.difference(sessions_to_keep)
nuke_statement = """
DELETE FROM sessions
WHERE id IN %s
"""
logging.info(nuke_statement)
if hanging_session_ids:
args = (tuple(hanging_session_ids),)
cursor.execute(nuke_statement, args)
def CleanupTimescale():
logging.info('Cleaning up Timescale')
with timescale.ConnectToDB() as conn:
NukeNonLiveData(conn)
NukeLapsWithNoDuration(conn)
NukeLapsByDuration(conn)
# Hanging deletions should probably come last.
NukeHangingLaps(conn)
NukeHangingSessions(conn)
conn.commit()
def main(unused_argv):
CleanupTimescale()
if __name__ == '__main__':
app.run(main)
| 30.393333 | 77 | 0.704321 | 606 | 4,559 | 5.136964 | 0.270627 | 0.075169 | 0.026984 | 0.04433 | 0.517828 | 0.469643 | 0.42435 | 0.378734 | 0.353678 | 0.277867 | 0 | 0.006366 | 0.207502 | 4,559 | 149 | 78 | 30.597315 | 0.855245 | 0.238868 | 0 | 0.543689 | 0 | 0 | 0.30981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067961 | false | 0 | 0.038835 | 0 | 0.106796 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8490aeceeb675f3713fb1cedf7105b2e3140cf51 | 1,472 | py | Python | tempZonal/overlay1.py | Mihir-DG/Tropospheric-Temperature-and-Zonal-Wind-Profiles | 13e0c706faed446f10758341807c066670260e0b | [
"MIT"
] | null | null | null | tempZonal/overlay1.py | Mihir-DG/Tropospheric-Temperature-and-Zonal-Wind-Profiles | 13e0c706faed446f10758341807c066670260e0b | [
"MIT"
] | null | null | null | tempZonal/overlay1.py | Mihir-DG/Tropospheric-Temperature-and-Zonal-Wind-Profiles | 13e0c706faed446f10758341807c066670260e0b | [
"MIT"
] | null | null | null | from netCDF4 import Dataset as dst
import numpy as np
from matplotlib import pyplot as plt
tempnc = dst("../air.mon.mean.nc", mode='r')
zonalnc = dst("../uwnd.mon.mean.nc", mode='r')
# Datasets downloaded 21 Aug 2020
# Time axis length: 871 monthly steps (871/12, i.e. about 72 years and 7 months)
def slice_per(source, step):
return [source[i::step] for i in range(step)]
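# e.g. slice_per([1, 2, 3, 4, 5, 6], 2) -> [[1, 3, 5], [2, 4, 6]]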
def level_lat_means(nc, varname):
    """Mean over longitude for each (level, latitude), averaged over the months."""
    months = range(871)[-68:-8]
    levels = range(17)[0:12]
    avgs = []
    for l in levels:
        item = []
        for m in months:
            for latitude in nc.variables[varname][m][l]:
                item.append(np.mean(latitude))
        item = slice_per(item, 73)
        for elem in item:
            avgs.append(np.mean(elem))
    avgs = np.array(slice_per(avgs, 73))
    return avgs.T

def zonal_data():
    return level_lat_means(zonalnc, 'uwnd')

def temp_data():
    return level_lat_means(tempnc, 'air')
if __name__ == "__main__":
temp = temp_data()
zonal = zonal_data()
x = tempnc.variables['lat'][:]
levels = tempnc.variables['level'][0:12]
levels = levels[::-1]
plt.contourf(x,levels,zonal,cmap='nipy_spectral',alpha=0.7)
plt.colorbar()
plt.contour(x,levels,temp,colors='black',levels=15)
plt.xlabel("Latitude(°)")
plt.ylabel("Altitude(mbar)")
plt.savefig("windVtemp.png")
plt.show()
| 24.131148 | 60 | 0.665761 | 240 | 1,472 | 4.0125 | 0.3625 | 0.041537 | 0.018692 | 0.026999 | 0.463136 | 0.43406 | 0.43406 | 0.43406 | 0.43406 | 0.43406 | 0 | 0.045161 | 0.157609 | 1,472 | 60 | 61 | 24.533333 | 0.730645 | 0.045516 | 0 | 0.52 | 0 | 0 | 0.084226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0 | 0.06 | 0.02 | 0.18 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
849299b9fc910775a11fc0036ffa60ec51f4037c | 4,609 | py | Python | tests/buf_test.py | ClasherKasten/babi | e11d61da9387b3740fcf7c9ec631e51cfd4b194b | [
"MIT"
] | null | null | null | tests/buf_test.py | ClasherKasten/babi | e11d61da9387b3740fcf7c9ec631e51cfd4b194b | [
"MIT"
] | null | null | null | tests/buf_test.py | ClasherKasten/babi | e11d61da9387b3740fcf7c9ec631e51cfd4b194b | [
"MIT"
] | null | null | null | from __future__ import annotations
from unittest import mock
import pytest
import babi.buf
from babi.buf import Buf
def test_buf_truthiness():
assert bool(Buf([])) is False
assert bool(Buf(['a', 'b'])) is True
def test_buf_repr():
ret = repr(Buf(['a', 'b', 'c']))
assert ret == "Buf(['a', 'b', 'c'], x=0, y=0, file_y=0)"
def test_buf_item_retrieval():
buf = Buf(['a', 'b', 'c'])
assert buf[1] == 'b'
assert buf[-1] == 'c'
with pytest.raises(IndexError):
buf[3]
def test_buf_del():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
del buf[1]
assert lst == ['a', 'c']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_del_with_negative():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
del buf[-1]
assert lst == ['a', 'b']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_insert():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf.insert(1, 'q')
assert lst == ['a', 'q', 'b', 'c']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_insert_with_negative():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf.insert(-1, 'q')
assert lst == ['a', 'b', 'q', 'c']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_set_value():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf[1] = 'hello'
assert lst == ['a', 'hello', 'c']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_set_value_idx_negative():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf[-1] = 'hello'
assert lst == ['a', 'b', 'hello']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_multiple_modifications():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf[1] = 'hello'
buf.insert(1, 'ohai')
del buf[0]
assert lst == ['ohai', 'hello', 'c']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_iter():
buf = Buf(['a', 'b', 'c'])
buf_iter = iter(buf)
assert next(buf_iter) == 'a'
assert next(buf_iter) == 'b'
assert next(buf_iter) == 'c'
with pytest.raises(StopIteration):
next(buf_iter)
def test_buf_append():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf.append('q')
assert lst == ['a', 'b', 'c', 'q']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_pop_default():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf.pop()
assert lst == ['a', 'b']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_buf_pop_idx():
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf.pop(1)
assert lst == ['a', 'c']
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
@pytest.mark.parametrize(
'new_lines',
(
pytest.param(['d', 'b', 'c'], id='replace op'),
pytest.param(['a', 'q', 'q', 'c'], id='replace different size'),
pytest.param(['c'], id='delete op'),
pytest.param(['a', 'q', 'q', 'q', 'b', 'c'], id='insert op'),
),
)
def test_replace_lines(new_lines):
lst = ['a', 'b', 'c']
buf = Buf(lst)
with buf.record() as modifications:
buf.replace_lines(new_lines)
assert lst == new_lines
buf.apply(modifications)
assert lst == ['a', 'b', 'c']
def test_restore_eof_invariant():
lst = ['a', 'b', 'c']
buf = Buf(lst)
buf.restore_eof_invariant()
assert lst == ['a', 'b', 'c', '']
buf.restore_eof_invariant()
assert lst == ['a', 'b', 'c', '']
@pytest.fixture
def fake_wcwidth():
chars = {'a': 1, 'b': 1, 'c': 1, '🔵': 2}
with mock.patch.object(babi.buf, 'wcwidth', chars.__getitem__):
yield
@pytest.mark.usefixtures('fake_wcwidth')
def test_line_positions():
buf = Buf(['a', '🔵b', 'c'])
assert buf.line_positions(0) == (0, 1)
assert buf.line_positions(1) == (0, 2, 3)
assert buf.line_positions(2) == (0, 1)
@pytest.mark.usefixtures('fake_wcwidth')
def test_set_tab_size():
buf = Buf(['\ta'])
assert buf.line_positions(0) == (0, 4, 5)
buf.set_tab_size(8)
assert buf.line_positions(0) == (0, 8, 9)
| 18.659919 | 72 | 0.533521 | 654 | 4,609 | 3.636086 | 0.134557 | 0.029437 | 0.037847 | 0.065601 | 0.633305 | 0.612279 | 0.563499 | 0.52439 | 0.52439 | 0.495795 | 0 | 0.011354 | 0.254719 | 4,609 | 246 | 73 | 18.735772 | 0.680349 | 0 | 0 | 0.47973 | 0 | 0.006757 | 0.067694 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.128378 | false | 0 | 0.033784 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8493fc3f305b6cd5d37b4ba35bfc0dfffc975a3f | 18,557 | py | Python | simulation.py | laloc2496/cdn_configuration_optimization | 58cf2278456d0ef8796570f12f1d00fd68aec686 | [
"MIT"
] | null | null | null | simulation.py | laloc2496/cdn_configuration_optimization | 58cf2278456d0ef8796570f12f1d00fd68aec686 | [
"MIT"
] | null | null | null | simulation.py | laloc2496/cdn_configuration_optimization | 58cf2278456d0ef8796570f12f1d00fd68aec686 | [
"MIT"
] | null | null | null | import networkx as nx
import random, pickle, string
from src.util.gen_files import *
import threading
import time
from src.net.topology import NetTopology
from src.algorithm import *
import os, sys
from random import randint, shuffle, sample
from src.util.utils import *
from src.algorithm.cache import *
from src.util.separator_rank import *
sys.setrecursionlimit(2000000)
MAX_COUNT_INDEX = 3001946
def findColorCacheServer(graph, nearestColorServerInfo, currentCacheId, contentColor, packetPath):
for entryInfo in nearestColorServerInfo[currentCacheId]:
cacheColor = entryInfo["color"]
cacheId = entryInfo["node"]
cacheIdx = cacheId.split("_")[1]
if packetPath[int(cacheIdx)-1] == "1":
continue
if int(contentColor, 2) & int(cacheColor, 2) != 0:
return cacheId
return None
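# Color matching above is a bitwise AND over binary color strings, e.g. a content
# colored '0110' matches a cache colored '0100' (0b0110 & 0b0100 != 0) but not one
# colored '1001'; caches already marked "1" in `packetPath` are skipped.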
def findShortestCacheServer(graph, sourceId, targetId, routingTable):
    # Memoize the next hop per (source, target) pair to avoid repeated Dijkstra runs.
    # (setdefault keeps previously cached targets for the same source instead of
    # replacing the whole per-source dict.)
    if sourceId not in routingTable or targetId not in routingTable[sourceId]:
        cacheIdPath = nx.dijkstra_path(graph, sourceId, targetId, "weight")
        routingTable.setdefault(sourceId, {})[targetId] = cacheIdPath[1]
    return routingTable[sourceId][targetId]
def processRequestFlow(sourceCacheId, cacheDict, graph, nearestColorServerInfo, serverToColorMap, contentId, contentColor, routingTable, hitMissDict):
packetPath = ["0"] * len(cacheDict.keys())
currentCacheId = sourceCacheId
while True:
if currentCacheId == "mainServer":
return currentCacheId, hitMissDict
if cacheDict[currentCacheId].get(contentId, contentColor) != -1:
hitMissDict[currentCacheId] = {"hit":1, "miss": 0}
return currentCacheId, hitMissDict
else:
hitMissDict[currentCacheId] = {"hit":0, "miss": 1}
currentCacheIdx = currentCacheId.split("_")[1]
packetPath[int(currentCacheIdx)-1] = "1"
nextColorId = findColorCacheServer(graph, nearestColorServerInfo, currentCacheId, contentColor, packetPath)
if nextColorId == None:
currentCacheId = findShortestCacheServer(graph, currentCacheId, "mainServer", routingTable)
else:
tempCacheId = currentCacheId
currentCacheId = findShortestCacheServer(graph, currentCacheId, nextColorId, routingTable)
if currentCacheId != "mainServer":
currentCacheIdx = currentCacheId.split("_")[1]
if packetPath[int(currentCacheIdx)-1] == "1":
currentCacheId = findShortestCacheServer(graph, tempCacheId, "mainServer", routingTable)
def processResponseFlow(targetCacheId, sourceCacheId, cacheDict, graph, contentId, contentColor, fileSize):
cacheIdPath = nx.dijkstra_path(graph, targetCacheId, sourceCacheId, "weight")
cacheIdPath = cacheIdPath[1:]
traffic = 0
for cacheId in cacheIdPath:
traffic += 1
if cacheId == "mainServer":
return traffic
if cacheDict[cacheId].get(contentId, contentColor) == -1:
cacheDict[cacheId].set(contentId, contentColor, fileSize)
return traffic
def runVirtualSendFileColorBased(graph, cacheDict, client, contentId, contentColor, nearestColorServerInfo,
serverToColorMap, fileSize, routingTable):
result = {}
traffic = 0
sourceCacheId = client.replace("client_", "router_")
targetCacheId, result = processRequestFlow(sourceCacheId, cacheDict, graph, nearestColorServerInfo, serverToColorMap, contentId, contentColor, routingTable, result)
traffic += processResponseFlow(targetCacheId, sourceCacheId, cacheDict, graph, contentId, contentColor, fileSize)
return result, traffic
def runWithColorRouting(graph, cacheDict, contentToColorDict, nearestColorServerInfo,
serverToColorMap, fileSize, routingTable, runReqDict, clientList=[], interval=""):
result = {}
idx = 0
totalTraffic = 0
while True:
isEnd = True
for client in clientList:
cacheId = client.replace("client_", "Cache_")
if not cacheId in runReqDict:
continue
if idx >= len(runReqDict[cacheId][interval]):
continue
if fileSize != -1:
                contentId = runReqDict[cacheId][interval][idx][0]
_fileSize = fileSize
else:
contentId, _fileSize, _ = runReqDict[cacheId][interval][idx]
contentColor = contentToColorDict[contentId]
hitMissDict, traffic = runVirtualSendFileColorBased(graph, cacheDict, client, contentId, contentColor,
nearestColorServerInfo, serverToColorMap, int(_fileSize), routingTable)
if int(contentId) > MAX_COUNT_INDEX:
for routerId in hitMissDict:
if routerId in result:
result[routerId]["hit"] += hitMissDict[routerId]["hit"]
result[routerId]["miss"] += hitMissDict[routerId]["miss"]
else:
result[routerId] = {"hit": hitMissDict[routerId]["hit"], "miss": hitMissDict[routerId]["miss"]}
isEnd = False
totalTraffic += traffic
if isEnd:
break
        idx += 1
return result, totalTraffic
def runVirtualSendFileShortestPath(graph, cacheDict, client, contentId, fileSize, routingTable, mode="no-color", contentColor=""):
traffic = 0
nextCacheId = client.replace("client_", "router_")
result = {}
result[nextCacheId] = {"hit": 0, "miss": 0}
if mode == "no-cache":
shortestRoutePath = nx.dijkstra_path(graph, nextCacheId, "mainServer", "weight")
shortestRoutePath = shortestRoutePath[1:]
for routerId in shortestRoutePath:
traffic += 1
if routerId == "mainServer":
break
return {}, traffic
if mode== "no-color":
if cacheDict[nextCacheId] is None:
pass
else:
if cacheDict[nextCacheId].get(contentId) != -1:
result[nextCacheId]["hit"] = 1
return result, traffic
else:
cacheDict[nextCacheId].set(contentId, fileSize)
result[nextCacheId]["miss"] = 1
while True:
routerId = findShortestCacheServer(graph, nextCacheId, "mainServer", routingTable)
result[routerId] = {"hit": 0, "miss": 0}
traffic += 1
if routerId == "mainServer":
return result, traffic
else:
if cacheDict[routerId] is None:
pass
else:
if cacheDict[routerId].get(contentId) == -1:
result[routerId]["miss"] = 1
cacheDict[routerId].set(contentId, fileSize)
else:
result[routerId]["hit"] = 1
return result, traffic
nextCacheId = routerId
else: ## mode == "tag-color"
if cacheDict[nextCacheId] is None:
pass
else:
if cacheDict[nextCacheId].get(contentId, contentColor) != -1:
result[nextCacheId]["hit"] = 1
return result, traffic
else:
cacheDict[nextCacheId].set(contentId, contentColor, fileSize)
result[nextCacheId]["miss"] = 1
while True:
routerId = findShortestCacheServer(graph, nextCacheId, "mainServer", routingTable)
traffic += 1
result[routerId] = {"hit": 0, "miss": 0}
if routerId == "mainServer":
return result, traffic
else:
if cacheDict[routerId] is None:
pass
else:
if cacheDict[routerId].get(contentId, contentColor) == -1:
result[routerId]["miss"] = 1
cacheDict[routerId].set(contentId, contentColor, fileSize)
else:
result[routerId]["hit"] = 1
return result, traffic
nextCacheId = routerId
def runWithShortestPath(graph, cacheDict, fileSize, mode, routingTable, runReqDict, clientList, interval, contentToColorDict=None):
result = {}
totalTraffic = 0
idx = 0
while True:
isEnd = True
for client in clientList:
cacheId = client.replace("client_", "Cache_")
if not cacheId in runReqDict:
continue
if idx >= len(runReqDict[cacheId][interval]):
continue
if fileSize != -1:
contentId = runReqDict[cacheId][interval][idx][0]
_fileSize = fileSize
else:
contentId, _fileSize, _ = runReqDict[cacheId][interval][idx]
if mode == "tag-color":
contentColor = str(contentToColorDict[contentId])
else:
contentColor = ""
hitMissDict, traffic = runVirtualSendFileShortestPath(graph, cacheDict, client, contentId, _fileSize, routingTable, mode, contentColor)
if int(contentId) > MAX_COUNT_INDEX:
for routerId in hitMissDict:
if routerId in result:
result[routerId]["hit"] += hitMissDict[routerId]["hit"]
result[routerId]["miss"] += hitMissDict[routerId]["miss"]
else:
result[routerId] = {"hit": hitMissDict[routerId]["hit"], "miss": hitMissDict[routerId]["miss"]}
totalTraffic += traffic
isEnd = False
if isEnd:
break
idx += 1
return result, totalTraffic
def reAssignCacheDict(config):
cacheMemoryDict = {}
for routerInfo in config["Routers"]:
routerId = routerInfo["ID"]
if "type" in routerInfo:
if routerInfo["type"] == "LRU":
cacheMemoryDict[routerId] = {"type": "LRU", "memory": LRUCache(routerInfo["maxSize"])}
elif routerInfo["type"] == "LFU":
cacheMemoryDict[routerId] = {"type": "LFU", "memory": LFUCache(routerInfo["maxSize"])}
elif routerInfo["type"] == "FIFO":
cacheMemoryDict[routerId] = {"type": "FIFO", "memory": FIFOCache(routerInfo["maxSize"])}
elif routerInfo["type"] == "Hyrid":
cacheMemoryDict[routerId] = {"type": "Hyrid",
"memory": ColorCache(None, routerInfo["capacityRatio"] , routerInfo["maxSize"])}
return cacheMemoryDict
def runSimulationWithRealDataset(interval, fileSize, mode, routingTable, topo, colorList, runReqNums, warmUpReqNums, separatorRankIncrement, parallel_idx=0):
graph = topo.graph
cacheDict = topo.cacheMemoryDict
custom_data = topo.contentGenerator.custom_data
clientIds = topo.clientIds
contentGenerator = topo.contentGenerator
if cacheDict[list(cacheDict.keys())[0]] is not None:
cacheCapacity = cacheDict[list(cacheDict.keys())[0]].maxSize
else:
cacheCapacity = 0
    traffic = 0
for i in range(interval):
uniqueSortedContentList = topo.contentGenerator.uniqueSortedContentList["Interval%s" % str(i)]
if mode == "no-color":
hitRateDict, traffic = runWithShortestPath(graph, cacheDict, fileSize, mode, routingTable, custom_data, clientIds, "Interval"+str(i))
elif mode == "full-color": # color
nearestColorServerInfo = topo.colorRouteInfo
serverToColorMap = topo.serverToColorMap
rankInfo = compute_rank(len(colorList), cacheCapacity, fileSize, graph, nearestColorServerInfo, contentGenerator,
cacheDict, serverToColorMap, warmUpReqNums, runReqNums,
clientIds, separatorRankIncrement, "Interval"+str(i))
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, rankInfo["S"], len(colorList))
hitRateDict, traffic = runWithColorRouting(graph, cacheDict, contentToColorDict, nearestColorServerInfo,
serverToColorMap, fileSize, routingTable, custom_data, clientIds, "Interval"+str(i))
elif mode == "tag-color": # color
nearestColorServerInfo = topo.colorRouteInfo
rankInfo = compute_rank_shortest_path_with_color(len(colorList), cacheCapacity, fileSize, graph, nearestColorServerInfo, contentGenerator,
cacheDict, warmUpReqNums, runReqNums,
clientIds, separatorRankIncrement, "Interval"+str(i))
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, rankInfo["S"], len(colorList))
hitRateDict, traffic = runWithShortestPath(graph, cacheDict, fileSize, mode, routingTable, custom_data, clientIds, "Interval"+str(i), contentToColorDict)
else:
hitRateDict, traffic = runWithShortestPath(graph, cacheDict, fileSize, mode, routingTable, custom_data, clientIds, "Interval"+str(i))
return traffic
def runSimulationWithPredefinedDistribution(fileSize, mode, routingTable, topo, colorList, runReqNums, warmUpReqNums, separatorRankIncrement, generateData, uniqueSortedContentList, parallel_idx=0):
graph = topo.graph
cacheDict = topo.cacheMemoryDict
clientIds = topo.clientIds
contentGenerator = topo.contentGenerator
if cacheDict[list(cacheDict.keys())[0]] is not None:
cacheCapacity = cacheDict[list(cacheDict.keys())[0]].maxSize
else:
cacheCapacity = 0
    traffic = 0
if mode == "no-color":
hitRateDict, traffic = runWithShortestPath(graph, cacheDict, fileSize, mode, routingTable, generateData, clientIds, "noInterval")
elif mode == "full-color": # color
nearestColorServerInfo = topo.colorRouteInfo
serverToColorMap = topo.serverToColorMap
rankInfo = compute_rank(len(colorList), cacheCapacity, fileSize, graph, nearestColorServerInfo, contentGenerator,
cacheDict, serverToColorMap, warmUpReqNums, runReqNums,
clientIds, separatorRankIncrement, "noInterval", parallel_idx)
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, rankInfo["S"], len(colorList))
hitRateDict, traffic = runWithColorRouting(graph, rankInfo['cacheDict'], contentToColorDict, nearestColorServerInfo,
serverToColorMap, fileSize, routingTable, generateData, clientIds, "noInterval")
elif mode == "tag-color": # color
nearestColorServerInfo = topo.colorRouteInfo
rankInfo = compute_rank_shortest_path_with_color(len(colorList), cacheCapacity, fileSize, graph, nearestColorServerInfo, contentGenerator,
cacheDict, warmUpReqNums, runReqNums,
clientIds, separatorRankIncrement, "noInterval", parallel_idx)
contentToColorDict = colorizeWithSeparatorRanks(uniqueSortedContentList, rankInfo["S"], len(colorList))
hitRateDict, traffic = runWithShortestPath(graph, rankInfo['cacheDict'], fileSize, mode, routingTable, generateData, clientIds, "noInterval", contentToColorDict)
else:
hitRateDict, traffic = runWithShortestPath(graph, cacheDict, fileSize, mode, routingTable, generateData, clientIds, "noInterval")
return traffic
if __name__ == '__main__':
global dataPath
jsonFile = "/home/picarib_home/cdn_configuration_optimization/config/json/sbd_custom-origin.json"
configDirPath = "/home/picarib_home/cdn_configuration_optimization/config/sbd_custom-origin/"
dataPath = "/home/picarib_home/cdn_configuration_optimization/data/"
config = loadJSON(jsonFile)
interval = 1 if "custom" not in config["RequestModels"] else config["RequestModels"]["custom"]["interval"]
mode = config["RoutingMode"] # [no-cache, no-color, tag-color, full-color]
fileSize = config["FileSize"]
runReqNums = config["RunReqNums"] if "RunReqNums" in config else -1
warmUpReqNums = config["WarmUpReqNums"] if "WarmUpReqNums" in config else -1
colorNums = config["colorNums"]
separatorRankIncrement = config["separatorRankIncrement"]
colorList = [''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) for i in range(colorNums)]
topo = NetTopology(config, configDirPath, mode, warmUpReqNums, fileSize, colorList)
topo.build()
routingTable = {}
    # Restored from the commented-out draft: this block defines generateData and
    # uniqueSortedContentList, which the predefined-distribution branch below
    # requires (it assumes os and pickle are imported at the top of this file).
    savePredefinedContent = os.path.join("tmp/", "save_content.pkl")
    if os.path.isfile(savePredefinedContent):
        with open(savePredefinedContent, "rb") as f:
            generateData, uniqueSortedContentList = pickle.load(f)
    else:
        uniqueSortedContentList = topo.contentGenerator.uniqueSortedContentList["noInterval"]
        generateData = {}
        for client in topo.clientIds:
            cacheId = client.replace("client", "Cache")
            generateData[cacheId] = {"noInterval": topo.contentGenerator.randomGen(runReqNums)}
        with open(savePredefinedContent, "wb") as f:
            pickle.dump([generateData, uniqueSortedContentList], f)
print("*** Start runing simulation ***")
if topo.contentGenerator.dist == None:
result = runSimulationWithRealDataset(interval, fileSize, mode, routingTable, topo, colorList, runReqNums, warmUpReqNums, separatorRankIncrement)
else:
MAX_COUNT_INDEX = 0
result = runSimulationWithPredefinedDistribution(fileSize, mode, routingTable, topo, colorList, runReqNums, warmUpReqNums, separatorRankIncrement, generateData, uniqueSortedContentList)
print(result)
| 49.092593 | 197 | 0.620898 | 1,526 | 18,557 | 7.500655 | 0.135649 | 0.017124 | 0.023065 | 0.020182 | 0.643107 | 0.614625 | 0.577232 | 0.563079 | 0.49004 | 0.440503 | 0 | 0.008328 | 0.28178 | 18,557 | 377 | 198 | 49.222812 | 0.850465 | 0.040254 | 0 | 0.595016 | 0 | 0 | 0.06101 | 0.01327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034268 | false | 0.012461 | 0.037383 | 0 | 0.140187 | 0.006231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8495d4f3d80007cf3963d73184d2d8cdc2010a50 | 4,475 | py | Python | assignment3/code/lr_1d.py | cjy513203427/SML_Assignment | 630e5b73d2ce222f4adb29f91d2ee3007f8972ff | [
"MIT"
] | null | null | null | assignment3/code/lr_1d.py | cjy513203427/SML_Assignment | 630e5b73d2ce222f4adb29f91d2ee3007f8972ff | [
"MIT"
] | null | null | null | assignment3/code/lr_1d.py | cjy513203427/SML_Assignment | 630e5b73d2ce222f4adb29f91d2ee3007f8972ff | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
@File    : lr_1d.py
@Modify Time      @Author    @Description
------------ ------- -----------
2021/7/5 22:51 Jonas None
'''
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.stats import norm
train_data = np.loadtxt("lin_reg_train.txt")
test_data = np.loadtxt("lin_reg_test.txt")
def Xy(data):
n = len(data)
x = np.empty((2, n))
y = np.empty((n, 1))
for i in range(0, n):
x[0, i] = data[i, 0]
x[1, i] = 1
y[i, 0] = data[i, 1] # read the second column from data and give to the y
return x, y
def phiIJ(data):
X_data = data[:, 0]
y = data[:, 1]
N = data.shape[0]
    k_dim = 20
    X_ = np.zeros([X_data.shape[0], k_dim])
    for i in range(X_data.shape[0]):
        for j in range(k_dim):
            # B is the noise precision; it is set as a global in __main__ below
            X_[i][j] = np.power(np.e, ((-0.5 * B) * ((X_data[i] - (j + 1) * 0.1) ** 2)))
b = np.ones(N)
X_ = np.concatenate([X_, b.reshape(N, 1)], axis=1)
return X_.T, y
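# Shape note (added for clarity): for N data points, phiIJ returns a
# (k_dim + 1) x N design matrix -- 20 Gaussian RBF features centered at
# 0.1, 0.2, ..., 2.0 plus one bias row. For example:
#     Phi, y = phiIJ(train_data)   # Phi.shape == (21, len(train_data))
# (phiIJ reads the global B defined in __main__ below, so it is only
# usable after B has been set.)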
def lambda_I(alpha, beta):
c = alpha / beta
I = np.zeros([21, 21])
for j in range(0, 21):
I[j, j] = c
return I
# get w: the posterior mean of Bayesian linear regression,
# w = (Phi Phi^T + (alpha / beta) I)^{-1} Phi y
def parameter_posterior(X, y, ci):
return np.linalg.inv(X @ X.T + ci) @ X @ y
def predicted_value(x, w):
y = np.empty((len(x.T), 1))
for i in range(0, len(y)):
x_i = x.T[i]
y[i] = x_i @ w
return y
def RMSE(y_pre, y):
N = len(y_pre)
sum = 0
for i in range(0, N):
sum = sum + pow((y_pre[i] - y[i]), 2)
result = math.sqrt(sum / N)
return result
def square(x_train, x_test, a, B):
x_x = x_train @ x_train.T
B_xx = B * x_x
square = np.empty((len(x_test.T), 1))
aI = np.zeros((B_xx.shape[0], B_xx.shape[0]))
for j in range(0, B_xx.shape[0]):
aI[j][j] = a
inverse = np.linalg.inv((aI + B_xx))
for i in range(0, len(square)):
x = x_test.T[i]
x_t = np.transpose(x)
square[i] = (1 / B) + np.matmul((np.matmul(x, inverse)), x_t)
return square
def Gaussian_Distribution(mean, square, y_data):
p = np.empty((len(mean), 1))
for i in range(0, len(square)):
p1 = 1 / (math.sqrt(2 * math.pi * square[i]))
p2 = ((-1) * pow((y_data[i] - mean[i]), 2)) / (2 * square[i])
p[i] = p1 * math.exp(p2)
return p
def average_log_likelihood(p):
for i in range(len(p)):
if i == 0:
sumy = np.log(p[i])
else:
sumy = sumy + np.log(p[i])
average = sumy / len(p)
return average
if __name__ == '__main__':
B = 1 / (0.1 ** 2)
a = 0.01
x_train_bayesian_ori, y_train_bayesian_ori = Xy(train_data)
x_train, y_train = phiIJ(train_data)
test_x, test_y = phiIJ(test_data)
ci = lambda_I(a, B)
w_posterior = parameter_posterior(x_train, y_train, ci)
test_predicted_value = predicted_value(test_x, w_posterior)
test_p = Gaussian_Distribution(test_predicted_value, square(x_train, test_x, a, B), test_y)
log_l_test = average_log_likelihood(test_p)
print("the log-likelihood of the test is" + str(log_l_test))
print("RMSE test is " + str(RMSE(test_predicted_value, test_y)))
w_posterior_train = parameter_posterior(x_train, y_train, ci)
train_predicted_value = predicted_value(x_train, w_posterior_train)
train_p = Gaussian_Distribution(train_predicted_value, square(x_train, x_train, a, B),
y_train)
log_l_train = average_log_likelihood(train_p)
print("the log-likelihood of the train is" + str(log_l_train))
print("RMSE train is " + str(RMSE(train_predicted_value, y_train)))
x_ = np.linspace(np.min(x_train_bayesian_ori[0]), np.max(x_train_bayesian_ori[0]), num=100).reshape(100, 1)
x_ = np.concatenate([x_, np.ones(100).reshape(100, 1)], axis=1)
x_maped, _ = phiIJ(x_)
y_ = predicted_value(x_maped, w_posterior)
sig_p = square(x_maped, x_maped, a, B)
sig_p = np.sqrt(sig_p)
plt.plot(x_.T[0], y_, c='blue', label='prediction')
plt.scatter(x_train_bayesian_ori[0], y_train_bayesian_ori, c='black', label='original train data points')
for i in range(3):
plt.fill_between(x_.T[0], y_.reshape(100) + sig_p.reshape(100) * (i + 1.),
y_.reshape(100) - sig_p.reshape(100) * (i + 1.),
color="b", alpha=0.3)
plt.title("Bayesian Linear Regression ")
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
| 28.322785 | 111 | 0.576313 | 766 | 4,475 | 3.16188 | 0.197128 | 0.034682 | 0.019818 | 0.036334 | 0.199835 | 0.120562 | 0.095376 | 0.022296 | 0.022296 | 0 | 0 | 0.03416 | 0.260782 | 4,475 | 157 | 112 | 28.503185 | 0.698005 | 0.052737 | 0 | 0.017699 | 0 | 0 | 0.049657 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079646 | false | 0 | 0.035398 | 0.00885 | 0.19469 | 0.035398 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
849602ab96643a0ca50a7973accb1f2950c12d70 | 1,252 | py | Python | datasets/mvda_soil/demos.py | ryuzakyl/data-bloodhound | ae0413e748e55a0d2dbae35bbe96a672f313a64b | [
"Apache-2.0"
] | 3 | 2019-03-18T03:22:06.000Z | 2021-04-06T07:53:51.000Z | datasets/mvda_soil/demos.py | ryuzakyl/data-bloodhound | ae0413e748e55a0d2dbae35bbe96a672f313a64b | [
"Apache-2.0"
] | null | null | null | datasets/mvda_soil/demos.py | ryuzakyl/data-bloodhound | ae0413e748e55a0d2dbae35bbe96a672f313a64b | [
"Apache-2.0"
] | 2 | 2020-10-05T08:22:25.000Z | 2020-10-05T08:24:02.000Z | #!/usr/bin/env
# -*- coding: utf-8 -*-
# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Victor M. Mendiola Lau <ryuzakyl@gmail.com>, March 2017
import pylab
import numpy as np
from datasets.mvda_soil import load_mvda_soil
# ---------------------------------------------------------------
def plot_mvda_soil_data_set():
# loading the mvda soil data set
ds = load_mvda_soil()
# removing columns associated with classes and properties
ds = ds.iloc[:, :-1]
# plotting the data set
ds.T.plot(legend=None)
pylab.show()
def plot_mvda_soil_by_type():
# loading the mvda soil data set
ds = load_mvda_soil()
# getting unique set of varieties
y = np.unique(ds['type'].tolist())
# creating the figure and adding subplots
n_rows, n_cols = 3, 4
fig, axes = pylab.subplots(nrows=n_rows, ncols=n_cols)
# for each variety
for idx, label in enumerate(y):
i, j = idx // n_cols, idx % n_cols
axes[i, j].set_title(label)
ds[ds['type'] == label].iloc[:, :-2].T.plot(ax=axes[i, j], legend=None)
# actually showing the plot
pylab.show()
| 27.217391 | 79 | 0.632588 | 184 | 1,252 | 4.184783 | 0.538043 | 0.083117 | 0.046753 | 0.058442 | 0.101299 | 0.101299 | 0.101299 | 0.101299 | 0.101299 | 0.101299 | 0 | 0.009109 | 0.210863 | 1,252 | 45 | 80 | 27.822222 | 0.770243 | 0.464856 | 0 | 0.222222 | 0 | 0 | 0.012214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8496886dbc56ebec7509a1f73cdab97ecc41c0e3 | 3,487 | py | Python | gan_training/DCGAN.py | sanilpande/stars-align | cdf69f487d3e4d2cef1d455840f094e2df55415e | [
"MIT"
] | null | null | null | gan_training/DCGAN.py | sanilpande/stars-align | cdf69f487d3e4d2cef1d455840f094e2df55415e | [
"MIT"
] | null | null | null | gan_training/DCGAN.py | sanilpande/stars-align | cdf69f487d3e4d2cef1d455840f094e2df55415e | [
"MIT"
] | 1 | 2020-11-04T15:08:38.000Z | 2020-11-04T15:08:38.000Z | """
DCGAN Model to get Generator for DefenseGAN Baseline Implementation.
References:
https://www.coursera.org/learn/build-basic-generative-adversarial-networks-gans
https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
"""
import torch
from torch import nn
class Generator(nn.Module):
"""Generator in the DCGAN."""
def __init__(self, z_dim=10, image_channels=3, hidden_dim=64):
super(Generator, self).__init__()
self.z_dim = z_dim
self.gen_1 = self.make_gen(z_dim, hidden_dim*8, kernel_size=4, stride=1)
# Size 1024x4x4
self.gen_2 = self.make_gen(hidden_dim*8, hidden_dim*4, kernel_size=4, stride=2, padding=1)
# Size 512x8x8
self.gen_3 = self.make_gen(hidden_dim*4, hidden_dim*2, kernel_size=4, stride=2, padding=1)
# Size 256x16x16
self.gen_4 = self.make_gen(hidden_dim*2, hidden_dim, kernel_size=4, stride=2, padding=1)
# Size 128x32x32
self.gen_final = self.make_gen(hidden_dim, image_channels, kernel_size=4, stride=2, padding=1, final_layer=True)
# Size 3x64x64
def make_gen(self, input_channels, output_channels, kernel_size=4, stride=1, padding=0, final_layer=False):
"""Make a generator block with transpose convolutions."""
if not final_layer:
return nn.Sequential(
nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride, padding, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(),
)
else: # Final Layer
return nn.Sequential(
nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride, padding, bias=False),
nn.Tanh(),
)
def forward(self, noise):
x = noise.view(len(noise), self.z_dim, 1, 1)
x = self.gen_1(x)
x = self.gen_2(x)
x = self.gen_3(x)
x = self.gen_4(x)
out = self.gen_final(x)
return out
class Discriminator(nn.Module):
def __init__(self, image_channels=3, hidden_dim=64):
super(Discriminator, self).__init__()
self.disc_1 = self.make_disc(image_channels, hidden_dim, first_layer=True)
# Size 16x32x32
self.disc_2 = self.make_disc(hidden_dim, hidden_dim * 2)
# Size 32x16x16
self.disc_3 = self.make_disc(hidden_dim * 2, hidden_dim * 4)
# Size 64x8x8
self.disc_4 = self.make_disc(hidden_dim * 4, hidden_dim * 8)
# Size 128x4x4
self.disc_final = self.make_disc(hidden_dim * 8, 1, final_layer=True)
# Size 1x1x1
def make_disc(self, input_channels, output_channels, kernel_size=4, stride=2, final_layer=False, first_layer=False):
"""Make a discriminator block using convolutions to downsample."""
if not final_layer:
return nn.Sequential(
nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding=1, bias=False),
nn.Identity() if first_layer else nn.BatchNorm2d(output_channels),
nn.LeakyReLU(0.2),
)
else:
return nn.Sequential(
nn.Conv2d(input_channels, output_channels, kernel_size=4, stride=1, bias=False),
)
def forward(self, image):
x = self.disc_1(image)
x = self.disc_2(x)
x = self.disc_3(x)
x = self.disc_4(x)
pred = self.disc_final(x)
pred = pred.flatten()
return pred
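if __name__ == '__main__':
    # Illustrative smoke test (not part of the original module): checks the
    # tensor shapes documented in the layer comments above.
    gen = Generator(z_dim=10)
    disc = Discriminator()
    noise = torch.randn(4, 10)
    fake = gen(noise)   # expected shape: (4, 3, 64, 64)
    pred = disc(fake)   # expected shape: (4,)
    print(fake.shape, pred.shape)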
| 39.179775 | 120 | 0.632062 | 478 | 3,487 | 4.378661 | 0.219665 | 0.077401 | 0.042045 | 0.064979 | 0.466316 | 0.315815 | 0.311515 | 0.270425 | 0.211658 | 0.161491 | 0 | 0.047711 | 0.260683 | 3,487 | 88 | 121 | 39.625 | 0.764158 | 0.148265 | 0 | 0.172414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.034483 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8496d65386d0cc0e3eff34b5236093dc29e85684 | 3,713 | py | Python | figures/scripts/metrics_roc.py | ljalil/MasterThesis | 59f4ec6356f41647cc4ac9510bcdd0c8506919b8 | [
"CC-BY-4.0"
] | 6 | 2020-06-25T16:28:02.000Z | 2022-02-12T19:53:37.000Z | figures/scripts/metrics_roc.py | ljalil/MasterThesis | 59f4ec6356f41647cc4ac9510bcdd0c8506919b8 | [
"CC-BY-4.0"
] | null | null | null | figures/scripts/metrics_roc.py | ljalil/MasterThesis | 59f4ec6356f41647cc4ac9510bcdd0c8506919b8 | [
"CC-BY-4.0"
] | 1 | 2020-08-14T13:37:32.000Z | 2020-08-14T13:37:32.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat May 9 11:03:34 2020
@author: Abdeljalil
"""
import numpy as np
np.random.seed(10)
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=8000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(
X_train, y_train, test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression(max_iter=1000)
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression(max_iter=1000)
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
# Supervised transformation based on gradient boosted trees
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression(max_iter=1000)
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
# Leftover scaffold from the sklearn ROC example this script is based on;
# the annotated ROC figure saved for the thesis is built in the cell below.
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
#%%
fig, axis = plt.subplots(1,1, figsize=(4.5,3.5))
axis.plot([0, 1], [0, 1], 'k--', lw='.8')
auc_rf = roc_auc_score(y_test, y_pred_rf_lm)
auc_1 = roc_auc_score(y_test, y_pred_rt)
auc_grd = roc_auc_score(y_test, y_pred_grd_lm)
c1 = mpl.cm.tab20c(0)
c2 = mpl.cm.tab20c(4)
c3 = mpl.cm.tab20c(8)
axis.plot(fpr_grd_lm, tpr_grd_lm,c=c3, label='Classifier 3 (AUC={:04.3f})'.format(auc_grd)) #gbt
axis.plot(fpr_rf, tpr_rf, c=c2,label='Classifier 2 (AUC={:04.3f})'.format(auc_rf)) #rf
axis.plot(fpr_rt_lm, tpr_rt_lm, c=c1,label='Classifier 1 (AUC={:04.3f})'.format(auc_1))
axis.set_xlabel('False Positive Rate')
axis.set_ylabel('True Positive Rate')
axis.grid(ls=':')
axis.axis([0, 1, 0, 1])
axis.legend(framealpha=.5, loc='lower right')
fig.tight_layout()
#axis.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
#axis.plot(fpr_grd, tpr_grd, label='GBT')
fig.savefig('D:\\Thesis\\Document\\figures\\metrics_roc.pdf') | 33.45045 | 96 | 0.748721 | 647 | 3,713 | 4.017002 | 0.248841 | 0.02501 | 0.018469 | 0.030781 | 0.297807 | 0.190073 | 0.088111 | 0 | 0 | 0 | 0 | 0.029636 | 0.118503 | 3,713 | 111 | 97 | 33.45045 | 0.764436 | 0.173714 | 0 | 0 | 0 | 0 | 0.076797 | 0.015097 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
849755a42d03dcd27711cb278727ab380e899f5c | 1,729 | py | Python | annotate/ac_build.py | universalbenefitNLP/text_annotate_py | e60e29af410c4ec0c8a2b09de8ea772d0572e066 | [
"Unlicense"
] | null | null | null | annotate/ac_build.py | universalbenefitNLP/text_annotate_py | e60e29af410c4ec0c8a2b09de8ea772d0572e066 | [
"Unlicense"
] | null | null | null | annotate/ac_build.py | universalbenefitNLP/text_annotate_py | e60e29af410c4ec0c8a2b09de8ea772d0572e066 | [
"Unlicense"
] | null | null | null | import sys
import os
import argparse
import pickle
import ahocorasick
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))  # make the project root importable first
from utils.utils import check_file, ensure_dir
def _get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--infile', type=str, default='../data/synonym', help='Directory of input file.')
parser.add_argument('--output', type=str, default='../data', help='Directory to save output file.')
parser.add_argument('--ac_name', type=str, default='ac.pickle', help='Filename of aho-corasick.')
parser.add_argument('--mean_name', type=str, default='means.pickle', help='Filename of means.')
args = parser.parse_args()
return args
def builder():
args = _get_parser()
check_file(args.infile)
ensure_dir(args.output)
A = ahocorasick.Automaton()
origin, annotation = list(), list()
infile = open(args.infile, 'r', encoding='utf-8')
for line in infile:
line = line.rstrip()
if not line:
continue
phrase, means = line.split(':::')
if not phrase or not means:
continue
origin.append(phrase)
annotation.append(means)
infile.close()
assert len(origin) == len(annotation)
for idx, phrase in enumerate(origin):
A.add_word(phrase, (idx, phrase))
A.make_automaton()
ac_name = os.path.join(args.output, args.ac_name)
means = os.path.join(args.output, args.mean_name)
with open(ac_name, 'wb') as outfile:
pickle.dump(A, outfile, protocol=pickle.HIGHEST_PROTOCOL)
with open(means, 'wb') as outfile:
pickle.dump(annotation, outfile, protocol=pickle.HIGHEST_PROTOCOL)
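def annotate(text, ac_path='../data/ac.pickle', mean_path='../data/means.pickle'):
    """Illustrative consumer of the artifacts written by builder() above.

    The default paths mirror the defaults in _get_parser(); adjust them to
    your layout. Yields (end_index, phrase, meaning) for every phrase the
    automaton matches in `text`.
    """
    with open(ac_path, 'rb') as f:
        A = pickle.load(f)
    with open(mean_path, 'rb') as f:
        means = pickle.load(f)
    # Automaton.iter yields (end_index, value); values were stored above
    # as (idx, phrase) tuples by builder().
    for end_idx, (idx, phrase) in A.iter(text):
        yield end_idx, phrase, means[idx]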
if __name__ == '__main__':
builder()
| 28.816667 | 105 | 0.661076 | 227 | 1,729 | 4.881057 | 0.356828 | 0.021661 | 0.061372 | 0.032491 | 0.146209 | 0.043321 | 0 | 0 | 0 | 0 | 0 | 0.000722 | 0.198959 | 1,729 | 59 | 106 | 29.305085 | 0.799278 | 0 | 0 | 0.045455 | 0 | 0 | 0.115095 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
849802068a48f6b66e9845576227f8306065b792 | 3,368 | py | Python | src/count/HashCount.py | abagaria/opiq | b7df5134d8b265972ad70f7ba92b385f1e3fe5f2 | [
"MIT"
] | 13 | 2020-02-13T16:09:16.000Z | 2021-07-21T16:23:39.000Z | src/count/HashCount.py | abagaria/opiq | b7df5134d8b265972ad70f7ba92b385f1e3fe5f2 | [
"MIT"
] | 2 | 2020-08-04T01:46:37.000Z | 2021-06-28T10:30:53.000Z | src/count/HashCount.py | abagaria/opiq | b7df5134d8b265972ad70f7ba92b385f1e3fe5f2 | [
"MIT"
] | 3 | 2020-08-01T07:33:02.000Z | 2021-11-13T20:54:55.000Z | # Taken from https://github.com/openai/EPG/blob/master/epg/exploration.py
import numpy as np
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class HashingBonusEvaluator(object):
"""Hash-based count bonus for exploration.
Tang, H., Houthooft, R., Foote, D., Stooke, A., Chen, X., Duan, Y., Schulman, J., De Turck, F., and Abbeel, P. (2017).
#Exploration: A study of count-based exploration for deep reinforcement learning.
In Advances in Neural Information Processing Systems (NIPS)
"""
def __init__(self, dim_key=128, obs_processed_flat_dim=None, bucket_sizes=None, actions=1):
# Hashing function: SimHash
if bucket_sizes is None:
# Large prime numbers
# bucket_sizes = [999931, 999953, 999959, 999961, 999979, 999983]
# Smaller prime numbers to (hopefully) have less precision errors when batching
bucket_sizes = [911, 919, 929, 937, 941, 947]
mods_list = []
for bucket_size in bucket_sizes:
mod = 1
mods = []
for _ in range(dim_key):
mods.append(mod)
mod = (mod * 2) % bucket_size
mods_list.append(mods)
self.bucket_sizes = np.asarray(bucket_sizes)
# self.mods_list = np.asarray(mods_list).T
self.mods_list = torch.tensor(mods_list).transpose(1,0).to(device).float()
self.tables = np.zeros((actions, len(bucket_sizes), np.max(bucket_sizes)))
# self.projection_matrix = np.random.normal(size=(obs_processed_flat_dim, dim_key))
        # std must be 2-D here: a (1, dim, key)-shaped std would broadcast the
        # projection matrix to 3-D and silently add a batch dimension to every key
        self.projection_matrix = torch.normal(mean=torch.zeros(size=(obs_processed_flat_dim, dim_key)),
                                              std=torch.ones(size=(obs_processed_flat_dim, dim_key))).to(device)
def project(self, obss):
return torch.sign(obss @ self.projection_matrix).float()
def compute_keys(self, obss):
binaries = torch.sign(obss @ self.projection_matrix).float()
# binaries = np.sign(np.asarray(obss).dot(self.projection_matrix))
keys = np.cast['int']((binaries @ self.mods_list).cpu()) % self.bucket_sizes
# keys = np.cast['int'](binaries.dot(self.mods_list)) % self.bucket_sizes
return keys
def inc_hash(self, obss, action):
keys = self.compute_keys(obss)
for idx in range(len(self.bucket_sizes)):
np.add.at(self.tables[action, idx], keys[:, idx], 1)
def query_hash(self, obss, action):
keys = self.compute_keys(obss)
all_counts = []
for idx in range(len(self.bucket_sizes)):
all_counts.append(self.tables[action, idx, keys[:, idx]])
return np.asarray(all_counts).min(axis=0)
def query_all_actions(self, obss):
keys = self.compute_keys(obss)
all_counts = []
for idx in range(len(self.bucket_sizes)):
all_counts.append(self.tables[:, idx, keys[:, idx]])
return np.asarray(all_counts).min(axis=0)
# def fit_before_process_samples(self, obs):
# if len(obs.shape) == 1:
# obss = [obs]
# else:
# obss = obs
# before_counts = self.query_hash(obss)
# self.inc_hash(obss)
#
# def predict(self, obs):
# counts = self.query_hash(obs)
# return 1. / np.maximum(1., np.sqrt(counts)) | 42.632911 | 122 | 0.620546 | 450 | 3,368 | 4.482222 | 0.348889 | 0.076351 | 0.044621 | 0.03768 | 0.292018 | 0.271195 | 0.242935 | 0.1765 | 0.16113 | 0.131879 | 0 | 0.028991 | 0.252375 | 3,368 | 79 | 123 | 42.632911 | 0.772041 | 0.331057 | 0 | 0.238095 | 0 | 0 | 0.004521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.047619 | 0.02381 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
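if __name__ == '__main__':
    # Illustrative usage (not part of the original module): hash a batch of
    # random observations under action 0, then query their pseudo-counts.
    hasher = HashingBonusEvaluator(dim_key=64, obs_processed_flat_dim=16, actions=4)
    obs = torch.randn(32, 16).to(device)
    hasher.inc_hash(obs, action=0)
    counts = hasher.query_hash(obs, action=0)  # min count across the hash tables
    print(counts.min(), counts.max())          # every queried count should be >= 1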
8498d5ed9bad8601b8ff437aad645427031351bd | 1,435 | py | Python | app.py | salsal97/birthday-bingo-flask | e7f235d8284a2442f5e06637148f421ec7ac609c | [
"MIT"
] | null | null | null | app.py | salsal97/birthday-bingo-flask | e7f235d8284a2442f5e06637148f421ec7ac609c | [
"MIT"
] | null | null | null | app.py | salsal97/birthday-bingo-flask | e7f235d8284a2442f5e06637148f421ec7ac609c | [
"MIT"
] | null | null | null | import re
import os
import random
import json
from datetime import datetime
from flask import Flask, render_template
app = Flask(__name__)
# using Flask's app.route decorator to map the URL route / to that function:
@app.route("/")
def home():
no_of_columns=4
image_path='static/index/'
image_list=os.listdir(image_path)
image_lists=[]
for _ in range (0, no_of_columns):
temp = image_list[:]
random.shuffle(temp)
image_lists.append(temp)
image_path='index/'
return render_template("index.html",
image_lists=image_lists,
image_path=image_path
)
@app.route("/game")
def game():
with open('static/bingo.json') as f:
data = json.load(f)
abort=False
for card in data['bingo_cards']:
if card['type'] == "photo":
# Check if photo exists
if (not os.path.isfile(card['photo_cropped'])):
print(card['photo_cropped']+" is not a file!!!")
abort=True
if (not os.path.isfile(card['photo_full'])):
print(card['photo_full']+" is not a file!!!")
abort=True
if abort:
raise RuntimeError('Aborting! Fix file structure')
return render_template("bingo.html",
data=data)
@app.route("/prize")
def prize():
with open('static/bingo.json') as f:
data = json.load(f)
return render_template("prize.html",
data=data)
| 25.175439 | 76 | 0.61324 | 194 | 1,435 | 4.386598 | 0.391753 | 0.052879 | 0.070505 | 0.044653 | 0.199765 | 0.199765 | 0.199765 | 0.091657 | 0.091657 | 0.091657 | 0 | 0.001878 | 0.25784 | 1,435 | 56 | 77 | 25.625 | 0.797183 | 0.066899 | 0 | 0.177778 | 0 | 0 | 0.166916 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.266667 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
849afa26cbaa85520b913e54c9ff304fa6ad9cd6 | 259 | py | Python | blink_cursor.py | macshaggy/curses-example | b6b9c03b508515cf35e0a5f24963a386568c8029 | [
"Unlicense"
] | 1 | 2022-02-15T08:24:28.000Z | 2022-02-15T08:24:28.000Z | blink_cursor.py | macshaggy/curses-example | b6b9c03b508515cf35e0a5f24963a386568c8029 | [
"Unlicense"
] | null | null | null | blink_cursor.py | macshaggy/curses-example | b6b9c03b508515cf35e0a5f24963a386568c8029 | [
"Unlicense"
] | null | null | null | import curses
screen = curses.initscr()
curses.curs_set(0)
screen.addstr(2, 2, "Hello, I disabled the cursor!")
screen.refresh()
screen.getch()
curses.curs_set(1)
screen.addstr(2, 2, "And now it's back on.")
screen.refresh()
screen.getch()
curses.endwin() | 17.266667 | 52 | 0.722008 | 42 | 259 | 4.404762 | 0.571429 | 0.108108 | 0.140541 | 0.151351 | 0.324324 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026087 | 0.111969 | 259 | 15 | 53 | 17.266667 | 0.778261 | 0 | 0 | 0.363636 | 0 | 0 | 0.192308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
849b5137ce5d8ff5f15fdccb8056c6c7fb8d7923 | 3,028 | py | Python | script/model.py | jayeolasegun/Term-Deposit-Classification | e1ce928814f26712813f600bc704183d5a9e603d | [
"CC0-1.0"
] | null | null | null | script/model.py | jayeolasegun/Term-Deposit-Classification | e1ce928814f26712813f600bc704183d5a9e603d | [
"CC0-1.0"
] | null | null | null | script/model.py | jayeolasegun/Term-Deposit-Classification | e1ce928814f26712813f600bc704183d5a9e603d | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Jayeola Gbenga
"""
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report, confusion_matrix, average_precision_score
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
#from imblearn.over_sampling import SMOTE
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
def classifiers_cv(Xtrain, Xtest, ytrain, ytest):
classifiers = {
"LogisiticRegression": LogisticRegression(),
"RandomForest": RandomForestClassifier(max_features = 10, max_depth = 50),
"MLP": MLPClassifier(),
"XGB": xgb.XGBClassifier(),
"SVC" : SVC(),
"DCT" : DecisionTreeClassifier()
}
for key, classifier in classifiers.items():
classifier.fit(Xtrain, ytrain)
y_pred = classifier.predict(Xtest)
training_score = cross_val_score(classifier, Xtrain, ytrain, cv=5)
#test_predict = cross_val_predict(classifier, pca_Xtest, pca_ytest, cv=5 )
conf_mat = confusion_matrix(ytest, y_pred)
score = f1_score(ytest, y_pred, average = 'weighted')
print("Classifiers: ", classifier.__class__.__name__, "Has a test score of", round(training_score.mean(), 2) * 100, "% accuracy score \n")
print("Confusion matrix: \n " , conf_mat, "\n")
print("f1 Score :", score , "\n")
print("Classification report \n", classification_report(ytest, y_pred), "\n\n")
def Logisitic(Xtrain, Xtest, ytrain, ytest):
lr = LogisticRegression()
lr.fit(Xtrain, ytrain)
log_reg_pred = lr.predict(Xtest)
evaluation(ytest,log_reg_pred)
return
def Xgb(Xtrain, Xtest, ytrain, ytest):
xg = xgb.XGBClassifier()
xg.fit(Xtrain, ytrain)
xg_pred = xg.predict(Xtest)
evaluation(ytest,xg_pred)
def randfor(Xtrain, Xtest, ytrain, ytest):
randf = RandomForestClassifier(max_features = 10, max_depth = 50)
randf.fit(Xtrain, ytrain)
randf_pred = randf.predict(Xtest)
evaluation(ytest,randf_pred)
def mlp(Xtrain, Xtest, ytrain, ytest):
mlp = MLPClassifier()
mlp.fit(Xtrain, ytrain)
mlp_pred = mlp.predict(Xtest)
evaluation(ytest,mlp_pred)
def svc(Xtrain, Xtest, ytrain, ytest):
svc = SVC()
svc.fit(Xtrain, ytrain)
svc_pred = svc.predict(Xtest)
evaluation(ytest,svc_pred)
def dtc(Xtrain, Xtest, ytrain, ytest):
dtc = DecisionTreeClassifier()
dtc.fit(Xtrain, ytrain)
dtc_pred = dtc.predict(Xtest)
evaluation(ytest,dtc_pred)
def evaluation(y_test,y_pred):
print(roc_auc_score(y_test, y_pred))
print(f1_score(y_test, y_pred))
print(recall_score(y_test, y_pred))
print(precision_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred)) | 30.897959 | 164 | 0.693857 | 379 | 3,028 | 5.327177 | 0.258575 | 0.027241 | 0.05894 | 0.076275 | 0.099059 | 0.0842 | 0.044577 | 0 | 0 | 0 | 0 | 0.00779 | 0.194518 | 3,028 | 98 | 165 | 30.897959 | 0.820008 | 0.05251 | 0 | 0 | 0 | 0 | 0.057733 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123077 | false | 0 | 0.123077 | 0 | 0.261538 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
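if __name__ == '__main__':
    # Illustrative driver (not part of the original module): exercises the
    # helpers above on a synthetic binary-classification problem.
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=400, random_state=0)
    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=0)
    classifiers_cv(Xtrain, Xtest, ytrain, ytest)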
849b79c678c32714b70be8d959429c3a52dedbd6 | 1,096 | py | Python | examples/vtrace/vtrace_main.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 208 | 2019-05-25T21:35:35.000Z | 2022-03-28T17:33:13.000Z | examples/vtrace/vtrace_main.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 30 | 2019-07-27T08:23:54.000Z | 2022-03-24T18:17:36.000Z | examples/vtrace/vtrace_main.py | cg31/cule | 6cd8e06059c3c3a193a4b2e0821dc1b9daeb726c | [
"BSD-3-Clause"
] | 27 | 2019-07-27T05:42:23.000Z | 2022-03-05T03:08:52.000Z | import os
import sys
_path = os.path.abspath(os.path.pardir)
if not _path in sys.path:
sys.path = [_path] + sys.path
from a2c.a2c_main import a2c_parser_options
from utils.launcher import main
def vtrace_parser_options(parser):
parser = a2c_parser_options(parser)
parser.add_argument('--c-hat', type=int, default=1.0, help='Trace cutting truncation level (default: 1.0)')
parser.add_argument('--rho-hat', type=int, default=1.0, help='Temporal difference truncation level (default: 1.0)')
parser.add_argument('--num-minibatches', type=int, default=16, help='number of mini-batches in the set of environments (default: 16)')
parser.add_argument('--num-steps-per-update', type=int, default=1, help='number of steps per update (default: 1)')
parser.add_argument('--benchmark', action='store_true', help='Special case: benchmark')
return parser
def vtrace_main():
if sys.version_info.major == 3:
from train import worker
else:
worker = None
sys.exit(main(vtrace_parser_options, worker))
if __name__ == '__main__':
vtrace_main()
| 33.212121 | 138 | 0.710766 | 162 | 1,096 | 4.62963 | 0.401235 | 0.064 | 0.113333 | 0.06 | 0.170667 | 0.170667 | 0.170667 | 0.109333 | 0 | 0 | 0 | 0.020563 | 0.156934 | 1,096 | 32 | 139 | 34.25 | 0.791126 | 0 | 0 | 0 | 0 | 0 | 0.278285 | 0.020073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.217391 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84a079370d636370f1b591d1c907976b21011209 | 740 | py | Python | test/Fitting/Helper/covariance.py | Marcel-Rodekamp/qcdanalysistools | 945c8201337ba0d52bc37267198d367bbe3e75e3 | [
"MIT"
] | null | null | null | test/Fitting/Helper/covariance.py | Marcel-Rodekamp/qcdanalysistools | 945c8201337ba0d52bc37267198d367bbe3e75e3 | [
"MIT"
] | null | null | null | test/Fitting/Helper/covariance.py | Marcel-Rodekamp/qcdanalysistools | 945c8201337ba0d52bc37267198d367bbe3e75e3 | [
"MIT"
] | null | null | null | import numpy as np
import qcdanalysistools as tools
Nt = 6
data = np.random.randn(1000,Nt)
# for a different analysis style, set Jackknife, Bootstrap, or Blocking params
params = tools.analysis.BootstrapParams(
t_data_size = data.shape[0],
t_num_subdatasets = 1000,
t_with_blocking = True,
t_num_blocks = 50)
#params = tools.analysis.JackknifeParams(
# t_data_size = data.shape[0],
# t_n = 1,
# t_random_leaveout = False,
# t_num_ran_indices = 106,
# t_with_blocking = True,
# t_num_blocks = 50)
#params = tools.analysis.BlockingParams(
# t_data_size = data.shape[0],
# t_num_blocks = 20)
#params = None
cov = tools.fitting.cov(data,params)
print("cov.shape =",cov.shape)
print("cov =",cov)
| 22.424242 | 66 | 0.685135 | 107 | 740 | 4.514019 | 0.439252 | 0.041408 | 0.118012 | 0.080745 | 0.335404 | 0.335404 | 0.335404 | 0.293996 | 0.198758 | 0.198758 | 0 | 0.037099 | 0.198649 | 740 | 32 | 67 | 23.125 | 0.777403 | 0.497297 | 0 | 0 | 0 | 0 | 0.061281 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84a2c67d501b3191d0fcf4e3e728aec5720bd5db | 351 | py | Python | reverse.py | abishak-cit/Python_Abi | 77008ac06d38a3838eb1b0e8408586584ad457a6 | [
"Apache-2.0"
] | null | null | null | reverse.py | abishak-cit/Python_Abi | 77008ac06d38a3838eb1b0e8408586584ad457a6 | [
"Apache-2.0"
] | null | null | null | reverse.py | abishak-cit/Python_Abi | 77008ac06d38a3838eb1b0e8408586584ad457a6 | [
"Apache-2.0"
] | null | null | null | #input of num
num = int(input())
#initial value of reverse
reverse_num = 0
#condition for entry using while loop
while(num>0):
    #remainder of the given number
    remainder = num % 10
# formula of the reverse
reverse_num = (reverse_num * 10) + remainder
#floor division of the num
num = num//10
#display the reverse
print(reverse_num)
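#worked example (added for clarity): num = 123
#   pass 1: remainder 3, reverse_num 3,   num 12
#   pass 2: remainder 2, reverse_num 32,  num 1
#   pass 3: remainder 1, reverse_num 321, num 0 -> prints 321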
| 23.4 | 47 | 0.689459 | 54 | 351 | 4.407407 | 0.444444 | 0.168067 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02952 | 0.22792 | 351 | 14 | 48 | 25.071429 | 0.848708 | 0.464387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84a5a559826a0b1cfc3399aa82d37b0bf832ff51 | 4,536 | py | Python | ppgan/datasets/photopen_dataset.py | pcwuyu/PaddleGAN | b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268 | [
"Apache-2.0"
] | 3 | 2022-02-20T11:40:50.000Z | 2022-02-20T11:46:29.000Z | ppgan/datasets/photopen_dataset.py | pcwuyu/PaddleGAN | b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268 | [
"Apache-2.0"
] | 38 | 2021-10-14T12:55:45.000Z | 2021-12-24T06:09:10.000Z | ppgan/datasets/photopen_dataset.py | pcwuyu/PaddleGAN | b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268 | [
"Apache-2.0"
] | 1 | 2021-09-22T09:29:19.000Z | 2021-09-22T09:29:19.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
from PIL import Image, ImageOps  # ImageOps.mirror is used by data_transform below
import paddle
import paddle.vision.transforms as T
from paddle.io import Dataset
import cv2
import random
from .builder import DATASETS
logger = logging.getLogger(__name__)
def data_transform(img, resize_w, resize_h, load_size=286, pos=[0, 0, 256, 256], flip=True, is_image=True):
if is_image:
resized = img.resize((resize_w, resize_h), Image.BICUBIC)
else:
resized = img.resize((resize_w, resize_h), Image.NEAREST)
croped = resized.crop((pos[0], pos[1], pos[2], pos[3]))
fliped = ImageOps.mirror(croped) if flip else croped
fliped = np.array(fliped) # transform to numpy array
expanded = np.expand_dims(fliped, 2) if len(fliped.shape) < 3 else fliped
transposed = np.transpose(expanded, (2, 0, 1)).astype('float32')
if is_image:
normalized = transposed / 255. * 2. - 1.
else:
normalized = transposed
return normalized
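# Illustrative shape/range check for data_transform (shown as a comment so the
# module stays import-safe for the dataset registry):
#   img = Image.new('RGB', (300, 300))
#   out = data_transform(img, 256, 256, pos=[0, 0, 256, 256], flip=False)
#   out.shape == (3, 256, 256), with float32 values in [-1, 1] when is_image=True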
@DATASETS.register()
class PhotoPenDataset(Dataset):
def __init__(self, content_root, load_size, crop_size):
super(PhotoPenDataset, self).__init__()
inst_dir = os.path.join(content_root, 'train_inst')
_, _, inst_list = next(os.walk(inst_dir))
self.inst_list = np.sort(inst_list)
self.content_root = content_root
self.load_size = load_size
self.crop_size = crop_size
def __getitem__(self, idx):
ins = Image.open(os.path.join(self.content_root, 'train_inst', self.inst_list[idx]))
img = Image.open(os.path.join(self.content_root, 'train_img', self.inst_list[idx].replace(".png", ".jpg")))
img = img.convert('RGB')
w, h = img.size
resize_w, resize_h = 0, 0
if w < h:
resize_w, resize_h = self.load_size, int(h * self.load_size / w)
else:
resize_w, resize_h = int(w * self.load_size / h), self.load_size
left = random.randint(0, resize_w - self.crop_size)
top = random.randint(0, resize_h - self.crop_size)
flip = False
img = data_transform(img, resize_w, resize_h, load_size=self.load_size,
pos=[left, top, left + self.crop_size, top + self.crop_size], flip=flip, is_image=True)
ins = data_transform(ins, resize_w, resize_h, load_size=self.load_size,
pos=[left, top, left + self.crop_size, top + self.crop_size], flip=flip, is_image=False)
return {'img': img, 'ins': ins, 'img_path': self.inst_list[idx]}
def __len__(self):
return len(self.inst_list)
def name(self):
return 'PhotoPenDataset'
@DATASETS.register()
class PhotoPenDataset_test(Dataset):
def __init__(self, content_root, load_size, crop_size):
super(PhotoPenDataset_test, self).__init__()
inst_dir = os.path.join(content_root, 'test_inst')
_, _, inst_list = next(os.walk(inst_dir))
self.inst_list = np.sort(inst_list)
self.content_root = content_root
self.load_size = load_size
self.crop_size = crop_size
def __getitem__(self, idx):
ins = Image.open(os.path.join(self.content_root, 'test_inst', self.inst_list[idx]))
w, h = ins.size
resize_w, resize_h = 0, 0
if w < h:
resize_w, resize_h = self.load_size, int(h * self.load_size / w)
else:
resize_w, resize_h = int(w * self.load_size / h), self.load_size
left = random.randint(0, resize_w - self.crop_size)
top = random.randint(0, resize_h - self.crop_size)
flip = False
ins = data_transform(ins, resize_w, resize_h, load_size=self.load_size,
pos=[left, top, left + self.crop_size, top + self.crop_size], flip=flip, is_image=False)
return {'ins': ins, 'img_path': self.inst_list[idx]}
def __len__(self):
return len(self.inst_list)
def name(self):
return 'PhotoPenDataset'
| 38.440678 | 115 | 0.659392 | 663 | 4,536 | 4.280543 | 0.239819 | 0.059197 | 0.054968 | 0.059197 | 0.585624 | 0.572234 | 0.572234 | 0.572234 | 0.547569 | 0.487667 | 0 | 0.012557 | 0.227513 | 4,536 | 117 | 116 | 38.769231 | 0.797374 | 0.133818 | 0 | 0.55814 | 0 | 0 | 0.030659 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104651 | false | 0 | 0.116279 | 0.046512 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84a8ab03d12084e27de0595a388320ee876c313d | 49,028 | py | Python | dnanexus/chip_workflow.py | strattan/test-merge2 | 2bc5a7c94fb06cff163ab3674dbb319e45976d17 | [
"MIT"
] | null | null | null | dnanexus/chip_workflow.py | strattan/test-merge2 | 2bc5a7c94fb06cff163ab3674dbb319e45976d17 | [
"MIT"
] | null | null | null | dnanexus/chip_workflow.py | strattan/test-merge2 | 2bc5a7c94fb06cff163ab3674dbb319e45976d17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''Instantiate the ENCODE ChIP-seq workflow'''
import sys
import logging
import re
import dxpy
import time
import pprint
EPILOG = '''Notes:
Examples:
# Build blank TF workflow from fastq to peaks
%(prog)s --target tf --name "ENCODE TF ChIP-seq (no reference)" --outf "/ChIP-seq/"
# Build blank histone workflow from fastq to peaks
%(prog)s --target histone --name "ENCODE Histone ChIP-seq (no reference)" --outf "/ChIP-seq/"
# Build a pre-configured GRCh38 histone workflow, requiring only data to run
%(prog)s --target histone \\
--name "ENCODE Histone ChIP-seq (GRCh38)" \\
--chrom_sizes "ENCODE Reference Files:/GRCh38/GRCh38_EBV.chrom.sizes" \\
--genomesize hs \\
--reference "ENCODE Reference Files:/GRCh38/GRCh38_no_alt_analysis_set_GCA_000001405.15.fa.gz" \\
--outf "/ChIP-seq/"
# Build and run a complete hg19 TF workflow, specifying all inputs.
%(prog)s --target tf \\
--chrom_sizes "ENCODE Reference Files:/hg19/male.hg19.chrom.sizes" \\
--genomesize hs \\
--reference "ENCODE Reference Files:/hg19/male.hg19.tar.gz" \\
--blacklist "ENCODE Reference Files:/hg19/blacklists/wgEncodeDacMapabilityConsensusExcludable.bed.gz" \\
--outf "ENCSR464DKE-hCTCF-chr21" \\
--title "ENCSR464DKE-hCTCF-chr21" \\
--rep1 "/ChIP-seq/test_data/ENCSR464DKE-hCTCF/R1-ENCFF921SED.chr21.fq.gz" \\
--rep2 "/ChIP-seq/test_data/ENCSR464DKE-hCTCF/R2-ENCFF812KOM.chr21.fq.gz" \\
--ctl1 "/ChIP-seq/test_data/ENCSR464DKE-hCTCF/C1-ENCFF690VPV.chr21.fq.gz" \\
--ctl2 "/ChIP-seq/test_data/ENCSR464DKE-hCTCF/C2-ENCFF357TLV.chr21.fq.gz" \\
--yes
# Build and run a complete hg19 TF workflow, with a unary control.
%(prog)s --target tf \\
--chrom_sizes "ENCODE Reference Files:/hg19/male.hg19.chrom.sizes" \\
--genomesize hs \\
--reference "ENCODE Reference Files:/hg19/male.hg19.tar.gz" \\
--blacklist "ENCODE Reference Files:/hg19/blacklists/wgEncodeDacMapabilityConsensusExcludable.bed.gz" \\
--outf "ENCSR000EEB-hMAFK-chr21" \\
--title "ENCSR000EEB-hMAFK-chr21" \\
--rep1 "/ChIP-seq/test_data/ENCSR000EEB-hMAFK/R1-ENCFF000XTT.chr21.fq.gz" \\
--rep2 "/ChIP-seq/test_data/ENCSR000EEB-hMAFK/R2-ENCFF000XTU.chr21.fq.gz" \\
--ctl1 "/ChIP-seq/test_data/ENCSR000EEB-hMAFK/C1-ENCFF000XSJ.chr21.fq.gz" \\
--yes
# Build and run a complete mm10 histone workflow, specifying all inputs.
%(prog)s --target histone \\
--chrom_sizes "ENCODE Reference Files:/mm10/male.mm10.chrom.sizes" \\
--genomesize mm \\
--reference "ENCODE Reference Files:/mm10/male.mm10.tar.gz" \\
--outf "ENCSR087PLZ-mH3K9ac-chr19" \\
--title "ENCSR087PLZ-mH3K9ac-chr19" \\
--rep1 "/ChIP-seq/test_data/ENCSR087PLZ-mH3K9ac/R1-ENCFF560GLI.chr19.fq.gz" \\
--rep2 "/ChIP-seq/test_data/ENCSR087PLZ-mH3K9ac/R2-ENCFF891NNX.chr19.fq.gz" \\
--ctl1 "/ChIP-seq/test_data/ENCSR087PLZ-mH3K9ac/C1-ENCFF069WCH.chr19.fq.gz" \\
--ctl2 "/ChIP-seq/test_data/ENCSR087PLZ-mH3K9ac/C2-ENCFF101KOM.chr19.fq.gz" \\
--yes
'''
WF = {
'default': {
'wf_name': 'chip_seq',
'wf_title': 'ChIP-seq',
'wf_description': 'ENCODE ChIP-seq Analysis Pipeline',
'run_idr': True
},
'histone': {
'wf_name': 'histone_chip_seq',
'wf_title': 'Histone ChIP-seq',
'wf_description': 'ENCODE histone ChIP-seq Analysis Pipeline',
'run_idr': False
},
'tf': {
'wf_name': 'tf_chip_seq',
'wf_title': 'TF ChIP-seq',
'wf_description': 'ENCODE TF ChIP-seq Analysis Pipeline',
'run_idr': True
}
}
DEFAULT_APPLET_PROJECT = dxpy.WORKSPACE_ID
DEFAULT_OUTPUT_PROJECT = dxpy.WORKSPACE_ID
DEFAULT_OUTPUT_FOLDER = '/analysis_run'
MAPPING_APPLET_NAME = 'encode_map'
FILTER_QC_APPLET_NAME = 'filter_qc'
XCOR_APPLET_NAME = 'xcor'
XCOR_ONLY_APPLET_NAME = 'xcor_only'
SPP_APPLET_NAME = 'spp'
POOL_APPLET_NAME = 'pool'
PSEUDOREPLICATOR_APPLET_NAME = 'pseudoreplicator'
ENCODE_SPP_APPLET_NAME = 'encode_spp'
ENCODE_MACS2_APPLET_NAME = 'encode_macs2'
# IDR_APPLET_NAME='idr'
IDR2_APPLET_NAME = 'idr2'
ENCODE_IDR_APPLET_NAME = 'encode_idr'
OVERLAP_PEAKS_APPLET_NAME = 'overlap_peaks'
ACCESSION_ANALYSIS_APPLET_NAME = 'accession_analysis'
APPLETS = {}
def get_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
def t_or_f(arg):
ua = str(arg).upper()
if ua == 'TRUE'[:len(ua)]:
return True
elif ua == 'FALSE'[:len(ua)]:
return False
else:
            raise argparse.ArgumentTypeError("Cannot parse %s to boolean" % (arg))
parser.add_argument(
'--target',
help="ChIP target type (histone or tf)",
required=True)
parser.add_argument(
'--debug',
help="Print debug messages and hold jobs for ssh",
default=False, action='store_true')
parser.add_argument(
'--reference',
help="Reference tar to map to")
parser.add_argument(
'--chrom_sizes',
help="chrom.sizes file for bedToBigBed")
parser.add_argument(
'--genomesize',
help="Genome size string for MACS2, e.g. mm or hs")
parser.add_argument(
'--narrowpeak_as',
help=".as file for bed to bigbed",
default='ENCODE Reference Files:narrowPeak.as')
parser.add_argument(
'--gappedpeak_as',
help=".as file for bed to bigbed",
default='ENCODE Reference Files:gappedPeak.as')
parser.add_argument(
'--broadpeak_as',
help=".as file for bed to bigbed",
default='ENCODE Reference Files:broadPeak.as')
parser.add_argument(
'--rep1',
help="Replicate 1 fastq or tagAlign",
default=None, nargs='*')
parser.add_argument(
'--rep2',
help="Replicate 2 fastq or tagAlign",
default=None, nargs='*')
parser.add_argument(
'--ctl1',
help="Control for replicate 1 fastq or tagAlign",
default=None, nargs='*')
parser.add_argument(
'--ctl2',
help="Control for replicate 2 fastq or tagAlign",
default=None, nargs='*')
parser.add_argument(
'--unary_control',
help="Force one control for both reps",
default=False, action='store_true')
parser.add_argument(
'--simplicate_experiment',
help="Force single replicate (rep1)",
default=False, action='store_true')
parser.add_argument(
'--outp',
help="Output project name or ID",
default=DEFAULT_OUTPUT_PROJECT)
parser.add_argument(
'--outf',
help="Output folder name or ID",
default=DEFAULT_OUTPUT_FOLDER)
parser.add_argument(
'--use_existing_folders',
help="Reuse existing folders even if results have already been saved there",
default=False, action='store_true')
parser.add_argument(
'--name',
help="Name for new workflow")
parser.add_argument(
'--title',
help="Title for new workflow")
parser.add_argument(
'--description',
help="Description for new workflow")
parser.add_argument(
'--applets',
help="Name of project containing applets",
default=DEFAULT_APPLET_PROJECT)
parser.add_argument(
'--nomap',
help='Given tagAligns, skip to peak calling',
default=False, action='store_true')
parser.add_argument(
'--maponly',
help='Given fastqs, only map and calculate xcor but no peaks',
default=False, action='store_true')
parser.add_argument(
'--rep1pe',
help='Specify if rep1 is PE (required only if --nomap)',
type=t_or_f, default=None)
parser.add_argument(
'--rep2pe',
help='Specify if rep2 is PE (required only if --nomap)',
type=t_or_f, default=None)
parser.add_argument(
'--blacklist',
help="Blacklist to filter IDR peaks")
parser.add_argument(
'--yes',
help='Run the workflow',
default=False, action='store_true')
parser.add_argument(
'--spp_version',
help="Version string for spp",
default="1.14")
parser.add_argument(
'--pipeline_version',
help="Version string for ENCODE pipeline",
default="1.2")
parser.add_argument(
'--accession',
help='Automatically accession the results to the ENCODE Portal',
default=False, action='store_true')
parser.add_argument('--fqcheck', help="If --accession, check that analysis is based on latest fastqs on ENCODEd", type=t_or_f, default=None)
parser.add_argument('--skip_control', help="If --accession, accession no control files or metadata", type=t_or_f, default=None)
parser.add_argument('--force_patch', help="Force patching metadata for existing files", type=t_or_f, default=None)
parser.add_argument('--scrub', help="Also produce bams scrubbed of sequence information", type=t_or_f, default=None)
parser.add_argument('--fragment_length',
type=int,
help="Instead of calculating fragment length from xcor, use this fragment length",
default=None)
# parser.add_argument('--idr', help='Report peaks with and without IDR analysis', default=False, action='store_true')
# parser.add_argument('--idronly', help='Only report IDR peaks', default=None, action='store_true')
# parser.add_argument('--idrversion', help='Version of IDR to use (1 or 2)', default="2")
args = parser.parse_args()
global DEBUG
DEBUG = args.debug
if DEBUG:
logging.basicConfig(
format='%(levelname)s:%(message)s',
level=logging.DEBUG)
logging.debug("Debug logging ON")
    else:  # use the default logging level
logging.basicConfig(
format='%(levelname)s:%(message)s')
logging.debug("rep1 is: %s" % (args.rep1))
return args
def blank_workflow(args):
    # Placeholder stub; blank workflows are currently assembled inline in main().
    return


def map_and_filter(infile, args):
    # Placeholder stub for a future refactor of the mapping superstages.
    if not infile:
        return {None}
    stages = {None}
    return stages


def call_peaks(expvsctl, args):
    # Placeholder stub for a future refactor of the peak-calling stages.
    if not expvsctl:
        return {None}
    stages = {None}
    return stages
def resolve_project(identifier, privs='r'):
project = dxpy.find_one_project(
name=identifier,
level='VIEW',
name_mode='exact',
return_handler=True,
zero_ok=True)
if project is None:
try:
project = dxpy.get_handler(identifier)
except:
logging.error(
'Could not find a unique project with name or id %s'
% (identifier))
raise ValueError(identifier)
logging.debug(
'Project %s access level is %s'
% (project.name, project.describe()['level']))
if privs == 'w' and project.describe()['level'] == 'VIEW':
logging.error('Output project %s is read-only' % (identifier))
raise ValueError(identifier)
return project
def create_folder(project, folder_name):
if not folder_name.startswith('/'):
folder_name = '/' + folder_name
try:
project.new_folder(folder_name, parents=True)
except:
logging.error(
"Cannot create folder %s in project %s"
% (folder_name, project.name))
return None
else:
logging.info(
"New folder %s created in project %s"
% (folder_name, project.name))
return folder_name
def resolve_folder(project, identifier):
if not identifier.startswith('/'):
identifier = '/' + identifier
try:
project.list_folder(identifier)
except:
return None
else:
return identifier
def resolve_file(identifier):
logging.debug("resolve_file: %s" % (identifier))
if not identifier:
return None
m = re.match(r'''^([\w\-\ \.]+):([\w\-\ /\.]+)''', identifier)
if m:
project_identifier = m.group(1)
file_identifier = m.group(2)
else:
logging.debug("Defaulting to the current project")
project_identifier = dxpy.WORKSPACE_ID
file_identifier = identifier
project = resolve_project(project_identifier)
logging.debug("Got project %s" % (project.name))
logging.debug("Now looking for file %s" % (file_identifier))
m = re.match(r'''(^[\w\-\ /\.]+)/([\w\-\ \.]+)''', file_identifier)
if m:
folder_name = m.group(1)
if not folder_name.startswith('/'):
folder_name = '/' + folder_name
recurse = False
file_name = m.group(2)
else:
folder_name = '/'
recurse = True
file_name = file_identifier
logging.debug(
"Looking for file %s in folder %s" % (file_name, folder_name))
try:
file_handler = dxpy.find_one_data_object(
name=file_name,
folder=folder_name,
project=project.get_id(),
recurse=recurse,
more_ok=False,
zero_ok=False,
return_handler=True)
except dxpy.DXSearchError:
logging.debug(
'%s not found in project %s folder %s. Trying as file ID'
% (file_name, project.get_id(), folder_name))
file_handler = None
except:
raise
if not file_handler:
try:
file_handler = dxpy.DXFile(dxid=identifier, mode='r')
except dxpy.DXError:
logging.debug('%s not found as a dxid' % (identifier))
logging.warning('Could not find file %s.' % (identifier))
file_handler = None
except:
raise
if file_handler:
logging.info(
"Resolved file identifier %s to %s"
% (identifier, file_handler.get_id()))
return file_handler
else:
logging.warning("Failed to resolve file identifier %s" % (identifier))
return None
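# resolve_file accepts identifiers in several forms, for example (illustrative,
# taken from the usage examples in EPILOG above):
#   "ENCODE Reference Files:/hg19/male.hg19.chrom.sizes"  -> project:/folder/file
#   "/ChIP-seq/test_data/ENCSR464DKE-hCTCF/R1-ENCFF921SED.chr21.fq.gz"  -> path in the current project
#   a bare DNAnexus file ID (dxid), tried last if the name lookup fails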
def find_applet_by_name(applet_name, applets_project_id):
'''Looks up an applet by name in the project that holds tools.
From Joe Dale's code.'''
cached = '*'
if (applet_name, applets_project_id) not in APPLETS:
found = dxpy.find_one_data_object(
classname="applet",
name=applet_name,
project=applets_project_id,
zero_ok=False,
more_ok=False,
return_handler=True)
APPLETS[(applet_name, applets_project_id)] = found
cached = ''
logging.info(
cached + "Resolved applet %s to %s"
% (applet_name, APPLETS[(applet_name, applets_project_id)].get_id()))
return APPLETS[(applet_name, applets_project_id)]
def main():
args = get_args()
blank_workflow = not (args.rep1 or args.rep2 or args.ctl1 or args.ctl2)
if not blank_workflow:
assert args.rep1, "Reads are required for rep1"
assert args.ctl1, "Reads are required for ctl1"
        assert not args.nomap or args.rep1pe is not None, "With --nomap, endedness of rep1 must be specified with --rep1pe"
        assert not args.nomap or (not args.rep2 or args.rep2pe is not None), "With --nomap, endedness of rep2 must be specified with --rep2pe"
if not args.target:
target_type = 'default' # default
else:
target_type = args.target.lower()
if target_type not in WF.keys():
        logging.error('Target type %s is not recognized' % (target_type))
sys.exit(2)
output_project = resolve_project(args.outp, 'w')
logging.debug('Found output project %s' % (output_project.name))
applet_project = resolve_project(args.applets, 'r')
logging.debug('Found applet project %s' % (applet_project.name))
existing_folder = resolve_folder(output_project, args.outf)
if not existing_folder:
output_folder = create_folder(output_project, args.outf)
elif args.use_existing_folders:
output_folder = existing_folder
else:
assert (existing_folder and args.use_existing_folders), 'Output folder %s exists but --use_existing_folders is %s' % (existing_folder, args.use_existing_folders)
logging.debug('Using output folder %s' % (output_folder))
workflow = dxpy.new_dxworkflow(
name=args.name or WF[target_type]['wf_name'],
title=args.title or WF[target_type]['wf_title'],
description=args.description or WF[target_type]['wf_description'],
project=output_project.get_id(),
folder=output_folder,
properties={'pipeline_version': str(args.pipeline_version)})
unary_control = args.unary_control or (not blank_workflow and args.ctl2 is None)
simplicate_experiment = args.simplicate_experiment or (args.rep1 and not args.rep2)
if not args.genomesize:
genomesize = None
else:
genomesize = args.genomesize
if not args.chrom_sizes:
chrom_sizes = None
else:
chrom_sizes = dxpy.dxlink(resolve_file(args.chrom_sizes))
if not args.blacklist:
blacklist = None
else:
blacklist = dxpy.dxlink(resolve_file(args.blacklist))
run_idr = WF[target_type]['run_idr']
if not args.nomap:
# a "superstage" is just a dict with a name, name(s) of input files,
# and then names and id's of stages that process that input
# each superstage here could be implemented as a stage in a more
# abstract workflow. That stage would then call the various applets
# that are separate
# stages here.
mapping_superstages = [ # the order of this list is important in that
{'name': 'Rep1', 'input_args': args.rep1}
]
if not simplicate_experiment:
mapping_superstages.append(
{'name': 'Rep2', 'input_args': args.rep2})
mapping_superstages.append(
{'name': 'Ctl1', 'input_args': args.ctl1})
if not unary_control and not simplicate_experiment:
mapping_superstages.append(
{'name': 'Ctl2', 'input_args': args.ctl2})
mapping_applet = find_applet_by_name(
MAPPING_APPLET_NAME, applet_project.get_id())
# mapping_output_folder = resolve_folder(
# output_project, output_folder + '/' + mapping_applet.name)
mapping_output_folder = mapping_applet.name
reference_tar = resolve_file(args.reference)
filter_qc_applet = find_applet_by_name(
FILTER_QC_APPLET_NAME, applet_project.get_id())
filter_qc_output_folder = mapping_output_folder
xcor_applet = find_applet_by_name(
XCOR_APPLET_NAME, applet_project.get_id())
xcor_output_folder = mapping_output_folder
# in the first pass create the mapping stage id's so we can use JBOR's
# to link inputs
for mapping_superstage in mapping_superstages:
superstage_name = mapping_superstage.get('name')
mapped_stage_id = workflow.add_stage(
mapping_applet,
name='Map %s' % (superstage_name),
folder=mapping_output_folder
)
mapping_superstage.update({'map_stage_id': mapped_stage_id})
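        # Each stage id created above can now be referenced by later stages as
        # a JBOR (job-based object reference); illustrative shape:
        #   dxpy.dxlink({'stage': mapped_stage_id, 'outputField': 'mapped_reads'})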
# in the second pass populate the stage inputs and build other stages
rep1_stage_id = next(ss.get('map_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1')
for mapping_superstage in mapping_superstages:
superstage_name = mapping_superstage.get('name')
superstage_id = mapping_superstage.get('map_stage_id')
if mapping_superstage.get('input_args') or blank_workflow:
mapping_stage_input = {}
if superstage_name != "Rep1":
mapping_stage_input.update(
{'reference_tar': dxpy.dxlink(
{'stage': rep1_stage_id,
'inputField': 'reference_tar'})})
else:
if args.reference:
mapping_stage_input.update(
{'reference_tar': dxpy.dxlink(
reference_tar.get_id())})
if not blank_workflow:
                    for arg_index, input_arg in enumerate(mapping_superstage['input_args']):  # read pairs assumed to be in order read1, read2
reads = dxpy.dxlink(resolve_file(input_arg).get_id())
mapping_stage_input.update({'reads%d' %(arg_index+1): reads})
# this is now done in the first pass loop above
# mapped_stage_id = workflow.add_stage(
# mapping_applet,
# name='Map %s' %(superstage_name),
# folder=mapping_output_folder,
# stage_input=mapping_stage_input
# )
# mapping_superstage.update({'map_stage_id': mapped_stage_id})
workflow.update_stage(superstage_id, stage_input=mapping_stage_input)
filter_qc_stage_input = {
'input_bam': dxpy.dxlink({'stage': superstage_id, 'outputField': 'mapped_reads'}),
'paired_end': dxpy.dxlink({'stage': superstage_id, 'outputField': 'paired_end'})
}
if args.scrub is not None:
filter_qc_stage_input.update({'scrub': args.scrub})
filter_qc_stage_id = workflow.add_stage(
filter_qc_applet,
name='Filter_QC %s' %(superstage_name),
folder=filter_qc_output_folder,
stage_input=filter_qc_stage_input
)
mapping_superstage.update({'filter_qc_stage_id': filter_qc_stage_id})
xcor_stage_id = workflow.add_stage(
xcor_applet,
name='Xcor %s' %(superstage_name),
folder=xcor_output_folder,
stage_input={
'input_bam': dxpy.dxlink({'stage': filter_qc_stage_id, 'outputField': 'filtered_bam'}),
'paired_end': dxpy.dxlink({'stage': filter_qc_stage_id, 'outputField': 'paired_end'}),
'spp_version': args.spp_version
}
)
mapping_superstage.update({'xcor_stage_id': xcor_stage_id})
exp_rep1_ta = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
'outputField': 'tagAlign_file'})
exp_rep1_cc = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
'outputField': 'CC_scores_file'})
rep1_paired_end = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
'outputField': 'paired_end'})
if not simplicate_experiment:
exp_rep2_ta = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
'outputField': 'tagAlign_file'})
exp_rep2_cc = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
'outputField': 'CC_scores_file'})
rep2_paired_end = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
'outputField': 'paired_end'})
else:
exp_rep2_ta = None
exp_rep2_cc = None
rep2_paired_end = None
ctl_rep1_ta = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Ctl1'),
'outputField': 'tagAlign_file'})
if not unary_control and not simplicate_experiment:
ctl_rep2_ta = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Ctl2'),
'outputField': 'tagAlign_file'})
else:
ctl_rep2_ta = None
else: # skipped the mapping, so just bring in the inputs from arguments
if not blank_workflow:
exp_rep1_ta = dxpy.dxlink(resolve_file(args.rep1[0]).get_id())
exp_rep1_ta_desc = dxpy.describe(exp_rep1_ta)
exp_rep1_mapping_analysis_id = dxpy.describe(exp_rep1_ta_desc['createdBy']['job'])['analysis']
exp_rep1_mapping_analysis = dxpy.describe(exp_rep1_mapping_analysis_id)
rep1_xcor_stage_description = next(
stage
for stage in exp_rep1_mapping_analysis.get('stages')
if stage['execution']['executableName'] == 'xcor')
exp_rep1_cc = rep1_xcor_stage_description['execution']['output']['CC_scores_file']
if args.rep1pe is None:
print("Inferring rep1 PE-ness from analysis")
rep1_paired_end = rep1_xcor_stage_description['execution']['output']['paired_end']
else:
rep1_paired_end = args.rep1pe
if not simplicate_experiment:
exp_rep2_ta = dxpy.dxlink(resolve_file(args.rep2[0]).get_id())
exp_rep2_ta_desc = dxpy.describe(exp_rep2_ta)
exp_rep2_mapping_analysis_id = dxpy.describe(exp_rep2_ta_desc['createdBy']['job'])['analysis']
exp_rep2_mapping_analysis = dxpy.describe(exp_rep2_mapping_analysis_id)
rep2_xcor_stage_description = next(
stage
for stage in exp_rep2_mapping_analysis.get('stages')
if stage['execution']['executableName'] == 'xcor')
exp_rep2_cc = rep2_xcor_stage_description['execution']['output']['CC_scores_file']
if args.rep2pe is None:
print("Inferring rep2 PE-ness from analysis")
                    rep2_paired_end = rep2_xcor_stage_description['execution']['output']['paired_end']
else:
                    rep2_paired_end = args.rep2pe
else:
exp_rep2_ta = None
exp_rep2_cc = None
rep2_paired_end = None
ctl_rep1_ta = dxpy.dxlink(resolve_file(args.ctl1[0]).get_id())
if not unary_control and not simplicate_experiment:
ctl_rep2_ta = dxpy.dxlink(resolve_file(args.ctl2[0]).get_id())
else:
ctl_rep2_ta = None
else: # blank workflow
ctl_rep1_ta = None
ctl_rep2_ta = None
# here we need to calculate the cc scores files, because we're only
# being supplied tagAligns
# if we had mapped everything above we'd already have a handle to
# the cc file
xcor_only_applet = find_applet_by_name(XCOR_ONLY_APPLET_NAME, applet_project.get_id())
# xcor_output_folder = resolve_folder(output_project, output_folder + '/' + xcor_only_applet.name)
xcor_output_folder = xcor_only_applet.name
xcor_only_stages = []
rep1_xcor_input = {'spp_version': args.spp_version}
if args.rep1pe is not None:
rep1_xcor_input.update({'paired_end': args.rep1pe})
exp_rep1_cc_stage_id = workflow.add_stage(
xcor_only_applet,
name="Rep1 cross-correlation",
folder=xcor_output_folder,
stage_input=rep1_xcor_input
)
xcor_only_stages.append({'xcor_only_rep1_id': exp_rep1_cc_stage_id})
exp_rep1_cc = dxpy.dxlink(
{'stage': exp_rep1_cc_stage_id,
'outputField': 'CC_scores_file'})
rep1_paired_end = dxpy.dxlink(
{'stage': exp_rep1_cc_stage_id,
'outputField': 'paired_end'})
exp_rep1_ta = dxpy.dxlink(
{'stage': exp_rep1_cc_stage_id,
'inputField': 'input_tagAlign'})
if not simplicate_experiment:
rep2_xcor_input = {'spp_version': args.spp_version}
if args.rep2pe is not None:
rep2_xcor_input.update({'paired_end': args.rep2pe})
exp_rep2_cc_stage_id = workflow.add_stage(
xcor_only_applet,
name="Rep2 cross-correlation",
folder=xcor_output_folder,
stage_input=rep2_xcor_input
)
xcor_only_stages.append({'xcor_only_rep2_id': exp_rep2_cc_stage_id})
exp_rep2_cc = dxpy.dxlink(
{'stage': exp_rep2_cc_stage_id,
'outputField': 'CC_scores_file'})
rep2_paired_end = dxpy.dxlink(
{'stage': exp_rep2_cc_stage_id,
'outputField': 'paired_end'})
exp_rep2_ta = dxpy.dxlink(
{'stage': exp_rep2_cc_stage_id,
'inputField': 'input_tagAlign'})
else:
exp_rep2_cc = None
exp_rep2_ta = None
rep2_paired_end = None
if not args.maponly:
encode_macs2_applet = find_applet_by_name(ENCODE_MACS2_APPLET_NAME, applet_project.get_id())
encode_macs2_stages = []
# peaks_output_folder = resolve_folder(output_project, output_folder + '/' + encode_macs2_applet.name)
peaks_output_folder = encode_macs2_applet.name
# for simplicate experiments and/or unary controls, some of the ta inputs
# will have the value None
macs2_stage_input_mapping = {
'rep1_ta' : exp_rep1_ta,
'rep2_ta' : exp_rep2_ta,
'ctl1_ta': ctl_rep1_ta,
'ctl2_ta' : ctl_rep2_ta,
'rep1_xcor' : exp_rep1_cc,
'rep2_xcor' : exp_rep2_cc,
'rep1_paired_end': rep1_paired_end,
'rep2_paired_end': rep2_paired_end,
'narrowpeak_as': dxpy.dxlink(resolve_file(args.narrowpeak_as)),
'gappedpeak_as': dxpy.dxlink(resolve_file(args.gappedpeak_as)),
'broadpeak_as': dxpy.dxlink(resolve_file(args.broadpeak_as)),
'genomesize': genomesize,
'chrom_sizes': chrom_sizes
}
# have to prune out any arguments with value None because DX will error
# with arguments with null values
        macs2_stage_input = {k: v for k, v in macs2_stage_input_mapping.items() if v is not None}
encode_macs2_stage_id = workflow.add_stage(
encode_macs2_applet,
name='ENCODE Peaks',
folder=peaks_output_folder,
stage_input=macs2_stage_input
)
encode_macs2_stages.append({'name': 'ENCODE Peaks', 'stage_id': encode_macs2_stage_id})
if run_idr:
encode_spp_applet = find_applet_by_name(ENCODE_SPP_APPLET_NAME, applet_project.get_id())
encode_spp_stages = []
# idr_peaks_output_folder = resolve_folder(output_project, output_folder + '/' + encode_spp_applet.name)
idr_peaks_output_folder = encode_spp_applet.name
PEAKS_STAGE_NAME = 'SPP Peaks'
# for simplicate experiments and/or unary controls, some of the ta inputs
# will have the value None
peaks_stage_input_mapping = {
'rep1_ta' : exp_rep1_ta,
'rep2_ta' : exp_rep2_ta,
'ctl1_ta': ctl_rep1_ta,
'ctl2_ta' : ctl_rep2_ta,
'rep1_xcor' : exp_rep1_cc,
'rep2_xcor' : exp_rep2_cc,
'rep1_paired_end': rep1_paired_end,
'rep2_paired_end': rep2_paired_end,
'as_file': dxpy.dxlink(resolve_file(args.narrowpeak_as)),
'idr_peaks': True,
'spp_version': args.spp_version
}
if chrom_sizes:
peaks_stage_input_mapping.update({'chrom_sizes': chrom_sizes})
else:
peaks_stage_input_mapping.update({'chrom_sizes': dxpy.dxlink({'stage': encode_macs2_stage_id, 'inputField': 'chrom_sizes'})})
# have to prune out any arguments with value None because DX will error
# with arguments with null values
            peaks_stage_input = {k: v for k, v in peaks_stage_input_mapping.items() if v is not None}
encode_spp_stage_id = workflow.add_stage(
encode_spp_applet,
name=PEAKS_STAGE_NAME,
folder=idr_peaks_output_folder,
stage_input=peaks_stage_input
)
encode_spp_stages.append({'name': PEAKS_STAGE_NAME, 'stage_id': encode_spp_stage_id})
# TODO here I think we should abstract out all the IDR to one step like the two peak-calling steps
idr_applet = find_applet_by_name(IDR2_APPLET_NAME, applet_project.get_id())
encode_idr_applet = find_applet_by_name(ENCODE_IDR_APPLET_NAME, applet_project.get_id())
idr_stages = []
# idr_output_folder = resolve_folder(output_project, output_folder + '/' + idr_applet.name)
idr_output_folder = idr_applet.name
if (args.rep1 and args.ctl1 and args.rep2) or blank_workflow or simplicate_experiment:
idr_stage_id = workflow.add_stage(
idr_applet,
name='IDR Rep 1 Self-pseudoreplicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1pr1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1pr2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1_peaks'})
}
)
idr_stages.append({'name': 'IDR Rep 1 Self-pseudoreplicates', 'stage_id': idr_stage_id})
if not simplicate_experiment:
idr_stage_id = workflow.add_stage(
idr_applet,
name='IDR Rep 2 Self-pseudoreplicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep2pr1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep2pr2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep2_peaks'})
}
)
idr_stages.append({'name': 'IDR Rep 2 Self-pseudoreplicates', 'stage_id': idr_stage_id})
idr_stage_id = workflow.add_stage(
idr_applet,
name='IDR True Replicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooled_peaks'})
}
)
idr_stages.append({'name': 'IDR True Replicates', 'stage_id': idr_stage_id})
idr_stage_id = workflow.add_stage(
idr_applet,
name='IDR Pooled Pseudoreplicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooledpr1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooledpr2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooled_peaks'})
}
)
idr_stages.append({'name': 'IDR Pooled Pseudoreplicates', 'stage_id': idr_stage_id})
final_idr_stage_input = {
'r1pr_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Rep 1 Self-pseudoreplicates'),
'outputField': 'IDR_peaks'}),
'rep1_ta': exp_rep1_ta,
'rep1_xcor': exp_rep1_cc,
'paired_end': rep1_paired_end, # applies to replicated experiments, too
'as_file': dxpy.dxlink(resolve_file(args.narrowpeak_as)),
'rep1_signal': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == 'ENCODE Peaks'),
'outputField': 'rep1_fc_signal'})
}
if not simplicate_experiment:
final_idr_stage_input.update({
'reps_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR True Replicates'),
'outputField': 'IDR_peaks'}),
'r2pr_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Rep 2 Self-pseudoreplicates'),
'outputField': 'IDR_peaks'}),
'pooledpr_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Pooled Pseudoreplicates'),
'outputField': 'IDR_peaks'}),
'rep2_ta': exp_rep2_ta,
'rep2_xcor': exp_rep2_cc,
'rep2_signal': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == 'ENCODE Peaks'),
'outputField': 'rep2_fc_signal'}),
'pooled_signal': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == 'ENCODE Peaks'),
'outputField': 'pooled_fc_signal'})
})
if blacklist:
final_idr_stage_input.update({'blacklist': blacklist})
if chrom_sizes:
final_idr_stage_input.update({'chrom_sizes': chrom_sizes})
else:
final_idr_stage_input.update({'chrom_sizes': dxpy.dxlink({'stage': encode_spp_stage_id, 'inputField': 'chrom_sizes'})})
final_idr_stage_id = workflow.add_stage(
encode_idr_applet,
name='Final IDR peak calls',
folder=idr_output_folder,
stage_input=final_idr_stage_input,
)
idr_stages.append({'name': 'Final IDR peak calls', 'stage_id': final_idr_stage_id})
if target_type == 'histone':
PEAKS_STAGE_NAME = "ENCODE Peaks"
overlap_peaks_applet = find_applet_by_name(OVERLAP_PEAKS_APPLET_NAME, applet_project.get_id())
overlap_peaks_stages = []
for peaktype in ['narrowpeaks', 'gappedpeaks', 'broadpeaks']:
if peaktype == 'narrowpeaks':
as_file = dxpy.dxlink(resolve_file(args.narrowpeak_as))
peak_type_extension = 'narrowPeak'
elif peaktype == 'gappedpeaks':
as_file = dxpy.dxlink(resolve_file(args.gappedpeak_as))
peak_type_extension = 'gappedPeak'
elif peaktype == 'broadpeaks':
as_file = dxpy.dxlink(resolve_file(args.broadpeak_as))
peak_type_extension = 'broadPeak'
overlap_peaks_stage_input = {
'rep1_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1_%s' % (peaktype)}),
'rep2_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep2_%s' % (peaktype)}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooled_%s' % (peaktype)}),
'pooledpr1_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooledpr1_%s' % (peaktype)}),
'pooledpr2_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooledpr2_%s' % (peaktype)}),
'rep1_ta': exp_rep1_ta,
'rep1_xcor': exp_rep1_cc,
'rep2_ta': exp_rep2_ta,
'rep2_xcor': exp_rep2_cc,
'paired_end': rep1_paired_end, # applies to replicated experiments, too
'as_file': as_file,
'peak_type': peak_type_extension,
'prefix': 'final',
'rep1_signal': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1_fc_signal'}),
'rep2_signal': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep2_fc_signal'}),
'pooled_signal': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'pooled_fc_signal'})
} if not simplicate_experiment else {
'rep1_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1pr1_%s' % (peaktype)}),
'rep2_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1pr2_%s' % (peaktype)}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1_%s' % (peaktype)}),
'rep1_ta': exp_rep1_ta,
'rep1_xcor': exp_rep1_cc,
'paired_end': rep1_paired_end, # applies to replicated experiments, too
'as_file': as_file,
'peak_type': peak_type_extension,
'prefix': 'final',
'rep1_signal': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_macs2_stages if ss['name'] == PEAKS_STAGE_NAME),
'outputField': 'rep1_fc_signal'})
}
if chrom_sizes:
overlap_peaks_stage_input.update({'chrom_sizes': chrom_sizes})
else:
overlap_peaks_stage_input.update({'chrom_sizes': dxpy.dxlink({'stage': encode_macs2_stage_id, 'inputField': 'chrom_sizes'})})
overlap_peaks_stage_id = workflow.add_stage(
overlap_peaks_applet,
name='Final %s' % (peaktype),
folder=peaks_output_folder,
stage_input=overlap_peaks_stage_input
)
overlap_peaks_stages.append({'name': 'Final %s' %(peaktype), 'stage_id': overlap_peaks_stage_id})
if args.yes:
if args.debug:
analysis = workflow.run({}, folder=output_folder, priority='high', debug={'debugOn': ['AppInternalError', 'AppError']}, delay_workspace_destruction=True, allow_ssh=['*'])
else:
analysis = workflow.run({}, folder=output_folder, priority='normal')
analysis.set_properties({
"target_type": target_type,
"unreplicated_experiment": str(simplicate_experiment),
"unary_control": str(unary_control)
})
print("Running %s as %s" % (analysis.name, analysis.get_id()))
if args.accession:
accession_analysis_applet = find_applet_by_name(ACCESSION_ANALYSIS_APPLET_NAME, applet_project.get_id())
accession_output_folder = '/' + accession_analysis_applet.name
accession_job_input = {
'analysis_ids': [analysis.get_id()],
'wait_on_files': []
}
if args.fqcheck is not None:
accession_job_input.update({'fqcheck' : args.fqcheck})
if args.skip_control is not None:
accession_job_input.update({'skip_control' : args.skip_control})
if args.force_patch is not None:
accession_job_input.update({'force_patch': args.force_patch})
# assert accession_stage_input['wait_on_files'], "ERROR: workflow has no wait_on_files defined, so --accession is not supported."
time.sleep(5)
max_retries = 10
retries = max_retries
while retries:
try:
accession_job = accession_analysis_applet.run(
accession_job_input,
name='Accession %s' % (analysis.name),
folder=accession_output_folder,
depends_on=analysis.describe()['dependsOn']
)
except Exception as e:
logging.error("%s launching auto-accession ... %d retries left" % (e, retries))
time.sleep(5)
retries -= 1
continue
else:
logging.info("Auto-accession will run as %s %s" % (accession_job.name, accession_job.get_id()))
break
else:
logging.error("Auto-accession failed with %s" % ())
if __name__ == '__main__':
main()
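# Usage sketch (flag spellings inferred from the argument checks above; the
# actual CLI is defined in get_args and may differ):
#   python chip_workflow.py --target histone --rep1 r1.fq.gz --ctl1 c1.fq.gz --yes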
# ==== research/cv/centernet_det/infer/sdk/main.py | leelige/mindspore | Apache-2.0 ====
#!/usr/bin/env python
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""
SDK inference entry point
"""
import argparse
import json
import os
import time
import copy
import cv2
import numpy as np
from api.infer import SdkApi
from api.visual import visual_image
from api.postprocess import data_process
from api.image import get_affine_transform
import MxpiDataType_pb2 as MxpiDataType
from StreamManagerApi import StringVector
from config import config as cfg
from eval.eval_by_sdk import cal_acc
def parser_args():
"""
configuration parameter, input from outside
"""
parser = argparse.ArgumentParser(description="centernet inference")
parser.add_argument("--img_path",
type=str,
required=True,
help="image file path.")
parser.add_argument(
"--pipeline_path",
type=str,
required=False,
default="config/centernet.pipeline",
help="pipeline file path. The default is 'config/centernet.pipeline'. ")
parser.add_argument(
"--infer_mode",
type=str,
required=False,
default="infer",
help=
"infer:only infer, eval: accuracy evaluation. The default is 'infer'.")
parser.add_argument(
"--infer_result_dir",
type=str,
required=False,
default="../data/infer_result",
help=
"cache dir of inference result. The default is '../data/infer_result'."
)
parser.add_argument("--ann_file",
type=str,
required=False,
help="eval ann_file.")
arg = parser.parse_args()
return arg
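# Example invocation (paths are placeholders):
#   python3 main.py --img_path ../data/images --infer_mode eval --ann_file ann.json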
def process_img(img_file):
"""
Preprocessing the images
"""
mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
input_size = [512, 512]
img = cv2.imread(img_file)
size = img.shape
inp_width = size[1]
inp_height = size[0]
down_ratio = 4
c = np.array([inp_width / 2., inp_height / 2.], dtype=np.float32)
s = max(inp_height, inp_width) * 1.0
img_metas = {'c': c, 's': s,
'out_height': input_size[0] // down_ratio,
'out_width': input_size[1] // down_ratio}
trans_input = get_affine_transform(c, s, 0, [input_size[0], input_size[1]])
inp_img = cv2.warpAffine(img, trans_input, (cfg.MODEL_WIDTH, cfg.MODEL_HEIGHT), flags=cv2.INTER_LINEAR)
inp_img = (inp_img.astype(np.float32) / 255. - mean) / std
eval_image = inp_img.reshape((1,) + inp_img.shape)
model_img = eval_image.transpose(0, 3, 1, 2)
return model_img, img_metas
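# Worked example of the geometry above (assuming a 640x480 input image):
# c = [320., 240.], s = 640.0, and with down_ratio = 4 the output grid stored
# in img_metas is 128x128 for the 512x512 network input.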
def image_inference(pipeline_path, stream_name, img_dir, result_dir):
"""
image inference: get inference for images
"""
sdk_api = SdkApi(pipeline_path)
if not sdk_api.init():
exit(-1)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
img_data_plugin_id = 0
print(f"\nBegin to inference for {img_dir}.\n")
file_list = os.listdir(img_dir)
total_len = len(file_list)
for img_id, file_name in enumerate(file_list):
        if not file_name.lower().endswith((".jpg", ".jpeg")):
continue
image_name, _ = os.path.splitext(file_name)
file_path = os.path.join(img_dir, file_name)
img_np, meta = process_img(file_path)
sdk_api.send_tensor_input(stream_name,
img_data_plugin_id, "appsrc0",
img_np.tobytes(), img_np.shape, cfg.TENSOR_DTYPE_FLOAT32)
keys = [b"mxpi_tensorinfer0"]
keyVec = StringVector()
for key in keys:
keyVec.push_back(key)
start_time = time.time()
        infer_result = sdk_api.get_protobuf(stream_name, 0, keyVec)
end_time = time.time() - start_time
result = MxpiDataType.MxpiTensorPackageList()
result.ParseFromString(infer_result[0].messageBuf)
result = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr,
dtype='float32').reshape((1, 100, 6))
img_id += 1
output = data_process(result, meta, image_name, cfg.NUM_CLASSES)
print(
f"End-2end inference, file_name: {file_path}, {img_id}/{total_len}, elapsed_time: {end_time}.\n"
)
save_pred_image_path = os.path.join(result_dir, "pred_image")
if not os.path.exists(save_pred_image_path):
os.makedirs(save_pred_image_path)
gt_image = cv2.imread(file_path)
anno = copy.deepcopy(output["annotations"])
visual_image(gt_image, anno, save_pred_image_path, score_threshold=cfg.SCORE_THRESH)
pred_res_file = os.path.join(result_dir, 'infer_{}_result.json').format(image_name)
with open(pred_res_file, 'w+') as f:
json.dump(output["annotations"], f, indent=1)
if __name__ == "__main__":
args = parser_args()
stream_name0 = cfg.STREAM_NAME.encode("utf-8")
print("stream_name0:")
print(stream_name0)
image_inference(args.pipeline_path, stream_name0, args.img_path,
args.infer_result_dir)
if args.infer_mode == "eval":
print("Infer end.")
print("Begin to eval...")
cal_acc(args.ann_file, args.infer_result_dir)
# ==== oneclick-env.py | hmsjy2017/rpi.sh | MIT ====
#!/usr/bin/env python3
import os
import base64
import argparse
import platform
bit = platform.architecture()[0]
def logo():
print("""
___ ____ _ _ _ _____
/ _ \ _ __ ___ / ___| (_) ___| | __ | ____|_ ____ __
| | | | '_ \ / _ \ | | | |/ __| |/ /____| _| | '_ \ \ / /
| |_| | | | | __/ |___| | | (__| <_____| |___| | | \ V /
\___/|_| |_|\___|\____|_|_|\___|_|\_\ |_____|_| |_|\_/ """)
copyright_title = 'ICAgICDkuIDplK7phY3nva7njq/looM='
copyright_url = 'aHR0cHM6Ly9naXRodWIuY29tL2htc2p5MjAxNy9vbmVjbGljay1lbnY='
print('')
print(base64.b64decode(copyright_title).decode('utf-8'))
print(base64.b64decode(copyright_url).decode('utf-8'))
print('')
    print(' 1. Switch APT sources       2. Add common software sources')
    print(' 3. Install NodeJS           4. Uninstall NodeJS')
    print(' 5. Install Golang           6. Uninstall Golang')
    print(' 7. Install JDK              8. Uninstall JDK')
    print(' 9. Install Rust            10. Uninstall Rust')
    print('11. Install Ruby            12. Uninstall Ruby')
    print('13. Install Docker          14. Uninstall Docker')
    print('15. Install OpenCV          16. Quit')
print('')
return copyright_title, copyright_url
# 1. Switch APT sources to domestic (CN) mirrors
def change_mirrors():
    print('\nBacking up the original APT sources')
    os.system('sudo cp /etc/apt/sources.list /etc/apt/sources.list.bak')
    os.system('sudo cp /etc/apt/sources.list.d/raspi.list /etc/apt/sources.list.d/raspi.list.bak')
    print('\nSwitching APT sources to the SJTU mirror')
    os.system('sudo sed -i "s|http://raspbian.raspberrypi.org/raspbian/|https://mirrors.sjtug.sjtu.edu.cn/raspbian/raspbian/|g" /etc/apt/sources.list')
    os.system('sudo sed -i "s|http://deb.debian.org/debian|https://mirrors.sjtug.sjtu.edu.cn/debian|g" /etc/apt/sources.list')
    os.system('sudo sed -i "s|http://archive.raspberrypi.org/debian/|https://mirrors.sjtug.sjtu.edu.cn/raspberrypi/debian/|g" /etc/apt/sources.list.d/raspi.list')
    os.system('sudo apt update')
    print('\nDone!')
# 2. Add common software sources
def add_common_apt_repositories():
    print('Adding the Raspbian Addons source')
    os.system('sudo apt update && sudo apt install -y gnupg')
    os.system('wget -qO- https://mirror.sjtu.edu.cn/raspbian-addons/KEY.gpg | sudo apt-key add -')
    os.system('echo "deb https://mirror.sjtu.edu.cn/raspbian-addons/debian/ buster main" | sudo tee /etc/apt/sources.list.d/raspbian-addons.list')
    os.system('sudo apt update')
    print('Adding the Deb Multimedia source')
    os.system('sudo echo "deb http://mirrors.ustc.edu.cn/deb-multimedia/ buster main non-free" >> /etc/apt/sources.list')
    os.system('sudo echo "# deb-src http://mirrors.ustc.edu.cn/deb-multimedia/ buster main non-free" >> /etc/apt/sources.list')
    os.system('sudo echo "deb http://mirrors.ustc.edu.cn/deb-multimedia/ buster-backports main" >> /etc/apt/sources.list')
    os.system('sudo echo "# deb-src http://mirrors.ustc.edu.cn/deb-multimedia/ buster-backports main" >> /etc/apt/sources.list')
    os.system('wget https://mirrors.ustc.edu.cn/deb-multimedia/pool/main/d/deb-multimedia-keyring/deb-multimedia-keyring_2016.8.1_all.deb')
    os.system('sudo dpkg -i deb-multimedia-keyring_2016.8.1_all.deb*')
    os.system('sudo apt update')
    os.system('rm deb-multimedia-keyring_2016.8.1_all.deb*')
    print('\nDone adding sources!')
# 3. Install NodeJS
def install_nodejs():
    print('\nDownloading n')
    os.system('sudo curl -L https://cdn.jsdelivr.net/gh/hmsjy2017/n@master/bin/n -o /usr/bin/n')
    print('\nDownload complete')
    print('\nInstalling NodeJS 14, please wait')
    os.system('sudo bash n stable')
    print('\nInstall complete. Switching the npm registry to the taobao mirror')
    os.system('npm config set registry https://registry.npm.taobao.org')
    print('\nNodeJS installed successfully')
    print('\nnode version:')
    os.system('node -v')
    print('\nnpm version:')
    os.system('npm -v')
    print('\nnpm registry:')
    os.system('npm config get registry')
# 4. Uninstall NodeJS
def uninstall_nodejs():
    print('\nUninstalling NodeJS, please wait')
    os.system('sudo npm uninstall npm -g')
    os.system('sudo rm -rf /usr/local/lib/node /usr/local/lib/node_modules /var/db/receipts/org.nodejs.*')
    os.system('sudo rm -rf /usr/local/include/node /Users/$USER/.npm')
    os.system('sudo rm /usr/local/bin/node')
    os.system('sudo rm /usr/local/share/man/man1/node.1')
    os.system('sudo rm /usr/local/lib/dtrace/node.d')
    os.system('sudo rm /usr/bin/n')
    print('\nUninstall complete!')
# 5. Install Golang
def install_golang():
    print("\nDownloading Golang, please wait")
    if bit == '64bit':
        os.system('wget https://dl.google.com/go/go1.17.2.linux-arm64.tar.gz')
        print('\nDownload complete')
        print('\nExtracting, please wait')
        os.system('sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.17.2.linux-arm64.tar.gz*')
        os.system('export "PATH=$PATH:/usr/local/go/bin"')  # note: runs in a subshell, so it does not persist
        print('\nExtraction complete. Removing the downloaded archive')
        os.system('rm go1.17.2.linux-arm64.tar.gz')
    elif bit == '32bit':
        os.system('wget https://dl.google.com/go/go1.17.2.linux-armv6l.tar.gz')
        print('\nDownload complete')
        print('\nExtracting, please wait')
        os.system('sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.17.2.linux-armv6l.tar.gz*')
        #os.system('export "PATH=$PATH:/usr/local/go/bin"')
        print('\nExtraction complete. Removing the downloaded archive')
        os.system('rm go1.17.2.linux-armv6l.tar.gz')
    print('\nGolang installed successfully!')
    print('\nPlease run export "PATH=$PATH:/usr/local/go/bin" manually')
# 6. Uninstall Golang
def uninstall_golang():
    print('\nUninstalling Golang, please wait')
    os.system('sudo rm -rf /usr/local/go')
    print('\nUninstall complete!')
# 7. Install JDK
def install_jdk():
    print('Adding the AdoptOpenJDK source')
    os.system('wget -qO - https://adoptopenjdk.jfrog.io/adoptopenjdk/api/gpg/key/public | sudo apt-key add -')
    os.system('echo "deb http://mirrors.tuna.tsinghua.edu.cn/AdoptOpenJDK/deb buster main" | sudo tee /etc/apt/sources.list.d/AdoptOpenJDK.list')
    os.system('sudo apt-get update')
    print('\nInstalling AdoptOpenJDK 16, please wait')
    os.system('sudo apt install -y adoptopenjdk-16-hotspot')
    print('\nJDK installed successfully!')
    print('\nJDK version:')
    os.system('java --version')
# 8. Uninstall JDK
def uninstall_jdk():
    print('\nUninstalling AdoptOpenJDK 16, please wait')
    os.system('sudo apt purge -y adoptopenjdk-16-hotspot')
    print('\nUninstall complete!')
# 9. Install Rust
def install_rust():
    print('\nInstalling Rust, please wait')
    os.system('wget https://raw.fastgit.org/hmsjy2017/oneclick-env/main/rust.sh')
    os.system('chmod +x rust.sh')
    os.system('bash rust.sh')
    #os.system('export "RUSTUP_DIST_SERVER=https://mirrors.ustc.edu.cn/rust-static"')
    #os.system('export "RUSTUP_UPDATE_ROOT=https://mirrors.ustc.edu.cn/rust-static/rustup"')
    os.system('wget https://cdn.jsdelivr.net/gh/rust-lang-nursery/rustup.rs/rustup-init.sh')
    os.system('chmod +x rustup-init.sh')
    os.system('./rustup-init.sh -y')
    os.system('source $HOME/.cargo/env')
    print('\nConfiguring the Cargo mirror')
    os.system('curl -sSf https://raw.fastgit.org/hmsjy2017/scripts/main/cargo.sh | sh')
    print('\nConfiguring the Rustup mirror')
    os.system('echo "RUSTUP_DIST_SERVER=https://mirrors.ustc.edu.cn/rust-static" >> ~/.cargo/env')
    os.system('rm rustup-init.sh* rust.sh*')
    print('\nRust installed successfully')
    print('\nCargo version:')
    os.system('cargo --version')
    print('\nrustup version:')
    os.system('rustup --version')
    print('\nrustc version:')
    os.system('rustc --version')
# 10. Uninstall Rust
def uninstall_rust():
    print('\nUninstalling Rust, please wait')
    os.system('rustup self uninstall -y')
    print('\nUninstall complete!')
# 11. Install Ruby
def install_ruby():
    print('\nInstalling RVM, please wait')
    os.system('sudo apt update && sudo apt install -y gnupg')
    os.system('gpg --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB')
    os.system('\curl -sSL https://cdn.jsdelivr.net/gh/rvm/rvm@master/binscripts/rvm-installer | bash -s stable')
    os.system('sudo usermod -aG rvm root')
    os.system('sudo source /etc/profile.d/rvm.sh')
    os.system('source ~/.bashrc')
    os.system('source ~/.bash_profile')
    print('\nInstalling Ruby, please wait')
    os.system('echo "ruby_url=https://cache.ruby-china.com/pub/ruby" > ~/.rvm/user/db')
    os.system('rvm install 2.7.0 --disable-binary')
    # optional: os.system('rvm docs generate-ri')
    print('\nConfiguring the gems mirror')
    os.system('gem sources --add https://mirrors.tuna.tsinghua.edu.cn/rubygems/ --remove https://rubygems.org/')
    os.system('gem sources -l')
    print('\nRuby installed successfully!')
    print('\nRuby version:')
    os.system('ruby -v')
    print('\nRVM version:')
    os.system('rvm -v')
# 12. Uninstall Ruby
def uninstall_ruby():
    print('\nUninstalling Ruby, please wait')
    os.system('rvm remove 2.7.0')
    print('\nUninstalling RVM, please wait')
    os.system('rvm implode')
    print('\nUninstall complete!')
# 13. Install Docker
def install_docker():
    print('\nInstalling Docker, please wait')
    os.system('sudo apt-get install -y apt-transport-https ca-certificates curl gnupg2 software-properties-common')
    os.system('curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -')
    if bit == '64bit':
        os.system('echo "deb [arch=arm64] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list')
    elif bit == '32bit':
        os.system('echo "deb [arch=armhf] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list')
    os.system('sudo apt-get update')
    os.system('sudo apt-get install -y docker-ce')
    print('\nConfiguring the Docker registry mirror (DaoCloud)')
    os.system('curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io')
    os.system('sudo service docker restart')
    print('\nDocker installed successfully!')
    print('\nDocker version:')
    os.system('docker -v')
# 14. Uninstall Docker
def uninstall_docker():
    print('\nUninstalling Docker, please wait')
    os.system('sudo apt-get purge -y docker-ce')
# 15. Install OpenCV
def install_opencv():
    print('\nInstalling OpenCV, please wait')
    os.system('sudo apt-get update')
    if bit == '64bit':
        os.system('wget https://hub.fastgit.org/hmsjy2017/debian-pi-aarch64-apps/releases/download/v4.5.1/opencv-4.5.1-org.pifan_20210116-1_arm64.deb')
    if bit == '32bit':
        os.system('wget https://raw.fastgit.org/cyysky/OpenCV-Raspberry-Pi-4-Package-for-Python/master/opencv_4.5.0-1_armhf.deb')
    os.system('sudo apt-get install -y ./opencv*.deb*')
    os.system('pkg-config --modversion opencv4')
    print('\nOpenCV installed successfully; to uninstall, run sudo apt purge opencv -y')
# 15. Install the LNMP stack (disabled draft)
#def install_lnmp():
#    mysql_password = input('\nSet a MySQL password: ')
#    print('\nInstalling the LNMP stack, please wait')
#    os.system('sudo apt-get update')
#    os.system('sudo apt-get install nginx php7.3-fpm php7.3-cli php7.3-curl php7.3-gd php7.3-cgi')
#    os.system('sudo service nginx start')
#    os.system('sudo service php7.3-fpm restart')
#    os.system('sudo rm /etc/nginx/sites-available/default')
#    os.system('sudo wget https://raw.fastgit.org/hmsjy2017/oneclick-env/main/default -o /etc/nginx/sites-available/default')
#    os.system('sudo apt-get install mariadb-server-10.3')
#    os.system(f'sudo mysql -uroot -hlocalhost -e "create user root@\'127.0.0.1\' identified by \'{mysql_password}\';"')
#    os.system('sudo mysql -uroot -hlocalhost -e "grant all privileges on *.* to root@\'localhost\' with grant option;"')
#    os.system('sudo mysql -uroot -hlocalhost -e "grant all privileges on *.* to root@\'localhost\' with grant option;"')
#    os.system(f'sudo mysql -uroot -hlocalhost -e "alter user root@\'127.0.0.1\' identified by \'{mysql_password}\';"')
#    os.system(f'sudo mysql -uroot -p{mysql_password} -e "reset master;"')
#    print('\nLNMP stack installed successfully')
if __name__ == "__main__":
copyright = logo()
if copyright[0][10:13] != 'Dpl' or copyright[1][10:13] != '9na':
        print('Integrity check failed, exiting')
os._exit(0)
try:
        option = input('\nSelect an operation: ')
except EOFError:
pass
if int(option) == 1:
change_mirrors()
elif int(option) == 2:
add_common_apt_repositories()
elif int(option) == 3:
install_nodejs()
elif int(option) == 4:
uninstall_nodejs()
elif int(option) == 5:
install_golang()
elif int(option) == 6:
uninstall_golang()
elif int(option) == 7:
install_jdk()
elif int(option) == 8:
uninstall_jdk()
elif int(option) == 9:
install_rust()
elif int(option) == 10:
uninstall_rust()
elif int(option) == 11:
install_ruby()
elif int(option) == 12:
uninstall_ruby()
elif int(option) == 13:
install_docker()
elif int(option) == 14:
uninstall_docker()
# elif int(option) == 15:
# install_lnmp()
elif int(option) == 15:
install_opencv()
elif int(option) == 16:
os._exit(0)
else:
        print('Invalid option, please enter again')
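    # A dict-based dispatch would be a more compact alternative to the elif
    # chain above (illustrative sketch):
    #   actions = {1: change_mirrors, 2: add_common_apt_repositories, 3: install_nodejs}
    #   actions.get(int(option), lambda: print('Invalid option'))()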
# ==== models/ssl_online.py | SaeidAbdolian/seasonal-contrast | Apache-2.0 ====
import torch
from torch import nn
from pytorch_lightning import Callback
from pl_bolts.models.self_supervised.evaluator import SSLEvaluator
from sklearn.metrics import average_precision_score
from datasets.bigearthnet_datamodule import BigearthnetDataModule
class SSLOnlineEvaluator(Callback):
def __init__(self, data_dir, z_dim, max_epochs=10, check_val_every_n_epoch=1, batch_size=1024, num_workers=32):
self.z_dim = z_dim
self.max_epochs = max_epochs
self.check_val_every_n_epoch = check_val_every_n_epoch
self.datamodule = BigearthnetDataModule(
data_dir=data_dir,
train_frac=0.01,
val_frac=0.01,
lmdb=True,
batch_size=batch_size,
num_workers=num_workers
)
self.datamodule.setup()
self.criterion = nn.MultiLabelSoftMarginLoss()
self.metric = lambda output, target: average_precision_score(target, output, average='micro') * 100.0
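        # Example: for y_true [[1, 0], [0, 1]] and scores [[0.9, 0.2], [0.1, 0.8]]
        # the micro-averaged AP is 1.0, reported by this metric as 100.0.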
def on_pretrain_routine_start(self, trainer, pl_module):
self.classifier = SSLEvaluator(
n_input=self.z_dim,
n_classes=self.datamodule.num_classes,
n_hidden=None
).to(pl_module.device)
self.optimizer = torch.optim.Adam(self.classifier.parameters(), lr=1e-3)
def on_epoch_end(self, trainer, pl_module):
if (trainer.current_epoch + 1) % self.check_val_every_n_epoch != 0:
return
encoder = pl_module.encoder_q
self.classifier.train()
for _ in range(self.max_epochs):
for inputs, targets in self.datamodule.train_dataloader():
inputs = inputs.to(pl_module.device)
targets = targets.to(pl_module.device)
with torch.no_grad():
representations = encoder(inputs)
representations = representations.detach()
logits = self.classifier(representations)
loss = self.criterion(logits, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
self.classifier.eval()
accuracies = []
for inputs, targets in self.datamodule.val_dataloader():
inputs = inputs.to(pl_module.device)
with torch.no_grad():
representations = encoder(inputs)
representations = representations.detach()
logits = self.classifier(representations)
preds = torch.sigmoid(logits).detach().cpu()
acc = self.metric(preds, targets)
accuracies.append(acc)
acc = torch.mean(torch.tensor(accuracies))
metrics = {'online_val_acc': acc}
trainer.logger_connector.log_metrics(metrics, {})
trainer.logger_connector.add_progress_bar_metrics(metrics)
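# Usage sketch (assumes a MoCo-style LightningModule exposing encoder_q, with
# pytorch_lightning imported as pl; paths are placeholders):
#   evaluator = SSLOnlineEvaluator(data_dir='/path/to/bigearthnet', z_dim=512)
#   trainer = pl.Trainer(callbacks=[evaluator])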
# ==== deprecated/word_index_transform/word_index_transform.py | thhapke/di_textanalysis | MIT ====
import json
import os
import csv
import re
import pickle
import collections
import subprocess
import spacy
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
supported_languages = ['DE', 'EN', 'ES', 'FR']
lexicon_languages = {lang: False for lang in supported_languages}
try:
api
except NameError:
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
if port == outports[1]['name']:
# wi_fn = os.path.join('/Users/Shared/data/onlinemedia/repository',msg.attributes['storage.filename']+'-words.csv')
# with open(wi_fn, 'w') as f:
# writer = csv.writer(f)
# cols = [c['name'] for c in msg.attributes['table']['columns']]
# writer.writerow(cols)
# writer.writerows(msg.body)
api.queue.append(msg)
else:
# print('{}: {}'.format(port, msg))
pass
def set_config(config):
api.config = config
class config:
## Meta data
config_params = dict()
tags = {'sdi_utils': '', 'spacy': ''}
version = "0.0.18"
operator_name = "word_index_transform"
operator_description = "word index transformation"
operator_description_long = "Transforms the index either in place or to new index."
add_readme = dict()
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
language = 'None'
config_params['language'] = {'title': 'Language', 'description': 'Filter for language of media.',
'type': 'string'}
type = 'P'
config_params['type'] = {'title': 'Type',
'description': 'Define the kind of data extraction. P=Proper Nouns, '
'N=Nouns, X: Removing only stopwords.', 'type': 'string'}
# global articles
blacklist = list()
keywords = list()
lexicon = None
lexicon_stem = None
last_msg = None
id_set = list()
operator_name = 'word_extraction'
def setup_blacklist(msg):
logger, log_stream = slog.set_logging(operator_name, api.config.debug_mode)
global blacklist
logger.info('Set blacklist')
logger.debug('Attributes: {}'.format(msg.attributes))
logger.debug('Data: {}'.format(msg.body))
blacklist = msg.body
api.send(outports[0]['name'], log_stream.getvalue())
process(None)
def setup_lexicon(msg):
logger, log_stream = slog.set_logging(operator_name, api.config.debug_mode)
global lexicon, lexicon_languages, lexicon_stem
logger.info('Set lexicon')
logger.debug('Attributes: {}'.format(msg.attributes))
logger.debug('Data: {}'.format(msg.body))
try:
header = [c["name"] for c in msg.attributes['table']['columns']]
except Exception as e:
logger.error(e)
api.send(outports[0]['name'], log_stream.getvalue())
return None
lexicon = {c: dict() for c in header[1:]}
lexicon_stem = {c: dict() for c in header[1:]}
for r in msg.body:
for i, lang in enumerate(header[1:]):
lang_words = r[i + 1].split()
lw = list()
lws = list()
for w in lang_words :
if w[-1] == '*' :
lws.append(w[:-1])
else :
lw.append(w)
if lw :
lw_dict = dict.fromkeys(lw, r[0])
lexicon[lang].update(lw_dict)
if lws :
lws_dict = dict.fromkeys(lws, r[0])
lexicon_stem[lang].update(lws_dict)
for lang in header[1:]:
lexicon_languages[lang] = True
api.send(outports[0]['name'], log_stream.getvalue())
process(None)
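# Example lexicon row (illustrative): with header ['KEY', 'EN'], the row
# ['CITY', 'Berlin Berl*'] maps the exact token 'Berlin' into lexicon['EN']
# and the stem 'Berl' (trailing '*' marks stem entries) into lexicon_stem['EN'],
# both pointing at the canonical key 'CITY'.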
# Checks for setup
def check_for_setup(logger, msg) :
global blacklist, lexicon, last_msg
use_blacklist = True
use_lexicon = True
logger.info("Check setup")
# Case: setupdate, check if all has been set
if msg == None:
logger.debug('Setup data received!')
if last_msg == None:
logger.info('Prerequisite message has been set, but waiting for data')
return None
else:
if len(blacklist) == 0 or lexicon == None :
logger.info("Setup not complete - blacklist: {} lexicon: {}".\
format(len(blacklist, len(lexicon))))
return None
else:
logger.info("Last msg list has been retrieved")
return last_msg
else:
logger.debug('Processing data received!')
# saving of data msg
if last_msg == None:
last_msg = msg
else:
last_msg.attributes = msg.attributes
last_msg.body.extend(msg.body)
# check if data msg should be returned or none if setup is not been done
if (len(blacklist) == 0 and use_blacklist == True) or \
(lexicon == None and use_lexicon == True):
len_lex = 0 if lexicon == None else len(lexicon)
logger.info("Setup not complete - blacklist: {} lexicon: {}".\
format(len(blacklist), len_lex))
return None
else:
logger.info('Setup is been set. Saved msg retrieved.')
msg = last_msg
last_msg = None
return msg
def process(msg):
global blacklist
global last_msg
global id_set
logger, log_stream = slog.set_logging(operator_name, api.config.debug_mode)
# Check if setup complete
msg = check_for_setup(logger, msg)
if not msg:
        api.send(outports[0]['name'], log_stream.getvalue())
return 0
logger.info("Main Process started. Logging level: {}".format(logger.level))
time_monitor = tp.progress()
att_dict = msg.attributes
language = tfp.read_value(api.config.language)
type = tfp.read_value(api.config.type)
if len(type) > 1 :
logger.warning('Only one type can be transformed. Take only first one: {}'.format(type[0]))
type = type[0]
# DELETE all new type rows
sql = 'DELETE FROM "WORD_INDEX" WHERE "TYPE" = \'Q\' '
# COPY 'TYPE' to 'NEW TYPE'
sql = 'SELECT * FROM "WORD_INDEX" WHERE "TYPE" = \'' + type + '\' '
    if language:
        sql += 'AND "LANGUAGE" = \'' + language + '\' '
    # REMOVE ALL BLACKLIST
    # REPLACE LEXICON
    # Assumed intent: emit the assembled SQL on the data port (outports[1] is
    # described as "msg with sql-statement" and att_dict is otherwise unused)
    api.send(outports[1]['name'], api.Message(attributes=att_dict, body=sql))
    api.send(outports[0]['name'], log_stream.getvalue())
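    # Example of the statement assembled above (type 'P', language 'DE'):
    #   SELECT * FROM "WORD_INDEX" WHERE "TYPE" = 'P' AND "LANGUAGE" = 'DE'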
inports = [{'name': 'blacklist', 'type': 'message.list', "description": "Message with body as dictionary."},
{'name': 'lexicon', 'type': 'message.table', "description": "Message with body as lexicon."}, \
{'name': 'table', 'type': 'message.table', "description": "Message with body as table."}]
outports = [{'name': 'log', 'type': 'string', "description": "Logging data"}, \
{'name': 'data', 'type': 'message', "description": "msg with sql-statement"}]
#api.set_port_callback(inports[0]['name'], setup_blacklist)
#api.set_port_callback(inports[1]['name'], setup_keywords)
#api.set_port_callback(inports[2]['name'], setup_lexicon)
#api.set_port_callback(inports[3]['name'], process)
def test_operator():
config = api.config
config.debug_mode = True
config.type = 'P'
config.new_type = 'P'
config.language = 'None'
api.set_config(config)
# BLACKLIST
bl_filename = '/Users/Shared/data/onlinemedia/repository/blacklist_word_frequency.txt'
blacklist = list()
with open(bl_filename, mode='r') as csv_file:
rows = csv.reader(csv_file, delimiter=',')
for r in rows:
blacklist.append(r[0])
# print(csv_file.read())
bl_msg = api.Message(attributes={'filename': bl_filename}, body=blacklist)
setup_blacklist(bl_msg)
# WORD_INDEX
wi_filename = '/Users/Shared/data/onlinemedia/data/word_frequency.csv'
wi_table = list()
with open(wi_filename, 'r') as csv_file:
rows = csv.reader(csv_file,delimiter=',')
for i,r in enumerate(rows):
if i == 10000:
break
wi_table.append(r)
attributes = {'table':{'columns':['HASH_TEXT','LANGUAGE','TYPE','WORD','COUNT']}}
process(api.Message(attributes=attributes,body = wi_table))
# LEXICON
lex_filename = '/Users/Shared/data/onlinemedia/repository/lexicon_word_frequency.csv'
lexicon_list = list()
with open(lex_filename, mode='r') as csv_file:
rows = csv.reader(csv_file, delimiter=',')
headers = next(rows, None)
for r in rows:
#r[3] = r[3].replace('*', '') # only needed when lexicon in construction
lexicon_list.append(r)
attributes = {"table": {"name": lex_filename, "version": 1, "columns": list()}}
for h in headers:
attributes["table"]["columns"].append({"name": h})
lex_msg = api.Message(attributes=attributes, body=lexicon_list)
setup_lexicon(lex_msg)
if __name__ == '__main__':
test_operator()
if False :
subprocess.run(["rm",'-r','/Users/d051079/OneDrive - SAP SE/GitHub/di_textanalysis/solution/operators/textanalysis_' + api.config.version])
gs.gensolution(os.path.realpath(__file__), api.config, inports, outports)
solution_name = api.config.operator_name+'_'+api.config.version
subprocess.run(["vctl", "solution", "bundle", '/Users/d051079/OneDrive - SAP SE/GitHub/di_textanalysis/solution/operators/textanalysis_0.0.18',\
"-t", solution_name])
subprocess.run(["mv", solution_name+'.zip', '../../../solution/operators'])
# ==== overrule/BCS/overlap_boolean_rule.py | Viktour19/overlap-code | MIT ====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------#
# OverRule: Overlap Estimation using Rule Sets #
# @Authors: Dennis Wei, Michael Oberst, #
# Fredrik D. Johansson #
# ----------------------------------------------#
import logging
import os
import numpy as np
import pandas as pd
from .load_process_data_BCS import extract_target, binarize_features
import cvxpy as cvx
from .beam_search import beam_search, beam_search_no_dup
from sklearn.metrics import roc_auc_score
class OverlapBooleanRule(object):
"""Overlap Boolean Rule class in the style of scikit-learn"""
def __init__(self, alpha=0.9, hamming=True, gamma=1, lambda0=1, lambda1=1, K=10, \
iterMax=100, eps=1e-6, silent=False, CNF=False, verbose=False, solver='ECOS', D=10,
logger=None, B=5,
rounding='coverage'):
# Fraction of overlap set to cover
self.alpha = alpha
# Use Hamming loss instead of 0-1 loss
self.hamming = hamming
# Relative weight on uniform background samples
self.gamma = gamma
# Regularization parameters
self.lambda0 = lambda0 # clause fixed cost
self.lambda1 = lambda1 # cost per literal
# Column generation parameters
self.K = K # maximum number of columns generated per iteration
self.iterMax = iterMax # maximum number of iterations
# Numerical tolerance on comparisons
self.eps = eps
# Silence output
self.silent = silent
if logger is None:
logger = logging.getLogger('OverlapBooleanRule')
self.logger = logger
# CNF instead of DNF (NOTE: CNF=True and hamming=False not supported)
self.CNF = CNF
# Verbose optimizer
self.verbose = verbose
# Solver
self.solver = solver
# LP
self.lp_obj_value = None
# Maximum Rules considered at each expansion
self.D = D
# Rounding
self.rounding = rounding
# Beam search width
self.B = B
# For get_params / set_params
        # @TODO: maybe make this a class variable?
self.valid_params = ['alpha', 'hamming', 'gamma', 'lambda0',
'lambda1', 'K', 'iterMax', 'eps',
'silent', 'CNF', 'D', 'B']
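        # Example: alpha=0.9 in DNF mode requires the learned rules to cover at
        # least 90% of the overlap samples (y = +1), while lambda0 and lambda1
        # penalize the number of clauses and literals respectively.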
def __getstate__(self):
state = self.__dict__.copy()
del state['logger']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.logger = None
def fit(self, X, y):
"""Fit model to training data"""
if not self.silent:
self.logger.info('Learning Boolean rule set in %s form, %s hamming loss' % ('CNF' if self.CNF else 'DNF', 'with' if self.hamming else 'without'))
# Overlap (y = +1), non-overlap (y = 0), and uniform background (y = -1) samples
O = np.where(y > 0)[0]
N = np.where(y == 0)[0]
U = np.where(y < 0)[0]
nO = len(O)
nN = len(N)
nU = len(U)
# MKO: We should always have overlap samples, and either background or
# non-overlap samples
assert nO > 0 and (nU > 0 or nN > 0)
# Initialize with empty and singleton conjunctions, i.e. X plus all-ones feature
# Feature indicator and conjunction matrices
z = pd.DataFrame(np.eye(X.shape[1], X.shape[1]+1, 1, dtype=int), index=X.columns)
A = np.hstack((np.ones((X.shape[0],1), dtype=int), X))
# Iteration counter
self.it = 0
# Formulate master LP
# Variables
w = cvx.Variable(A.shape[1], nonneg=True)
if self.CNF:
if nN:
xiN = cvx.Variable(nN, nonneg=True)
if nU:
xiU = cvx.Variable(nU, nonneg=True)
else:
xiO = cvx.Variable(nO, nonneg=True)
if not self.hamming:
xiN = cvx.Variable(nN)
if nU:
xiU = cvx.Variable(nU)
if not nU:
self.gamma = 0
# Objective function (no penalty on empty conjunction)
lambdas = self.lambda0 + self.lambda1 * z.sum().values
lambdas[0] = 0
if not self.CNF and self.hamming:
if nU:
if nN:
obj = cvx.Minimize(cvx.sum(A[N,:] * w)/(nN*(1+self.gamma)) +\
self.gamma * cvx.sum(A[U,:] * w)/(nU*(1+self.gamma)) +\
lambdas * w)
else:
obj = cvx.Minimize(cvx.sum(A[U,:] * w)/nU + lambdas * w)
else:
obj = cvx.Minimize(cvx.sum(A[N,:] * w)/ nN + lambdas * w)
else:
if nU:
if nN:
obj = cvx.Minimize(cvx.sum(xiN)/(nN*(1+self.gamma)) +\
self.gamma * cvx.sum(xiU)/(nU*(1+self.gamma)) +\
lambdas * w)
else:
obj = cvx.Minimize(cvx.sum(xiU) / nU + lambdas * w)
else:
obj = cvx.Minimize(cvx.sum(xiN) / nN + lambdas * w)
# Constraints
if self.CNF:
if nN:
constraints = [cvx.sum(A[O,:] * w) <= (1 - self.alpha) * nO,
xiN + A[N,:] * w >= 1]
else:
constraints = [cvx.sum(A[O,:] * w) <= (1 - self.alpha) * nO]
if nU:
constraints.append(xiU + A[U,:] * w >= 1)
else:
constraints = [cvx.sum(xiO) <= (1 - self.alpha) * nO,
xiO + A[O,:] * w >= 1]
if not self.hamming:
for (ii, i) in enumerate(N):
constraints.append(xiN[ii] >= cvx.max(w[A[i,:] > 0]))
for (ii, i) in enumerate(U):
constraints.append(xiU[ii] >= cvx.max(w[A[i,:] > 0]))
# Solve problem
prob = cvx.Problem(obj, constraints)
prob.solve(verbose=self.verbose, solver=self.solver)
# Extract dual variables
r = np.zeros_like(y, dtype=float)
if self.CNF:
r[O] = constraints[0].dual_value
if nN and nU:
r[N] = -constraints[1].dual_value
r[U] = -constraints[2].dual_value
elif nU:
r[U] = -constraints[1].dual_value
else:
r[N] = -constraints[1].dual_value
else:
r[O] = -constraints[1].dual_value
if self.hamming:
if nN and nU:
r[N] = 1. / (nN * (1+self.gamma))
r[U] = self.gamma / (nU * (1+self.gamma))
elif nU:
r[U] = 1. / nU
elif nN:
r[N] = 1. / nN
else:
r[N[xiN.value < self.eps]] = 1 / (nN * (1+self.gamma))
if nU:
r[U[xiU.value < self.eps]] = self.gamma / (nU * (1+self.gamma))
if not self.silent:
self.logger.info('Initial solve completed')
# Beam search for conjunctions with negative reduced cost
if self.hamming:
# Most negative reduced cost among current variables
UB = np.dot(r, A) + lambdas
#print('UB.min():', UB.min())
UB = min(UB.min(), 0)
v, zNew, Anew = beam_search(r, X, self.lambda0, self.lambda1,
K=self.K, UB=UB, eps=self.eps, B=self.B, D=self.D)
else:
v, zNew, Anew = beam_search_no_dup(r, X, self.lambda0, self.lambda1,
z, K=self.K, eps=self.eps, B=self.B, D=self.D)
while (v < -self.eps).any() and (self.it < self.iterMax):
# Negative reduced costs found
self.it += 1
if not self.silent:
self.logger.info('Iteration: %d, Objective: %.4f' % (self.it, prob.value))
# Add to existing conjunctions
z = pd.concat([z, zNew], axis=1, ignore_index=True)
A = np.concatenate((A, Anew), axis=1)
# Reformulate master LP
# Variables
w = cvx.Variable(A.shape[1], nonneg=True)
# Objective function
lambdas = np.concatenate((lambdas, self.lambda0 + self.lambda1 * zNew.sum().values))
if not self.CNF and self.hamming:
if nU:
if nN:
obj = cvx.Minimize(cvx.sum(A[N,:] * w)/(nN*(1+self.gamma)) +\
self.gamma * cvx.sum(A[U,:] * w)/(nU*(1+self.gamma)) +\
lambdas * w)
else:
obj = cvx.Minimize(cvx.sum(A[U,:] * w)/nU + lambdas * w)
else:
obj = cvx.Minimize(cvx.sum(A[N,:] * w)/ nN + lambdas * w)
else:
if nU and nN:
obj = cvx.Minimize(cvx.sum(xiN)/(nN*(1+self.gamma)) +\
self.gamma * cvx.sum(xiU)/(nU*(1+self.gamma)) +\
lambdas * w)
elif nU:
obj = cvx.Minimize(cvx.sum(xiU) / nU + lambdas * w)
else:
obj = cvx.Minimize(cvx.sum(xiN) / nN + lambdas * w)
# Constraints
if self.CNF:
if nN:
constraints = [cvx.sum(A[O,:] * w) <= (1 - self.alpha) * nO,
xiN + A[N,:] * w >= 1]
else:
constraints = [cvx.sum(A[O,:] * w) <= (1 - self.alpha) * nO]
if nU:
constraints.append(xiU + A[U,:] * w >= 1)
else:
constraints = [cvx.sum(xiO) <= (1 - self.alpha) * nO,
xiO + A[O,:] * w >= 1]
if not self.hamming:
for (ii, i) in enumerate(N):
constraints.append(xiN[ii] >= cvx.max(w[A[i,:] > 0]))
for (ii, i) in enumerate(U):
constraints.append(xiU[ii] >= cvx.max(w[A[i,:] > 0]))
# Solve problem
prob = cvx.Problem(obj, constraints)
prob.solve(verbose=self.verbose, solver=self.solver)
# Extract dual variables
r = np.zeros_like(y, dtype=float)
if self.CNF:
r[O] = constraints[0].dual_value
if nN and nU:
r[N] = -constraints[1].dual_value
r[U] = -constraints[2].dual_value
elif nU:
r[U] = -constraints[1].dual_value
else:
r[N] = -constraints[1].dual_value
else:
r[O] = -constraints[1].dual_value
if self.hamming:
if nN and nU:
r[N] = 1. / (nN * (1+self.gamma))
r[U] = self.gamma / (nU * (1+self.gamma))
elif nU:
r[U] = 1. / nU
elif nN:
r[N] = 1. / nN
else:
r[N[xiN.value < self.eps]] = 1 / (nN * (1+self.gamma))
if nU:
r[U[xiU.value < self.eps]] = self.gamma / (nU * (1+self.gamma))
# Beam search for conjunctions with negative reduced cost
if self.hamming:
# Most negative reduced cost among current variables
UB = np.dot(r, A) + lambdas
#print('UB.min():', UB.min())
UB = min(UB.min(), 0)
v, zNew, Anew = beam_search(r, X, self.lambda0, self.lambda1,
K=self.K, UB=UB, eps=self.eps, B=self.B, D=self.D)
else:
v, zNew, Anew = beam_search_no_dup(r, X, self.lambda0, self.lambda1, z, K=self.K, eps=self.eps, D=self.D, B=self.B)
# Save generated conjunctions and coefficients
self.z = z
w = w.value
self.w_raw = w
self.lp_obj_value = prob.value
self.round_(X, y, scoring=self.rounding)
def greedy_round_(self, X, y, xi=.5, use_lp=False, gamma=None):
'''
For DNF, this starts with no conjunctions and adds them greedily
based on a cost that penalizes (any) inclusion of reference samples
and rewards (new) inclusion of positive samples, continuing until at
least an alpha fraction of the positive samples is covered.
For CNF, we instead:
+ only consider rules that adhere to the limit on positive samples
+ add rules to cover (new) reference samples, while
penalizing the coverage of positive samples
'''
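# In sketch form (DNF case, matching the cost used below): repeatedly pick
# the conjunction r that minimizes
#     neg_cover + gamma*ref_cover + reg - xi*pos_new_cover
# and stop once an alpha fraction of the positive samples is covered.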
A = self.compute_conjunctions(X)
R = np.arange(0, A.shape[1]) # Remaining conjunctions
U = [] # Used conjunctions
C = np.zeros(X.shape[0]) # Coverage indicator
MAX_ITER = 1000
i = 0
gamma = self.gamma if gamma is None else gamma
# Restrict conjunctions to those used by LP
if use_lp:
R = [R[i] for i in range(len(R)) if self.w_raw[i]>0]
if self.CNF:
while (i<MAX_ITER):
assert (y == 0).sum() == 0, 'Neg samps not implemented for CNF'
assert (y == -1).sum() > 0, 'No reference samples given'
# Frac of additional ref samples that each conjunction covers
if A[(y == -1) & (C < 1), :].shape[0] == 0:
self.logger.info(
"Rounded rules cover all reference samples!")
break
ref_new_cover = (A[(y == -1) & (C < 1),:][:,R] + 1e-8).mean(0)
# Positive samples covered (for each conjunction)
pos_cover = (A[(y == 1),:][:,R]).mean(0)
# Regularization
reg = self.lambda1 * self.z.values[:,R].sum(0)
# Costs (for each conjunction)
costs = xi*pos_cover - gamma*ref_new_cover + reg
# Only consider feasible new rules, which maintain the
# constraint that they cannot add too many pos samples
# NOTE: This is a bit different than the actual constraint we
# use in the LP relaxation, but is closer to what we actually
# want
feasible = (C[(y == 1), np.newaxis] + A[(y == 1),:][:, R]
).mean(0) < 1 - self.alpha
if feasible.sum() == 0:
break
costs[~feasible] = np.inf
r = np.argmin(costs) # Find min-cost conjunction
C = (C + A[:,R[r]])>0. # Update coverage
U.append(R[r])
R = np.array([R[i] for i in range(len(R)) if not i==r])
i+=1
else:
while (i<MAX_ITER) and (C[y == 1].mean() < self.alpha):
if (y==0).sum() > 0:
neg_cover = (A[(y == 0),:][:,R]).mean(0)
else:
neg_cover = 0
if (y==-1).sum() > 0:
# Fraction of reference samples that each conjunction covers
ref_cover = (A[(y == -1),:][:,R]).mean(0)
else:
ref_cover = 0
# Regularization (for each conjunction)
reg = self.lambda1 * self.z.values[:,R].sum(0)
# Positive samples newly covered (for each conjunction)
pos_new_cover = (A[(y == 1) & (C < 1),:][:,R] + 1e-8).mean(0)
# Costs (for each conjunction)
costs = neg_cover + gamma*ref_cover + reg - xi*pos_new_cover
r = np.argmin(costs) # Find min-cost conjunction
C = (C + A[:,R[r]])>0. # Update coverage
U.append(R[r])
R = np.array([R[i] for i in range(len(R)) if not i==r])
i+=1
# Zero out the rules and only take those which are used
self.w = np.zeros(A.shape[1])
self.w[U] = 1
def round_(self, X, y, scoring='coverage', xi=None, use_lp=False, gamma=None, tol=0.01):
""" Round based on scoring """
if scoring == 'roc_auc':
t_cand = np.unique(self.w_raw)
best_auc = -1
best_w = None
for i in range(len(t_cand)):
w = self.w_raw*(self.w_raw > t_cand[i])
auc = roc_auc_score(y, self.predict_(X, w))
if auc > best_auc:
best_auc = auc
best_w = w
self.w = best_w
elif scoring == 'greedy':
self.greedy_round_(X, y, xi=xi, use_lp=use_lp, gamma=gamma)
elif scoring == 'greedy_sweep':
if xi is None:
xi = np.logspace(np.log10(0.01), .5, 20)
xis = np.array([xi]).ravel()
best_xi = xis[0]
if len(xis) > 1:
best_xi = None
best_auc = 0
for xii in xis:
self.greedy_round_(X, y, xi=xii, use_lp=use_lp, gamma=gamma)
auc = roc_auc_score(y, self.predict(X))
if auc > best_auc - tol:
best_xi = xii
if auc > best_auc:
best_auc = auc
self.greedy_round_(X, y, xi=best_xi, use_lp=use_lp, gamma=gamma)
else:
A = self.compute_conjunctions(X)
w = self.w_raw
O = np.where(y > 0)[0]
nO = len(O)
# Binarize coefficients
# Candidates corresponding to all possible thresholds
wCand = (w[:, np.newaxis] >= np.append(np.unique(w), 1.5)).astype(int)
# Corresponding error variables
if self.CNF:
xiOCand = np.matmul(A[O,:], wCand)
else:
xiOCand = np.matmul(A[O,:], wCand) < 1
# Candidates that satisfy overlap coverage constraint
idxFeas = np.where(xiOCand.sum(axis=0) <= round((1 - self.alpha) * nO))[0]
if self.CNF:
# Choose the densest such candidate
self.w = wCand[:, idxFeas[0]]
else:
# Choose the sparsest such candidate
self.w = wCand[:, idxFeas[-1]]
def get_objective_value(self, X, o, rounded=True):
if rounded:
w = self.w
else:
w = self.w_raw
U = np.where(o < 0)[0]
nU = len(U)
assert nU > 0
A = self.compute_conjunctions(X)
lambdas = self.lambda0 + self.lambda1 * self.z.sum().values
lambdas[0] = 0
if not self.CNF:
obj = np.sum(A[U,:].dot(w))/nU + lambdas.dot(w)
else:
obj = np.sum(np.maximum(1 - A[U, :].dot(w), 0))/nU + lambdas.dot(w)
return obj
def compute_conjunctions(self, X):
"""Compute conjunctions of features specified in self.z"""
try:
# Changed matmul to dot, because matmul failed on some machines
A = 1 - (np.dot(1 - X, self.z) > 0)
except AttributeError:
# without self.z there is nothing meaningful to return, so fail loudly
raise AttributeError("Attribute 'z' does not exist, please fit the model first.")
return A
def predict_(self, X, w):
"""Predict whether points belong to overlap region"""
# Compute conjunctions of features
A = self.compute_conjunctions(X)
# Predict labels
if self.CNF:
# Flip labels since model is actually a DNF for non-overlap
return 1 - (np.dot(A, w) > 0)
else:
return (np.dot(A, w) > 0).astype(int)
def predict(self, X):
"""Predict whether points belong to overlap region"""
# Use helper function
return self.predict_(X, self.w)
def predict_rules(self, X):
"""Predict whether points belong to overlap region"""
# Use helper function
A = self.compute_conjunctions(X)
if self.CNF:
# Flip labels since model is actually a DNF for non-overlap
# @TODO: Not sure if this is correct
return 1 - (A*self.w > 0)
else:
return ((A*self.w) > 0).astype(int)
def get_params(self, deep=False):
""" Returns estimator parameters """
# @TODO: Deep not implemented
return dict([(k, getattr(self, k)) for k in self.valid_params])
def set_params(self, **params):
""" Sets estimator parameters """
if not params:
return self
for k, v in params.items():
if k in self.valid_params:
setattr(self, k, v)
return self
if __name__ == '__main__':
# Load iris-"plus" data for testing
dirData = '../../Data/'
datasets = pd.read_pickle(os.path.join(dirData, 'datasets.pkl'))
ds = 'iris'
d = datasets[ds]
filePath = os.path.join(d['dirData'], d['fileName'] + '.csv')
data = pd.read_csv(filePath, names=d['colNames'], header=d['rowHeader'], error_bad_lines=False)
y = extract_target(data, **d)
# Binarize all features including negations
X = binarize_features(data, negations=True, **d)
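# A minimal usage sketch (an illustration, not part of the original script;
# assumes y encodes overlap samples as +1 as expected by fit()):
model = OverlapBooleanRule(alpha=0.9, hamming=True)
model.fit(X, y)
print('Fraction predicted in overlap region: %.3f' % model.predict(X).mean())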
| 39.212844 | 156 | 0.48271 | 2,658 | 21,371 | 3.820918 | 0.162528 | 0.023927 | 0.015754 | 0.020087 | 0.451457 | 0.401438 | 0.372095 | 0.34807 | 0.339602 | 0.330445 | 0 | 0.016931 | 0.39198 | 21,371 | 544 | 157 | 39.284926 | 0.764661 | 0.187029 | 0 | 0.52 | 0 | 0 | 0.027349 | 0 | 0 | 0 | 0 | 0.003676 | 0.010667 | 1 | 0.034667 | false | 0 | 0.021333 | 0 | 0.088 | 0.002667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84b9e9871acbe9ff363a0e59ef9598addf9b4e9d | 16,463 | py | Python | src/quo/i_o/termui.py | secretuminc/quo | c4f77d52f015c612d32ed0fc2fc79545af598f10 | [
"MIT"
] | 1 | 2021-02-15T03:56:00.000Z | 2021-02-15T03:56:00.000Z | src/quo/i_o/termui.py | chouette254/quo | fec78ae3b4a6d70501e2119868336c28c590fa50 | [
"MIT"
] | 3 | 2021-02-22T11:49:23.000Z | 2021-02-28T06:47:41.000Z | src/quo/i_o/termui.py | secretuminc/quo | c4f77d52f015c612d32ed0fc2fc79545af598f10 | [
"MIT"
] | null | null | null | import inspect
import io
import itertools
import os
import struct
import sys
import math
from typing import Any, Optional, IO
from quo.accordance import (
DEFAULT_COLUMNS,
get_winterm_size,
bit_bytes,
isatty,
strip_ansi_colors,
)
from quo.color import ansi_color_codes, _ansi_reset_all
from quo.errors import Abort, UsageError
from quo.context.current import resolve_color_default
from quo.types import Choice, convert_type
from quo.expediency import inscribe, LazyFile
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
insert = input
def hidden_prompt_func(prompt):
import getpass
return getpass.getpass(prompt)
def _build_prompt(
text, suffix, show_default=False, default=None, show_choices=True, type=None
):
prompt = text
if type is not None and show_choices and isinstance(type, Choice):
prompt += f" ({', '.join(map(str, type.choices))})"
if default is not None and show_default:
prompt = f"{prompt} [{_format_default(default)}]"
return f"{prompt}{suffix}"
def _format_default(default):
if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
return default.name
return default
##################################################################
#############################
##########################################################################
def confirm(
text,
default=False,
abort=False,
suffix=":>",
show_default=True,
err=False
):
"""Prompts for confirmation (yes/no question).
If the user aborts the input by sending an interrupt signal, this
function will catch it and raise an :exc:`Abort` exception.
:param text: the question to ask.
:param default: the default for the prompt.
:param abort: if this is set to `True` a negative answer aborts the
exception by raising :exc:`Abort`.
:param suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
"""
prompt = _build_prompt(
text, suffix, show_default, "Yes/no" if default else "yes/No"
)
while 1:
try:
echo(prompt, nl=False, err=err)
value = insert("").lower().strip()
except (KeyboardInterrupt, EOFError):
raise Abort()
if value in ("y", "yes"):
rv = True
elif value in ("n", "no"):
rv = False
elif default is not None and value == "":
rv = default
else:
echo(f"ERROR:", bg="red", fg="black", nl=False)
echo(f"invalid input", bg="yellow", fg="black", err=err)
continue
break
if abort and not rv:
raise Abort()
return rv
############
########################################################
def prompt(
text,
default=None,
hide=False,
affirm=False,
type=None,
value_proc=None,
suffix=":> ",
show_default=True,
err=False,
show_choices=True,
):
"""Prompts a user for input. This is a convenience function that can be used to prompt a user for input later.
If the user aborts the input by sending a interrupt signal, this function will catch it and raise a :exc:`Abort` exception.
:param text: the text to show for the prompt.
:param default: the default value to use if no input happens. If this is not given it will prompt until it's aborted.
:param hide: if this is set to true then the input value will be hidden.
:param affirm: asks for confirmation for the value.
:param type: the type to use to check the value against.
:param value_proc: if this parameter is provided it's a function that is invoked instead of the type conversion to convert a value.
:param suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of ``stdout``, the same as with echo.
:param show_choices: Show or hide choices if the passed type is a Choice. For example if type is a Choice of either day or week, show_choices is true and text is "Group by" then the prompt will be "Group by (day, week): ".
"""
result = None
def prompt_func(text):
f = hidden_prompt_func if hide else insert
try:
inscribe(text, nl=False, err=err)
return f("")
except (KeyboardInterrupt, EOFError):
# getpass doesn't print a newline if the user aborts input with ^C.
# Allegedly this behavior is inherited from getpass(3).
# A doc bug has been filed at https://bugs.python.org/issue24711
if hide:
inscribe(None, err=err)
raise Abort("You've aborted input")
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(
text, suffix, show_default, default, show_choices, type
)
while 1:
while 1:
value = prompt_func(prompt)
if value:
break
elif default is not None:
value = default
break
try:
result = value_proc(value)
except UsageError as e:
if hide:
inscribe("ERROR: the value you entered was invalid", err=err)
else:
inscribe(f"Error: {e.message}", err=err) # noqa: B306
continue
if not affirm:
return result
while 1:
value2 = prompt_func("Repeat for confirmation: ")
if value2:
break
if value == value2:
return result
echo(f"ERROR:", nl=False, fg="black", bg="red")
echo(f"The two entered values do not match", err=err, fg="black", bg="yellow")
def terminalsize():
"""Returns the current size of the terminal as tuple in the form
``(width, height)`` in columns and rows.
"""
import shutil
if hasattr(shutil, "terminalsize"):
return shutil.terminalsize()
# We provide a sensible default for get_winterm_size() when being invoked
# inside a subprocess. Without this, it would not provide a useful input.
if get_winterm_size is not None:
size = get_winterm_size()
if size == (0, 0):
return (79, 24)
else:
return size
def ioctl_gwinsz(fd):
try:
import fcntl
import termios
cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
except Exception:
return
return cr
cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
try:
cr = ioctl_gwinsz(fd)
finally:
os.close(fd)
except Exception:
pass
if not cr or not cr[0] or not cr[1]:
cr = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", DEFAULT_COLUMNS))
return int(cr[1]), int(cr[0])
def scrollable(text_or_generator, color=None):
"""This function takes a text and shows it via an environment specific
pager on stdout.
:param text_or_generator: the text to page, or alternatively, a
generator emitting the text to page.
:param color: controls if the pager supports ANSI colors or not. The
default is autodetection.
"""
color = resolve_color_default(color)
if inspect.isgeneratorfunction(text_or_generator):
i = text_or_generator()
elif isinstance(text_or_generator, str):
i = [text_or_generator]
else:
i = iter(text_or_generator)
# convert every element of i to a text type if necessary
text_generator = (el if isinstance(el, str) else str(el) for el in i)
from quo.implementation import pager
return pager(itertools.chain(text_generator, "\n"), color)
def _interpret_color(color, offset=0):
if isinstance(color, int):
return f"{38 + offset};5;{color:d}"
if isinstance(color, (tuple, list)):
r, g, b = color
return f"{38 + offset};2;{r:d};{g:d};{b:d}"
return str(ansi_color_codes[color] + offset)
def flair(
text,
fg=None,
bg=None,
bold=None,
dim=None,
hidden=None,
ul=None,
underline=None,
blink=None,
italic=None,
reverse=None,
reset=True,
strike=None,
):
"""Styles a text with ANSI styles and returns the new string. By
default the styling is self contained which means that at the end
of the string a reset code is issued. This can be prevented by
passing ``reset=False``.
Examples::
quo.inscribe(quo.style('Hello World!', foreground='green'))
quo.echo(quo.style('ATTENTION!', blink=True))
quo.echo(quo.style('Some things', reverse=True, foreground='cyan'))
quo.echo(quo.style('More colors', foreground=(255, 12, 128), background=117))
Note: v as in vblack or vred stands for vivid black or vivid red
Supported color names:
* ``black`` (might be a gray)
* ``red``
* ``green``
* ``yellow`` (might be an orange)
* ``blue``
* ``magenta``
* ``cyan``
* ``white`` (might be light gray)
* ``vblack``
* ``vred``
* ``vgreen``
* ``vyellow``
* ``vblue``
* ``vmagenta``
* ``vcyan``
* ``vwhite``
* ``reset`` (reset the color code only)
If the terminal supports it, color may also be specified as:
- An integer in the interval [0, 255]. The terminal must support
8-bit/256-color mode.
- An RGB tuple of three integers in [0, 255]. The terminal must
support 24-bit/true-color mode.
See https://en.wikipedia.org/wiki/ANSI_color and
https://gist.github.com/XVilka/8346728 for more information.
:param text: the string to style with ansi codes.
:param fg: if provided this will become the foreground color.
:param bg: if provided this will become the background color.
:param bold: if provided this will enable or disable bold mode.
:param dim: if provided this will enable or disable dim mode. This is
badly supported.
:param underline: if provided this will enable or disable underline.
:param blink: if provided this will enable or disable blinking.
:param reverse: if provided this will enable or disable inverse
rendering (foreground becomes background and the
other way round).
:param reset: by default a reset-all code is added at the end of the
string which means that styles do not carry over. This
can be disabled to compose styles.
"""
if not isinstance(text, str):
text = str(text)
bits = []
if fg:
try:
bits.append(f"\033[{_interpret_color(fg)}m")
except KeyError:
raise TypeError(f"Unknown color {fg!r}")
if bg:
try:
bits.append(f"\033[{_interpret_color(bg, 10)}m")
except KeyError:
raise TypeError(f"Unknown color {bg!r}")
if bold is not None:
bits.append(f"\033[{1 if bold else 22}m")
if dim is not None:
bits.append(f"\033[{2 if dim else 22}m")
if ul is not None:
bits.append(f"\033[{4 if ul else 24}m")
if underline is not None:
bits.append(f"\033[{4 if underline else 24}m")
if blink is not None:
bits.append(f"\033[{5 if blink else 25}m")
if reverse is not None:
bits.append(f"\033[{7 if reverse else 27}m")
if italic is not None:
bits.append(f"\033[{3 if italic else 23}m")
if hidden is not None:
bits.append(f"\033[{8 if hidden else 28}m")
if strike is not None:
bits.append(f"\033[{9 if strike else 29}m")
bits.append(text)
if reset:
bits.append(_ansi_reset_all)
return "".join(bits)
def unstyle(text):
"""Removes ANSI styling information from a string. Usually it's not
necessary to use this function as quo's echo function will
automatically remove styling if necessary.
:param text: the text to remove style information from.
"""
return strip_ansi_colors(text)
def edit(
text=None,
editor=None,
env=None,
require_save=True,
extension=".txt",
filename=None
):
r"""Edits the given text in the defined editor. If an editor is given
(should be the full path to the executable but the regular operating
system search path is used for finding the executable) it overrides
the detected editor. Optionally, some environment variables can be
used. If the editor is closed without changes, `None` is returned. In
case a file is edited directly the return value is always `None` and
`require_save` and `extension` are ignored.
If the editor cannot be opened a :exc:`UsageError` is raised.
Note for Windows: to simplify cross-platform usage, the newlines are
automatically converted from POSIX to Windows and vice versa. As such,
the message here will have ``\n`` as newline markers.
:param text: the text to edit.
:param editor: optionally the editor to use. Defaults to automatic
detection.
:param env: environment variables to forward to the editor.
:param require_save: if this is true, then not saving in the editor
will make the return value become `None`.
:param extension: the extension to tell the editor about. This defaults
to `.txt` but changing this might change syntax
highlighting.
:param filename: if provided it will edit this file instead of the
provided text contents. It will not use a temporary
file as an indirection in that case.
"""
from quo.implementation import Editor
editor = Editor(
editor=editor, env=env, require_save=require_save, extension=extension
)
if filename is None:
return editor.edit(text)
editor.edit_file(filename)
def launch(url, wait=False, locate=False):
"""This function launches the given URL (or filename) in the default
viewer application for this file type. If this is an executable, it
might launch the executable in a new session. The return value is
the exit code of the launched application. Usually, ``0`` indicates
success.
Examples::
quo.launch('https://quo.readthedocs.org/')
quo.launch('/my/downloaded/file', locate=True)
:param url: URL or filename of the thing to launch.
:param wait: Wait for the program to exit before returning. This
only works if the launched program blocks. In particular,
``xdg-open`` on Linux does not block.
:param locate: if this is set to `True` then instead of launching the
application associated with the URL it will attempt to
launch a file manager with the file located. This
might have weird effects if the URL does not point to
the filesystem.
"""
from quo.implementation import open_url
return open_url(url, wait=wait, locate=locate)
def raw_terminal():
from quo.implementation import raw_terminal as f
return f()
def echo(
message=None,
file: Optional[IO[str]] = None,
nl=True,
err=False,
color=None,
**styles
):
"""
quo.echo('Hello World!', fg='green')
quo.inscribe(quo.style('Hello World!', fg='green'))
All keyword arguments are forwarded to the underlying functions depending on which one they go with.
Non-string types will be converted to :class:`str`. However,
:class:`bytes` are passed directly to :meth:`inscribe` without applying
style. If you want to style bytes that represent text, call
:meth:`bytes.decode` first.
"""
if message is not None and not bit_bytes(message):
message = flair(message, **styles)
return inscribe(message, file=file, nl=nl, err=err, color=color)
| 32.860279 | 227 | 0.615137 | 2,246 | 16,463 | 4.461264 | 0.234194 | 0.007984 | 0.013473 | 0.011677 | 0.163772 | 0.151397 | 0.120559 | 0.07006 | 0.061677 | 0.056487 | 0 | 0.010602 | 0.283849 | 16,463 | 500 | 228 | 32.926 | 0.839271 | 0.47707 | 0 | 0.2 | 0 | 0 | 0.088082 | 0.013894 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062745 | false | 0.011765 | 0.086275 | 0 | 0.239216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84b9eb0c102288c50bbed9bdfca4a239cee038fc | 3,026 | py | Python | Magnetic Field/biot_square.py | ValeriaTelles/Physics-Programs | 9fdd1b60ad5dd9c6750855bf63c2aa89383a0b1a | [
"MIT"
] | null | null | null | Magnetic Field/biot_square.py | ValeriaTelles/Physics-Programs | 9fdd1b60ad5dd9c6750855bf63c2aa89383a0b1a | [
"MIT"
] | null | null | null | Magnetic Field/biot_square.py | ValeriaTelles/Physics-Programs | 9fdd1b60ad5dd9c6750855bf63c2aa89383a0b1a | [
"MIT"
] | null | null | null | # Name: Valeria Telles
# Date: 2 March 2020
# Program: biot_square.py
import numpy as np
import matplotlib.pyplot as plt
import time as time
from matplotlib.patches import Circle
def biot(Rvec, wire, I):
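# Biot-Savart law: B(Rvec) = (mu0/4pi) * I * sum over segments of dL x R / |R|^3,
# evaluated here by building dL with a central difference along the wire and
# integrating the contributions with Simpson's rule. (Comment added for
# clarity; mu_4pi = 10 reflects the scaled units chosen by the author.)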
mu_4pi = 10
dB = np.zeros((len(wire), 3))
R = Rvec - wire
Rsqr = np.sum( R**2, axis = 1 )
dL = (np.roll(wire, -1, axis = 0) - np.roll(wire, +1, axis = 0))/2
cr = np.cross(dL, R, axis = 1 )
dB = mu_4pi * I * cr/Rsqr[:,None]**(3/2)
dB = np.concatenate((dB, [dB[0,:]]), axis = 0)
Btot = np.array([simpson(dB[:,0], 1), simpson(dB[:,1], 1), simpson(dB[:,2], 1)])
return Btot
def simpson(f, dr):
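# Composite Simpson's rule in compact form: sum(f[1:] + f[:-1]) gives
# f[0] + 2*(interior) + f[-1], and adding 2*sum(f[1::2]) raises the
# odd-index weights to 4, recovering dr/3 * (f0 + 4*odd + 2*even + fn)
# (assumes an even number of intervals).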
total = dr/3*(np.sum(f[1:] + f[:-1]) + 2*np.sum(f[1::2]))
return total
def trapz(f, dL):
return dL/2*np.sum(f[1:] + f[:-1])
# setting up the square loop of wire
I = 0.01 # current, in amperes
L = 0.10 # side length of square, in m
N = 500 # number of segments per side of square
segL = L/N # segment length in m
# some useful segments to build a position array of the segments in square loop
A = L/2*np.ones(N)
B = np.arange(-L/2, L/2, segL)
# concatenate
wx = np.concatenate((B, A, -B, -A))
wy = np.concatenate((A, -B, -A, B))
wz = np.zeros(wx.size)
# column_stack
wire = np.column_stack((wx, wy, wz))
# test the biot function for a single point (the origin)
pointCalc = True
if pointCalc:
# choose a point in space at which to calculate B
point = np.array([0.0,0,0.0])
Ti = time.time()
# call the biot function to calculate B
B = biot(point, wire,I)
print(point)
print(B)
print("duration: %5f" % (time.time()-Ti) )
# Create a 2D grid of x, y points using numpy's meshgrid function
gridstep=50
nx, ny, nz = gridstep,gridstep,gridstep
x = np.linspace(-0.2, 0.2, nx)
y = np.linspace(-0.2, 0.2, ny)
z = np.linspace(-0.2, 0.2, nz)
# Set up meshgrid as needed for the particular 2D streamplot
X, Z = np.meshgrid(x,z)
# Set up 3D array, Bgrid, for x,y,z-components of B at points in space
Bgrid = np.zeros([nx,nz,3])
Ti = time.time()
# Use for loops to populate Bgrid array with relevant B-field values
for i in range(nx):
for k in range(nz):
Bgrid[k,i, :] = biot(np.array([x[i],0.,z[k]]),wire,I)
#Bgrid[k,i, :] = biot(np.array([x[i],0.1,z[k]]),wire,I)
# you can change which plane you are viewing it from as well
print("duration: %5f" % (time.time()-Ti) )
# plotting and formatting
fig, ax = plt.subplots(figsize=(10,10))
# Use streamplot to show B-field
ax.streamplot(X,Z,Bgrid[:,:,0],Bgrid[:,:,2], color = '0.50')
ax.set_aspect('equal')
ax.set_xlim((-0.2,0.2))
ax.set_ylim((-0.2,0.2))
# add circles to plot to show where wire cross sections
ax.add_artist(Circle((L/2,0),0.005,color='#aa0000'))
ax.add_artist(Circle((-L/2,0),0.005,color='#0000aa'))
ax.set_ylabel('z-direction', fontsize = '14') # the streamplot shows the y=0 (x-z) plane
ax.set_xlabel('x-direction', fontsize = '14')
ax.set_title('Magnetic Field due to a Square Loop', fontweight = 'bold', fontsize = '18') | 25.863248 | 89 | 0.616986 | 552 | 3,026 | 3.358696 | 0.326087 | 0.011866 | 0.008091 | 0.010787 | 0.161273 | 0.130529 | 0.053937 | 0.053937 | 0.053937 | 0.031284 | 0 | 0.050146 | 0.209187 | 3,026 | 117 | 89 | 25.863248 | 0.724613 | 0.307667 | 0 | 0.066667 | 0 | 0 | 0.055985 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.066667 | 0.016667 | 0.166667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84bb50006f22e7f74b51168de711fdca99d21854 | 1,612 | py | Python | RCN_for_CAPTCHA_v1.0/src/get_test_data.py | ZhongqiuWang/RCN_for_CAPTCHA | ea231c63ea72437e1b47f8b0684f1dcbf1973fa3 | [
"MIT"
] | 2 | 2021-02-20T15:43:43.000Z | 2021-02-20T15:43:51.000Z | RCN_for_CAPTCHA_v1.0/src/get_test_data.py | ZhongqiuWang/RCN_for_CAPTCHA | ea231c63ea72437e1b47f8b0684f1dcbf1973fa3 | [
"MIT"
] | null | null | null | RCN_for_CAPTCHA_v1.0/src/get_test_data.py | ZhongqiuWang/RCN_for_CAPTCHA | ea231c63ea72437e1b47f8b0684f1dcbf1973fa3 | [
"MIT"
] | null | null | null | import os
import shutil
import numpy as np
from cv2 import imread
def get_captcha_data_iters(data_dir, test_size, target_data_dir, seed=5):
"""
获取测试图片
:param data_dir: 测试图片文件所在路径
:param test_size: 测试图片张数
:param target_data_dir: 将选出的测试图片放到该路径下
:param seed: 随机种子
:return: test_set
长度为test_size的list, 其中每个元素为一个二元tuple, (img, label), label是一个str
"""
if not os.path.isdir(data_dir):
raise IOError("Can't find your data dir '{}'".format(data_dir))
def _load_data(image_dir, num, target_dir):
loaded_data = []
samples = np.random.choice(sorted(os.listdir(image_dir)), num, replace=False)
tag = 0
for fname in samples:
filepath = os.path.join(image_dir, fname)
img = imread(filepath, 0) # read the image as a grayscale array
fname_new = fname[:-4] # strip the .png suffix; what remains is the label
loaded_data.append((img, fname_new)) # the data to load
# copy the selected image into target_dir
new_name = str(tag)+'_'+fname
target_file = os.path.join(target_dir, new_name)
shutil.copy(filepath, target_file)
tag += 1
return loaded_data
def _remove_all(path):
"""
删除path下所有文件
"""
for i in os.listdir(path):
path_file = os.path.join(path, i)
if os.path.isfile(path_file):
os.remove(path_file)
np.random.seed(seed)
test_data_dir = os.path.join(target_data_dir, 'test_set')
_remove_all(test_data_dir)
test_set = _load_data(data_dir, test_size, test_data_dir)
return test_set
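if __name__ == '__main__':
# Hypothetical usage sketch (paths and sizes are placeholders,
# not from the original repo):
test_set = get_captcha_data_iters('data/captcha_images', 100, 'data', seed=5)
print('loaded %d test images' % len(test_set))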
| 29.309091 | 85 | 0.616625 | 212 | 1,612 | 4.429245 | 0.386792 | 0.089457 | 0.046858 | 0.031949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005195 | 0.283499 | 1,612 | 54 | 86 | 29.851852 | 0.807792 | 0.175558 | 0 | 0 | 0 | 0 | 0.030087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.129032 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84bd5f607b82fa8a39bd0ca835a5cf720f40f089 | 27,550 | py | Python | overrides.py | inuitwallet/plunge_android | 0a23a78218a9852f0b9363b276c942ca2e517823 | [
"MIT"
] | null | null | null | overrides.py | inuitwallet/plunge_android | 0a23a78218a9852f0b9363b276c942ca2e517823 | [
"MIT"
] | null | null | null | overrides.py | inuitwallet/plunge_android | 0a23a78218a9852f0b9363b276c942ca2e517823 | [
"MIT"
] | null | null | null | import json
from kivy.app import App
from kivy.config import ConfigParser
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.slider import Slider
from kivy.metrics import dp
from kivy.uix.settings import SettingString, SettingSpacer, SettingNumeric, InterfaceWithTabbedPanel, Settings
from kivy.uix.textinput import TextInput
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.spinner import Spinner
from kivy.network.urlrequest import UrlRequest
import utils
import logging
__author__ = 'woolly_sammoth'
class InterfaceWithCloseButton(InterfaceWithTabbedPanel):
def add_panel(self, panel, name, uid):
scrollview = ScrollView()
scrollview.add_widget(panel)
self.tabbedpanel.default_tab_text = 'Plunge Configuration'
self.tabbedpanel.default_tab_content = scrollview
self.tabbedpanel.tab_width = 0.000001
class SettingsWithCloseButton(Settings):
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithCloseButton
super(SettingsWithCloseButton, self).__init__(*args, **kwargs)
class SettingStringFocus(SettingString):
"""
Overrides the SettingString class to automatically give keyboard focus to the input field of the pop up
"""
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, None),
size=(popup_width, '250dp'))
# create the textinput used for numeric input
self.textinput = textinput = TextInput(
text=self.value, font_size='24sp', multiline=False,
size_hint_y=None, height='42sp')
textinput.bind(on_text_validate=self._validate)
self.textinput = textinput
# construct the content, widget are used as a spacer
content.add_widget(Widget())
content.add_widget(textinput)
content.add_widget(Widget())
content.add_widget(SettingSpacer())
# 2 buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
textinput.focus = True
textinput.cursor = (1, 3000)
class SettingNumericFocus(SettingNumeric):
"""
Overrides the SettingNumeric class to automatically give keyboard focus to the input field of the pop up
"""
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, None),
size=(popup_width, '250dp'))
# create the textinput used for numeric input
self.textinput = textinput = TextInput(
text=self.value, font_size='24sp', multiline=False,
size_hint_y=None, height='42sp')
textinput.bind(on_text_validate=self._validate)
self.textinput = textinput
# construct the content, widget are used as a spacer
content.add_widget(Widget())
content.add_widget(textinput)
content.add_widget(Widget())
content.add_widget(SettingSpacer())
# 2 buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
textinput.focus = True
textinput.cursor = (1, 3000)
class SettingStringExchange(SettingString):
"""
Overrides the SettingString class to provide a customised popup suitable for exchange data input
"""
num_rows = 0
exchange = None
chosen_api_key_pair = None
ask_max = None
bid_max = None
utils = utils.utils('')
keys_button = []
address = []
unit = []
rates = []
bot = []
logger = logging.getLogger('Plunge')
currencies = ['btc', 'ltc', 'eur', 'usd', 'ppc']
bots = ['nubot', 'pybot', 'none']
config = ConfigParser()
def on_panel(self, instance, value):
if value is None:
return
self.bind(on_release=self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
self.num_rows = 0
self.keys_button = []
self.address = []
self.unit = []
self.rates = []
self.bot = []
def _validate(self, instance):
with open('user_data.json', 'a+') as user_data:
user_data.seek(0) # 'a+' opens positioned at EOF; rewind before reading
try:
saved_data = json.load(user_data)
except ValueError:
saved_data = {}
user_data.close()
saved_data[self.exchange] = []
good_records = 0
content = TextInput(multiline=True, text='Saving...', background_color=[0.13725, 0.12157, 0.12549, 0],
foreground_color=[1, 1, 1, 1])
popup = Popup(title='Saving Data for %s' % self.exchange, content=content,
size_hint=(None, None), size=(300, 500))
popup.open()
for x in range(0, self.num_rows, 1):
self.logger.info("saving row %d for %s" % (x+1, self.exchange))
content.text = '%s\nSaving row %d' % (content.text, x+1)
this_row = {}
public, secret = self.get_keys(self.keys_button[x].text)
if public is None or secret is None:
self.logger.warn("API Keys not set correctly")
content.text = '%s\n=> API Keys not set correctly' % content.text
continue
this_row['public'] = public
this_row['secret'] = secret
this_row['address'] = self.address[x].text
if not self.utils.check_checksum(this_row['address']) or not this_row['address'][:1] == 'B':
self.logger.warn("Invalid payout address %s" % this_row['address'])
content.text = '%s\n=> Invalid payout address' % content.text
continue
this_row['unit'] = self.unit[x].text
rates = self.rates[x].text
if "|" not in rates:
self.logger.warn("no rates set")
content.text = '%s\n=> No rates set' % content.text
continue
rate = rates.split(' | ')
this_row['ask'] = rate[0]
this_row['bid'] = rate[1]
# rates are stored as strings, so compare numerically; fall back to the
# pool maximum when a slider was left at zero
if float(this_row['ask']) == 0.0:
this_row['ask'] = str(self.ask_max)
if float(this_row['bid']) == 0.0:
this_row['bid'] = str(self.bid_max)
this_row['bot'] = self.bot[x].text
if this_row in saved_data[self.exchange]:
self.logger.warn("data already exists")
content.text = '%s\n=> Data already exists' % content.text
continue
saved_data[self.exchange].append(this_row)
good_records += 1
content.text = '%s\nRow %d saved' % (content.text, x+1)
self.logger.info(str(this_row))
with open('user_data.json', 'w') as user_data:
user_data.write(json.dumps(saved_data))
user_data.close()
content.text = '%s\nData Saved' % content.text
self._dismiss()
value = str(good_records)
self.value = value
def _create_popup(self, instance):
"""
Create the main Exchange popup to which new rows can be added
:param instance:
:return:
"""
self.exchange = self.key
main_layout = BoxLayout(orientation='vertical', spacing='5dp')
scroll_view = ScrollView(do_scroll_x=False)
header = GridLayout(cols=5, spacing='5dp', row_default_height='50dp', row_force_default=True,
size_hint_y=None, height='50dp')
header.add_widget(Label(text='API', valign='top', size_hint_x=0.2))
header.add_widget(Label(text='NBT', valign='top', size_hint_x=0.2))
header.add_widget(Label(text='Cur', valign='top', size_hint_x=0.2))
header.add_widget(Label(text='rates', valign='top', size_hint_x=0.2))
header.add_widget(Label(text='Bot', valign='top', size_hint_x=0.2))
self.content = GridLayout(cols=5, spacing='5dp', row_default_height='50dp', row_force_default=True,
size_hint_x=1, size_hint_y=None)
self.content.bind(minimum_height=self.content.setter('height'))
main_layout.add_widget(header)
scroll_view.add_widget(self.content)
main_layout.add_widget(scroll_view)
self.popup = popup = Popup(
title=self.title, content=main_layout)
# construct the content, widget are used as a spacer
main_layout.add_widget(SettingSpacer())
# buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
btn = Button(text='Add Row')
btn.bind(on_release=self.add_row)
btnlayout.add_widget(btn)
main_layout.add_widget(btnlayout)
self.load_data()
# all done, open the popup !
popup.open()
def load_data(self):
with open('user_data.json', 'a+') as data_file:
data_file.seek(0) # 'a+' opens positioned at EOF; rewind before reading
try:
data = json.load(data_file)
except ValueError:
data = {}
data_file.close()
if self.exchange not in data:
self.add_row(None)
return
if len(data[self.exchange]) == 0:
self.add_row(None)
return
for datum in data[self.exchange]:
self.add_row(datum)
def add_row(self, instance):
"""
Add a row to the main exchange screen
:param instance:
:return:
"""
self.num_rows += 1
keys_button = Button(text='Keys', size_hint_x=0.2, id='%d' % self.num_rows)
keys_button.bind(on_release=self.enter_keys)
self.content.add_widget(keys_button)
self.keys_button.append(keys_button)
address = TextInput(size_hint_x=0.2, padding=[6, 10, 6, 10],
multiline=False, font_size=18, id='%d' % self.num_rows)
address.bind(text=self.check_address)
self.content.add_widget(address)
self.address.append(address)
unit = Spinner(values=self.currencies, text=self.currencies[0], size_hint_x=0.2, id='%d' % self.num_rows)
self.selected_unit = self.currencies[0]
unit.bind(text=self.set_unit)
self.content.add_widget(unit)
self.unit.append(unit)
rates = Button(text='Rates', size_hint_x=0.2, id='%d' % self.num_rows)
rates.bind(on_release=self.enter_rates)
self.content.add_widget(rates)
self.rates.append(rates)
bot = Spinner(values=self.bots, text=self.bots[0], size_hint_x=0.2, id='%d' % self.num_rows)
self.selected_bot = self.bots[0]
bot.bind(text=self.set_bot)
self.content.add_widget(bot)
self.bot.append(bot)
if isinstance(instance, dict):
keys_button.text = instance['public'][:8] + ' / ' + instance['secret'][:8]
address.text = instance['address']
unit.text = instance['unit']
rates.text = instance['ask'] + ' | ' + instance['bid']
bot.text = instance['bot']
def enter_keys(self, instance):
"""
Show a pop-up in which previously entered api keys can be selected from a drop down
There are edit and add buttons on the bottom which fire other methods
:param instance:
:return:
"""
self.calling_keys_button = instance
content = BoxLayout(orientation='vertical', spacing=10)
top = BoxLayout(orientation='vertical', size_hint=(1, 0.7))
top.add_widget(Label(text='API Key Pair', size_hint=(1, None), height='70dp'))
self.api_key_spinner = Spinner(size_hint=(1, None), height='40dp')
top.add_widget(self.api_key_spinner)
self.api_key_spinner.bind(text=self.enable_edit)
top.add_widget(BoxLayout())
btnlayout = BoxLayout(spacing='5dp', size_hint=(1, 0.15))
btn = Button(text='Ok', size_hint_y=None, height='50dp')
btn.bind(on_release=self.close_api_keys_popup)
btnlayout.add_widget(btn)
btn = Button(text='Cancel', size_hint_y=None, height='50dp')
btn.bind(on_release=self.close_api_keys_popup)
btnlayout.add_widget(btn)
self.edit_keys_button = Button(text='Edit Keys', size_hint_y=None, height='50dp', disabled=True)
self.edit_keys_button.bind(on_release=self.edit_keys)
btnlayout.add_widget(self.edit_keys_button)
self.add_keys_button = Button(text='Add Keys', size_hint_y=None, height='50dp')
self.add_keys_button.bind(on_release=self.add_keys)
btnlayout.add_widget(self.add_keys_button)
content.add_widget(top)
content.add_widget(SettingSpacer())
content.add_widget(btnlayout)
popup_width = min(0.95 * Window.width, dp(500))
self.enter_keys_popup = Popup(title='API Keys', content=content, auto_dismiss=False,
size_hint=(None, None), size=(popup_width, '250dp'))
self.update_api_spinners()
if instance.text != 'Keys':
self.api_key_spinner.text = instance.text
self.enter_keys_popup.open()
def enable_edit(self, instance, value):
"""
The Edit button on the 'enter_api_keys' popup starts disabled.
It is only enabled when a selection is made in the spinner
:param instance:
:param value:
:return:
"""
if value == '':
self.edit_keys_button.disabled = True
else:
self.edit_keys_button.disabled = False
self.edit_keys_button.id = value
self.chosen_api_key_pair = value
def edit_keys(self, instance):
"""
Simply shows the add_keys popup with edit mode enabled
:param instance:
:return:
"""
self.add_keys(instance, True)
def add_keys(self, instance, edit=False):
"""
Show a different pop-up into which api_keys can be entered.
In edit mode the fields are pre-populated and a delete button is shown
:param instance:
:param edit:
:return:
"""
content = BoxLayout(orientation='vertical', spacing=10)
grid = GridLayout(cols=2, spacing=10, size_hint=(1, 0.85))
grid.add_widget(Label(text='Public', size_hint_x=None, width='100dp'))
self.add_public_key = TextInput(size_hint=(1, None), height='40dp')
self.add_public_key.bind(text=self.tab_switch)
grid.add_widget(self.add_public_key)
grid.add_widget(Label(text='Secret', size_hint_x=None, width='100dp'))
self.add_secret_key = TextInput(size_hint=(1, None), height='40dp')
self.add_secret_key.bind(text=self.tab_switch)
grid.add_widget(self.add_secret_key)
btnlayout = BoxLayout(spacing='5dp', size_hint=(1, 0.15))
ok_btn = Button(text='Ok', size_hint_y=None, height='50dp')
ok_btn.bind(on_release=self.save_api_keys)
btnlayout.add_widget(ok_btn)
btn = Button(text='Cancel', size_hint_y=None, height='50dp')
btn.bind(on_release=self.save_api_keys)
btnlayout.add_widget(btn)
self.edit_public, self.edit_secret = None, None
if edit is True:
self.edit_public, self.edit_secret = self.get_keys(instance.id)
if self.edit_public is None and self.edit_secret is None:
return
self.add_public_key.text = self.edit_public
self.add_secret_key.text = self.edit_secret
btn = Button(text='Delete', size_hint_y=None, height='50dp')
btn.bind(on_release=self.delete_api_keys)
btnlayout.add_widget(btn)
content.add_widget(SettingSpacer())
content.add_widget(grid)
content.add_widget(btnlayout)
self.add_keys_popup = Popup(title='Add API Keys', content=content, auto_dismiss=False,
size_hint=(1, None), height='250dp')
self.add_keys_popup.open()
self.add_public_key.focus = True
def tab_switch(self, instance, value):
"""
tab switches from public to secret and back
:return:
"""
if '\t' not in value:
return
instance.text = value.replace('\t', '')
if instance == self.add_public_key:
self.add_secret_key.focus = True
else:
self.add_public_key.focus = True
def update_api_spinners(self):
"""
Populate the api_key selection spinner on the 'enter_api_keys' popup
:return:
"""
api_keys = self.fetch_api_keys_from_file()
self.api_key_spinner.values = []
self.api_key_spinner.text = ''
for key_set in api_keys:
if key_set['exchange'] != self.exchange:
continue
self.api_key_spinner.values.append(key_set['public'][:8] + ' / ' + key_set['secret'][:8])
if self.chosen_api_key_pair is not None:
self.api_key_spinner.text = self.chosen_api_key_pair
def get_keys(self, keys):
"""
When supplied truncated keys (as shown in the selection spinner)
Get the full keys from the data file, ready for editting or saving
:param keys:
:return:
"""
public = None
secret = None
if keys == 'Keys':
return public, secret
keys = keys.split(' / ')
pub_key = keys[0]
sec_key = keys[1]
api_keys = self.fetch_api_keys_from_file()
for key_set in api_keys:
if key_set['exchange'] == self.exchange and key_set['public'][:8] == pub_key and key_set['secret'][:8] == sec_key:
public = key_set['public']
secret = key_set['secret']
return public, secret
def close_api_keys_popup(self, instance):
"""
close the "enter_api_keys" popup.
Cancel has no effect.
OK saves the api key selection in te main data file
:param instance:
:return:
"""
if instance.text == "Ok" and self.api_key_spinner.text != '':
self.calling_keys_button.text = self.chosen_api_key_pair
self.chosen_api_key_pair = None
self.enter_keys_popup.dismiss()
def save_api_keys(self, instance):
"""
Save the Api Keys entered into the 'add_api_keys' popup
These are saved to their own file for separate parsing
:param instance:
:return:
"""
if instance.text == "Cancel" or self.add_public_key.text == "" or self.add_secret_key.text == "":
self.add_keys_popup.dismiss()
return
api_keys = self.fetch_api_keys_from_file()
if self.edit_public is not None and self.edit_secret is not None:
for key_set in api_keys:
if key_set['exchange'] == self.exchange and key_set['public'] == self.edit_public and key_set['secret'] == self.edit_secret:
key_set['public'] = self.add_public_key.text
key_set['secret'] = self.add_secret_key.text
else:
this_keys = {'exchange': self.exchange,
'public': self.add_public_key.text,
'secret': self.add_secret_key.text}
for key_set in api_keys:
if key_set == this_keys:
return
api_keys.append(this_keys)
self.save_api_keys_to_file(api_keys)
self.chosen_api_key_pair = self.add_public_key.text[:8] + ' / ' + self.add_secret_key.text[:8]
self.update_api_spinners()
self.add_keys_popup.dismiss()
def delete_api_keys(self, instance):
"""
remove the chosen api key selection from the saved list
:param instance:
:return:
"""
with open('api_keys.json', 'r') as api_keys_file:
try:
api_keys = json.load(api_keys_file)
except ValueError:
api_keys = []
api_keys_file.close()
if self.edit_public is not None and self.edit_secret is not None:
new_api_keys = []
for key_set in api_keys:
if key_set['exchange'] == self.exchange and key_set['public'] == self.edit_public and key_set['secret'] == self.edit_secret:
continue
new_api_keys.append(key_set)
with open('api_keys.json', 'w+') as api_keys_file:
api_keys_file.write(json.dumps(new_api_keys))
api_keys_file.close()
if self.calling_keys_button.text == self.edit_public[:8] + " / " + self.edit_secret[:8]:
self.calling_keys_button.text = 'Keys'
self.chosen_api_key_pair = None
self.update_api_spinners()
self.add_keys_popup.dismiss()
def fetch_api_keys_from_file(self):
"""
get all api_keys currently saved in the api_keys.json file
:return:
"""
config_ini = App.get_running_app().get_application_config()
self.config.read(config_ini)
api_keys = self.config.getdefault('user_data', 'api_keys', '')
if api_keys == '':
api_keys = []
else:
# the value is stored as the repr of a list of dicts, so parse it
# back safely instead of iterating the raw string character by character
import ast
api_keys = ast.literal_eval(api_keys)
self.logger.info("got api keys %s" % str(api_keys))
return api_keys
def save_api_keys_to_file(self, api_keys):
"""
save the api_keys json instance back to the file
:param api_keys:
:return:
"""
config_ini = App.get_running_app().get_application_config()
self.config.read(config_ini)
self.logger.info("set api keys %s" % str(api_keys))
self.config.set('user_data', 'api_keys', str(api_keys))
self.config.write() # persist the change; set() alone does not save
def check_address(self, instance, value):
"""
validate an entered address by checking the checksum and ensuring the first character is 'B'
:param instance:
:param value:
:return:
"""
if self.utils.check_checksum(value) and value[:1] == 'B':
instance.foreground_color = (0, 0, 0, 1)
else:
instance.foreground_color = (0.93725, 0.31176, 0.17843, 1)
def set_unit(self, instance, value):
self.selected_unit = value
def set_bot(self, instance, value):
self.selected_bot = value
def set_pool_maximum_rate(self, req, result):
if self.exchange not in result:
self.rates_error(req, result)
return
if self.selected_unit.lower() not in result[self.exchange]:
self.rates_error(req, result)
return
self.ask_max = (result[self.exchange][self.selected_unit.lower()]['ask']['rate'] * 100)
self.bid_max = (result[self.exchange][self.selected_unit.lower()]['bid']['rate'] * 100)
self.ask_slider.max = self.ask_max
self.bid_slider.max = self.bid_max
def rates_error(self, req, result):
self.rates_content.add_widget(Label(text='Unable to get Maximum rate data from the server'))
self.ask_slider.max = 0
self.bid_slider.max = 0
def enter_rates(self, instance):
"""
Show a pop-up in which minimum interest rates can be entered on sliders
:param instance:
:return:
"""
self.calling_rates_button = instance
self.rates_content = BoxLayout(orientation='vertical')
config_ini = App.get_running_app().get_application_config()
self.config.read(config_ini)
url = "http://%s:%s/exchanges" % (self.config.get('server', 'host'), self.config.get('server', 'port'))
self.ask_slider = Slider(step=0.01, size_hint=(0.9, 1))
self.bid_slider = Slider(step=0.01, size_hint=(0.9, 1))
req = UrlRequest(url, self.set_pool_maximum_rate, self.rates_error, self.rates_error)
self.ask_slider.bind(on_touch_down=self.update_slider_values)
self.ask_slider.bind(on_touch_up=self.update_slider_values)
self.ask_slider.bind(on_touch_move=self.update_slider_values)
self.bid_slider.bind(on_touch_down=self.update_slider_values)
self.bid_slider.bind(on_touch_up=self.update_slider_values)
self.bid_slider.bind(on_touch_move=self.update_slider_values)
self.rates_content.add_widget(Label(text='Minimal Ask Rate'))
ask_layout = BoxLayout()
ask_layout.add_widget(self.ask_slider)
self.ask_value = Label(size_hint=(0.1, 1))
ask_layout.add_widget(self.ask_value)
self.rates_content.add_widget(ask_layout)
self.rates_content.add_widget(Label(text='Minimal Bid Rate'))
bid_layout = BoxLayout()
bid_layout.add_widget(self.bid_slider)
self.bid_value = Label(size_hint=(0.1, 1))
bid_layout.add_widget(self.bid_value)
self.rates_content.add_widget(bid_layout)
if instance.text != 'Set Rates':
rates = instance.text.split(' | ')
self.ask_slider.value = float(rates[0])
self.bid_slider.value = float(rates[1])
self.update_slider_values(None, None)
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self.close_rates_popup)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self.close_rates_popup)
btnlayout.add_widget(btn)
self.rates_content.add_widget(btnlayout)
popup_width = min(0.95 * Window.width, dp(500))
self.rates_popup = Popup(title='Minimal Interest Rates', content=self.rates_content, auto_dismiss=False,
size_hint=(None, None), size=(popup_width, '300dp'))
self.rates_popup.open()
def update_slider_values(self, instance, value):
self.ask_value.text = str(self.ask_slider.value)
self.bid_value.text = str(self.bid_slider.value)
def close_rates_popup(self, instance):
if instance.text == "Ok":
self.calling_rates_button.text = str(self.ask_slider.value) + ' | ' + str(self.bid_slider.value)
self.rates_popup.dismiss()
| 40.754438 | 140 | 0.616189 | 3,590 | 27,550 | 4.521727 | 0.100836 | 0.028461 | 0.026612 | 0.019898 | 0.509826 | 0.435779 | 0.358899 | 0.325633 | 0.300499 | 0.284852 | 0 | 0.014646 | 0.271361 | 27,550 | 675 | 141 | 40.814815 | 0.794012 | 0.094483 | 0 | 0.309381 | 0 | 0 | 0.054775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05988 | false | 0 | 0.037924 | 0 | 0.161677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84bfb20ccc017f5c1536067957e13d0482a7d66c | 3,370 | py | Python | scripts/read_msd2.py | pzarabadip/thermof | 44c33e37aeadce31241a878135b1531757a2b1c4 | [
"MIT"
] | null | null | null | scripts/read_msd2.py | pzarabadip/thermof | 44c33e37aeadce31241a878135b1531757a2b1c4 | [
"MIT"
] | 11 | 2017-08-15T22:11:32.000Z | 2017-08-22T23:32:17.000Z | scripts/read_msd2.py | pzarabadip/thermof | 44c33e37aeadce31241a878135b1531757a2b1c4 | [
"MIT"
] | 1 | 2017-01-31T19:44:02.000Z | 2017-01-31T19:44:02.000Z | """
Reads mean squared displacement for a list of trials and saves results to a yaml file
"""
import os
import numpy as np
import yaml
from thermof.trajectory import Trajectory
from thermof.read import read_run_info
# --------------------------------------------------------------------------------------------------
main = ''
box1_atoms = [234, 298, 226, 290, 233, 297, 225, 289]
box2_atoms = [3818, 3882, 3810, 3874, 3817, 3881, 3809, 3873]
ipbox_atoms = box1_atoms + box2_atoms
results_file = '%s-MSD-results.yaml' % os.path.basename(main)
def msd(coordinates, unit_cell, reference_frame=0):
n_frames, n_atoms = np.shape(coordinates)[:2]
ref_coordinates = coordinates[reference_frame]
displacement = np.zeros((n_frames, 3))
for frame_idx, frame in enumerate(coordinates):
d_tot = np.zeros((3, ))
for atom_idx, (atom, ref_atom) in enumerate(zip(frame, ref_coordinates)):
d = np.zeros((3, ))
for i in range(3):
d[i] = atom[i] - ref_atom[i]
if d[i] > unit_cell[i] * 0.5:
d[i] = d[i] - unit_cell[i]
elif d[i] <= -unit_cell[i] * 0.5:
d[i] = d[i] + unit_cell[i]
d_tot += d ** 2
displacement[frame_idx] = d_tot / n_atoms
return np.sum(np.average(displacement, axis=0))
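# A minimal vectorized sketch of the same computation (illustrative; the script
# below keeps using msd). Assumes an orthorhombic cell and, like the loop above,
# displacements of at most one cell length per axis.
def msd_vectorized(coordinates, unit_cell, reference_frame=0):
    coords = np.asarray(coordinates, dtype=float)        # (n_frames, n_atoms, 3)
    cell = np.asarray(unit_cell, dtype=float)
    d = coords - coords[reference_frame]                 # displacement from the reference frame
    d -= np.round(d / cell) * cell                       # minimum-image convention
    per_frame = (d ** 2).sum(axis=1) / coords.shape[1]   # mean over atoms, per axis
    return per_frame.mean(axis=0).sum()                  # average over frames, sum over x, y, z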
# --------------------------------------------------------------------------------------------------
trial_list = [os.path.join(main, i) for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
results = dict(msd1=[], msd2=[], msd=[], sigma=[], epsilon=[], trial=[])
for trial_index, trial in enumerate(trial_list, start=1):
trial_name = os.path.basename(trial)
print('\n%i / %i | %s #################################' % (trial_index, len(trial_list), trial_name), flush=True)
run_list = [os.path.join(trial, i) for i in os.listdir(trial) if os.path.isdir(os.path.join(trial, i))]  # collected up front; needed for run_info regardless of the branch below
if trial_name not in ['S6.00-E0.80', 'S6.00-E1.00']:
msd1_avg, msd2_avg, msd_avg = [], [], []
for run in run_list:
traj_file = os.path.join(run, 'traj.xyz')
traj = Trajectory(read=traj_file)
traj_box1 = traj.subdivide(atoms=box1_atoms)
msd1 = msd(traj_box1.coordinates, [80, 80, 80])
msd1_avg.append(msd1)
traj_box2 = traj.subdivide(atoms=box2_atoms)
msd2 = msd(traj_box2.coordinates, [80, 80, 80])
msd2_avg.append(msd2)
traj_ipbox = traj.subdivide(atoms=ipbox_atoms)
msdip = msd(traj_ipbox.coordinates, [80, 80, 80])
msd_avg.append(msdip)
results['msd1'].append(sum(msd1_avg) / len(msd1_avg))
results['msd2'].append(sum(msd2_avg) / len(msd2_avg))
results['msd'].append(sum(msd_avg) / len(msd_avg))
print('MSD1: %.2f (%i) | MSD2: %.2f (%i) | MSD: %.2f (%i)'
% (results['msd1'][-1], len(msd1_avg), results['msd2'][-1], len(msd2_avg), results['msd'][-1], len(msd_avg)))
else:
results['msd1'].append(None)
results['msd2'].append(None)
results['msd'].append(None)
run_info = read_run_info(run_list[0])  # sigma/epsilon assumed identical across runs within a trial
results['sigma'].append(run_info['sigma'])
results['epsilon'].append(run_info['epsilon'])
results['trial'].append(os.path.basename(trial))
with open(results_file, 'w') as rfile:
yaml.dump(results, rfile)
| 43.205128 | 123 | 0.563501 | 465 | 3,370 | 3.931183 | 0.251613 | 0.032823 | 0.027352 | 0.021882 | 0.137856 | 0.071116 | 0.053611 | 0.028446 | 0.028446 | 0.028446 | 0 | 0.052731 | 0.217804 | 3,370 | 77 | 124 | 43.766234 | 0.640744 | 0.084273 | 0 | 0 | 0 | 0.016393 | 0.067598 | 0.010725 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.065574 | 0 | 0.098361 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84c0de3f23adbe842d0efaadf9c2467bc66d8a1e | 4,230 | py | Python | LineDetection/line_following_opencv.py | angrajales/SDC_Eafit | ecbde0235100655baa1cbd189ab02ac2c9f35e67 | [
"Apache-2.0"
] | null | null | null | LineDetection/line_following_opencv.py | angrajales/SDC_Eafit | ecbde0235100655baa1cbd189ab02ac2c9f35e67 | [
"Apache-2.0"
] | null | null | null | LineDetection/line_following_opencv.py | angrajales/SDC_Eafit | ecbde0235100655baa1cbd189ab02ac2c9f35e67 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
class LineFollowing(object):
def __init__(self):
self.polyLeft1 = 450
self.polyRight1 = 320
self.polyLeft2 = 500
self.polyRight2 = 320
self.ignore_mask_color = 255 # White color
def next_action(self, frame, slope=-1):
p_next_action = "do_nothing"
canny = self.__do_canny(frame)
frame_shape = frame.shape
vertices = np.array([[(0,frame_shape[0]),(self.polyLeft1, self.polyRight1), (self.polyLeft2, self.polyRight2), (frame_shape[1],frame_shape[0])]], dtype=np.int32)
mask = np.zeros_like(canny)
cv2.fillPoly(mask, vertices, self.ignore_mask_color)
masked_edges = cv2.bitwise_and(canny, mask)
########################### Parameter Tunning ####################################
[rho, theta, threshold, min_line_length, max_line_gap] = self.__tune_params()
line_image = np.copy(frame) * 0
############################ Finding Lines #######################################
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
############################ Next Action #########################################
last_valid_slope = slope
lines_edges = None
if lines is None or not lines.any():
p_next_action = "do_nothing"
else:
for line in lines:
n_slope = last_valid_slope
for x1, y1, x2, y2 in line:
if int(x2 - x1) != 0:
n_slope = (y2 - y1) / (x2 - x1)
last_valid_slope = n_slope
else:
n_slope = last_valid_slope
text = ""
text1 = ""
distance = (frame.shape[1] - x1) / frame.shape[1] + (frame.shape[1] - x2) / frame.shape[1]  # normalize the x positions by the frame width (shape[1])
if distance > 0.515:
text1 = "Move Left"
elif distance < 0.485:
text1 = "Move Right"
else:
text1 = "Move Forward"
if abs(n_slope) < 1:
text = "Move either left or right"
p_next_action = "MLR"
else:
text = "Move forward"
p_next_action = "MF"
font = cv2.FONT_HERSHEY_SIMPLEX
textsize = cv2.getTextSize(text, font, 1, 2)[0]
frame_center_x = int((frame.shape[1] - textsize[0]) / 2)
frame_center_y = int((frame.shape[0] + textsize[1]) / 2)
cv2.line(line_image, (x1, y1), (x2, y2), (0, 0, 255), 10)
cv2.putText(frame, text, (frame_center_x, frame_center_y), font, 1, (0, 255, 0), 5)
cv2.putText(frame, text1, (frame_center_x - 70, frame_center_y + 40), font, 1, (0, 0, 255), 5)  # thickness assumed to match the label above
lines_edges = cv2.addWeighted(frame, 0.8, line_image, 1, 0)
return [lines_edges, p_next_action, last_valid_slope]
def __tune_params(self):
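# Empirical Hough-transform parameters: rho/theta set the accumulator resolution
# (pixels / radians), threshold is the minimum vote count, and the last two
# filter out short or fragmented segments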
rho = 2
theta = np.pi / 180
threshold = 5
min_line_length = 40
max_line_gap = 30
return [rho, theta, threshold, min_line_length, max_line_gap]
def __do_canny(self, frame):
gray_image = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
blurred_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
canny = cv2.Canny(blurred_image, 50, 150)
return canny
# if __name__ == "__main__":
# lf = LineFollowing()
# cap = cv2.VideoCapture('./outputF1.avi')
# while cap.isOpened():
# ret, frame = cap.read()
# if not ret:
# break
# [line_edges, p_next_action, last_valid_slope] = lf.next_action(frame)
# if p_next_action != "do_nothing":
# cv2.imshow("Image", line_edges)
# else:
# print("Error while trying the image...")
# continue
# if cv2.waitKey(25) & 0xFF == ord('q'):
# break
| 45.483871 | 169 | 0.500709 | 487 | 4,230 | 4.112936 | 0.293635 | 0.074888 | 0.038442 | 0.019471 | 0.164254 | 0.081378 | 0.069895 | 0.03994 | 0.03994 | 0 | 0 | 0.053507 | 0.346099 | 4,230 | 92 | 170 | 45.978261 | 0.670644 | 0.134279 | 0 | 0.114286 | 0 | 0 | 0.026988 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.028571 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84c4318fb98bf099642661dc62436fb1ca14b9d3 | 748 | py | Python | Company-Based/amazon/load_balancer.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 3 | 2017-02-15T20:55:04.000Z | 2018-09-26T18:48:24.000Z | Company-Based/amazon/load_balancer.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 4 | 2017-10-07T18:59:20.000Z | 2019-10-08T05:43:25.000Z | Company-Based/amazon/load_balancer.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 1 | 2017-10-08T06:52:21.000Z | 2017-10-08T06:52:21.000Z | def load_balance(arr):
"""
Return True if arr can be split into three contiguous parts of equal sum
by removing two separator elements.
:type arr: List[int]
:rtype: bool
"""
if len(arr) < 5:
return False
p_sum = [arr[0]] # Calc prefix sum
for i in range(1, len(arr)):
p_sum.append(p_sum[-1]+arr[i])
low = 1
high = len(arr)-1
while low < high:
lower = p_sum[low-1]  # sum of arr[:low]
higher = p_sum[len(arr)-1] - p_sum[high]  # sum of arr[high+1:]
mid = p_sum[high-1] - p_sum[low]  # sum of arr[low+1:high], between the two removed separators
# print(lower, mid, higher, low, high)
if lower == mid == higher:
return True
elif lower < higher:
low += 1  # grow the lower part
else:
high -= 1  # shrink the upper part
return False
print(load_balance([2, 4, 5, 3, 3, 9, 2, 2, 2]))
print(load_balance([1,1,1,1]))
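# Expected: True ([2, 4] | 5 | [3, 3] | 9 | [2, 2, 2] -> three parts summing to 6) and False (fewer than 5 elements)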
| 23.375 | 53 | 0.490642 | 115 | 748 | 3.095652 | 0.365217 | 0.089888 | 0.039326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050314 | 0.362299 | 748 | 31 | 54 | 24.129032 | 0.696017 | 0.147059 | 0 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0 | 0.190476 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84c71a7531ae9fa2ac7181c6e7cdd2b229658408 | 1,058 | py | Python | Data Structure/Recursion/Problems/FibonacciNumber.py | xiaomaomi7/DSA | e5cb88c9f1d39fd305c0f92db4ecae6e846cff01 | [
"MIT"
] | null | null | null | Data Structure/Recursion/Problems/FibonacciNumber.py | xiaomaomi7/DSA | e5cb88c9f1d39fd305c0f92db4ecae6e846cff01 | [
"MIT"
] | null | null | null | Data Structure/Recursion/Problems/FibonacciNumber.py | xiaomaomi7/DSA | e5cb88c9f1d39fd305c0f92db4ecae6e846cff01 | [
"MIT"
] | null | null | null | '''
Author: Hongxiang Qi
Date: 22/06/2021
Description:
Write a recursive function called fib which accepts a number and returns the nth number in the
Fibonacci sequence. Recall that the Fibonacci sequence is the sequence of whole numbers 0,1,1,2,3,5,8,...
which starts with 0 and 1, and where every number thereafter is equal to the sum of the previous two numbers.
Example:
fib(4) # 3
fib(10) # 55
fib(28) # 317811
fib(35) # 9227465
'''
'''
Approach:
1. Recursive case - the flow
fib(n) = fib(n - 1) + fib(n - 2)
2. Base condition - the stopping criterion
n = 0, return 0
n = 1, return 1
3. Unintentional case - the constraint
without the assert below, fib(1.1) or fib(-1) would recurse forever and end in
RecursionError: maximum recursion depth exceeded; the assert rejects such input up front
'''
def fib(n):
assert n == int(n) and n >= 0, 'The input n must be a non-negative integer.'
if n < 2:
return n
else:
return fib(n - 1) + fib(n - 2)
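# A memoized sketch of the same recursion: caching makes each fib(k) compute
# only once, turning the exponential naive version into linear time. Assumes
# Python 3.2+ for functools.lru_cache.
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_memo(n):
    assert n == int(n) and n >= 0, 'The input n must be a non-negative integer.'
    if n < 2:
        return n
    return fib_memo(n - 1) + fib_memo(n - 2)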
print(fib(4)) | 27.128205 | 109 | 0.643667 | 168 | 1,058 | 4.053571 | 0.5 | 0.035242 | 0.058737 | 0.023495 | 0.208517 | 0.208517 | 0.179148 | 0.179148 | 0.179148 | 0 | 0 | 0.072704 | 0.258979 | 1,058 | 39 | 110 | 27.128205 | 0.795918 | 0.42155 | 0 | 0 | 0 | 0 | 0.226804 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.428571 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84cb010d5b5c6e2312275904e2f2082e3246dbef | 1,216 | py | Python | test/asyncpool.py | timgates42/stream.py | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | [
"MIT"
] | 34 | 2015-02-04T15:00:52.000Z | 2021-08-24T01:33:36.000Z | test/asyncpool.py | timgates42/stream.py | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | [
"MIT"
] | 2 | 2016-02-16T22:02:02.000Z | 2016-03-02T18:25:00.000Z | test/asyncpool.py | timgates42/stream.py | 6a4945cbddaf74138eee5ba33eee3988cfceb84d | [
"MIT"
] | 7 | 2015-10-14T19:58:53.000Z | 2022-03-28T04:18:36.000Z | #!/usr/bin/env python2.6
import os
import sys
from pprint import pprint
from random import randint
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from stream import filter, map, ThreadPool, ProcessPool
## The test data
dataset = []
def alternating(n):
values = []
for i in range(1, n+1):
values.append(i)
values.append(-i)
return values
def randomized(n):
values = []
for _ in range(n):
values.append(randint(-sys.maxint, sys.maxint))
return values
for v in [10, 100, 1000] >> map(alternating):
dataset.append(v)
for v in [10, 100, 1000] >> map(randomized):
dataset.append(v)
func = filter(lambda x: x&1)
resultset = dataset >> map(lambda s: s >> func >> set) >> list
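# func keeps odd numbers (x & 1); resultset holds the expected set of odds for each dataset entry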
## Test scenario
def threadpool(i):
result = dataset[i] >> ThreadPool(func, poolsize=2) >> set
pprint(result)
assert result == resultset[i]
def processpool(i):
result = dataset[i] >> ProcessPool(func, poolsize=2) >> set
pprint(result)
assert result == resultset[i]
## Test cases
def test_ThreadPool():
for i in range(len(dataset)):
yield threadpool, i
def test_ProcessPool():
for i in range(len(dataset)):
yield processpool, i
if __name__ == '__main__':
import nose
nose.main()
| 17.623188 | 65 | 0.683388 | 181 | 1,216 | 4.508287 | 0.348066 | 0.034314 | 0.022059 | 0.040441 | 0.230392 | 0.230392 | 0.230392 | 0.122549 | 0.122549 | 0.122549 | 0 | 0.025743 | 0.169408 | 1,216 | 68 | 66 | 17.882353 | 0.782178 | 0.050987 | 0 | 0.292683 | 0 | 0 | 0.008718 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 1 | 0.146341 | false | 0 | 0.146341 | 0 | 0.341463 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84ceb7de86289497443ba36288d408d44c8ab72e | 2,779 | py | Python | sagas/ofbiz/entity_meta_indexer.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/ofbiz/entity_meta_indexer.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | sagas/ofbiz/entity_meta_indexer.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | from sagas.ofbiz.entities import OfEntity as e, oc, MetaEntity, all_entities
import resources_pb2 as res
import protobuf_utils
def build_field_index(ents):
"""Build an inverted index mapping field name -> entity names that define it; view entities and auto-created internal fields are skipped"""
field_index = {}
for ent_name in ents:
ent = MetaEntity(ent_name)
if not oc.j.Utils.isViewEntity(ent.model):
for fld in ent.model.getFieldsIterator():
if not fld.getIsAutoCreatedInternal():
field_index.setdefault(fld.getName(), []).append(ent_name)
return field_index
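# Illustrative shape of the returned index (hypothetical field/entity names):
#   {'partyId': ['Party', 'PartyRole', ...], 'lastName': ['Person', ...]}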
class EntityMetaIndexer(object):
def __init__(self):
self.rs=None
self.resource=None
self.rs_lookups=None
def build_samples(self):
ents=['Person', 'Party']
result=build_field_index(ents)
print(result)
def build(self):
"""
$ python -m sagas.ofbiz.entity_meta_indexer build
:return:
"""
ents = all_entities()
idx = build_field_index(ents)
print('total fields:', len(idx))
idx_b = {}
for k, v in idx.items():
idx_b[k] = res.RsEntityReference(entities=v)
rs = res.RsEntities(fieldRefs=idx_b)
protobuf_utils.write_proto_to(rs, 'data/resources/entities_index.data')
print('total field-refs:', len(rs.fieldRefs))
def init_rs(self):
from sagas.ofbiz.resources import read_resource
target = 'data/resources/entities_index.data'
self.rs = res.RsEntities()
protobuf_utils.read_proto(self.rs, target)
print('total field-refs:', len(self.rs.fieldRefs))
# print(rs.fieldRefs['lastName'])
self.resource, self.rs_lookups = read_resource()
def is_field(self, qname):
if self.rs is None: self.init_rs()
return qname in self.rs.fieldRefs
def is_description(self, word, lang='zh'):
if self.rs is None: self.init_rs()
lang_idx = self.rs_lookups.indexTable[lang]
if word in lang_idx.indexes:
keys = lang_idx.indexes[word]
for key in keys.value:
# print(word, "☞", key)
if '.description.' in key:
return True
return False
def testing(self):
"""
$ python -m sagas.ofbiz.entity_meta_indexer testing
:return:
"""
lang = 'zh'
word = '会员'
print('会员(zh) is a type description?', self.is_description(word, lang))
print('lastName is field?', self.is_field('lastName'))
print('fakeName is field?', self.is_field('fakeName'))
if __name__ == '__main__':
import fire
fire.Fire(EntityMetaIndexer)
| 30.877778 | 79 | 0.593739 | 344 | 2,779 | 4.607558 | 0.27907 | 0.037855 | 0.028391 | 0.035962 | 0.196845 | 0.078233 | 0.078233 | 0.078233 | 0 | 0 | 0 | 0.000512 | 0.297589 | 2,779 | 89 | 80 | 31.224719 | 0.810963 | 0.062612 | 0 | 0.031746 | 0 | 0 | 0.09209 | 0.026761 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126984 | false | 0 | 0.079365 | 0 | 0.285714 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84cebcba7d82ad0421772f1cc2c1ed6a4c0f886b | 261 | py | Python | exercicios-com-funcoes/exercicio01.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | exercicios-com-funcoes/exercicio01.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | exercicios-com-funcoes/exercicio01.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | # Write a program to print:
# 1
# 2 2
# 3 3 3
# .....
# n n n n n n ... n
def contador(n):
for numero in range(1, n + 1):  # range(1, n + 1) so the last row reaches n, matching the pattern above
string = str(numero) + " "
string *= numero
print(string)
contador(15) | 17.4 | 34 | 0.448276 | 36 | 261 | 3.25 | 0.527778 | 0.102564 | 0.128205 | 0.136752 | 0.059829 | 0.059829 | 0 | 0 | 0 | 0 | 0 | 0.059603 | 0.421456 | 261 | 15 | 35 | 17.4 | 0.715232 | 0.398467 | 0 | 0 | 0 | 0 | 0.006623 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84d53ac24c7879eddf6eb1cc6dd18e51a194809c | 4,592 | py | Python | pythonlibs/mantis/sg/fisher/stbase/market.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | [
"MIT"
] | 22 | 2019-10-28T07:28:12.000Z | 2022-03-19T15:36:41.000Z | pythonlibs/mantis/sg/fisher/stbase/market.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | [
"MIT"
] | 1 | 2019-11-07T04:54:14.000Z | 2019-11-07T07:12:48.000Z | pythonlibs/mantis/sg/fisher/stbase/market.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | [
"MIT"
] | 13 | 2019-10-28T07:29:07.000Z | 2021-11-03T06:53:12.000Z | #coding:utf-8
import time,datetime
import os,os.path
import json
import traceback
from threading import Thread,Condition
from Queue import Queue
from collections import OrderedDict
from mantis.fundamental.utils.timeutils import timestamp_current, timestamp_to_str,datetime_to_timestamp,\
current_datetime_string,current_date_string,TimedTask
from mantis.fundamental.utils.useful import hash_object,object_assign
from mantis.fundamental.utils.useful import singleton
from base import *
import controller
class Market(object):
"""行情源 , 不同产品的接入行情方式不同,不同的券商接口"""
Bars =['1m','5m','15m','30m','60m','d','w','m','q','y']
def __init__(self,product):
self.product = product
self.generator = None
self.recorder = None
self.tick_handlers = OrderedDict() # {code:[handler,],..}
self.bar_handlers = OrderedDict() # {code-bar:[handler,],...} 600232-1m:func,..
self.thread = Thread(target=self.processThread)
self.actived = False
self.queue = Queue()
def init(self,*args,**kwargs):
self.setupGenerator(MarketGenerator())
self.setupRecorder(MarketRecorder())
return self
def setupGenerator(self,generator):
self.generator = generator
self.generator.market = self
return self
def setupRecorder(self,recorder):
self.recorder = recorder
return self
def open(self):
# Start receiving market data
if self.generator:
# self.generator.open()
pass
if self.recorder:
self.recorder.open()
self.thread.start()
return True
def close(self):
self.actived = False
# self.thread.join()
def initTradeObject(self,obj):
return obj
def subReset(self):
"""取消訂閱tick和bar"""
self.tick_handlers = OrderedDict()
self.bar_handlers = OrderedDict()
return self
def subTick(self,code,handler):
"""订阅行情周期"""
obj = self.product.getOrNewTradeObject(code)
self.initTradeObject(obj)
handlers = self.tick_handlers.get(code,[])
if not handlers:
self.tick_handlers[code] = [handler]
else:
handlers.append(handler)
return obj
def subBar(self,code,handler,cycle='1m'):
"""订阅k线数据"""
key = '{}-{}'.format(code,cycle)
obj = self.product.getOrNewTradeObject(code)
self.initTradeObject(obj)
handlers = self.bar_handlers.get(key, [])
if not handlers:
self.bar_handlers[key] = [handler,]
else:
handlers.append(handler)
return obj
def getHistoryBars(self,code,cycle,limit):
"""查询历史k线"""
pass
def onTick(self,tick):
if not tick.trade_object:
obj = self.product.getOrNewTradeObject(tick.code)
tick.trade_object = self.product.market.initTradeObject(obj)
self.tickInit(tick)
handlers = self.tick_handlers.get(tick.code,[])
for handler in handlers:
handler(tick)
if self.recorder:
self.recorder.write(tick)
tick.trade_object = None
def tickInit(self,tick):
tick.trade_object.setPrice(tick.price)
def onBar(self,bar):
if not bar.trade_object:
obj = self.product.getOrNewTradeObject(bar.code)
bar.trade_object = self.product.market.initTradeObject(obj)
k = '{}-{}'.format(bar.code,bar.cycle)
handlers = self.bar_handlers.get(k,[])
for handler in handlers:
handler(bar)
if self.recorder:
self.recorder.write(bar)
def putData(self,data):
"""接收到的行情数据置入队列,等待被读取处理 """
self.queue.put(data)
def processThread(self):
self.actived = True
while self.actived:
try:
try:
data = self.queue.get(timeout=1)
except:
continue
if not data:
continue
if isinstance(data,TickData):
self.onTick(data)
if isinstance(data,BarData):
self.onBar(data)
except:
traceback.print_exc()
# controller.getLogger().debug('Market Data Thread Exiting..')
controller.TradeController().getLogger().debug( 'Market Data Thread Exiting..')
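# Usage sketch (hypothetical `product` and handler objects; wiring only, no real feed attached):
#   market = Market(product).init()
#   market.subTick('600232', on_tick)
#   market.subBar('600232', on_bar, cycle='1m')
#   market.open()               # starts the queue-processing thread
#   market.putData(tick_data)   # feed adapters push TickData/BarData instances here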
| 29.818182 | 107 | 0.574477 | 470 | 4,592 | 5.544681 | 0.282979 | 0.041443 | 0.030698 | 0.036838 | 0.300844 | 0.23868 | 0.123561 | 0.088258 | 0.05449 | 0.05449 | 0 | 0.005756 | 0.319033 | 4,592 | 153 | 108 | 30.013072 | 0.82763 | 0.059016 | 0 | 0.318584 | 0 | 0 | 0.01404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.141593 | false | 0.017699 | 0.106195 | 0.00885 | 0.336283 | 0.00885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84d6b98590fb4046c65ce15b09a1a6c1ece9068c | 14,409 | py | Python | ai/experiment.py | lgoodridge/Asteroids-AI | bf6abd2b42db1b13667060c30de13a53aaa05110 | [
"MIT"
] | 4 | 2018-11-06T16:27:11.000Z | 2021-11-12T12:23:54.000Z | ai/experiment.py | lgoodridge/Asteroids-AI | bf6abd2b42db1b13667060c30de13a53aaa05110 | [
"MIT"
] | null | null | null | ai/experiment.py | lgoodridge/Asteroids-AI | bf6abd2b42db1b13667060c30de13a53aaa05110 | [
"MIT"
] | 1 | 2022-02-25T10:35:34.000Z | 2022-02-25T10:35:34.000Z | """
Defines functions for running AI experiments.
"""
from __future__ import print_function
from ai.ai_app import AI_App
from ai.generation import Generation
from ai.utils import algorithm_id_to_generation_class, \
BEST_BRAIN_FILENAME, LOG_FILENAME, META_FILENAME
from collections import OrderedDict
from datetime import datetime
import json
import os
import settings
import shutil
import sys
import traceback
def run_experiment():
"""
Starts or continues an experiment according to
the configuration parameters set in settings.
"""
ai_app = AI_App()
generation_class = algorithm_id_to_generation_class(
settings.EXPERIMENT_ALGORITHM_ID)
algorithm_name = generation_class.get_algorithm_name()
experiment_dir = settings.EXPERIMENT_DIRECTORY
experiment_name = os.path.basename(os.path.normpath(experiment_dir))
log_filename = os.path.join(experiment_dir, LOG_FILENAME)
meta_filename = os.path.join(experiment_dir, META_FILENAME)
best_brain_filename = os.path.join(experiment_dir, BEST_BRAIN_FILENAME)
# If the experiment directory doesn't exist
# yet, create it, and start a new experiment
if not os.path.exists(experiment_dir):
print("Initializing experiment directory '%s'..." % experiment_dir)
# Create the experiment directory, ensuring
# the parent directory exists beforehand
parent_dir = os.path.dirname(os.path.normpath(experiment_dir))
if parent_dir != "" and not os.path.exists(parent_dir):
raise ValueError("Parent directory '%s' does not exist." %
parent_dir)
os.mkdir(experiment_dir)
best_fitness = 0
best_brain_tag = "N/A"
generation_idx = 0
stagnation_idx = 0
# Create the experiment meta file
meta_dict = OrderedDict({
"AI Algorithm": algorithm_name,
"Best Fitness": best_fitness,
"Best Brain": best_brain_tag,
"Generation Index": generation_idx,
"Stagnation Index": stagnation_idx,
})
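# The resulting meta file is plain JSON, e.g.:
# {"AI Algorithm": "...", "Best Fitness": 0, "Best Brain": "N/A",
#  "Generation Index": 0, "Stagnation Index": 0}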
try:
_write_meta_file(meta_filename, meta_dict)
except Exception as e:
print("ERROR CREATING META FILE!")
raise e
# Create the experiment log file
log = open(log_filename, "w")
current_time_str = datetime.now().strftime("%m/%d/%Y %H:%M")
print("========================================", file=log)
print("= STARTED ON %s" % current_time_str, file=log)
print("========================================", file=log)
_write_to_log(log, "Starting experiment '%s':\n" % experiment_name, True)
# If it does, resume the previous experiment
elif os.path.isdir(experiment_dir):
print("Resuming experiment '%s':" % experiment_name)
# Load the meta data
try:
meta_dict = _load_meta_file(meta_filename)
except Exception as e:
print("ERROR LOADING META FILE!")
raise e
best_fitness = meta_dict["Best Fitness"]
best_brain_tag = meta_dict["Best Brain"]
generation_idx = meta_dict["Generation Index"]
stagnation_idx = meta_dict["Stagnation Index"]
# Load the log file
log = open(log_filename, "a")
current_time_str = datetime.now().strftime("%m/%d/%Y %H:%M")
print("", file=log)
print("========================================", file=log)
print("= CONTINUED ON %s" % current_time_str, file=log)
print("========================================", file=log)
# If the provided location exists, but is
# not a directory, fail and alert the user
else:
raise ValueError(("Experiment Directory '%s' exists, but "
"is not a directory.") % experiment_dir)
generation = None
start_idx = generation_idx
# If we are continuing an experiment, load the last completed generation
if generation_idx > 0:
print("Loading generation %03d... " % (generation_idx-1), end="")
generation_dirname = os.path.join(experiment_dir,
"gen%03d" % (generation_idx-1))
generation = generation_class.load(generation_dirname, ai_app)
print("Complete")
# Create and evaluate generations of AI
# brains until an end condition is reached
for generation_idx in range(start_idx, settings.MAX_GENERATIONS+1):
generation_dirname = os.path.join(experiment_dir,
"gen%03d" % generation_idx)
generation_meta_filename = os.path.join(generation_dirname,
META_FILENAME)
# End the experiment if we've reached an end condition
if generation_idx == settings.MAX_GENERATIONS:
_write_to_log(log, "Max generations reached. Ending Experiment.\n")
break
if stagnation_idx >= settings.MAX_GENERATIONS_WITHOUT_PROGRESS:
_write_to_log(log, "Progress has stagnated. Ending Experiment.\n")
break
if os.path.isdir(generation_dirname):
shutil.rmtree(generation_dirname)
# Create the initial generation, or breed the next generation
_write_to_log(log, "Generation %d: Creating" % generation_idx)
try:
if generation is None:
generation = generation_class(generation_idx, ai_app)
else:
generation = generation.breed()
except Exception as e:
_write_to_log(log, "\nERROR CREATING GENERATION %d\n%s" % \
(generation_idx, traceback.format_exc()), True)
return
# Evaluate the generation
_write_to_log(log, " - Evaluating")
try:
generation.evaluate_fitnesses()
best_brain_id = generation.get_best_brain_id()
best_brain = generation.get_brain(best_brain_id)
except Exception as e:
_write_to_log(log, "\nERROR EVALUATING GENERATION %d\n%s" % \
(generation_idx, traceback.format_exc()), True)
return
_write_to_log(log, ": Best Fitness = %.2f - Saving" % best_brain.fitness)
# Save the generation
try:
generation.save(generation_dirname)
except Exception as e:
_write_to_log(log, "\nERROR SAVING GENERATION %d\n%s" % \
(generation_idx, traceback.format_exc()), True)
return
_write_to_log(log, "\n")
# Determine whether progress was made
progress_threshold = best_fitness + settings.PROGRESS_IMPROVEMENT_THRESHOLD
if best_brain.fitness < progress_threshold:
stagnation_idx += 1
else:
stagnation_idx = 0
# Update the best brain and fitness, if necessary
if best_brain.fitness >= best_fitness:
best_fitness = best_brain.fitness
best_brain_tag = "Generation: %03d - ID: %03d" % \
(generation_idx, best_brain_id)
best_brain_filename = os.path.join(experiment_dir,
BEST_BRAIN_FILENAME)
if os.path.exists(best_brain_filename):
os.remove(best_brain_filename)
try:
best_brain.save(best_brain_filename)
except Exception as e:
_write_to_log(log, "\nERROR SAVING BEST BRAIN OF GENERATION " + \
"%d\n%s" % (generation_idx, traceback.format_exc()), True)
return
# Update meta file
meta_dict = OrderedDict({
"AI Algorithm": algorithm_name,
"Best Fitness": best_fitness,
"Best Brain": best_brain_tag,
"Generation Index": generation_idx+1,
"Stagnation Index": stagnation_idx,
})
_write_meta_file(meta_filename, meta_dict)
# Clean up
ai_app.cleanup_simulation()
log.close()
def merge_experiments(exp_dir_list, merged_dir):
"""
Merges the best brains of the experiment directories in
the provided list into a new directory, and initializes
(but does not run) that experiment.
All arguments should be filepaths to existing parent
experiment directories, or to the desired child directory.
All parent experiments should be compatible (e.g.
using the same AI algorithm), and the child experiment
will determine its algorithm and generation population
from the configuration parameters set in settings.
"""
ai_app = AI_App()
generation_class = algorithm_id_to_generation_class(
settings.EXPERIMENT_ALGORITHM_ID)
algorithm_name = generation_class.get_algorithm_name()
experiment_name = os.path.basename(os.path.normpath(merged_dir))
log_filename = os.path.join(merged_dir, LOG_FILENAME)
meta_filename = os.path.join(merged_dir, META_FILENAME)
def get_last_gen_dir(exp_dir):
"""
Returns the filepath to the last generation
directory for the provided experiment.
"""
if not os.path.exists(exp_dir):
raise ValueError(("Parent experiment directory '%s' "
"does not exist.") % exp_dir)
gen_dirs = [x for x in os.listdir(exp_dir) if x.startswith("gen")]
if len(gen_dirs) == 0:
raise ValueError(("Parent experiment directory '%s' "
"doesn't have any completed generations") % exp_dir)
return os.path.join(exp_dir, sorted(gen_dirs)[-1])  # listdir order is arbitrary; sorting the zero-padded "genNNN" names picks the latest
# Ensure at least two parent experiments were provided,
# then get the last generation directories for each of them
if len(exp_dir_list) < 2:
raise ValueError("There must be at least two parent experiments.")
exp_last_gens = [get_last_gen_dir(x) for x in exp_dir_list]
# Ensure the output directory doesn't already exist, then create it
if os.path.exists(merged_dir):
raise ValueError(("Output experiment directory '%s' "
"already exists.") % merged_dir)
parent_dir = os.path.dirname(os.path.normpath(merged_dir))
if parent_dir != "" and not os.path.exists(parent_dir):
raise ValueError(("Parent directory of output location '%s' "
"does not exist.") % parent_dir)
os.mkdir(merged_dir)
brains = []
brains_per_exp = int(settings.GENERATION_POPULATION / len(exp_dir_list))
leftover_brains = settings.GENERATION_POPULATION % len(exp_dir_list)
# Each experiment will contribute the first population / n
# brains from its final generation, where n is the number of
# parent experiments. If the number cannot be divided evenly,
# arbitrarily assign the leftover brains to the last experiment
# in the parent list
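    # e.g. GENERATION_POPULATION = 50 over 3 parents -> 16 + 16 + 18 brains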
for parent_idx, gen_dir in enumerate(exp_last_gens):
num_brains = brains_per_exp
if parent_idx == len(exp_last_gens) - 1:
num_brains += leftover_brains
gen_brain_files = [os.path.join(gen_dir, x) for x in os.listdir(gen_dir)
if x.endswith(".brn")]
if len(gen_brain_files) < num_brains:
raise ValueError(("Parent experiment directory '%s' does not "
"have enough brains in its last generation to contribute")
% gen_dir)
for brain_file in gen_brain_files[:num_brains]:
brains.append(generation_class.load_brain(brain_file))
generation = generation_class(0, ai_app, brains)
# Create the experiment log file
log = open(log_filename, "w")
current_time_str = datetime.now().strftime("%m/%d/%Y %H:%M")
print("========================================", file=log)
print("= STARTED ON %s" % current_time_str, file=log)
print("=", file=log)
print("= MERGED FROM:", file=log)
for exp_dir in exp_dir_list:
print("= %s" % exp_dir, file=log)
print("========================================", file=log)
_write_to_log(log, "Initializing experiment '%s':\n"
% experiment_name, True)
# Evaluate the generation
_write_to_log(log, "Performing initial evaluation")
try:
generation.evaluate_fitnesses()
best_brain_id = generation.get_best_brain_id()
best_brain = generation.get_brain(best_brain_id)
except Exception as e:
_write_to_log(log, "\nERROR EVALUATING GENERATION %d\n%s" % \
(0, traceback.format_exc()), True)
return
# Save the generation
_write_to_log(log, ": Best Fitness = %.2f - Saving" % best_brain.fitness)
try:
generation.save(os.path.join(merged_dir, "gen000"))
except Exception as e:
_write_to_log(log, "\nERROR SAVING GENERATION %d\n%s" % \
(0, traceback.format_exc()), True)
return
_write_to_log(log, "\n")
# Save the best brain
best_brain_filename = os.path.join(merged_dir, BEST_BRAIN_FILENAME)
best_brain.save(best_brain_filename)
# Create the experiment meta file
meta_dict = OrderedDict({
"AI Algorithm": algorithm_name,
"Best Fitness": best_brain.fitness,
"Best Brain": "Generation: %03d - ID: %03d" % (0, best_brain_id),
"Generation Index": 1,
"Stagnation Index": 0,
})
try:
_write_meta_file(meta_filename, meta_dict)
except Exception as e:
print("ERROR CREATING META FILE!")
raise e
# Clean up
_write_to_log(log, "Finished initializing merged experiment.\n", True)
ai_app.cleanup_simulation()
log.close()
def _write_to_log(log, message, force_echo=False):
"""
Writes message to the provided log.
If force_echo or EXPERIMENT_ECHO_LOGS is True,
prints log messages to standard out as well.
"""
print(message, end="", file=log)
log.flush()
if force_echo or settings.EXPERIMENT_ECHO_LOGS:
print(message, end="")
sys.stdout.flush()
def _write_meta_file(filename, meta_dict):
"""
Writes the data contained in meta_dict to the specified file.
"""
if os.path.exists(filename):
os.remove(filename)
with open(filename, "w") as meta_file:
json.dump(meta_dict, meta_file, indent=4)
def _load_meta_file(filename):
"""
Loads the data from the specified file and returns it as a dictionary.
"""
with open(filename, "r") as meta_file:
result = json.load(meta_file)
return result
| 39.368852 | 83 | 0.630162 | 1,775 | 14,409 | 4.886761 | 0.154366 | 0.042541 | 0.021905 | 0.028476 | 0.468296 | 0.434402 | 0.3733 | 0.338022 | 0.295135 | 0.285681 | 0 | 0.003776 | 0.264904 | 14,409 | 365 | 84 | 39.476712 | 0.815144 | 0.16219 | 0 | 0.418972 | 0 | 0 | 0.15801 | 0.020204 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023715 | false | 0 | 0.047431 | 0 | 0.102767 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84d726cdadffb9899d6b5d6a67cdc9191f4d113d | 2,655 | py | Python | tests/test_SpellingResolver.py | microsoft/cai-advanced-processing-service | 91fce8048fe275aa870083bed22d452d330ef535 | [
"MIT"
] | 9 | 2021-11-17T11:50:47.000Z | 2022-02-15T14:48:32.000Z | tests/test_SpellingResolver.py | microsoft/cai-advanced-processing-service | 91fce8048fe275aa870083bed22d452d330ef535 | [
"MIT"
] | 7 | 2021-12-03T17:05:26.000Z | 2022-03-02T04:59:50.000Z | tests/test_SpellingResolver.py | microsoft/cai-advanced-processing-service | 91fce8048fe275aa870083bed22d452d330ef535 | [
"MIT"
] | null | null | null | import unittest, json
import azure.functions as func
from ddt import ddt, data, unpack
from SpellingResolver.main import main
@ddt
class TestSpellingResolver(unittest.TestCase):
@data( ({"query":"anton marta 123",
"convertnumbers": True,
"convertsymbols": True,
"additional_symbols": {},
"allowed_symbols": ["-" ],
"locale":"de"
},
{
"original": "anton marta 123",
"resolved": "a m 123",
"resolved_nospace": "am123",
"first_letters": "a m 123",
"first_letters_nospace": "am123"
}),
({'query':'siegfried dora 2 * 7',
"convertnumbers": True,
"convertsymbols": True,
"additional_symbols": {},
"allowed_symbols": ["-" ],
'locale':'de'
},
{
"original": "siegfried dora 2 * 7",
"resolved": "s d 77",
"resolved_nospace": "sd77",
"first_letters": "s d 77",
"first_letters_nospace": "sd77"
}),
({'query':'toni berta 22',
"convertnumbers": True,
"convertsymbols": True,
"additional_symbols": {},
"allowed_symbols": ["-" ],
'locale':'de'
},
{
"original": "toni berta 22",
"resolved": "toni b 22",
"resolved_nospace": "tonib22",
"first_letters": "t b 22",
"first_letters_nospace": "tb22"
}),
({'query':'d e 3 times 7 2 times 3',
"convertnumbers": True,
"convertsymbols": True,
"additional_symbols": {},
"allowed_symbols": ["-" ],
'locale':'de'
},
{
"original": "d e 3 times 7 2 times 3",
"resolved": "d e 777 33",
"resolved_nospace": "de77733",
"first_letters": "d e 777 33",
"first_letters_nospace": "de77733"
}))
@unpack
def test_attribute_validator(self, body, expected_output):
# Build HTTP request
req = func.HttpRequest(
method = 'GET',
body = json.dumps(body).encode('utf8'),
url = '/api/SpellingResolver',
params = {}
)
# Call the function.
resp = main(req)
# Check the output.
self.assertEqual(
resp.get_body().decode(),
json.dumps(expected_output),
) | 32.777778 | 63 | 0.448588 | 221 | 2,655 | 5.257919 | 0.384615 | 0.082616 | 0.110155 | 0.123924 | 0.313253 | 0.313253 | 0.313253 | 0.313253 | 0.285714 | 0.285714 | 0 | 0.045984 | 0.418456 | 2,655 | 81 | 64 | 32.777778 | 0.706606 | 0.020716 | 0 | 0.293333 | 0 | 0 | 0.332936 | 0.041716 | 0 | 0 | 0 | 0 | 0.013333 | 1 | 0.013333 | false | 0 | 0.053333 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
84db039c9f5c049f0b4ccd9906d76791791b4bff | 603 | py | Python | ex112/UtilidadesCeV/dados/__init__.py | LucasIdalino/Exerc-cios-do-Curso | 4ca4610d1acfe4672c20114f891b6aabae816049 | [
"MIT"
] | null | null | null | ex112/UtilidadesCeV/dados/__init__.py | LucasIdalino/Exerc-cios-do-Curso | 4ca4610d1acfe4672c20114f891b6aabae816049 | [
"MIT"
] | null | null | null | ex112/UtilidadesCeV/dados/__init__.py | LucasIdalino/Exerc-cios-do-Curso | 4ca4610d1acfe4672c20114f891b6aabae816049 | [
"MIT"
] | null | null | null | def leiadinheiro(msg):
validade = False
while not validade:
entrada = str(input(msg)).replace(',', '.').strip()
try:
valor = float(entrada)  # float() itself validates the input (digits, sign, and decimal point)
validade = True
except ValueError:
print(f'Error! {entrada} is not a valid price')
return valor
def leiaint(msg):
ok = False
valor = 0
while True:
n = str(input(msg))
if n.isnumeric():
valor = int(n)
ok = True
else:
print("Por favor, digite um número inteiro.")
if ok:
break
return valor
| 22.333333 | 59 | 0.500829 | 68 | 603 | 4.441176 | 0.588235 | 0.05298 | 0.072848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002695 | 0.384743 | 603 | 26 | 60 | 23.192308 | 0.811321 | 0 | 0 | 0.090909 | 0 | 0 | 0.124792 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |