hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
033ac5b42cb933ad3ddd01f9391bd47273e14726 | 8,611 | py | Python | examples/inference/python/test/ls_quant_gpt2.py | hexisyztem/lightseq | 25265dabaaee42ee9e7b7ec43c8c04fb90292733 | [
"Apache-2.0"
] | 106 | 2019-12-06T09:02:58.000Z | 2020-09-09T07:12:21.000Z | examples/inference/python/test/ls_quant_gpt2.py | hexisyztem/lightseq | 25265dabaaee42ee9e7b7ec43c8c04fb90292733 | [
"Apache-2.0"
] | null | null | null | examples/inference/python/test/ls_quant_gpt2.py | hexisyztem/lightseq | 25265dabaaee42ee9e7b7ec43c8c04fb90292733 | [
"Apache-2.0"
] | 15 | 2019-12-09T05:44:28.000Z | 2020-09-04T03:43:56.000Z | import time
import torch
from torch import nn
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import lightseq.inference as lsi
from lightseq.training.ops.pytorch.quantization import (
qat_mode,
QuantLinear,
TensorQuantizer,
weight_quant_config,
)
from lightseq.training.ops.pytorch.torch_transformer_layers import (
TransformerDecoderLayer,
)
from export.util import parse_args
def ls_gpt2(model, inputs, generation_method="topk"):
torch.cuda.synchronize()
start_time = time.perf_counter()
results = None
if generation_method == "topk" or generation_method == "topp":
results = model.sample(inputs)
elif generation_method == "ppl":
results = model.ppl(inputs)[0]
torch.cuda.synchronize()
end_time = time.perf_counter()
return results, end_time - start_time
def compute_hf_ppl(model, inputs):
max_length = 512
stride = 512
end_loc = 0
nlls = []
for i in range(0, inputs.size(1), stride):
begin_loc = max(i + stride - max_length, 0)
end_loc = min(i + stride, inputs.size(1))
trg_len = end_loc - i
input_ids = inputs[:, begin_loc:end_loc].to("cuda:0")
target_ids = input_ids.clone()
target_ids[:, :-trg_len] = -100
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
neg_log_likelihood = outputs[0] * trg_len
nlls.append(neg_log_likelihood)
ppl = torch.stack(nlls).sum() / end_loc
return ppl.cpu().numpy()
def hf_gpt2(model, inputs, tokenizer, generation_method="topk"):
inputs = inputs.to("cuda:0")
torch.cuda.synchronize()
start_time = time.perf_counter()
results = None
if generation_method == "topk" or generation_method == "topp":
results = model.generate(
inputs, max_length=50, pad_token_id=tokenizer.eos_token_id
)
elif generation_method == "ppl":
results = compute_hf_ppl(model, inputs)
torch.cuda.synchronize()
end_time = time.perf_counter()
return results, end_time - start_time
def ls_generate(model, tokenizer, inputs):
print("=========lightseq=========")
print("lightseq generating...")
ls_res_ids, ls_time = ls_gpt2(model, inputs)
ls_res = tokenizer.batch_decode(ls_res_ids, skip_special_tokens=True)
print(f"lightseq time: {ls_time}s")
print("lightseq results:")
for sent in ls_res:
print(sent)
def hf_generate(model, tokenizer, inputs):
print("=========huggingface=========")
print("huggingface generating...")
hf_res_ids, hf_time = hf_gpt2(model, inputs, tokenizer)
hf_res = tokenizer.batch_decode(hf_res_ids, skip_special_tokens=True)
print(f"huggingface time: {hf_time}s")
print("huggingface results:")
for sent in hf_res:
print(sent)
def ls_ppl(model, tokenizer, inputs):
print("=========lightseq=========")
print("lightseq calculating ppl...")
ls_ppl, ls_time = ls_gpt2(model, inputs, "ppl")
print(f"lightseq time: {ls_time}s")
print("lightseq results:")
print(ls_ppl)
def hf_ppl(model, tokenizer, inputs):
print("=========huggingface=========")
print("huggingface calculating ppl...")
hf_ppl, hf_time = hf_gpt2(model, inputs, tokenizer, "ppl")
print(f"huggingface time: {hf_time}s")
print("huggingface results:")
print(hf_ppl)
def warmup(
ls_tokenizer, hf_tokenizer, ls_model, hf_model, sentences, generation_method
):
ls_inputs = ls_tokenizer(sentences, return_tensors="pt", padding=True)["input_ids"]
hf_inputs = hf_tokenizer(sentences, return_tensors="pt", padding=True)["input_ids"]
if generation_method == "topk" or generation_method == "topp":
ls_generate(ls_model, ls_tokenizer, ls_inputs)
# hf_generate(hf_model, hf_tokenizer, hf_inputs)
elif generation_method == "ppl":
ls_ppl(ls_model, ls_tokenizer, ls_inputs)
hf_ppl(hf_model, hf_tokenizer, hf_inputs)
class GptEmbedding(nn.Embedding):
def __init__(self, *args, **kwargs):
super(GptEmbedding, self).__init__(*args, **kwargs)
self.emb_quant = TensorQuantizer(weight_quant_config)
def forward(self, input_ids):
x = super(GptEmbedding, self).forward(input_ids)
x = self.emb_quant(x)
return x
def gen_gpt_enc_config(config):
gpt_enc_config = TransformerDecoderLayer.get_config(
max_batch_tokens=8192,
max_seq_len=config.max_position_embeddings,
hidden_size=config.hidden_size,
intermediate_size=4 * config.hidden_size,
nhead=config.num_attention_heads,
attn_prob_dropout_ratio=config.attn_pdrop,
activation_dropout_ratio=config.resid_pdrop,
hidden_dropout_ratio=config.resid_pdrop,
pre_layer_norm=True,
fp16=True,
local_rank=0,
nlayer=config.num_hidden_layers,
activation_fn="gelu",
has_cross_attn=False,
)
return gpt_enc_config
class LSHFGptEncoderLayer(TransformerDecoderLayer):
def __init__(self, *args, **kwargs):
super(LSHFGptEncoderLayer, self).__init__(*args, **kwargs)
def forward(self, hidden_states, attention_mask=None, *args, **kwargs):
if attention_mask is not None:
ls_attention_mask = attention_mask.squeeze()
else:
ls_attention_mask = torch.zeros(hidden_states.size()[:2])
output = super().forward(hidden_states, ls_attention_mask)
return output
def inject_ls_layer(model, config):
model.transformer.wte = GptEmbedding(config.vocab_size, config.hidden_size)
model.transformer.wte.apply(qat_mode)
for i in range(config.num_hidden_layers):
gpt_enc_config = gen_gpt_enc_config(config)
model.transformer.h[i] = LSHFGptEncoderLayer(gpt_enc_config).cuda()
model.transformer.h[i].apply(qat_mode)
q_lm_head = QuantLinear(config.n_embd, config.vocab_size, bias=False)
q_lm_head.weight = model.transformer.wte.weight
q_lm_head.weight_quant = model.transformer.wte.emb_quant
model.lm_head = q_lm_head
def main():
args = parse_args()
if args.generation_method not in ["topk", "topp", "ppl"]:
args.generation_method = "topk"
model_name = ".".join(args.model.split(".")[:-1])
ckpt_path = f"{model_name}.bin"
print("initializing gpt2 config...")
config = GPT2Config.from_pretrained("gpt2")
print("initializing gpt2 tokenizer...")
ls_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# lightseq use len(tokenizer) as pad_token in default
ls_tokenizer.add_special_tokens({"pad_token": "[PAD]"})
print(f"lightseq tokenizer pad token id: {ls_tokenizer.pad_token_id}")
hf_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# use EOS as PAD for huggingface to avoid warning according to https://huggingface.co/blog/how-to-generate while avoid reshaping the model embedding
hf_tokenizer.pad_token = hf_tokenizer.eos_token
print(f"huggingface tokenizer pad token id: {hf_tokenizer.pad_token_id}")
print("creating huggingface model...")
hf_model = GPT2LMHeadModel.from_pretrained("gpt2", config=config)
inject_ls_layer(hf_model, config)
state_dict = torch.load(ckpt_path, map_location="cpu")
hf_model.load_state_dict(state_dict, strict=False)
hf_model.to("cuda:0")
hf_model.eval()
print("creating lightseq model...")
ls_model = lsi.QuantGpt(args.model, max_batch_size=16)
# lightseq gpt perplexity supports batch infer with different lengths,
# but sampling doesn't support
sentences = [
"I love you, but you say that",
"I love you, but you say that",
"I love you, but you say that",
"I love you, but you say that",
]
print("====================START warmup====================")
warmup(
ls_tokenizer,
hf_tokenizer,
ls_model,
hf_model,
sentences,
args.generation_method,
)
print("====================END warmup====================")
print("tokenizing the sentences...")
ls_inputs = ls_tokenizer(sentences, return_tensors="pt", padding=True)["input_ids"]
hf_inputs = hf_tokenizer(sentences, return_tensors="pt", padding=True)["input_ids"]
if args.generation_method == "topk" or args.generation_method == "topp":
ls_generate(ls_model, ls_tokenizer, ls_inputs)
# hf_generate(hf_model, hf_tokenizer, hf_inputs)
elif args.generation_method == "ppl":
ls_ppl(ls_model, ls_tokenizer, ls_inputs)
hf_ppl(hf_model, hf_tokenizer, hf_inputs)
if __name__ == "__main__":
main()
| 34.170635 | 152 | 0.676344 | 1,118 | 8,611 | 4.929338 | 0.205725 | 0.052259 | 0.025404 | 0.013791 | 0.405915 | 0.335329 | 0.306659 | 0.259481 | 0.24569 | 0.24569 | 0 | 0.007518 | 0.196725 | 8,611 | 251 | 153 | 34.306773 | 0.789215 | 0.045291 | 0 | 0.243655 | 0 | 0 | 0.130022 | 0.03214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076142 | false | 0 | 0.040609 | 0 | 0.15736 | 0.147208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
033c4e6e47b886b0874ca7cd97804e2f650bcb80 | 623 | py | Python | algorithms/153. Find Minimum in Rotated Sorted Array.py | vuzway9132/leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | 1 | 2020-12-02T13:54:30.000Z | 2020-12-02T13:54:30.000Z | algorithms/153. Find Minimum in Rotated Sorted Array.py | vuzway9132/leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | null | null | null | algorithms/153. Find Minimum in Rotated Sorted Array.py | vuzway9132/leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | null | null | null | """
1. Clarification
2. Possible solutions
- Cheat
- Binary search II
3. Coding
4. Tests
"""
# T=O(n), S=O(1)
class Solution:
def findMin(self, nums: List[int]) -> int:
if not nums: return int(-inf)
return min(nums)
# T=O(lgn), S=O(1)
class Solution:
def findMin(self, nums: List[int]) -> int:
if not nums: return int(-inf)
left, right = 0, len(nums) - 1
while left < right:
mid = left + (right - left) // 2
if nums[mid] < nums[right]:
right = mid
else:
left = mid + 1
return nums[left]
| 20.766667 | 46 | 0.507223 | 86 | 623 | 3.674419 | 0.44186 | 0.085443 | 0.018987 | 0.050633 | 0.411392 | 0.411392 | 0.411392 | 0.411392 | 0.411392 | 0.411392 | 0 | 0.024938 | 0.35634 | 623 | 29 | 47 | 21.482759 | 0.763092 | 0.200642 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
033cec1f9ccce81f5733fb9f495e2a48d763be85 | 1,766 | py | Python | Web/gRPC/python_practical_example/quote_service/test_cowsay_client.py | MasuqaT-NET/BlogSamples | b424b51e1c01e65f952099cfd1fa05f9ef405432 | [
"MIT"
] | 18 | 2018-01-03T23:07:26.000Z | 2021-12-30T11:44:43.000Z | Web/gRPC/python_practical_example/quote_service/test_cowsay_client.py | MasuqaT-NET/BlogSamples | b424b51e1c01e65f952099cfd1fa05f9ef405432 | [
"MIT"
] | null | null | null | Web/gRPC/python_practical_example/quote_service/test_cowsay_client.py | MasuqaT-NET/BlogSamples | b424b51e1c01e65f952099cfd1fa05f9ef405432 | [
"MIT"
] | 6 | 2018-08-09T05:17:13.000Z | 2020-05-07T09:45:33.000Z | import time
from unittest import TestCase
import grpc_testing
from grpc import StatusCode
from grpc.framework.foundation import logging_pool
from cowsay_client import CowsayClient
from cowsay_pb2 import DESCRIPTOR as COWSAY_DESCRIPTOR, QuoteRequest, QuoteResponse
from cowsay_pb2_grpc import CowsayStub
target_service = COWSAY_DESCRIPTOR.services_by_name['Cowsay']
class TestCowsayClient(TestCase):
def setUp(self):
self._client_execution_thread_pool = logging_pool.pool(1)
self._fake_time = grpc_testing.strict_fake_time(time.time())
self._real_time = grpc_testing.strict_real_time()
self._fake_time_channel = grpc_testing.channel(COWSAY_DESCRIPTOR.services_by_name.values(), self._fake_time)
self._real_time_channel = grpc_testing.channel(COWSAY_DESCRIPTOR.services_by_name.values(), self._real_time)
def tearDown(self):
self._client_execution_thread_pool.shutdown(wait=False)
def test_get_quote(self):
arguments = ('cow', 'foo')
def run(scenario, channel):
stub = CowsayStub(channel)
client = CowsayClient(stub)
return client.get_quote(*scenario)
f = self._client_execution_thread_pool.submit(run, arguments, self._real_time_channel)
invocation_metadata, request, rpc = self._real_time_channel.take_unary_unary(
target_service.methods_by_name['GetQuote'])
self.assertEqual(QuoteRequest(message='foo', animal=QuoteRequest.COW), request)
self.assertIn(('z', 'y'), invocation_metadata)
rpc.send_initial_metadata([('abc', 'def')])
rpc.terminate(QuoteResponse(output='foo2'), [('uvw', 'xyz')], StatusCode.OK, '')
result = f.result()
self.assertEqual('foo2', result)
| 36.791667 | 116 | 0.725934 | 215 | 1,766 | 5.637209 | 0.367442 | 0.039604 | 0.049505 | 0.064356 | 0.216997 | 0.168317 | 0.113861 | 0.113861 | 0.113861 | 0.113861 | 0 | 0.003427 | 0.173839 | 1,766 | 47 | 117 | 37.574468 | 0.827279 | 0 | 0 | 0 | 0 | 0 | 0.025481 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.121212 | false | 0 | 0.242424 | 0 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03401c816f8f4430f92c4b87cef1da6e317a54e9 | 5,140 | py | Python | scripts/alpha_diversity_stats.py | dcdanko/MetaSUB_CAP | db5672b0206afb3ffe3204b0577a4a5f84b9bcd4 | [
"MIT"
] | 20 | 2017-11-02T13:36:16.000Z | 2021-07-23T12:44:28.000Z | scripts/alpha_diversity_stats.py | dcdanko/MetaSUB_CAP | db5672b0206afb3ffe3204b0577a4a5f84b9bcd4 | [
"MIT"
] | 30 | 2018-02-22T18:25:02.000Z | 2019-11-06T15:03:34.000Z | scripts/alpha_diversity_stats.py | dcdanko/MetaSUB_CAP | db5672b0206afb3ffe3204b0577a4a5f84b9bcd4 | [
"MIT"
] | 9 | 2018-04-26T22:12:08.000Z | 2020-08-06T01:04:54.000Z | #! /usr/bin/env python3
import sys
import math
import argparse as ap
from json import dumps as jdumps
from random import choices
class LevelNotFoundException(Exception):
pass
def checkLevel(taxon, level):
if level == 'species':
return ('s__' in taxon) and ('t__' not in taxon)
elif level == 'genus':
return ('g__' in taxon) and ('s__' not in taxon)
raise LevelNotFoundException()
class Sample:
def __init__(self, tool, level):
self.tool = tool
self.level = level
self.abunds = {}
self._total = None
def addLine(self, line):
taxon, abund = line.split()
if checkLevel(taxon, self.level):
self.abunds[taxon] = float(abund)
@classmethod
def parseMPA(ctype, tool, mpaFile, level):
sample = Sample(tool, level)
with open(mpaFile) as mF:
for line in mF:
sample.addLine(line)
return sample
def subset(self, n):
if n == self.total():
return self
brkpoints = [0]
rmap = {}
for i, (key, val) in enumerate(self.abunds.items()):
brkpoints.append(brkpoints[i] + val)
rmap[i] = key
i = 0
outAbunds = {}
indices = range(int(self.total()))
indices = sorted(choices(indices, k=n))
for ind in indices:
while ind >= brkpoints[i + 1]:
i += 1
key = rmap[i]
try:
outAbunds[key] += 1
except KeyError:
outAbunds[key] = 1
outSamp = Sample(self.tool, self.level)
outSamp.abunds = outAbunds
return outSamp
def total(self):
if self._total is None:
self._total = sum(self.abunds.values())
return self._total
def richness(self):
return len(self.abunds)
def shannonIndex(self):
H = 0
for count in self.abunds.values():
p = count / self.total()
assert p <= 1
H += p * math.log(p)
if H < 0:
H *= -1
return H
def ginisimpson(self):
H = 0
for count in self.abunds.values():
p = count / self.total()
assert p <= 1
H += p * p
H = 1 - H
return H
def chao1(self):
sings, doubs = 0, 1 # give doubles a pseudocount to avoid div by zero
for val in self.abunds.values():
if val == 1:
sings += 1
elif val == 2:
doubs += 1
est = (sings * sings) / (2 * doubs)
return self.richness() + est
def getSubsets(N):
vals = [1, 5, 10, 100, 500, 1000, 10 * 1000]
vals = [el * 1000 for el in vals]
out = []
for val in vals:
if val < N:
out.append(val)
else:
out.append(N)
break
return out
def handleCounts(tool, fname):
obj = {
'species': {
'richness': {},
'shannon_index': {},
'gini-simpson': {},
'chao1': {}
},
'genus': {
'richness': {},
'shannon_index': {},
'gini-simpson': {},
'chao1': {}
}
}
for level in obj.keys():
sample = Sample.parseMPA(tool, fname, level)
for subsetSize in getSubsets(sample.total()):
subsample = sample.subset(subsetSize)
key = str(subsetSize)
if subsample == sample:
key = 'all_reads'
obj[level]['shannon_index'][key] = subsample.shannonIndex()
obj[level]['richness'][key] = subsample.richness()
obj[level]['gini-simpson'][key] = subsample.ginisimpson()
obj[level]['chao1'][key] = subsample.chao1()
return obj
def handleProportions(tool, fname):
obj = {
'species': {
'richness': {},
'shannon_index': {},
'gini-simpson': {}
},
'genus': {
'richness': {},
'shannon_index': {},
'gini-simpson': {}
}
}
for level in obj.keys():
sample = Sample.parseMPA(tool, fname, level)
key = 'all_reads'
obj[level]['richness'][key] = sample.richness()
obj[level]['shannon_index'][key] = sample.shannonIndex()
obj[level]['gini-simpson'][key] = sample.ginisimpson()
return obj
def main():
args = parseArgs()
outobj = {}
for mpaFilePair in args.mpa_files:
tool, mpaFile = mpaFilePair.split(',')
if tool.lower() == 'kraken':
outobj['kraken'] = handleCounts(tool, mpaFile)
elif tool.lower() == 'metaphlan2':
outobj['metaphlan2'] = handleProportions(tool, mpaFile)
else:
sys.stderr.write('tool {} unsupported'.format(tool))
sys.stdout.write(jdumps(outobj))
def parseArgs():
parser = ap.ArgumentParser()
parser.add_argument('mpa_files', nargs='+',
help='pairs of tool_name,mpa_file')
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
| 26.091371 | 78 | 0.510506 | 557 | 5,140 | 4.642729 | 0.281867 | 0.030936 | 0.024749 | 0.037123 | 0.200309 | 0.153906 | 0.122196 | 0.122196 | 0.122196 | 0.083527 | 0 | 0.015834 | 0.361089 | 5,140 | 196 | 79 | 26.22449 | 0.77162 | 0.013619 | 0 | 0.245399 | 0 | 0 | 0.074191 | 0 | 0 | 0 | 0 | 0 | 0.01227 | 1 | 0.092025 | false | 0.006135 | 0.030675 | 0.006135 | 0.220859 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0343d8a19a30189bafdda72fb63bd75d656dfc89 | 1,061 | py | Python | image_collection/migrations/0004_auto_20160113_0458.py | bitmazk/django-image-collection | 73b05ff825d74bdab64609b531d5305f9332b702 | [
"MIT"
] | null | null | null | image_collection/migrations/0004_auto_20160113_0458.py | bitmazk/django-image-collection | 73b05ff825d74bdab64609b531d5305f9332b702 | [
"MIT"
] | null | null | null | image_collection/migrations/0004_auto_20160113_0458.py | bitmazk/django-image-collection | 73b05ff825d74bdab64609b531d5305f9332b702 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import image_collection.models
class Migration(migrations.Migration):
dependencies = [
('image_collection', '0003_auto_20160113_0445'),
]
operations = [
migrations.RemoveField(
model_name='imageslide',
name='link',
),
migrations.AddField(
model_name='imageslide',
name='external_link',
field=models.URLField(help_text='E.g. "http://www.example.com/my-page/". Enter absolute URL, that the image should link to.', verbose_name='external link', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='imageslide',
name='internal_link',
field=image_collection.models.RelativeURLField(help_text='E.g. "/my-page/". Enter slug of internal pager, that the image should link to.', verbose_name='internal link', blank=True),
preserve_default=True,
),
]
| 33.15625 | 193 | 0.629595 | 115 | 1,061 | 5.617391 | 0.504348 | 0.069659 | 0.088235 | 0.106811 | 0.334365 | 0.334365 | 0.108359 | 0.108359 | 0 | 0 | 0 | 0.021465 | 0.253534 | 1,061 | 31 | 194 | 34.225806 | 0.794192 | 0.019793 | 0 | 0.4 | 0 | 0.08 | 0.282274 | 0.022158 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0344447ad9ea2ba9e5f78647ef0e90f1b7786110 | 7,913 | py | Python | hw1/hollygrimm_behavior_cloner.py | andyk/homework | 6f31240e7b16bb94992a87fc764839591bd034af | [
"MIT"
] | null | null | null | hw1/hollygrimm_behavior_cloner.py | andyk/homework | 6f31240e7b16bb94992a87fc764839591bd034af | [
"MIT"
] | null | null | null | hw1/hollygrimm_behavior_cloner.py | andyk/homework | 6f31240e7b16bb94992a87fc764839591bd034af | [
"MIT"
] | null | null | null | # Copying Holly Grimm's solution https://github.com/hollygrimm/cs294-homework/blob/master/hw1/bc.py
# Copy and pasting and merging it into a copy of my behavior_cloner.py code.
import argparse
import pickle
import os
import sys
import tensorflow.compat.v1 as tf
import numpy as np
from sklearn.model_selection import train_test_split
import mlflow.tensorflow
import gym
from gym import wrappers
from tqdm import tqdm
#Imports copied from hollygrimm's solution
import logging
from hollygrimm_model import Model
# The following doesn't seem to work with the way Holly Grimm builds her tensorflow model.
mlflow.tensorflow.autolog()
def config_logging(log_file):
if os.path.exists(log_file):
os.remove(log_file)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def create_model(session, obs_samples, num_observations, num_actions, logger, optimizer,
learning_rate, restore, checkpoint_dir):
model = Model(obs_samples, num_observations, num_actions, checkpoint_dir, logger,
optimizer, learning_rate)
if restore:
model.load(session)
else:
logger.info("Created model with fresh parameters")
session.run(tf.global_variables_initializer())
return model
def bc(expert_data_filename, env_name, restore, results_dir, max_timesteps=None,
optimizer='adam', num_epochs=100, learning_rate=.001, batch_size=32, keep_prob=1):
# Reset TF env
tf.reset_default_graph()
# Create a gym env.
env = gym.make(env_name)
max_steps = max_timesteps or env.spec.max_episode_steps
with open(expert_data_filename, 'rb') as f:
data = pickle.loads(f.read())
obs = np.stack(data['observations'], axis=0)
actions = np.squeeze(np.stack(data['actions'], axis=0))
x_train, x_test, y_train, y_test = train_test_split(obs, actions, test_size=0.2)
num_samples = len(x_train)
min_val_loss = sys.maxsize
with tf.Session() as session:
model = create_model(session, x_train, x_train.shape[1], y_train.shape[1], logger,
optimizer, learning_rate, restore, results_dir)
file_writer = tf.summary.FileWriter(results_dir, session.graph)
#file_writer = tf.summary.FileWriter(results_dir, session.graph)
for epoch in tqdm(range(num_epochs)):
perm = np.random.permutation(x_train.shape[0])
obs_samples = x_train[perm]
action_samples = y_train[perm]
loss = 0.
for k in range(0, obs_samples.shape[0], batch_size):
batch_loss, training_scalar = model.update(session, obs_samples[k:k + batch_size],
action_samples[k:k + batch_size],
keep_prob)
loss += batch_loss
file_writer.add_summary(training_scalar, epoch)
min_val_loss, validation_scalar = validate(model, logger, session, x_test, y_test,
epoch, batch_size, min_val_loss, results_dir)
file_writer.add_summary(validation_scalar, epoch)
# Test the updated model after each epoch of training the DNN.
new_exp = model.test_run(session, env, max_steps)
tqdm.write(
"Epoch %3d; Loss %f; Reward %f; Steps %d" % (epoch, loss / num_samples,
new_exp['reward'], new_exp['steps']))
# Write a video of the final gym test results.
env = wrappers.Monitor(env, results_dir, force=True)
results = []
for _ in tqdm(range(10)):
results.append(model.test_run(session, env, max_steps)['reward'])
logger.info("Reward mean and std dev with behavior cloning: %f(%f)" % (np.mean(results),
np.std(results)))
mlflow.log_params({"reward_mean": np.mean(results), "reward_std": np.std(results)})
return np.mean(results), np.std(results)
def validate(model, logger, session, x_test, y_test, num_epoch, batch_size, min_loss, checkpoint_dir):
avg_loss = []
# for k in range(0, x_test.shape[0], batch_size):
loss, validation_scalar = model.validate(session, x_test, y_test)
avg_loss.append(loss)
new_loss = sum(avg_loss) / len(avg_loss)
logger.info("Finished epoch %d, average validation loss = %f" % (num_epoch, new_loss))
if new_loss < min_loss: # Only save model if val loss dropped
model.save(session)
min_loss = new_loss
return min_loss, validation_scalar
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('expert_run_id', type=str)
parser.add_argument('--num_epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument("--restore", type=bool, default=False)
args = parser.parse_args()
for k, v in vars(args).items():
mlflow.log_param(k, v)
if not os.path.exists('results'):
os.makedirs('results')
log_file = os.path.join(os.getcwd(), 'results', 'train_out.log')
logger = config_logging(log_file)
#env_models = [('Ant-v1', 'data/Ant-v1_data_250_rollouts.pkl', 'experts/Ant-v1.pkl', 250),
# ('HalfCheetah-v1', 'data/HalfCheetah-v1_data_10_rollouts.pkl', 'experts/HalfCheetah-v1.pkl', 10),
# ('Hopper-v1', 'data/Hopper-v1_data_10_rollouts.pkl', 'experts/Hopper-v1.pkl', 10),
# ('Humanoid-v1', 'data/Humanoid-v1_data_250_rollouts.pkl', 'experts/Humanoid-v1.pkl', 250),
# ('Reacher-v1', 'data/Reacher-v1_data_250_rollouts.pkl', 'experts/Reacher-v1.pkl', 250),
# ('Walker2d-v1', 'data/Walker2d-v1_data_10_rollouts.pkl','experts/Walker2d-v1.pkl', 10)
# ]
#for env_name, rollout_data, expert_policy_file, num_rollouts in env_models :
# ===================================================
# read in dataset from expert policy rollouts.
mlflow_c = mlflow.tracking.MlflowClient()
expert_data_file_base = mlflow_c.download_artifacts(args.expert_run_id, "")
expert_data_file_rel_path = mlflow_c.list_artifacts(args.expert_run_id, "expert_data_file")[
0].path
expert_data_filename = expert_data_file_base + "/" + expert_data_file_rel_path
print("opening {0}".format(expert_data_filename))
env_name = mlflow_c.get_run(args.expert_run_id).data.params["envname"]
bc_results_dir = os.path.join(os.getcwd(), 'results', env_name, 'bc')
bc_reward_mean, bc_reward_std = bc(expert_data_filename, env_name, args.restore, bc_results_dir,
batch_size=args.batch_size, num_epochs=args.num_epochs)
logger.info('Behavior Cloning mean & std rewards: %f(%f))' %
(bc_reward_mean, bc_reward_std))
print("logging 'results' directory to mlflow.")
mlflow.log_artifacts('results')
# Commenting out dagger for now.
#da_results_dir = os.path.join(os.getcwd(), 'results', env_name, 'da')
#if not os.path.exists(da_results_dir):
# os.makedirs(da_results_dir)
#_,_, da_mean,da_std = dagger(rollout_data, expert_policy_file, env_name, args.restore, da_results_dir, num_rollouts)
#results.append((env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std))
#for env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std in results :
# logger.info('Env: %s, Expert: %f(%f), Behavior Cloning: %f(%f), Dagger: %f(%f)'%
# (env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std))
| 41.429319 | 121 | 0.647289 | 1,078 | 7,913 | 4.5 | 0.237477 | 0.024737 | 0.022263 | 0.00907 | 0.248196 | 0.189239 | 0.105958 | 0.093589 | 0.06308 | 0.042053 | 0 | 0.01433 | 0.232782 | 7,913 | 190 | 122 | 41.647368 | 0.784714 | 0.248957 | 0 | 0 | 0 | 0 | 0.081712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0 | 0.114035 | 0 | 0.184211 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03470a7c37dd728524a1ea76f0cbdccac1e546fa | 4,397 | py | Python | tree/generate.py | xi-studio/DiscreteNN | 85468da14bddfe4cbe2e07071454cdbc52ef915f | [
"MIT"
] | 1 | 2019-08-15T09:43:21.000Z | 2019-08-15T09:43:21.000Z | tree/generate.py | xi-studio/DiscreteNN | 85468da14bddfe4cbe2e07071454cdbc52ef915f | [
"MIT"
] | null | null | null | tree/generate.py | xi-studio/DiscreteNN | 85468da14bddfe4cbe2e07071454cdbc52ef915f | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
import numpy as np

# Command-line configuration (mirrors the standard PyTorch VAE MNIST example).
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
# Extra DataLoader workers and pinned memory only pay off when CUDA is in use.
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=False, **kwargs)
# Shorthand alias.  NOTE(review): appears unused in this script — confirm before removing.
norm = torch.nn.functional.normalize
class GLU(nn.Module):
    """Gated linear unit: a sigmoid gate applied to a ReLU feature branch."""

    def __init__(self, c1, c2):
        super(GLU, self).__init__()
        # Attribute names become state_dict keys used by the checkpoint; keep them stable.
        self.s = nn.Linear(c1, c2)
        self.g = nn.Linear(c1, c2)

    def forward(self, x):
        gate = torch.sigmoid(self.s(x))
        features = torch.relu(self.g(x))
        return gate * features
class Encoder(nn.Module):
    """Map a flattened 28x28 image (784 dims) to 50 phase values in (0, 1)."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc2 = nn.Linear(400, 50)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        return torch.sigmoid(self.fc2(hidden))
class Decoder(nn.Module):
    """Decode a 100-dim signal back into a 784-dim image with values in (0, 1)."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.fc1 = GLU(100, 400)  # GLU is defined earlier in this module
        self.fc2 = nn.Linear(400, 784)

    def forward(self, x):
        hidden = self.fc1(x)
        return torch.sigmoid(self.fc2(hidden))
class Key(nn.Module):
    """Map a 10-dim one-hot class label to 50 amplitude weights in (0, 1)."""

    def __init__(self):
        super(Key, self).__init__()
        self.fc1 = nn.Linear(10, 50)
        self.fc2 = nn.Linear(50, 50)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        return torch.sigmoid(self.fc2(hidden))
class VAE(nn.Module):
    """Sinusoidal autoencoder.

    The class label yields 50 amplitudes (Key), the image yields 50 phases
    (Encoder); their superposition sampled at the 100 time points in ``t``
    is perturbed with Gaussian noise and decoded back into an image.
    """

    def __init__(self):
        super(VAE, self).__init__()
        self.e = Encoder()
        self.d = Decoder()
        self.amplitude = Key()

    def forward(self, x, c, t):
        flat = x.view(-1, 784)
        batch = flat.shape[0]
        # Per-sample amplitudes (from label c) and phases (from the image),
        # broadcast across the 100 time points.
        amp = self.amplitude(c).view(batch, 50, 1).repeat(1, 1, 100)
        phase = self.e(flat).view(batch, 50, 1).repeat(1, 1, 100)
        # Sum the 50 sinusoids at each time point.
        signal = torch.sin(2 * np.pi * amp * t + np.pi * phase).sum(dim=1)
        signal = signal.view(batch, 100)
        signal = torch.randn_like(signal) + signal
        return self.d(signal), amp, phase
# Rebuild the model and restore weights from a previously trained checkpoint.
model = VAE().to(device)
model.load_state_dict(torch.load('checkpoints/mnist/fft_400.pt'))


def test():
    """Generate a grid of samples conditioned on digit class 4 and save it."""
    model.eval()
    with torch.no_grad():
        # The 100 time points at which the sinusoidal code is sampled.
        t = torch.arange(100)
        t = t.type(torch.FloatTensor)
        t = t.to(device)
        # One-hot class condition: digit 4 for all 64 samples.
        c = torch.zeros(64, 10).to(device)
        c[:, 4] =1
        # Random noise images as the encoder input.
        data = torch.rand(64, 1, 28, 28).to(device)
        rx, w, phase= model(data, c, t)
        img = rx.view(64, 1, 28, 28)
        save_image(img.cpu(),
                   'images/sample_4.png', nrow=8)
        # Iterative re-feeding experiment, kept for reference:
        # for i in range(100):
        #     rx, w, phase= model(data, c, t)
        #     img = rx.view(1, 1, 28, 28)
        #     save_image(img.cpu(),
        #                'images/sample_t_%d.png' % i, nrow=1)
        #     data = rx
        #


if __name__ == "__main__":
    test()
| 28.185897 | 83 | 0.56766 | 611 | 4,397 | 3.9509 | 0.271686 | 0.03314 | 0.035211 | 0.031069 | 0.314416 | 0.299917 | 0.191384 | 0.168186 | 0.168186 | 0.052196 | 0 | 0.040947 | 0.289061 | 4,397 | 155 | 84 | 28.367742 | 0.731286 | 0.053218 | 0 | 0.091743 | 0 | 0 | 0.08639 | 0.006757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100917 | false | 0 | 0.082569 | 0 | 0.275229 | 0.009174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0347e93cbe7693fc7a1ee576744efea58d5d82da | 4,760 | py | Python | wepy/io/yahoo.py | Jul13/wepy | 3f6acc7ecb4c9bcadf366d7ed1752660838d9dd7 | [
"Apache-2.0"
] | null | null | null | wepy/io/yahoo.py | Jul13/wepy | 3f6acc7ecb4c9bcadf366d7ed1752660838d9dd7 | [
"Apache-2.0"
] | null | null | null | wepy/io/yahoo.py | Jul13/wepy | 3f6acc7ecb4c9bcadf366d7ed1752660838d9dd7 | [
"Apache-2.0"
] | null | null | null | # Author: Gheorghe Postelnicu
from datetime import date
import pandas as pd
from io import BytesIO
from urllib.request import urlopen
class Yahoo(object):
    """Client for the legacy Yahoo Finance CSV quote/history endpoints.

    Symbol lists are fetched in chunks of ``chunk_size`` to keep the request
    URLs within length limits.
    """

    # Taken from http://www.jarloo.com/yahoo_finance/
    # Maps a readable column name to Yahoo's field code for the quotes CSV.
    yahoo_query_params = {
        'ticker': 's',
        'average_daily_volume': 'a2',
        'dividend_yield': 'y',
        'dividend_per_share': 'd',
        'earnings_per_share': 'e',
        'est_eps_yr': 'e7',
        'est_eps_next_yr': 'e8',
        'ex_dividend_date': 'q',
        'market_cap': 'j1',
        'price_earnings_ratio': 'r',
        'short_ratio': 's7',
        'volume': 'v',
        '52w_low': 'j',
        '52w_high': 'k'
    }

    def __init__(self, chunk_size=500):
        self.chunk_size = chunk_size
        # Raw string avoids the invalid-escape warning on '\d' (same value).
        # NOTE(review): this pattern is unused — _convert_market_cap parses the
        # suffix directly.  Kept only for backward compatibility.
        self.market_cap_pattern = r'(\d+[\.]\d+)([MB])'

    @staticmethod
    def _convert_market_cap(str_value):
        """Convert a market-cap string such as '12.3B' or '450M' to a float.

        Non-string input (e.g. NaN from pandas) yields -1.0.
        """
        # Bug fix: use isinstance instead of comparing type objects.
        if not isinstance(str_value, str):
            return -1.
        last_char = str_value[-1]
        if last_char in ('B', 'M'):
            base = float(str_value[:-1])
            multiplier = 10. ** 9 if last_char == 'B' else 10. ** 6
            return base * multiplier
        return float(str_value)

    def _fetch_fields(self, symbols, fields):
        """Download the quotes CSV for ``symbols`` and return one DataFrame."""
        def chunker(symbols_):
            # Yield successive slices of at most self.chunk_size symbols.
            i = 0
            while i < len(symbols_):
                count_chunk = min(self.chunk_size, len(symbols_) - i)
                yield symbols_[i:(i + count_chunk)]
                i += count_chunk
        dfs = []
        for chunk in chunker(symbols):
            request = 'http://download.finance.yahoo.com/d/quotes.csv?s={}&f={}'.format(
                ','.join(chunk), fields)
            raw_dat = urlopen(request).read()
            dfs.append(pd.read_csv(BytesIO(raw_dat), header=None))
        return pd.concat(dfs)

    def batch_snapshot(self, tickers):
        """
        Retrieves financial information for a batch of stock symbols.

        Args:
            tickers (list<str>): list of stock symbols

        Returns:
            pandas.Dataframe: dataframe with one row per symbol.
        """
        ret = self._fetch_fields(tickers, ''.join(Yahoo.yahoo_query_params.values()))
        # Materialise the dict view: a plain list is always accepted as a
        # column index, a dict_keys view is not guaranteed to be.
        ret.columns = list(Yahoo.yahoo_query_params.keys())
        for col in ['ex_dividend_date']:
            ret[col] = pd.to_datetime(ret[col])
        ret['market_cap'] = [self._convert_market_cap(mc) for mc in ret.market_cap]
        return ret

    @staticmethod
    def _history_call(ticker, from_date, to_date, params):
        """Query the history endpoint; ``params['g']`` selects the report type.

        Note: mutates the caller's ``params`` dict (internal callers pass
        fresh literals, so this is safe here).
        """
        base_url = 'http://ichart.finance.yahoo.com/table.csv'
        # Yahoo months are zero-based, hence the -1.
        params.update({'s': ticker,
                       'a': from_date.month - 1,
                       'b': from_date.day,
                       'c': from_date.year,
                       'd': to_date.month - 1,
                       'e': to_date.day,
                       'f': to_date.year
                       })
        url = '{}?{}'.format(base_url, '&'.join('{}={}'.format(k, params[k]) for k in params))
        raw_dat = urlopen(url).read()
        return pd.read_csv(BytesIO(raw_dat), parse_dates=[0])

    def historic_close(self, tickers, from_date=date(2010, 1, 1), to_date=date.today(), join_type='outer'):
        """
        Extracts the adjusted close for a set of tickers.

        Args:
            tickers (list(str)): list of stock symbols
            from_date (date): start date
            to_date (date): end date
            join_type (str): type of join

        Returns:
            Dataframe indexed by date with one column by stock ticker.

        NOTE(review): the ``to_date=date.today()`` default binds at import
        time, not call time — confirm this is acceptable for long-running
        processes.
        """
        def fetch_adj_close(ticker, from_date_, to_date_):
            # One column of adjusted closes, indexed by date, named by ticker.
            dat = self._single_historic_ohlc(ticker, from_date_, to_date_)
            dat['Date'] = pd.to_datetime(dat.Date, infer_datetime_format=True)
            dat.set_index('Date', inplace=True)
            dat.sort_index(inplace=True)
            ret = dat[['Adj Close']]
            ret.columns = [ticker]
            return ret
        dats = [fetch_adj_close(ticker, from_date_=from_date, to_date_=to_date) for ticker in tickers]
        return dats[0].join(dats[1:], how=join_type)

    def _single_historic_ohlc(self, ticker, from_date=date(2010, 1, 1), to_date=date.today()):
        """Daily OHLC history ('g': 'd') for a single ticker."""
        return self._history_call(ticker, from_date, to_date, {'g': 'd'})

    def historic_dividends(self, ticker, from_date=date(2010, 1, 1), to_date=date.today()):
        """
        Extracts the dividend payout history for an individual stock.

        Args:
            ticker (str): stock symbol
            from_date (date): start date
            to_date (date): end date

        Returns:
            pandas.DataFrame: dataframe with dates and dividends.
        """
        return self._history_call(ticker, from_date, to_date, {'g': 'v'})
| 36.335878 | 112 | 0.563655 | 602 | 4,760 | 4.225914 | 0.302326 | 0.04717 | 0.035377 | 0.033019 | 0.216195 | 0.188679 | 0.154874 | 0.142689 | 0.120676 | 0.120676 | 0 | 0.013674 | 0.308613 | 4,760 | 130 | 113 | 36.615385 | 0.759344 | 0.157563 | 0 | 0.05814 | 0 | 0 | 0.101762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.046512 | 0.011628 | 0.302326 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
034a4ded8103d6d6dff5e4e2de1713b5fd8b65e6 | 1,627 | py | Python | physalia/fixtures/models.py | luiscruz/physalia | 364951d94e02b60092785db46a8c7a7299ffe2a4 | [
"MIT"
] | 13 | 2017-02-14T10:35:43.000Z | 2021-12-11T17:33:36.000Z | physalia/fixtures/models.py | luiscruz/physalia | 364951d94e02b60092785db46a8c7a7299ffe2a4 | [
"MIT"
] | 3 | 2020-02-27T12:07:21.000Z | 2021-07-25T12:52:36.000Z | physalia/fixtures/models.py | luiscruz/physalia | 364951d94e02b60092785db46a8c7a7299ffe2a4 | [
"MIT"
] | 3 | 2019-10-06T14:01:58.000Z | 2020-03-13T15:40:30.000Z | """Fixtures for models module."""
from physalia.models import Measurement
import numpy
def create_measurement(use_case='login',
                       app_pkg='com.package',
                       duration=2,
                       energy_consumption=30):
    """Build a Measurement populated with fixed fake data."""
    timestamp = 1485634263.096069
    version = '1.0.0'
    device_model = 'Nexus 5X'
    return Measurement(timestamp, use_case, app_pkg, version,
                       device_model, duration, energy_consumption)
def create_random_sample(mean, std,
                         app_pkg='com.package',
                         use_case='login',
                         count=30, seed=1):
    """Create ``count`` measurements with normally distributed energy values."""
    # pylint: disable=too-many-arguments
    if seed is not None:
        numpy.random.seed(seed)
    energies = numpy.random.normal(loc=mean, scale=std, size=count)
    return [create_measurement(energy_consumption=energy,
                               app_pkg=app_pkg,
                               use_case=use_case)
            for energy in energies]
def create_random_samples(count=30, seed=1):
    """Create two measurement samples with different means (10.0 vs 12.0)."""
    if seed is not None:
        numpy.random.seed(seed)
    # Tuple elements evaluate left to right, so RNG consumption order is preserved.
    return (create_random_sample(10.0, 1.0, count=count, seed=None),
            create_random_sample(12.0, 1.0, count=count, seed=None))
| 33.204082 | 70 | 0.543331 | 178 | 1,627 | 4.797753 | 0.376404 | 0.04918 | 0.063232 | 0.037471 | 0.220141 | 0.220141 | 0.220141 | 0.17096 | 0.17096 | 0 | 0 | 0.037791 | 0.365704 | 1,627 | 48 | 71 | 33.895833 | 0.789729 | 0.149969 | 0 | 0.157895 | 0 | 0 | 0.03321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.052632 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0354caf01e13d2c8ffb045f382e909588ad189a5 | 786 | py | Python | setup.py | rrosajp/xdcc | e8ffa143cd48745824d077a686bfc0b3f0af6193 | [
"MIT"
] | 7 | 2020-06-03T06:24:23.000Z | 2022-03-09T13:00:54.000Z | setup.py | thiagotps/xdcc | e8ffa143cd48745824d077a686bfc0b3f0af6193 | [
"MIT"
] | 3 | 2020-09-26T12:52:43.000Z | 2022-01-22T23:17:19.000Z | setup.py | rrosajp/xdcc | e8ffa143cd48745824d077a686bfc0b3f0af6193 | [
"MIT"
] | 4 | 2020-09-26T01:17:00.000Z | 2022-02-06T19:22:04.000Z | import setuptools
# The PyPI long description comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="xdcc",
    version="0.0.3",
    author="Thiago T. P. Silva",
    author_email="thiagoteodoro501@gmail.com",
    description="A simple XDCC downloader written in python3",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/thiagotps/xdcc",
    packages=setuptools.find_packages(),
    install_requires = ['irc'],
    keywords="irc xdcc",
    # Installs an `xdcc` console command mapped to xdcc.__main__:main.
    entry_points={"console_scripts": ["xdcc=xdcc.__main__:main"]},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
)
| 30.230769 | 66 | 0.661578 | 91 | 786 | 5.538462 | 0.714286 | 0.119048 | 0.075397 | 0.119048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015674 | 0.188295 | 786 | 25 | 67 | 31.44 | 0.774295 | 0 | 0 | 0 | 0 | 0 | 0.398219 | 0.062341 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
035543c446ab93f942c4299a0b653ef67467158d | 9,319 | py | Python | draw-tsp-path.py | wenderlemes/gcc218_trabalho_pratico | e57aab3c1ebcbe92683052994de646d0f76e8eb8 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | draw-tsp-path.py | wenderlemes/gcc218_trabalho_pratico | e57aab3c1ebcbe92683052994de646d0f76e8eb8 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | draw-tsp-path.py | wenderlemes/gcc218_trabalho_pratico | e57aab3c1ebcbe92683052994de646d0f76e8eb8 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | """Modified code from https://developers.google.com/optimization/routing/tsp#or-tools """
# Copyright Matthew Mack (c) 2020 under CC-BY 4.0: https://creativecommons.org/licenses/by/4.0/
from __future__ import print_function
import math
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
from PIL import Image, ImageDraw
import os
import time
import copy
from itertools import permutations

# Change these file names to the relevant files.
ORIGINAL_IMAGE = "images/brother-1024-stipple.png"
IMAGE_TSP = "images/brother-1024-stipple.tsp"
# Change the number of points according to the base tsp file you are using.
NUMBER_OF_POINTS = 1024
# Number of tour slices optimised independently by the partitioned 2-opt passes.
NUMBER_OF_PARTITIONS = 8
# Vertex where tour construction starts (and where the closed tour ends).
INITIAL_VERTEX = 0
def create_data_model():
    """Read point coordinates from IMAGE_TSP and package them for OR-Tools."""
    coords = []
    with open(IMAGE_TSP) as tsp_file:
        # The first six lines are the TSPLIB header; the rest are "index x y".
        for _ in range(6):
            next(tsp_file)
        for line in tsp_file:
            _index, x, y = line.split()
            coords.append((int(float(x)), int(float(y))))
    return {
        'locations': coords,  # yapf: disable
        'num_vehicles': 1,
        'depot': 0,
    }
def compute_euclidean_distance_matrix(locations):
    """Return a dict-of-dicts of truncated-integer Euclidean distances."""
    distances = {}
    for i, (x1, y1) in enumerate(locations):
        row = {}
        for j, (x2, y2) in enumerate(locations):
            # Distance to self is zero; everything else is int-truncated.
            row[j] = 0 if i == j else int(math.hypot(x1 - x2, y1 - y2))
        distances[i] = row
    return distances
def print_solution(manager, routing, solution):
    """Print the objective value, the route, and the total route distance."""
    print('Objective: {}'.format(solution.ObjectiveValue()))
    index = routing.Start(0)
    plan_output = 'Route:\n'
    route_distance = 0
    while not routing.IsEnd(index):
        plan_output += ' {} ->'.format(manager.IndexToNode(index))
        previous_index = index
        index = solution.Value(routing.NextVar(index))
        route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
    plan_output += ' {}\n'.format(manager.IndexToNode(index))
    # Bug fix: the distance line used to be appended *after* print() and was
    # therefore never shown; append it first, then print once.
    plan_output += 'Objective: {}m\n'.format(route_distance)
    print(plan_output)
def get_routes(solution, routing, manager):
    """Get vehicle routes from a solution and store them in an array."""
    # Get vehicle routes and store them in a two dimensional array whose
    # i,j entry is the jth location visited by vehicle i along its route.
    # NOTE(review): unlike the stock OR-Tools sample, `solution` here is a
    # plain successor dict (node -> next node) produced by the local solvers,
    # not an Assignment object — hence the dict walk below.
    routes = []
    for route_nbr in range(routing.vehicles()):
        index = routing.Start(route_nbr)
        route = [manager.IndexToNode(index)]
        #while not routing.IsEnd(index):
        #    index = solution.Value(routing.NextVar(index))
        counter = 0
        while counter < len(solution):
            counter += 1
            index = solution[index]
            route.append(manager.IndexToNode(index))
        routes.append(route)
    # NOTE(review): the final iteration maps the end-of-route sentinel value
    # through IndexToNode — confirm this index stays in range for the manager.
    return routes[0]
def draw_routes(nodes, path):
    """Draw the TSP path through ``nodes`` and save it next to IMAGE_TSP."""
    waypoints = [nodes[int(stop)] for stop in path]
    original_image = Image.open(ORIGINAL_IMAGE)
    width, height = original_image.size
    canvas = Image.new("RGBA", (width, height), color='white')
    pen = ImageDraw.Draw(canvas)
    pen.line(waypoints, fill='black', width=1)
    # The .tsp coordinates are y-up while images are y-down, so flip vertically.
    canvas = canvas.transpose(Image.FLIP_TOP_BOTTOM)
    FINAL_IMAGE = IMAGE_TSP.replace("-stipple.tsp", "-tsp.png")
    canvas.save(FINAL_IMAGE)
    print("TSP solution has been drawn and can be viewed at", FINAL_IMAGE)
def nearest_neighbors_solution(distance_matrix, start=None):
    """Build a greedy nearest-neighbour tour over ``distance_matrix``.

    Returns a successor dict mapping each vertex to the next vertex on the
    tour; the last visited vertex maps back to the starting vertex.

    Generalized: ``start`` defaults to the module-level INITIAL_VERTEX, and
    the vertex count is taken from the matrix itself instead of the
    hard-coded NUMBER_OF_POINTS (which had to equal len(distance_matrix)
    anyway), so the function now works for matrices of any size.
    """
    if start is None:
        start = INITIAL_VERTEX
    num_points = len(distance_matrix)
    visited = {i: False for i in range(num_points)}
    nearest_neighbors = {i: -1 for i in range(num_points)}
    last_vertex = start
    should_continue = True
    while should_continue:
        should_continue = False
        visited[last_vertex] = True
        shortest_distance = float("inf")
        closest_neighbor = -1
        # Pick the closest still-unvisited vertex reachable from last_vertex.
        for candidate in distance_matrix[last_vertex]:
            if distance_matrix[last_vertex][candidate] < shortest_distance and not visited[candidate]:
                shortest_distance = distance_matrix[last_vertex][candidate]
                closest_neighbor = candidate
                should_continue = True
        if should_continue:
            nearest_neighbors[last_vertex] = closest_neighbor
            last_vertex = closest_neighbor
        else:
            # Every vertex visited: close the tour back to the start.
            nearest_neighbors[last_vertex] = start
    return nearest_neighbors
def two_opt_solution(distance_matrix):
    """Build a nearest-neighbour tour, then refine it with partitioned 2-opt."""
    initial = nearest_neighbors_solution(distance_matrix)
    tour = convert_solution_to_group(initial)
    # Refine with progressively coarser partitions: N, N/2, ..., 1.
    partitions = NUMBER_OF_PARTITIONS
    while partitions > 0:
        two_opt(distance_matrix, tour, partitions)
        partitions = int(partitions / 2)
    return convert_group_to_solution(tour)
def two_opt(distance_matrix, group, partitions):
    """Improve the tour ``group`` in place with one 2-opt pass per partition.

    The tour is split into ``partitions`` equal slices and each slice is
    optimised independently: repeatedly find the single segment reversal with
    the largest negative cost change and apply it, until none remains.
    """
    partition_size = int(len(group)/partitions)
    for k in range(partitions):
        while True:
            # Best (most negative) cost change found in this sweep.
            min_change = 0
            min_i = -1
            min_j = -1
            for i in range(1 + (k*partition_size), ((k+1)*partition_size)-2):
                for j in range(i+1, ((k+1)*partition_size)):
                    u = group[i-1]
                    v = group[i]
                    w = group[j]
                    # NOTE(review): when j is at the partition end, this wraps
                    # to group[0] (the global tour start), not the partition's
                    # own first vertex — confirm this is intended.
                    x = group[(j+1) % ((k+1)*partition_size)]
                    # Cost of edges (u,v)+(w,x) now vs (u,w)+(v,x) after
                    # reversing the segment v..w.
                    current_distance = (distance_matrix[u][v] + distance_matrix[w][x])
                    new_distance = (distance_matrix[u][w] + distance_matrix[v][x])
                    change = new_distance - current_distance
                    if change < min_change:
                        min_change = change
                        min_i = i
                        min_j = j
            # With no improvement this is swap_edges(group, -1, -1), which
            # reverses an empty slice and is a no-op.
            swap_edges(group, min_i, min_j)
            if min_change == 0:
                break
            # Progress trace: the applied (negative) cost change.
            print(min_change)
def swap_edges(group, v, w):
    """Reverse the tour segment group[v..w] inclusive, in place (2-opt move)."""
    segment = group[v:w + 1]
    segment.reverse()
    group[v:w + 1] = segment
def convert_group_to_solution(group):
    """Convert an ordered tour list into a successor dict.

    Each vertex maps to the vertex that follows it; the final vertex maps to
    len(group) as an end-of-route sentinel (previously the hard-coded
    NUMBER_OF_POINTS, which equals len(group) for a full tour).
    Removed: a leftover debug print of the whole successor dict.
    """
    solution = {}
    for i in range(len(group) - 1):
        solution[group[i]] = group[i + 1]
    solution[group[-1]] = len(group)
    return solution
def convert_solution_to_group(solution, start=None):
    """Walk a successor dict into an ordered tour list of len(solution) stops.

    Generalized: ``start`` defaults to the module-level INITIAL_VERTEX, and
    the vertex count is taken from the dict itself instead of the hard-coded
    NUMBER_OF_POINTS (equal for a full tour).
    """
    if start is None:
        start = INITIAL_VERTEX
    head = start
    group = []
    for _ in range(len(solution)):
        group.append(head)
        head = solution[head]
    return group
def calculate_group_cost(distance_matrix, group):
    """Total length of the closed tour ``group`` (wraps back to its start)."""
    n = len(group)
    return sum(distance_matrix[group[i]][group[(i + 1) % n]] for i in range(n))
def main():
    """Entry point of the program."""
    starting_moment = time.time()
    # Instantiate the data problem.
    print("Step 1/5: Initialising variables")
    data = create_data_model()
    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['locations']),
                                           data['num_vehicles'], data['depot'])
    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)
    print("Step 2/5: Computing distance matrix")
    distance_matrix = compute_euclidean_distance_matrix(data['locations'])

    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return distance_matrix[from_node][to_node]
    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    # Setting first solution heuristic.
    print("Step 3/5: Setting an initial solution")
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    # Solve the problem.
    print("Step 4/5: Solving")
    # The OR-Tools solver and the plain nearest-neighbour variant are kept
    # commented out for reference; the local partitioned 2-opt is active.
    #solution = routing.SolveWithParameters(search_parameters)
    #solution = nearest_neighbors_solution(distance_matrix)
    solution = two_opt_solution(distance_matrix)
    # Print solution on console.
    if solution:
        #print_solution(manager, routing, solution)
        print("Step 5/5: Drawing the solution")
        routes = get_routes(solution, routing, manager)
        draw_routes(data['locations'], routes)
    else:
        print("A solution couldn't be found :(")
    finishing_moment = time.time()
    print("Total time elapsed during execution: " + str(finishing_moment - starting_moment) + " seconds")
    print("Total distance: " + str(calculate_group_cost(distance_matrix, convert_solution_to_group(solution))))


if __name__ == '__main__':
    main()
0357185cdbc8f0ba6e573ad391b048407dabd43e | 1,027 | py | Python | script/shaderutil.py | xzfn/toy | 5d4f6e631c662634a059a4a178174032b01cc81a | [
"MIT"
] | null | null | null | script/shaderutil.py | xzfn/toy | 5d4f6e631c662634a059a4a178174032b01cc81a | [
"MIT"
] | null | null | null | script/shaderutil.py | xzfn/toy | 5d4f6e631c662634a059a4a178174032b01cc81a | [
"MIT"
] | null | null | null |
import os
import shadercompiler
def spv_folder_to_glsl_folder(spv_folder):
return os.path.join(spv_folder, '../../toy/shader')
def glsl_from_spv(spv):
spv_folder, spv_name = os.path.split(spv)
glsl_folder = spv_folder_to_glsl_folder(spv_folder)
glsl_name = spv_name[:-4] + '.glsl'
glsl = os.path.join(glsl_folder, glsl_name)
return glsl
def reload_pipelines(pipelines):
spv_pipeline_map = {}
for pipeline in pipelines:
spvs = pipeline.get_shader_spvs()
for spv in spvs:
spv_pipeline_map.setdefault(spv, set()).add(pipeline)
outdated_pipelines = set()
for spv in spv_pipeline_map:
glsl = glsl_from_spv(spv)
if shadercompiler.is_shader_outdated(glsl, spv):
res = shadercompiler.compile_glsl(glsl, spv)
if not res:
print('ERROR reload failed')
return
outdated_pipelines.update(spv_pipeline_map[spv])
for pipeline in outdated_pipelines:
pipeline.reload_shader()
| 31.121212 | 65 | 0.666991 | 136 | 1,027 | 4.742647 | 0.279412 | 0.083721 | 0.086822 | 0.088372 | 0.093023 | 0.093023 | 0.093023 | 0 | 0 | 0 | 0 | 0.00128 | 0.239533 | 1,027 | 32 | 66 | 32.09375 | 0.824584 | 0 | 0 | 0 | 0 | 0 | 0.038986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0.037037 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
035a72cf11a14ddfaed1b39b4cd2c2f14c4da51e | 2,046 | py | Python | GeneralizeDEMConsole.py | tsamsonov/generalize-dem | 4e180944cd3488654240d47464cf8a0b8a7bc640 | [
"Python-2.0",
"OLDAP-2.7"
] | 16 | 2017-07-10T15:28:41.000Z | 2021-12-30T16:25:06.000Z | GeneralizeDEMConsole.py | tsamsonov/Small-Scale-Terrain-Generalization | 4e180944cd3488654240d47464cf8a0b8a7bc640 | [
"Python-2.0",
"OLDAP-2.7"
] | 4 | 2017-07-17T13:35:25.000Z | 2019-12-02T20:15:28.000Z | GeneralizeDEMConsole.py | tsamsonov/generalize-dem | 4e180944cd3488654240d47464cf8a0b8a7bc640 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | import sys
import time
import arcpy
import traceback
import GeneralizeDEM
if __name__ == '__main__':

    # SET PARAMETERS HERE
    # --------------------------------------------------------------------
    demdataset = 'X:/Work/Scripts & Tools/MY/DEMGEN/mistral'
    marine = 'X:/Work/Scripts & Tools/MY/DEMGEN/DEMGENEW.gdb/ne_10m_ocean_P'
    output = 'X:/Work/DEMGEN/DEMGENEW.gdb/mistral_gen2'
    outputcellsize = 2000
    # Stream-derivation thresholds (flow accumulation / stream length) for
    # the first and second processing passes.
    minacc1 = 40
    minlen1 = 10
    minacc2 = 20
    minlen2 = 5
    is_widen = True
    widentype = 'Min/Max'
    widendist = 4000
    filtersize = 5
    is_smooth = True
    is_tiled = True
    is_parallel = True
    num_processes = 6
    tilesize = 256
    is_continued = False
    continued_folder = 'X:/Work/DEMGEN/scratch1'
    # --------------------------------------------------------------------

    print('> Initializing GeneralizeDEM script...')
    print('')
    start = int(time.time())
    try:
        # GeneralizeDEM requires an ArcGIS Advanced (ArcInfo) license.
        if arcpy.CheckProduct("ArcInfo") == "Available":
            GeneralizeDEM.execute(demdataset, marine, output, outputcellsize,
                                  minacc1, minlen1, minacc2, minlen2,
                                  is_widen, widentype, widendist, filtersize,
                                  is_smooth, is_tiled, tilesize, num_processes,
                                  is_parallel, is_continued, continued_folder)
        else:
            msg = 'ArcGIS for Desktop Advanced license not available'
            arcpy.AddError(msg)
    except Exception as err:
        # Bug fix: sys.exc_type / sys.exc_value are Python 2 only (removed in
        # Python 3, which this script targets — it uses input()).  Build the
        # report from traceback.format_exc() instead.
        tbinfo = traceback.format_exc()
        pymsg = "Traceback Info:\n" + tbinfo + "\nError Info:\n " + str(err) + "\n"
        arcpy.AddError(pymsg)
        print("Processing failed")

    # Report total elapsed wall-clock time as h/m/s.
    finish = int(time.time())
    seconds = finish - start
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    print('')
    print("> Finished in %02d h %02d m %02d s" % (h, m, s))
    print('')
    input("Press Enter to continue...")
| 31.476923 | 79 | 0.540078 | 215 | 2,046 | 5 | 0.516279 | 0.018605 | 0.022326 | 0.031628 | 0.046512 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0.030199 | 0.287879 | 2,046 | 64 | 80 | 31.96875 | 0.707618 | 0.076735 | 0 | 0.056604 | 0 | 0 | 0.212202 | 0.068435 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.09434 | 0 | 0.09434 | 0.113208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
035c76dc73552701e77d5f647636dafc183b09c4 | 534 | py | Python | reports/migrations/0097_auto_20171006_0951.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 3 | 2020-04-26T06:28:50.000Z | 2021-04-05T08:02:26.000Z | reports/migrations/0097_auto_20171006_0951.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 10 | 2020-06-05T17:36:10.000Z | 2022-03-11T23:16:42.000Z | reports/migrations/0097_auto_20171006_0951.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 5 | 2021-04-08T08:43:49.000Z | 2021-11-27T06:36:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: updates the help_text of
    # reporttype.notification_buffer.  Do not edit the operations by hand.

    dependencies = [
        ('reports', '0096_auto_20170920_1521'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reporttype',
            name='notification_buffer',
            # The b'...' help_text reflects the Python 2 / old-Django origin
            # of this migration; it must stay byte-identical.
            field=models.FloatField(help_text=b'Radius of buffer that use to find intersects authorities', null=True, blank=True),
            preserve_default=True,
        ),
    ]
| 25.428571 | 130 | 0.644195 | 55 | 534 | 6.036364 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042607 | 0.252809 | 534 | 20 | 131 | 26.7 | 0.789474 | 0.039326 | 0 | 0 | 0 | 0 | 0.225049 | 0.04501 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
035ca68338c7107344756496893369eeae016a6c | 513 | py | Python | server/functions/calcdistance.py | yrpmsg/SCSV15 | bbe953d676c082f4a30c4d5e0e0cbfcc624d969c | [
"Apache-2.0"
] | null | null | null | server/functions/calcdistance.py | yrpmsg/SCSV15 | bbe953d676c082f4a30c4d5e0e0cbfcc624d969c | [
"Apache-2.0"
] | null | null | null | server/functions/calcdistance.py | yrpmsg/SCSV15 | bbe953d676c082f4a30c4d5e0e0cbfcc624d969c | [
"Apache-2.0"
] | null | null | null | from math import radians, cos, sin, asin, sqrt, floor, pow
import math


def haversine(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two points given as
    (latitude, longitude) in decimal degrees, via the haversine formula."""
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    # Haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * asin(sqrt(a))
    # Radius of earth in kilometers. Use 3956 for miles
    r = 6371
    return c * r


# Sample points — same values and same printed output as the original script.
lat1 = 11.00461011
lon1 = 76.95691543
lat2 = 11.0070471
lon2 = 76.96110704

# calculate the result
print(haversine(lat1, lon1, lat2, lon2))
| 19 | 63 | 0.62768 | 79 | 513 | 4.075949 | 0.531646 | 0.049689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.182292 | 0.251462 | 513 | 26 | 64 | 19.730769 | 0.65625 | 0.177388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
035cf0e95ad473da5f8cd509f8390672c271dc26 | 3,106 | py | Python | xmppserver/xmpp/mechanisms.py | ovekaaven/django-xmpp-server | aa391173b4cdfc98e2f6de29d24aa4273b3620c3 | [
"MIT"
] | null | null | null | xmppserver/xmpp/mechanisms.py | ovekaaven/django-xmpp-server | aa391173b4cdfc98e2f6de29d24aa4273b3620c3 | [
"MIT"
] | null | null | null | xmppserver/xmpp/mechanisms.py | ovekaaven/django-xmpp-server | aa391173b4cdfc98e2f6de29d24aa4273b3620c3 | [
"MIT"
] | null | null | null | from slixmpp.exceptions import XMPPError
from ..conf import settings
# Registry of SASL mechanism classes, keyed by mechanism name.
mechanisms = {}


def sasl_mech():
    """Class-decorator factory that registers a mechanism under its ``name``."""
    def register(mechanism_cls):
        mechanisms[mechanism_cls.name] = mechanism_cls
        return mechanism_cls
    return register
class Mechanism(object):
    """Base class for SASL authentication mechanisms.

    Subclasses set ``name`` to the SASL mechanism identifier and implement
    ``process``; ``available`` may be overridden to gate whether the
    mechanism is offered.
    """

    # SASL mechanism identifier (e.g. 'PLAIN'); set by subclasses.
    name = None

    def __init__(self, auth):
        self.auth = auth

    @staticmethod
    async def available(auth):
        """Offered by default; mechanisms opt out by overriding."""
        return True

    @property
    def stream(self):
        """The XMPP stream owned by the auth handler."""
        return self.auth.stream

    @property
    def boundjid(self):
        """The JID bound to the underlying stream."""
        return self.auth.stream.boundjid

    async def challenge(self, data=None):
        """Send a SASL challenge and await the client's response."""
        pending = self.auth._async_challenge(data)
        return await pending

    def process(self, request):
        """Handle the client's auth request; must be overridden."""
        raise NotImplementedError()
class LegacyAuth(Mechanism):
    """Pre-SASL jabber:iq:auth login (XEP-0078)."""

    name = 'xep_0078'

    @staticmethod
    async def available(auth):
        # Offered only when legacy auth is enabled in the project settings.
        return settings.ALLOW_LEGACY_AUTH

    async def process(self, request):
        if 'username' not in request or 'resource' not in request:
            raise XMPPError('not-acceptable')
        username = request['username']
        password = request.get('password', '')
        if not await self.auth.check_password(username, password):
            raise XMPPError('not-authorized')
        self.boundjid.user = username
        self.boundjid.resource = request['resource']
@sasl_mech()
class Anonymous(Mechanism):
    """SASL ANONYMOUS (RFC 4505): bind a generated throwaway username."""

    name = 'ANONYMOUS'

    @staticmethod
    async def available(auth):
        return bool(settings.ALLOW_ANONYMOUS_LOGIN)

    async def process(self, request):
        if not settings.ALLOW_ANONYMOUS_LOGIN:
            raise XMPPError('not-authorized')
        self.boundjid.user = self.auth.generate_anonymous_user()
@sasl_mech()
class External(Mechanism):
    """SASL EXTERNAL: authenticate via the TLS client certificate.

    Certificate validation is not implemented yet, so the mechanism
    currently never reports itself available.
    """
    name = 'EXTERNAL'

    @staticmethod
    async def available(auth):
        # Without a client certificate there is nothing to authenticate with.
        if not auth.stream.get_client_cert():
            return False
        # TODO: handle client certificates
        return False

    async def process(self, request):
        # Unreachable while available() always returns False.
        pass
@sasl_mech()
class Plain(Mechanism):
    """SASL PLAIN: ``authzid NUL authcid NUL password``, sent in the clear."""
    name = 'PLAIN'

    async def process(self, request):
        """Verify the PLAIN credential triple and bind the user."""
        # Credentials may arrive inline or via an explicit challenge round-trip.
        if request.xml.text:
            payload = request['value']
        else:
            payload = await self.challenge()
        parts = payload.split(b'\0')
        if len(parts) != 3:
            raise XMPPError('malformed-request')
        authzid, username, password = (part.decode('utf8') for part in parts)
        if not await self.auth.check_password(username, password):
            raise XMPPError('not-authorized')
        authcid = "%s@%s" % (username, self.stream.host)
        if authzid not in ('', authcid):
            # authzid not supported yet
            raise XMPPError('invalid-authzid')
        self.boundjid.user = username
def get_sasl_by_name(name):
    """Look up a registered SASL mechanism class by name; None when unknown."""
    return mechanisms.get(name)
async def get_sasl_available(stream):
    """Return every registered mechanism that reports itself available on *stream*."""
    usable = []
    for mechanism in mechanisms.values():
        if await mechanism.available(stream):
            usable.append(mechanism)
    return usable
| 26.775862 | 75 | 0.603348 | 345 | 3,106 | 5.350725 | 0.269565 | 0.043337 | 0.03792 | 0.05688 | 0.303359 | 0.211268 | 0.137595 | 0.097508 | 0 | 0 | 0 | 0.005064 | 0.300708 | 3,106 | 115 | 76 | 27.008696 | 0.844843 | 0.03123 | 0 | 0.388889 | 0 | 0 | 0.057903 | 0 | 0 | 0 | 0 | 0.008696 | 0 | 1 | 0.077778 | false | 0.044444 | 0.022222 | 0.033333 | 0.355556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0362625fd6644541d017afd28f886693da68a5e3 | 7,625 | py | Python | arcade/examples/sprite_rotate_tank.py | DragonMoffon/arcade | 98fb1809363ccc537d6852be487aeae0b5fb7fb8 | [
"MIT"
] | null | null | null | arcade/examples/sprite_rotate_tank.py | DragonMoffon/arcade | 98fb1809363ccc537d6852be487aeae0b5fb7fb8 | [
"MIT"
] | null | null | null | arcade/examples/sprite_rotate_tank.py | DragonMoffon/arcade | 98fb1809363ccc537d6852be487aeae0b5fb7fb8 | [
"MIT"
] | null | null | null | """
Sprite Rotation With A Tank.
Vehicles or tower defense turrets can have parts
that can rotate toward targets.
These parts are usually represented with separate sprites
drawn relative to attachment points on the main body.
Because these sprites are usually asymmetrical,
we have to rotate them around
their attachment points on the main body.
They will look wrong otherwise!
This example allows the player to switch between
two ways of rotating a tank's turret and barrel:
1. correctly, around a point on the tank's body
2. incorrectly, around the center of the barrel.
Artwork from https://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_rotate_tank
"""
import arcade
import math
# Movement tuning for the tank body.
TANK_SPEED = 64  # How many pixels per second the tank travels
TANK_TURNING_SPEED = 60  # how many degrees per second the tank spins by.

# This is half the length of the barrel sprite.
# We use this value to ensure the end of the barrel sit in the middle of the tank.
TANK_BARREL_LENGTH_HALF = 15

# Window geometry and title.
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Rotating Tank Example"
class RotatingSprite(arcade.Sprite):
    """Sprite that can orbit an arbitrary pivot point."""

    def rotate_around_point(self, point, degrees):
        """Rotate the sprite by *degrees* around *point*.

        Both the sprite's heading and its position change, so the sprite
        keeps facing "outward" as it orbits the pivot.

        :param point: (x, y) pivot that the sprite rotates about
        :param degrees: rotation amount, in degrees
        """
        pivot_x, pivot_y = point
        # Turn the sprite itself so its facing tracks the orbit.
        # (A rotating platform that should stay upright would skip this.)
        self.angle += degrees
        # Move the sprite's center along the arc around the pivot.
        self.position = arcade.rotate_point(
            self.center_x, self.center_y, pivot_x, pivot_y, degrees)
class ExampleWindow(arcade.Window):
    """Demo window: drive a tank with WASD while its turret tracks the mouse.

    Pressing P toggles between rotating the barrel correctly (around a
    pivot on the tank body) and incorrectly (around the barrel sprite's
    own center).
    """

    def __init__(self):
        super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)

        # Set Background to be green.
        self.background_color = arcade.color.GREEN

        # The tank and barrel sprite, both starting mid-screen.
        self.tank = arcade.Sprite(":resources:images/topdown_tanks/tankBody_dark_outline.png")
        self.tank.position = SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2

        self.barrel = RotatingSprite(":resources:images/topdown_tanks/tankDark_barrel3_outline.png")
        # Offset the barrel so its end sits at the middle of the tank body.
        self.barrel.position = SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2 - TANK_BARREL_LENGTH_HALF

        self.tank_direction = 0.0  # If the tank is moving forward or backwards.
        self.tank_turning = 0.0  # If the tank is turning left or right.

        self.mouse_pos = [0, 0]

        self.tank_sprite_list = arcade.SpriteList()
        self.tank_sprite_list.extend([self.tank, self.barrel])

        self._correct = True
        self.correct_text = arcade.Text("Turret Rotation is Correct, Press P to Switch",
                                        SCREEN_WIDTH // 2, SCREEN_HEIGHT - 25,
                                        anchor_x='center')

        self.control_text = arcade.Text("WASD to move tank, Mouse to aim",
                                        SCREEN_WIDTH // 2, 15,
                                        anchor_x='center')

    def on_draw(self):
        self.clear()
        # Bug fix: the original called self.background.draw(), but no
        # self.background attribute is ever created (only background_color,
        # which clear() already applies), so the first frame raised
        # AttributeError.
        self.tank_sprite_list.draw()
        self.control_text.draw()
        self.correct_text.draw()

    def on_update(self, delta_time: float):
        self.move_tank(delta_time)

    def move_tank(self, delta_time):
        """
        Perform all calculations about how to move the tank.
        This includes both the body and the barrel
        """
        # update the angle of the tank's body alone.
        # The barrel will be updated after the body is moved.
        # Bug fix: turning previously scaled by TANK_SPEED (pixels/second)
        # instead of TANK_TURNING_SPEED (degrees/second), which was
        # otherwise unused.
        self.tank.angle += TANK_TURNING_SPEED * self.tank_turning * delta_time

        # find how much the tank's x and y should change to move forward or back.
        x_dir = (math.cos(self.tank.radians - math.pi / 2)
                 * self.tank_direction * TANK_SPEED * delta_time)
        y_dir = (math.sin(self.tank.radians - math.pi / 2)
                 * self.tank_direction * TANK_SPEED * delta_time)

        # we then move the tank and the barrel since they are connected together.
        self.tank.center_x += x_dir
        self.tank.center_y += y_dir
        self.barrel.center_x += x_dir
        self.barrel.center_y += y_dir

        if self.correct:
            # Rotate the barrel sprite around the center of the tank,
            # not the center of the barrel sprite.
            # we need to add 90 to the angle due to orientation of the barrel texture.
            # we need to remove the barrel's angle as we only want the change in angle.
            angle_change = (arcade.get_angle_degrees(self.tank.center_y, self.tank.center_x,
                                                     self.mouse_pos[1], self.mouse_pos[0])
                            - self.barrel.angle + 90)
            self.barrel.rotate_around_point((self.tank.center_x, self.tank.center_y),
                                            angle_change)
        else:
            # In this situation we only change the angle without changing the
            # position, which is incorrect.
            # we need to add 90 to the angle due to orientation of the barrel texture.
            angle = arcade.get_angle_degrees(self.tank.center_y, self.tank.center_x,
                                             self.mouse_pos[1], self.mouse_pos[0]) + 90
            self.barrel.angle = angle

    def on_key_press(self, symbol: int, modifiers: int):
        if symbol == arcade.key.W:
            self.tank_direction += 1
        elif symbol == arcade.key.S:
            self.tank_direction -= 1
        elif symbol == arcade.key.A:
            self.tank_turning += 1
        elif symbol == arcade.key.D:
            self.tank_turning -= 1
        elif symbol == arcade.key.P:
            # Toggle correct/incorrect turret rotation.
            self.correct = not self.correct
            self.correct_text.text = f"Turret Rotation is" \
                                     f" {'Correct' if self.correct else 'Incorrect'}," \
                                     f" Press P to Switch"

    def on_key_release(self, symbol: int, modifiers: int):
        if symbol == arcade.key.W:
            self.tank_direction -= 1
        elif symbol == arcade.key.S:
            self.tank_direction += 1
        elif symbol == arcade.key.A:
            self.tank_turning -= 1
        elif symbol == arcade.key.D:
            self.tank_turning += 1

    def on_mouse_motion(self, x: int, y: int, dx: int, dy: int):
        self.mouse_pos = x, y

    @property
    def correct(self):
        """Whether the barrel rotates around the correct pivot point."""
        return self._correct

    @correct.setter
    def correct(self, value):
        # When switching modes, snap the barrel to the position the new
        # mode expects so the toggle is visually consistent.
        if value:
            self._correct = True
            angle = math.radians(arcade.get_angle_degrees(self.tank.center_y, self.tank.center_x,
                                                          self.mouse_pos[1], self.mouse_pos[0]))
            self.barrel.center_x = (self.tank.center_x + math.cos(angle)
                                    * TANK_BARREL_LENGTH_HALF)
            self.barrel.center_y = (self.tank.center_y + math.sin(angle)
                                    * TANK_BARREL_LENGTH_HALF)
        else:
            self._correct = False
            self.barrel.center_x = self.tank.center_x
            self.barrel.center_y = self.tank.center_y
def main():
    """Create the demo window and enter the arcade event loop."""
    ExampleWindow().run()
# Run the demo only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| 37.377451 | 106 | 0.610098 | 1,025 | 7,625 | 4.39122 | 0.24 | 0.063986 | 0.043546 | 0.023328 | 0.301266 | 0.246168 | 0.227505 | 0.212842 | 0.182404 | 0.182404 | 0 | 0.011031 | 0.310426 | 7,625 | 203 | 107 | 37.561576 | 0.844998 | 0.296 | 0 | 0.240741 | 0 | 0 | 0.060145 | 0.022269 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101852 | false | 0 | 0.018519 | 0.009259 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03636efc34bffdf5e5bc02cd6599c5ce0ac214e9 | 4,913 | py | Python | api/modules/github/views.py | prabhakar267/travel-mate-server | 2e0aa7c6ac9963c1ee95bda5966be01293935ded | [
"MIT"
] | 43 | 2018-05-23T10:03:40.000Z | 2021-09-02T15:55:52.000Z | api/modules/github/views.py | prabhakar267/travel-mate-server | 2e0aa7c6ac9963c1ee95bda5966be01293935ded | [
"MIT"
] | 141 | 2018-05-24T16:03:12.000Z | 2021-04-30T23:47:59.000Z | api/modules/github/views.py | prabhakar267/travel-mate-server | 2e0aa7c6ac9963c1ee95bda5966be01293935ded | [
"MIT"
] | 77 | 2018-06-13T13:51:31.000Z | 2021-06-16T16:10:18.000Z | import datetime
import requests
import requests_cache
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from api.commonresponses import DOWNSTREAM_ERROR_RESPONSE
from api.modules.github import constants
from api.modules.github.github_response import ContributorResponse, IssueResponse
# Transparently cache HTTP responses for seven days
# (requests_cache patches the requests library globally).
requests_cache.install_cache(expire_after=datetime.timedelta(days=7))
@api_view(['GET'])
def get_contributors(request, project):
    """
    Return list of people contributed
    :param request:
    :param project:
    :return: 503 if github api fails
    :return: 200 successful
    """
    try:
        url = constants.GITHUB_API_GET_CONTRIBUTORS_URL.format(project_name=project)
        api_response = requests.get(url)
        payload = api_response.json()
        # if authentication fails
        if api_response.status_code == 401:
            raise Exception("Authentication fails. Invalid github access token.")
        contributors = []
        for contributor in payload:
            # Only individual user accounts count as contributors.
            if contributor['type'] != 'User':
                continue
            contributors.append(ContributorResponse(
                username=contributor['login'],
                url=contributor['html_url'],
                avatar_url=contributor['avatar_url'],
                contributions=contributor['contributions'],
                repository_name=project,
            ).to_json())
    except Exception:
        # Any downstream failure (network, bad JSON, auth) maps to 503.
        return DOWNSTREAM_ERROR_RESPONSE
    return Response(contributors)
@api_view(['GET'])
def get_all_contributors(request):
    """
    Return list of people contributed
    :param request:
    :return: 503 if github api fails
    :return: 200 successful
    """
    merged = {}
    for project in constants.ACTIVE_REPOSITORIES:
        try:
            api_response = requests.get(
                constants.GITHUB_API_GET_CONTRIBUTORS_URL.format(project_name=project)
            )
            payload = api_response.json()
            # if authentication fails
            if api_response.status_code == 401:
                raise Exception("Authentication fails. Invalid github access token.")
            for contributor in payload:
                # Only individual user accounts count as contributors.
                if contributor['type'] != 'User':
                    continue
                entry = ContributorResponse(
                    username=contributor['login'],
                    url=contributor['html_url'],
                    avatar_url=contributor['avatar_url'],
                    contributions=contributor['contributions'],
                    repository_name=[project],
                )
                if entry.username in merged:
                    # Seen on an earlier repository: accumulate totals.
                    merged[entry.username]['contributions'] += entry.contributions
                    merged[entry.username]['repository_name'].append(project)
                else:
                    merged[entry.username] = entry.to_json()
        except Exception:
            return DOWNSTREAM_ERROR_RESPONSE
    # Most prolific contributors first.
    ranked = sorted(merged.values(), key=lambda item: item['contributions'], reverse=True)
    return Response(ranked)
@api_view(['GET'])
def get_issues(request, project):
    """
    Return list of issues
    :param request:
    :param project:
    :return: 404 if the repository does not exist
    :return: 503 if github api fails
    :return: 200 successful
    """
    try:
        api_response = requests.get(constants.GITHUB_API_GET_ISSUES_URL.format(project_name=project))
        api_response_json = api_response.json()
        if api_response.status_code == 404:
            error_message = "Repository does not exist"
            return Response(error_message, status=status.HTTP_404_NOT_FOUND)
        if api_response.status_code == 401:
            raise Exception("Authentication fails. Invalid github access token.")
        response = []
        for issue in api_response_json:
            # Collect the name of every label attached to the issue.
            # (Replaces a hand-rolled double loop over each label dict's
            # items looking for the "name" key.)
            tags = [label['name'] for label in issue['labels'] if 'name' in label]
            result = IssueResponse(
                title=issue['title'],
                created_at=issue['created_at'],
                comments=issue['comments'],
                issue_number=issue['number'],
                repository_url=issue['repository_url'],
                labels=tags
            )
            response.append(result.to_json())
    except Exception:
        # Any downstream failure (network, bad JSON, auth) maps to 503.
        return DOWNSTREAM_ERROR_RESPONSE
    return Response(response)
| 36.664179 | 101 | 0.612864 | 502 | 4,913 | 5.784861 | 0.227092 | 0.060606 | 0.046488 | 0.02927 | 0.599174 | 0.572658 | 0.572658 | 0.532025 | 0.51343 | 0.498278 | 0 | 0.010231 | 0.303684 | 4,913 | 133 | 102 | 36.93985 | 0.838644 | 0.09485 | 0 | 0.515464 | 0 | 0 | 0.085912 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030928 | false | 0 | 0.092784 | 0 | 0.195876 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
036543a1fbcdcc35bf430e0b5d4150196450f6d6 | 4,910 | py | Python | mkpy3/mkpy3_finder_chart_survey_fits_image_get_v1.py | KenMighell/mkpy3 | 598126136b43fa93bc4aded5db65a1251d60a9ba | [
"MIT"
] | null | null | null | mkpy3/mkpy3_finder_chart_survey_fits_image_get_v1.py | KenMighell/mkpy3 | 598126136b43fa93bc4aded5db65a1251d60a9ba | [
"MIT"
] | null | null | null | mkpy3/mkpy3_finder_chart_survey_fits_image_get_v1.py | KenMighell/mkpy3 | 598126136b43fa93bc4aded5db65a1251d60a9ba | [
"MIT"
] | 1 | 2020-11-01T18:37:53.000Z | 2020-11-01T18:37:53.000Z | #!/usr/bin/env python3
# file://mkpy3_finder_chart_survey_fits_image_get_v1.py
# Kenneth Mighell
# SETI Institute
# =============================================================================
def mkpy3_finder_chart_survey_fits_image_get_v1(
ra_deg=None,
dec_deg=None,
radius_arcmin=None,
survey=None,
cframe=None,
verbose=None,
):
"""
Function: mkpy3_finder_chart_survey_fits_image_get_v1()
Purpose:
Gets sky survey image data around a position on the sky.
Parameters
----------
ra_deg : float (optional)
right ascencsion [deg]
dec_deg : float (optional)
declination [deg]
radius_arcmin : float (optional)
radius (halfwidth and halfheight of image) [arcmin]
survey : string (optional) [e.g., '2MASS-J', 'DSS2 Red', etc.]
survey string name
cframe : str (optional)
coordinate frame name [e.g., 'fk5', 'icrs', etc.]
verbose : bool (optional)
if True, print extra information
Returns
-------
hdu :
Header/Data Unit (HDU) of the survey FITS file
hdr :
header associated with hdu
data :
data associated with hdu
wcs :
World Coordinate System from hdu
cframe :
coordinate frame of the survey data
Kenneth Mighell
SETI Institute
"""
import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.skyview import SkyView
from astropy.wcs import WCS
#
if ra_deg is None:
ra_deg = 291.41829 # Kepler-93b
if dec_deg is None:
dec_deg = 38.67236 # Kepler-93b
if radius_arcmin is None:
radius_arcmin = 1.99
if survey is None:
survey = "2MASS-J" # alternate: 'DSS2 Red'
# ^--- to see all surveys: astroquery.skyview.SkyView.list_surveys()
if cframe is None:
cframe = "fk5" # N.B.: '2MASS-J' uses 'fk5'
if verbose is None:
verbose = False
if verbose:
print(ra_deg, "=ra_deg")
print(dec_deg, "=dec_deg")
print(radius_arcmin, "=radius_arcmin")
print("'%s' =survey" % (survey))
print("'%s' =cframe" % (cframe))
print(verbose, "=verbose")
print()
#
# sc <--- astropy sky coordinates
sc = SkyCoord(ra=ra_deg * u.degree, dec=dec_deg * u.degree, frame=cframe)
# image list # assume that the list contains a single image
imgl = SkyView.get_images(
position=sc, survey=survey, radius=radius_arcmin * u.arcmin
)
#
# outputs:
hdu = imgl[0] # Header/Data Unit of the FITS image
hdr = hdu[0].header # header associated with the HDU
data = hdu[0].data # data associated with the HDU
wcs = WCS(hdr) # World Coordinate System from the FITS header of the survey image
#
return hdu, hdr, data, wcs, cframe
# fed
def xmkpy3_finder_chart_survey_fits_image_get_v1():
import lightkurve as lk
lk.log.setLevel("INFO")
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.visualization import ImageNormalize, PercentileInterval, SqrtStretch
import os
import ntpath
# Exoplanet Kelper-138b is "KIC 7603200":
tpf = lk.search_targetpixelfile(
target="kepler-138b", mission="kepler", cadence="long", quarter=10
).download(quality_bitmask=0)
print("TPF filename:", ntpath.basename(tpf.path))
print("TPF dirname: ", os.path.dirname(tpf.path))
target = "Kepler-138b"
ra_deg = tpf.ra
dec_deg = tpf.dec
# get survey image data
width_height_arcmin = 3.00
survey = "2MASS-J"
(
survey_hdu,
survey_hdr,
survey_data,
survey_wcs,
survey_cframe,
) = mkpy3_finder_chart_survey_fits_image_get_v1(
ra_deg, dec_deg, radius_arcmin=width_height_arcmin, survey=survey, verbose=True
)
# create a matplotlib figure object
fig = plt.figure(figsize=(12, 12))
# create a matplotlib axis object with right ascension and declination axes
ax = plt.subplot(projection=survey_wcs)
norm = ImageNormalize(
survey_data, interval=PercentileInterval(99.0), stretch=SqrtStretch()
)
ax.imshow(survey_data, origin="lower", norm=norm, cmap="gray_r")
ax.set_xlabel("Right Ascension (J2000)")
ax.set_ylabel("Declination (J2000)")
ax.set_title("")
plt.suptitle(target)
# put a yellow circle at the target position
ax.scatter(
ra_deg * u.deg,
dec_deg * u.deg,
transform=ax.get_transform(survey_cframe),
s=600,
edgecolor="yellow",
facecolor="None",
lw=3,
zorder=100,
)
pname = "mkpy3_plot.png"
if pname != "":
plt.savefig(pname, bbox_inches="tight")
print(pname, " <--- plot filename has been written! :-)\n")
# fi
return None
# fed
# =============================================================================
if __name__ == "__main__":
xmkpy3_finder_chart_survey_fits_image_get_v1()
# fi
# EOF
| 26.684783 | 87 | 0.627495 | 633 | 4,910 | 4.706161 | 0.333333 | 0.016784 | 0.03424 | 0.042296 | 0.098019 | 0.098019 | 0.098019 | 0.076536 | 0.027526 | 0.027526 | 0 | 0.024606 | 0.238493 | 4,910 | 183 | 88 | 26.830601 | 0.772132 | 0.352546 | 0 | 0.042105 | 0 | 0 | 0.087261 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021053 | false | 0 | 0.105263 | 0 | 0.147368 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0366c6b949b300f8072c9d5d7dfdc2a101c2a39c | 1,737 | py | Python | marathontcp.py | StevenPG/JMXMarathonDataAggregator | a976edc2ea27255dca36f584923e3a06dbdec8c6 | [
"MIT"
] | null | null | null | marathontcp.py | StevenPG/JMXMarathonDataAggregator | a976edc2ea27255dca36f584923e3a06dbdec8c6 | [
"MIT"
] | null | null | null | marathontcp.py | StevenPG/JMXMarathonDataAggregator | a976edc2ea27255dca36f584923e3a06dbdec8c6 | [
"MIT"
] | null | null | null | """
marathontcp.py
Author: Steven Gantz
Date: 11/22/2016
These two classes are a custom TCP server and its accompanying
handler that services each request. These classes forward the data
from the preset /metrics endpoints in the scaled Marathon instances directly
to the TCP servers running from this application.
"""
# Official Imports
import socketserver
import urllib.request
class MarathonRedirectTCPServer(socketserver.TCPServer):
    """TCP server that remembers which Marathon /metrics endpoint it fronts.

    :param api_url: host:port of the Marathon instance whose /metrics
        endpoint the handler proxies; exposed to handlers via
        ``self.server.api_url``.
    """
    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True, api_url="Empty Request"):
        # As per http://stackoverflow.com/questions/15889241/send-a-variable-to-a-tcphandler-in-python
        self.api_url = api_url
        # Bug fix: forward the caller's bind_and_activate instead of the
        # hard-coded True, so callers can defer binding/activation.
        socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
class MarathonRedirectTCPHandler(socketserver.BaseRequestHandler):
    """ Makes a metrics request and forwards to preset ports through the application"""
    def handle(self):
        # handle() runs once per incoming TCP connection.
        print("Retrieving metrics from http://" + self.server.api_url + "/metrics")
        # Make a request to the api_url metrics and fwd to page
        encoded_response = urllib.request.urlopen("http://" + self.server.api_url + "/metrics")
        # Change encoded response in to simple string
        # Build a minimal HTTP response by hand: status line, one header,
        # then the metrics body.
        header = "HTTP/1.0 200 OK \r\n"
        content_type = "Content-Type: text/plain\r\n\r\n"
        text_response = header + content_type + encoded_response.read().decode()
        # self.request is the TCP socket connected to the client
        self.request.sendall(text_response.encode())
        # Read Response to close request
        # NOTE(review): this blocks until the client sends data or closes
        # the connection; presumably intended to drain the request — confirm.
        res = self.request.recv(1024)
| 40.395349 | 110 | 0.727691 | 229 | 1,737 | 5.406114 | 0.528384 | 0.029079 | 0.031502 | 0.033926 | 0.138934 | 0.138934 | 0.095315 | 0.095315 | 0.095315 | 0 | 0 | 0.017693 | 0.186529 | 1,737 | 42 | 111 | 41.357143 | 0.858457 | 0.43293 | 0 | 0 | 0 | 0 | 0.124088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.4 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03672787b107ccc21fb75165c7801c0b958f1461 | 4,600 | py | Python | tests/test_io.py | Laharah/horcrux | 68f7c6aad0678b39bae888f0dfeb9d1926501a53 | [
"MIT"
] | null | null | null | tests/test_io.py | Laharah/horcrux | 68f7c6aad0678b39bae888f0dfeb9d1926501a53 | [
"MIT"
] | null | null | null | tests/test_io.py | Laharah/horcrux | 68f7c6aad0678b39bae888f0dfeb9d1926501a53 | [
"MIT"
] | null | null | null | import pytest
import io
import random
from copy import deepcopy
from horcrux import io as hio
from horcrux.hrcx_pb2 import StreamBlock
from horcrux.sss import Share, Point
@pytest.fixture()
def hx():
    # A Horcrux wrapped around an empty in-memory stream.
    return hio.Horcrux(io.BytesIO())
@pytest.fixture()
def share():
    # Reusable Share instance; its values match the serialized header
    # bytes asserted by the tests below.
    return Share(b'0123456789abcdef', 2, Point(0, b'123'))
@pytest.fixture()
def two_block_hrcx():
    # A pre-serialized horcrux stream: share/stream headers followed by
    # two data blocks.
    return io.BytesIO(b'\x1b\n\x100123456789ABCDEF\x10\x04\x1a\x05\x12\x03123\x08\n\x06'
                      b'566784\x00\x08\x12\x06abcdef\x02\x08\x01\n\x12\x08ghijklmn')
def test_init_horcrux():
    """Constructing a Horcrux around an empty stream must not raise."""
    hio.Horcrux(io.BytesIO())
def test_horcrux__write_bytes(hx):
    # A 3-byte payload is written as a length prefix followed by the bytes.
    hx._write_bytes(b'123')
    assert hx.stream.getvalue() == b'\x03123'
def test_horcurx__read_message_bytes_small(hx):
    """Round-trip two short messages through _write_bytes/_read_message_bytes."""
    hx._write_bytes(b'123')
    hx._write_bytes(b'4567890')
    # Re-open the same buffer with a fresh Horcrux to read it back.
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    m1 = hx._read_message_bytes()
    assert m1 == b'123'
    m2 = hx._read_message_bytes()
    assert m2 == b'4567890'
def test_horcrux__read_message_bytes_large(hx):
    """Round-trip several messages, including ones longer than 255 bytes."""
    messages = [
        bytes(255 for _ in range(500)),
        bytes(random.getrandbits(8) for _ in range(4)),
        bytes(random.getrandbits(8) for _ in range(4096)),
    ]
    for message in messages:
        hx._write_bytes(message)
    # Re-open the same buffer with a fresh Horcrux to read it back.
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    for message in messages:
        assert hx._read_message_bytes() == message
def test_horcrux_write_data_block(hx):
    """A data block serializes as its id message followed by the payload message."""
    _id = 1
    data = b'my data'
    hx.write_data_block(_id, data)
    out = hx.stream.getvalue()
    print(out)
    assert out == b'\x02\x08\x01\t\x12\x07my data'
def test_horcrux_write_share_header(hx, share):
    """The share fixture serializes to the expected header bytes."""
    hx._write_share_header(share)
    stream = hx.stream
    del hx
    stream.seek(0)
    print(stream.getvalue())
    assert stream.getvalue() == b'\x1b\n\x100123456789abcdef\x10\x02\x1a\x05\x12\x03123'
def test_horcrux_write_stream_header(hx):
    """Stream headers round-trip, with and without an encrypted filename."""
    header = b'u\x14Op\xa3\x13\x01Jt\xa8'
    hx._write_stream_header(header)
    hx._write_stream_header(header, encrypted_filename=b'testname')
    # Read both headers back through a fresh Horcrux.
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    h1 = hx._read_message_bytes()
    assert h1 == b'\n\nu\x14Op\xa3\x13\x01Jt\xa8'
    h2 = hx._read_message_bytes()
    assert h2 == b'\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\x08testname'
def test_horcrux_init_write(hx, share):
    """init_write emits share header, crypto header and filename, and sets the id."""
    cryptoheader = b'u\x14Op\xa3\x13\x01Jt\xa8'
    hx.init_write(share, cryptoheader, encrypted_filename=b'slkfjwnfa;')
    assert hx.hrcx_id == 0
    stream = hx.stream
    del hx
    stream.seek(0)
    headers = stream.getvalue()
    print(headers)
    assert headers == (
        b'\x1b\n\x100123456789abcdef\x10\x02\x1a'
        b'\x05\x12\x03123\x18\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\nslkfjwnfa;')
def test_horcrux_init_read(share):
    """init_read must recover the share and header fields written by init_write."""
    stream = io.BytesIO(
        b'\x1b\n\x100123456789abcdef\x10\x02\x1a'
        b'\x05\x12\x03123\x18\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\nslkfjwnfa;')
    stream.seek(0)
    hx = hio.Horcrux(stream)
    hx.init_read()
    assert hx.share == share
    assert hx.hrcx_id == 0
    assert hx.encrypted_filename == b'slkfjwnfa;'
    # Idiom fix: identity check for None (`== None` can be fooled by a
    # custom __eq__ and is flagged by PEP 8 / linters).
    assert hx.next_block_id is None
def test_horcrux_read_block(hx):
    """Blocks written with ids are read back in order with matching ids."""
    data1 = bytes(random.getrandbits(8) for _ in range(30))
    data2 = bytes(random.getrandbits(8) for _ in range(30))
    hx.write_data_block(33, data1)
    hx.write_data_block(45, data2)
    # Re-open the buffer and prime the reader with the first block id.
    stream = hx.stream
    stream.seek(0)
    del hx
    hx = hio.Horcrux(stream)
    hx._read_next_block_id()
    _id, d = hx.read_block()
    assert d == data1
    assert _id == 33
    _id, d = hx.read_block()
    assert d == data2
    assert _id == 45
def test_horcrux_skip_block(hx):
    """skip_block advances past a block so the next read returns the following one."""
    data1 = bytes(255 for _ in range(30))
    data2 = bytes(255 for _ in range(30))
    hx.write_data_block(33, data1)
    hx.write_data_block(45, data2)
    # Re-open the buffer and prime the reader with the first block id.
    stream = hx.stream
    stream.seek(0)
    del hx
    hx = hio.Horcrux(stream)
    hx._read_next_block_id()
    hx.skip_block()
    _id, d = hx.read_block()
    assert d == data2
    assert _id == 45
def test_get_horcrux_files(tmpdir, share):
    """get_horcrux_files writes one horcrux file per share with the shared headers."""
    fn = 'test_horcrux'
    shares = [deepcopy(share) for _ in range(4)]
    crypto_header = b'1234567'
    expected = b'\x1b\n\x100123456789abcdef\x10\x02\x1a\x05\x12\x03123\t\n\x071234567'
    hxs = hio.get_horcrux_files(fn, shares, crypto_header, outdir=tmpdir)
    assert len(hxs) == 4
    for h in hxs:
        # Close each stream first so the on-disk contents can be compared.
        h.stream.close()
        with open(h.stream.name, 'rb') as fin:
            assert fin.read() == expected
| 27.218935 | 88 | 0.668696 | 709 | 4,600 | 4.143865 | 0.180536 | 0.038121 | 0.042886 | 0.042886 | 0.513955 | 0.42614 | 0.377127 | 0.322328 | 0.24983 | 0.24983 | 0 | 0.101824 | 0.201522 | 4,600 | 168 | 89 | 27.380952 | 0.698067 | 0 | 0 | 0.382353 | 0 | 0.036765 | 0.153043 | 0.12913 | 0 | 0 | 0 | 0 | 0.176471 | 1 | 0.110294 | false | 0 | 0.051471 | 0.022059 | 0.183824 | 0.022059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0369ea60607087cd24210a21d5453a467593c1f0 | 1,817 | py | Python | template/diff.py | Nauja/Entropy | e418a7db68a55f17fb3e6c0c3b5018aed7002d4d | [
"MIT"
] | null | null | null | template/diff.py | Nauja/Entropy | e418a7db68a55f17fb3e6c0c3b5018aed7002d4d | [
"MIT"
] | null | null | null | template/diff.py | Nauja/Entropy | e418a7db68a55f17fb3e6c0c3b5018aed7002d4d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
A Pandoc filter to create non-code diffs. `add` and `rm` are the classes that
can be added to a `Div` or a `Span`. `add` colors the text green, and `rm`
colors the text red. For HTML, `add` also underlines the text, and `rm` also
strikes out the text.
# Example
## `Div`
Unchanged portion
::: add
New paragraph
> Quotes
More new paragraphs
:::
## `Span`
> The return type is `decltype(`_e_(`m`)`)` [for the first form]{.add}.
"""
import panflute as pf
def action(elem, doc):
if not isinstance(elem, pf.Div) and not isinstance(elem, pf.Span):
return None
color_name = None
tag_name = None
for cls in elem.classes:
color_name = cls + 'color'
if cls == 'add':
tag_name = 'ins'
elif cls == 'rm':
tag_name = 'del'
if tag_name is None:
return None
open_tag = pf.RawInline('<{}>'.format(tag_name), 'html')
open_color = pf.RawInline('{{\\color{{{}}}'.format(color_name), 'tex')
close_color = pf.RawInline('}', 'tex')
close_tag = pf.RawInline('</{}>'.format(tag_name), 'html')
color = doc.get_metadata(color_name)
attributes = {} if color is None else {'style': 'color: #{}'.format(color)}
if isinstance(elem, pf.Div):
return pf.Div(pf.Plain(open_tag),
pf.Plain(open_color),
elem,
pf.Plain(close_color),
pf.Plain(close_tag),
attributes=attributes)
elif isinstance(elem, pf.Span):
return pf.Span(open_tag,
open_color,
elem,
close_color,
close_tag,
attributes=attributes)
if __name__ == '__main__':
pf.run_filter(action)
| 25.236111 | 79 | 0.555861 | 232 | 1,817 | 4.206897 | 0.362069 | 0.043033 | 0.065574 | 0.038934 | 0.116803 | 0.063525 | 0.063525 | 0 | 0 | 0 | 0 | 0.0008 | 0.312053 | 1,817 | 71 | 80 | 25.591549 | 0.78 | 0.250963 | 0 | 0.166667 | 0 | 0 | 0.057692 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.027778 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
036a6e6b57ea2d5221b7c56f2e175c6cb9c0ca3b | 1,304 | py | Python | paxful/exceptions.py | tholness/Paxful-API-Wrapper | c66620aa2ef40b97f2794998c63a6bd7504cea3c | [
"MIT"
] | null | null | null | paxful/exceptions.py | tholness/Paxful-API-Wrapper | c66620aa2ef40b97f2794998c63a6bd7504cea3c | [
"MIT"
] | null | null | null | paxful/exceptions.py | tholness/Paxful-API-Wrapper | c66620aa2ef40b97f2794998c63a6bd7504cea3c | [
"MIT"
] | 3 | 2020-08-09T17:02:06.000Z | 2021-04-13T17:45:39.000Z | from __future__ import absolute_import, unicode_literals
class PaxfulError(Exception):
"""Base (catch-all) client exception."""
class RequestError(PaxfulError):
"""Raised when an API request to fails.
:ivar message: Error message.
:vartype message: str | unicode
:ivar url: API endpoint.
:vartype url: str | unicode
:ivar body: Raw response body from Pax.
:vartype body: str | unicode
:ivar headers: Response headers.
:vartype headers: requests.structures.CaseInsensitiveDict
:ivar http_code: HTTP status code.
:vartype http_code: int
:ivar error_code: Error code from Pax.
:vartype error_code: int
:ivar response: Response object.
:vartype response: requests.Response
"""
def __init__(self, response, message, error_code=None):
self.message = message
self.url = response.url
self.body = response.text
self.headers = response.headers
self.http_code = response.status_code
self.error_code = error_code
self.response = response
Exception.__init__(self, message)
class InvalidCurrencyError(PaxfulError):
"""Raised when an invalid major currency is given."""
class InvalidOrderBookError(PaxfulError):
"""Raised when an invalid order book is given."""
| 28.977778 | 61 | 0.692485 | 153 | 1,304 | 5.751634 | 0.359477 | 0.061364 | 0.071591 | 0.078409 | 0.068182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.220092 | 1,304 | 44 | 62 | 29.636364 | 0.86529 | 0.486963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
036bed92a5a2372689c9a48c62d3c2e337ec2c9b | 1,459 | py | Python | cogs/logs.py | CoffeeOrg/Coffee | 73bf194c3811bb9cf776a0a4db4c6234e471d5ce | [
"MIT"
] | 6 | 2021-02-06T05:43:40.000Z | 2021-08-01T22:55:33.000Z | cogs/logs.py | elfw/Coffee | e83868a323084b96b0df3f916090dd17ce34de93 | [
"MIT"
] | 2 | 2021-02-06T07:18:10.000Z | 2021-02-06T18:42:07.000Z | cogs/logs.py | elfw/Coffee | e83868a323084b96b0df3f916090dd17ce34de93 | [
"MIT"
] | 10 | 2021-02-06T03:31:26.000Z | 2021-09-22T04:00:23.000Z | import discord
from discord.ext import commands
from utils.database import sqlite, create_tables
class Events(commands.Cog):
    """Cog that mirrors message delete/edit events into a per-guild log channel.

    The log channel id is looked up in the ``Logging`` table of the
    sqlite database, keyed by guild id.
    """

    def __init__(self, bot):
        self.bot = bot
        self.db = sqlite.Database()

    def logs(self, guild_id):
        """Return the configured log channel id for ``guild_id``, or None."""
        data = self.db.fetchrow("SELECT * FROM Logging WHERE guild_id=?", (guild_id,))
        return data["logs_id"] if data else None

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        """Post an embed to the guild's log channel when a message is deleted."""
        # This event also fires for DMs, where message.guild is None —
        # without this guard the lookup below raises AttributeError.
        if message.guild is None:
            return
        log_channel = self.bot.get_channel(self.logs(message.guild.id))
        if log_channel:
            embed = discord.Embed(
                title="Message Deleted 📝",
                description=f"**Deleted in:** `#{message.channel}`\n**Author:** `{message.author}`\n**Message:** ```{message.content}```",
                color=0x2F3136
            )
            embed.timestamp = message.created_at
            await log_channel.send(embed=embed)

    @commands.Cog.listener()
    async def on_message_edit(self, before, after):
        """Post a before/after embed to the guild's log channel on edits."""
        # Skip DMs (guild is None) and bot authors; checking first also
        # avoids a pointless database lookup for messages we won't log.
        if before.guild is None or before.author.bot:
            return
        log_channel = self.bot.get_channel(self.logs(before.guild.id))
        if log_channel:
            embed = discord.Embed(
                title="Message Edited 📝",
                description=f"**Edited in:** `#{before.channel}`\n**Author:** `{before.author}`\n**Before:** ```{before.content}```\n**Now:** ```{after.content}```",
                color=0x2F3136
            )
            embed.timestamp = before.created_at
            await log_channel.send(embed=embed)
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    cog = Events(bot)
    bot.add_cog(cog)
| 29.77551 | 154 | 0.651131 | 192 | 1,459 | 4.833333 | 0.333333 | 0.064655 | 0.029095 | 0.051724 | 0.396552 | 0.323276 | 0.323276 | 0.24569 | 0 | 0 | 0 | 0.010213 | 0.194654 | 1,459 | 48 | 155 | 30.395833 | 0.777872 | 0 | 0 | 0.307692 | 0 | 0.051282 | 0.217272 | 0.139136 | 0 | 0 | 0.010966 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
036e3669e539c1ee359752125217465762d9b017 | 4,151 | py | Python | scripts/text/text_particles.py | mou3adb/spread_the_particle | 6cc666fded62f07380ed1e3ed52969c436295906 | [
"MIT"
] | 4 | 2020-08-18T18:33:05.000Z | 2021-05-18T23:55:56.000Z | scripts/text/text_particles.py | mou3adb/spread_the_particle | 6cc666fded62f07380ed1e3ed52969c436295906 | [
"MIT"
] | null | null | null | scripts/text/text_particles.py | mou3adb/spread_the_particle | 6cc666fded62f07380ed1e3ed52969c436295906 | [
"MIT"
] | 2 | 2021-03-03T18:57:06.000Z | 2021-05-18T20:43:44.000Z | """
The outfile structure is the following:
diameter density
birth lifetime
is_captured stuck_to_geometry theta
(blank line)
Re Ur
(blank line)
n_trajectory
x1 y1 up1 vp1 Uf1 Vf1 gradpx1 gradpy1 ap_x1 ap_y1 af_x1 af_y1
x2 y2 up2 vp2 Uf2 Vf2 gradpx2 gradpy2 ap_x2 ap_y2 af_x2 af_y2
...
xNt yNt upNt vpNt UfNt VfNt gradpxNt gradpyNt ap_xN ap_yN af_xN af_yN
"""
import sys
sys.path.append('..')
import numpy as np
from particle import Particle
#==============================================================================
def floatIt(l):
    """Convert an iterable of string/number tokens to a float ndarray."""
    return np.array(list(map(float, l)))
def intIt(l):
    """Convert an iterable of string/number tokens to an int ndarray."""
    return np.array(list(map(int, l)))
def write_particle(p, f):
    """Serialize one particle to the open text stream ``f``.

    The record layout matches the module docstring: a scalar header
    (diameter/density, birth/lifetime, capture flags), Re/Ur, the number
    of trajectory samples, then one line of 12 '%e'-formatted values per
    time step.
    """
    f.write('%2.3f %1.3f\n' % (p.diameter, p.density))
    f.write('%d %d\n' % (p.birth, p.lifetime))
    f.write('%s %s %s\n' % (p.captured, p.stuck_to_geometry, p.theta))
    f.write('\n')  # blank separator line
    f.write('%d %.1f\n' % (p.Re, p.Ur))
    f.write('\n')

    n_steps = len(p.trajectory)
    f.write('%d\n' % n_steps)

    # Column order per time step: x y up vp Uf Vf gradpx gradpy ap af.
    series = (p.trajectory, p.velocities, p.fluid_velocities,
              p.pressure_gradients, p.accelerations, p.fluid_accelerations)
    for n in range(n_steps):
        values = [arr[n, k] for arr in series for k in (0, 1)]
        f.write(''.join('%e ' % v for v in values))
        f.write('\n')
def write_particles(particles, outfile):
    """Write a collection of particles to ``outfile``.

    The file starts with the particle count and a blank line, followed by
    one ``write_particle`` record per particle, each terminated by a
    blank line.

    :param particles: sequence of Particle objects.
    :param outfile: path of the file to create/overwrite.
    """
    # 'with' guarantees the file is closed even if write_particle raises
    # (the original explicit close() leaked the handle on error).
    with open(outfile, 'w') as f:
        f.write('%d\n' % len(particles))
        f.write('\n')  # blank line after the count
        for p in particles:
            write_particle(p, f)
            f.write('\n')
def read_particle(f, old_version=False):
    """Read one particle record from the open text stream ``f``.

    Inverse of ``write_particle``: parses the scalar header, Re/Ur, the
    sample count and the per-time-step rows, then builds a ``Particle``
    and attaches the time series as numpy arrays.

    :param f: open text stream positioned at the start of a record.
    :param old_version: if True, parse the legacy layout (no theta in the
        header and only 8 columns per time step — no accelerations).
    :return: the reconstructed ``Particle``.
    """
    # I kept old_version because I had many particles saved before the final
    # update of this function.
    diameter, density = floatIt(f.readline().strip().split())
    birth, lifetime = intIt(f.readline().strip().split())
    if not(old_version):
        str_captured, str_stuck, str_theta = f.readline().strip().split()
        theta = float(str_theta)
    else:
        # Legacy header has no theta field.
        str_captured, str_stuck = f.readline().strip().split()
    # The flags were written with '%s', so they come back as the strings
    # 'False'/'None' and must be decoded by hand.
    captured = False if str_captured == 'False' else True
    stuck = None if str_stuck == 'None' else int(str_stuck)
    f.readline() # read the blank line
    Re, Ur = floatIt(f.readline().strip().split())
    f.readline()
    Nt = int(f.readline().strip())
    trajectory = []
    velocities = []
    fluid_velocities = []
    pressure_gradients = []
    accelerations = []
    fluid_accelerations = []
    for n in range(Nt):
        if old_version:
            # Legacy rows: 8 columns, no particle/fluid accelerations.
            x, y, u, v, U, V, gradpx, gradpy \
                = floatIt(f.readline().strip().split())
        else:
            x, y, u, v, U, V, gradpx, gradpy, ap_x, ap_y, af_x, af_y \
                = floatIt(f.readline().strip().split())
        trajectory.append([x, y])
        velocities.append([u, v])
        fluid_velocities.append([U, V])
        pressure_gradients.append([gradpx, gradpy])
        if not(old_version):
            accelerations.append([ap_x, ap_y])
            fluid_accelerations.append([af_x, af_y])
    # Seed the Particle with its initial position/velocity, then attach
    # the remaining state and the full time series.
    pos0 = trajectory[0]
    u0 = velocities[0]
    p = Particle(diameter, density, birth, lifetime, pos0, u0)
    p.captured, p.stuck_to_geometry = captured, stuck
    p.Re, p.Ur = Re, Ur
    p.trajectory = np.array(trajectory)
    p.velocities = np.array(velocities)
    p.fluid_velocities = np.array(fluid_velocities)
    p.pressure_gradients = np.array(pressure_gradients)
    if not(old_version):
        # Accelerations and theta exist only in the current format.
        p.accelerations = np.array(accelerations)
        p.fluid_accelerations = np.array(fluid_accelerations)
        p.theta = theta
    return p
def read_particles(infile, old_version=False):
    """Read every particle stored in ``infile``.

    Inverse of ``write_particles``: reads the particle count, skips the
    blank line, then parses one ``read_particle`` record (plus its blank
    separator line) per particle.

    :param infile: path of the file to read.
    :param old_version: forwarded to ``read_particle`` for legacy files.
    :return: numpy array of Particle objects.
    """
    # 'with' guarantees the file is closed even if a record fails to parse
    # (the original explicit close() leaked the handle on error).
    with open(infile, 'r') as f:
        Np = int(f.readline())
        f.readline()  # read the blank line after the count
        particles = []
        for i in range(Np):
            particles.append(read_particle(f, old_version))
            f.readline()  # blank separator between records
    return np.array(particles)
| 25.466258 | 79 | 0.578897 | 580 | 4,151 | 4.024138 | 0.218966 | 0.050129 | 0.047986 | 0.056984 | 0.153385 | 0.064267 | 0.015424 | 0.015424 | 0 | 0 | 0 | 0.016081 | 0.26596 | 4,151 | 162 | 80 | 25.623457 | 0.749918 | 0.141171 | 0 | 0.20202 | 0 | 0 | 0.020563 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.030303 | 0.020202 | 0.131313 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03740eb7b2e0fb107f339fc022308a8b7f886123 | 3,195 | py | Python | csrv/model/deck.py | mrroach/CentralServer | e377c65d8f3adf5a2d3273acd4f459be697aea56 | [
"Apache-2.0"
] | null | null | null | csrv/model/deck.py | mrroach/CentralServer | e377c65d8f3adf5a2d3273acd4f459be697aea56 | [
"Apache-2.0"
] | null | null | null | csrv/model/deck.py | mrroach/CentralServer | e377c65d8f3adf5a2d3273acd4f459be697aea56 | [
"Apache-2.0"
] | 1 | 2020-09-20T11:26:20.000Z | 2020-09-20T11:26:20.000Z | """A collection of cards."""
import random
from csrv.model import cards
from csrv.model.cards import card_info
# This import is just to pull in all the card definitions
import csrv.model.cards.corp
import csrv.model.cards.runner
class Deck(object):
def __init__(self, identity_name, card_names):
self.identity = cards.Registry.get(identity_name)
self.cards = []
self.is_valid = True
for name in card_names:
c = cards.Registry.get(name)
if c:
self.cards.append(c)
def _verify_less_than_three_copies(self):
"""Make sure we have no more than 3 copies of a single cards"""
card_list = {}
for c in self.cards:
card_list[c.NAME] = card_list.setdefault(c.NAME, 0) + 1
invalid_cards = filter(lambda x: card_list[x] > 3, card_list)
if len(invalid_cards):
return "Deck contains more than 3 copies of the following cards: {}".format(', '.join(invalid_cards))
def _verify_min_deck_size(self):
"""Make sure deck meets minimum deck size limit"""
if len(self.cards) < self.identity.MIN_DECK_SIZE:
self.is_valid = False
return "Deck does not meet minimum deck size requirement"
def _verify_influence_points(self):
"""Make sure deck doesnt exceed maximum influence points"""
influence_spent = reduce(lambda x,y: x+y.influence_cost(self.identity.FACTION), self.cards, 0)
if influence_spent > self.identity.MAX_INFLUENCE:
return "Deck contains {} influence but only {} allowed".format(influence_spent, self.identity.MAX_INFLUENCE)
def _verify_side_only(self, side):
"""Make sure we only have cards belonging to the correct side"""
if len(filter(lambda c: c.SIDE != side, self.cards)):
return "Deck contains cards from the other side (corp/runner)"
class CorpDeck(Deck):
"""A deck for a corp."""
def validate(self):
"""Return a list of errors with the deck."""
return filter(None, [
self._verify_min_deck_size(),
self._verify_influence_points(),
self._verify_less_than_three_copies(),
self._verify_in_faction_agendas(),
self._verify_agenda_points(),
self._verify_side_only(card_info.CORP)
])
def _verify_agenda_points(self):
"""Make sure deck has required agenda points based on deck size"""
agenda_points = reduce(lambda x,y: x+y.AGENDA_POINTS, self.cards, 0)
deck_size = len(self.cards)
if agenda_points/float(deck_size) < 2.0/5.0:
self.is_valid = False
return "Only {} Agenda Points in deck of {} cards".format(agenda_points, deck_size)
def _verify_in_faction_agendas(self):
"""Make sure deck only contains in faction agendas"""
agendas = filter(lambda c: c.TYPE == card_info.AGENDA, self.cards)
if len(filter(lambda a: not a.FACTION in [card_info.NEUTRAL, self.identity.FACTION], agendas)):
return "Deck contains out-of-faction Agendas"
class RunnerDeck(Deck):
"""A deck for a runner."""
def validate(self):
"""Return a list of errors with the deck."""
return filter(None, [
self._verify_min_deck_size(),
self._verify_influence_points(),
self._verify_less_than_three_copies(),
self._verify_side_only(card_info.RUNNER)
])
| 35.10989 | 114 | 0.699531 | 475 | 3,195 | 4.498947 | 0.235789 | 0.037436 | 0.028077 | 0.028077 | 0.321011 | 0.220402 | 0.13664 | 0.13664 | 0.13664 | 0.13664 | 0 | 0.004252 | 0.190297 | 3,195 | 90 | 115 | 35.5 | 0.821801 | 0.16338 | 0 | 0.233333 | 0 | 0 | 0.108945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.083333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
03766bbc82ca9d0b806101dbc0e7af7f9c47c209 | 476 | py | Python | data_structures_and_algorithms/04_menu.py | dileepabandara/return-python | fc269d577eade231bc9e3813654ce9c5848837ca | [
"MIT"
] | 1 | 2022-01-12T17:44:51.000Z | 2022-01-12T17:44:51.000Z | data_structures_and_algorithms/04_menu.py | dileepabandara/return-python | fc269d577eade231bc9e3813654ce9c5848837ca | [
"MIT"
] | null | null | null | data_structures_and_algorithms/04_menu.py | dileepabandara/return-python | fc269d577eade231bc9e3813654ce9c5848837ca | [
"MIT"
] | null | null | null | ans = True
while ans:
print("""
1.Add a Student
2.Delete a Student
3.Look Up Student Record
4.Exit/Quit
""")
ans = input("What would you like to do? ")
if ans == "1":
print("\nStudent Added")
elif ans == "2":
print("\n Student Deleted")
elif ans == "3":
print("\n Student Record Found")
elif ans == "4":
print("\n Goodbye")
ans = None
else:
print("\n Not Valid Choice Try again")
| 22.666667 | 46 | 0.521008 | 66 | 476 | 3.757576 | 0.590909 | 0.096774 | 0.104839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025316 | 0.336134 | 476 | 20 | 47 | 23.8 | 0.759494 | 0 | 0 | 0 | 0 | 0 | 0.460084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0376ebb01bd1aa62d9b4075181468b5d09068e7f | 525 | py | Python | libok.py | txt/se4dm | c38c742039eaa7a15730eb655c4eed067c8a5409 | [
"Unlicense"
] | null | null | null | libok.py | txt/se4dm | c38c742039eaa7a15730eb655c4eed067c8a5409 | [
"Unlicense"
] | 9 | 2015-10-30T12:46:53.000Z | 2015-11-25T03:27:49.000Z | libok.py | txt/se4dm | c38c742039eaa7a15730eb655c4eed067c8a5409 | [
"Unlicense"
] | 2 | 2018-06-22T15:23:44.000Z | 2020-11-05T01:47:54.000Z | from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
from lib import *
@ok
def _rseed():
rseed(1)
one = list('abcdefghijklm')
assert shuffle(one) == ['m', 'h', 'j', 'f', 'a',
'g', 'l', 'd', 'e', 'c', 'i', 'k', 'b']
@ok
def _defDict():
d = DefaultDict(lambda: [])
for n,c in enumerate(list('tobeornottobe')):
d[c].append(n)
assert d == {'b': [2, 11], 'e': [3, 12],
'o': [1, 4, 7, 10], 'n': [6],
'r': [5], 't': [0, 8, 9]}
| 22.826087 | 50 | 0.491429 | 78 | 525 | 3.192308 | 0.74359 | 0.040161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044041 | 0.264762 | 525 | 22 | 51 | 23.863636 | 0.601036 | 0 | 0 | 0.111111 | 0 | 0 | 0.085714 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.277778 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
037803daf8f26a1fd6b807cb352e059357d3aa0d | 734 | py | Python | setup.py | ignalex/HAP-python | 855577cfcde1bf2f8562caf9fbefda3e4fa8b497 | [
"Apache-2.0"
] | 1 | 2018-09-23T20:44:46.000Z | 2018-09-23T20:44:46.000Z | setup.py | ignalex/HAP-python | 855577cfcde1bf2f8562caf9fbefda3e4fa8b497 | [
"Apache-2.0"
] | 1 | 2019-10-02T11:12:13.000Z | 2019-10-02T11:12:13.000Z | setup.py | ilyamordasov/HAP-python | 698eb612c35b5672c4aab9d7896093924cbd358c | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
import pyhap.const as pyhap_const
PROJECT_NAME = 'HAP-python'
URL = 'https://github.com/ikalchev/{}'.format(PROJECT_NAME)
PROJECT_URLS = {
'Bug Reports': '{}/issues'.format(URL),
'Documentation': 'http://hap-python.readthedocs.io/en/latest/',
'Source': '{}/tree/master'.format(URL),
}
PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_NAME)
DOWNLOAD_URL = '{}/archive/{}.zip'.format(URL, pyhap_const.__version__)
MIN_PY_VERSION = '.'.join(map(str, pyhap_const.REQUIRED_PYTHON_VER))
setup(
name=PROJECT_NAME,
version=pyhap_const.__version__,
url=URL,
project_urls=PROJECT_URLS,
download_url=DOWNLOAD_URL,
python_requires='>={}'.format(MIN_PY_VERSION),
)
| 27.185185 | 71 | 0.709809 | 96 | 734 | 5.114583 | 0.458333 | 0.101833 | 0.069246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117166 | 734 | 26 | 72 | 28.230769 | 0.757716 | 0 | 0 | 0 | 0 | 0 | 0.257493 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
037a4b8c8dc9b844a65be270c4263033b7498224 | 1,291 | py | Python | experiments/2014_PLOS-Comp-Bio_Wikidemics-feasibility/scrape_mmwr.py | casmlab/quac | f7b037b15f5ff0db1b9669159f645040abce1766 | [
"ECL-2.0",
"Apache-2.0"
] | 34 | 2015-01-10T05:44:02.000Z | 2021-05-18T02:57:19.000Z | experiments/2014_PLOS-Comp-Bio_Wikidemics-feasibility/scrape_mmwr.py | casmlab/quac | f7b037b15f5ff0db1b9669159f645040abce1766 | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2015-02-15T21:58:09.000Z | 2020-06-05T18:31:47.000Z | experiments/2014_PLOS-Comp-Bio_Wikidemics-feasibility/scrape_mmwr.py | casmlab/quac | f7b037b15f5ff0db1b9669159f645040abce1766 | [
"ECL-2.0",
"Apache-2.0"
] | 19 | 2015-02-08T02:24:15.000Z | 2020-11-07T13:39:55.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Scrape the MMWR morbidity tables at http://wonder.cdc.gov/mmwr/mmwrmorb.asp. No processing is done;
we simply save the files for potential offline processing.
"""
# Copyright (c) Los Alamos National Security, LLC and others.
from __future__ import print_function, division
import requests
import codecs
import os
# URL template for one MMWR morbidity table, parameterized by year,
# two-digit week, and table identifier.
mmwr_table_url = 'http://wonder.cdc.gov/mmwr/mmwr_reps.asp?mmwr_year=%d&mmwr_week=%02d&mmwr_table=%s&request=Submit'
# Local cache path template for a downloaded table (year-week-table).
mmwr_file = '../data/mmwr/%d-%02d-%s.html'
# Table identifiers requested for each weekly report.
tables = {'1', '2A', '2B', '2C', '2D', '2E', '2F', '2G', '2H', '2I', '2J', '2K', '3A', '3B', '4'}
# Phrases in the response body indicating the requested table has no data.
error_messages = {'Data are not available for the week requested.', 'No records found.', 'does not exist before the week ending'}
# Fetch every (year, week, table) combination that is not already cached
# on disk, saving each successful response verbatim for offline processing.
for year in range(1996, 2015):
    for week in range(1, 54):
        for table in tables:
            target = mmwr_file % (year, week, table)
            if os.path.exists(target):
                continue  # already downloaded
            response = requests.get(mmwr_table_url % (year, week, table))
            # Skip responses that contain any known "no data" phrase.
            error = any(msg in response.text for msg in error_messages)
            if not error:
                with codecs.open(target, 'w', 'utf-8') as output:
                    output.write(response.text)
                print('saved %s' % target)
| 32.275 | 130 | 0.670023 | 200 | 1,291 | 4.22 | 0.55 | 0.037915 | 0.061611 | 0.056872 | 0.122038 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030047 | 0.175058 | 1,291 | 39 | 131 | 33.102564 | 0.762441 | 0.202169 | 0 | 0 | 0 | 0.045455 | 0.263261 | 0.027505 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
037b522742c7eb6098ac17119575246e7a1d22e3 | 6,970 | py | Python | cluster_status.py | jtimberlake/hyper-kube-config | d624f81e04d1560b584bb7b748451dd5181e15bf | [
"MIT"
] | 29 | 2018-10-11T17:34:33.000Z | 2019-10-09T04:24:22.000Z | cluster_status.py | silvermullet/kube-auth-store | a9c6966fe7b29e0bf80f9e40027310fd4a07dbc3 | [
"MIT"
] | 14 | 2018-12-18T18:14:19.000Z | 2019-10-19T18:38:12.000Z | cluster_status.py | silvermullet/kube-auth-store | a9c6966fe7b29e0bf80f9e40027310fd4a07dbc3 | [
"MIT"
] | 6 | 2018-11-06T09:32:40.000Z | 2019-10-17T18:18:08.000Z | import json
import logging
import os
import traceback
from boto3.dynamodb.conditions import Attr, Key
import storage
from util import lambda_result
logger = logging.getLogger('cluster_status')
if os.environ.get('DEBUG'):
logger.setLevel(logging.DEBUG)
def set_cluster_status(event, context):
"""Set the status of a cluster, ie active, inactive,
maintainance_mode, etc"""
CLUSTER_TABLE = storage.get_cluster_table()
query_string_params = event.get('queryStringParameters', {})
cluster_status = query_string_params.get('cluster_status')
if cluster_status is None:
return lambda_result(
{"message": f'Must provide a status variable in uri query string'},
status_code=500)
cluster_name = query_string_params.get('cluster_name')
if cluster_name is None:
return lambda_result(
{"message": (f'Must provide a cluster_name '
f'variable in uri query string')},
status_code=500)
try:
CLUSTER_TABLE.update_item(
Key={
'id': cluster_name,
},
UpdateExpression="SET cluster_status = :r",
ExpressionAttributeValues={
':r': cluster_status
},
ReturnValues="UPDATED_NEW"
)
return lambda_result(
{"message": (f'Updated cluster status for {cluster_name} '
f'to {cluster_status}')})
except Exception:
failed_txt = f'Failed to update cluster status for {cluster_name}'
logger.exception(failed_txt)
return lambda_result({"message": failed_txt}, status_code=500)
def set_cluster_environment(event, context):
"""Set the environment of a cluster, ie dev, stage, prod"""
CLUSTER_TABLE = storage.get_cluster_table()
query_string_params = event.get('queryStringParameters', {})
environment = query_string_params.get('environment')
if environment is None:
return lambda_result(
{"message":
f'Must provide an environment param in uri query string'},
status_code=500)
cluster_name = query_string_params.get('cluster_name')
if cluster_name is None:
return lambda_result(
{"message": (f'Must provide a cluster_name '
f'variable in uri query string')},
status_code=500)
try:
CLUSTER_TABLE.update_item(
Key={
'id': cluster_name,
},
UpdateExpression="ADD environment :e",
ExpressionAttributeValues={
':e': set([environment])
},
ReturnValues="UPDATED_NEW"
)
msg = (f'Updated cluster environment for {cluster_name} '
f'to {environment}')
return lambda_result(msg)
except Exception as e:
failed_txt = f'Failed to update cluster environment for {cluster_name}'
failed_txt += "\n{} \n{}".format(
str(e), repr(traceback.format_stack()))
print(failed_txt)
return lambda_result({"message": failed_txt}, status_code=500)
def clusters_per_environment(event, context):
"""Query cluster status attribute for given environment,
requires 'environment' query param, or defaults to all clusters"""
clusters = []
environment = event.get('queryStringParameters', {}).get('environment')
items = _query_dynamodb(environment)
for cluster in items:
clusters.append(cluster['id'])
return lambda_result(clusters)
def cluster_status(event, context):
"""Query cluster status attribute for given environment,
requires 'environment' query param, or defaults to all clusters"""
clusters = []
query_string_params = event.get('queryStringParameters', {})
environment = query_string_params.get('environment')
cluster_status = query_string_params.get('cluster_status')
items = _query_dynamodb(environment, cluster_status)
for cluster in items:
clusters.append(cluster['id'])
return lambda_result(clusters)
def set_cluster_metadata(event, context):
"""Set the metadata of a cluster.
metadata is a json blob use for describing extra details about a cluster.
"""
CLUSTER_TABLE = storage.get_cluster_table()
query_string_params = event.get('queryStringParameters', {})
metadata = event.get('body', {})
cluster_name = query_string_params.get('cluster_name')
if cluster_name is None:
return lambda_result(
{"message": (f'Must provide a cluster_name '
f'variable in uri query string')},
status_code=500)
try:
if isinstance(metadata, str):
metadata = json.loads(metadata)
CLUSTER_TABLE.update_item(
Key={
'id': cluster_name,
},
UpdateExpression="set metadata = :md",
ExpressionAttributeValues={
':md': metadata
},
ReturnValues="UPDATED_NEW"
)
return lambda_result(
{"message": f'Updated cluster metadata for {cluster_name}'}
)
except Exception:
failed_txt = f'Failed to update cluster metadata for {cluster_name}'
logger.exception(failed_txt)
logger.error(json.dumps(event))
return lambda_result({"message": failed_txt}, status_code=500)
def get_cluster_metadata(event, context):
"""Get the metadata of a cluster.
metadata is a json blob use for describing extra details about a cluster.
"""
CLUSTER_TABLE = storage.get_cluster_table()
query_string_params = event.get('queryStringParameters', {})
cluster_name = query_string_params.get('cluster_name')
if cluster_name is None:
return {
"statusCode": 500,
"body": json.dumps(
{"message": (f'Must provide a cluster_name '
f'variable in uri query string')})
}
status_code = 404
db_response = CLUSTER_TABLE.get_item(
Key={
'id': cluster_name,
}
)
metadata = {}
if 'Item' in db_response:
status_code = 200
metadata = db_response['Item'].get('metadata', {})
if isinstance(metadata, str):
metadata = json.loads(metadata)
metadata['environment'] = db_response['Item'].get('environment')
metadata['status'] = db_response['Item'].get('status')
metadata['id'] = cluster_name
return lambda_result(metadata, status_code=status_code)
def _query_dynamodb(environment, status=None, metadata=False):
CLUSTER_TABLE = storage.get_cluster_table()
fkey = Attr('environment').contains(environment)
if status is not None:
fkey = fkey & Key('cluster_status').eq(status)
response = CLUSTER_TABLE.scan(
ProjectionExpression="id",
FilterExpression=fkey
)
return response.get('Items', [])
| 32.877358 | 79 | 0.628264 | 771 | 6,970 | 5.48249 | 0.159533 | 0.070263 | 0.059617 | 0.059144 | 0.647268 | 0.614384 | 0.60634 | 0.583156 | 0.538443 | 0.506506 | 0 | 0.006692 | 0.271019 | 6,970 | 211 | 80 | 33.033175 | 0.825231 | 0.082066 | 0 | 0.496894 | 0 | 0 | 0.191258 | 0.019883 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.186335 | 0.006211 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
037cb54aac999a27c21c13f841feb80028eba68f | 1,366 | py | Python | ote_sdk/ote_sdk/utils/labels_utils.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | null | null | null | ote_sdk/ote_sdk/utils/labels_utils.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | null | null | null | ote_sdk/ote_sdk/utils/labels_utils.py | ntyukaev/training_extensions | c897d42e50828fea853ceda0795e1f0e7d6e9909 | [
"Apache-2.0"
] | 1 | 2020-12-13T22:13:51.000Z | 2020-12-13T22:13:51.000Z | """
This module implements utilities for labels
"""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from typing import List, Optional
from ote_sdk.entities.label import LabelEntity
from ote_sdk.entities.label_schema import LabelSchemaEntity
from ote_sdk.entities.scored_label import ScoredLabel
def get_empty_label(label_schema: LabelSchemaEntity) -> Optional[LabelEntity]:
"""
Get first empty label from label_schema
"""
empty_candidates = list(
set(label_schema.get_labels(include_empty=True))
- set(label_schema.get_labels(include_empty=False))
)
if empty_candidates:
return empty_candidates[0]
return None
def get_leaf_labels(label_schema: LabelSchemaEntity) -> List[LabelEntity]:
"""
Get leafs from label tree
"""
leaf_labels = []
all_labels = label_schema.get_labels(False)
for lbl in all_labels:
if not label_schema.get_children(lbl):
leaf_labels.append(lbl)
return leaf_labels
def get_ancestors_by_prediction(
label_schema: LabelSchemaEntity, prediction: ScoredLabel
) -> List[ScoredLabel]:
"""
Get all the ancestors for a given label node
"""
ancestor_labels = label_schema.get_ancestors(prediction.get_label())
return [ScoredLabel(al, prediction.probability) for al in ancestor_labels]
| 27.32 | 78 | 0.7306 | 172 | 1,366 | 5.575581 | 0.366279 | 0.114703 | 0.072993 | 0.056309 | 0.120959 | 0.072993 | 0.072993 | 0 | 0 | 0 | 0 | 0.009883 | 0.185212 | 1,366 | 49 | 79 | 27.877551 | 0.851752 | 0.170571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.166667 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cee3692c9f60cfa65662bb7421bd3d405f7b7920 | 4,329 | py | Python | meggie/actions/spectrum_plot/controller/spectrum.py | Teekuningas/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2020-04-29T08:57:11.000Z | 2021-01-15T21:21:51.000Z | meggie/actions/spectrum_plot/controller/spectrum.py | Teekuningas/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 16 | 2019-05-03T10:31:16.000Z | 2021-05-06T14:59:55.000Z | meggie/actions/spectrum_plot/controller/spectrum.py | cibr-jyu/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2020-12-12T09:57:00.000Z | 2020-12-20T17:12:05.000Z | """ Contains functions for plot spectrum action
"""
import mne
import numpy as np
import matplotlib.pyplot as plt
from meggie.utilities.plotting import color_cycle
from meggie.utilities.plotting import create_channel_average_plot
from meggie.utilities.channels import average_to_channel_groups
from meggie.utilities.channels import iterate_topography
from meggie.utilities.units import get_power_unit
def plot_spectrum_averages(subject, channel_groups, name, log_transformed=True):
    """ Plots channel-group averages of a stored spectrum, one figure
    per channel type.

    Parameters
    ----------
    subject : meggie subject holding the ``spectrum`` collection
    channel_groups : channel-group definition forwarded to
        ``average_to_channel_groups``
    name : name of the spectrum stored in ``subject.spectrum``
    log_transformed : if True, curves are plotted as 10*log10(power)
    """
    spectrum = subject.spectrum.get(name)

    data = spectrum.content
    freqs = spectrum.freqs
    ch_names = spectrum.ch_names
    info = spectrum.info

    colors = color_cycle(len(data))
    # Colors are assigned below by iterating sorted(data.items()); the
    # legend must use the same sorted order or labels and curve colors
    # could mismatch (the original used unsorted content.keys()).
    conditions = sorted(data.keys())

    # averages maps (ch_type, ch_group) -> list of (condition, curve),
    # appended in sorted-condition order so color_idx lines up with colors.
    averages = {}
    for key, psd in sorted(data.items()):

        data_labels, averaged_data = average_to_channel_groups(
            psd, info, ch_names, channel_groups)

        for label_idx, label in enumerate(data_labels):
            if label not in averages:
                averages[label] = []
            averages[label].append((key, averaged_data[label_idx]))

    ch_types = sorted(set(label[0] for label in averages))

    for ch_type in ch_types:
        ch_groups = sorted(label[1] for label in averages
                           if label[0] == ch_type)

        def plot_fun(ax_idx, ax):
            # Draw every condition's averaged curve for one channel group.
            ch_group = ch_groups[ax_idx]
            ax.set_title(ch_group)
            ax.set_xlabel('Frequency (Hz)')
            ax.set_ylabel('Power ({})'.format(
                get_power_unit(ch_type, log_transformed)))

            for color_idx, (key, curve) in enumerate(averages[(ch_type, ch_group)]):
                if log_transformed:
                    curve = 10 * np.log10(curve)
                ax.plot(freqs, curve, color=colors[color_idx])

        title = ' '.join([name, ch_type])
        legend = list(zip(conditions, colors))
        create_channel_average_plot(len(ch_groups), plot_fun, title, legend)

    plt.show()
def plot_spectrum_topo(subject, name, log_transformed=True, ch_type='meg'):
    """ Plots spectrum curves arranged in a sensor topography; clicking
    an axes opens the detailed per-channel plot via ``individual_plot``.

    Parameters
    ----------
    subject : meggie subject holding the ``spectrum`` collection
    name : name of the spectrum stored in ``subject.spectrum``
    log_transformed : if True, curves are plotted as 10*log10(power)
    ch_type : 'meg' to pick MEG channels, anything else picks EEG
    """
    spectrum = subject.spectrum.get(name)

    data = spectrum.content
    freqs = spectrum.freqs
    ch_names = spectrum.ch_names
    info = spectrum.info

    # Pick MEG or EEG channels; the two branches of the original differed
    # only in these two boolean flags.
    want_meg = (ch_type == 'meg')
    picks = mne.pick_types(info, meg=want_meg, eeg=not want_meg)
    picked_channels = [ch_name for ch_idx, ch_name in enumerate(info['ch_names'])
                       if ch_idx in picks]
    info = info.copy().pick_channels(picked_channels)

    colors = color_cycle(len(data))

    def individual_plot(ax, info_idx, names_idx):
        """Show one channel's spectrum in its own window (topo click handler)."""
        ch_name = ch_names[names_idx]
        for color_idx, (key, psd) in enumerate(sorted(data.items())):
            if log_transformed:
                curve = 10 * np.log10(psd[names_idx])
            else:
                curve = psd[names_idx]
            ax.plot(freqs, curve, color=colors[color_idx],
                    label=key)

        title = ' '.join([name, ch_name])
        ax.figure.canvas.set_window_title(title.replace(' ', '_'))
        ax.figure.suptitle(title)
        ax.set_title('')
        ax.legend()

        ax.set_xlabel('Frequency (Hz)')
        ax.set_ylabel('Power ({})'.format(get_power_unit(
            mne.io.pick.channel_type(info, info_idx),
            log_transformed
        )))
        plt.show()

    fig = plt.figure()
    # Initialize so fig.legend below cannot hit an unbound name when
    # iterate_topography yields no axes (the original raised NameError).
    handles = []
    for ax, info_idx, names_idx in iterate_topography(
            fig, info, ch_names, individual_plot):

        handles = []
        for color_idx, (key, psd) in enumerate(sorted(data.items())):
            if log_transformed:
                curve = 10 * np.log10(psd[names_idx])
            else:
                curve = psd[names_idx]
            handles.append(ax.plot(curve, color=colors[color_idx],
                                   linewidth=0.5, label=key)[0])
        if not handles:
            return

    # All axes share the same labels, so the last axes' handles suffice.
    fig.legend(handles=handles)
    title = '{0}_{1}'.format(name, ch_type)
    fig.canvas.set_window_title(title)
    plt.show()
| 30.921429 | 85 | 0.612151 | 550 | 4,329 | 4.6 | 0.190909 | 0.024901 | 0.037549 | 0.016601 | 0.46087 | 0.330435 | 0.330435 | 0.318577 | 0.290909 | 0.290909 | 0 | 0.006392 | 0.2772 | 4,329 | 139 | 86 | 31.143885 | 0.802173 | 0.024255 | 0 | 0.347368 | 0 | 0 | 0.019341 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042105 | false | 0 | 0.084211 | 0 | 0.136842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cee59d1b21ebd4c01ff98f1b398ff22e296663a6 | 7,733 | py | Python | data/cyclesps.py | DawyD/UNet-PS-4D | bdd31308854dbd5f309aec9bcc1f7a35f267481d | [
"MIT"
] | 1 | 2021-12-06T17:20:36.000Z | 2021-12-06T17:20:36.000Z | data/cyclesps.py | DawyD/UNet-PS-4D | bdd31308854dbd5f309aec9bcc1f7a35f267481d | [
"MIT"
] | null | null | null | data/cyclesps.py | DawyD/UNet-PS-4D | bdd31308854dbd5f309aec9bcc1f7a35f267481d | [
"MIT"
] | 3 | 2021-12-06T08:09:42.000Z | 2022-03-12T08:09:34.000Z | """
DataGenerator for CyclesPS Dataset
This file use substantial portion of code from the original CNN-PS repository https://github.com/satoshi-ikehata/CNN-PS/
"""
import numpy as np
import cv2
import os
import gc
from data.datagenerator import DataGenerator
from data.utils import rotate_images
from misc.projections import standard_proj
from tensorflow.keras.models import Model
class CyclesDataGenerator(DataGenerator):
    """DataGenerator over the CyclesPS photometric-stereo dataset.

    Each object contributes three rendered material variants (diffuse at full
    resolution, specular and metallic at half resolution) read from the
    'PRPS_Diffuse' and 'PRPS' directory trees under ``datapath``.
    """

    def __init__(self, datapath, objlist=None, batch_size=256,
                 spatial_patch_size=5, obs_map_size=32, shuffle=False, random_illums=False,
                 keep_axis=True, validation_split=None, nr_rotations=1, rotation_start=0, rotation_end=2 * np.pi,
                 projection=standard_proj, add_raw=False, images=None, normals=None, masks=None, illum_dirs=None,
                 order=2, divide_maps=False, round_nearest=True, rot_2D=False, verbose=False):
        """Store CyclesPS-specific settings and delegate the rest to DataGenerator.

        :param datapath: root directory containing 'PRPS' and 'PRPS_Diffuse'
        :param objlist: object folder names; defaults to the sorted contents of <datapath>/PRPS
        :param verbose: print per-object progress while loading
        Remaining keyword arguments are forwarded unchanged to DataGenerator.
        """
        self.datapath = datapath
        # Default object list is discovered from the PRPS subtree.
        self.objlist = objlist if objlist is not None else sorted(os.listdir(datapath + '/PRPS'))
        self.verbose = verbose
        super(CyclesDataGenerator, self).__init__(
            batch_size=batch_size,
            spatial_patch_size=spatial_patch_size,
            obs_map_size=obs_map_size,
            shuffle=shuffle,
            random_illums=random_illums,
            keep_axis=keep_axis,
            validation_split=validation_split,
            nr_rotations=nr_rotations,
            rotation_start=rotation_start,
            rotation_end=rotation_end,
            projection=projection,
            add_raw=add_raw,
            images=images,
            normals=normals,
            masks=masks,
            illum_dirs=illum_dirs,
            order=order,
            divide_maps=divide_maps,
            round_nearest=round_nearest,
            rot_2D=rot_2D)

    def load_data(self):
        """Load every object's three material variants and hand them to fill_data.

        Each object yields three samples in order: diffuse (scale 1),
        specular (scale 0.5), metallic (scale 0.5).
        """
        objid = 0
        for obj in self.objlist:
            # (base dir, image subdir, resize scale) per material variant.
            for dirb, dirn, scale in zip(['PRPS_Diffuse/' + '%s' % obj, 'PRPS/' + '%s' % obj, 'PRPS/' + '%s' % obj],
                                         ['images_diffuse', 'images_specular', 'images_metallic'],
                                         [1, 0.5, 0.5]):
                if self.verbose:
                    # NOTE(review): self.nr_objects is presumably set by the
                    # DataGenerator base class -- confirm.
                    print("\rPre-loading image ({:}/{:}) {:} ".format(objid + 1, self.nr_objects, dirb), end="")
                # Raw mode keeps the 3 color channels; otherwise grayscale.
                nr_ch = 3 if self.add_raw else 1
                sample_path = os.path.join(self.datapath, dirb, dirn)
                imgs, nmls, msks, light_dirs = self.load_sample(sample_path, scale, -1, nr_ch)
                self.fill_data(imgs, nmls, msks, light_dirs, objid)
                if self.verbose:
                    # Clear the progress line.
                    print("", end="\x1b[1K\r")
                objid += 1
        if self.verbose:
            print()

    def get_max_shape(self, rotations=None):
        """
        Returns a shape of an array (height, width, channels) which all images of various sizes under all rotations fit
        :param rotations: List of rotation angles (in radians)
        :return: max_shape [nr_objects, height, width, channels]
        """
        max_shape = [0, 0, 0, 0]
        for obj in self.objlist:
            for p, scale in zip(['PRPS_Diffuse/' + '%s' % obj,
                                 'PRPS/' + '%s' % obj,
                                 'PRPS/' + '%s' % obj], [1, 0.5, 0.5]):
                max_shape[0] += 1
                normal_path = os.path.join(self.datapath, p, 'gt_normal.tif')
                if not os.path.exists(normal_path):
                    raise ValueError("Path\"{:}\"does not exists.".format(normal_path))
                normals = cv2.imread(normal_path, -1)
                normals = cv2.resize(normals, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
                f = open(os.path.join(self.datapath, p, 'light.txt'))
                data = f.read()
                f.close()
                lines = data.split('\n')
                nr_illums = len(lines) - 1  # the last line is empty (how to fix it?)
                if nr_illums > max_shape[3]:
                    max_shape[3] = nr_illums
                if rotations is not None:
                    # In case of rotations, the width and height might be larger
                    for angle in rotations:
                        img_shape = rotate_images(2 * np.pi - angle, normals[..., 0], axes=(0, 1), order=0).shape
                        for k in range(2):
                            if img_shape[k] > max_shape[k+1]:
                                max_shape[k+1] = img_shape[k]
                else:
                    for k in range(2):
                        if normals.shape[k] > max_shape[k+1]:
                            max_shape[k+1] = normals.shape[k]
            # Free the per-object image memory eagerly.
            gc.collect()
        return max_shape

    @staticmethod
    def load_sample(dirpath, scale, illum_ids=-1, nr_channels=1):
        """Read one sample (all illuminations) from ``dirpath``.

        :param dirpath: image directory; normals/masks/lights live one level up
        :param scale: resize factor applied to normals and masks
        :param illum_ids: must be -1 (all illuminations); other values unsupported
        :param nr_channels: 1 for grayscale average, 3 to keep RGB
        :return: (images, normals, masks, light_directions)
        """
        assert illum_ids == -1
        normal_path = os.path.join(dirpath, '../gt_normal.tif')
        inboundary_path = os.path.join(dirpath, '../inboundary.png')
        onboundary_path = os.path.join(dirpath, '../onboundary.png')
        if not os.path.exists(normal_path):
            raise ValueError("Path\"{:}\"does not exists.".format(normal_path))
        # read ground truth surface normal
        normals = np.float32(cv2.imread(normal_path, -1)) / 65535.0  # [-1,1]
        # BGR -> RGB, then map [0,1] to [-1,1] and re-normalize to unit length.
        normals = normals[:, :, ::-1]
        normals = 2 * normals - 1
        normals = cv2.resize(normals, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
        normals = normals / np.sqrt(np.sum(normals**2, axis=-1, keepdims=True))
        height, width = np.shape(normals)[:2]
        # read mask images_metallic
        if os.path.exists(inboundary_path) and os.path.exists(onboundary_path):
            inboundary = cv2.imread(inboundary_path, -1)
            inboundary = cv2.resize(inboundary, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
            inboundary = inboundary > 0
            onboundary = cv2.imread(onboundary_path, -1)
            onboundary = cv2.resize(onboundary, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
            onboundary = onboundary > 0
            masks = inboundary | onboundary
        else:
            # Fall back to "normal points toward the camera" as the mask.
            masks = normals[..., 2] > 0
        masks = masks[..., None]
        # read light filenames
        f = open(os.path.join(dirpath, '../light.txt'))
        data = f.read()
        f.close()
        lines = data.split('\n')
        nr_illums = len(lines) - 1  # the last line is empty (how to fix it?)
        light_directions = np.zeros((nr_illums, 3), np.float32)
        for i, l in enumerate(lines):
            s = l.split(' ')
            if len(s) == 3:
                light_directions[i, 0] = float(s[0])
                light_directions[i, 1] = float(s[1])
                light_directions[i, 2] = float(s[2])
        # read images
        images = np.zeros((height, width, nr_illums, nr_channels), np.float32)
        for i in range(nr_illums):
            # Progress dots; NOTE(review): when nr_illums < 10 the floor is 0
            # and the modulo divides by zero -- confirm datasets always have
            # at least 10 illuminations.
            if i % np.floor(nr_illums / 10) == 0:
                print('.', end='')
            image_path = os.path.join(dirpath, '%05d.tif' % i)
            cv2_im = cv2.imread(image_path, -1) / 65535.0
            # NOTE(review): cv2.resize expects dsize as (width, height); this
            # passes (height, width), which only matches for square images --
            # confirm against the dataset's image dimensions.
            cv2_im = cv2.resize(cv2_im, (height, width), interpolation=cv2.INTER_NEAREST)
            if nr_channels == 1:
                # Plain channel average for the grayscale case.
                cv2_im = (cv2_im[:, :, 0:1] + cv2_im[:, :, 1:2] + cv2_im[:, :, 2:3]) / 3
            images[:, :, i] = cv2_im
        return images, normals, masks, light_directions

    @staticmethod
    def load_sample_test(dir_path, obj_path, scale, index=-1):
        """Test-time wrapper around load_sample for an 'obj/subdir' path.

        NOTE(review): the arguments passed here do not match load_sample's
        signature (dirn lands in the ``scale`` slot and scale in
        ``illum_ids``) -- this looks broken; verify against callers.
        """
        assert index == -1
        obj, dirn = obj_path.split("/")
        return CyclesDataGenerator.load_sample(dir_path + obj, dirn, scale)
| 40.915344 | 120 | 0.557869 | 957 | 7,733 | 4.351097 | 0.23093 | 0.017291 | 0.019212 | 0.020173 | 0.25 | 0.206532 | 0.174832 | 0.163785 | 0.163785 | 0.141691 | 0 | 0.0262 | 0.323807 | 7,733 | 188 | 121 | 41.132979 | 0.770128 | 0.079917 | 0 | 0.179856 | 0 | 0 | 0.041212 | 0 | 0 | 0 | 0 | 0 | 0.014388 | 1 | 0.035971 | false | 0 | 0.057554 | 0 | 0.122302 | 0.028777 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceea1cba85bb3d624953e8ecf28fb6d54fd02614 | 4,429 | py | Python | code/vocabulary.py | TimothyBenger/knausj_talon | 10c2440fb3646abda1adc84ca9fd230f752eb353 | [
"MIT"
] | null | null | null | code/vocabulary.py | TimothyBenger/knausj_talon | 10c2440fb3646abda1adc84ca9fd230f752eb353 | [
"MIT"
] | null | null | null | code/vocabulary.py | TimothyBenger/knausj_talon | 10c2440fb3646abda1adc84ca9fd230f752eb353 | [
"MIT"
] | null | null | null | from talon import Context, Module
from .user_settings import get_list_from_csv
mod = Module()
ctx = Context()
mod.list("vocabulary", desc="additional vocabulary words")
# Default words that will need to be capitalized (particularly under w2l).
# NB. These defaults and those later in this file are ONLY used when
# auto-creating the corresponding settings/*.csv files. Those csv files
# determine the contents of user.vocabulary and dictate.word_map. Once they
# exist, the contents of the lists/dictionaries below are irrelevant.
_capitalize_defaults = [
"I",
"I'm",
"I've",
"I'll",
"I'd",
"Monday",
"Mondays",
"Tuesday",
"Tuesdays",
"Wednesday",
"Wednesdays",
"Thursday",
"Thursdays",
"Friday",
"Fridays",
"Saturday",
"Saturdays",
"Sunday",
"Sundays",
"January",
"February",
# March omitted because it's a regular word too
"April",
# May omitted because it's a regular word too
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
# Default words that need to be remapped.
_word_map_defaults = {
# E.g:
# "cash": "cache",
# This is the opposite ordering to words_to_replace.csv (the latter has the target word first)
}
_word_map_defaults.update({word.lower(): word for word in _capitalize_defaults})
# "dictate.word_map" is used by `actions.dictate.replace_words` to rewrite words
# Talon recognized. Entries in word_map don't change the priority with which
# Talon recognizes some words over others.
ctx.settings["dictate.word_map"] = get_list_from_csv(
"words_to_replace.csv",
headers=("Replacement", "Original"),
default=_word_map_defaults,
)
# Default words that should be added to Talon's vocabulary.
_simple_vocab_default = ["nmap", "admin", "Cisco", "Citrix", "VPN", "DNS", "Minecraft", "Ferran", "Angelos", "storageos"]
# Defaults for different pronounciations of words that need to be added to
# Talon's vocabulary.
_default_vocabulary = {
"N map": "nmap",
"under documented": "under-documented",
"koob control": "kubectl",
"cube control": "kubectl",
"keep control": "kubectl",
"chang pod": "pod",
"chang pods": "pods",
"chang node": "node",
"chang nodes": "nodes",
"chang kubernetes": "kubernetes",
"chang git": "git",
"chang pull": "pull",
"chang com": "com",
"chang delete": "delete",
"trying to lead": "delete",
"replica set": "replicaset",
"change delete": "delete",
"name space": "namespace",
"at it": "edit",
"chang sudo": "sudo",
"diagnostic yew till": "diagnosticutil",
"stateful set": "statefulset",
"in flux": "influx",
"you control": "kubectl",
"check out": "checkout",
"make directory": "mkdir",
"demon set": "daemonset",
"demon sets": "daemonsets",
"chang log": "log",
"chang logs": "log",
"koob control create from file": "kubectl create -f",
"cube control create from file": "kubectl create -f",
"keep control create from file": "kubectl create -f",
"chang seff": "ceph",
"ray doss": "RADOS",
"raydos": "RADOS",
"open sauce": "open-source",
"all namespaces": "--all-namespaces",
"output wide": "-o wide",
"etsy dee": "etcd",
"at city": "etcd",
"at cd": "etcd",
"cube system": "kube-system",
"from file": " - f ",
"with namespace": " - n ",
"chang log": "log",
"chang logs": "logs",
"change directory": "cd",
"storage class": "storageclass",
"my sequel": "mysql",
"dee bench": "dbench",
"chang hay": "hey",
"elastic search": "elasticsearch",
"elastic such": "elasticsearch",
"storage oh ess": "storageos",
"store to us": "storageos",
"store ous": "storageos",
"store joes": "store joes"
}
_default_vocabulary.update({word: word for word in _simple_vocab_default})
# "user.vocabulary" is used to explicitly add words/phrases that Talon doesn't
# recognize. Words in user.vocabulary (or other lists and captures) are
# "command-like" and their recognition is prioritized over ordinary words.
ctx.lists["user.vocabulary"] = get_list_from_csv(
"additional_words.csv",
headers=("Word(s)", "Spoken Form (If Different)"),
default=_default_vocabulary,
)
# for quick verification of the reload
# print(str(ctx.settings["dictate.word_map"]))
# print(str(ctx.lists["user.vocabulary"]))
| 29.925676 | 121 | 0.64326 | 551 | 4,429 | 5.087114 | 0.471869 | 0.019979 | 0.019979 | 0.014984 | 0.121655 | 0.078131 | 0.060293 | 0.022833 | 0 | 0 | 0 | 0.000284 | 0.204561 | 4,429 | 147 | 122 | 30.129252 | 0.795345 | 0.290133 | 0 | 0.018018 | 0 | 0 | 0.482051 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.018018 | 0 | 0.018018 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceef88b42a5304577b2b39be8918b4680ae52465 | 9,605 | py | Python | upetem_service.py | myroslav/robot_tests.broker.upetem | 323314259faa60618113fbc37b5e1f1d79c2192b | [
"Apache-2.0"
] | null | null | null | upetem_service.py | myroslav/robot_tests.broker.upetem | 323314259faa60618113fbc37b5e1f1d79c2192b | [
"Apache-2.0"
] | null | null | null | upetem_service.py | myroslav/robot_tests.broker.upetem | 323314259faa60618113fbc37b5e1f1d79c2192b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from datetime import datetime, timedelta
import dateutil.parser
import pytz
import urllib
TZ = pytz.timezone('Europe/Kiev')
def adapt_data(data):
    """Normalize a tender payload for the uPetem broker, in place.

    Replaces the procuring entity name with the test owner, maps unit names,
    regions and localities to the broker's expected spellings, zeroes the
    time-of-day of delivery dates, and trims the phone number to 13 chars.
    Returns the (mutated) payload.
    """
    tender = data['data']
    tender['procuringEntity']['name'] = 'testuser_tender_owner'
    for item in tender['items']:
        item['unit']['name'] = get_unit_name(item['unit']['name'])
        item['deliveryAddress']['region'] = get_delivery_region(item['deliveryAddress']['region'])
        item['deliveryAddress']['locality'] = convert_locality(item['deliveryAddress']['locality'])
        item['deliveryDate']['startDate'] = adapt_delivery_date(item['deliveryDate']['startDate'])
        item['deliveryDate']['endDate'] = adapt_delivery_date(item['deliveryDate']['endDate'])
    entity = tender['procuringEntity']
    entity['address']['region'] = get_delivery_region(entity['address']['region'])
    entity['address']['locality'] = convert_locality(entity['address']['locality'])
    entity['contactPoint']['telephone'] = entity['contactPoint']['telephone'][:13]
    return data
def adapt_step(data, new_step):
    """Set both the tender-level and first-lot minimal step to *new_step*,
    rounded to two decimal places. Mutates *data* in place."""
    rounded = round(new_step, 2)
    data['data']['minimalStep']['amount'] = rounded
    data['data']['lots'][0]['minimalStep']['amount'] = rounded
def adapt_unit_name(data):
    """Expand an abbreviated unit name to its full Ukrainian form.

    Unknown names are returned unchanged.
    """
    full_names = {
        u"наб.": u"набір",
        u"шт.": u"штуки",
        u"упак.": u"упаковка",
    }
    return full_names.get(data, data)
def adapt_data_view(data):
    """Zero out the time-of-day of every item's delivery dates, in place.

    Returns the (mutated) payload.
    """
    for item in data['data']['items']:
        delivery = item['deliveryDate']
        delivery['startDate'] = adapt_delivery_date(delivery['startDate'])
        delivery['endDate'] = adapt_delivery_date(delivery['endDate'])
    return data
def download_file(url, file_name, output_dir):
    """Download *url* and save it as <output_dir>/<file_name>."""
    # NOTE(review): urllib.urlretrieve is the Python 2 API; on Python 3 this
    # lives at urllib.request.urlretrieve -- confirm the target interpreter
    # before changing it.
    urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))
def get_type_field(field):
    """Classify a tender field name as a 'value' or 'text' field.

    :param field: dotted field name, e.g. 'unit.name' or 'description'
    :return: 'value' or 'text'
    :raises ValueError: for unknown field names (previously this path crashed
        with an UnboundLocalError, which masked the real problem)
    """
    value = ['deliveryDate.startDate', 'deliveryDate.endDate', 'deliveryAddress.postalCode', 'deliveryAddress.region',
             'deliveryAddress.streetAddress',
             'additionalClassifications.id', 'classification.id', 'unit.name', 'unit.code', 'deliveryLocation.latitude',
             'deliveryLocation.longitude', 'quantity', 'deliveryAddress.locality',
             'title', 'value.amount', 'value.valueAddedTaxIncluded', 'minimalStep.amount',
             'minimalStep.valueAddedTaxIncluded']
    text = ['description', 'deliveryAddress.countryName', 'classification.scheme', 'classification.description',
            'additionalClassifications.scheme', 'additionalClassifications.description',
            'value.currency', 'minimalStep.currency', 'featureOf', 'status', 'resolutionType', 'resolution', 'satisfied', 'complaintID', 'cancellationReason']
    if field in value:
        return 'value'
    if field in text:
        return 'text'
    raise ValueError("Unknown field: {}".format(field))
def get_delivery_region(region):
    """Map a standard region name to the broker's spelling.

    Several replacements deliberately use the Latin letter 'i' instead of the
    Cyrillic one (matching what the broker UI displays). Unknown regions are
    returned unchanged.
    """
    replacements = {
        u"місто Київ": u"м.Київ",
        u"Дніпропетровська область": u"Днiпропетровська область",
        u"Рівненська область": u"Рiвненська область",
        u"Чернігівська область": u"Чернiгiвська область",
    }
    return replacements.get(region, region)
def convert_float_to_string(number):
    """Render *number* with exactly two decimal places."""
    return '{:.2f}'.format(number)
def convert_coordinates_to_string(number):
    """Render a coordinate value with its default string representation."""
    return '{}'.format(number)
def adapt_delivery_date(date):
    """Zero out the time-of-day of an ISO 8601 date string, keeping the offset.

    '2021-05-01T13:45:10+03:00' -> '2021-05-01T00:00:00+03:00'
    """
    day_part = date[:date.index('T') + 1]
    offset_part = date[date.index('+'):]
    return day_part + '00:00:00' + offset_part
def parse_date(date_str):
    """Parse 'DD.MM.YYYY HH:MM' and return a Europe/Kiev-localized ISO string.

    The previous implementation rebuilt the datetime field by field from the
    strptime result, which was a redundant copy -- strptime already returns a
    naive datetime with all of those fields set.
    """
    naive = datetime.strptime(date_str, "%d.%m.%Y %H:%M")
    return TZ.localize(naive).isoformat()
def parse_item_date(date_str):
    """Parse 'DD.MM.YYYY' (midnight) and return a Europe/Kiev ISO string.

    strptime with a date-only format already yields a midnight datetime, so
    the previous field-by-field reconstruction was redundant.
    """
    naive = datetime.strptime(date_str, "%d.%m.%Y")
    return TZ.localize(naive).isoformat()
def convert_date_to_string(date):
    """Render an ISO date string as 'DD.MM.YYYY HH:MM'."""
    parsed = dateutil.parser.parse(date)
    return parsed.strftime("%d.%m.%Y %H:%M")
def convert_item_date_to_string(date):
    """Render an ISO date string as 'DD.MM.YYYY' (date only)."""
    parsed = dateutil.parser.parse(date)
    return parsed.strftime("%d.%m.%Y")
def parse_complaintPeriod_date(date_string):
    """Parse 'DD.MM.YYYY HH:MM', subtract 5 minutes, return Europe/Kiev ISO.

    The 5-minute shift keeps the date safely inside the complaint period.
    The redundant field-by-field datetime reconstruction was removed.
    """
    naive = datetime.strptime(date_string, "%d.%m.%Y %H:%M") - timedelta(minutes=5)
    return TZ.localize(naive).isoformat()
def parse_complaintPeriod_endDate(date_str):
    """Parse a complaint-period end date in either of two accepted formats
    ('YYYY-MM-DD HH:MM:SS' or 'DD.MM.YYYY HH:MM') and return a Europe/Kiev
    localized ISO string.

    The redundant field-by-field datetime reconstruction was removed --
    strptime already returns the fully populated naive datetime.
    """
    if '-' in date_str:
        naive = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
    else:
        naive = datetime.strptime(date_str, "%d.%m.%Y %H:%M")
    return TZ.localize(naive).isoformat()
def capitalize_first_letter(string):
    """Return *string* with its first character upper-cased and the rest
    lower-cased (str.capitalize semantics)."""
    return string.capitalize()
def get_unit_name(name):
    """Abbreviate a full Ukrainian unit name; unknown names pass through."""
    abbreviations = {
        u'штуки': u'шт.',
        u'упаковка': u'упак.',
        u'набір': u'наб.',
        u'кілограми': u'кг.',
        u'лот': u'лот',
        u'флакон': u'флак.',
        u'Флакон': u'флак.',
    }
    return abbreviations.get(name, name)
def convert_locality(name):
    """Map a locality name to the broker's uppercase form.

    Kyiv and Dnipropetrovsk get hard-coded replacements; everything else is
    simply upper-cased.
    """
    special = {
        u"Київ": u"М.КИЇВ",
        u"Дніпропетровськ": u"ДНІПРОПЕТРОВСЬКА ОБЛАСТЬ/М.ДНІПРО",
    }
    return special.get(name, name).upper()
def convert_status(tender_status):
    """Translate a localized tender status label into its API status code.

    Raises KeyError for unknown labels.
    """
    return {
        u'Очікування пропозицій': u'active.tendering',
        u'Період аукціону': u'active.auction',
        u'Період уточнень': u'active.enquiries',
        u'Перед-кваліфікаційний період': u'active.pre-qualification',
        u'Період оскарження': u'active.pre-qualification.stand-still',
    }[tender_status]
def get_claim_status(claim_status, test_name):
    """Translate a localized claim status into its API code.

    :param test_name: unused, kept for caller compatibility.
    Raises KeyError for unknown labels.
    """
    return {
        u'Вимога': 'claim',
        u'Розглянуто': 'answered',
        u'Вирішена': 'resolved',
        u'Відхилено': 'cancelled',
        u'Відхилена': 'declined',
        u'Обробляється': 'pending',
        u'Недійсна': 'invalid',
        u'Проігнорована': 'ignored',
    }[claim_status]
def get_resolution_type(resolution):
    """Translate a localized resolution label into its API code.

    Raises KeyError for unknown labels.
    """
    return {
        u'Вирішено': 'resolved',
        u'Задоволено': 'resolved',
        u'Відхилено': 'declined',
        u'Недійсно': 'invalid',
    }[resolution]
def convert_satisfied(value):
    """Return True exactly when *value* is the localized 'Yes' (u'Так')."""
    return value == u'Так'
def get_unit(field, unit_data):
    """Split a 'code name' unit string and return the requested piece.

    :param field: 'unit.code' or 'unit.name' (KeyError otherwise)
    :param unit_data: whitespace-separated string, code first then name
    """
    parts = unit_data.split()
    code = parts[0]
    full_name = adapt_unit_name(parts[1])
    return {
        'unit.code': code,
        'unit.name': full_name,
    }[field]
def convert_type_tender(key):
    """Translate a localized procedure name into its API procurement method.

    Raises KeyError for unknown labels.
    """
    return {
        u'Відкриті торги': 'aboveThresholdUA',
        u'Відкриті торги з публікацією англ.мовою': 'aboveThresholdEU',
        u'Переговорна процедура': 'reporting',
    }[key]
def convert_data_lot(key):
    """Translate a localized currency label into its ISO code.

    Raises KeyError for unknown labels.
    """
    return {
        u'грн.': 'UAH',
    }[key]
def convert_data_feature(key):
    """Translate a localized feature scope into its API featureOf value.

    Raises KeyError for unknown labels.
    """
    return {
        u'Закупівлі': 'tenderer',
        u'Лоту': 'lot',
        u'Предмету лоту': 'item',
    }[key]
def convert_complaintID(tender_uaid, type_complaint):
    """Build a sequential complaint id '<tender_uaid>.a<N>'.

    Keeps a module-level counter that increments on every call. The original
    placed the ``global`` statement after the first assignment to the name,
    which is a SyntaxError on Python 3 ("assigned to before global
    declaration"); the declaration now comes first.

    :param type_complaint: unused, kept for caller compatibility.
    """
    global complaint_number
    if 'complaint_number' not in globals():
        complaint_number = 1
    value = '%s.a%s' % (tender_uaid, complaint_number)
    complaint_number += 1
    return value
def get_pos(featureOf):
    """Return the UI position index for a localized feature scope label.

    :raises ValueError: for unknown labels (previously any other value
        crashed with an UnboundLocalError, which hid the real problem)
    """
    positions = {
        u'Закупівлі': 1,
        u'Лоту': 2,
        u'Предмету лоту': 1,
    }
    if featureOf not in positions:
        raise ValueError(u"Unknown featureOf: {}".format(featureOf))
    return positions[featureOf]
def get_value_feature(value):
    """Convert a fractional feature value (e.g. 0.25) to a percent string ('25%').

    The fraction is scaled by 100 and truncated to an integer, matching the
    original int() conversion.
    """
    return '{}%'.format(int(value * 100))
def get_feature_xpath(field_name, feature_id):
    """Build the XPath locator for a feature form field.

    All three locators anchor on the element whose @value contains
    *feature_id*; 'description' and 'featureOf' descend from its table row.
    Raises KeyError for unknown field names.
    """
    anchor = "//*[contains(@value, '{}')]".format(feature_id)
    locators = {
        'title': anchor,
        'description': anchor + "/ancestor::tbody/tr[2]/td[2]/textarea",
        'featureOf': anchor + "/ancestor::tbody/tr[3]/td[2]//td[2]/div[1]/label",
    }
    return locators[field_name]
def convert_bid_status(value):
    """Translate a localized bid status into its API code.

    Raises KeyError for unknown labels.
    """
    return {
        u'Недійсна пропозиція': 'invalid',
    }[value]
def get_all_dates(initial_tender_data, key):
    """Return one of the tender period dates formatted as 'DD.MM.YYYY HH:MM'.

    :param key: 'EndPeriod' or 'StartDate' (both render the start date) or
        'EndDate'; anything else yields an empty string.
    """
    period = initial_tender_data.data.tenderPeriod
    start = dateutil.parser.parse(period['startDate'])
    end = dateutil.parser.parse(period['endDate'])
    fmt = "%d.%m.%Y %H:%M"
    rendered = {
        'EndPeriod': start.strftime(fmt),
        'StartDate': start.strftime(fmt),
        'EndDate': end.strftime(fmt),
    }
    return rendered.get(key, '')
def increment_identifier(data):
    """Increment the procuring entity's identifier id by one, in place.

    The id is stored as a string; leading zeros are not preserved by the
    int round-trip (matching the original behavior).
    """
    identifier = data['data']['procuringEntity']['identifier']
    identifier['id'] = str(int(identifier['id']) + 1)
def convert_cause_type(key):
    """Translate a numeric cause code (as a string) into its API cause type.

    Raises KeyError for unknown codes (note: '3' is intentionally absent).
    """
    return {
        '1': 'artContestIP',
        '2': 'noCompetition',
        '4': 'twiceUnsuccessful',
        '5': 'additionalPurchase',
        '6': 'additionalConstruction',
        '7': 'stateLegalServices',
    }[key]
| 30.785256 | 158 | 0.639771 | 1,143 | 9,605 | 5.210849 | 0.223972 | 0.044661 | 0.034755 | 0.004701 | 0.303056 | 0.238079 | 0.204668 | 0.180826 | 0.161854 | 0.161854 | 0 | 0.005233 | 0.204165 | 9,605 | 311 | 159 | 30.884244 | 0.77394 | 0.001249 | 0 | 0.168776 | 0 | 0.004219 | 0.28485 | 0.064644 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147679 | false | 0 | 0.016878 | 0.016878 | 0.299578 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceefdd4f273021acf57a211bf9db5e4727c86333 | 1,638 | py | Python | sway/tiling-indicator.py | iziGor/scripts | 0076711ab6c423d97c2dad72119fbd57e27fb250 | [
"BSD-2-Clause"
] | null | null | null | sway/tiling-indicator.py | iziGor/scripts | 0076711ab6c423d97c2dad72119fbd57e27fb250 | [
"BSD-2-Clause"
] | null | null | null | sway/tiling-indicator.py | iziGor/scripts | 0076711ab6c423d97c2dad72119fbd57e27fb250 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
""" Show split layout indicator
Usage:
./tiling-indicator.py
Suppoused to be used inside waybar or polybar.
Config example:
Waybar:
"custom/ws": {
"exec": "python -u $HOME/.config/sway/scripts/tiling-indicator-2.py 2> /dev/null
}
Polybar:
[module/layout]
type = custom/script
exec = PYTHONPATH=${XDG_CONFIG_HOME}/i3 python -u -m scripts.tiling-indicator.py 2> /dev/null
interval = 0
format = "<label>"
tail = true
label-font = 6
github :: https://github.com/iziGor
year :: 2021
"""
import i3ipc
i3 = i3ipc.Connection()
last = ''
# Font Awesome 5 Free:style=Solid
# layouts = { "tabbed": ("61bbf6", "\uf24d")
# , "stacked": ("00AA00", "\uf5fd")
# , "splitv": ("82B8DF", "\uf103")
# , "splith": ("CF4F88", "\uf101")
# }
layouts = { "tabbed": ("61bbf6", "\uf24d")
, "stacked": ("00AA00", "\uf5fd")
, "splitv": ("82B8DF", "\u2b9f")
, "splith": ("CF4F88", "\u2b9e")
}
# Material Icons
# layouts = {"tabbed":"\ue8d8", "stacked":"\ue3c7", "splitv":"\ue947", "splith":"\ue949"}
def on_event(sway, _):
global last
layout = sway.get_tree().find_focused().parent.layout
if not layout == last:
## polybar format output
# print("%{{F#{}}}{}%{{F-}}".format(*layouts.get(layout, ("888800", "?"))))
## waybar format output
print("<span color='#{}'>{}</span>".format(*layouts.get(layout, ("888800", "?"))))
last = layout
# Subscribe to events
i3.on("window::focus", on_event)
i3.on("binding", on_event)
# Start the main loop and wait for events to come in.
i3.main()
| 22.135135 | 93 | 0.582418 | 196 | 1,638 | 4.826531 | 0.581633 | 0.047569 | 0.035941 | 0.021142 | 0.173362 | 0.114165 | 0.114165 | 0.114165 | 0.114165 | 0 | 0 | 0.063467 | 0.211233 | 1,638 | 73 | 94 | 22.438356 | 0.668731 | 0.600733 | 0 | 0 | 0 | 0 | 0.201908 | 0.033386 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.117647 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cef07786e7e7ac506670e6c1114e7cf83e1eb3a0 | 5,822 | py | Python | cogs/tags.py | Chrovo/Productivity | 4bdb7eecfb8ae16b013ce58a1b0421f8f791499e | [
"MIT"
] | null | null | null | cogs/tags.py | Chrovo/Productivity | 4bdb7eecfb8ae16b013ce58a1b0421f8f791499e | [
"MIT"
] | null | null | null | cogs/tags.py | Chrovo/Productivity | 4bdb7eecfb8ae16b013ce58a1b0421f8f791499e | [
"MIT"
] | null | null | null | from typing import Optional
import discord
import asyncpg
from discord.ext import commands
from .utils.pagination import create_paginated_embed
class Tags(commands.Cog):
"""Productivity's tag system."""
def __init__(self, bot:commands.Bot) -> None:
self.bot = bot
self.emoji = "🏷️ "
async def delete_check(self, ctx:commands.Context, tag_name) -> bool:
query = """
SELECT * FROM tags
WHERE tag_name = $1 AND guild_id = $2;
"""
async with self.bot.db.acquire() as connection:
async with connection.transaction():
fetched = await connection.fetchrow(query, tag_name, ctx.guild.id)
return fetched['user_id'] == ctx.author or ctx.author.guild_permissions.manage_messages
@commands.group(invoke_without_command=True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def tag(self, ctx, *, tag:str):
"""A tag system!"""
async with self.bot.db.acquire() as connection:
async with connection.transaction():
try:
query = """
SELECT * FROM tags
WHERE tag_name = $1 AND guild_id = $2;
"""
tag = await connection.fetchrow(query, tag, ctx.guild.id)
return await ctx.send(tag['tag_content'])
except TypeError:
return await ctx.send("Tag not found.")
@tag.command(description="Create a tag!", aliases=['add'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def create(self, ctx, name, *, content):
try:
query = """
INSERT INTO tags (user_id, guild_id, tag_name, tag_content)
VALUES ($1, $2, $3, $4);
"""
await self.bot.db.execute(query, ctx.author.id, ctx.guild.id, name, content)
await ctx.send("Succesfully created the tag!")
except Exception as e:
await ctx.send(e)
await ctx.send("An error has occurred whilst creating the tag")
@tag.command(description="Start your use of creating tags")
@commands.cooldown(1, 5, commands.BucketType.user)
async def start(self, ctx):
try:
query = """
INSERT INTO tag_users (user_id, username)
VALUES ($1, $2);
"""
await self.bot.db.execute(query, ctx.author.id, ctx.author.name)
await ctx.send("Successfully started your use of our tag system!")
except Exception:
await ctx.send("You are already in our database!")
@tag.command(description="Delete a tag!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete(self, ctx, *, tag:str):
check = await self.delete_check(ctx, tag)
if check:
try:
query = """
DELETE FROM tags
WHERE tag_name = $1 AND guild_id = $2;
"""
await self.bot.db.execute(query, tag, ctx.guild.id)
await ctx.send("Successfully deleted tag!")
except:
await ctx.send("An error has occurred while attempting to delete the tag.")
else:
await ctx.send("You do not have permission to delete this tag!")
@commands.command(description="Look at all of the tags a member has!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def tags(self, ctx, member:Optional[discord.Member]=None):
member = member or ctx.author
async with self.bot.db.acquire() as connection:
async with connection.transaction():
query = """
SELECT * FROM tags
WHERE user_id = $1 AND guild_id = $2;
"""
tags = await connection.fetch(query, member.id, ctx.guild.id)
paginate = create_paginated_embed(ctx, tags, 'tag_name', f"{member}'s tags", member.avatar_url, member.name)
await paginate.start(ctx)
@tag.command(description="Edit a tag!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def edit(self, ctx, old_tag, new_name, *, new_content):
query = """
UPDATE tags
SET tag_name = $1, tag_content = $2
WHERE user_id = $3 AND tag_name = $4 AND guild_id = $5;
"""
try:
await self.bot.db.execute(query, new_name, new_content, ctx.author.id, old_tag, ctx.guild.id)
return await ctx.send("Successfully edited tag!")
except Exception:
return await ctx.send(
"""
An error occurred while editing the tag,
this is likely because u dont own this tag or it doesnt exist.
"""
)
@tag.command(description="View information about a tag!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def info(self, ctx, *, tag:str):
async with self.bot.db.acquire() as connection:
async with connection.transaction():
query = """
SELECT * FROM tags
WHERE guild_id = $1 AND tag_name = $2;
"""
try:
tag_info = await connection.fetchrow(query, ctx.guild.id, tag)
owner = ctx.guild.get_member(tag_info['user_id'])
embed = discord.Embed(title=tag_info['tag_name'])
embed.add_field(name="Owner", value=owner.mention)
embed.set_author(name=owner, icon_url=owner.avatar_url)
return await ctx.send(embed=embed)
except TypeError:
return await ctx.send("Tag not found.")
def setup(bot:commands.Bot):
bot.add_cog(Tags(bot)) | 39.605442 | 124 | 0.5663 | 708 | 5,822 | 4.577684 | 0.220339 | 0.030238 | 0.051836 | 0.038877 | 0.379821 | 0.347115 | 0.339093 | 0.312249 | 0.293119 | 0.206726 | 0 | 0.008899 | 0.324459 | 5,822 | 147 | 125 | 39.605442 | 0.814645 | 0.004466 | 0 | 0.416667 | 0 | 0 | 0.251069 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.041667 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cef53f21d6ccfd1533b28e30e6717c9396761c37 | 4,027 | py | Python | terminalgame/world.py | naslundx/terminalgame | d855ec33ff8a057b1308ad30c54f138343baf56f | [
"MIT"
] | null | null | null | terminalgame/world.py | naslundx/terminalgame | d855ec33ff8a057b1308ad30c54f138343baf56f | [
"MIT"
] | null | null | null | terminalgame/world.py | naslundx/terminalgame | d855ec33ff8a057b1308ad30c54f138343baf56f | [
"MIT"
] | null | null | null | import curses
from contextlib import contextmanager
from time import sleep
from typing import List, Tuple, TYPE_CHECKING
from .actions import Action
from .properties import Property
if TYPE_CHECKING:
from .object import Object
class World:
class __World:
def __init__(self, fps: int, render: bool = True):
self.fps = fps
self._objects: List["Object"] = []
self._draw_queue: List[Tuple[int, int, str]] = []
self.running = True
self._window = None
self._height, self._width = 50, 80 # s.getmaxyx()
self._key = None
if render:
_ = curses.initscr()
curses.curs_set(0)
self._window = curses.newwin(self._height, self._width, 0, 0)
self._window.keypad(True)
self._window.timeout(1000 // self.fps)
def register(self, obj: "Object"):
assert obj not in self._objects
self._objects.append(obj)
self._draw_queue.append((obj.x, obj.y, obj.sign))
def get_properties(self, x: int, y: int) -> List[Property]:
for o in self._objects:
if o.xy == (x, y) and not o.is_destroyed:
return o.properties[:]
return []
def draw(self):
while self._draw_queue:
x, y, s = self._draw_queue.pop()
if self._window:
if x in range(0, self.width) and y in range(0, self.height):
self._window.addch(y, x, s)
else:
print(x, y, s)
def tick(self):
# Handle keypress mapping
key = self.keypress
if key:
for obj in (o for o in self._objects if o.mapping):
if key in obj.mapping:
new_x, new_y = obj.x, obj.y
action = obj.mapping[key]
if action == Action.MOVE_UP:
new_y -= 1
if action == Action.MOVE_DOWN:
new_y += 1
if action == Action.MOVE_LEFT:
new_x -= 1
if action == Action.MOVE_RIGHT:
new_x += 1
if Property.SOLID not in self.get_properties(new_x, new_y):
obj.x, obj.y = new_x, new_y
# Update draw queue
for obj in self._objects:
if obj.is_destroyed:
self._draw_queue.append((obj._oldx, obj._oldy, " "))
elif obj.has_moved:
self._draw_queue.append((obj._oldx, obj._oldy, " "))
self._draw_queue.append((obj.x, obj.y, obj.sign))
obj.tick()
# Render
self.draw()
# Remove destroyed objects
self._objects = [o for o in self._objects if not o.is_destroyed]
# Get keypress
if self._window:
self._key = self._window.getch()
else:
sleep(1.0 / self.fps)
self._key = None
return self.running
def quit(self):
self.running = False
if self._window:
curses.endwin()
self._window = False
@property
def width(self):
return self._width
@property
def height(self):
return self._height
@property
def keypress(self):
return self._key if self._key != -1 else None
instance = None
def __init__(self, *args, **kwargs):
assert not World.instance
World.instance = World.__World(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.instance, name)
@contextmanager
def renderer(self):
    """Context manager guaranteeing curses teardown via quit() on exit."""
    try:
        yield self
    finally:
        self.quit()
| 31.217054 | 83 | 0.48746 | 454 | 4,027 | 4.132159 | 0.231278 | 0.053305 | 0.048507 | 0.040512 | 0.15565 | 0.145522 | 0.145522 | 0.088486 | 0.036247 | 0.036247 | 0 | 0.008591 | 0.421902 | 4,027 | 128 | 84 | 31.460938 | 0.797251 | 0.024584 | 0 | 0.141414 | 0 | 0 | 0.003571 | 0 | 0 | 0 | 0 | 0 | 0.020202 | 1 | 0.121212 | false | 0 | 0.070707 | 0.040404 | 0.292929 | 0.010101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cef7c80ce2c92bb1a2333400473042e3222f6983 | 1,012 | py | Python | data/data-pipeline/data_pipeline/etl/constants.py | vim-usds/justice40-tool | 6691df3e318b531b0e05454a79b8560b7d307b36 | [
"CC0-1.0"
] | null | null | null | data/data-pipeline/data_pipeline/etl/constants.py | vim-usds/justice40-tool | 6691df3e318b531b0e05454a79b8560b7d307b36 | [
"CC0-1.0"
] | null | null | null | data/data-pipeline/data_pipeline/etl/constants.py | vim-usds/justice40-tool | 6691df3e318b531b0e05454a79b8560b7d307b36 | [
"CC0-1.0"
] | null | null | null | DATASET_LIST = [
{
"name": "tree_equity_score",
"module_dir": "tree_equity_score",
"class_name": "TreeEquityScoreETL",
},
{
"name": "census_acs",
"module_dir": "census_acs",
"class_name": "CensusACSETL",
},
{
"name": "ejscreen",
"module_dir": "ejscreen",
"class_name": "EJScreenETL",
},
{
"name": "housing_and_transportation",
"module_dir": "housing_and_transportation",
"class_name": "HousingTransportationETL",
},
{
"name": "hud_housing",
"module_dir": "hud_housing",
"class_name": "HudHousingETL",
},
{
"name": "calenviroscreen",
"module_dir": "calenviroscreen",
"class_name": "CalEnviroScreenETL",
},
{
"name": "hud_recap",
"module_dir": "hud_recap",
"class_name": "HudRecapETL",
},
]
CENSUS_INFO = {
"name": "census",
"module_dir": "census",
"class_name": "CensusETL",
}
| 23.534884 | 51 | 0.528656 | 82 | 1,012 | 6.134146 | 0.329268 | 0.143141 | 0.059642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.302372 | 1,012 | 42 | 52 | 24.095238 | 0.712465 | 0 | 0 | 0 | 0 | 0 | 0.505929 | 0.075099 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cefa905aac2153e51d910363e7adb666340184b1 | 1,955 | py | Python | e2e/test_get.py | sturzl/guet | b8c453f07968b689b303e20e7a31b405c02c54ef | [
"Apache-2.0"
] | null | null | null | e2e/test_get.py | sturzl/guet | b8c453f07968b689b303e20e7a31b405c02c54ef | [
"Apache-2.0"
] | null | null | null | e2e/test_get.py | sturzl/guet | b8c453f07968b689b303e20e7a31b405c02c54ef | [
"Apache-2.0"
] | null | null | null | from e2e import DockerTest
class TestGet(DockerTest):
    """End-to-end tests for the `guet get` command, run inside Docker.

    Each test queues guet/git commands, executes them in the container via
    execute(), then asserts on specific lines of the captured log output.
    """

    def test_get_current_prints_currently_set_committers(self):
        self.guet_init()
        self.git_init()
        self.guet_add('initials1', 'name1', 'email1')
        self.guet_add('initials2', 'name2', 'email2')
        self.guet_start()
        self.guet_set(['initials1', 'initials2'])
        self.guet_get_current()
        self.save_file_content('.guet/errors')
        self.execute()
        # Log indices skip the output of the setup commands queued above.
        self.assert_text_in_logs(5, 'Currently set committers')
        self.assert_text_in_logs(6, 'initials1 - name1 <email1>')
        self.assert_text_in_logs(7, 'initials2 - name2 <email2>')

    def test_get_committers_prints_all_committers_on_the_system(self):
        self.guet_init()
        self.guet_add('initials1', 'name1', 'email1')
        self.guet_add('initials2', 'name2', 'email2')
        self.guet_get_committers()
        self.save_file_content('.guet/errors')
        self.execute()
        self.assert_text_in_logs(0, 'All committers')
        self.assert_text_in_logs(1, 'initials1 - name1 <email1>')
        self.assert_text_in_logs(2, 'initials2 - name2 <email2>')

    def test_get_prints_error_message_if_trying_to_run_before_guet_init(self):
        # No guet_init() here on purpose: get must refuse to run.
        self.guet_get_committers()
        self.execute()
        self.assert_text_in_logs(0, ('guet has not been initialized yet! ' +
                                     'Please do so by running the command "guet init".'))

    def test_prints_help_message(self):
        self.guet_init()
        self.guet_get_committers(help=True)
        self.execute()
        self.assert_text_in_logs(0, 'usage: guet get <identifier> [-flag, ...]')
        self.assert_text_in_logs(2, 'Get currently set information.')
        self.assert_text_in_logs(4, 'Valid Identifier')
        self.assert_text_in_logs(6, '\tcurrent - lists currently set committers')
        self.assert_text_in_logs(7, '\tcommitters - lists all committers')
| 38.333333 | 89 | 0.658824 | 253 | 1,955 | 4.750988 | 0.29249 | 0.086522 | 0.139767 | 0.159734 | 0.618136 | 0.556572 | 0.413478 | 0.413478 | 0.222962 | 0.222962 | 0 | 0.025675 | 0.223018 | 1,955 | 50 | 90 | 39.1 | 0.765635 | 0 | 0 | 0.384615 | 0 | 0 | 0.261381 | 0 | 0 | 0 | 0 | 0 | 0.307692 | 1 | 0.102564 | false | 0 | 0.025641 | 0 | 0.153846 | 0.102564 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cefff0cd7583924688b44d3f4da180ddf1bf3140 | 1,813 | py | Python | lesion_tool/waimea.py | alaurent4/nighres | ffb4a478a224190ffe0112f7e4d214ad6825716e | [
"Apache-2.0"
] | null | null | null | lesion_tool/waimea.py | alaurent4/nighres | ffb4a478a224190ffe0112f7e4d214ad6825716e | [
"Apache-2.0"
] | null | null | null | lesion_tool/waimea.py | alaurent4/nighres | ffb4a478a224190ffe0112f7e4d214ad6825716e | [
"Apache-2.0"
] | 1 | 2019-01-21T10:53:38.000Z | 2019-01-21T10:53:38.000Z | #!/usr/bin/env python
"""
"""
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
import xml.dom.minidom
import re
import sys
import getopt
import os
from time import gmtime, strftime
from nipype import config, logging
from nighres.lesion_tool.lesion_pipeline import Lesion_extractor
def main():
    """Parse command-line flags and run the lesion-extraction workflow.

    Flags (all required):
        -n  workflow name
        -d  base working directory for the pipeline
        -s  subject identifier
        -f  FreeSurfer subjects directory
        -a  atlas
        -l  labels

    Exits with status 2 on bad or missing arguments; re-raises any error
    coming out of the pipeline run after logging it.
    """
    usage = ('waimea.py -n <directory> -d <base_directory> -s <subject> '
             '-f <freesurfer dir> -a <atlas> -l <labels>')
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "n:d:s:f:a:l:")
    except getopt.GetoptError as err:
        print(err)
        print(usage)
        sys.exit(2)

    options = dict(opts)
    # Bug fix: the original only checked that at least 4 options were given
    # and then read possibly-unbound locals (NameError when a flag was
    # missing). Require all six flags explicitly.
    missing = [flag for flag in ('-n', '-d', '-s', '-f', '-a', '-l')
               if flag not in options]
    if missing:
        print(usage)
        sys.exit(2)

    wf = Lesion_extractor(wf_name=options['-n'],
                          base_dir=options['-d'],
                          subjects=[options['-s']],
                          atlas=options['-a'],
                          fs_subjects_dir=options['-f'],
                          labels=options['-l'])

    # Log to a file inside the workflow's base directory.
    config.update_config({'logging': {'log_directory': wf.base_dir,
                                      'log_to_file': True}})
    logging.update_logging(config)
    config.set('execution', 'job_finished_timeout', '20.0')
    wf.config['execution'] = {'job_finished_timeout': '10.0'}

    try:
        wf.run()
    except Exception:
        print('Error! Pipeline exited ')
        raise


if __name__ == "__main__":
    main()
| 29.241935 | 117 | 0.529509 | 222 | 1,813 | 4.18018 | 0.391892 | 0.037716 | 0.053879 | 0.030172 | 0.172414 | 0.172414 | 0.172414 | 0.172414 | 0.172414 | 0.172414 | 0 | 0.008382 | 0.341975 | 1,813 | 62 | 118 | 29.241935 | 0.769489 | 0.02096 | 0 | 0.12 | 0 | 0.04 | 0.199321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.2 | 0 | 0.22 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3002f09ad0b6466dc363ed3ed13747fd93bb53e6 | 2,208 | py | Python | system/__init__.py | JHUAPL/meta-system | d3e80e50d64e1a9e83d81efbcb8de1ec9cc34e03 | [
"Apache-2.0"
] | 5 | 2021-07-30T00:59:59.000Z | 2022-03-23T16:52:46.000Z | system/__init__.py | JHUAPL/meta-system | d3e80e50d64e1a9e83d81efbcb8de1ec9cc34e03 | [
"Apache-2.0"
] | null | null | null | system/__init__.py | JHUAPL/meta-system | d3e80e50d64e1a9e83d81efbcb8de1ec9cc34e03 | [
"Apache-2.0"
] | null | null | null | # **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **********************************************************************
from flask import Flask
from shared.config import config
from shared.log import logger
from system.extensions import FlaskExtensions, JobManagerClient, DockerClient
from system.job_queue_manager import job_queue_watchdog
# Re-export the shared Flask extension singletons for convenient module-level
# access; they are bound to the app in FlaskApp.__init__ via init_app().
cors = FlaskExtensions.cors
mail = FlaskExtensions.mail
mongodb = FlaskExtensions.mongodb
jwt = FlaskExtensions.jwt
bcrypt = FlaskExtensions.bcrypt
class FlaskApp(object):
    """Application factory: builds the Flask app and wires up all extensions."""

    def __init__(self):
        self.app = Flask(__name__, static_folder=config.STATIC_DIR, static_url_path="")
        self.app.config.update(config.dict())
        # Bind the shared extension singletons to this app instance.
        bcrypt.init_app(self.app)
        jwt.init_app(self.app)
        mongodb.init_app(self.app)
        mail.init_app(self.app)
        cors.init_app(self.app)
        # Side-effect constructors/calls: set up the Docker and job-manager
        # clients and the job-queue watchdog (presumably background
        # monitoring — confirm in system.job_queue_manager).
        DockerClient()
        JobManagerClient()
        job_queue_watchdog()
        self.register_routes()

    def register_routes(self):
        """Register every API blueprint; imports are local to avoid cycles."""
        from system.api.web import web_bp
        self.app.register_blueprint(web_bp)
        from system.api.info import info_bp
        self.app.register_blueprint(info_bp)
        from system.api.database import database_bp
        self.app.register_blueprint(database_bp)
        from system.api.jobs import jobs_bp
        self.app.register_blueprint(jobs_bp)
        from system.api.results import results_bp
        self.app.register_blueprint(results_bp)
| 32.955224 | 87 | 0.682065 | 281 | 2,208 | 5.217082 | 0.44484 | 0.057299 | 0.037517 | 0.047749 | 0.088677 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004452 | 0.186141 | 2,208 | 66 | 88 | 33.454545 | 0.811352 | 0.379076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.294118 | 0 | 0.382353 | 0.147059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30050dc99f412b59d4c595ff2aeb87a711b709c5 | 1,262 | py | Python | main.py | hakierspejs/corononews | f1103da57d5c39694649bf7d7ba7748541dcfbe0 | [
"WTFPL"
] | null | null | null | main.py | hakierspejs/corononews | f1103da57d5c39694649bf7d7ba7748541dcfbe0 | [
"WTFPL"
] | null | null | null | main.py | hakierspejs/corononews | f1103da57d5c39694649bf7d7ba7748541dcfbe0 | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
import flask
import requests
import lxml.html
import logging
app = flask.Flask(__name__)
LOGGER = logging.getLogger(__name__)
# Base URL used both to fetch the front page and to build comment links.
HN_BASE_URL = 'https://news.ycombinator.com/'
def has_virus(url):
    """Return True if *url* should be filtered out.

    A URL is "infected" when it is not a plain http(s) link, or when the
    fetched page body mentions a virus-related keyword.
    """
    if not url.startswith(('http://', 'https://')):
        return True
    # Bug fix: the original request had no timeout, so one slow site could
    # hang the whole page render. A Timeout now propagates like any other
    # requests error did before.
    body = requests.get(url, timeout=10).text.lower()
    return any(word in body for word in ('covid', 'virus'))
@app.route('/')
def main():
    """Render the HN front page as an HTML list, filtering virus stories."""
    h = lxml.html.fromstring(requests.get(HN_BASE_URL).text)
    ret = '<ol>'
    # Skip the first <tr id=...> match — assumed not to be a story row;
    # TODO confirm against the live page markup.
    for n, row in enumerate(h.xpath('//tr [@id]')[1:]):
        story = row.xpath('.//a [@class="storylink"]').pop()
        LOGGER.info('%d: %s', n, story.get('href'))
        # The comments link lives in the row following the story row.
        c_row = row.getnext()
        comments = c_row.xpath('.//a [contains(@href, "item?id=")]')[-1]
        comments_url = HN_BASE_URL + comments.get('href')
        if has_virus(story.get('href')) or has_virus(comments_url):
            continue
        ret += f'''
<li>
<a href="{story.get("href")}">{story.text}</a>
(<a href="{comments_url}">{comments.text}</a>)
</li>'''
    return ret
if __name__ == '__main__':
    # Running directly: enable INFO logging and listen on all interfaces.
    logging.basicConfig(level='INFO')
    app.run(host='0.0.0.0')
| 28.681818 | 72 | 0.568938 | 173 | 1,262 | 3.976879 | 0.433526 | 0.040698 | 0.039244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006244 | 0.23851 | 1,262 | 43 | 73 | 29.348837 | 0.709677 | 0.015848 | 0 | 0.055556 | 0 | 0 | 0.262691 | 0.068493 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3007056835a3f4cdbc36d9cf5d7aec07fbd6a6ae | 7,134 | py | Python | code/func/func.py | lindenmp/neurodev_long | d6efc6b2e212bc6fc0669c80efcfa0b67d1e4b06 | [
"MIT"
] | null | null | null | code/func/func.py | lindenmp/neurodev_long | d6efc6b2e212bc6fc0669c80efcfa0b67d1e4b06 | [
"MIT"
] | 5 | 2020-03-24T17:56:29.000Z | 2021-12-13T20:35:48.000Z | code/func/func.py | lindenmp/neurodev_long | d6efc6b2e212bc6fc0669c80efcfa0b67d1e4b06 | [
"MIT"
] | null | null | null | # Functions for project: NormativeNeuroDev_Longitudinal
# Linden Parkes, 2019
# lindenmp@seas.upenn.edu
from IPython.display import clear_output
import numpy as np
import scipy as sp
from scipy import stats
import pandas as pd
from statsmodels.stats import multitest
def get_cmap(which_type = 'qual1', num_classes = 8):
    # Returns a nice set of colors to make a nice colormap using the color schemes
    # from http://colorbrewer2.org/
    #
    # The online tool, colorbrewer2, is copyright Cynthia Brewer, Mark Harrower and
    # The Pennsylvania State University.
    #
    # Parameters:
    #   which_type: name of the palette ('linden', 'pair', 'qual1', 'qual2',
    #               'seq_red', 'seq_blu', 'redblu_pair', 'yeo17',
    #               'yeo17_downsampled')
    #   num_classes: maximum number of colors returned
    # Returns an (n, 3) array of RGB values scaled to [0, 1].

    if which_type == 'linden':
        cmap_base = np.array([[255,105,97],[97,168,255],[178,223,138],[117,112,179],[255,179,71]])
    elif which_type == 'pair':
        cmap_base = np.array([[124,230,199],[255,169,132]])
    elif which_type == 'qual1':
        cmap_base = np.array([[166,206,227],[31,120,180],[178,223,138],[51,160,44],[251,154,153],[227,26,28],
                            [253,191,111],[255,127,0],[202,178,214],[106,61,154],[255,255,153],[177,89,40]])
    elif which_type == 'qual2':
        cmap_base = np.array([[141,211,199],[255,255,179],[190,186,218],[251,128,114],[128,177,211],[253,180,98],
                            [179,222,105],[252,205,229],[217,217,217],[188,128,189],[204,235,197],[255,237,111]])
    elif which_type == 'seq_red':
        cmap_base = np.array([[255,245,240],[254,224,210],[252,187,161],[252,146,114],[251,106,74],
                            [239,59,44],[203,24,29],[165,15,21],[103,0,13]])
    elif which_type == 'seq_blu':
        cmap_base = np.array([[247,251,255],[222,235,247],[198,219,239],[158,202,225],[107,174,214],
                            [66,146,198],[33,113,181],[8,81,156],[8,48,107]])
    elif which_type == 'redblu_pair':
        cmap_base = np.array([[222,45,38],[49,130,189]])
    elif which_type == 'yeo17':
        cmap_base = np.array([[97,38,107], # VisCent
                            [194,33,39], # VisPeri
                            [79,130,165], # SomMotA
                            [44,181,140], # SomMotB
                            [75,148,72], # DorsAttnA
                            [23,116,62], # DorsAttnB
                            [149,77,158], # SalVentAttnA
                            [222,130,177], # SalVentAttnB
                            [75,87,61], # LimbicA
                            [149,166,110], # LimbicB
                            [210,135,47], # ContA
                            [132,48,73], # ContB
                            [92,107,131], # ContC
                            [218,221,50], # DefaultA
                            [175,49,69], # DefaultB
                            [41,38,99], # DefaultC
                            [53,75,158] # TempPar
                            ])
    elif which_type == 'yeo17_downsampled':
        cmap_base = np.array([[97,38,107], # VisCent
                            [79,130,165], # SomMotA
                            [75,148,72], # DorsAttnA
                            [149,77,158], # SalVentAttnA
                            [75,87,61], # LimbicA
                            [210,135,47], # ContA
                            [218,221,50], # DefaultA
                            [53,75,158] # TempPar
                            ])

    # Truncate to the requested number of classes, then scale to [0, 1].
    if cmap_base.shape[0] > num_classes: cmap = cmap_base[0:num_classes]
    else: cmap = cmap_base

    cmap = cmap / 255

    return cmap
def update_progress(progress, my_str = ''):
    """Render a 20-character text progress bar in a notebook cell.

    *progress* is clamped to [0, 1]; non-numeric input renders as 0%.
    """
    bar_length = 20
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
    progress = min(max(progress, 0), 1)
    filled = int(round(bar_length * progress))
    clear_output(wait = True)
    bar = "#" * filled + "-" * (bar_length - filled)
    print(my_str + " Progress: [{0}] {1:.1f}%".format(bar, progress * 100))
def get_synth_cov(df, cov = 'scanageYears', stp = 1):
    """Build an evenly spaced synthetic covariate column spanning df[cov].

    Returns a column vector from min(df[cov]) up to (excluding) max(df[cov])
    in steps of *stp*.
    """
    lo = np.min(df[cov])
    hi = np.max(df[cov])
    return np.arange(lo, hi, stp).reshape(-1, 1)
def run_corr(df_X, df_y, typ = 'spearmanr'):
    """Correlate df_X against every column of df_y.

    Returns a DataFrame indexed by df_y's columns with 'coef' and 'p'
    columns; rows stay unset (NaN) for unknown *typ* values.
    """
    corr_funcs = {'spearmanr': sp.stats.spearmanr, 'pearsonr': sp.stats.pearsonr}
    df_corr = pd.DataFrame(index = df_y.columns, columns = ['coef', 'p'])
    for col in df_corr.index:
        if typ in corr_funcs:
            df_corr.loc[col] = corr_funcs[typ](df_X, df_y[col])
    return df_corr
def get_fdr_p(p_vals):
    """Benjamini-Hochberg FDR-correct a flat vector of p-values."""
    # multipletests returns (reject, pvals_corrected, alphacSidak, alphacBonf);
    # only the corrected p-values are needed here.
    _, p_fdr, _, _ = multitest.multipletests(p_vals, alpha = 0.05, method = 'fdr_bh')
    return p_fdr
def get_fdr_p_df(p_vals):
    """Apply get_fdr_p over a whole DataFrame, preserving index and columns."""
    corrected = get_fdr_p(p_vals.values.flatten()).reshape(p_vals.shape)
    return pd.DataFrame(corrected, index = p_vals.index, columns = p_vals.columns)
def mark_outliers(x, thresh = 3, c = 1.4826):
    """Flag values more than *thresh* robust (MAD-based) deviations from the median.

    *c* is the consistency constant that scales the MAD to a normal-equivalent
    standard deviation. Returns a boolean array.
    """
    center = np.median(x)
    mad = np.median(abs(x - center)) / c
    half_width = mad * thresh
    return (x > center + half_width) | (x < center - half_width)
def perc_dev(Z, thr = 2.6, sign = 'abs'):
    """Percentage of features per subject whose deviation exceeds *thr*.

    Assumes subjects on axis 0 and features on axis 1 of Z; *sign* selects
    absolute, positive, or negative deviations.
    """
    if sign == 'abs':
        supra = np.abs(Z) > thr
    elif sign == 'pos':
        supra = Z > thr
    elif sign == 'neg':
        supra = Z < -thr
    # fraction of supra-threshold features per subject, as a percentage
    return np.sum(supra, axis = 1) / Z.shape[1] * 100
def evd(Z, thr = 0.01, sign = 'abs'):
    """Extreme value deviation: trimmed mean of each subject's top tail.

    Keeps the most extreme int(n_features * thr) values per subject
    (features on axis 1, subjects on axis 0) and returns their 10%-trimmed
    mean.
    """
    n_feat = Z.shape[1]
    # Bug fix: the original used np.int, a deprecated alias removed in
    # NumPy 1.24; the builtin int is the documented replacement.
    l = int(n_feat * thr)
    if sign == 'abs':
        T = np.sort(np.abs(Z), axis = 1)[:, n_feat - l:n_feat]
    elif sign == 'pos':
        T = np.sort(Z, axis = 1)[:, n_feat - l:n_feat]
    elif sign == 'neg':
        T = np.sort(Z, axis = 1)[:, :l]
    return sp.stats.trim_mean(T, 0.1, axis = 1)
def summarise_network(df, roi_loc, network_idx, metrics = ('ct',), method = 'mean'):
    """Aggregate region-level columns of df into per-network summaries.

    For each metric, columns matching the metric name are grouped by
    network_idx and reduced with *method* ('mean', 'median', or 'max').
    For 'ct' the grouping labels are masked to roi_loc == 1 first
    (presumably cortical-only ROIs — confirm with roi_loc's definition).
    Output columns are named '<metric>_<network id>'.
    """
    df_out = pd.DataFrame()
    for metric in metrics:
        if metric == 'ct':
            # 'ct' columns only exist where roi_loc == 1, so the network
            # labels are masked to match the filtered columns.
            if method == 'median': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).median()
            if method == 'mean': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).mean()
            if method == 'max': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).max()
            my_list = [metric + '_' + str(i) for i in np.unique(network_idx[roi_loc == 1]).astype(int)]
            df_tmp.columns = my_list
        else:
            if method == 'median': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).median()
            if method == 'mean': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).mean()
            if method == 'max': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).max()
            my_list = [metric + '_' + str(i) for i in np.unique(network_idx).astype(int)]
            df_tmp.columns = my_list

        df_out = pd.concat((df_out, df_tmp), axis = 1)

    return df_out
| 36.963731 | 123 | 0.528595 | 1,008 | 7,134 | 3.619048 | 0.343254 | 0.026316 | 0.024671 | 0.037007 | 0.219298 | 0.186678 | 0.171875 | 0.157072 | 0.141173 | 0.141173 | 0 | 0.141251 | 0.31427 | 7,134 | 192 | 124 | 37.15625 | 0.604456 | 0.094477 | 0 | 0.215827 | 0 | 0 | 0.032389 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071942 | false | 0 | 0.043165 | 0 | 0.179856 | 0.007194 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3007a5e506648223a9acc7a03be0c3a03d473f6f | 15,808 | py | Python | sympy/simplify/sqrtdenest.py | goodok/sympy | de84ed2139125a755ea7b6ba91d945d9fbbe5ed9 | [
"BSD-3-Clause"
] | 2 | 2015-05-11T12:26:38.000Z | 2016-08-19T00:11:03.000Z | sympy/simplify/sqrtdenest.py | goodok/sympy | de84ed2139125a755ea7b6ba91d945d9fbbe5ed9 | [
"BSD-3-Clause"
] | null | null | null | sympy/simplify/sqrtdenest.py | goodok/sympy | de84ed2139125a755ea7b6ba91d945d9fbbe5ed9 | [
"BSD-3-Clause"
] | null | null | null | from sympy.functions import sqrt, sign, root
from sympy.core import S, Wild, sympify, Mul, Add, Expr
from sympy.core.function import expand_multinomial, expand_mul
from sympy.core.symbol import Dummy
from sympy.polys import Poly, PolynomialError
from sympy.core.function import count_ops
def _mexpand(expr):
    """Expand multinomial powers, then distribute products over sums."""
    return expand_mul(expand_multinomial(expr))
def is_sqrt(expr):
    """Return True if expr is a square root (a Pow with exponent +-1/2)."""
    if not expr.is_Pow:
        return False
    return expr.exp.is_Rational and abs(expr.exp) is S.Half
def sqrt_depth(p):
    """Return the maximum depth of any square root argument of p.

    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import sqrt_depth

    Neither of these square roots contains any other square roots
    so the depth is 1:

    >>> sqrt_depth(1 + sqrt(2)*(1 + sqrt(3)))
    1

    The sqrt(3) is contained within a square root so the depth is
    2:

    >>> sqrt_depth(1 + sqrt(2)*sqrt(1 + sqrt(3)))
    2
    """
    if p.is_Atom:
        return 0
    if p.is_Add or p.is_Mul:
        return max(sqrt_depth(arg) for arg in p.args)
    if is_sqrt(p):
        return 1 + sqrt_depth(p.base)
    return 0
def is_algebraic(p):
    """Return True if p is comprised of only Rationals or square roots
    of Rationals and algebraic operations.

    Examples
    ========
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import is_algebraic
    >>> from sympy import cos
    >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*sqrt(2))))
    True
    >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*cos(2))))
    False
    """
    if p.is_Rational:
        return True
    if p.is_Atom:
        return False
    if is_sqrt(p) or (p.is_Pow and p.exp.is_Integer):
        return is_algebraic(p.base)
    if p.is_Add or p.is_Mul:
        return all(is_algebraic(arg) for arg in p.args)
    return False
def subsets(n):
    """
    Returns all possible subsets of the set (0, 1, ..., n-1) except the
    empty set, listed in reversed lexicographical order according to binary
    representation, so that the case of the fourth root is treated last.

    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import subsets
    >>> subsets(2)
    [[1, 0], [0, 1], [1, 1]]

    """
    if n == 1:
        return [[1]]
    if n == 2:
        return [[1, 0], [0, 1], [1, 1]]
    if n == 3:
        return [[1, 0, 0], [0, 1, 0], [1, 1, 0],
                [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
    smaller = subsets(n - 1)
    with_zero = [s + [0] for s in smaller]
    with_one = [s + [1] for s in smaller]
    return with_zero + [[0] * (n - 1) + [1]] + with_one
def sqrtdenest(expr, max_iter=3):
    """Denests sqrts in an expression that contain other square roots
    if possible, otherwise returns the expr unchanged. This is based on the
    algorithms of [1].

    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import sqrtdenest
    >>> from sympy import sqrt
    >>> sqrtdenest(sqrt(5 + 2 * sqrt(6)))
    sqrt(2) + sqrt(3)

    See Also
    ========
    sympy.solvers.solvers.unrad

    References
    ==========
    [1] http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf
    [2] D. J. Jeffrey and A. D. Rich, 'Symplifying Square Roots of Square Roots
    by Denesting' (available at http://www.cybertester.com/data/denest.pdf)
    """
    expr = expand_mul(sympify(expr))
    # Iterate denesting until a fixed point or the iteration cap is reached.
    for _ in range(max_iter):
        denested = _sqrtdenest0(expr)
        if denested == expr:
            break
        expr = denested
    return expr
def _sqrt_match(p):
    """Return [a, b, r] for p.match(a + b*sqrt(r)) where, in addition to
    matching, sqrt(r) also has the maximal sqrt_depth among addends of p.

    Examples
    ========
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import _sqrt_match
    >>> _sqrt_match(1 + sqrt(2) + sqrt(2)*sqrt(3) +  2*sqrt(1+sqrt(5)))
    [1 + sqrt(2) + sqrt(6), 2, 1 + sqrt(5)]
    """
    p = _mexpand(p)
    if p.is_Number:
        # degenerate match: p = a, with b = r = 0
        res = (p, S.Zero, S.Zero)
    elif p.is_Add:
        pargs = list(p.args)
        # to make the process canonical, the argument is included in the tuple
        # so when the max is selected, it will be the largest arg having a
        # given depth
        v = [(sqrt_depth(x), x, i) for i, x in enumerate(pargs)]
        nmax = max(v)
        if nmax[0] == 0:
            # no addend contains a square root, so there is nothing to match
            res = []
        else:
            # the deepest addend becomes b*sqrt(r); the rest sum to a
            depth, _, i = nmax
            r = pargs.pop(i)
            a = Add._from_args(pargs)
            b = S.One
            if r.is_Mul:
                # split factors of r into the coefficient b (strictly
                # shallower sqrt depth) and the residual radical factors
                bv = []
                rv = []
                for x in r.args:
                    if sqrt_depth(x) < depth:
                        bv.append(x)
                    else:
                        rv.append(x)
                b = Mul._from_args(bv)
                r = Mul._from_args(rv)
            res = (a, b, r**2)
    else:
        # p is a single term: try to match b*sqrt(r) with a = 0
        b, r = p.as_coeff_Mul()
        if is_sqrt(r):
            res = (S.Zero, b, r**2)
        else:
            res = []
    return list(res)
class SqrtdenestStopIteration(StopIteration):
    """Raised internally to abort a recursive denesting attempt that cannot succeed."""
    pass
def _sqrtdenest0(expr):
    """Returns expr after denesting its arguments."""

    if is_sqrt(expr):
        n, d = expr.as_numer_denom()
        if d is S.One: # n is a square root
            if n.base.is_Add:
                args = n.base.args
                if len(args) > 2 and all((x**2).is_Integer for x in args):
                    # sum of three or more surds: try the recursive algorithm
                    try:
                        return _sqrtdenest_rec(n)
                    except SqrtdenestStopIteration:
                        pass
                # denest the addends first, then retry the outer sqrt
                expr = sqrt(_mexpand(Add(*[_sqrtdenest0(x) for x in args])))
            return _sqrtdenest1(expr)
        else:
            # denest numerator and denominator separately
            n, d = [_sqrtdenest0(i) for i in (n, d)]
            return n/d

    if isinstance(expr, Expr):
        # not a sqrt itself: recurse into the arguments
        args = expr.args
        if args:
            return expr.func(*[_sqrtdenest0(a) for a in args])
    return expr
def _sqrtdenest_rec(expr):
    """Helper that denests the square root of three or more surds.

    It returns the denested expression; if it cannot be denested it
    throws SqrtdenestStopIteration

    Algorithm: expr.base is in the extension Q_m = Q(sqrt(r_1),..,sqrt(r_k));
    split expr.base = a + b*sqrt(r_k), where `a` and `b` are on
    Q_(m-1) = Q(sqrt(r_1),..,sqrt(r_(k-1))); then a**2 - b**2*r_k is
    on Q_(m-1); denest sqrt(a**2 - b**2*r_k) and so on.
    See [1], section 6.

    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.simplify.sqrtdenest import _sqrtdenest_rec
    >>> _sqrtdenest_rec(sqrt(-72*sqrt(2) + 158*sqrt(5) + 498))
    -sqrt(10) + sqrt(2) + 9 + 9*sqrt(5)
    >>> w=-6*sqrt(55)-6*sqrt(35)-2*sqrt(22)-2*sqrt(14)+2*sqrt(77)+6*sqrt(10)+65
    >>> _sqrtdenest_rec(sqrt(w))
    -sqrt(11) - sqrt(7) + sqrt(2) + 3*sqrt(5)
    """
    from sympy.simplify.simplify import radsimp, split_surds, rad_rationalize
    if expr.base < 0:
        # factor out sqrt(-1) so the radicand is positive
        return sqrt(-1)*_sqrtdenest_rec(sqrt(-expr.base))
    a, b = split_surds(expr.base)
    if a < b:
        a, b = b, a
    c2 = _mexpand(a**2 - b**2)
    if len(c2.args) > 2:
        # c2 still contains several surds: recurse one level deeper
        a1, b1 = split_surds(c2)
        if a1 < b1:
            a1, b1 = b1, a1
        c2_1 = _mexpand(a1**2 - b1**2)
        c_1 = _sqrtdenest_rec(sqrt(c2_1))
        d_1 = _sqrtdenest_rec(sqrt(a1 + c_1))
        num, den = rad_rationalize(b1, d_1)
        c = _mexpand(d_1/sqrt(2) + num/(den*sqrt(2)))
    else:
        c = _sqrtdenest1(sqrt(c2))

    if sqrt_depth(c) > 1:
        raise SqrtdenestStopIteration
    ac = a + c
    if len(ac.args) >= len(expr.args):
        # give up when the candidate is no simpler than the input
        if count_ops(ac) >= count_ops(expr.base):
            raise SqrtdenestStopIteration
    d = sqrtdenest(sqrt(ac))
    if sqrt_depth(d) > 1:
        raise SqrtdenestStopIteration
    num, den = rad_rationalize(b, d)
    r = d/sqrt(2) + num/(den*sqrt(2))
    r = radsimp(r)
    return _mexpand(r)
def _sqrtdenest1(expr):
    """Return denested expr after denesting with simpler methods or, that
    failing, using the denester."""

    from sympy.simplify.simplify import radsimp

    if not is_sqrt(expr):
        return expr

    a = expr.base
    if a.is_Atom:
        return expr
    val = _sqrt_match(a)
    if not val:
        return expr

    a, b, r = val
    # try a quick numeric denesting
    d2 = _mexpand(a**2 - b**2*r)
    if d2.is_Rational:
        if d2.is_positive:
            z = _sqrt_numeric_denest(a, b, r, d2)
            if z is not None:
                return z
        else:
            # fourth root case
            # sqrtdenest(sqrt(3 + 2*sqrt(3))) =
            # sqrt(2)*3**(1/4)/2 + sqrt(2)*3**(3/4)/2
            dr2 = _mexpand(-d2*r)
            dr = sqrt(dr2)
            if dr.is_Rational:
                z = _sqrt_numeric_denest(_mexpand(b*r), a, r, dr2)
                if z is not None:
                    return z/root(r, 4)
    else:
        # symbolic coefficients: try the quadratic-completion heuristic
        z = _sqrt_symbolic_denest(a, b, r)
        if z is not None:
            return z

    if not is_algebraic(expr):
        return expr

    # now call to the denester
    av0 = [a, b, r, d2]
    z = _denester([radsimp(expr**2)], av0, 0, sqrt_depth(expr) - 1)[0]
    if av0[1] is None:
        # the denester flagged an unrecoverable failure
        return expr
    if z is not None:
        return z
    return expr
def _sqrt_symbolic_denest(a, b, r):
    """Given an expression, sqrt(a + b*sqrt(b)), return the denested
    expression or None.

    Algorithm:
    If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with
    (y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and
    (cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as
    sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2).

    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest
    >>> from sympy import sqrt, Symbol, Poly
    >>> from sympy.abc import x

    >>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55
    >>> _sqrt_symbolic_denest(a, b, r)
    sqrt(-2*sqrt(29) + 11) + sqrt(5)

    If the expression is numeric, it will be simplified:

    >>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2)
    >>> sqrtdenest(sqrt((w**2).expand()))
    1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3)))

    Otherwise, it will only be simplified if assumptions allow:

    >>> w = w.subs(sqrt(3), sqrt(x + 3))
    >>> sqrtdenest(sqrt((w**2).expand()))
    sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2)

    Notice that the argument of the sqrt is a square. If x is made positive
    then the sqrt of the square is resolved:

    >>> _.subs(x, Symbol('x', positive=True))
    sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2)
    """
    a, b, r = sympify([a, b, r])
    rval = _sqrt_match(r)
    if not rval:
        return None
    ra, rb, rr = rval
    if rb:
        # substitute sqrt(rr) -> (y**2 - ra)/rb and test for a perfect square
        y = Dummy('y', positive=True)
        try:
            newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y)
        except PolynomialError:
            return None
        if newa.degree() == 2:
            ca, cb, cc = newa.all_coeffs()
            cb += b
            if _mexpand(cb**2 - 4*ca*cc).equals(0):
                # discriminant vanishes: the radicand is a perfect square
                z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2)
                if z.is_number:
                    z = _mexpand(Mul._from_args(z.as_content_primitive()))
                return z
def _sqrt_numeric_denest(a, b, r, d2):
    """Helper that denest expr = a + b*sqrt(r), with d2 = a**2 - b**2*r > 0
    or returns None if not denested.
    """
    from sympy.simplify.simplify import radsimp
    depthr = sqrt_depth(r)
    d = sqrt(d2)
    vad = a + d
    # sqrt_depth(res) <= sqrt_depth(vad) + 1
    # sqrt_depth(expr) = depthr + 2
    # there is denesting if sqrt_depth(vad)+1 < depthr + 2
    # if vad**2 is Number there is a fourth root
    if sqrt_depth(vad) < depthr + 1 or (vad**2).is_Rational:
        vad1 = radsimp(1/vad)
        return (sqrt(vad/2) + sign(b)*sqrt((b**2*r*vad1/2).expand())).expand()
def _denester(nested, av0, h, max_depth_level):
    """Denests a list of expressions that contain nested square roots.

    Algorithm based on <http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf>.

    It is assumed that all of the elements of 'nested' share the same
    bottom-level radicand. (This is stated in the paper, on page 177, in
    the paragraph immediately preceding the algorithm.)

    When evaluating all of the arguments in parallel, the bottom-level
    radicand only needs to be denested once. This means that calling
    _denester with x arguments results in a recursive invocation with x+1
    arguments; hence _denester has polynomial complexity.

    However, if the arguments were evaluated separately, each call would
    result in two recursive invocations, and the algorithm would have
    exponential complexity.

    This is discussed in the paper in the middle paragraph of page 179.

    Returns a (result, subset) pair; (None, None) signals failure, and a
    failure may also be flagged to the caller by setting av0[1] = None.
    """
    from sympy.simplify.simplify import radsimp
    if h > max_depth_level:
        return None, None
    if av0[1] is None:
        return None, None
    if (av0[0] is None and
        all(n.is_Number for n in nested)): # no arguments are nested
        for f in subsets(len(nested)): # test subset 'f' of nested
            p = _mexpand(Mul(*[nested[i] for i in range(len(f)) if f[i]]))
            if f.count(1) > 1 and f[-1]:
                p = -p
            sqp = sqrt(p)
            if sqp.is_Rational:
                return sqp, f # got a perfect square so return its square root.
        # Otherwise, return the radicand from the previous invocation.
        return sqrt(nested[-1]), [0]*len(nested)
    else:
        R = None
        if av0[0] is not None:
            values = [av0[:2]]
            R = av0[2]
            nested2 = [av0[3], R]
            av0[0] = None
        else:
            # NOTE(review): on Python 3 filter() returns an iterator, so the
            # later values[-1] would fail; this code targets Python 2.
            values = filter(None, [_sqrt_match(expr) for expr in nested])
            for v in values:
                if v[2]: #Since if b=0, r is not defined
                    if R is not None:
                        if R != v[2]:
                            # inconsistent radicands: flag failure to caller
                            av0[1] = None
                            return None, None
                    else:
                        R = v[2]
            if R is None:
                # return the radicand from the previous invocation
                return sqrt(nested[-1]), [0]*len(nested)
            nested2 = [_mexpand(v[0]**2) -
                       _mexpand(R*v[1]**2) for v in values] + [R]
        d, f = _denester(nested2, av0, h + 1, max_depth_level)
        if not f:
            return None, None
        if not any(f[i] for i in range(len(nested))):
            v = values[-1]
            return sqrt(v[0] + v[1]*d), f
        else:
            p = Mul(*[nested[i] for i in range(len(nested)) if f[i]])
            v = _sqrt_match(p)
            if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested) - 1]:
                v[0] = -v[0]
                v[1] = -v[1]
            if not f[len(nested)]: #Solution denests with square roots
                vad = _mexpand(v[0] + d)
                if vad <= 0:
                    # return the radicand from the previous invocation.
                    return sqrt(nested[-1]), [0]*len(nested)
                if not(sqrt_depth(vad) < sqrt_depth(R) + 1 or
                       (vad**2).is_Number):
                    av0[1] = None
                    return None, None

                vad1 = radsimp(1/vad)
                return _mexpand(sqrt(vad/2) +
                                sign(v[1])*sqrt(_mexpand(v[1]**2*R*vad1/2))), f
            else: #Solution requires a fourth root
                s2 = _mexpand(v[1]*R) + d
                if s2 <= 0:
                    return sqrt(nested[-1]), [0]*len(nested)
                FR, s = root(_mexpand(R), 4), sqrt(s2)
                return _mexpand(s/(sqrt(2)*FR) + v[0]*FR/(sqrt(2)*s)), f
| 32.459959 | 79 | 0.546748 | 2,365 | 15,808 | 3.570402 | 0.150106 | 0.026646 | 0.004263 | 0.022383 | 0.230104 | 0.185812 | 0.128375 | 0.090123 | 0.084439 | 0.072359 | 0 | 0.037244 | 0.322305 | 15,808 | 486 | 80 | 32.526749 | 0.750957 | 0.38044 | 0 | 0.250923 | 0 | 0 | 0.000108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04797 | false | 0.00738 | 0.0369 | 0.00369 | 0.269373 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3007da11fc7ae07380226f4b542b097442751381 | 17,074 | py | Python | backend/modules/doc/views/doc.py | YouFacai/iWiki | 7a2cbb514f25b72932b0212f6165cdb426243243 | [
"MIT"
] | null | null | null | backend/modules/doc/views/doc.py | YouFacai/iWiki | 7a2cbb514f25b72932b0212f6165cdb426243243 | [
"MIT"
] | null | null | null | backend/modules/doc/views/doc.py | YouFacai/iWiki | 7a2cbb514f25b72932b0212f6165cdb426243243 | [
"MIT"
] | null | null | null | import datetime
import os
import shutil
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.db import transaction, IntegrityError
from django.db.models import Q, F
from django.http import FileResponse
from django.utils.encoding import escape_uri_path
from django.utils.translation import gettext as _
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet, GenericViewSet
from constents import DocAvailableChoices, RepoTypeChoices, UserTypeChoices
from modules.account.serializers import UserInfoSerializer
from modules.doc.models import Doc, DocVersion, DocCollaborator, Comment
from modules.doc.permissions import DocManagePermission, DocCommonPermission
from modules.doc.serializers import (
DocCommonSerializer,
DocListSerializer,
DocUpdateSerializer,
DocVersionSerializer,
DocPublishChartSerializer,
)
from modules.repo.models import Repo, RepoUser
from modules.repo.serializers import RepoSerializer
from utils.authenticators import SessionAuthenticate
from utils.exceptions import Error404, ParamsNotFound, UserNotExist, OperationError
from utils.paginations import NumPagination
from utils.throttlers import DocSearchThrottle
from utils.viewsets import ThrottleAPIView
USER_MODEL = get_user_model()
class DocManageView(ModelViewSet):
    """Document management entry point (create/update/delete, collaborators, export).

    Restricted by DocManagePermission; operates only on non-deleted documents.
    """

    queryset = Doc.objects.filter(is_deleted=False)
    serializer_class = DocCommonSerializer
    permission_classes = [
        DocManagePermission,
    ]

    def perform_create(self, serializer):
        # Return the saved instance so create() can snapshot it as a DocVersion.
        return serializer.save()

    def list(self, request, *args, **kwargs):
        """List the current user's own documents, optionally filtered by title keyword."""
        self.serializer_class = DocListSerializer
        # Fetch all documents created by the requesting user.
        sql = (
            "SELECT d.*, r.name 'repo_name' FROM `doc_doc` d "
            "JOIN `repo_repo` r ON d.repo_id=r.id "
            "JOIN `auth_user` au ON au.uid=d.creator "
            "WHERE d.creator=%s AND NOT d.is_deleted "
            "{} "
            "ORDER BY d.id DESC;"
        )
        # Optional keyword search against the document title.
        search_key = request.GET.get("searchKey", "")
        if search_key:
            sql = sql.format("AND d.title like %s")
            # Doubled percent signs survive raw-SQL parameter escaping and end
            # up as single % LIKE wildcards around the keyword.
            search_key = f"%%{search_key}%%"
            self.queryset = self.queryset.raw(sql, [request.user.uid, search_key])
        else:
            sql = sql.format("")
            self.queryset = self.queryset.raw(sql, [request.user.uid])
        return super().list(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        """Create a document and record its first version in one transaction."""
        request.data["creator"] = request.user.uid
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        with transaction.atomic():
            instance = self.perform_create(serializer)
            # Snapshot the freshly created document as its initial version.
            DocVersion.objects.create(**DocVersionSerializer(instance).data)
        return Response({"id": instance.id})

    def update(self, request, *args, **kwargs):
        """Update a document and record a new version in one transaction."""
        partial = kwargs.pop("partial", False)
        instance = self.get_object()
        serializer = DocUpdateSerializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        with transaction.atomic():
            serializer.save(update_by=request.user.uid)
            # `instance` was updated in place by serializer.save(), so this
            # snapshots the post-update state.
            DocVersion.objects.create(**DocVersionSerializer(instance).data)
        return Response({"id": instance.id})

    def destroy(self, request, *args, **kwargs):
        # Standard destroy, but answers with an empty 200 body instead of the
        # DRF default 204.
        instance = self.get_object()
        self.perform_destroy(instance)
        return Response()

    @action(detail=True, methods=["GET"])
    def list_collaborator(self, request, *args, **kwargs):
        """List the collaborators of a document."""
        instance = self.get_object()
        sql = (
            "SELECT au.* "
            "FROM `doc_collaborator` dc "
            "JOIN `doc_doc` dd ON dd.id = dc.doc_id AND dd.id = %s "
            "JOIN `auth_user` au on dc.uid = au.uid;"
        )
        collaborators = USER_MODEL.objects.raw(sql, [instance.id])
        serializer = UserInfoSerializer(collaborators, many=True)
        return Response(serializer.data)

    @action(detail=True, methods=["POST"])
    def add_collaborator(self, request, *args, **kwargs):
        """Add a collaborator to a document."""
        instance = self.get_object()
        uid = request.data.get("uid")
        # A uid is required, and the owner cannot add themselves.
        if not uid or uid == request.user.uid:
            raise OperationError()
        try:
            DocCollaborator.objects.create(doc_id=instance.id, uid=uid)
        except IntegrityError:
            # Unique constraint hit: this user is already a collaborator.
            raise OperationError(_("已添加该用户为协作者,请勿重复添加"))
        return Response()

    @action(detail=True, methods=["POST"])
    def remove_collaborator(self, request, *args, **kwargs):
        """Remove a collaborator from a document."""
        instance = self.get_object()
        uid = request.data.get("uid")
        if not uid or uid == request.user.uid:
            raise OperationError()
        DocCollaborator.objects.filter(doc_id=instance.id, uid=uid).delete()
        return Response()

    @action(detail=True, methods=["GET"])
    def edit_status(self, request, *args, **kwargs):
        """Acquire or poll the document's "being edited" cache lock.

        Returns True (and refreshes the 60-second lock) when the document is
        unlocked or locked by the caller; False when another user holds it.
        """
        instance = self.get_object()
        cache_key = f"{self.__class__.__name__}:{self.action}:{instance.id}"
        uid = cache.get(cache_key)
        if uid is None or uid == request.user.uid:
            cache.set(cache_key, request.user.uid, 60)
            return Response(True)
        else:
            return Response(False)

    @action(detail=True, methods=["GET"])
    def export(self, request, *args, **kwargs):
        """Export a document plus its comments as a Markdown file attachment."""
        instance = self.get_object()
        sql = (
            "SELECT dc.*, au.username FROM `doc_comment` dc "
            "JOIN `auth_user` au ON au.uid=dc.creator "
            "WHERE dc.doc_id=%s AND NOT dc.is_deleted "
            "ORDER BY dc.id DESC;"
        )
        comments = Comment.objects.raw(sql, [instance.id])
        # Per-user, per-document scratch directory; stale exports are replaced.
        file_dir = os.path.join(
            settings.BASE_DIR, "tmp", "doc", request.user.uid, str(instance.id)
        )
        if os.path.exists(file_dir):
            shutil.rmtree(file_dir)
        os.makedirs(file_dir)
        # Strip spaces and slashes that would break the filesystem path.
        filename = "{}.md".format(instance.title.replace(" ", "").replace("/", ""))
        file_path = os.path.join(file_dir, filename)
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(instance.content)
            # Each comment is appended after a horizontal-rule separator.
            for comment in comments:
                file.write("\n\n---\n\n")
                file.write(comment.content)
        # Re-open read-only for streaming; FileResponse closes the handle.
        file = open(file_path, "rb")
        response = FileResponse(file)
        response["Content-Type"] = "application/octet-stream"
        response[
            "Content-Disposition"
        ] = f"attachment; filename={escape_uri_path(filename)}"
        return response
class DocCommonView(GenericViewSet):
    """Common read entry point for documents (published, non-deleted only)."""

    queryset = Doc.objects.filter(is_deleted=False, is_publish=True)
    serializer_class = DocListSerializer
    permission_classes = [DocCommonPermission]
    authentication_classes = [SessionAuthenticate]

    def list(self, request, *args, **kwargs):
        """List a repository's documents: public ones plus the caller's own."""
        repo_id = request.GET.get("repo_id", None)
        # No repo parameter: nothing to list.
        if repo_id is None:
            raise Error404()
        # Verify the target repository exists before querying its documents.
        try:
            Repo.objects.get(id=repo_id, is_deleted=False)
        except Repo.DoesNotExist:
            raise Error404()
        # Published docs of the repo that are public or owned by the caller;
        # pinned docs (doc_pin.in_use) are excluded here — load_pin_doc serves them.
        sql = (
            "SELECT d.*, au.username creator_name, r.name repo_name "
            "FROM `doc_doc` d "
            "JOIN `repo_repo` r ON r.id=d.repo_id "
            "LEFT JOIN `doc_pin` dp ON dp.doc_id=d.id AND dp.in_use "
            "LEFT JOIN `auth_user` au ON au.uid=d.creator "
            "WHERE NOT d.`is_deleted` AND d.`is_publish` "
            "AND dp.in_use IS NULL "
            "AND d.repo_id = %s "
            "AND (d.available = %s OR d.creator = %s) "
            "AND d.title like %s "
            "ORDER BY d.id DESC"
        )
        search_key = request.GET.get("searchKey")
        # With no keyword, "%%" alone becomes a match-everything LIKE pattern.
        search_key = f"%%{search_key}%%" if search_key else "%%"
        queryset = self.queryset.raw(
            sql, [repo_id, DocAvailableChoices.PUBLIC, request.user.uid, search_key]
        )
        page = self.paginate_queryset(queryset)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)

    def retrieve(self, request, *args, **kwargs):
        """Fetch document details and bump its page-view counter."""
        instance = self.get_object()
        # F() expression: server-side increment avoids read-modify-write races.
        Doc.objects.filter(id=instance.id).update(pv=F("pv") + 1)
        instance.pv += 1  # keep the in-memory copy consistent for the response
        serializer = DocCommonSerializer(instance)
        return Response(serializer.data)

    @action(detail=True, methods=["GET"])
    def is_collaborator(self, request, *args, **kwargs):
        """Report whether the current user is a collaborator on this document."""
        instance = self.get_object()
        try:
            DocCollaborator.objects.get(doc_id=instance.id, uid=request.user.uid)
            # NOTE(review): success returns an empty body while failure returns
            # {"result": False} — asymmetric shapes; confirm the client expects this.
            return Response()
        except DocCollaborator.DoesNotExist:
            return Response({"result": False})

    @action(detail=False, methods=["GET"])
    def load_pin_doc(self, request, *args, **kwargs):
        """List the pinned (public, published) documents of a repository."""
        repo_id = request.GET.get("repo_id", None)
        # No repo parameter: nothing to list.
        if repo_id is None:
            raise Error404()
        # Verify the target repository exists.
        try:
            Repo.objects.get(id=repo_id, is_deleted=False)
        except Repo.DoesNotExist:
            raise Error404()
        sql = (
            "SELECT distinct dd.*, au.username creator_name, rr.name repo_name "
            "FROM `doc_doc` dd "
            "JOIN `auth_user` au ON dd.creator=au.uid "
            "JOIN `repo_repo` rr ON rr.id=dd.repo_id "
            "JOIN `doc_pin` dp ON dp.doc_id=dd.id AND dp.in_use "
            "WHERE rr.id=%s AND dd.available=%s "
            "AND dd.is_publish AND NOT dd.is_deleted; "
        )
        queryset = Doc.objects.raw(sql, [repo_id, DocAvailableChoices.PUBLIC])
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
class DocPublicView(GenericViewSet):
    """Public entry point: anonymous-friendly document listings and site stats."""

    queryset = Doc.objects.filter(
        is_deleted=False, is_publish=True, available=DocAvailableChoices.PUBLIC
    )
    authentication_classes = [SessionAuthenticate]

    def list(self, request, *args, **kwargs):
        # Public docs (or the caller's own) from repos that are public or the
        # caller is a non-visitor member of.
        sql = (
            "SELECT d.*, au.username creator_name, r.name repo_name "
            "FROM `repo_repo` r "
            "JOIN `repo_user` ru ON r.id=ru.repo_id AND ru.u_type!=%s "
            "JOIN `doc_doc` d ON r.id=d.repo_id "
            "JOIN `auth_user` au ON au.uid=d.creator "
            "WHERE NOT r.is_deleted AND (ru.uid=%s OR r.r_type=%s) "
            "AND (d.available = %s OR d.creator = %s) AND NOT d.`is_deleted` AND d.`is_publish` "
            "GROUP BY d.id "
            "ORDER BY d.id DESC;"
        )
        docs = Doc.objects.raw(
            sql,
            [
                UserTypeChoices.VISITOR,
                request.user.uid,
                RepoTypeChoices.PUBLIC,
                DocAvailableChoices.PUBLIC,
                request.user.uid,
            ],
        )
        page = NumPagination()
        queryset = page.paginate_queryset(docs, request, self)
        serializer = DocListSerializer(queryset, many=True)
        return page.get_paginated_response(serializer.data)

    @action(detail=False, methods=["GET"])
    def recent(self, request, *args, **kwargs):
        """Top-10 most-viewed documents from public repos (cached 30 minutes)."""
        cache_key = f"{self.__class__.__name__}:{self.action}"
        cache_data = cache.get(cache_key)
        if cache_data is not None:
            return Response(cache_data)
        # Popular documents limited to public repositories, ranked by page views.
        public_repo_ids = Repo.objects.filter(
            r_type=RepoTypeChoices.PUBLIC, is_deleted=False
        ).values("id")
        queryset = self.queryset.filter(repo_id__in=public_repo_ids, pv__gt=0).order_by(
            "-pv"
        )[:10]
        serializer = DocListSerializer(queryset, many=True)
        cache.set(cache_key, serializer.data, 1800)
        return Response(serializer.data)

    @action(detail=False, methods=["GET"])
    def hot_repo(self, request, *args, **kwargs):
        """Top-10 repos by document count among the newest docs (cached 30 minutes)."""
        cache_key = f"{self.__class__.__name__}:{self.action}"
        cache_data = cache.get(cache_key)
        if cache_data is not None:
            return Response(cache_data)
        sql = (
            "SELECT rr.*, dd.repo_id, COUNT(1) 'count' "
            "FROM `doc_doc` dd "
            "JOIN (SELECT MIN(dd2.id) 'min_id' from `doc_doc` dd2 ORDER BY dd2.id DESC LIMIT 100) dd3 "
            "JOIN `repo_repo` rr ON rr.id=dd.repo_id "
            "WHERE dd.id>=dd3.min_id "
            "GROUP BY dd.repo_id "
            "ORDER BY count DESC "
            "LIMIT 10"
        )
        repos = Repo.objects.raw(sql)
        serializer = RepoSerializer(repos, many=True)
        cache.set(cache_key, serializer.data, 1800)
        return Response(serializer.data)

    @action(detail=False, methods=["GET"])
    def user_doc(self, request, *args, **kwargs):
        """List a named user's public documents that are visible to the caller."""
        username = request.GET.get("username")
        if not username:
            raise ParamsNotFound(_("用户名不能为空"))
        try:
            user = USER_MODEL.objects.get(username=username)
        except USER_MODEL.DoesNotExist:
            raise UserNotExist()
        # Visible repos: public ones, plus those shared with the caller as a
        # non-visitor member.
        union_repo_ids = RepoUser.objects.filter(
            Q(uid=request.user.uid) & ~Q(u_type=UserTypeChoices.VISITOR)
        ).values("repo_id")
        allowed_repo_ids = Repo.objects.filter(
            Q(r_type=RepoTypeChoices.PUBLIC) | Q(id__in=union_repo_ids)
        ).values("id")
        docs = self.queryset.filter(
            creator=user.uid, repo_id__in=allowed_repo_ids
        ).order_by("-id")
        page = NumPagination()
        queryset = page.paginate_queryset(docs, request, self)
        serializer = DocListSerializer(queryset, many=True)
        return page.get_paginated_response(serializer.data)

    @action(detail=False, methods=["GET"])
    def recent_chart(self, request, *args, **kwargs):
        """Per-day publication counts for the last 30 days (cached 30 minutes)."""
        cache_key = f"{self.__class__.__name__}:{self.action}"
        cache_data = cache.get(cache_key)
        if cache_data is not None:
            return Response(cache_data)
        today = datetime.datetime.today()
        last_day = today - datetime.timedelta(days=30)
        # Values are spliced in with str.format rather than bound parameters;
        # both come from the server (a datetime and an enum value), not user
        # input, so there is no injection surface here.
        sql = (
            "SELECT dd.id, DATE_FORMAT(dd.update_at, \"%%m-%%d\") 'date', COUNT(1) 'count' "
            "FROM `doc_doc` dd "
            "WHERE dd.update_at>='{}' AND NOT dd.is_deleted AND dd.available = '{}' "
            "GROUP BY DATE(dd.update_at); "
        ).format(last_day, DocAvailableChoices.PUBLIC)
        docs_count = Doc.objects.raw(sql)
        serializer = DocPublishChartSerializer(docs_count, many=True)
        data = {item["date"]: item["count"] for item in serializer.data}
        cache.set(cache_key, data, 1800)
        return Response(data)
class SearchDocView(ThrottleAPIView):
    """Search entry point, rate-limited by DocSearchThrottle."""

    throttle_classes = [
        DocSearchThrottle,
    ]

    def post(self, request, *args, **kwargs):
        search_key = request.data.get("searchKey")
        if not search_key:
            raise ParamsNotFound(_("搜索关键字不能为空"))
        # Public-or-own docs from public repos or repos the caller belongs to.
        # The two {} slots receive the dynamically built title/content
        # conditions; values themselves are still bound as %s parameters.
        sql = (
            "SELECT dd.*, au.username creator_name, rr.name repo_name "
            "FROM `repo_repo` rr "
            "JOIN `repo_user` ru ON ru.repo_id=rr.id AND ru.u_type!=%s "
            "JOIN `doc_doc` dd ON rr.id = dd.repo_id "
            "JOIN `auth_user` au ON au.uid = dd.creator "
            "WHERE NOT rr.is_deleted AND (ru.uid = %s OR rr.r_type = %s) "
            "AND NOT dd.is_deleted AND dd.is_publish AND (dd.available = %s OR dd.creator = %s) "
            "AND (({}) OR ({})) "
            "GROUP BY dd.id "
            "ORDER BY dd.id DESC;"
        )
        # Build one LIKE clause per search key for title and content each.
        # NOTE(review): this iterates `search_key`; presumably the client sends
        # a list of keywords — if it ever sends a plain string this would
        # iterate characters. Confirm against the frontend.
        extend_title_sqls = []
        extend_content_sqls = []
        params_keys = []
        for key in search_key:
            if key:
                extend_title_sqls.append(" dd.title like %s ")
                extend_content_sqls.append(" dd.content like %s ")
                params_keys.append(f"%{key}%")
        # Fragments carry surrounding spaces, so a bare "AND" join is valid SQL.
        extend_title_sql = "AND".join(extend_title_sqls)
        extend_content_sql = "AND".join(extend_content_sqls)
        sql = sql.format(extend_title_sql, extend_content_sql)
        docs = Doc.objects.raw(
            sql,
            [
                UserTypeChoices.VISITOR,
                request.user.uid,
                RepoTypeChoices.PUBLIC,
                DocAvailableChoices.PUBLIC,
                request.user.uid,
                *params_keys,  # bound to the title LIKE placeholders
                *params_keys,  # bound to the content LIKE placeholders
            ],
        )
        page = NumPagination()
        queryset = page.paginate_queryset(docs, request, self)
        serializer = DocListSerializer(queryset, many=True)
        return page.get_paginated_response(serializer.data)
| 38.541761 | 103 | 0.602554 | 2,003 | 17,074 | 4.987019 | 0.142786 | 0.014416 | 0.034037 | 0.039944 | 0.465312 | 0.388828 | 0.342377 | 0.322655 | 0.289018 | 0.247672 | 0 | 0.003979 | 0.278786 | 17,074 | 442 | 104 | 38.628959 | 0.807211 | 0.016516 | 0 | 0.357333 | 0 | 0.010667 | 0.177158 | 0.015288 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053333 | false | 0 | 0.069333 | 0.002667 | 0.226667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3007e107fbb9661b8e7f7e4c1d4a01f5c735b272 | 21,175 | py | Python | python/cli/report_study.py | mediumroast/mr_sdk | 55c7a13c5cef73e677297026b41b7ec23855391f | [
"Apache-2.0"
] | 1 | 2021-10-06T02:46:48.000Z | 2021-10-06T02:46:48.000Z | python/cli/report_study.py | mediumroast/mr_sdk | 55c7a13c5cef73e677297026b41b7ec23855391f | [
"Apache-2.0"
] | 3 | 2021-10-16T03:34:07.000Z | 2022-02-23T05:10:12.000Z | python/cli/report_study.py | mediumroast/mr_sdk | 55c7a13c5cef73e677297026b41b7ec23855391f | [
"Apache-2.0"
] | null | null | null | #!/bin/env python3
import sys
import argparse
import configparser
import docx
from docx import Document
from docx.shared import Pt, Inches
from docx.enum.dml import MSO_THEME_COLOR_INDEX
from docx.enum.section import WD_ORIENT, WD_SECTION
from datetime import datetime
from mediumroast.api.high_level import Auth as authenticate
from mediumroast.api.high_level import Studies as study
from mediumroast.api.high_level import Interactions as interaction
### General utilities
def parse_cli_args(program_name='report_study', desc='A mediumroast.io utility that generates a Microsoft Word formatted report for a study.', args=None):
    """Define and parse the command line arguments for the report generator.

    Args:
        program_name: Program name shown in --help output.
        desc: Program description shown in --help output.
        args: Optional explicit argument list. Defaults to None, in which case
            argparse reads sys.argv[1:] — the original behavior. (Added for
            testability; backward compatible.)

    Returns:
        argparse.Namespace with the parsed options (guid and org are required).
    """
    parser = argparse.ArgumentParser(prog=program_name, description=desc)
    parser.add_argument('--exclude_substudies', help="The names for the substudies to exclude in a comma separated list",
                        type=str, dest='exclude_substudies', default=None)
    parser.add_argument('--rest_url', help="The URL of the target REST server",
                        type=str, dest='rest_url', default='http://mr-01:3000')
    parser.add_argument('--guid', help="The GUID for the study to be reported on.",
                        type=str, dest='guid', required=True)
    parser.add_argument('--org', help="The organization name for the report.",
                        type=str, dest='org', required=True)
    parser.add_argument('--user', help="User name",
                        type=str, dest='user', default='foo')
    parser.add_argument('--secret', help="Secret or password",
                        type=str, dest='secret', default='bar')
    parser.add_argument('--config_file', help="The location to the configuration files",
                        type=str, dest='config_file', default='./reports.ini')
    # Passing None makes argparse fall back to sys.argv[1:], as before.
    cli_args = parser.parse_args(args)
    return cli_args
def read_config(conf_file='./reports.ini'):
    """Load report settings from an INI file and return the parser.

    Missing files are silently ignored (configparser's read() behavior),
    leaving an empty parser.
    """
    config = configparser.ConfigParser()
    config.read(conf_file)
    return config
def get_interaction_name(guid):
    """Resolve an interaction GUID to its display name.

    Relies on the module-global ``credential`` established during login.
    """
    result = interaction(credential).get_name_by_guid(guid)
    # result is (status, payload); the payload carries the name.
    return result[1]['interactionName']
def _create_header(doc_obj, conf, font_size=7):
    """Stamp the page header with the org name and a generation timestamp."""
    stamp = f'{datetime.now():%Y-%m-%d %H:%M}'
    header_para = doc_obj.sections[0].header.paragraphs[0]
    header_para.text = conf['org'] + "\t | \t Created on: " + stamp
    # Apply the configured font to the built-in Header style, then use it.
    header_font = doc_obj.styles['Header'].font
    header_font.name = conf['font']
    header_font.size = Pt(font_size)
    header_para.style = doc_obj.styles['Header']
def _create_footer(doc_obj, conf, font_size=7):
    """Stamp the page footer with the confidentiality and copyright notices."""
    footer_para = doc_obj.sections[0].footer.paragraphs[0]
    footer_para.text = conf['confidentiality'] + "\t | \t" + conf['copyright']
    # Apply the configured font to the built-in Footer style, then use it.
    footer_font = doc_obj.styles['Footer'].font
    footer_font.name = conf['font']
    footer_font.size = Pt(font_size)
    footer_para.style = doc_obj.styles['Footer']
def _create_cover_page(doc_obj, study, conf, logo_size=60, font_size=30):
    """Build the cover page: logo, study title, subtitle, author, creation date.

    Ends with a page break so the body starts on a fresh page.
    """
    # Generics
    title_font_size = Pt(font_size)  # Title Font Size
    # NOTE(review): the logo_size parameter is immediately overwritten here,
    # so callers cannot actually control the logo height — confirm intent.
    logo_size = Pt(font_size*2.5)
    # Organization name and logo
    logo = conf['logo']
    logo_title = doc_obj.add_paragraph().add_run()
    logo_title.add_picture(logo, height=logo_size)
    # Define the Cover Title Style
    org = conf['org']  # Organization
    title = "\n\nTitle: " + study['studyName']
    cover_title = doc_obj.add_paragraph(title)
    style = doc_obj.styles['Title']
    font = style.font
    font.name = conf['font']
    font.size = title_font_size
    cover_title.style = doc_obj.styles['Title']
    # Define the Subtitle content (bold run inside an empty paragraph)
    subtitle = "A " + org + " study report enabling attributable market insights."
    cover_subtitle = doc_obj.add_paragraph("")
    s = cover_subtitle.add_run(subtitle)
    subtitle_font = s.font
    subtitle_font.bold = True
    # Define the Author content
    author = "Mediumroast Barrista Robot"
    cover_author = doc_obj.add_paragraph("\nAuthor: ")
    a = cover_author.add_run(author)
    author_font = a.font
    author_font.bold = True
    # Define the Creation date content
    creation_date = f'{datetime.now():%Y-%m-%d %H:%M}'
    cover_date = doc_obj.add_paragraph("Creation Date: ")
    d = cover_date.add_run(creation_date)
    date_font = d.font
    date_font.bold = True
    # Add a page break
    doc_obj.add_page_break()
def _create_summary(doc_obj, study_doc, conf):
    """Write the Findings section: Introduction, Opportunity, and Actions.

    Side effect: mutates ``study_doc`` — the 'text' entries under
    'Opportunity' and 'Action' are deleted so the remaining keys can be
    iterated as bullets.
    """
    # Create the Findings section title
    section_title = doc_obj.add_paragraph(
        'Findings')  # Create the Findings section
    section_title.style = doc_obj.styles['Title']
    doc_obj.add_heading('Introduction')
    # Collapse embedded newlines into single spaces for flowing paragraphs.
    clean_intro = " ".join(study_doc['Introduction'].split("\n"))
    doc_obj.add_paragraph(clean_intro)
    # Create the Opportunity section
    doc_obj.add_heading('Opportunity')
    clean_opportunity = " ".join(study_doc['Opportunity']['text'].split("\n"))
    doc_obj.add_paragraph(clean_opportunity)
    # Remove the text section before we process the bullets
    del(study_doc['Opportunity']['text'])
    for opp in study_doc['Opportunity']:
        clean_opp = " ".join(study_doc['Opportunity'][opp].split("\n"))
        doc_obj.add_paragraph(clean_opp, style='List Bullet')
    # Create the Action section
    doc_obj.add_heading('Actions')
    clean_action = " ".join(study_doc['Action']['text'].split("\n"))
    doc_obj.add_paragraph(clean_action)
    # Remove the text section before we process the numbered bullets
    del(study_doc['Action']['text'])
    for action in study_doc['Action']:
        clean_act = " ".join(study_doc['Action'][action].split("\n"))
        doc_obj.add_paragraph(clean_act, style='List Number')
    # Add a page break
    doc_obj.add_page_break()
def _add_hyperlink(paragraph, text, url):
    """Append a clickable external hyperlink run to *paragraph*.

    python-docx has no high-level hyperlink API, so this builds the raw
    WordprocessingML (w:hyperlink/w:r) elements directly.
    Taken from https://stackoverflow.com/questions/47666642/adding-an-hyperlink-in-msword-by-using-python-docx

    Returns the created w:hyperlink element.
    """
    # This gets access to the document.xml.rels file and gets a new relation id value
    part = paragraph.part
    r_id = part.relate_to(
        url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
    # Create the w:hyperlink tag and add needed values
    hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
    hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )
    # Create a w:r element and a new w:rPr element
    new_run = docx.oxml.shared.OxmlElement('w:r')
    rPr = docx.oxml.shared.OxmlElement('w:rPr')
    # Join all the xml elements together and add the required text to the w:r element
    new_run.append(rPr)
    new_run.text = text
    hyperlink.append(new_run)
    # Create a new Run object and add the hyperlink into it
    r = paragraph.add_run()
    r._r.append(hyperlink)
    # A workaround for the lack of a hyperlink style (doesn't go purple after using the link)
    # Delete this if using a template that has the hyperlink style in it
    r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK
    r.font.underline = True
    return hyperlink
def _create_reference(interaction_guid, substudy, doc_obj, conf, char_limit=500):
    """Append one interaction reference: heading, metadata line, truncated
    abstract, and a hyperlink to the interaction resource.

    On lookup failure, prints an error and emits nothing.
    """
    interaction_ctl = interaction(credential)
    success, interaction_data = interaction_ctl.get_by_guid(interaction_guid)
    if success:
        doc_obj.add_heading(interaction_data['interactionName'], 2)
        # Time/date come as packed strings: HHMM and YYYYMMDD.
        my_time = str(interaction_data['time'][0:2]) + \
            ':' + str(interaction_data['time'][2:4])
        my_date = str(interaction_data['date'][0:4]) + '-' + str(interaction_data['date'][4:6]) + '-' \
            + str(interaction_data['date'][6:8])
        interaction_meta = "\t\t|\t".join(['Date: ' + my_date + "\t" + my_time,
                                           'Sub-Study Identifier: ' + substudy])
        doc_obj.add_paragraph(interaction_meta)
        # Truncate the abstract so the reference entry stays short.
        doc_obj.add_paragraph(
            interaction_data['abstract'][0:char_limit] + '...')
        resource = doc_obj.add_paragraph('Interaction Resource: ')
        # NOTE(review): replace('s3', 'http') rewrites EVERY 's3' occurrence in
        # the URL, not just the scheme — a URL containing 's3' elsewhere would
        # be corrupted. Confirm URLs are always 's3://...' with no other 's3'.
        _add_hyperlink(
            resource, interaction_data['interactionName'], interaction_data['url'].replace('s3', 'http'))
    else:
        print(
            'Something went wrong obtaining the interaction data for [' + interaction_guid + ']')
def _create_references(doc_obj, substudy_list, conf):
    """Append the References section: one entry per interaction per sub-study."""
    references_title = doc_obj.add_paragraph('References')
    references_title.style = doc_obj.styles['Title']
    # Walk every sub-study and emit a reference for each of its interactions.
    for substudy_id, substudy in substudy_list.items():
        for interaction_entry in substudy['interactions'].values():
            _create_reference(interaction_entry['GUID'], substudy_id, doc_obj, conf)
def _create_quote(doc_obj, quote, indent, font_size):
    """Render one quote as an indented bullet with tightened trailing spacing."""
    bullet_para = doc_obj.add_paragraph(style='List Bullet')
    bullet_para.paragraph_format.left_indent = Pt(1.5 * indent)
    quote_run = bullet_para.add_run(quote)
    quote_run.font.size = Pt(font_size)
    bullet_para.paragraph_format.space_after = Pt(3)
def _create_quotes(doc_obj, quotes, indent, font_size, location='quotes'):
    """Render each entry's quote text as an indented bullet.

    Args:
        doc_obj: The docx Document being built.
        quotes: Mapping whose values each hold the quote text under *location*.
        indent: Base indent; bullets are indented at 1.5x this value in points.
        font_size: Bullet font size in points.
        location: Key under each entry that carries the quote text.
    """
    # Fix: the loop body duplicated _create_quote line-for-line; delegate to it
    # instead so the bullet layout is defined in exactly one place.
    for key in quotes:
        _create_quote(doc_obj, quotes[key][location], indent, font_size)
def _create_subsection(doc_obj, start_text, body_text, indent, font_size, to_bold=False, to_italics=False):
    """Emit a labelled paragraph: a lead run (optionally bold) then body text
    (optionally italic), both at *font_size* and indented by *indent* points."""
    paragraph = doc_obj.add_paragraph()
    paragraph.paragraph_format.left_indent = Pt(indent)
    lead_run = paragraph.add_run(start_text)
    lead_run.font.bold = to_bold
    lead_run.font.size = Pt(font_size)
    body_run = paragraph.add_run(body_text)
    body_run.font.size = Pt(font_size)
    # Italics is only ever switched on, never explicitly cleared.
    if to_italics:
        body_run.font.italic = to_italics
def _create_intro(doc_obj, intro_name, intro_body, heading_level=2):
    """Add a heading (default level 2) followed by its introductory paragraph."""
    doc_obj.add_heading(intro_name, level=heading_level)
    doc_obj.add_paragraph(intro_body)
def _create_key_theme(doc_obj, themes, quotes, conf, include_fortune=True):
    """Write one sub-study's theme content: the summary theme (definition,
    optional fortune, tags, quotes) followed by each detailed theme with its
    per-interaction quotes. Ends with a page break.
    """
    ### Define the summary theme
    _create_intro(doc_obj,
                  'Summary Theme',
                  conf['themes']['summary_intro'].replace("\n", " "))
    ## Create the definition
    theme = 'summary_theme'
    _create_subsection(doc_obj,
                       'Definition: ',
                       themes[theme]['description'],
                       int(conf['themes']['indent']),
                       font_size=int(conf['themes']['font_size']),
                       to_bold=True)
    ## Determine if we should include the theme fortune or not
    if include_fortune:
        # Capitalize the first letter and flag the text as machine-generated.
        _create_subsection(doc_obj,
                           'Fortune: ',
                           themes[theme]['fortune'][0].upper() + themes[theme]['fortune'][1:] + ' [system generated]',
                           int(conf['themes']['indent']),
                           font_size=int(conf['themes']['font_size']),
                           to_bold=True)
    ## Create the tags
    _create_subsection(doc_obj,
                       'Tags: ',
                       " | ".join(themes[theme]['tags'].keys()),
                       int(conf['themes']['indent']),
                       font_size=int(conf['themes']['font_size']),
                       to_bold=True,
                       to_italics=True)
    ## Create the quotes
    subsection_name = 'Theme Quotes'
    doc_obj.add_heading(subsection_name, level=3)
    _create_quotes(doc_obj,
                   quotes['summary'],
                   int(conf['themes']['indent']),
                   font_size=int(conf['themes']['font_size']))
    ### Add the discrete/detailed themes
    theme_loc = 'discrete_themes'
    quotes_loc = 'discrete'
    ## Create the starting paragraph
    _create_intro(doc_obj,
                  'Detailed Themes',
                  conf['themes']['discrete_intro'].replace("\n", " "))
    ## Add in the individual themes and their quotes
    my_themes = themes[theme_loc]
    for my_theme in my_themes:
        # Put in the theme identifier
        _create_intro(doc_obj,
                      'Detailed Theme Identifier: ' + my_theme,
                      conf['themes']['discrete_theme_intro'].replace("\n", " "),
                      heading_level=3)
        # Add the description
        _create_subsection(doc_obj,
                           'Definition: ',
                           my_themes[my_theme]['description'],
                           int(conf['themes']['indent']),
                           font_size=int(conf['themes']['font_size']),
                           to_bold=True)
        # Include the fortune if the setting is true
        if include_fortune:
            _create_subsection(doc_obj,
                               'Fortune: ',
                               my_themes[my_theme]['fortune'][0].upper() + my_themes[my_theme]['fortune'][1:] + ' [system generated]',
                               int(conf['themes']['indent']),
                               font_size=int(conf['themes']['font_size']),
                               to_bold=True)
        # Add the tags
        _create_subsection(doc_obj,
                           'Tags: ',
                           " | ".join(my_themes[my_theme]['tags'].keys()),
                           int(conf['themes']['indent']),
                           font_size=int(conf['themes']['font_size']),
                           to_bold=True,
                           to_italics=True)
        # Pull in the quotes
        subsection_name = 'Theme Quotes by Interaction'
        doc_obj.add_heading(subsection_name, level=4)
        if my_theme in quotes[quotes_loc]:
            for interaction in quotes[quotes_loc][my_theme]:
                doc_obj.add_heading(get_interaction_name(interaction), level=5)
                the_quotes = quotes[quotes_loc][my_theme][interaction]['quotes']
                # Explain that the system was not able to find a relevant quote
                if not the_quotes: the_quotes=[['mediumroast.io was unable to find a relevant quote or text snippet for this theme.']]
                for my_quote in the_quotes:
                    _create_quote(doc_obj,
                                  my_quote[0],
                                  int(conf['themes']['indent']),
                                  font_size=int(conf['themes']['font_size']))
                _create_subsection(doc_obj,
                                   'Frequency: ',
                                   str(quotes[quotes_loc][my_theme][interaction]['frequency']),
                                   int(conf['themes']['indent']),
                                   font_size=int(conf['themes']['font_size']),
                                   to_bold=True,
                                   to_italics=True)
    doc_obj.add_page_break()
def _create_key_themes(doc_obj, substudies, conf, substudy_excludes=None):
    """Create the 'Key Themes by Sub-Study' section.

    Emits the section title and intro paragraph, then one heading plus theme
    content (via _create_key_theme) per non-excluded sub-study.

    Args:
        doc_obj: The docx Document being built.
        substudies: Mapping of sub-study id to its data (description,
            keyThemes, keyThemeQuotes).
        conf: Report configuration; uses conf['themes']['intro'].
        substudy_excludes: Sub-study ids to skip; None means exclude nothing.
            (Fix: replaced the mutable default ``list()`` with None — the old
            default was shared across calls; behavior is unchanged.)
    """
    if substudy_excludes is None:
        substudy_excludes = []
    section_title = doc_obj.add_paragraph(
        'Key Themes by Sub-Study')  # Create the Themes section
    section_title.style = doc_obj.styles['Title']
    doc_obj.add_paragraph(conf['themes']['intro'].replace("\n", " "))
    for substudy in substudies:
        if substudy in substudy_excludes:
            continue
        doc_obj.add_heading('Sub-Study Identifier: ' + substudy + ' — ' + substudies[substudy]['description'], 1)
        _create_key_theme(
            doc_obj, substudies[substudy]['keyThemes'], substudies[substudy]['keyThemeQuotes'], conf)
def change_orientation(doc_obj):
    """Start a new section whose page width and height are swapped.

    Fix: the original always set ``WD_ORIENT.LANDSCAPE`` on the new section,
    even when it was being used to flip back to portrait (the caller's second
    invocation) — the orientation flag then contradicted the page dimensions.
    The flag is now chosen to match the swapped dimensions.

    Returns:
        The newly added section.
    """
    current_section = doc_obj.sections[-1]
    # Swap the dimensions to flip the page.
    new_width, new_height = current_section.page_height, current_section.page_width
    new_section = doc_obj.add_section(WD_SECTION.NEW_PAGE)
    if new_width > new_height:
        new_section.orientation = WD_ORIENT.LANDSCAPE
    else:
        new_section.orientation = WD_ORIENT.PORTRAIT
    new_section.page_width = new_width
    new_section.page_height = new_height
    return new_section
def _create_row(the_row, id, type,freq, src, snip):
ID = 0
TYPE = 1
FREQ = 2
SNIP = 4
SRC = 3
the_row[ID].text = str(id)
the_row[TYPE].text = str(type)
the_row[FREQ].text = str(freq)
the_row[SNIP].text = str(snip)
the_row[SRC].text = str(src)
def _create_rows():
    """Unimplemented placeholder — currently unused.

    Original plan (from the author's pseudocode): emit one row for the summary
    theme, then one row per discrete theme. That logic lives inline in
    _create_summary_theme_tables instead; this stub was never filled in.
    """
    pass
def _create_summary_theme_tables(doc_obj, substudies, conf, substudy_excludes=None):
    """Create the landscape 'Key Theme Summary Tables' section.

    Emits one 5-column table per non-excluded sub-study: the summary theme
    row first, then one row per discrete theme that has quotes. The page is
    flipped to landscape for the tables and back to portrait afterwards.

    Args:
        doc_obj: The docx Document being built.
        substudies: Mapping of sub-study id to its data (description,
            keyThemes, keyThemeQuotes).
        conf: Report configuration (unused directly here, kept for parity
            with the other section builders).
        substudy_excludes: Sub-study ids to skip; None means exclude nothing.
            (Fix: replaced the mutable default ``list()`` with None, matching
            best practice; behavior is unchanged.)

    Also fixed: removed the dead ``my_widths`` local — the original computed
    column widths (1.5", 0.75", 0.75", 1.5", 3.5") but never applied them to
    the table; apply via table.columns[i].width if explicit widths are wanted.
    """
    if substudy_excludes is None:
        substudy_excludes = []
    change_orientation(doc_obj)  # Flip to landscape mode
    section_title = doc_obj.add_paragraph(
        'Key Theme Summary Tables')  # Create the summary tables section
    section_title.style = doc_obj.styles['Title']
    for substudy in substudies:
        if substudy in substudy_excludes:
            continue
        doc_obj.add_heading('Sub-Study Identifier: ' + substudy + ' — ' + substudies[substudy]['description'], 1)
        my_table = doc_obj.add_table(rows=1, cols=5)
        my_table.style = 'Colorful Grid'
        header_row = my_table.rows[0].cells
        header_row[0].text = 'Identifier'
        header_row[1].text = 'Type'
        header_row[2].text = 'Frequency'
        header_row[3].text = 'Source'
        header_row[4].text = 'Snippet'
        my_row = my_table.add_row().cells
        ## Process the summary theme
        my_theme = 'Summary Theme'
        my_type = 'Summary'
        my_frequency = 'N/A'
        my_interaction = list(substudies[substudy]['keyThemeQuotes']['summary'].keys())[0]
        my_snippet = substudies[substudy]['keyThemeQuotes']['summary'][my_interaction]['quotes'][0]
        my_source = get_interaction_name(my_interaction)
        _create_row(my_row, my_theme, my_type, my_frequency, my_source, my_snippet)
        ## Process the discrete themes
        theme_loc = 'discrete_themes'
        quotes_loc = 'discrete'
        ## Add in the individual themes and their quotes
        my_themes = substudies[substudy]['keyThemes'][theme_loc]
        my_quotes = substudies[substudy]['keyThemeQuotes'][quotes_loc]
        my_type = 'Detailed'
        for my_theme in my_themes:
            if my_theme in my_quotes:
                my_row = my_table.add_row().cells
                # Use the first interaction that carries quotes for this theme.
                my_interaction = list(my_quotes[my_theme].keys())[0]
                my_source = get_interaction_name(my_interaction)
                the_quotes = my_quotes[my_theme][my_interaction]['quotes']
                # Explain that the system was not able to find a relevant quote
                if not the_quotes:
                    the_quotes = [['mediumroast.io was unable to find a relevant quote or text snippet for this theme.']]
                my_snippet = the_quotes[0][0]
                my_frequency = my_themes[my_theme]['frequency']
                _create_row(my_row, my_theme, my_type, my_frequency, my_source, my_snippet)
    doc_obj.add_page_break()
    change_orientation(doc_obj)  # Flip to portrait mode
def report(study, conf, substudy_excludes):
    """Assemble the full study report and return the docx Document.

    Order of sections: cover page, header/footer, findings summary, key theme
    summary tables (landscape), detailed key themes, references.
    """
    # Document generics
    d = Document()  # Create doc object
    style = d.styles['Normal']
    font = style.font
    font.name = conf['font']
    font.size = Pt(int(conf['font_size']))
    _create_cover_page(d, study, conf)  # Create the cover page
    _create_header(d, conf)  # Create the doc header
    _create_footer(d, conf)  # Create the doc footer
    ### Intro, opportunity and actions sections
    _create_summary(d, study['document'], conf)
    ### Key Themes
    ## Key Themes Summary Table
    _create_summary_theme_tables(d, study['substudies'], conf, substudy_excludes)
    ## Detailed Key Themes
    _create_key_themes(d, study['substudies'], conf, substudy_excludes)
    ### References
    _create_references(d, study['substudies'], conf)
    return d
if __name__ == "__main__":
    # Parse CLI options and load the per-organization report configuration.
    my_args = parse_cli_args()
    configurator = read_config(conf_file=my_args.config_file)
    # Config sections are keyed by upper-cased organization name.
    my_org = my_args.org.upper()
    # Collect the report defaults from the configuration file.
    report_conf = {
        'org': configurator[my_org]['organization_name'],
        'logo': configurator[my_org]['logo_image'],
        'font': configurator[my_org]['font_type'],
        'font_size': configurator[my_org]['font_size'],
        'font_measure': configurator[my_org]['font_measure'],
        'copyright': configurator[my_org]['copyright_notice'],
        'confidentiality': configurator[my_org]['confidential_notice'],
        'themes': {
            'font_size': configurator['THEME_FORMAT']['font_size'],
            'intro': configurator['THEME_FORMAT']['key_theme_intro'],
            'summary_intro': configurator['THEME_FORMAT']['summary_theme_intro'],
            'discrete_intro': configurator['THEME_FORMAT']['discrete_themes_intro'],
            'discrete_theme_intro': configurator['THEME_FORMAT']['discrete_theme_intro'],
            'indent': configurator['THEME_FORMAT']['indent'],
        }
    }
    # Log in to the REST server; `credential` is read by the module-level
    # helpers (get_interaction_name, _create_reference).
    auth_ctl = authenticate(
        user_name=my_args.user, secret=my_args.secret, rest_server_url=my_args.rest_url)
    credential = auth_ctl.login()
    substudy_excludes = my_args.exclude_substudies.split(',') if my_args.exclude_substudies else list()
    # Fetch the study and, on success, render and save the Word report.
    study_ctl = study(credential)
    success, study_obj = study_ctl.get_by_guid(my_args.guid)
    if success:
        doc_name = study_obj['studyName'].replace(
            ' ', '_') + "_study_report.docx"
        document = report(study_obj, report_conf, substudy_excludes)
        document.save(doc_name)
    else:
        print('CLI ERROR: This is a generic error message, as something went wrong.')
        sys.exit(-1)
| 39.653558 | 143 | 0.648501 | 2,754 | 21,175 | 4.736383 | 0.133624 | 0.037259 | 0.026219 | 0.030359 | 0.381631 | 0.312634 | 0.26104 | 0.231754 | 0.192426 | 0.189589 | 0 | 0.005774 | 0.231169 | 21,175 | 533 | 144 | 39.727955 | 0.795332 | 0.103566 | 0 | 0.28866 | 0 | 0 | 0.159712 | 0.004931 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054124 | false | 0.005155 | 0.030928 | 0 | 0.100515 | 0.005155 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
300d26a47173ec68c9cbc913d71ba1fd8d873df4 | 17,388 | py | Python | hubcheck/browser/browser.py | codedsk/hubcheck | 2ff506eb56ba00f035300862f8848e4168452a17 | [
"MIT"
] | 1 | 2016-02-13T13:42:23.000Z | 2016-02-13T13:42:23.000Z | hubcheck/browser/browser.py | codedsk/hubcheck | 2ff506eb56ba00f035300862f8848e4168452a17 | [
"MIT"
] | null | null | null | hubcheck/browser/browser.py | codedsk/hubcheck | 2ff506eb56ba00f035300862f8848e4168452a17 | [
"MIT"
] | null | null | null | import pprint
import logging
import datetime
from selenium import webdriver
import hubcheck.conf
# block websites that make linkcheck slow
# these are usually blocked by the workspace firewall
# mozillalabs comes from using a nightly version of firefox browser
# many of the others are from login authentication sites
# URL regexes the proxy client short-circuits (see start_proxy_client(),
# which registers each pattern with a canned 200 response) so third-party
# endpoints never get fetched during link checking.
PROXY_BLACKLIST = [
    "http(s)?://.*mozillalabs\\.com/?.*",        # testpilot.mozillalabs.com
    "http(s)?://.*google-analytics\\.com/.*",    # ssl.google-analytics.com
    'http(s)?://.*facebook\\.com/?.*',           # www.facebook.com/login.php
    'http(s)?://.*fbcdn\\.com/?.*',              # www.facebook.com/login.php
    'http(s)?://.*accounts\\.google\\.com/?.*',  # accounts.google.com
    'http(s)?://.*linkedin\\.com/?.*',           # linkedin.com
    'http(s)?://.*twitter\\.com/?.*',            # api.twitter.com
    # 'http(s)?://.*purdue\\.edu/apps/account/cas/?.*', # purdue cas
]
MIMETYPES = [
"appl/text", # .doc \
"application/acad", # .dwg \
"application/acrobat", # .pdf \
"application/autocad_dwg", # .dwg \
"application/doc", # .doc, .rtf \
"application/dwg", # .dwg \
"application/eps", # .eps \
"application/futuresplash", # .swf \
"application/gzip", # .gz \
"application/gzipped", # .gz \
"application/gzip-compressed", # .gz \
"application/jpg", # .jpg \
"application/ms-powerpoint", # .ppt \
"application/msexcel", # .xls \
"application/mspowerpnt", # .ppt \
"application/mspowerpoint", # .ppt \
"application/msword", # .doc, .rtf \
"application/octet-stream", # .gz, .zip \
"application/pdf", # .pdf \
"application/photoshop", # .psd \
"application/postscript", # .ps, .avi, .eps \
"application/powerpoint", # .ppt \
"application/psd", # .psd \
"application/rss+xml", # .rss \
"application/rtf", # .rtf \
"application/tar", # .tar \
"application/vnd.ms-excel", # .xls, .xlt, .xla \
"application/vnd.ms-excel.addin.macroEnabled.12", # .xlam \
"application/vnd.ms-excel.sheet.binary.macroEnabled.12", # .xlsb \
"application/vnd.ms-excel.sheet.macroEnabled.12", # .xlsm \
"application/vnd.ms-excel.template.macroEnabled.12", # .xltm \
"application/vnd.ms-powerpoint", # .pps, .ppt, .pot, .ppa \
"application/vnd.ms-powerpoint.addin.macroEnabled.12", # .ppam \
"application/vnd.ms-powerpoint.presentation.macroEnabled.12", # .pptm \
"application/vnd.ms-powerpoint.slideshow.macroEnabled.12", # .ppsm \
"application/vnd.ms-powerpoint.template.macroEnabled.12", # .potm \
"application/vnd.ms-word", # .doc \
"application/vnd.ms-word.document.macroEnabled.12", # .docm \
"application/vnd.ms-word.template.macroEnabled.12", # .dotm \
"application/vnd.msexcel", # .xls \
"application/vnd.mspowerpoint", # .ppt \
"application/vnd.msword", # .doc \
"application/vnd.openxmlformats-officedocument.presentationml.presentation", # .pptx \
"application/vnd.openxmlformats-officedocument.presentationml.template", # .potx \
"application/vnd.openxmlformats-officedocument.presentationml.slideshow", # .ppsx \
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", # .xlsx \
"application/vnd.openxmlformats-officedocument.spreadsheetml.template", # .xltx \
"application/vnd.openxmlformats-officedocument.wordprocessingml.document", # .docx \
"application/vnd.openxmlformats-officedocument.wordprocessingml.template", # .dotx \
"application/vnd.pdf", # .pdf \
"application/vnd-mspowerpoint", # .ppt \
"application/winword", # .doc \
"application/word", # .doc \
"application/x-acad", # .dwg \
"application/x-apple-diskimage", # .dmg \
"application/x-autocad", # .dwg \
"application/x-bibtex", # .bib \
"application/x-compress", # .gz, .tar, .zip \
"application/x-compressed", # .gz, .tar, .zip \
"application/x-dos_ms_excel", # .xls \
"application/x-dwg", # .dwg \
"application/x-endnote-refer", # .enw \
"application/x-eps", # .eps \
"application/x-excel", # .xls \
"application/x-gtar", # .tar \
"application/x-gunzip", # .gz \
"application/x-gzip", # .gz \
"application/x-jpg", # .jpg \
"application/x-m", # .ppt \
"application/x-ms-excel", # .xls \
"application/x-msexcel", # .xls \
"application/x-mspublisher", # .pub \
"application/x-msw6", # .doc \
"application/x-msword", # .doc \
"application/x-ole-storage", # .msi \
"application/x-pdf", # .pdf \
"application/x-powerpoint", # .ppt \
"application/x-rtf", # .rtf \
"application/x-shockwave-flash", # .swf \
"application/x-shockwave-flash2-preview", # .swf \
"application/x-tar", # .tar \
"application/x-troff-msvideo", # .avi \
"application/x-soffice", # .rtf \
"application/x-xml", # .xml, .pub \
"application/x-zip", # .zip \
"application/x-zip-compressed", # .zip \
"application/xls", # .xls \
"application/xml", # .xml, .pub \
"application/zip", # .zip \
"audio/aiff", # .avi, .mov \
"audio/avi", # .avi \
"audio/mp3", # .mp3 \
"audio/mp4", # .mp4 \
"audio/mpg", # .mp3 \
"audio/mpeg", # .mp3 \
"audio/mpeg3", # .mp3 \
"audio/x-midi", # .mov \
"audio/x-mp3", # .mp3 \
"audio/x-mpg", # .mp3 \
"audio/x-mpeg", # .mp3 \
"audio/x-mpeg3", # .mp3 \
"audio/x-mpegaudio", # .mp3 \
"audio/x-wav", # .mov \
"drawing/dwg", # .dwg \
"gzip/document", # .gz \
"image/avi", # .avi \
"image/eps", # .eps \
"image/gi_", # .gif \
"image/gif", # .eps, .gif \
"image/jpeg", # .jpg, .jpeg \
"image/jpg", # .jpg \
"image/jp_", # .jpg \
"image/mpeg", # .mpeg \
"image/mov", # .mov \
"image/photoshop", # .psd \
"image/pipeg", # .jpg \
"image/pjpeg", # .jpg \
"image/png", # .png \
"image/psd", # .psd \
"image/vnd.dwg", # .dwg \
"image/vnd.rn-realflash", # .swf \
"image/vnd.swiftview-jpeg", # .jpg \
"image/x-eps", # .eps \
"image/x-dwg", # .dwg \
"image/x-photoshop", # .psd \
"image/x-xbitmap", # .gif, .jpg \
"multipart/x-tar", # .tar \
"multipart/x-zip", # .zip \
"octet-stream", # possibly some .ppt files \
"text/csv", # .csv \
"text/mspg-legacyinfo", # .msi \
"text/pdf", # .pdf \
"text/richtext", # .rtf \
"text/rtf", # .rtf \
"text/x-pdf", # .pdf \
"text/xml", # .xml, .rss \
"video/avi", # .avi, .mov \
"video/mp4v-es", # .mp4 \
"video/msvideo", # .avi \
"video/quicktime", # .mov \
"video/x-flv", # .flv \
"video/x-m4v", # .m4v \
"video/x-msvideo", # .avi \
"video/x-quicktime", # .mov \
"video/xmpg2", # .avi \
"zz-application/zz-winassoc-psd", # .psd \
]
class Browser(object):
    """hubcheck webdriver interface

    Wraps a selenium webdriver plus an optional browsermob proxy client.
    Subclasses are expected to override setup_browser_preferences() and
    launch() for a concrete browser.
    """

    def __init__(self, mimetypes=None, downloaddir='/tmp'):
        """Initialize logging and default state.

        mimetypes   -- mime types the browser should handle without
                       prompting; None means an empty list.  A fresh list
                       is built per instance to avoid the shared
                       mutable-default-argument pitfall of ``mimetypes=[]``.
        downloaddir -- directory downloads are saved to.
        """
        self.logger = logging.getLogger(__name__)
        self.logger.info("setting up a web browser")
        self._browser = None
        self.wait_time = 2
        self.marker = 0
        self.proxy_client = None
        self.proxy_blacklist = PROXY_BLACKLIST
        self.profile = None
        self.downloaddir = downloaddir
        self.mimetypes = mimetypes if mimetypes is not None else []

    def __del__(self):
        # make sure the webdriver process is shut down with the wrapper
        self.close()

    def setup_browser_preferences(self):
        """browser preferences should be setup by subclasses
        """
        pass

    def start_proxy_client(self):
        """Create a proxy client and install the URL blacklist on it."""
        # setup proxy if needed
        if hubcheck.conf.settings.proxy is None:
            self.logger.info("proxy not started, not starting client")
            return
        # start the client
        self.proxy_client = hubcheck.conf.settings.proxy.create_client()
        # setup the proxy website blacklist
        if self.proxy_client is not None:
            self.logger.info("setting up proxy blacklist")
            for url_re in self.proxy_blacklist:
                self.logger.debug("blacklisting %s" % url_re)
                # every blacklisted pattern gets a canned 200 response
                self.proxy_client.blacklist(url_re, 200)

    def stop_proxy_client(self):
        """Shut down the proxy client if one was started."""
        if self.proxy_client is not None:
            self.logger.info("stopping proxy client")
            self.proxy_client.close()
            self.proxy_client = None

    def setup_browser_size_and_position(self):
        """Apply implicit wait, window position and window size defaults."""
        # set the amount of time to wait for an element to appear on the page
        self._browser.implicitly_wait(self.wait_time)
        # place the browser window in the upper left corner of the screen
        self._browser.set_window_position(0, 0)
        # resize the window to just shy of our 1024x768 screen
        self._browser.set_window_size(1070, 700)

    def launch(self):
        """subclass should add code required to launch the browser
        """
        pass

    def get(self, url):
        """Navigate to url, lazily launching the browser first."""
        if self._browser is None:
            self.launch()
        self.logger.debug("retrieving url: %s" % (url))
        self._browser.get(url)

    def close(self):
        """Quit the webdriver and release associated resources."""
        if self._browser is None:
            return
        self.logger.info("closing browser")
        self._browser.quit()
        self._browser = None
        # drop the stale profile; the original `self.profile` was a bare
        # no-op expression, the assignment is what was evidently intended
        self.profile = None
        self.stop_proxy_client()

    def error_loading_page(self, har_entry):
        """
        check if there was an error loading the web page
        returns True or False
        """
        harurl = har_entry['request']['url']
        harstatus = har_entry['response']['status']
        self.logger.debug("%s returned status %s" % (harurl, harstatus))
        # 1xx (info), 2xx (success) and 3xx (redirect) are not errors;
        # 4xx (client error), 5xx (server error) and anything outside the
        # known ranges count as a failure to load the page.
        if 100 <= harstatus <= 399:
            return False
        return True

    def page_load_details(self, url=None, follow_redirects=True):
        """
        return the har entry for the last page loaded

        follow redirects to make sure you get the har entry
        for the page that was eventually loaded.

        A return value of None means no page was ever loaded.
        """
        if not self.proxy_client:
            return None
        if url is None:
            url = self._browser.current_url
        self.logger.debug("processing har for %s" % (url))
        har = self.proxy_client.har
        self.logger.debug("har entry = %s" % (pprint.pformat(har)))
        return_entry = None
        for entry in har['log']['entries']:
            harurl = entry['request']['url']
            harstatus = entry['response']['status']
            if url is None:
                # we are following a redirect from below
                return_entry = entry
            elif url == harurl:
                # the original url matches the url for this har entry exactly
                return_entry = entry
            elif (not url.endswith('/')) and (url + '/' == harurl):
                # the original url almost matches the url for this har entry
                return_entry = entry
            if return_entry is not None:
                if follow_redirects and (300 <= harstatus <= 399):
                    # follow the redirect (should be the next har entry)
                    url = None
                    continue
                else:
                    # found our match
                    break
        self.logger.debug("har for url = %s" % (pprint.pformat(return_entry)))
        return return_entry

    def take_screenshot(self, filename=None):
        """
        Take a screen shot of the browser, store it in filename.
        """
        if self._browser is None:
            return
        if filename is None:
            # default name embeds a second-resolution timestamp
            dts = datetime.datetime.today().strftime("%Y%m%d%H%M%S")
            filename = 'hcss_%s.png' % dts
        self.logger.debug("screenshot filename: %s" % (filename))
        self._browser.save_screenshot(filename)

    def next_marker(self):
        """Return a monotonically increasing integer marker."""
        self.marker += 1
        return self.marker
| 45.046632 | 93 | 0.41868 | 1,428 | 17,388 | 5.037815 | 0.267507 | 0.055046 | 0.028913 | 0.040867 | 0.162775 | 0.034195 | 0.027245 | 0.019461 | 0.01112 | 0.01112 | 0 | 0.010867 | 0.470784 | 17,388 | 385 | 94 | 45.163636 | 0.770919 | 0.307223 | 0 | 0.093633 | 0 | 0 | 0.317729 | 0.180696 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048689 | false | 0.007491 | 0.018727 | 0 | 0.097378 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
300f199f66802964a7132356800aed7b1a0be7f9 | 7,057 | py | Python | appengine/gae_defer_manager/deferred_manager/tests.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 6 | 2018-07-31T16:48:07.000Z | 2020-02-01T03:17:51.000Z | appengine/gae_defer_manager/deferred_manager/tests.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 41 | 2018-08-07T16:43:07.000Z | 2020-06-05T18:54:50.000Z | appengine/gae_defer_manager/deferred_manager/tests.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 1 | 2018-08-07T16:40:18.000Z | 2018-08-07T16:40:18.000Z | # -*- coding: utf8 -*-
import datetime
import mock
import os
import unittest
import webapp2
from google.appengine.ext import testbed, deferred
from google.appengine.api import queueinfo
from . import models
from .handler import application
from .wrapper import defer
TESTCONFIG_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "testconfig")
def noop(*args, **kwargs):
    """Deferred target that accepts any arguments and does nothing."""
    return None
def noop_fail(*args, **kwargs):
    """Deferred target that always fails with a generic Exception."""
    raise Exception()
def noop_permanent_fail(*args, **kwargs):
    # Deferred target raising the sentinel exception that tells the App
    # Engine deferred machinery not to retry the task.
    raise deferred.PermanentTaskFailure
class Foo(object):
    """Fixture exposing a bound method and a callable class, used by the
    repr tests to exercise how defer() names different callables."""

    def bar(self):
        return None

    def __call__(self):
        return None
class BaseTest(unittest.TestCase):
    """Shared fixture: activates the App Engine testbed with datastore and
    task queue stubs (queue configuration read from TESTCONFIG_DIR)."""

    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_taskqueue_stub(root_path=TESTCONFIG_DIR)
        self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        super(BaseTest, self).setUp()

    def reload(self, obj):
        # Re-fetch the entity from the datastore so assertions see the
        # state persisted by the handler, not the in-memory copy.
        return obj.get(obj.key())
class DeferTaskTests(BaseTest):
    """Tests for the defer() wrapper: task-state creation, task-reference
    uniqueness, and the human-readable reprs stored for the deferred
    callable and its arguments."""

    def test_creates_state(self):
        task_state = defer(noop)
        queue_state = models.QueueState.get_by_key_name("default")
        self.assertTrue(queue_state)
        # the task state is parented under its queue's state entity
        self.assertEqual(task_state.parent().key(), queue_state.key())

    def test_unique_task_ref(self):
        unique_until = datetime.datetime.utcnow() + datetime.timedelta(days=1)
        # unique_until without a task_reference is a usage error
        self.assertRaises(AssertionError, defer, noop, unique_until=unique_until)
        self.assertTrue(defer(noop, task_reference="project1", unique_until=unique_until))
        # a second defer with the same reference inside the window is rejected
        self.assertFalse(defer(noop, task_reference="project1", unique_until=unique_until))

    def test_args_repr(self):
        task_state = defer(noop, 2, u"bår")
        self.assertEqual(task_state.deferred_args, u"(2, u'b\\xe5r')")

    def test_kwargs_repr(self):
        # underscore-prefixed kwargs are consumed by defer itself, so only
        # 'foo' shows up in the stored repr
        task_state = defer(noop, foo="bår", _bar="foo")
        self.assertEqual(task_state.deferred_kwargs, u"{'foo': 'b\\xc3\\xa5r'}")

    def test_class_method_repr(self):
        task_state = defer(Foo().bar)
        self.assertEqual(task_state.deferred_function, u"<class 'deferred_manager.tests.Foo'>.bar")

    def test_module_func_repr(self):
        task_state = defer(noop)
        self.assertEqual(task_state.deferred_function, u"deferred_manager.tests.noop")

    def test_builtin_func_repr(self):
        task_state = defer(map)
        self.assertEqual(task_state.deferred_function, u"map")

    def test_callable_obj_func_repr(self):
        task_state = defer(Foo)
        self.assertEqual(task_state.deferred_function, u"deferred_manager.tests.Foo")

    def test_builtin_method_repr(self):
        task_state = defer(datetime.datetime.utcnow)
        self.assertEqual(task_state.deferred_function, u"<type 'datetime.datetime'>.utcnow")
class ModelTaskTests(unittest.TestCase):
    """Plain model tests (no testbed needed) for QueueState defaults."""

    def test_queue_state(self):
        queue_state = models.QueueState(name="default")
        self.assertEqual(queue_state.retry_limit, 7)
        self.assertEqual(queue_state.age_limit, 2*24*3600)  # 2 days
class HandlerTests(BaseTest):
    """End-to-end tests driving the deferred-task WSGI handler with
    synthetic task queue requests and checking the persisted task state."""

    def make_request(self, path, task_name, queue_name, headers=None, environ=None, **kwargs):
        # Build a webapp2 request shaped like what the App Engine task
        # queue service sends to a worker; 'retries' maps onto the
        # X-AppEngine-TaskExecutionCount header.
        request_headers = {
            "X-AppEngine-TaskName": task_name,
            "X-AppEngine-QueueName": queue_name,
            'X-AppEngine-TaskExecutionCount': kwargs.pop('retries', 0)
        }
        if headers:
            request_headers.update(headers)
        request_environ = {
            "SERVER_SOFTWARE": "Development"
        }
        if environ:
            request_environ.update(environ)
        return webapp2.Request.blank('/', environ=request_environ, headers=request_headers, **kwargs)

    def test_success(self):
        task_state = defer(noop)
        noop_pickle = deferred.serialize(noop)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
        task_state = self.reload(task_state)
        self.assertTrue(task_state.task_name)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertFalse(task_state.is_permanently_failed)

    def test_failure(self):
        # a failing task returns 500 so the queue retries it; state stays open
        task_state = defer(noop_fail)
        noop_pickle = deferred.serialize(noop_fail)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 500)
        task_state = self.reload(task_state)
        self.assertFalse(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertFalse(task_state.is_permanently_failed)

    def test_retry_success(self):
        task_state = defer(noop)
        noop_pickle = deferred.serialize(noop)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=2)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
        task_state = self.reload(task_state)
        self.assertEqual(task_state.retry_count, 2)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertFalse(task_state.is_permanently_failed)

    def test_retry_max_retries(self):
        task_state = defer(noop_fail)
        # give the task an old age. tasks must fail both the retry and age conditions (if specified)
        task_state.first_run = datetime.datetime.utcnow() - datetime.timedelta(days=2)
        task_state.put()
        noop_pickle = deferred.serialize(noop_fail)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=8)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 500)
        task_state = self.reload(task_state)
        self.assertEqual(task_state.retry_count, 8)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertTrue(task_state.is_permanently_failed)

    def test_permanent_failure(self):
        # PermanentTaskFailure returns 200 (no retry) but marks the state failed
        task_state = defer(noop_permanent_fail)
        noop_pickle = deferred.serialize(noop_permanent_fail)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
        task_state = self.reload(task_state)
        self.assertEqual(task_state.retry_count, 0)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertTrue(task_state.is_permanently_failed)

    def test_no_task_state(self):
        # a task without a stored TaskState still executes successfully
        noop_pickle = deferred.serialize(noop)
        request = self.make_request("/", 'task1', 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
| 34.257282 | 102 | 0.69803 | 876 | 7,057 | 5.360731 | 0.186073 | 0.111158 | 0.035136 | 0.04983 | 0.585818 | 0.559412 | 0.465716 | 0.439523 | 0.439523 | 0.40609 | 0 | 0.008289 | 0.196542 | 7,057 | 205 | 103 | 34.42439 | 0.819929 | 0.016721 | 0 | 0.3125 | 0 | 0 | 0.053937 | 0.023652 | 0 | 0 | 0 | 0 | 0.270833 | 1 | 0.166667 | false | 0.020833 | 0.069444 | 0.006944 | 0.284722 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30113171dc48ed74cadebf84f1f1fd11cb8f6566 | 4,023 | py | Python | BdBG.py | rongjiewang/BdBG | b4a8fab0fa083aecab10f15431e37b0445722007 | [
"MIT"
] | 2 | 2018-11-21T06:39:34.000Z | 2018-11-21T06:43:53.000Z | BdBG.py | rongjiewang/BdBG | b4a8fab0fa083aecab10f15431e37b0445722007 | [
"MIT"
] | null | null | null | BdBG.py | rongjiewang/BdBG | b4a8fab0fa083aecab10f15431e37b0445722007 | [
"MIT"
] | null | null | null | """MIT License
Copyright (c) 2018 rongjiewang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import sys
import argparse
from bucket import encodeBucketClass, decodeBucketClass
from deBruijnGraph import encodeGraphClass, decodeGraphClass
def args_check(args):
    """Validate the parsed CLI options; abort via sys.exit() with an
    explanatory message when a required option is missing."""
    if not (args.encode or args.decode):
        sys.exit("you must give a -e or -d for encode/decode")
    if not (args.input or args.paired):
        sys.exit("you must give a file input with -i input for single end data or -p -1 input1 -2 input2 for paired-end data")
    if not args.output:
        sys.exit("you must give a file output with -o output")
    return
def main(args):
    """Run the BdBG pipeline: a bucket pass followed by a de Bruijn graph
    pass, in either encode or decode direction depending on the flags."""
    args_check(args)
    #encode
    if args.encode:
        en_bucket = encodeBucketClass(args.input, args.output, args.paired, \
            args.input1, args.input2, args.kmer, args.lossless, args.verbose)
        en_bucket.encode()
        en_graph = encodeGraphClass(args.output, args.paired, args.kmer, \
            args.verbose, en_bucket.sequenceTableSave)
        # release the bucket before running the graph pass to cut peak memory
        del en_bucket
        en_graph.encode()
        del en_graph
        sys.exit()
    #decode
    else:
        de_bucket = decodeBucketClass(args.input, args.output, args.verbose)
        de_bucket.decode()
        # decoder parameters (paired, counts, lossless flag) come from the
        # metadata recovered by the bucket decoder
        de_graph = decodeGraphClass(args.input, args.output, de_bucket.paired, de_bucket.readNum,\
            de_bucket.bucketIndexLen, de_bucket.lossless, de_bucket.verbose)
        de_graph.loadBucktData(de_bucket.bucketIndex, de_bucket.bucketCov, de_bucket.readIndexPos,\
            de_bucket.readrc, de_bucket.readN, de_bucket.readLen, de_bucket.readOrder)
        del de_bucket
        de_graph.decode()
        del de_graph
        sys.exit()
if __name__ == '__main__':
    # Command line interface for BdBG: -e/-d select encode vs decode,
    # -i/-o (or -p with -1/-2 for paired-end data) select the files.
    parser = argparse.ArgumentParser(description = 'BdBG')
    parser.add_argument("-e", "--encode",
        help="encoding",action="store_true")
    parser.add_argument("-d", "--decode",
        help="decoding",action="store_true")
    parser.add_argument("-i", "--input",type=str,
        help="inputFile")
    parser.add_argument("-o", "--output",
        help="outputFile")
    parser.add_argument("-p", "--paired",
        help="paired-end flag",action="store_true")
    parser.add_argument("-1", "--input1",
        help="paired-end file1")
    parser.add_argument("-2", "--input2",
        help="paired-end file2")
    parser.add_argument("-l", "--lossless",
        # fixed typo in the help text: "ture" -> "true"
        help="keep the reads orders, default:false, \
        if encode paired-end files, default:true ",action="store_true")
    parser.add_argument("-k", "--kmer",type=int, default=15,
        help="kmer size for bucket and de Bruijn graph, default=15")
    parser.add_argument("-v","--verbose", action="store_true",
        help="verbose information")
    args = parser.parse_args()
    main(args)
| 36.243243 | 126 | 0.653492 | 517 | 4,023 | 4.984526 | 0.359768 | 0.046566 | 0.065968 | 0.032596 | 0.105937 | 0.074893 | 0.01785 | 0 | 0 | 0 | 0 | 0.006634 | 0.250559 | 4,023 | 110 | 127 | 36.572727 | 0.848093 | 0.268456 | 0 | 0.033333 | 0 | 0.016667 | 0.171731 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.066667 | 0 | 0.116667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30119e15a78b5e7aea8cf1c27d45b2140994ce7e | 10,820 | py | Python | web/lib/console.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-09-13T17:51:55.000Z | 2020-11-25T18:47:12.000Z | web/lib/console.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-11-25T19:18:15.000Z | 2021-06-01T21:48:12.000Z | web/lib/console.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | null | null | null | """
This module provides the interactive Python console.
"""
import sys
import traceback
from browser import window
class Console:
    """
    A class providing a console widget. The constructor accepts
    a domnode which should be a textarea and it takes it over
    and turns it into a python interactive console.
    """
    # Static texts echoed by the credits()/copyright()/license() commands.
    _credits = """    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development.  See www.python.org for more information.
"""
    _copyright = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com
All Rights Reserved.

Copyright (c) 2001-2013 Python Software Foundation.
All Rights Reserved.

Copyright (c) 2000 BeOpen.com.
All Rights Reserved.

Copyright (c) 1995-2001 Corporation for National Research Initiatives.
All Rights Reserved.

Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.
All Rights Reserved.
"""
    _license = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer. Redistributions in binary
form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided
with the distribution.
Neither the name of the <ORGANIZATION> nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""

    def __init__(self, elem):
        self._elem = elem
        # NOTE(review): CPython does not allow setting attributes on bound
        # methods -- presumably this relies on Brython-specific behavior so
        # that typing `credits` at the prompt echoes the text; confirm
        # before reusing outside Brython.
        self.credits.__repr__ = lambda: Console._credits
        self.copyright.__repr__ = lambda: Console._copyright
        self.license.__repr__ = lambda: Console._license
        self._redirected = False
        self._oldstdout = None
        self._oldstderr = None
        self.history = []
        self.current = 0
        self._status = "main"  # or "block" if typing inside a block
        self.current_line = ""
        # execution namespace
        self.editor_ns = {
            'credits': self.credits,
            'copyright': self.copyright,
            'license': self.license,
            '__name__': '__console__',
        }
        self._elem.bind('keypress', self.my_key_press)
        self._elem.bind('keydown', self.my_key_down)
        self._elem.bind('click', self.cursor_to_end)
        version = sys.implementation.version
        # initial banner followed by the first ">>> " prompt
        self._elem.value = "Brython %s.%s.%s on %s %s\n%s\n>>> " % (version[0],
            version[1],
            version[2],
            window.navigator.appName,
            window.navigator.appVersion,
            'Type "copyright()", "credits()" or "license()" for more information.')
        self._elem.focus()
        self.cursor_to_end()

    def add_to_ns(self, key, value):
        """
        Adds key to the console's local scope. Think:
        ```
        key=value
        ```
        """
        self.editor_ns[key] = value

    def _redirect_out(self):
        # Toggle helper: swaps sys.stdout/stderr with this console (which
        # is file-like via write()) and back again; called in pairs around
        # each execution of user code.
        if self._redirected:
            sys.__console__ = False
            sys.stdout = self._oldstdout
            sys.stderr = self._oldstderr
            self._redirected = False
        else:
            sys.__console__ = True
            self._oldstdout = sys.stdout
            self._oldstderr = sys.stderr
            sys.stdout = self
            sys.stderr = self
            self._redirected = True

    def credits(self):
        self.write(self._credits)

    def copyright(self):
        self.write(self._copyright)

    def license(self):
        self.write(self._license)

    def write(self, data):
        # file-like hook used while stdout/stderr are redirected here
        self._elem.value += str(data)

    def cursor_to_end(self, *_args):
        # Move the caret to the very end of the textarea and scroll down.
        pos = len(self._elem.value)
        self._elem.setSelectionRange(pos, pos)
        self._elem.scrollTop = self._elem.scrollHeight

    def get_col(self, _area):
        """
        returns the column position of the cursor
        """
        sel = self._elem.selectionStart
        lines = self._elem.value.split('\n')
        for line in lines[:-1]:
            sel -= len(line) + 1
        return sel

    def my_key_press(self, event):
        # Handles Tab (indent) and Return (evaluate the current line or
        # close an indented block), driving the main/block/3string state
        # machine.
        if event.keyCode == 9:  # tab key
            event.preventDefault()
            self._elem.value += "    "
        elif event.keyCode == 13:  # return
            src = self._elem.value
            # extract the text typed after the last prompt
            if self._status == "main":
                self.current_line = src[src.rfind('>>>') + 4:]
            elif self._status == "3string":
                self.current_line = src[src.rfind('>>>') + 4:]
                self.current_line = self.current_line.replace('\n... ', '\n')
            else:
                self.current_line = src[src.rfind('...') + 4:]
            if self._status == 'main' and not self.current_line.strip():
                # empty line at the main prompt: just print a new prompt
                self._elem.value += '\n>>> '
                event.preventDefault()
                return
            self._elem.value += '\n'
            self.history.append(self.current_line)
            self.current = len(self.history)
            if self._status == "main" or self._status == "3string":
                try:
                    self._redirect_out()
                    _ = self.editor_ns['_'] = eval(self.current_line, self.editor_ns)
                    if _ is not None:
                        self.write(repr(_) + '\n')
                    self._elem.value += '>>> '
                    self._status = "main"
                except IndentationError:
                    # start of an indented block
                    self._elem.value += '... '
                    self._status = "block"
                except SyntaxError as msg:
                    if str(msg) == 'invalid syntax : triple string end not found' or \
                            str(msg).startswith('Unbalanced bracket'):
                        # unterminated string/bracket: keep reading lines
                        self._elem.value += '... '
                        self._status = "3string"
                    elif str(msg) == 'eval() argument must be an expression':
                        # a statement rather than an expression: exec it
                        try:
                            self._redirect_out()
                            exec(self.current_line, self.editor_ns)
                        except:
                            # pylint: disable=bare-except; any exception can happen here
                            traceback.print_exc(self)
                        finally:
                            self._redirect_out()
                        self._elem.value += '>>> '
                        self._status = "main"
                    elif str(msg) == 'decorator expects function':
                        self._elem.value += '... '
                        self._status = "block"
                    else:
                        traceback.print_exc(self)
                        self._elem.value += '>>> '
                        self._status = "main"
                # pylint: disable=bare-except; any exception can happen here
                except:
                    traceback.print_exc(self)
                    self._elem.value += '>>> '
                    self._status = "main"
                finally:
                    self._redirect_out()
            elif self.current_line == "":  # end of block
                # gather the whole block, stripping the "... " continuation
                # markers from every line after the first
                block = src[src.rfind('>>>') + 4:].splitlines()
                block = [block[0]] + [b[4:] for b in block[1:]]
                block_src = '\n'.join(block)
                # status must be set before executing code in globals()
                self._status = "main"
                try:
                    self._redirect_out()
                    _ = exec(block_src, self.editor_ns)
                    if _ is not None:
                        print(repr(_))
                # pylint: disable=bare-except; any exception can happen here
                except:
                    traceback.print_exc(self)
                finally:
                    self._redirect_out()
                self._elem.value += '>>> '
            else:
                self._elem.value += '... '
            self.cursor_to_end()
            event.preventDefault()

    def my_key_down(self, event):
        # Handles navigation keys: guards so the 4-char prompt cannot be
        # edited (left arrow, home, backspace) and command history recall
        # (up/down arrows).
        if event.keyCode == 37:  # left arrow
            sel = self.get_col(self._elem)
            if sel < 5:
                event.preventDefault()
                event.stopPropagation()
        elif event.keyCode == 36:  # line start
            pos = self._elem.selectionStart
            col = self.get_col(self._elem)
            # jump to just after the prompt, not column 0
            self._elem.setSelectionRange(pos - col + 4, pos - col + 4)
            event.preventDefault()
        elif event.keyCode == 38:  # up
            if self.current > 0:
                pos = self._elem.selectionStart
                col = self.get_col(self._elem)
                # remove self.current line
                self._elem.value = self._elem.value[:pos - col + 4]
                self.current -= 1
                self._elem.value += self.history[self.current]
            event.preventDefault()
        elif event.keyCode == 40:  # down
            if self.current < len(self.history) - 1:
                pos = self._elem.selectionStart
                col = self.get_col(self._elem)
                # remove self.current line
                self._elem.value = self._elem.value[:pos - col + 4]
                self.current += 1
                self._elem.value += self.history[self.current]
            event.preventDefault()
        elif event.keyCode == 8:  # backspace
            src = self._elem.value
            lstart = src.rfind('\n')
            # do not allow deleting into the prompt characters
            if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6):
                event.preventDefault()
                event.stopPropagation()
30124f8335d73ee7802841f7737a00cbfad26c9f | 1,333 | py | Python | lldb/test/API/functionalities/breakpoint/breakpoint_on_overload/TestBreakOnOverload.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | lldb/test/API/functionalities/breakpoint/breakpoint_on_overload/TestBreakOnOverload.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | lldb/test/API/functionalities/breakpoint/breakpoint_on_overload/TestBreakOnOverload.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | [
"Apache-2.0"
] | null | null | null | """
Test setting a breakpoint on an overloaded function by name.
"""
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBreakpointOnOverload(TestBase):
    """Verify that a breakpoint set by an overloaded function's full name
    resolves to exactly one location on the intended function."""

    mydir = TestBase.compute_mydir(__file__)

    def check_breakpoint(self, name):
        """Set a breakpoint on *name* and assert it lands on that function."""
        breakpoint = self.target.BreakpointCreateByName(name)
        self.assertEqual(breakpoint.num_locations, 1, "Got one location")
        address = breakpoint.locations[0].GetAddress()
        self.assertTrue(address.function.IsValid(), "Got a real function")
        # On Windows the function name includes the return value, so the
        # breakpoint still gets set but the resultant name differs.  Use a
        # substring check rather than an equality check to stay portable.
        self.assertIn(name, address.function.name, "Got the right name")

    def test_break_on_overload(self):
        """Exercise each overload of a_function by its demangled name."""
        self.build()
        self.target = lldbutil.run_to_breakpoint_make_target(self)
        for signature in (
            "a_function(int)",
            "a_function(double)",
            "a_function(int, double)",
            "a_function(double, int)",
        ):
            self.check_breakpoint(signature)
| 35.078947 | 72 | 0.685671 | 170 | 1,333 | 5.247059 | 0.452941 | 0.084081 | 0.085202 | 0.089686 | 0.152466 | 0.152466 | 0 | 0 | 0 | 0 | 0 | 0.001942 | 0.227307 | 1,333 | 37 | 73 | 36.027027 | 0.864078 | 0.24006 | 0 | 0 | 0 | 0 | 0.131868 | 0 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.1 | false | 0 | 0.25 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30142c5188e376313f7d79178393a7007c7faa25 | 2,611 | py | Python | Intermedio/28 Pomodoro/main.py | YosafatM/100-days-of-Python | e81ab663b7aacb7a904f27a4e6774837cf3594a1 | [
"MIT"
] | null | null | null | Intermedio/28 Pomodoro/main.py | YosafatM/100-days-of-Python | e81ab663b7aacb7a904f27a4e6774837cf3594a1 | [
"MIT"
] | null | null | null | Intermedio/28 Pomodoro/main.py | YosafatM/100-days-of-Python | e81ab663b7aacb7a904f27a4e6774837cf3594a1 | [
"MIT"
] | null | null | null | from tkinter import *
# ---------------------------- CONSTANTS ------------------------------- #
# UI colour palette (hex RGB).
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
# Pomodoro phase lengths, in minutes.
WORK_MIN = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
# Mutable module-level state shared by the timer callbacks below.
is_counting = False  # True while a countdown is running
reps = 0  # number of work/break phases started so far
timer = None  # handle from window.after(), kept so it can be cancelled
# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
    """Cancel any pending countdown and restore the UI to its idle state."""
    global timer, reps, is_counting
    if timer is not None:
        # Stop the callback previously scheduled via window.after().
        window.after_cancel(timer)
    # Restore the visible widgets to their start-up appearance.
    lb_title.config(text="Timer", fg=GREEN)
    canvas.itemconfig(count_text, text="00:00")
    lb_checks["text"] = ""
    # Clear the shared timer state in one go.
    timer, reps, is_counting = None, 0, False
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
    """Advance to the next pomodoro phase and start its countdown.

    Phase schedule: odd reps are work sessions, even reps are short breaks,
    and every 8th rep (after four work sessions) is a long break.

    Bug fix: the original ``if is_counting: pass`` was a no-op, so every
    press of the Start button spawned an additional concurrent countdown.
    Returning early makes Start idempotent while a timer is running.
    """
    global is_counting, reps
    if is_counting:
        return
    reps += 1
    if reps % 8 == 0:
        # Long break after four completed work sessions.
        lb_title.config(text="Break", fg=RED)
        minutes = LONG_BREAK_MIN
    elif reps % 2 == 0:
        # Every other phase is a short break.
        lb_title.config(text="Break", fg=PINK)
        minutes = SHORT_BREAK_MIN
    else:
        # Odd phases are work sessions.
        lb_title.config(text="Work", fg=GREEN)
        minutes = WORK_MIN
    is_counting = True
    count_down(minutes * 60)
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
    """Render *count* seconds as M:SS on the canvas and tick once per second.

    When a work phase (odd rep) reaches zero, a check mark is appended to
    the tally label and the following break phase starts automatically.
    """
    global timer, reps, is_counting
    mins, secs = divmod(count, 60)
    canvas.itemconfig(count_text, text=f"{mins}:{secs:02d}")
    if count > 0:
        # Re-arm ourselves in one second via the Tk event loop.
        timer = window.after(1000, count_down, count - 1)
    elif reps % 2 == 1:
        is_counting = False
        lb_checks["text"] += "✅"
        start_timer()  # roll straight into the break phase
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Pomodoro")
window.config(padx=100, pady=50, bg=YELLOW)
# Tomato image with the countdown text drawn over it.
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
image = PhotoImage(file="tomato.png")  # keep a module-level reference so Tk doesn't garbage-collect it
canvas.create_image(100, 112, image=image)
count_text = canvas.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold"))
# Control buttons and status labels wired to the callbacks defined above.
bt_start = Button(text="Start", highlightthickness=0, command=start_timer)
bt_reset = Button(text="Reset", highlightthickness=0, command=reset_timer)
lb_checks = Label(text="", fg=GREEN, bg=YELLOW)
lb_title = Label(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 30, "bold"))
# Grid layout: title row, canvas row, button row, check-mark row.
lb_title.grid(column=1, row=0)
canvas.grid(column=1, row=1)
bt_start.grid(column=0, row=2)
bt_reset.grid(column=2, row=2)
lb_checks.grid(column=1, row=3)
window.mainloop()
| 27.484211 | 99 | 0.573727 | 332 | 2,611 | 4.36747 | 0.307229 | 0.055172 | 0.035862 | 0.046897 | 0.074483 | 0.034483 | 0.034483 | 0 | 0 | 0 | 0 | 0.041805 | 0.193795 | 2,611 | 94 | 100 | 27.776596 | 0.646556 | 0.146304 | 0 | 0.101449 | 0 | 0 | 0.066787 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.014493 | 0.014493 | 0 | 0.057971 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3017c55b6cc7146b6404407438f7cdac4217ef3c | 2,529 | py | Python | dtcwt/plotting.py | santosh653/dtcwt | 01d9e87dc9abfa244a89c1f05aebf3dec6999f3a | [
"BSD-2-Clause"
] | 61 | 2015-01-04T09:21:29.000Z | 2022-03-07T16:25:02.000Z | dtcwt/plotting.py | santosh653/dtcwt | 01d9e87dc9abfa244a89c1f05aebf3dec6999f3a | [
"BSD-2-Clause"
] | 17 | 2015-04-02T13:37:07.000Z | 2018-03-07T09:57:57.000Z | dtcwt/plotting.py | santosh653/dtcwt | 01d9e87dc9abfa244a89c1f05aebf3dec6999f3a | [
"BSD-2-Clause"
] | 26 | 2015-04-16T06:22:16.000Z | 2021-12-07T09:17:44.000Z | """
Convenience functions for plotting DTCWT-related objects.
"""
from __future__ import absolute_import
import numpy as np
from matplotlib.pyplot import *
__all__ = (
'overlay_quiver',
)
def overlay_quiver(image, vectorField, level, offset):
    """Overlays nicely coloured quiver plot of complex coefficients over original full-size image,
    providing a useful phase visualisation.

    :param image: array holding grayscale values on the interval [0, 255] to display
    :param vectorField: a single [MxNx6] numpy array of DTCWT coefficients
    :param level: the transform level (1-indexed) of *vectorField*.
    :param offset: Offset for DTCWT coefficients (typically 0.5)

    .. note::

        The *level* parameter is 1-indexed meaning that the third level has
        index "3". This is unusual in Python but is kept for compatibility
        with similar MATLAB routines.

        Should also work with other types of complex arrays (e.g., SLP
        coefficients), as long as the format is the same.

        This function mutates *vectorField* in place: the bottom-right
        coefficient is overwritten with the global maximum magnitude so
        that every subband shares a common quiver scale.

    .. codeauthor:: R. Anderson, 2005 (MATLAB)
    .. codeauthor:: S. C. Forshaw, 2014 (Python)
    """
    # Make sure imshow() uses the full range of greyscale values.
    # Fix: pyplot.hold() was removed in matplotlib 3.0; drawing new artists
    # without clearing has been the default since 2.0, so the old
    # hold(True)/hold(False) calls are simply dropped.
    imshow(image, cmap=cm.gray, clim=(0, 255))

    # Set up the grid for the quiver plot.
    rows, cols = vectorField[:, :, 0].shape
    g1 = np.kron(np.arange(0, rows).T, np.ones((1, cols)))
    g2 = np.kron(np.ones((rows, 1)), np.arange(0, cols))

    # Choose a colourmap.  Fix: cm.spectral was removed from matplotlib;
    # its replacement is cm.nipy_spectral.
    cmap = cm.nipy_spectral
    scalefactor = np.max(np.max(np.max(np.max(np.abs(vectorField)))))
    vectorField[-1, -1, :] = scalefactor

    for sb in range(0, vectorField.shape[2]):
        # Select colour for this subband.
        thiscolour = cmap(sb / float(vectorField.shape[2]))
        hq = quiver(
            g2 * (2 ** level) + offset * (2 ** level),
            g1 * (2 ** level) + offset * (2 ** level),
            np.real(vectorField[:, :, sb]),
            np.imag(vectorField[:, :, sb]),
            color=thiscolour,
            scale=scalefactor * 2 ** level,
        )
        # Fix: np.str was removed in NumPy 1.24; the builtin str is the
        # documented replacement.
        quiverkey(
            hq,
            1.05,
            1.00 - 0.035 * sb,
            0,
            "subband " + str(sb),
            coordinates='axes',
            color=thiscolour,
            labelcolor=thiscolour,
            labelpos='E',
        )

    return hq
| 34.643836 | 145 | 0.670621 | 349 | 2,529 | 4.819484 | 0.458453 | 0.017836 | 0.040428 | 0.017836 | 0.065398 | 0.043995 | 0.01308 | 0 | 0 | 0 | 0 | 0.034432 | 0.196125 | 2,529 | 72 | 146 | 35.125 | 0.792917 | 0.514433 | 0 | 0.090909 | 0 | 0 | 0.02415 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
301a3402ce430cb702ae2f205a2ed74937b55dc4 | 2,481 | py | Python | graphdata/loglog.py | whalenpt/graphdata | d169150f860551d2049342ecf310dc1783987266 | [
"MIT"
] | null | null | null | graphdata/loglog.py | whalenpt/graphdata | d169150f860551d2049342ecf310dc1783987266 | [
"MIT"
] | null | null | null | graphdata/loglog.py | whalenpt/graphdata | d169150f860551d2049342ecf310dc1783987266 | [
"MIT"
] | null | null | null |
from graphdata.shared.shared1D import AuxPlotLabelLL1D
from graphdata.shared.shared1D import ProcessData1D
from graphdata.shared.shared1D import LoadData1D
from graphdata.shared.figsizes import LogLogSize
from graphdata.shared.shared import ExtendDictionary
from graphdata.shared.shared import ProcessComplex
from graphdata import plt
from graphdata import np
from graphdata import configs
def loglog(filename, figsize=None, decades=None, xlim=None, ylim=None,
           complex_op=None, overwrite=False, **kwargs):
    """
    Loglog graph of 1D data file using Matplotlib plt.loglog

    INPUTS:
        filename: string
            name of file containing 1D data to be plotted
        figsize: tuple (width,height)
            size of figure to be displayed
        xlim: np.array
            x-axis limits of graph
        ylim: np.array
            y-axis limits of graph
        decades: int
            number of decades of data below maximum to plot
        complex_op: string or None
            operation applied to complex-valued data before plotting
        overwrite: bool
            add lines to an existing plt.loglog graph if it exists
            (default is False which will create graph on a new figure)
        **kwargs: dictionary
            (optional) arguments to be passed onto plt.loglog plot

    OUTPUTS:
        fig : list of matplotlib Line2D objects returned by plt.loglog
    """
    x, y, auxDict = LoadData1D(filename)
    if complex_op is not None:
        y = ProcessComplex(complex_op, y)
    # Fill in defaults for anything the caller left unspecified.
    if decades is None:
        decades = configs._G['decades']
    if xlim is None:
        xlim = [x[0], x[-1]]
    if ylim is None:
        ylim = [np.min(y), np.max(y)]
    # Fix: the original resolved the figure size twice (before and after
    # ProcessData1D).  Resolve it once here; LogLogSize is presumably
    # idempotent on an already-resolved size — TODO confirm.
    figsize = LogLogSize(figsize)
    ExtendDictionary(auxDict, figsize=figsize, decades=decades,
                     xlim=xlim, ylim=ylim, overwrite=overwrite)
    x, y, auxDict = ProcessData1D(x, y, auxDict)
    if overwrite:
        # Reuse the shared "LogLog" figure, cycling the line style when it
        # already exists so successive curves are distinguishable.
        labs = plt.get_figlabels()
        if "LogLog" not in labs:
            configs.defaultLS()
        else:
            configs.toggleLS()
        plt.figure("LogLog", figsize=figsize)
    else:
        configs.defaultLS()
        plt.figure(figsize=figsize)
    fig = plt.loglog(x, y, configs.LS, **kwargs)
    plt.grid(True)
    AuxPlotLabelLL1D(auxDict)
    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    plt.ion()
    plt.show()
    return fig
| 29.535714 | 96 | 0.646514 | 322 | 2,481 | 4.962733 | 0.360248 | 0.073217 | 0.071339 | 0.050688 | 0.13204 | 0.031289 | 0.031289 | 0 | 0 | 0 | 0 | 0.009424 | 0.272874 | 2,481 | 83 | 97 | 29.891566 | 0.876386 | 0.341798 | 0 | 0.133333 | 0 | 0 | 0.012402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.2 | 0 | 0.244444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
301d726bb49a99005fdbccd8050406dd9847256c | 5,387 | py | Python | integration-testing/tests/suites/test_premium_account.py | pwei1018/bcrs-testing | 318845dede6ce5994b74b976d01f36a503036551 | [
"Apache-2.0"
] | 2 | 2020-10-23T22:08:34.000Z | 2021-10-19T19:37:21.000Z | integration-testing/tests/suites/test_premium_account.py | pwei1018/bcrs-testing | 318845dede6ce5994b74b976d01f36a503036551 | [
"Apache-2.0"
] | 6 | 2020-09-29T23:05:34.000Z | 2022-01-29T20:59:08.000Z | integration-testing/tests/suites/test_premium_account.py | pwei1018/bcrs-testing | 318845dede6ce5994b74b976d01f36a503036551 | [
"Apache-2.0"
] | 10 | 2020-09-29T23:05:46.000Z | 2021-11-29T23:07:10.000Z | import datetime
import json
import requests
import pytest
import random
from tests.suites.test_payment import TestPayment
from tests.utilities.settings import get_settings, get_test_data, setup_access_data
@pytest.mark.incremental
@pytest.mark.parametrize('login_session', setup_access_data('PREMIUM', ['BCSC']), indirect=True, scope='class')
@pytest.mark.usefixtures('setup_data')
class TestPremiumAccount:
    """Incremental end-to-end premium-account flow against the auth API.

    Fixes: ``test_get_user_profile`` was defined twice — the second
    definition silently shadowed the first at class-creation time, so the
    first never existed at runtime; the dead duplicate is removed.  An
    unused ``response_json`` local in ``test_link_bcol_account`` is also
    dropped.
    """

    __test__ = True

    def test_get_last_terms(self, testing_config, logger):
        """Get the latest version of the terms-of-use document."""
        response = requests.get(f'{testing_config.auth_api_url}/documents/termsofuse',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200
        response_json = response.json()
        # Saved for the accept step that follows in the incremental chain.
        testing_config.terms_version = response_json.get('versionId')

    def test_accept_terms(self, testing_config, logger):
        """Accept the terms of use on behalf of the logged-in user."""
        input_data = json.dumps({'termsversion': testing_config.terms_version, 'istermsaccepted': True})
        response = requests.patch(f'{testing_config.auth_api_url}/users/@me',
                                  headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                           'Content-Type': 'application/json'},
                                  data=input_data)
        assert response.status_code == 200

    def test_get_user_profile(self, testing_config, logger):
        """Get the user profile; after login the user should exist in the DB."""
        response = requests.get(f'{testing_config.auth_api_url}/users/@me',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200
        response_json = response.json()
        testing_config.user_id = response_json.get('keycloakGuid')

    @pytest.mark.skip_login_as('bcsc_member')
    def test_link_bcol_account(self, testing_config, logger):
        """Link a randomly chosen BC Online account to the user."""
        load_data = random.sample(get_settings().BCOL_USERS, 1)[0]
        input_data = json.dumps({
            'userId': load_data.username,
            'password': load_data.password
        })
        response = requests.post(f'{testing_config.auth_api_url}/bcol-profiles',
                                 headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                          'Content-Type': 'application/json'},
                                 data=input_data)
        assert response.status_code == 200

    @pytest.mark.skip_login_as('bcsc_member')
    def test_create_account(self, testing_config, logger):
        """Create a premium account (org) from test data."""
        input_data = json.dumps(get_test_data(testing_config.test_data['org']))
        response = requests.post(f'{testing_config.auth_api_url}/orgs',
                                 headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                          'Content-Type': 'application/json'},
                                 data=input_data)
        assert response.status_code == 201
        response_json = response.json()
        testing_config.org_id = response_json.get('id')

    def test_create_user_profile(self, testing_config, logger):
        """Create the user profile (contact information)."""
        input_data = json.dumps(get_test_data(testing_config.test_data['user_profile']))
        response = requests.post(f'{testing_config.auth_api_url}/users/contacts',
                                 headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                          'Content-Type': 'application/json'},
                                 data=input_data)
        assert response.status_code == 201

    def test_get_account(self, testing_config, logger):
        """Fetch the account created earlier in the chain."""
        response = requests.get(f'{testing_config.auth_api_url}/orgs/{testing_config.org_id}',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200

    def test_get_user_settings(self, testing_config, logger):
        """Fetch the user's settings."""
        response = requests.get(f'{testing_config.auth_api_url}/users/{testing_config.user_id}/settings',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200

    def test_get_user_notifications(self, testing_config, logger):
        """Fetch the user's notifications for the created org."""
        response = requests.get(f'{testing_config.auth_api_url}/users/{testing_config.user_id}/org/{testing_config.org_id}/notifications',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200
| 51.304762 | 138 | 0.634305 | 599 | 5,387 | 5.427379 | 0.173623 | 0.16395 | 0.052292 | 0.070747 | 0.707782 | 0.689326 | 0.641956 | 0.605352 | 0.605352 | 0.507536 | 0 | 0.007954 | 0.253202 | 5,387 | 104 | 139 | 51.798077 | 0.800149 | 0.057732 | 0 | 0.518987 | 0 | 0 | 0.256915 | 0.164577 | 0 | 0 | 0 | 0 | 0.126582 | 1 | 0.126582 | false | 0.012658 | 0.088608 | 0 | 0.240506 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30220c4e2730501b7f29fa25506bdd0fdf76716c | 16,677 | py | Python | ehr_ml/clmbr/__init__.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 4 | 2021-03-12T21:41:37.000Z | 2021-06-25T16:49:52.000Z | ehr_ml/clmbr/__init__.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 22 | 2020-11-19T00:04:27.000Z | 2022-03-02T18:16:08.000Z | ehr_ml/clmbr/__init__.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 2 | 2021-05-12T13:11:46.000Z | 2021-10-15T18:30:14.000Z | from __future__ import annotations
import argparse
import pickle
import numpy as np
import json
import logging
import math
import glob
import random
import os
import sys
import datetime
import time
from functools import partial
from pathlib import Path
from collections import defaultdict
from shutil import copyfile
from tqdm import tqdm
import sklearn.model_selection
import sklearn.metrics
import torch
from ..extension.clmbr import *
from .. import timeline
from .. import ontology
from .. import labeler
from .dataset import DataLoader, convert_patient_data
from .prediction_model import CLMBR
from .trainer import Trainer
from .utils import read_config, read_info, device_from_config
from ..featurizer import ColumnValue, Featurizer
from ..splits import read_time_split
from ..utils import OnlineStatistics, set_up_logging
from .opt import OpenAIAdam
from typing import Mapping, Any, Dict, Optional, Tuple
def check_dir_for_overwrite(dirname: str) -> bool:
    """Return True if `dirname` already holds model artifacts.

    A directory is considered occupied when it contains any ``*.json``
    file or a ``checkpoints`` entry; callers use this to refuse to
    overwrite a previous run's output.
    """
    json_files = glob.glob(os.path.join(dirname, "*.json"))
    checkpoints = glob.glob(os.path.join(dirname, "checkpoints"))
    return bool(json_files or checkpoints)
def create_info_program() -> None:
    """CLI entry point: precompute CLMBR training-data summary statistics.

    Runs ``create_info`` over the extract, stamps run metadata into the
    result, removes excluded patients (from an explicit file or by random
    sampling), logs code-frequency statistics, and writes ``info.json``
    into the save directory.

    Fixes: typo in the equal-dates log message ("creat" -> "create"), and
    the documented requirement that one of --excluded_patient_file /
    --exclude_patient_ratio be supplied is now enforced instead of being
    silently ignored.
    """
    parser = argparse.ArgumentParser(
        description="Precompute training data summary statistics etc for CLMBR experiments"
    )
    parser.add_argument(
        "input_data_dir",
        type=str,
        help="Location of the dataset extract to be used for CLMBR training",
    )
    parser.add_argument(
        "save_dir", type=str, help="Location where model info is to be saved",
    )
    parser.add_argument(
        "train_end_date", type=str, help="The end date for training"
    )
    parser.add_argument(
        "val_end_date",
        type=str,
        help="The end date for validation. Should be later than the end date for training",
    )
    parser.add_argument(
        "--min_patient_count",
        type=int,
        default=100,
        help="Only keep statistics on codes/terms that appear for this many patients (default 100)",
    )
    parser.add_argument(
        "--excluded_patient_file",
        type=str,
        help="A file containing a list of patients to exclude from training. "
        "Any patient ID you plan to use for finetuning / evaluation should be "
        "listed in this file. If not provided, exclude_patient_ratio must be specified.",
        default=None,
    )
    parser.add_argument(
        "--exclude_patient_ratio",
        type=float,
        default=None,
        help="Ratio of patients to exclude from pre-training between 0 and 1."
        " If provided, excluded patient IDs will "
        "be randomly selected and written out to a file "
        '"excluded_patient_ids.txt" in the save directory. If not '
        "provided, excluded_patient_file must be specified.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=3451235,
        help="Random seed (default 3451235)",
    )

    args = parser.parse_args()

    # The option help text requires one exclusion mechanism; fail fast
    # instead of silently pre-training on every patient.
    if args.excluded_patient_file is None and args.exclude_patient_ratio is None:
        parser.error(
            "one of --excluded_patient_file or --exclude_patient_ratio is required"
        )

    if args.save_dir is None:
        print("Error - must specify save_dir", file=sys.stderr)
        exit(1)
    else:
        save_dir = args.save_dir

    os.makedirs(save_dir, exist_ok=True)
    set_up_logging(os.path.join(save_dir, "create_info.log"))
    logging.info("Args: %s", str(args))

    # Refuse to clobber a previous run's output.
    if check_dir_for_overwrite(save_dir):
        print(
            "Fatal error - model dir {} is not empty".format(save_dir),
            file=sys.stderr,
        )
        logging.info("Fatal error - model dir {} is not empty".format(save_dir))
        exit(1)

    ontologies_path = os.path.join(args.input_data_dir, "ontology.db")
    timelines_path = os.path.join(args.input_data_dir, "extract.db")

    train_end_date = datetime.datetime.fromisoformat(args.train_end_date)
    val_end_date = datetime.datetime.fromisoformat(args.val_end_date)
    if train_end_date == val_end_date:
        logging.info("Could not create info with the same train and validation end date")
        exit(1)

    # create_info (C++ extension) returns a JSON string of the summary.
    result = json.loads(
        create_info(
            timelines_path,
            ontologies_path,
            train_end_date,
            val_end_date,
            args.min_patient_count,
        )
    )
    # Stamp the run configuration into the result for downstream training.
    result["extract_dir"] = args.input_data_dir
    result["extract_file"] = "extract.db"
    result["train_start_date"] = "1900-01-01"
    result["train_end_date"] = args.train_end_date
    result["val_start_date"] = args.train_end_date
    result["val_end_date"] = args.val_end_date
    result["seed"] = args.seed
    result["min_patient_count"] = args.min_patient_count

    def remove_pids(a, x):
        # Drop (patient_id, length) pairs whose id is in the excluded set x.
        return [(p, c) for p, c in a if p not in x]

    if args.excluded_patient_file is not None:
        with open(args.excluded_patient_file) as f:
            pids = {int(a) for a in f}
        result["train_patient_ids_with_length"] = remove_pids(
            result["train_patient_ids_with_length"], pids
        )
        result["val_patient_ids_with_length"] = remove_pids(
            result["val_patient_ids_with_length"], pids
        )
        logging.info(
            "Removed %d patient IDs from file %s"
            % (len(pids), args.excluded_patient_file)
        )
    elif args.exclude_patient_ratio is not None:
        assert 0 < args.exclude_patient_ratio and args.exclude_patient_ratio < 1
        train_pids = set([x[0] for x in result["train_patient_ids_with_length"]])
        val_pids = set([x[0] for x in result["val_patient_ids_with_length"]])
        all_pids = train_pids.union(val_pids)
        # NOTE(review): this sampling is not seeded with args.seed, so the
        # excluded set differs between runs; reproducibility relies on the
        # excluded_patient_ids.txt file written below — confirm intent.
        excluded_pids = set(
            random.sample(
                list(all_pids),
                int(round(len(all_pids) * args.exclude_patient_ratio)),
            )
        )
        result["train_patient_ids_with_length"] = remove_pids(
            result["train_patient_ids_with_length"], excluded_pids
        )
        result["val_patient_ids_with_length"] = remove_pids(
            result["val_patient_ids_with_length"], excluded_pids
        )
        # Persist the sampled ids so the same split can be reused later.
        with open(
            os.path.join(args.save_dir, "excluded_patient_ids.txt"), "w"
        ) as f:
            for pid in excluded_pids:
                f.write("%d\n" % pid)
        logging.info(
            "Removed %d patient IDs using ratio %f"
            % (len(excluded_pids), args.exclude_patient_ratio)
        )

    def count_frequent_items(counts: Mapping[Any, int], threshold: int) -> int:
        # Number of items appearing at least `threshold` times.
        return len(
            {item for item, count in counts.items() if count >= threshold}
        )

    # Log code-frequency statistics at several thresholds for inspection.
    for threshold in (10, 25, 50, 100, 1000):
        logging.info(
            "Codes with >= {} {}".format(
                threshold, count_frequent_items(result["code_counts"], threshold)
            )
        )
    logging.info("Number codes: {}".format(len(result["code_counts"])))
    logging.info("Number valid codes: {}".format(len(result["valid_code_map"])))

    with open(os.path.join(args.save_dir, "info.json"), "w") as fp:
        json.dump(result, fp)
def train_model() -> None:
    """CLI entry point: train a CLMBR model from a precomputed info dir.

    Parses model/optimization hyper-parameters, writes ``config.json`` and
    a copy of ``info.json`` into the model directory, builds the dataset,
    and runs the Trainer.

    Fix: when every code cleared the frequency threshold, the
    ``first_too_small_index`` sentinel leaked ``float("inf")`` into the
    config ("num_first"/"num_second"); it now falls back to the total
    number of valid codes.
    """
    parser = argparse.ArgumentParser(
        description="Representation Learning Experiments"
    )
    # paths
    parser.add_argument(
        "model_dir",
        type=str,
        help="Location where model logs and weights should be saved",
    )
    parser.add_argument(
        "info_dir",
        type=str,
        help="Location where `clmbr_create_info` results were saved",
    )
    # NOTE(review): a store_true flag named --extract_dir with "doctorai"
    # help text looks like a copy-paste remnant — confirm before removing.
    parser.add_argument(
        "--extract_dir",
        action="store_true",
        help="Use the doctorai task definition",
    )
    # model specification
    parser.add_argument(
        "--size",
        default=768,
        type=int,
        help="Dimensionality of the output embeddings",
    )
    parser.add_argument(
        "--encoder_type",
        default="gru",
        choices=["gru", "lstm", "transformer"],
        help='the sequence encoder module type (default "gru")',
    )
    parser.add_argument("--no_tied_weights", default=False, action="store_true")
    parser.add_argument(
        "--rnn_layers",
        default=1,
        type=int,
        help='number of recurrent layers to use if encoder_type is "gru" or '
        '"lstm" (default 1), not used if encoder_type is "transformer"',
    )
    parser.add_argument(
        "--dropout",
        default=0,
        type=float,
        help="dropout percentage (default 0)",
    )
    # optimization specification
    parser.add_argument(
        "--batch_size", type=int, default=500, help="Batch size (default 500)"
    )
    parser.add_argument(
        "--eval_batch_size",
        type=int,
        default=2000,
        help="Batch size during evaluation (default 2000)",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=50,
        help="Number of training epochs (default 50)",
    )
    parser.add_argument(
        "--warmup_epochs",
        type=int,
        default=2,
        help="Number of warmup epochs (default 2)",
    )
    parser.add_argument(
        "--lr", type=float, default=0.01, help="learning rate (default 0.01)"
    )
    parser.add_argument(
        "--l2",
        default=0.01,
        type=float,
        help="l2 regularization strength (default 0.01)",
    )
    parser.add_argument(
        "--device",
        default="cpu",
        help='Specify whether the model should be run on CPU or GPU. Can specify a specific GPU, e.g. "cuda:0" (default "cpu")',
    )
    parser.add_argument("--code_dropout", type=float, default=0.2)
    # Day dropout added in reference to Lawrence's comment,
    # although Ethan mentioned it should be removed from the API
    parser.add_argument("--day_dropout", type=float, default=0.2)
    args = parser.parse_args()

    model_dir = args.model_dir
    os.makedirs(model_dir, exist_ok=True)
    if check_dir_for_overwrite(model_dir):
        print(
            "Fatal error - model dir {} is not empty".format(model_dir),
            file=sys.stderr,
        )
        logging.info(
            "Fatal error - model dir {} is not empty".format(model_dir)
        )
        exit(1)

    # Try to load info.json file; see create_info above for details.
    info = read_info(os.path.join(args.info_dir, "info.json"))
    copyfile(
        os.path.join(args.info_dir, "info.json"),
        os.path.join(model_dir, "info.json"),
    )

    # Find the first code index whose frequency falls below 10x the minimum
    # patient count; codes before it are "first" (frequent), after "second".
    first_too_small_index = float("inf")
    for code, index in info["valid_code_map"].items():
        if info["code_counts"][code] < 10 * info["min_patient_count"]:
            first_too_small_index = min(first_too_small_index, index)
    if first_too_small_index == float("inf"):
        # Robustness fix: no code was below the threshold — treat every
        # code as "first" rather than writing inf into the config.
        first_too_small_index = len(info["valid_code_map"])

    # Debug output retained from the original (logging is configured later).
    print(len(info["valid_code_map"]), flush=True)

    # Create and save config dictionary
    config = {
        "batch_size": args.batch_size,
        "eval_batch_size": args.eval_batch_size,
        "num_first": first_too_small_index,
        "num_second": len(info["valid_code_map"]) - first_too_small_index,
        "size": args.size,
        "lr": args.lr,
        "dropout": args.dropout,
        "encoder_type": args.encoder_type,
        "rnn_layers": args.rnn_layers,
        "tied_weights": not args.no_tied_weights,
        "l2": args.l2,
        "b1": 0.9,
        "b2": 0.999,
        "e": 1e-8,
        "epochs_per_cycle": args.epochs,
        "warmup_epochs": args.warmup_epochs,
        "code_dropout": args.code_dropout,
        "day_dropout": args.day_dropout,
        "model_dir": os.path.abspath(model_dir),
    }
    with open(os.path.join(model_dir, "config.json"), "w") as outfile:
        json.dump(config, outfile)

    set_up_logging(os.path.join(model_dir, "train.log"))
    logging.info("Args: %s", str(args))

    dataset = PatientTimelineDataset(
        os.path.join(info["extract_dir"], "extract.db"),
        os.path.join(info["extract_dir"], "ontology.db"),
        os.path.join(args.info_dir, "info.json"),
    )
    random.seed(info["seed"])
    model = CLMBR(config, info).to(torch.device(args.device))
    trainer = Trainer(model)
    trainer.train(dataset, use_pbar=False)
def debug_model() -> None:
    """CLI entry point: load a trained CLMBR model and print, day by day for
    each validation patient, the observed codes, the codes fed to the model,
    and the model's sigmoid predictions for inspection.
    """
    parser = argparse.ArgumentParser(
        description="Representation Learning Experiments"
    )
    parser.add_argument(
        "--model_dir", type=str, help="Override where model is saved"
    )
    args = parser.parse_args()
    model_dir = args.model_dir

    # Rebuild the model from its saved config/info and best checkpoint.
    config = read_config(os.path.join(model_dir, "config.json"))
    info = read_info(os.path.join(model_dir, "info.json"))
    use_cuda = torch.cuda.is_available()
    model = CLMBR(config, info).to(device_from_config(use_cuda=use_cuda))
    model_data = torch.load(os.path.join(model_dir, "best"), map_location="cpu")
    model.load_state_dict(model_data)

    loaded_data = PatientTimelineDataset(
        os.path.join(info["extract_dir"], "extract.db"),
        os.path.join(info["extract_dir"], "ontology.db"),
        os.path.join(model_dir, "info.json"),
    )
    ontologies = ontology.OntologyReader(
        os.path.join(info["extract_dir"], "ontology.db")
    )
    timelines = timeline.TimelineReader(
        os.path.join(info["extract_dir"], "extract.db")
    )

    # Invert valid_code_map: model code index -> human-readable word.
    # The extra entry at len(valid_code_map) labels the out-of-vocab code.
    reverse_map = {}
    for b, a in info["valid_code_map"].items():
        word = ontologies.get_dictionary().get_word(b)
        reverse_map[a] = word
    reverse_map[len(info["valid_code_map"])] = "None"

    # Walk validation batches one patient at a time (batch_size=1) with all
    # dropout disabled so the printed inputs match the raw timeline.
    with DataLoader(
        loaded_data,
        threshold=config["num_first"],
        is_val=True,
        batch_size=1,
        seed=info["seed"],
        day_dropout=0,
        code_dropout=0,
    ) as batches:
        for batch in batches:
            # Skip patients with no prediction targets.
            if batch["task"][0].size()[0] == 0:
                continue
            values, non_text_loss = model(batch)
            values = torch.sigmoid(values)
            patient_id = int(batch["pid"][0])
            patient = timelines.get_patient(patient_id)
            original_day_indices = batch["day_index"][0]
            # task tensors: (day, word) index pairs, binary targets, and a
            # seen-before flag per prediction.
            indices, targets, seen_before, _, _, _ = batch["task"]
            day_indices = indices[:, 0]
            word_indices = indices[:, 1]
            (
                all_non_text_codes,
                all_non_text_offsets,
                all_non_text_codes1,
                all_non_text_offsets1,
                all_day_information,
                all_positional_encoding,
                all_lengths,
            ) = batch["rnn"]
            all_non_text_codes = list(all_non_text_codes)
            # Append a terminal offset so slicing day i uses [i, i+1).
            all_non_text_offsets = list(all_non_text_offsets) + [
                len(all_non_text_codes)
            ]
            print(patient_id, batch["pid"], original_day_indices)
            all_seen = set()
            for i, index in enumerate(original_day_indices):
                day = patient.days[index]
                print("------------------")
                print(patient_id, i, index, day.age / 365, day.date)
                # Codes actually observed on this day (expanded to subwords).
                words = set()
                for code in day.observations:
                    for subword in ontologies.get_subwords(code):
                        words.add(ontologies.get_dictionary().get_word(subword))
                        all_seen.add(
                            ontologies.get_dictionary().get_word(subword)
                        )
                print("Source", words)
                # Codes the model was actually given for this day.
                wordsA = set()
                if (i + 1) < len(all_non_text_offsets):
                    for code in all_non_text_codes[
                        all_non_text_offsets[i] : all_non_text_offsets[i + 1]
                    ]:
                        wordsA.add(reverse_map[code.item()])
                print("Given", wordsA)
                # Predictions for this day, sorted positives-first then by word.
                day_mask = day_indices == i
                w = word_indices[day_mask]
                p = values[day_mask]
                t = targets[day_mask]
                f = seen_before[day_mask]
                items = [
                    (
                        t_i.item(),
                        reverse_map[w_i.item()],
                        p_i.item(),
                        reverse_map[w_i.item()] in all_seen,
                        w_i.item(),
                        f_i.item(),
                    )
                    for p_i, t_i, w_i, f_i in zip(p, t, w, f)
                ]
                items.sort(key=lambda x: (-x[0], x[1]))
                for a in items:
                    print(a)
| 32.009597 | 128 | 0.59651 | 2,072 | 16,677 | 4.580598 | 0.179054 | 0.024655 | 0.04657 | 0.021073 | 0.340322 | 0.286482 | 0.227479 | 0.163629 | 0.10726 | 0.07544 | 0 | 0.012088 | 0.29064 | 16,677 | 520 | 129 | 32.071154 | 0.790194 | 0.01571 | 0 | 0.179775 | 0 | 0.002247 | 0.219906 | 0.025477 | 0 | 0 | 0 | 0 | 0.002247 | 1 | 0.013483 | false | 0 | 0.076404 | 0.006742 | 0.096629 | 0.022472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
302482d09ddf1f774c52862c7051a379cb2cfac1 | 8,215 | py | Python | scenarios.py | Sanghyun-Hong/DeepSloth | 92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6 | [
"MIT"
] | 9 | 2020-12-16T04:55:57.000Z | 2022-01-13T08:28:11.000Z | scenarios.py | Sanghyun-Hong/DeepSloth | 92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6 | [
"MIT"
] | null | null | null | scenarios.py | Sanghyun-Hong/DeepSloth | 92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6 | [
"MIT"
] | 1 | 2021-10-11T06:21:04.000Z | 2021-10-11T06:21:04.000Z | """
A script that partitions the dataset for transferability scenarios
"""
# basics
import numpy as np
from PIL import Image
# torch...
import torch
# custom libs
import utils
# ------------------------------------------------------------------------------
# Misc. functions
# ------------------------------------------------------------------------------
def update_numpy(acc, term, func):
    """Fold `term` into the accumulator `acc`.

    On the first call (``acc is None``) the term itself becomes the
    accumulator; afterwards `func` is applied to the pair ``(acc, term)``
    (e.g. ``np.concatenate`` or ``np.vstack``) and the result returned.
    """
    return term if acc is None else func((acc, term))
def get_class_wise_lists(n_classes_cifar10, return_test=False):
    """Collect CIFAR10 data grouped class by class.

    :param n_classes_cifar10: number of classes to iterate over (0..n-1).
    :param return_test: when True, also collect the per-class test split.
    :return: a list of ``(train_data, train_labels)`` tuples indexed by class;
        when ``return_test`` is True, additionally a parallel list of
        ``(test_data, test_labels)`` tuples.

    NOTE(review): this calls ``af.get_cifar10_class_data`` but ``af`` is never
    imported in this file (only ``utils`` is) -- presumably ``af`` should be an
    alias for a helper module; confirm before running.
    """
    if not return_test:
        class_wise_dataset = []
        for n_class in range(n_classes_cifar10):
            # test split discarded in this branch
            train_data, train_labels, _, _ = af.get_cifar10_class_data(n_class) # don't use
            class_wise_dataset.append((train_data, train_labels))
        return class_wise_dataset
    else:
        class_wise_dataset = []
        test_class_wise_dataset = []
        for n_class in range(n_classes_cifar10):
            train_data, train_labels, test_data, test_labels = af.get_cifar10_class_data(n_class) # don't use
            class_wise_dataset.append((train_data, train_labels))
            test_class_wise_dataset.append((test_data, test_labels))
        return class_wise_dataset, test_class_wise_dataset
# ------------------------------------------------------------------------------
# Scenario related...
# ------------------------------------------------------------------------------
def scenario_1_split(int_percentages=None):
    np.random.seed(0)  # fixed seed so each subset draw is reproducible
    """
    Scenario 1) Train CIFAR10 models that use 10%, 25%, 50% of the full training set.
    Chooses p% of data in each class (and corresponding labels)
    Parameter int_percentages contains percentages as integers, NOT FLOATS!
    Returns:
        - percent_loaders (dict): each key p% contains an af.ManualData object containing p% of dataset (p% from each label)
            * Loader data contains p% of images (p% of class 0, ..., p% of class 9) - consecutive
            * Loader labels (np.ndarray): contains p% of labels (p% 0s, ..., p% 9s) - consecutive
    """
    # NOTE(review): the string above follows a statement, so Python does not
    # treat it as the function docstring; also ``af`` (used below) is never
    # imported in this file -- confirm the intended module alias.
    if int_percentages is None:
        int_percentages = [10, 25, 50, 100]
    print('Running scenario_1_split\n')
    n_classes_cifar10 = 10
    # get a list containing CIFAR10 data class by class (class k at index k)
    class_wise_dataset = get_class_wise_lists(n_classes_cifar10)
    percent_loaders = {}
    for p in int_percentages:
        subset_data = None
        subset_labels = None
        for n_class in range(n_classes_cifar10):
            crt_train_data, crt_train_labels = class_wise_dataset[n_class]
            count = crt_train_data.shape[0]
            # number of samples making up p% of this class, drawn without replacement
            how_many_2_choose = int(count * p / 100.0)
            indexes = np.random.choice(np.arange(count), how_many_2_choose, replace=False)
            subset_data = update_numpy(acc=subset_data, term=np.copy(crt_train_data[indexes]), func=np.vstack)
            subset_labels = update_numpy(acc=subset_labels, term=np.copy(crt_train_labels[indexes]), func=np.hstack)
        # end for n_class
        print(f'p={p}, data: {subset_data.shape}, labels: {subset_labels.shape}\n')
        percent_loaders[p] = af.ManualData(data=subset_data, labels=subset_labels)
    # end for p
    np.random.seed(af.get_random_seed())  # restore the project-wide RNG seed
    return percent_loaders
def scenario_2_split(int_classes=None):
    np.random.seed(0)  # fixed seed so the class partition is reproducible
    """
    Scenario 2) Split CIFAR10 training set into non-overlapping 5 classes - 5 classes, 6 - 6 and 7 - 7.
    Parameter int_classes_left:
        - each value c is used to generate the two datasets that contain c classes
    Returns:
        - percent_loaders (dict): each key c contains a pair of af.ManualData meaning ( Dataset w c classes, another dataset c classes)
            * Loader data contains p% of images (p% of class 0, ..., p% of class 9) - consecutive
            * Loader labels (np.ndarray): contains p% of labels (p% 0s, ..., p% 9s) - consecutive
    """
    # NOTE(review): the string above follows a statement, so it is not the real
    # docstring; ``af`` is referenced below but never imported here -- confirm.
    if int_classes is None:
        int_classes = [5, 6, 7]
    print('Running scenario_2_split\n')
    n_classes_cifar10 = 10
    # get a list containing CIFAR10 data class by class (class k at index k)
    class_wise_dataset, test_class_wise_dataset = get_class_wise_lists(n_classes_cifar10, return_test=True)
    all_classes = np.arange(n_classes_cifar10)
    class_loaders = {}
    for classes in int_classes:
        # two datasets of `classes` classes out of 10 total => 2*(classes-5)
        # classes must be shared between the left and right datasets
        num_class_overlap = 2*(classes - 5)
        class_indexes_overlap = np.random.choice(all_classes, num_class_overlap, replace=False)
        left_unique_classes = np.random.choice([x for x in all_classes if x not in class_indexes_overlap], classes-num_class_overlap, replace=False)
        right_unique_classes = [x for x in all_classes if (x not in class_indexes_overlap) and (x not in left_unique_classes)]
        class_indexes_left = np.array(list(left_unique_classes) + list(class_indexes_overlap))
        class_indexes_right = np.array(list(right_unique_classes) + list(class_indexes_overlap))
        print(class_indexes_left)
        print(class_indexes_right)
        subset_data_left, subset_labels_left = None, None
        subset_data_right, subset_labels_right = None, None
        subset_test_data_left, subset_test_labels_left = None, None
        subset_test_data_right, subset_test_labels_right = None, None
        label_left = 0   # next relabeled class id for the left dataset
        label_right = 0  # next relabeled class id for the right dataset
        for n_class in all_classes:
            crt_train_data, crt_train_labels = class_wise_dataset[n_class]
            crt_test_data, crt_test_labels = test_class_wise_dataset[n_class]
            if n_class in class_indexes_left:
                new_train_labels = np.ones(crt_train_labels.shape) * label_left # we have to relabel the dataset because pytorch expects labels as 0,1,2,3,...
                subset_data_left = update_numpy(acc=subset_data_left, term=np.copy(crt_train_data), func=np.vstack)
                subset_labels_left = update_numpy(acc=subset_labels_left, term=np.copy(new_train_labels), func=np.hstack)
                new_test_labels = np.ones(crt_test_labels.shape) * label_left
                subset_test_data_left = update_numpy(acc=subset_test_data_left, term=np.copy(crt_test_data), func=np.vstack)
                subset_test_labels_left = update_numpy(acc=subset_test_labels_left, term=np.copy(new_test_labels), func=np.hstack)
                label_left += 1
            if n_class in class_indexes_right:
                new_train_labels = np.ones(crt_train_labels.shape) * label_right # we have to relabel the dataset because pytorch expects labels as 0,1,2,3,...
                subset_data_right = update_numpy(acc=subset_data_right, term=np.copy(crt_train_data), func=np.vstack)
                subset_labels_right = update_numpy(acc=subset_labels_right, term=np.copy(new_train_labels), func=np.hstack)
                new_test_labels = np.ones(crt_test_labels.shape) * label_right
                subset_test_data_right = update_numpy(acc=subset_test_data_right, term=np.copy(crt_test_data), func=np.vstack)
                subset_test_labels_right = update_numpy(acc=subset_test_labels_right, term=np.copy(new_test_labels), func=np.hstack)
                label_right += 1
        # end for n_class
        print(f'{classes}: train - data-left: {subset_data_left.shape}, labels-left: {subset_labels_left.shape}, data-right: {subset_data_right.shape}, labels-right: {subset_labels_right.shape}\n')
        print(f'{classes}: test - data-left: {subset_test_data_left.shape}, labels-left: {subset_test_labels_left.shape}, data-right: {subset_test_data_right.shape}, labels-right: {subset_test_labels_right.shape}\n')
        loaders_left = (af.ManualData(data=subset_data_left, labels=subset_labels_left), af.ManualData(data=subset_test_data_left, labels=subset_test_labels_left))
        loaders_right = (af.ManualData(data=subset_data_right, labels=subset_labels_right), af.ManualData(data=subset_test_data_right, labels=subset_test_labels_right))
        class_loaders[classes] = (loaders_left, loaders_right)
        np.random.seed(af.get_random_seed())  # restore the project-wide RNG seed
    # end for class_left, class_right
return class_loaders | 45.893855 | 216 | 0.667925 | 1,149 | 8,215 | 4.45953 | 0.136641 | 0.039032 | 0.046838 | 0.039032 | 0.622365 | 0.53064 | 0.356557 | 0.33509 | 0.32904 | 0.318891 | 0 | 0.014627 | 0.209373 | 8,215 | 179 | 217 | 45.893855 | 0.774288 | 0.102374 | 0 | 0.175258 | 0 | 0.020619 | 0.080312 | 0.041782 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041237 | false | 0 | 0.041237 | 0 | 0.134021 | 0.072165 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3026fa5fec5a07d66102ae71523732602b37bf87 | 6,637 | py | Python | Multi-agent Transfer RL/Transfer across tasks/Bayes-ToMoP/games.py | TJU-DRL-LAB/transfer-and-multi-task-reinforcement-learning | 2d8c12c2b5a4865c02934b63091945d3e2c92e90 | [
"MIT"
] | null | null | null | Multi-agent Transfer RL/Transfer across tasks/Bayes-ToMoP/games.py | TJU-DRL-LAB/transfer-and-multi-task-reinforcement-learning | 2d8c12c2b5a4865c02934b63091945d3e2c92e90 | [
"MIT"
] | null | null | null | Multi-agent Transfer RL/Transfer across tasks/Bayes-ToMoP/games.py | TJU-DRL-LAB/transfer-and-multi-task-reinforcement-learning | 2d8c12c2b5a4865c02934b63091945d3e2c92e90 | [
"MIT"
] | null | null | null | # coding=utf-8
import numpy as np
import imageio
from gym import spaces
import tkinter as tk
from PIL import Image, ImageTk
import matplotlib.pyplot as plt
import time
# Grid-cell codes used in `Soccer.playground` and the render maps.
CELL, BLOCK, AGENT_GOAL, OPPONENT_GOAL, AGENT, OPPONENT = range(6)
# Terminal rewards for the agent.
WIN, LOSE = 5, -5
# Discrete action identifiers.
UP, RIGHT, DOWN, LEFT, HOLD = range(5)
# Pixel size of one grid cell in the tkinter rendering.
UNIT = 40
class Soccer(tk.Tk, object):
    """Two-player grid soccer environment rendered with tkinter.

    A 7x7 grid (`playground`): 1 = wall, 2 = agent's goal column (right
    edge), 3 = opponent's goal column (left edge), 0 = open field. The
    agent (red) starts at (3, 1), the opponent (blue) at (3, 5); a white
    oval marks whoever holds the ball. The episode ends with reward WIN
    when the agent carries the ball into a `2` cell, and LOSE when the
    opponent reaches a `3` cell while holding the ball.
    Positions are (row, col) pairs.
    """

    # 7x7 board layout, row-major (see cell codes in the class docstring).
    playground = [1, 1, 1, 1, 1, 1, 1,
                  1, 0, 0, 0, 0, 0, 1,
                  3, 0, 0, 0, 0, 0, 2,
                  3, 0, 0, 0, 0, 0, 2,
                  3, 0, 0, 0, 0, 0, 2,
                  1, 0, 0, 0, 0, 0, 1,
                  1, 1, 1, 1, 1, 1, 1]

    # Action id -> (row, col) displacement.
    action_map = {
        UP: np.array([-1, 0]),
        RIGHT: np.array([0, 1]),
        DOWN: np.array([1, 0]),
        LEFT: np.array([0, -1]),
        HOLD: np.array([0, 0])}

    def __init__(self):
        super(Soccer, self).__init__()
        self.size = 7
        self.agent = np.array([3, 1])      # agent position (row, col)
        self.opponent = np.array([3, 5])   # opponent position (row, col)
        self.grids = np.array(self.playground).reshape(self.size, self.size)
        self.agent_keep_ball = False       # True while the agent holds the ball
        self.action_space = [UP, RIGHT, DOWN, LEFT, HOLD]
        self.n_actions = len(self.action_space)
        self.n_features = 5
        self.visualize()
        # low high to observe
        #self.observation_space = spaces.Discrete(7 * 7 * 2)

    def _observation(self):
        # 5-feature state: both positions scaled by 1/10, plus a possession
        # flag (0 = agent holds the ball, 1 = opponent does).
        s_ = [self.agent[0], self.agent[1], self.opponent[0], self.opponent[1]]
        s_.append(0 if self.agent_keep_ball else 1)
        return np.array(s_[:5]) / 10

    def _draw_player(self, pos, color):
        # Rectangle covering the grid cell at (row, col) = pos.
        return self.canvas.create_rectangle(pos[1] * UNIT, pos[0] * UNIT, (pos[1] + 1) * UNIT, (pos[0] + 1) * UNIT, fill=color)

    def _draw_ball(self, pos):
        # White oval marking the current ball holder's cell.
        return self.canvas.create_oval(pos[1] * UNIT, pos[0] * UNIT, (pos[1] + 1) * UNIT, (pos[0] + 1) * UNIT, fill='white')

    def step(self, act_a, act_o):
        """Advance one tick with agent action ``act_a`` and opponent ``act_o``.

        Returns ``(observation, reward, done)``. Win/lose is checked on the
        *proposed* moves before they are clamped against walls/goal cells,
        matching the original behavior.
        """
        new_pos_a = self.agent + self.action_map[act_a]
        new_pos_o = self.opponent + self.action_map[act_o]
        reward, done = 0, False
        # opponent win: reaches its goal column while the agent lacks the ball
        if self.grids[tuple(new_pos_o)] == 3 and not self.agent_keep_ball:
            reward = LOSE
            done = True
        # agent win: carries the ball into the opposite goal column
        if self.grids[tuple(new_pos_a)] == 2 and self.agent_keep_ball:
            reward = WIN
            done = True
        # valid check: walls and goal cells block actual movement
        if self.grids[tuple(new_pos_a)] in (1, 2, 3):
            new_pos_a = self.agent
        if self.grids[tuple(new_pos_o)] in (1, 2, 3):
            new_pos_o = self.opponent
        # collision on a non-wall cell: possession of the ball flips
        if np.array_equal(new_pos_a, new_pos_o) and self.grids[tuple(new_pos_a)] != 1:
            self.agent_keep_ball = not self.agent_keep_ball
        self.agent = new_pos_a
        self.opponent = new_pos_o
        # redraw both players and the ball at their new cells
        self.canvas.delete(self.agent_rect)
        self.canvas.delete(self.opp_rect)
        self.agent_rect = self._draw_player(self.agent, 'red')
        self.opp_rect = self._draw_player(self.opponent, 'blue')
        self.canvas.delete(self.ball_rect)
        holder = self.agent if self.agent_keep_ball else self.opponent
        self.ball_rect = self._draw_ball(holder)
        return self._observation(), reward, done

    # reset position and ball
    def reset(self):
        """Reset players and possession to the start configuration and
        return the initial observation."""
        self.agent = np.array([3, 1])
        self.opponent = np.array([3, 5])
        self.agent_keep_ball = False
        self.update()
        return self._observation()

    # render array
    def render(self):
        """Refresh the tk window and return the board as a flat length-49
        array with players marked (4 = agent, 5 = opponent; +2 on whichever
        currently holds the ball)."""
        m = np.copy(self.grids)
        m[tuple(self.agent)] = 4
        m[tuple(self.opponent)] = 5
        if self.agent_keep_ball:
            m[tuple(self.agent)] += 2
        else:
            m[tuple(self.opponent)] += 2
        #print(m, end='\n\n')
        self.update()
        return m.reshape(49)

    # render img
    def visualize(self):
        """Build the tk canvas: grid lines, colored cells, both player
        rectangles and the ball oval."""
        self.canvas = tk.Canvas(self, bg='white',
                                height=self.size * UNIT,
                                width=self.size * UNIT)
        # create grids
        for c in range(0, self.size * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, self.size * UNIT
            self.canvas.create_line(x0, y0, x1, y1)
        for r in range(0, self.size * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, self.size * UNIT, r
            self.canvas.create_line(x0, y0, x1, y1)
        m = np.copy(self.grids)
        m[tuple(self.agent)] = 4
        m[tuple(self.opponent)] = 5
        #print(m)
        # paint cells: walls black, goal columns white, open field green
        for j in range(self.size):
            for i in range(self.size):
                if m[j, i] == 1:
                    self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='black')
                elif m[j, i] == 2 or m[j, i] == 3:
                    self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='white')
                elif m[j, i] == 0 or m[j, i] == 4 or m[j, i] == 5:
                    self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='green')
        self.agent_rect = self._draw_player(self.agent, 'red')
        self.opp_rect = self._draw_player(self.opponent, 'blue')
        # BUGFIX: the original drew the agent-held ball using self.agent[0]
        # for both x and y (and agent[1] for both far corners), placing the
        # oval in the wrong cell; draw it at the holder's cell as step() does.
        holder = self.agent if self.agent_keep_ball else self.opponent
        self.ball_rect = self._draw_ball(holder)
        # pack all
        self.canvas.pack()
if __name__ == '__main__':
    # Demo run: scripted agent vs. scripted opponent on the soccer grid.
    env = Soccer()
    env.reset()
    # agent strategy
    agent_actions = [RIGHT, RIGHT, UP, RIGHT, RIGHT, RIGHT]
    # opponent strategy, you can initialize it randomly
    opponent_actions = [UP, LEFT, LEFT, LEFT, LEFT, LEFT, LEFT]
    for a_a, a_o in zip(agent_actions, opponent_actions):
        env.render()
        env.step(a_a, a_o)
        time.sleep(1)  # pause so the tkinter window is watchable
    #env.after(100, run_maze)
    #env.mainloop()
    # env.render()
| 39.041176 | 178 | 0.552509 | 998 | 6,637 | 3.548096 | 0.152305 | 0.109291 | 0.012708 | 0.05281 | 0.57752 | 0.513697 | 0.487433 | 0.453544 | 0.433211 | 0.433211 | 0 | 0.042594 | 0.296068 | 6,637 | 169 | 179 | 39.272189 | 0.715325 | 0.061323 | 0 | 0.362903 | 0 | 0 | 0.009986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040323 | false | 0 | 0.056452 | 0 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
302e8f1be7a7ffb783af9f6bd1bdc7f3405e6a18 | 745 | py | Python | brainutils/context.py | jimbuho/django-brain | 201237266a64e49b5c37f3d373ff6913dfbd099e | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | brainutils/context.py | jimbuho/django-brain | 201237266a64e49b5c37f3d373ff6913dfbd099e | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | brainutils/context.py | jimbuho/django-brain | 201237266a64e49b5c37f3d373ff6913dfbd099e | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
.. module:: dbu - context
:platform: Unix, Windows
:synopsis: Contexto Principal por defecto
.. moduleauthor:: Diego Gonzalez <dgonzalez.jim@gmail.com>
"""
from . import configuration
from . import models
def load_context(request):
"""
Load Context
Description
Carga las variables de contexto principales
:param request:
:return:
"""
IS_TEST_MODE = configuration.isTESTMode()
IS_MAINTENANCE = configuration.isMaintenanceMode()
try:
LANGUAGES = models.Language.objects.get_active()
except:
LANGUAGES = []
return {
'IS_TEST_MODE' : IS_TEST_MODE,
'IS_MAINTENANCE' : IS_MAINTENANCE,
'LANGUAGES' : LANGUAGES
} | 19.102564 | 58 | 0.640268 | 74 | 745 | 6.297297 | 0.662162 | 0.038627 | 0.064378 | 0.06867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001792 | 0.251007 | 745 | 39 | 59 | 19.102564 | 0.833333 | 0.377181 | 0 | 0 | 0 | 0 | 0.082547 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
302ff0768eb4d4a5356d5db0b329d974eeccb455 | 689 | py | Python | tests/trainer/test_multi_trainer.py | michael-aloys/knodle | 393e7ba0558036828fb228875511977c40000ed5 | [
"Apache-2.0"
] | 71 | 2021-04-26T10:39:56.000Z | 2022-03-28T14:36:16.000Z | tests/trainer/test_multi_trainer.py | michael-aloys/knodle | 393e7ba0558036828fb228875511977c40000ed5 | [
"Apache-2.0"
] | 92 | 2021-04-08T12:49:38.000Z | 2022-02-03T14:24:05.000Z | tests/trainer/test_multi_trainer.py | michael-aloys/knodle | 393e7ba0558036828fb228875511977c40000ed5 | [
"Apache-2.0"
] | 10 | 2021-07-08T06:49:28.000Z | 2022-01-15T23:28:13.000Z | from tests.trainer.generic import std_trainer_input_1
from knodle.trainer.multi_trainer import MultiTrainer
def test_auto_train(std_trainer_input_1):
(
model,
model_input_x, rule_matches_z, mapping_rules_labels_t,
y_labels
) = std_trainer_input_1
trainers = ["majority", "snorkel", "knn", "snorkel_knn"]
trainer = MultiTrainer(
name=trainers,
model=model,
mapping_rules_labels_t=mapping_rules_labels_t,
model_input_x=model_input_x,
rule_matches_z=rule_matches_z,
)
trainer.train()
metrics = trainer.test(model_input_x, y_labels)
# Check whether the code ran up to here
assert 2 == 2
| 25.518519 | 62 | 0.69521 | 94 | 689 | 4.691489 | 0.43617 | 0.090703 | 0.099773 | 0.108844 | 0.104308 | 0.104308 | 0 | 0 | 0 | 0 | 0 | 0.009416 | 0.229318 | 689 | 26 | 63 | 26.5 | 0.821092 | 0.053701 | 0 | 0 | 0 | 0 | 0.044615 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3033df96925128ff7539e6a8f86e4620e486d85d | 18,447 | py | Python | iogt_content_migration/management/commands/load_v1_db.py | Albert-Jokelin/iogt | 79b1b86c11df7d61ddbbd4ce16303dfe4a1b8465 | [
"BSD-2-Clause"
] | null | null | null | iogt_content_migration/management/commands/load_v1_db.py | Albert-Jokelin/iogt | 79b1b86c11df7d61ddbbd4ce16303dfe4a1b8465 | [
"BSD-2-Clause"
] | null | null | null | iogt_content_migration/management/commands/load_v1_db.py | Albert-Jokelin/iogt | 79b1b86c11df7d61ddbbd4ce16303dfe4a1b8465 | [
"BSD-2-Clause"
] | null | null | null | from pathlib import Path
from django.core.management.base import BaseCommand
from wagtail.core.models import Page, Site, Locale
from django.core.files.images import ImageFile
from wagtail.images.models import Image
from wagtail_localize.models import Translation
from wagtail_localize.views.submit_translations import TranslationCreator
import home.models as models
import psycopg2
import psycopg2.extras
import json
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--host',
default='0.0.0.0',
help='IoGT V1 database host'
)
parser.add_argument(
'--port',
default='5432',
help='IoGT V1 database port'
)
parser.add_argument(
'--name',
default='postgres',
help='IoGT V1 database name'
)
parser.add_argument(
'--user',
default='postgres',
help='IoGT V1 database user'
)
parser.add_argument(
'--password',
default='',
help='IoGT V1 database password'
)
parser.add_argument(
'--media-dir',
required=True,
help='Path to IoGT v1 media directory'
)
parser.add_argument(
'--skip-locales',
action='store_true',
help='Skip data of locales other than default language'
)
def handle(self, *args, **options):
self.db_connect(options)
self.media_dir = options.get('media_dir')
self.skip_locales = options.get('skip_locales')
self.image_map = {}
self.page_translation_map = {}
self.v1_to_v2_page_map = {}
self.clear()
self.stdout.write('Existing site structure cleared')
root = Page.get_first_root_node()
self.migrate(root)
def clear(self):
models.FooterPage.objects.all().delete()
models.FooterIndexPage.objects.all().delete()
models.BannerPage.objects.all().delete()
models.BannerIndexPage.objects.all().delete()
models.Article.objects.all().delete()
models.Section.objects.all().delete()
models.SectionIndexPage.objects.all().delete()
models.HomePage.objects.all().delete()
Site.objects.all().delete()
Image.objects.all().delete()
def db_connect(self, options):
connection_string = self.create_connection_string(options)
self.stdout.write(f'DB connection string created, string={connection_string}')
self.v1_conn = psycopg2.connect(connection_string)
self.stdout.write('Connected to v1 DB')
def __del__(self):
try:
self.v1_conn.close()
self.stdout.write('Closed connection to v1 DB')
except AttributeError:
pass
def create_connection_string(self, options):
host = options.get('host', '0.0.0.0')
port = options.get('port', '5432')
name = options.get('name', 'postgres')
user = options.get('user', 'postgres')
password = options.get('password', '')
return f"host={host} port={port} dbname={name} user={user} password={password}"
def db_query(self, q):
cur = self.v1_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute(q)
return cur
def migrate(self, root):
self.migrate_images()
self.load_page_translation_map()
home = self.create_home_page(root)
section_index_page, banner_index_page, footer_index_page = self.create_index_pages(home)
self.migrate_sections(section_index_page)
self.migrate_articles(section_index_page)
self.migrate_banners(banner_index_page)
self.migrate_footers(footer_index_page)
self.stop_translations()
Page.fix_tree()
def create_home_page(self, root):
sql = 'select * from core_main main join wagtailcore_page page on main.page_ptr_id = page.id'
cur = self.db_query(sql)
main = cur.fetchone()
cur.close()
home = None
if main:
home = models.HomePage(
title=main['title'],
draft_title=main['draft_title'],
seo_title=main['seo_title'],
slug=main['slug'],
live=main['live'],
latest_revision_created_at=main['latest_revision_created_at'],
first_published_at=main['first_published_at'],
last_published_at=main['last_published_at'],
)
root.add_child(instance=home)
else:
raise Exception('Could not find a main page in v1 DB')
cur.close()
cur = self.db_query('select * from wagtailcore_site')
v1_site = cur.fetchone()
cur.close()
if v1_site:
Site.objects.create(
hostname=v1_site['hostname'],
port=v1_site['port'],
root_page=home,
is_default_site=True,
site_name=v1_site['site_name'] if v1_site['site_name'] else 'Internet of Good Things',
)
else:
raise Exception('Could not find site in v1 DB')
return home
def create_index_pages(self, homepage):
section_index_page = models.SectionIndexPage(title='Sections')
homepage.add_child(instance=section_index_page)
banner_index_page = models.BannerIndexPage(title='Banners')
homepage.add_child(instance=banner_index_page)
footer_footer_page = models.FooterIndexPage(title='Footers')
homepage.add_child(instance=footer_footer_page)
return section_index_page, banner_index_page, footer_footer_page
def migrate_images(self):
cur = self.db_query('select * from wagtailimages_image')
content_type = self.find_content_type_id('wagtailimages', 'image')
for row in cur:
image_file = self.open_image_file(row['file'])
if image_file:
image = Image.objects.create(
title=row['title'],
file=ImageFile(image_file, name=row['file'].split('/')[-1]),
focal_point_x=row['focal_point_x'],
focal_point_y=row['focal_point_y'],
focal_point_width=row['focal_point_width'],
focal_point_height=row['focal_point_height'],
# uploaded_by_user='',
)
image.get_file_size()
image.get_file_hash()
tags = self.find_tags(content_type, row['id'])
if tags:
image.tags.add(*tags)
self.image_map.update({ row['id']: image })
cur.close()
self.stdout.write('Images migrated')
def find_content_type_id(self, app_label, model):
cur = self.db_query(f"select id from django_content_type where app_label = '{app_label}' and model = '{model}'")
content_type = cur.fetchone()
cur.close()
return content_type.get('id')
def open_image_file(self, file):
file_path = Path(self.media_dir) / file
try:
return open(file_path, 'rb')
except:
self.stdout.write(f"Image file not found: {file_path}")
def find_tags(self, content_type, object_id):
tags_query = 'select t.name from taggit_tag t join taggit_taggeditem ti on t.id = ti.tag_id where ti.content_type_id = {} and ti.object_id = {}'
cur = self.db_query(tags_query.format(content_type, object_id))
tags = [tag['name'] for tag in cur]
cur.close()
return tags
def migrate_sections(self, section_index_page):
sql = "select * " \
"from core_sectionpage csp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where csp.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += " and locale = 'en' "
sql += 'order by wcp.path'
cur = self.db_query(sql)
section_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
section_page_translations.append(row)
else:
self.create_section(section_index_page, row)
else:
for row in section_page_translations:
section = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
self.translate_page(locale=locale, page=section)
translated_section = section.get_translation_or_none(locale)
if translated_section:
translated_section.title = row['title']
translated_section.draft_title = row['draft_title']
translated_section.live = row['live']
translated_section.save(update_fields=['title', 'draft_title', 'slug', 'live'])
self.stdout.write(f"Translated section, title={row['title']}")
cur.close()
def create_section(self, section_index_page, row):
section = models.Section(
title=row['title'],
draft_title=row['draft_title'],
show_in_menus=True,
font_color='1CABE2',
slug=row['slug'],
path=section_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
)
section.save()
self.v1_to_v2_page_map.update({
row['page_ptr_id']: section
})
self.stdout.write(f"saved section, title={section.title}")
def migrate_articles(self, section_index_page):
sql = "select * " \
"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where cap.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += "and locale = 'en' "
sql += " and wcp.path like '000100010002%'order by wcp.path"
cur = self.db_query(sql)
article_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
article_page_translations.append(row)
else:
self.create_article(section_index_page, row)
else:
for row in article_page_translations:
article = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
self.translate_page(locale=locale, page=article)
translated_article = article.get_translation_or_none(locale)
if translated_article:
translated_article.lead_image = self.image_map.get(row['image_id'])
translated_article.title = row['title']
translated_article.draft_title = row['draft_title']
translated_article.live = row['live']
translated_article.body = self.map_article_body(row['body'])
translated_article.save(update_fields=['lead_image', 'title', 'draft_title', 'slug', 'live', 'body'])
self.stdout.write(f"Translated article, title={row['title']}")
cur.close()
def create_article(self, section_index_page, row):
article = models.Article(
lead_image=self.image_map.get(row['image_id']),
title=row['title'],
draft_title=row['draft_title'],
slug=row['slug'],
path=section_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
body=self.map_article_body(row['body']),
)
try:
article.save()
self.v1_to_v2_page_map.update({
row['page_ptr_id']: article
})
except Page.DoesNotExist:
self.stdout.write(f"Skipping page with missing parent: title={row['title']}")
return
self.stdout.write(f"saved article, title={article.title}")
def map_article_body(self, v1_body):
v2_body = json.loads(v1_body)
for block in v2_body:
if block['type'] == 'paragraph':
block['type'] = 'markdown'
return json.dumps(v2_body)
def migrate_banners(self, banner_index_page):
sql = "select * " \
"from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where cbp.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += " and locale = 'en' "
sql += ' order by wcp.path'
cur = self.db_query(sql)
banner_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
banner_page_translations.append(row)
else:
self.create_banner(banner_index_page, row)
else:
for row in banner_page_translations:
banner = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
try:
self.translate_page(locale=locale, page=banner)
except:
continue
translated_banner = banner.get_translation_or_none(locale)
if translated_banner:
translated_banner.banner_image = self.image_map.get(row['banner_id'])
translated_banner.banner_link_page = self.v1_to_v2_page_map.get(row['banner_link_page_id'])
translated_banner.title = row['title']
translated_banner.draft_title = row['draft_title']
translated_banner.live = row['live']
translated_banner.save(update_fields=['banner_image', 'title', 'draft_title', 'slug', 'live'])
self.stdout.write(f"Translated banner, title={row['title']}")
cur.close()
def create_banner(self, banner_index_page, row):
banner = models.BannerPage(
banner_image=self.image_map.get(row['banner_id']),
banner_link_page=self.v1_to_v2_page_map.get(row['banner_link_page_id']),
title=row['title'],
draft_title=row['draft_title'],
slug=row['slug'],
path=banner_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
banner_description=''
)
banner.save()
self.v1_to_v2_page_map.update({
row['page_ptr_id']: banner
})
self.stdout.write(f"saved banner, title={banner.title}")
def migrate_footers(self, footer_index_page):
sql = "select * " \
"from core_footerpage cfp, core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where cfp.articlepage_ptr_id = cap.page_ptr_id " \
"and cap.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += " and locale = 'en' "
sql += ' order by wcp.path'
cur = self.db_query(sql)
footer_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
footer_page_translations.append(row)
else:
self.create_footer(footer_index_page, row)
else:
for row in footer_page_translations:
footer = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
self.translate_page(locale=locale, page=footer)
translated_footer = footer.get_translation_or_none(locale)
if translated_footer:
translated_footer.lead_image = self.image_map.get(row['image_id'])
translated_footer.title = row['title']
translated_footer.draft_title = row['draft_title']
translated_footer.live = row['live']
translated_footer.body = self.map_article_body(row['body'])
translated_footer.save(update_fields=['lead_image', 'title', 'draft_title', 'slug', 'live', 'body'])
self.stdout.write(f"Translated footer, title={row['title']}")
cur.close()
def create_footer(self, footer_index_page, row):
footer = models.FooterPage(
lead_image=self.image_map.get(row['image_id']),
title=row['title'],
draft_title=row['draft_title'],
slug=row['slug'],
path=footer_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
body=self.map_article_body(row['body']),
)
footer.save()
self.v1_to_v2_page_map.update({
row['page_ptr_id']: footer
})
self.stdout.write(f"saved footer, title={footer.title}")
def load_page_translation_map(self):
sql = "select * " \
"from core_pagetranslation"
cur = self.db_query(sql)
for row in cur:
self.page_translation_map.update({
row['translated_page_id']: row['page_id'],
})
cur.close()
self.stdout.write('Page translation map loaded.')
def translate_page(self, locale, page):
translator = TranslationCreator(user=None, target_locales=[locale])
translator.create_translations(page)
def stop_translations(self):
    """Disable every Translation record in bulk so they stop syncing."""
    Translation.objects.update(enabled=False)
    self.stdout.write('Translations stopped.')
| 40.277293 | 152 | 0.58725 | 2,150 | 18,447 | 4.782791 | 0.11814 | 0.025382 | 0.015754 | 0.014004 | 0.4417 | 0.397744 | 0.350579 | 0.297092 | 0.273169 | 0.262958 | 0 | 0.006906 | 0.301404 | 18,447 | 457 | 153 | 40.365427 | 0.79103 | 0.001084 | 0 | 0.295567 | 0 | 0.002463 | 0.187246 | 0.010855 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064039 | false | 0.012315 | 0.027094 | 0 | 0.115764 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3033fd8167635b4a38bf68dbe82419686667a557 | 2,968 | py | Python | run_files/cip_area_threshold/tissue_data/run_CIP_relaxation_times.py | jessiesrr/VTdyn | 6f71ef94525d95221f5bd5e5290f4df10648cd18 | [
"MIT"
] | null | null | null | run_files/cip_area_threshold/tissue_data/run_CIP_relaxation_times.py | jessiesrr/VTdyn | 6f71ef94525d95221f5bd5e5290f4df10648cd18 | [
"MIT"
] | null | null | null | run_files/cip_area_threshold/tissue_data/run_CIP_relaxation_times.py | jessiesrr/VTdyn | 6f71ef94525d95221f5bd5e5290f4df10648cd18 | [
"MIT"
] | null | null | null | import numpy as np
import libs.contact_inhibition_lib as lib #library for simulation routines
import libs.data as data
import libs.plot as vplt #plotting library
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import matplotlib.pyplot as plt
import os
"""run a single voronoi tessellation model simulation"""
OUTDIR = "CIP_cell_division_relaxation_time2/"
l = 10 # population size N=l*l
timend = 30. # simulation time (hours)
timestep = 1.0 # time intervals to save simulation history
rand = np.random.RandomState()
simulation = lib.simulation_contact_inhibition_area_dependent #simulation routine imported from lib
threshold_area_fraction=1.0
DEATH_RATE = 1./12
rates = (DEATH_RATE,DEATH_RATE/0.4) #death_rate,division_rate
domain_size_multiplier=0.980940
eta,mu,dt=1.,-250,0.001
T_m_init=0.1
def get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime):
    """Measure post-division relaxation for a range of T_m values.

    Runs one full contact-inhibition simulation, forces a single cell
    division, then relaxes copies of the resulting tissue (no further
    division) under each T_m in ``T_m_vals``.  For each T_m a text file
    ``OUTDIR/T_m=<value>.txt`` is written with rows: time, distance between
    the two daughter cells, and their mean area.

    :param T_m_vals: iterable of T_m values to relax under
    :param T_m_init: T_m used for the initial equilibration run
    :param eta, mu, dt: simulation parameters (see module constants)
    :param relaxtime: duration (in simulation time) of each relaxation run
    """
    # Equilibrate a fresh tissue with division/death enabled.
    history = lib.run_simulation(simulation,l,timestep,timend,rand,progress_on=True,
        init_time=None,til_fix=False,save_areas=True,cycle_phase=None,eta=eta,mu=mu,dt=dt,T_m=T_m_init,
        return_events=False,save_cell_histories=True,domain_size_multiplier=domain_size_multiplier,
        rates=rates,threshold_area_fraction=threshold_area_fraction)
    # Let the final state settle for 200 steps without any division events.
    tissue = lib.run_return_final_tissue(lib.simulation_no_division(history[-1],dt,200,rand,eta),200)
    # Pick one division-ready cell at random and divide it manually.
    division_ready = lib.check_area_threshold(tissue.mesh,threshold_area_fraction)
    mother = rand.choice(division_ready)
    tissue.add_daughter_cells(mother,rand)
    tissue.remove(mother,True)
    tissue.update(dt)
    # One independent copy of the post-division tissue per T_m value.
    init_tissues = [tissue.copy() for T_m in T_m_vals]
    for T_m,tissue in zip(T_m_vals,init_tissues):
        tissue.Force = BasicSpringForceNoGrowth(mu,T_m)
    # Relax each copy for relaxtime (no division) and keep the full history.
    histories = [lib.run(lib.simulation_no_division(tissue,dt,int(relaxtime/dt),rand,eta),int(relaxtime/dt),1) for tissue in init_tissues]
    for T_m,history in zip(T_m_vals,histories):
        # The two daughters were appended last, so they are the final two ids.
        cell1,cell2 = len(history[0])-2,len(history[0])-1
        sibling_distance = get_sibling_distance(history,cell1,cell2)
        mean_area = np.array([np.mean(tissue.mesh.areas[-2:]) for tissue in history])
        time = np.arange(0,relaxtime,dt)
        data = np.vstack((time,sibling_distance,mean_area))
        # Create OUTDIR lazily on the first failed write.
        try: np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,data)
        except IOError:
            os.makedirs(OUTDIR)
            np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,data)
def narg(tissue, i, j):
    """Return the slot of neighbour *j* in cell *i*'s neighbour list, or NaN if absent."""
    try:
        return np.flatnonzero(tissue.mesh.neighbours[i] == j)[0]
    except IndexError:
        # j is not a neighbour of i (or i is out of range).
        return np.nan
def get_sibling_distance(history,cell1,cell2):
    """Distance between cells *cell1* and *cell2* at every saved timestep.

    Returns NaN at timesteps where the two cells are not neighbours (narg
    returns NaN, which fails the < 100 slot check).
    """
    distances = []
    for tissue in history:
        slot = narg(tissue, cell1, cell2)
        if slot < 100:
            distances.append(tissue.mesh.distances[cell1][slot])
        else:
            distances.append(np.nan)
    return np.array(distances)
# Script entry: relax for 2 simulated time units under each T_m value.
# NOTE(review): runs at import time — no `if __name__ == "__main__":` guard.
relaxtime = 2.0
T_m_vals=[0.001,0.01,0.1,0.25,0.5,1.0,2.0]
get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime)
3035188b0c9611cb3581b6c9c987990a72ba1ab9 | 17,920 | py | Python | TAPI_RI/flask_server/tapi_server/models/connection_end_point.py | bartoszm/Snowmass-ONFOpenTransport | 874e7a3f311d915d692b27fcbd24032c89064f00 | [
"Apache-2.0"
] | null | null | null | TAPI_RI/flask_server/tapi_server/models/connection_end_point.py | bartoszm/Snowmass-ONFOpenTransport | 874e7a3f311d915d692b27fcbd24032c89064f00 | [
"Apache-2.0"
] | null | null | null | TAPI_RI/flask_server/tapi_server/models/connection_end_point.py | bartoszm/Snowmass-ONFOpenTransport | 874e7a3f311d915d692b27fcbd24032c89064f00 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.name_and_value import NameAndValue # noqa: F401,E501
from tapi_server.models.operational_state_pac import OperationalStatePac # noqa: F401,E501
from tapi_server.models.resource_spec import ResourceSpec # noqa: F401,E501
from tapi_server.models.termination_pac import TerminationPac # noqa: F401,E501
from tapi_server import util
class ConnectionEndPoint(Model):
    """Swagger model for a TAPI connection-end-point.

    NOTE: This class was originally auto-generated by the swagger code
    generator program.  The per-setter enum validation, repeated seven
    times, has been factored into :meth:`_require_one_of`; attribute names,
    property behaviour, ``swagger_types``/``attribute_map`` and the exact
    ``ValueError`` messages are unchanged.
    """

    # Allowed values for each enumerated attribute (from the swagger spec).
    _OPERATIONAL_STATES = ["DISABLED", "ENABLED"]
    _LIFECYCLE_STATES = ["PLANNED", "POTENTIAL_AVAILABLE", "POTENTIAL_BUSY", "INSTALLED", "PENDING_REMOVAL"]
    _TERMINATION_DIRECTIONS = ["BIDIRECTIONAL", "SINK", "SOURCE", "UNDEFINED_OR_UNKNOWN"]
    _TERMINATION_STATES = ["LP_CAN_NEVER_TERMINATE", "LT_NOT_TERMINATED", "TERMINATED_SERVER_TO_CLIENT_FLOW", "TERMINATED_CLIENT_TO_SERVER_FLOW", "TERMINATED_BIDIRECTIONAL", "LT_PERMENANTLY_TERMINATED", "TERMINATION_STATE_UNKNOWN"]
    _LAYER_PROTOCOL_NAMES = ["OTSiA", "OCH", "OTU", "ODU", "ETH", "ETY", "DSR"]
    _CONNECTION_PORT_DIRECTIONS = ["BIDIRECTIONAL", "INPUT", "OUTPUT", "UNIDENTIFIED_OR_UNKNOWN"]
    _CONNECTION_PORT_ROLES = ["SYMMETRIC", "ROOT", "LEAF", "TRUNK", "UNKNOWN"]

    def __init__(self, uuid: str=None, name: List[NameAndValue]=None, operational_state: str=None, lifecycle_state: str=None, termination_direction: str=None, termination_state: str=None, layer_protocol_name: str=None, connectivity_service_end_point: str=None, parent_node_edge_point: List[str]=None, client_node_edge_point: List[str]=None, connection_port_direction: str=None, connection_port_role: str=None):  # noqa: E501
        """ConnectionEndPoint - a model defined in Swagger.

        All parameters default to None.  Values are stored directly; the
        enum checks run only through the property setters, mirroring the
        generated code.
        """
        self.swagger_types = {
            'uuid': str,
            'name': List[NameAndValue],
            'operational_state': str,
            'lifecycle_state': str,
            'termination_direction': str,
            'termination_state': str,
            'layer_protocol_name': str,
            'connectivity_service_end_point': str,
            'parent_node_edge_point': List[str],
            'client_node_edge_point': List[str],
            'connection_port_direction': str,
            'connection_port_role': str
        }

        self.attribute_map = {
            'uuid': 'uuid',
            'name': 'name',
            'operational_state': 'operational-state',
            'lifecycle_state': 'lifecycle-state',
            'termination_direction': 'termination-direction',
            'termination_state': 'termination-state',
            'layer_protocol_name': 'layer-protocol-name',
            'connectivity_service_end_point': 'connectivity-service-end-point',
            'parent_node_edge_point': 'parent-node-edge-point',
            'client_node_edge_point': 'client-node-edge-point',
            'connection_port_direction': 'connection-port-direction',
            'connection_port_role': 'connection-port-role'
        }

        self._uuid = uuid
        self._name = name
        self._operational_state = operational_state
        self._lifecycle_state = lifecycle_state
        self._termination_direction = termination_direction
        self._termination_state = termination_state
        self._layer_protocol_name = layer_protocol_name
        self._connectivity_service_end_point = connectivity_service_end_point
        self._parent_node_edge_point = parent_node_edge_point
        self._client_node_edge_point = client_node_edge_point
        self._connection_port_direction = connection_port_direction
        self._connection_port_role = connection_port_role

    @classmethod
    def from_dict(cls, dikt) -> 'ConnectionEndPoint':
        """Deserialize *dikt* into a ConnectionEndPoint.

        :param dikt: A dict.
        :rtype: ConnectionEndPoint
        """
        return util.deserialize_model(dikt, cls)

    @staticmethod
    def _require_one_of(attribute: str, value, allowed):
        """Raise ValueError unless *value* is one of *allowed*.

        The message format matches the swagger-generated validation exactly.
        """
        if value not in allowed:
            raise ValueError(
                "Invalid value for `{0}` ({1}), must be one of {2}"
                .format(attribute, value, allowed)
            )

    @property
    def uuid(self) -> str:
        """Universally unique, immutable identifier in RFC 4122 string form
        (e.g. f81d4fae-7dec-11d0-a765-00a0c91e6bf6); carries no semantics."""
        return self._uuid

    @uuid.setter
    def uuid(self, uuid: str):
        """Set the uuid of this ConnectionEndPoint (RFC 4122 string form)."""
        self._uuid = uuid

    @property
    def name(self) -> List[NameAndValue]:
        """List of names: values unique in some namespace, may change over
        the entity's life, carry no semantics."""
        return self._name

    @name.setter
    def name(self, name: List[NameAndValue]):
        """Set the name list of this ConnectionEndPoint."""
        self._name = name

    @property
    def operational_state(self) -> str:
        """Operational state; one of _OPERATIONAL_STATES."""
        return self._operational_state

    @operational_state.setter
    def operational_state(self, operational_state: str):
        """Set the operational_state; must be one of _OPERATIONAL_STATES."""
        self._require_one_of('operational_state', operational_state, self._OPERATIONAL_STATES)
        self._operational_state = operational_state

    @property
    def lifecycle_state(self) -> str:
        """Lifecycle state; one of _LIFECYCLE_STATES."""
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state: str):
        """Set the lifecycle_state; must be one of _LIFECYCLE_STATES."""
        self._require_one_of('lifecycle_state', lifecycle_state, self._LIFECYCLE_STATES)
        self._lifecycle_state = lifecycle_state

    @property
    def termination_direction(self) -> str:
        """Overall directionality of the LP: BIDIRECTIONAL LPs may have SINK
        and/or SOURCE flows; SINK/SOURCE LPs only their own (or contra-
        direction) flows."""
        return self._termination_direction

    @termination_direction.setter
    def termination_direction(self, termination_direction: str):
        """Set the termination_direction; must be one of _TERMINATION_DIRECTIONS."""
        self._require_one_of('termination_direction', termination_direction, self._TERMINATION_DIRECTIONS)
        self._termination_direction = termination_direction

    @property
    def termination_state(self) -> str:
        """Whether the layer is terminated and, if so, how."""
        return self._termination_state

    @termination_state.setter
    def termination_state(self, termination_state: str):
        """Set the termination_state; must be one of _TERMINATION_STATES."""
        self._require_one_of('termination_state', termination_state, self._TERMINATION_STATES)
        self._termination_state = termination_state

    @property
    def layer_protocol_name(self) -> str:
        """Layer protocol name; one of _LAYER_PROTOCOL_NAMES."""
        return self._layer_protocol_name

    @layer_protocol_name.setter
    def layer_protocol_name(self, layer_protocol_name: str):
        """Set the layer_protocol_name; must be one of _LAYER_PROTOCOL_NAMES."""
        self._require_one_of('layer_protocol_name', layer_protocol_name, self._LAYER_PROTOCOL_NAMES)
        self._layer_protocol_name = layer_protocol_name

    @property
    def connectivity_service_end_point(self) -> str:
        """Reference to the associated connectivity-service-end-point."""
        return self._connectivity_service_end_point

    @connectivity_service_end_point.setter
    def connectivity_service_end_point(self, connectivity_service_end_point: str):
        """Set the connectivity_service_end_point reference."""
        self._connectivity_service_end_point = connectivity_service_end_point

    @property
    def parent_node_edge_point(self) -> List[str]:
        """References to the parent node-edge-points."""
        return self._parent_node_edge_point

    @parent_node_edge_point.setter
    def parent_node_edge_point(self, parent_node_edge_point: List[str]):
        """Set the parent_node_edge_point references."""
        self._parent_node_edge_point = parent_node_edge_point

    @property
    def client_node_edge_point(self) -> List[str]:
        """References to the client node-edge-points."""
        return self._client_node_edge_point

    @client_node_edge_point.setter
    def client_node_edge_point(self, client_node_edge_point: List[str]):
        """Set the client_node_edge_point references."""
        self._client_node_edge_point = client_node_edge_point

    @property
    def connection_port_direction(self) -> str:
        """Orientation of defined flow at the EndPoint."""
        return self._connection_port_direction

    @connection_port_direction.setter
    def connection_port_direction(self, connection_port_direction: str):
        """Set the connection_port_direction; must be one of _CONNECTION_PORT_DIRECTIONS."""
        self._require_one_of('connection_port_direction', connection_port_direction, self._CONNECTION_PORT_DIRECTIONS)
        self._connection_port_direction = connection_port_direction

    @property
    def connection_port_role(self) -> str:
        """Role of this EP in the context of the FC (e.g. working,
        protection, hub, spoke, leaf, root)."""
        return self._connection_port_role

    @connection_port_role.setter
    def connection_port_role(self, connection_port_role: str):
        """Set the connection_port_role; must be one of _CONNECTION_PORT_ROLES."""
        self._require_one_of('connection_port_role', connection_port_role, self._CONNECTION_PORT_ROLES)
        self._connection_port_role = connection_port_role
| 43.814181 | 536 | 0.69135 | 2,174 | 17,920 | 5.447562 | 0.101656 | 0.030904 | 0.123617 | 0.050156 | 0.777506 | 0.684877 | 0.611416 | 0.513806 | 0.42903 | 0.328717 | 0 | 0.015882 | 0.23404 | 17,920 | 408 | 537 | 43.921569 | 0.846933 | 0.461719 | 0 | 0.255952 | 0 | 0 | 0.177799 | 0.074395 | 0 | 0 | 0 | 0 | 0 | 1 | 0.154762 | false | 0 | 0.053571 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30357aaf0a514c1636520e15938cba9dc811ec7d | 901 | py | Python | idom_jupyter/jupyter_server_extension.py | idom-team/idom-jupyter | 21037d41c51d4d9e23cca4486a850f2915f27d29 | [
"MIT"
] | 28 | 2020-09-12T19:59:27.000Z | 2022-03-14T10:08:13.000Z | idom_jupyter/jupyter_server_extension.py | idom-team/idom-jupyter | 21037d41c51d4d9e23cca4486a850f2915f27d29 | [
"MIT"
] | 11 | 2020-10-05T06:54:43.000Z | 2022-02-19T21:16:31.000Z | idom_jupyter/jupyter_server_extension.py | idom-team/idom-jupyter | 21037d41c51d4d9e23cca4486a850f2915f27d29 | [
"MIT"
] | null | null | null | from urllib.parse import urljoin
from appdirs import user_data_dir
from notebook.notebookapp import NotebookApp
from idom.config import IDOM_WED_MODULES_DIR
from tornado.web import StaticFileHandler
from tornado.web import Application
IDOM_WED_MODULES_DIR.current = user_data_dir("idom-jupyter", "idom-team")
def _load_jupyter_server_extension(notebook_app: NotebookApp):
web_app: Application = notebook_app.web_app
base_url = web_app.settings["base_url"]
route_pattern = urljoin(base_url, rf"_idom_web_modules/(.*)")
web_app.add_handlers(
host_pattern=".*$",
host_handlers=[
(
route_pattern,
StaticFileHandler,
{"path": str(IDOM_WED_MODULES_DIR.current.absolute())},
),
],
)
# compat for older versions of Jupyter
load_jupyter_server_extension = _load_jupyter_server_extension
| 29.064516 | 73 | 0.712542 | 110 | 901 | 5.463636 | 0.4 | 0.039933 | 0.069884 | 0.084859 | 0.079867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.206437 | 901 | 30 | 74 | 30.033333 | 0.840559 | 0.039956 | 0 | 0 | 0 | 0 | 0.067207 | 0.025492 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.272727 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
303647975ecbe068790f55c360ba681d772f2c4f | 1,033 | py | Python | dobot_gym/envs/real/dobot_env.py | sandipan1/dobot_gym | acea98da2506653d45d55e15a036da583415f31d | [
"MIT"
] | 1 | 2020-11-22T11:07:01.000Z | 2020-11-22T11:07:01.000Z | dobot_gym/envs/real/dobot_env.py | sandipan1/dobot_gym | acea98da2506653d45d55e15a036da583415f31d | [
"MIT"
] | null | null | null | dobot_gym/envs/real/dobot_env.py | sandipan1/dobot_gym | acea98da2506653d45d55e15a036da583415f31d | [
"MIT"
] | 1 | 2021-01-10T09:36:25.000Z | 2021-01-10T09:36:25.000Z | ## common class for only dobot with cam
import gym
from gym import utils
from glob import glob
from dobot_gym.utils.dobot_controller import DobotController
from gym.spaces import MultiDiscrete
class DobotRealEnv(gym.Env, utils.EzPickle):
    """Gym environment wrapping a physical Dobot Magician arm on a serial port.

    Action space is MultiDiscrete([3, 3, 3]); step() maps each component
    from {0, 1, 2} to a joint increment of {-1, 0, +1}.
    """

    def __init__(self):
        super().__init__()
        # Find the serial port on which the Dobot is connected.
        available_ports = glob('/dev/tty*USB*')
        if not available_ports:
            print('no port found for Dobot Magician')
            # NOTE(review): raising an exception would be friendlier to
            # callers than exiting the whole process; kept for compatibility.
            exit(1)
        self.dobot = DobotController(port=available_ports[0])
        # NOTE(review): observation_space is left as None; gym tooling that
        # inspects it will fail — confirm this is intended.
        self.observation_space = None
        self.action_space = MultiDiscrete([3, 3, 3])

    def compute_reward(self):
        """Placeholder reward function: always returns 0."""
        return 0

    def step(self, action):
        """Apply *action* as a joint increment and read back the arm pose.

        :param action: array-like of three values in {0, 1, 2}
        :return: (poses, reward, done, info); done is always False, info None
        """
        real_action = action - 1  # map {0, 1, 2} -> {-1, 0, +1}
        self.dobot.moveangleinc(*real_action, r=0, q=1)
        # BUGFIX: the original called self.compute_reward(image, centroid)
        # with two undefined names against a zero-argument method, raising
        # NameError on every step.
        reward = self.compute_reward()
        poses = self.dobot.get_dobot_joint()
        done = False
        info = None
        return poses, reward, done, info
3037ce4b050caa1080564ccd27af84ba3f81c62a | 1,609 | py | Python | Calibration/LumiAlCaRecoProducers/test/crab3_raw_corrC.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Calibration/LumiAlCaRecoProducers/test/crab3_raw_corrC.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Calibration/LumiAlCaRecoProducers/test/crab3_raw_corrC.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | #########################
#Author: Sam Higginbotham
########################
# CRAB3 job configuration for producing raw PCC (Pixel Cluster Counting)
# luminosity corrections from AlCaPCCRandom ALCARECO input.
from WMCore.Configuration import Configuration
config = Configuration()
#name='Pt11to30'
config.section_("General")
# NOTE(review): requestName and the input file say Run2017E but
# outputDatasetTag below says Run2017C — confirm which era is intended.
config.General.requestName = 'PCC_Run2017E_Corrections'
config.General.workArea = 'RawPCCZeroBias2017'
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# CMSSW configuration executed by each job; c.db is the corrections payload.
config.JobType.psetName = 'raw_corr_Random_cfg.py'
config.JobType.allowUndistributedCMSSW = True
config.JobType.outputFiles = ['rawPCC.csv']
config.JobType.inputFiles = ['c.db']
config.section_("Data")
#config.Data.inputDataset = '/AlCaLumiPixels/Run2017E-AlCaPCCZeroBias-PromptReco-v1/ALCARECO'
# Explicit single-file input instead of a whole dataset.
config.Data.userInputFiles=['/store/data/Run2017E/AlCaLumiPixels/ALCARECO/AlCaPCCRandom-PromptReco-v1/000/303/832/00000/E6B8ACA4-95A4-E711-9AA2-02163E014793.root']
#config.Data.lumiMask = ''
#config.Data.runRange='303382'#,297283,297278,297280,297281,297271,297227,297230,297276,297261,297266'
# Jobs may run at sites that do not host the input (AAA remote read).
config.Data.ignoreLocality = True
#useParent = True
config.Data.inputDBS = 'global'
#config.Data.splitting = 'LumiBased'
config.Data.splitting = 'FileBased'
config.Data.publication = False
config.Data.unitsPerJob = 1000
#config.Data.totalUnits = -1
#config.Data.publishDbsUrl = 'test'
config.Data.outputDatasetTag = 'PCC_AlCaLumiPixels_Run2017C_1kLS_NoZeroes'
config.Data.outLFNDirBase = '/store/group/comm_luminosity/PCC/ForLumiComputations/2017/5Feb2018'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
config.Site.whitelist=['T2_FR_CCIN2P3','T2_IT_Pisa','T2_UK_London_IC','T2_HU_Budapest']
#config.Site.whitelist=['T2_FR_CCIN2P3']
3037ecccd158f87250f163febc7c04a882e857b4 | 6,819 | py | Python | active_frame.py | binhhoangtieu/C3D-tensorflow | d85ef6156abc7fcdb4ab91e5b47a50c5ef5123c6 | [
"MIT"
] | 1 | 2019-02-11T15:47:52.000Z | 2019-02-11T15:47:52.000Z | active_frame.py | binhhoangtieu/C3D-tensorflow | d85ef6156abc7fcdb4ab91e5b47a50c5ef5123c6 | [
"MIT"
] | null | null | null | active_frame.py | binhhoangtieu/C3D-tensorflow | d85ef6156abc7fcdb4ab91e5b47a50c5ef5123c6 | [
"MIT"
] | 1 | 2018-12-04T04:55:19.000Z | 2018-12-04T04:55:19.000Z | import cv2
import os
import glob
import numpy as np
from operator import itemgetter
# import matplotlib.pyplot as plt
import math
import scipy.stats as stats
def main():
    """Entry point: extract optical-flow frame stacks for every UCF-101 video."""
    source_dir = './UCF-101'
    output_dir = './UCF101-OF'
    loaddata(video_dir=source_dir, depth=24, dest_forder=output_dir)
def save_image_to_file(frame_array, folder):
    """Write each frame of *frame_array* to *folder* as a zero-padded JPEG.

    Files are named 00000.jpg, 00001.jpg, ... in frame order.  *folder*
    must already exist (loaddata creates it).
    """
    # enumerate + os.path.join instead of range(np.size(...)) and string
    # concatenation; iteration order and filenames are unchanged.
    for index, frame in enumerate(frame_array):
        cv2.imwrite(os.path.join(folder, format(index, '05d') + '.jpg'), frame)
def loaddata(video_dir, depth, dest_forder):
    """Extract *depth* optical-flow frames from every .avi under *video_dir*.

    *video_dir* is expected to contain one level of class sub-directories
    (UCF-101 layout); frames are written to
    dest_forder/<class>/<video-name>/NNNNN.jpg.

    BUGFIX: the original sorted the file list with ``key=lambda name: path``
    — a constant key, so the sort was a no-op.  Files are now genuinely
    sorted by path.  The unused ``class_number`` local and the builtin-
    shadowing ``dir`` name were also removed.
    """
    for class_dir in os.listdir(video_dir):
        pattern = os.path.join(video_dir, class_dir, '*.avi')
        for filename in sorted(glob.glob(pattern)):
            print('Extracting file:', filename)
            # Other extractors (video3d_overlap, video3d_selected_active_frame,
            # full_selected_active_frame) are available in this module.
            frame_array = video3d_opticalflow(filename, depth)
            video_name = os.path.splitext(os.path.basename(filename))[0]
            directory = os.path.join(dest_forder, class_dir, video_name)
            if not os.path.exists(directory):
                os.makedirs(directory)
            save_image_to_file(frame_array, directory)
def active_frames(frame_array):
    """Merge a window of frames into one image weighted by frame activity.

    Each consecutive-frame euclidean distance is ranked; ranks are turned
    into weights by normal_distribution(); frames are then summed with
    those weights and the result rescaled to uint8 [0, 255].
    """
    d=[] #euclid distance
    frames =[]
    # d[i] = (distance between frame i+1 and frame i, original index i, weight placeholder)
    for i in range(np.size(frame_array,axis=0)-1):
        d.append((np.linalg.norm(frame_array[i+1]-frame_array[i]),i,0))
    #Sort d ascending by distance so rank == activity order.
    d.sort(key=itemgetter(0)) #get the order of active frame
    d = normal_distribution(d) #assign each d one value based on normal distribution
    d.sort(key=itemgetter(1)) #restore original frame order
    # First frame enters with implicit weight 1.
    frames.append(frame_array[0])
    # NOTE(review): d has len(frame_array)-1 entries, so this loop weights
    # frames 1..n-2 and never includes the last frame — possible off-by-one;
    # confirm intended.
    for i in range(1,np.size(d,axis=0)):
        temp_frame = frame_array[i] * d[i][2]
        frames.append(temp_frame)
    temp_frame = np.sum(frames, axis = 0)
    # Rescale the weighted sum to a displayable 8-bit image.
    temp_frame = cv2.normalize(temp_frame, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    return np.array(temp_frame)
# Select the single most active frame within a segment.
def selected_active_frame(frame_array):
    """Return the frame that follows the largest inter-frame jump.

    Activity is the euclidean norm of the difference between consecutive
    frames.  If no jump is strictly positive (or there is only one frame),
    the first frame is returned.
    """
    gaps = [np.linalg.norm(frame_array[k + 1] - frame_array[k])
            for k in range(len(frame_array) - 1)]
    chosen = frame_array[0]
    if gaps and max(gaps) > 0:
        # argmax picks the earliest maximum, matching the strict-> scan.
        chosen = frame_array[int(np.argmax(gaps)) + 1]
    return np.array(chosen)
# Select the `depth` most active frames over the whole video.
def full_selected_active_frame(filename, depth):
    """Return the *depth* frames with the largest preceding inter-frame jumps.

    Frames are returned in chronological order.  If the video has fewer
    than depth+1 frames, all available candidate frames are returned.
    """
    cap_images = read_video_from_file(filename)
    gaps = []
    for idx in range(np.size(cap_images, axis=0) - 1):
        gaps.append((np.linalg.norm(cap_images[idx + 1] - cap_images[idx]), idx + 1))
    gaps.sort(key=itemgetter(0))
    # Top `depth` by activity, then back into chronological order.
    chosen = sorted(idx for _, idx in gaps[-depth:])
    return [cap_images[idx] for idx in chosen]
def video3d_selected_active_frame(filename, depth):
    """Split the video into *depth* segments and keep each segment's most
    active frame.

    BUGFIX: the original's ``framearray.append`` was commented out, so the
    function always returned an empty array; the selection (visible in the
    commented-out lines) is restored.  The removed-in-NumPy>=1.24 ``np.int``
    alias is replaced with the builtin ``int``.
    """
    cap_images = read_video_from_file(filename)
    framearray = []
    nframe = np.size(cap_images, axis=0)
    # Evenly spaced segment start indices over the whole video.
    frames = [int(x * nframe / depth) for x in range(depth)]
    for i in range(len(frames)):
        if i < len(frames) - 1:
            segment = cap_images[frames[i]:frames[i + 1]]
        else:  # last segment runs to the end of the video
            segment = cap_images[frames[i]:nframe]
        framearray.append(selected_active_frame(segment))
    return np.array(framearray)
def video3d_overlap(filename, depth=16, overlap=5):
    """Sample *depth* anchor frames and merge an +/-*overlap* window around
    each into one activity-weighted image (via active_frames).

    :param filename: path to the input video
    :param depth: number of anchor frames / output images
    :param overlap: half-width of the merge window around each anchor
    :return: np.array of depth merged frames

    Replaces the removed ``np.int`` alias with builtin ``int``; clamping is
    done with max/min instead of if-statements.  Behaviour is unchanged.
    """
    cap_images = read_video_from_file(filename)
    frame_array = []
    nframe = np.size(cap_images, axis=0)
    anchors = [int(x * nframe / depth) for x in range(depth)]
    for anchor in anchors:
        fromframe = max(anchor - overlap, 0)
        toframe = min(anchor + overlap, nframe - 1)
        frame_array.append(active_frames(cap_images[fromframe:toframe]))
    return np.array(frame_array)
def read_video_from_file(filename):
    """Read every frame of a video file into a float array.

    Returns an array of shape (n_frames, height, width, 3); if a frame fails
    to decode, the result is truncated to the frames read so far.
    """
    video_cap = cv2.VideoCapture(filename)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; use builtin int
    nframe = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap_images = np.empty((nframe, frameHeight, frameWidth, 3))
    j = 0
    while j < nframe:
        # Read into a temporary first: on failure cv2 returns (False, None),
        # and the old `ret, cap_images[j] = video_cap.read()` unpacking
        # assigned None into the float array, raising TypeError before the
        # `ret` check could ever run.
        ret, frame = video_cap.read()
        if not ret:
            # keep the j frames read so far; the old [0:j-1] slice also
            # dropped the last successfully read frame
            cap_images = cap_images[:j]
            break
        cap_images[j] = frame
        j += 1
    video_cap.release()  # release the capture handle (was leaked before)
    return cap_images
def normal_distribution(d):
    """Write a weight into index 2 of every tuple in `d`.

    Each element of `d` is a tuple whose first entry is a distance. The
    weight stored at index 2 is alpha(16, i), or 1 when all distances are
    equal (the two frames are definitely the same). Mutates `d` in place
    and returns it.
    """
    distance_values = [row[0] for row in d]
    dmax = max(distance_values)
    dmin = min(distance_values)
    # mean/sd feed only the commented-out normpdf/stats weighting below
    mean = (dmax - dmin) / 2
    sd = (mean - dmin) / 3
    for i in range(len(d)):
        row = list(d[i])
        if dmax == dmin:  # 2frame is definitely the same
            row[2] = 1
        else:
            # row[2] = 5*i+1
            row[2] = alpha(16, i)
            # row[2] = normpdf(i,mean,sd)
            # row[2] = stats.norm(mean,sd).pdf(i)
        d[i] = tuple(row)
    return d
def video3d_opticalflow(filename, depth):
    """Compute dense (Farneback) optical-flow visualisations for `depth`
    evenly spaced consecutive frame pairs of a video.

    Each output frame is an HSV image (flow direction as hue, magnitude as
    value, zero saturation) converted to BGR.
    """
    framearray = []
    cap_images = read_video_from_file(filename)
    nframe = np.size(cap_images, axis=0)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; use builtin int
    frames = [int(x * nframe / depth) for x in range(depth)]
    cap_images = np.asarray(cap_images, dtype=np.float32)
    for i in range(np.size(frames)):
        fromframe = frames[i]
        toframe = frames[i] + 1
        if toframe > nframe - 1:
            # anchor too close to the end: fall back to the final frame pair
            fromframe = nframe - 2
            toframe = nframe - 1
        prevframe = cv2.cvtColor(cap_images[fromframe], cv2.COLOR_BGR2GRAY)
        nextframe = cv2.cvtColor(cap_images[toframe], cv2.COLOR_BGR2GRAY)
        hsvImg = np.zeros((np.size(cap_images[fromframe], axis=0),
                           np.size(cap_images[fromframe], axis=1), 3))
        hsvImg[..., 1] = 0  # zero saturation
        flow = cv2.calcOpticalFlowFarneback(
            prevframe, nextframe, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsvImg[..., 0] = 0.5 * ang * 180 / np.pi  # radians -> degrees / 2
        hsvImg[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        hsvImg = np.asarray(hsvImg, dtype=np.float32)
        frame = cv2.cvtColor(hsvImg, cv2.COLOR_HSV2BGR)
        framearray.append(frame)
    return np.array(framearray)
#https://en.wikipedia.org/wiki/Normal_distribution#Probability_density_function
def normpdf(x, mean, sd):
    """Evaluate the normal probability density of N(mean, sd**2) at x."""
    var = float(sd) ** 2
    # use the full-precision constant instead of the truncated 3.1415926
    denom = math.sqrt(2 * math.pi * var)
    num = math.exp(-(float(x) - float(mean)) ** 2 / (2 * var))
    return num / denom
def alpha(T, t):
    """Weight alpha(T, t) = 2*(T - t + 1) - (T + 1)*(H_T - H_{t-1}),
    where H_n is the n-th harmonic number (H_0 = 0)."""
    def _harmonic(n):
        # H_n = 1 + 1/2 + ... + 1/n; the empty sum (n <= 0) is 0
        return sum(1.0 / k for k in range(1, n + 1))
    return 2 * (T - t + 1) - (T + 1) * (_harmonic(T) - _harmonic(t - 1))
def Harmonic_number(n):
    """Return the n-th harmonic number H_n = 1 + 1/2 + ... + 1/n (H_0 = 0)."""
    if n == 0:
        return 0
    total = 0.0
    for k in range(1, n + 1):
        total += 1.0 / k
    return total
if __name__ == '__main__':
main() | 31.569444 | 110 | 0.706555 | 1,097 | 6,819 | 4.238833 | 0.195989 | 0.048387 | 0.014194 | 0.026022 | 0.34043 | 0.286237 | 0.224731 | 0.174624 | 0.174624 | 0.142581 | 0 | 0.029564 | 0.151782 | 6,819 | 216 | 111 | 31.569444 | 0.774378 | 0.137264 | 0 | 0.196319 | 0 | 0 | 0.009909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08589 | false | 0 | 0.042945 | 0.006135 | 0.202454 | 0.006135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30393f74ae0c5c36f82aebb7e0b95f7f112a1231 | 1,133 | py | Python | data_parser.py | JanoHorvath/k-means-clustering | c84e858c3e2bb417ffea11d441797a15c659a7ee | [
"MIT"
] | null | null | null | data_parser.py | JanoHorvath/k-means-clustering | c84e858c3e2bb417ffea11d441797a15c659a7ee | [
"MIT"
] | null | null | null | data_parser.py | JanoHorvath/k-means-clustering | c84e858c3e2bb417ffea11d441797a15c659a7ee | [
"MIT"
] | null | null | null | from random import randint
class Dataset:
def get_mock_scattered_dataset(self, numberOf, x_upper_bound, y_upper_bound):
""" Mock 2D dataset with scattered data points. """
points = []
for i in range(numberOf):
point = [randint(0,x_upper_bound), randint(0,y_upper_bound), 'black']
points.append(point)
return points
def get_mock_dataset(self, numberOf, x_upper_bound, y_upper_bound):
""" Mock 2D dataset with clustered data points. """
points = []
clusters = []
""" Creates between 2 to 10 cluster areas with random x/y values """
for i in range(randint(2, 10)):
cluster = [randint(0, x_upper_bound), randint(0, y_upper_bound)]
clusters.append(cluster)
""" Creates numberOf points each randomly assigned to one cluster area and random x/y values near that area """
for i in range(numberOf):
j = randint(0, len(clusters)-1)
point = [randint(0,30)+clusters[j][0], randint(0,30)+clusters[j][1], 'black']
points.append(point)
return points
| 33.323529 | 119 | 0.614298 | 151 | 1,133 | 4.470199 | 0.337748 | 0.118519 | 0.065185 | 0.048889 | 0.497778 | 0.385185 | 0.284444 | 0.284444 | 0.284444 | 0.284444 | 0 | 0.026829 | 0.276258 | 1,133 | 33 | 120 | 34.333333 | 0.796341 | 0.07767 | 0 | 0.421053 | 0 | 0 | 0.011737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
303a0f7118c7766ed025bf5dc2723fe60db8bf3e | 623 | py | Python | backend/colleges/utils.py | cesko-digital/zacni-uc | 281c56aec5509d5dd8bbbd60f054ffcd9156609e | [
"MIT"
] | 4 | 2021-02-26T09:28:14.000Z | 2021-07-08T19:21:57.000Z | backend/colleges/utils.py | cesko-digital/zacni-uc | 281c56aec5509d5dd8bbbd60f054ffcd9156609e | [
"MIT"
] | 35 | 2021-01-27T08:38:59.000Z | 2021-12-13T19:42:38.000Z | backend/colleges/utils.py | cesko-digital/zacni-uc | 281c56aec5509d5dd8bbbd60f054ffcd9156609e | [
"MIT"
] | 5 | 2021-01-21T21:35:42.000Z | 2022-01-06T10:07:58.000Z | from openpyxl import load_workbook
def import_msmt_college_registry_xlsx(path, sheet_name):
    """
    Import XLSX from https://regvssp.msmt.cz/registrvssp/cvslist.aspx
    (list of colleges and faculties).

    Parameters:
    path -- path to XLSX file
    sheet_name -- "ExportVS" or "ExportFakulty"

    Returns a list of dicts, one per data row, keyed by the stripped header
    values of the sheet's first row.
    """
    workbook = load_workbook(path)
    try:
        sheet = workbook[sheet_name]
        # header row (row 1) supplies the dict keys; strip stray whitespace
        columns = [cell.value.strip() for cell in sheet[1]]
        out = []
        # data rows start at row 2 (openpyxl rows are 1-indexed); the old
        # code reused `i` for both the row index and the cell iterator
        for row_idx in range(2, sheet.max_row + 1):
            values = [cell.value for cell in sheet[row_idx]]
            out.append(dict(zip(columns, values)))
        return out
    finally:
        # load_workbook keeps the file handle open; always release it
        workbook.close()
| 25.958333 | 69 | 0.638844 | 85 | 623 | 4.564706 | 0.576471 | 0.069588 | 0.030928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006424 | 0.250401 | 623 | 23 | 70 | 27.086957 | 0.824411 | 0.304976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
303b9455bfc79c1d55b5b456a3c9a97a10ddaa26 | 5,706 | py | Python | act/qc/arm.py | jrobrien91/ACT | 604b93d75366d23029f89d88df9053d52825c214 | [
"BSD-3-Clause"
] | 9 | 2019-03-11T19:41:34.000Z | 2019-09-17T08:34:19.000Z | act/qc/arm.py | jrobrien91/ACT | 604b93d75366d23029f89d88df9053d52825c214 | [
"BSD-3-Clause"
] | 127 | 2019-03-18T12:24:17.000Z | 2020-01-06T20:53:06.000Z | act/qc/arm.py | jrobrien91/ACT | 604b93d75366d23029f89d88df9053d52825c214 | [
"BSD-3-Clause"
] | 15 | 2019-03-11T15:30:56.000Z | 2019-11-01T19:10:11.000Z | """
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
    obj,
    variable=None,
    assessment='incorrect,suspect',
    exclude=None,
    include=None,
    normalize_assessment=True,
    cleanup_qc=True,
):
    """
    Function to query the ARM DQR web service for reports and
    add as a new quality control test to ancillary quality control
    variable. If no ancillary quality control variable exists a new
    one will be created and linked to the data variable through
    the ancillary_variables attribute.

    See online documentation from ARM Data
    Quality Office on the use of the DQR web service.

    https://code.arm.gov/docs/dqrws-examples/wikis/home

    Information about the DQR web-service available at
    https://adc.arm.gov/dqrws/

    Parameters
    ----------
    obj : xarray Dataset
        Data object
    variable : string, or list of str, or None
        Variables to check DQR web service. If set to None will
        attempt to update all variables.
    assessment : string
        assessment type to get DQRs. Current options include
        'missing', 'suspect', 'incorrect' or any combination separated
        by a comma.
    exclude : list of strings
        DQR IDs to exclude from adding into QC
    include : list of strings
        List of DQR IDs to include in flagging of data. Any other DQR IDs
        will be ignored.
    normalize_assessment : boolean
        The DQR assessment term is different than the embedded QC
        term. Embedded QC uses "Bad" and "Indeterminate" while
        DQRs use "Incorrect" and "Suspect". Setting this will ensure
        the same terms are used for both.
    cleanup_qc : boolean
        Call clean.cleanup() method to convert to standardized ancillary
        quality control variables. Has a little bit of overhead so
        if the Dataset has already been cleaned up, no need to run.

    Returns
    -------
    obj : xarray Dataset
        Data object

    Examples
    --------
        .. code-block:: python

            from act.qc.arm import add_dqr_to_qc
            obj = add_dqr_to_qc(obj, variable=['temp_mean', 'atmos_pressure'])

    """
    # DQR Webservice goes off datastreams, pull from object
    if 'datastream' in obj.attrs:
        datastream = obj.attrs['datastream']
    elif '_datastream' in obj.attrs:
        datastream = obj.attrs['_datastream']
    else:
        raise ValueError('Object does not have datastream attribute')

    # A placeholder datastream name cannot be queried against the service.
    if datastream == DEFAULT_DATASTREAM_NAME:
        raise ValueError("'datastream' name required for DQR service set to default value "
                         f"{datastream}. Unable to perform DQR service query.")

    # Clean up QC to conform to CF conventions
    if cleanup_qc:
        obj.clean.cleanup()

    # In order to properly flag data, get all variables if None. Exclude QC variables.
    if variable is None:
        variable = list(set(obj.data_vars) - set(obj.clean.matched_qc_variables))

    # Check to ensure variable is list
    if not isinstance(variable, (list, tuple)):
        variable = [variable]

    # Loop through each variable and call web service for that variable
    for var_name in variable:
        # Create URL (one request per variable)
        url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
        url += datastream
        url += '&varname=' + var_name
        url += ''.join(
            [
                '&searchmetric=',
                assessment,
                '&dqrfields=dqrid,starttime,endtime,metric,subject',
            ]
        )

        # Call web service
        req = requests.get(url)

        # Check status values and raise error if not successful
        status = req.status_code
        if status == 400:
            raise ValueError('Check parameters')
        if status == 500:
            raise ValueError('DQR Webservice Temporarily Down')

        # Get data and run through each dqr
        # Response is pipe-delimited lines: dqrid|starttime|endtime|metric|subject
        dqrs = req.text.splitlines()
        time = obj['time'].values
        dqr_results = {}
        for line in dqrs:
            line = line.split('|')
            dqr_no = line[0]

            # Exclude DQRs if in list
            if exclude is not None and dqr_no in exclude:
                continue

            # Only include if in include list
            if include is not None and dqr_no not in include:
                continue

            # DQR start/end times are epoch seconds; map to the object's
            # time axis and skip DQRs with no overlapping samples.
            starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
            endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
            ind = np.where((time >= starttime) & (time <= endtime))
            if ind[0].size == 0:
                continue

            # A DQR can appear on multiple lines; merge its flagged indices.
            if dqr_no in dqr_results.keys():
                dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
            else:
                dqr_results[dqr_no] = {
                    'index': ind,
                    'test_assessment': line[3],
                    'test_meaning': ': '.join([dqr_no, line[-1]]),
                }

        # Add one QC test per DQR to the variable's ancillary QC variable.
        for key, value in dqr_results.items():
            try:
                obj.qcfilter.add_test(
                    var_name,
                    index=value['index'],
                    test_meaning=value['test_meaning'],
                    test_assessment=value['test_assessment'],
                )
            except IndexError:
                print(f"Skipping '{var_name}' DQR application because of IndexError")

        # Align DQR assessment wording with the embedded-QC wording.
        if normalize_assessment:
            obj.clean.normalize_assessment(variables=var_name)

    return obj
| 33.174419 | 91 | 0.608132 | 699 | 5,706 | 4.885551 | 0.354793 | 0.011713 | 0.015227 | 0.008785 | 0.115081 | 0.09019 | 0.054466 | 0.028111 | 0 | 0 | 0 | 0.004328 | 0.311602 | 5,706 | 171 | 92 | 33.368421 | 0.865071 | 0.400456 | 0 | 0.061728 | 0 | 0 | 0.163087 | 0.015251 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012346 | false | 0 | 0.049383 | 0 | 0.074074 | 0.012346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
303d67c6ff0813b9d6fd68be88e83d8ab918ad04 | 9,018 | py | Python | database/__init__.py | tclarkin/shread_dash | a45e2f2946c74526e69c087587676aaa4cb15fba | [
"CC0-1.0"
] | null | null | null | database/__init__.py | tclarkin/shread_dash | a45e2f2946c74526e69c087587676aaa4cb15fba | [
"CC0-1.0"
] | null | null | null | database/__init__.py | tclarkin/shread_dash | a45e2f2946c74526e69c087587676aaa4cb15fba | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 11:55:27 2021
Snow-Hydrology Repo for Evaluation, Analysis, and Decision-making Dashboard (shread_dash.py) Database Initialization
This is part of dashboard loading database and other data into memory. The data for the database relies on a series of
retrieval scripts (/database/SUBS) that retrieve hydrometeorological data from online and store the data in local
databases. Part of the retrieval process is dependent on the SHREAD repository (https://github.com/tclarkin/shread).
The databases are built in SQLite.
@author: tclarkin, buriona (2020-2022)
"""
import os
import datetime as dt
from pathlib import Path
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
import dash_bootstrap_components as dbc
import dash
### Launch SQLite DB Server ###

# Define directories and app
this_dir = os.path.dirname(os.path.realpath(__file__))  # directory of this module
#this_dir = Path('C:/Programs/shread_dash/database')
app_dir = os.path.dirname(this_dir)  # repository root (one level up)
# define functions
def create_app():
    """
    Construct the Dash application and configure its Flask server with
    SQLAlchemy binds pointing at the local SQLite database files
    (SHREAD, CSAS, SNOTEL and FLOW retrievals).

    Returns
    -------
    app : dash.Dash
        The configured Dash application.
    """
    assets_path = Path(app_dir, 'assets')
    app = dash.Dash(
        __name__,
        external_stylesheets=[dbc.themes.BOOTSTRAP],
        update_title="Updating...",
        # suppress_callback_exceptions=True,
        assets_folder=assets_path
    )
    app.title="WCAO Dashboard"

    # Paths to the individual SQLite databases, grouped by retrieval source.
    db_path = Path(app_dir, 'database')
    snodas_swe_db_path = Path(db_path, 'SHREAD', 'swe.db')
    snodas_sd_db_path = Path(db_path, 'SHREAD', 'sd.db')
    csas_iv_db_path = Path(db_path, 'CSAS', 'csas_iv.db')
    csas_dv_db_path = Path(db_path, 'CSAS', 'csas_dv.db')
    snotel_dv_db_path = Path(db_path, 'SNOTEL', 'snotel_dv.db')
    usgs_dv_db_path = Path(db_path, 'FLOW', 'usgs_dv.db')
    usgs_iv_db_path = Path(db_path, 'FLOW', 'usgs_iv.db')
    rfc_dv_db_path = Path(db_path, 'FLOW', 'rfc_dv.db')
    rfc_iv_db_path = Path(db_path, 'FLOW', 'rfc_iv.db')
    ndfd_mint_db_path = Path(db_path, 'SHREAD', 'mint.db')
    ndfd_maxt_db_path = Path(db_path, 'SHREAD', 'maxt.db')
    #ndfd_rhm_db_path = Path(db_path, 'SHREAD', 'rhm.db')
    ndfd_pop12_db_path = Path(db_path, 'SHREAD', 'pop12.db')
    ndfd_qpf_db_path = Path(db_path, 'SHREAD', 'qpf.db')
    ndfd_snow_db_path = Path(db_path, 'SHREAD', 'snow.db')
    ndfd_sky_db_path = Path(db_path, 'SHREAD', 'sky.db')

    # SQLAlchemy connection strings for each database file.
    snodas_swe_db_con_str = f'sqlite:///{snodas_swe_db_path.as_posix()}'
    snodas_sd_db_con_str = f'sqlite:///{snodas_sd_db_path.as_posix()}'
    csas_iv_db_con_str = f'sqlite:///{csas_iv_db_path.as_posix()}'
    csas_dv_db_con_str = f'sqlite:///{csas_dv_db_path.as_posix()}'
    snotel_dv_db_con_str = f'sqlite:///{snotel_dv_db_path.as_posix()}'
    usgs_dv_db_con_str = f'sqlite:///{usgs_dv_db_path.as_posix()}'
    usgs_iv_db_con_str = f'sqlite:///{usgs_iv_db_path.as_posix()}'
    rfc_dv_db_con_str = f'sqlite:///{rfc_dv_db_path.as_posix()}'
    rfc_iv_db_con_str = f'sqlite:///{rfc_iv_db_path.as_posix()}'
    ndfd_mint_db_con_str = f'sqlite:///{ndfd_mint_db_path}'
    ndfd_maxt_db_con_str = f'sqlite:///{ndfd_maxt_db_path}'
    #ndfd_rhm_db_con_str = f'sqlite:///{ndfd_rhm_db_path}'
    ndfd_pop12_db_con_str = f'sqlite:///{ndfd_pop12_db_path}'
    ndfd_qpf_db_con_str = f'sqlite:///{ndfd_qpf_db_path}'
    ndfd_snow_db_con_str = f'sqlite:///{ndfd_snow_db_path}'
    ndfd_sky_db_con_str = f'sqlite:///{ndfd_sky_db_path}'

    app.server.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # Each bind name maps one data type to its SQLite database.
    app.server.config['SQLALCHEMY_BINDS'] = {
        'swe': snodas_swe_db_con_str,
        'sd': snodas_sd_db_con_str,
        'csas_iv':csas_iv_db_con_str,
        'csas_dv':csas_dv_db_con_str,
        'snotel_dv':snotel_dv_db_con_str,
        'usgs_dv':usgs_dv_db_con_str,
        'usgs_iv':usgs_iv_db_con_str,
        'rfc_dv':rfc_dv_db_con_str,
        'rfc_iv':rfc_iv_db_con_str,
        "mint": ndfd_mint_db_con_str,
        "maxt": ndfd_maxt_db_con_str,
        #"rhm": ndfd_rhm_db_con_str,
        "pop12": ndfd_pop12_db_con_str,
        "qpf": ndfd_qpf_db_con_str,
        "snow": ndfd_snow_db_con_str,
        "sky": ndfd_sky_db_con_str,
    }

    return app
# Launch server
app = create_app()
db = SQLAlchemy(app.server)
# Map the already-created SQLite tables onto SQLAlchemy models.
db.reflect()

### Load in other Data ###

# Define working (data) directory
os.chdir(os.path.join(app_dir, 'database'))

# Identify files in database
csas_dir = os.path.join(app_dir, 'database', 'CSAS')
csas_files = os.listdir(csas_dir)
res_dir = os.path.join(app_dir, 'resources')

#switch working dir back to main dir so dash app can function correctly
os.chdir(app_dir)

print('Calculating bounds of SNODAS.db')

# Create list of basins (dropdown options: user-facing label + basin id)
#TODO call from .csv for future user input
basin_list = [
    {'label': 'NONE', 'value': None},
    {'label': 'SAN JUAN - NAVAJO RES NR ARCHULETA', 'value': 'NVRN5L_F'},
    {'label': 'ANIMAS - DURANGO', 'value': 'DRGC2H_F'},
    {'label': 'DOLORES - MCPHEE RESERVOIR', 'value': 'MPHC2L_F'},
    {'label': 'FLORIDA - LEMON RES NR DURANGO', 'value': 'LEMC2H_F'},
    {'label': 'LOS PINOS - NR BAYFIELD VALLECITO RES', 'value': 'VCRC2H_F'}
]

# Set ranges of variables for use in dashboard
elevrange =[5000, 15000]
print(f' Elevations from {elevrange[0]} to {elevrange[-1]}')
elevdict = dict()
for e in range(1, 20):
    # slider marks every 1000 ft, formatted like "5,000'"
    elevdict[str(e * 1000)] = f"{e * 1000:,}'"
sloperange = [0.0, 100]
print(f' Slopes from {sloperange[0]} to {sloperange[-1]}')
slopedict = dict()
for s in range(0, 11):
    slopedict[str(s * 10)] = f'{s * 10}°'
# Cardinal direction labels keyed by aspect angle in degrees.
aspectdict = {-90: "W",
              -45: "NW",
              0: "N",
              45: "NE",
              90: "E",
              135: "SE",
              180: "S",
              225: "SW",
              270: "W",
              315: "NW",
              360: "N"}

# Define colors (qualitative 8-color palette):
# https://colorbrewer2.org/?type=qualitative&scheme=Set1&n=9
color8 = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#a65628','#f781bf','#999999']

# Import FLOW gages and define list for dashboard drop down & add colors
usgs_gages = pd.read_csv(os.path.join(this_dir,"FLOW", "usgs_gages.csv"))
usgs_gages.index = usgs_gages.site_no
colorg = color8
# repeat the palette until there is at least one color per gage
while len(colorg)<len(usgs_gages):
    colorg = colorg*2
usgs_gages["color"] = colorg[0:len(usgs_gages)]

# Add list for dropdown menu
# NOTE(review): the "0" prefix presumably restores a leading zero lost when
# site numbers were read as integers -- confirm against usgs_gages.csv.
usgs_list = list()
for g in usgs_gages.index:
    usgs_list.append({"label": "0" + str(usgs_gages.site_no[g]) + " " + usgs_gages.name[g] + " (" + str(
        usgs_gages.elev_ft[g]) + " ft | " + str(usgs_gages.area[g]) + " sq.mi.)", "value": "0" + str(g)})

# Create list of SNOTEL sites & add colors
snotel_sites = pd.read_csv(os.path.join(this_dir,"SNOTEL","snotel_sites.csv"))
snotel_sites.index = snotel_sites.triplet
colors = color8
while len(colors)<len(snotel_sites):
    colors = colors*2
snotel_sites["color"] = snotel_sites["prcp_color"] = colors[0:len(snotel_sites)]

# Add list for dropdown menu
snotel_list = list()
for s in snotel_sites.index:
    snotel_list.append({"label": str(snotel_sites.site_no[s]) + " " + snotel_sites.name[s] + " (" + str(
        round(snotel_sites.elev_ft[s], 0)) + " ft)", "value": s})

# Create list of CSAS sites & add colors (station metadata is hard-coded)
csas_gages = pd.DataFrame()
csas_gages["site"] = ["SASP","SBSP","PTSP","SBSG"]
csas_gages["name"] = ["Swamp Angel","Senator Beck","Putney [Meteo]","Senator Beck Gage [Flow]"]
csas_gages["elev_ft"] = [11060,12186,12323,11030]
colorc = color8
while len(colorc)<len(csas_gages):
    colorc = colorc*2
csas_gages["color"] = csas_gages["prcp_color"] = colorc[0:len(csas_gages)]
csas_gages.index = csas_gages["site"]
csas_list = list()
for c in csas_gages.index:
    csas_list.append({"label": csas_gages.name[c] + " (" + str(
        round(csas_gages.elev_ft[c], 0)) + " ft)", "value": c})

# Generate NDFD list (forecast products selectable in the dashboard)
forecast_list = [{"label":"Flow (RFC)","value":"flow"},
                 {"label":"Min. Temp","value":"mint"},
                 {"label":"Max. Temp","value":"maxt"},
                 {"label":"Precip (QPF)","value":"qpf"},
                 {"label": "Precip Prob.", "value": "pop12"},
                 {"label":"Snow","value":"snow"},
                 #{"label":"Relative Humidity","value":"rhm"},
                 {"label":"Sky Coverage","value":"sky"}
                 ]

# Import CSAS dust on snow data (optional: dust controls are disabled when
# the file is missing or empty)
try:
    dust = pd.read_csv(os.path.join(csas_dir, "csas_dust.csv"))
except FileNotFoundError:
    dust = pd.DataFrame()
if dust.empty:
    dust_disable = True
else:
    dust_disable = False
    # keep rows 1..n as the time series (row 0 is dropped)
    dust_ts = dust.loc[1:len(dust),]
    dust_ts = dust_ts.reset_index(drop=True)
    dust_ts["Date"] = pd.to_datetime(dust_ts["Date"],format="%d-%m-%y")
    dust_ts.index = dust_ts.Date
    dust_ts = dust_ts.drop("Date",axis=1)
    # divide by 2.54 -- presumably converts cm to inches; verify source units
    dust_ts = (dust_ts.apply(pd.to_numeric)/2.54)
    dust_layers = pd.DataFrame(index=dust_ts.columns)
    colord = color8
    while len(colord) < len(dust_layers):
        colord = colord * 2
    dust_layers["color"] = colord[0:len(dust_layers)]

# set initial start and end date (+/- 10 days around today)
start_date = dt.datetime.now().date() - dt.timedelta(days=10)
end_date = dt.datetime.now().date() + dt.timedelta(days=10)
| 37.732218 | 118 | 0.662009 | 1,389 | 9,018 | 4.016559 | 0.237581 | 0.052698 | 0.045886 | 0.034415 | 0.253271 | 0.168668 | 0.04983 | 0.022943 | 0.013623 | 0 | 0 | 0.024575 | 0.178754 | 9,018 | 238 | 119 | 37.890756 | 0.728598 | 0.178643 | 0 | 0 | 0 | 0 | 0.247651 | 0.074881 | 0 | 0 | 0 | 0.004202 | 0 | 1 | 0.005917 | false | 0 | 0.04142 | 0 | 0.053254 | 0.017751 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
303f8a39fa8bc76e0faedd216816494e7f0cf74e | 357 | py | Python | scripts/test.py | germy/piprint | c4bf36ccf90cbce50ecae4e673b916f0b7a1b522 | [
"MIT"
] | null | null | null | scripts/test.py | germy/piprint | c4bf36ccf90cbce50ecae4e673b916f0b7a1b522 | [
"MIT"
] | null | null | null | scripts/test.py | germy/piprint | c4bf36ccf90cbce50ecae4e673b916f0b7a1b522 | [
"MIT"
] | null | null | null | import sys
def write():
print('Creating new text file')
name = 'test.txt' # Name of text file coerced with +.txt
try:
file = open(name,'a') # Trying to create a new file or open one
file.close()
except:
print('Something went wrong! Can\'t tell what?')
sys.exit(0) # quit Python
write() | 22.3125 | 74 | 0.560224 | 50 | 357 | 4 | 0.72 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004167 | 0.327731 | 357 | 16 | 75 | 22.3125 | 0.829167 | 0.246499 | 0 | 0 | 0 | 0 | 0.227092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
304744cf40637da26f72f55057da5c765cb35850 | 27,025 | py | Python | marketmodel/neuralsde.py | vicaws/neuralSDE-marketmodel | ffbc558fee273cbae81ffe9312fc878ba4d261d1 | [
"MIT"
] | 5 | 2021-08-19T15:24:08.000Z | 2022-03-09T07:11:41.000Z | marketmodel/neuralsde.py | vicaws/neuralSDE-marketmodel | ffbc558fee273cbae81ffe9312fc878ba4d261d1 | [
"MIT"
] | null | null | null | marketmodel/neuralsde.py | vicaws/neuralSDE-marketmodel | ffbc558fee273cbae81ffe9312fc878ba4d261d1 | [
"MIT"
] | 1 | 2021-11-10T07:55:06.000Z | 2021-11-10T07:55:06.000Z | """
Construct, train neural-SDE models and simulate trajectories from the learnt
models.
"""
# Copyright 2021 Sheng Wang.
# Affiliation: Mathematical Institute, University of Oxford
# Email: sheng.wang@maths.ox.ac.uk
import numpy as np
import os
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_model_optimization as tfmot
import marketmodel.utils as utils
from glob import glob
from tqdm import tqdm
from marketmodel.factors import PrepTrainData
class Loss(object):
    """
    Library of loss functions for neural SDE models.
    """

    @staticmethod
    def loss_S(dt):
        """
        Loss function for the neural SDE model of S.

        The network output per sample is (alpha, beta): beta*S is the drift
        of S, and the increment variance is dt * S**2 * exp(-alpha). The
        returned callable is the corresponding negative Gaussian
        log-likelihood of the observed increments dS (up to constants).

        Parameters
        __________
        dt: float
            Time increment.

        Returns
        _______
        loss: method
            Loss function.
        """
        def loss(y_true, y_pred):
            # extract data
            alpha = y_pred[:, 0]
            beta = y_pred[:, 1]
            dS = y_true[:, 0]
            S = y_true[:, 1]

            # compute drift
            mu = beta * S  # drift term

            # compute log-likelihood:
            # log-variance term (2 log S - alpha) + squared standardised residual
            l = tf.reduce_sum(2*tf.math.log(S)-alpha + tf.square(dS - mu*dt) *
                              tf.exp(alpha) / dt / S**2)
            return l
        return loss

    @staticmethod
    def loss_xi(dt, n_dim, n_varcov, mask_diagonal, W, G,
                lbd_penalty_eq, lbd_penalty_sz):
        """
        Loss function for the neural SDE model of xi.

        The negative log-likelihood of the projected increments proj_dX is
        computed under a Gaussian model with triangular diffusion matrix
        (diagonal entries exponentiated for positivity) and a drift that is
        corrected along given boundary directions, plus penalty terms from a
        market-price-of-risk style regression.

        Parameters
        __________
        dt: float
            Time increment.
        n_dim: int
            Number of xi factors.
        n_varcov: int
            Number of entries of the triangular diffusion matrix.
        mask_diagonal: array-like of bool
            Mask selecting the diagonal diffusion entries among the network
            outputs (those are exponentiated).
        W: numpy.ndarray, shape (n_bdy, n_dim)
            Directions used to weight/apply the drift corrections.
        G: numpy.ndarray
            Rows G[1:] project diffusion and drift in the penalty regression.
        lbd_penalty_eq: float
            Weight on the regression residual penalty.
        lbd_penalty_sz: float
            Weight on the size of the regression coefficients.

        Returns
        _______
        loss: method
            Loss function.
        """
        def loss(y_true, y_pred):
            # get diffusion terms in the predicted values; in particular,
            # diagonal terms of the diffusion matrix are taken exponentials
            sigma_term = tf.transpose(
                tf.where(tf.constant(mask_diagonal),
                         tf.transpose(tf.exp(y_pred)), tf.transpose(y_pred)))[:,
                         :n_varcov]

            # construct the transposed diffusion matrix
            sigma_tilde_T = tfp.math.fill_triangular(sigma_term, upper=True)

            # get diagonal terms of the diffusion matrix
            sigma_term_diagonal = tf.where(tf.constant(mask_diagonal),
                                           tf.transpose(y_pred), 0.)

            # get drift terms in the predicted values
            mu_residuals = y_pred[:, n_varcov:]

            # get pre-calculated terms from the inputs
            # (y_true packs several arrays; n1/n2/n3 track the offsets)
            ## regarding diffusion scaling
            proj_dX = y_true[:, :n_dim]
            Omega = tf.reshape(y_true[:, n_dim:n_dim+n_dim**2],
                               shape=[-1, n_dim, n_dim])
            det_Omega = y_true[:, n_dim+n_dim**2:n_dim+n_dim**2+1]
            n1 = n_dim+n_dim**2+1
            ## regarding drift correction
            n_bdy = W.shape[0]
            corr_dirs = tf.reshape(y_true[:, n1:n1+n_dim*n_bdy],
                                   shape=[-1, n_bdy, n_dim])
            epsmu = y_true[:, n1+n_dim*n_bdy:n1+n_dim*n_bdy+n_bdy]
            n2 = n1+n_dim*n_bdy+n_bdy
            ## regarding baseline drift
            mu_base = y_true[:, n2:n2+n_dim]
            n3 = n2+n_dim
            ## regarding MPR penalty
            zed = tf.expand_dims(y_true[:, n3:], axis=-1)

            # compute corrected drifts
            ## compute drift (network outputs scale the baseline drift)
            mu_term = mu_base * mu_residuals
            ## compute weights assigned to each correction direction
            mu_tilde_inner_W = tf.matmul(
                mu_term, tf.constant(W.T, dtype=tf.float32))
            corr_dir_inner_W = tf.reduce_sum(
                corr_dirs * tf.constant(W, dtype=tf.float32), axis=-1)
            # gamma > 0 only when the uncorrected drift violates -epsmu
            gamma = tf.maximum(-mu_tilde_inner_W - epsmu, 0.) / corr_dir_inner_W
            ## compute corrected drift
            mu_tf = mu_term + tf.reduce_sum(
                tf.expand_dims(gamma, axis=-1) * corr_dirs, axis=1)
            mu_tf = tf.expand_dims(mu_tf, axis=-1)

            # compute log likelihood (solve against Omega^T sigma_tilde
            # instead of inverting the covariance)
            Omega_T = tf.transpose(Omega, perm=[0, 2, 1])
            sigma_tilde = tf.transpose(sigma_tilde_T, perm=[0, 2, 1])
            proj_mu = tf.linalg.solve(Omega_T, mu_tf)
            sol_mu = tf.linalg.triangular_solve(
                sigma_tilde, proj_mu, lower=True)
            sol_mu = tf.squeeze(sol_mu)
            proj_dX_tf = tf.expand_dims(proj_dX, axis=-1)
            sol_dX = tf.linalg.triangular_solve(
                sigma_tilde, proj_dX_tf, lower=True)
            sol_dX = tf.squeeze(sol_dX)
            # l1: log-determinant terms, l2-l4: expanded quadratic form
            l1 = 2 * tf.reduce_sum(tf.math.log(det_Omega)) + \
                 2 * tf.reduce_sum(sigma_term_diagonal)
            l2 = 1./dt * tf.reduce_sum(tf.square(sol_dX))
            l3 = dt * tf.reduce_sum(tf.square(sol_mu))
            l4 = -2 * tf.reduce_sum(sol_mu * sol_dX)

            # compute the penalty term
            ## evaluate the X variable in the regression problem
            sigma = tf.matmul(Omega_T, sigma_tilde)
            G_tf = tf.expand_dims(tf.constant(G[1:], dtype=tf.float32), axis=0)
            reg_Xt = tf.matmul(sigma, G_tf, transpose_a=True)
            ## evaluate the Y variable in the regression problem
            reg_Y = tf.matmul(G_tf, mu_tf, transpose_a=True) - zed
            ## evaluate the OLS estimates of the regression problem
            reg_XtY = tf.matmul(reg_Xt, reg_Y)
            reg_XtX = tf.matmul(reg_Xt, reg_Xt, transpose_b=True)
            reg_psi = tf.linalg.solve(reg_XtX, reg_XtY)
            reg_err = reg_Y - tf.matmul(reg_Xt, reg_psi, transpose_a=True)
            pnty = lbd_penalty_eq * tf.reduce_sum(tf.square(reg_err)) + \
                   lbd_penalty_sz * tf.reduce_sum(tf.square(reg_psi))

            return l1 + l2 + l3 + l4 + pnty
        return loss
class Model(object):
    """
    Library of constructing neural network models.
    """

    @staticmethod
    def construct_S(dim_input, n_obs,
                    pruning_sparsity, validation_split, batch_size, epochs):
        """Build the pruned dense network for the S-dynamics: three hidden
        layers of 128 ReLU units with 10% dropout, two outputs (alpha, beta),
        wrapped in a polynomial-decay magnitude-pruning schedule."""
        relu = tf.nn.relu
        stack = [tf.keras.layers.Dense(128, input_shape=(dim_input,),
                                       activation=relu)]
        for _ in range(2):
            stack.append(tf.keras.layers.Dropout(rate=0.1))
            stack.append(tf.keras.layers.Dense(128, activation=relu))
        stack.append(tf.keras.layers.Dropout(rate=0.1))
        stack.append(tf.keras.layers.Dense(2))
        model_S = tf.keras.Sequential(stack)

        # Sparsity ramps from 0 to the target over all training steps.
        n_obs_train = n_obs * (1 - validation_split)
        end_step = np.ceil(n_obs_train / batch_size).astype(np.int32) * epochs
        schedule = tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=0, final_sparsity=pruning_sparsity,
            begin_step=0, end_step=end_step
        )
        return tfmot.sparsity.keras.prune_low_magnitude(model_S, schedule)

    @staticmethod
    def construct_mu(dim_input):
        """Build the (unpruned) dense network for the baseline drift: two
        hidden layers of 128 ReLU units with 10% dropout, two outputs."""
        relu = tf.nn.relu
        return tf.keras.Sequential([
            tf.keras.layers.Dense(128, input_shape=(dim_input,),
                                  activation=relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(128, activation=relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(2)])

    @staticmethod
    def construct_xi(dim_input, dim_output, n_obs,
                     pruning_sparsity, validation_split, batch_size, epochs):
        """Build the pruned dense network for the xi-dynamics: three hidden
        layers of 256 ReLU units with 10% dropout, `dim_output` outputs,
        wrapped in a polynomial-decay magnitude-pruning schedule."""
        relu = tf.nn.relu
        stack = [tf.keras.layers.Dense(256, input_shape=(dim_input,),
                                       activation=relu)]
        for _ in range(2):
            stack.append(tf.keras.layers.Dropout(rate=0.1))
            stack.append(tf.keras.layers.Dense(256, activation=relu))
        stack.append(tf.keras.layers.Dropout(rate=0.1))
        stack.append(tf.keras.layers.Dense(dim_output))
        model_xi = tf.keras.Sequential(stack)

        # Sparsity ramps from 0 to the target over all training steps.
        n_obs_train = n_obs * (1 - validation_split)
        end_step = np.ceil(n_obs_train / batch_size).astype(np.int32) * epochs
        schedule = tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=0.0, final_sparsity=pruning_sparsity,
            begin_step=0, end_step=end_step
        )
        return tfmot.sparsity.keras.prune_low_magnitude(model_xi, schedule)
class Train(object):
"""
Library of training methods for neural SDE models.
"""
    @staticmethod
    def train_S(X_S, Y_S,
                pruning_sparsity=0.5, validation_split=0.1,
                batch_size=512, epochs=500, rand_seed=0,
                force_fit=False, model_name='model_S',
                out_dir='output/checkpoint/'):
        """
        Train the pruned neural network for the SDE model of S, or load
        previously saved weights when a checkpoint for the same
        `model_name`/`rand_seed` exists and `force_fit` is False.

        Parameters
        __________
        X_S: numpy.ndarray
            Network inputs, shape (n_obs, dim_input).
        Y_S: numpy.ndarray
            Targets consumed by `Loss.loss_S` (columns: dS, S).
        pruning_sparsity: float
            Final sparsity of the pruning schedule.
        validation_split: float
            Fraction of data held out for validation.
        batch_size: int
            Mini-batch size.
        epochs: int
            Number of training epochs.
        rand_seed: int
            TensorFlow RNG seed; also part of the checkpoint name.
        force_fit: bool
            If True, re-train even when a checkpoint exists.
        model_name: string
            Base name of the checkpoint/log files.
        out_dir: string
            Directory for checkpoints, CSV logs and plots.

        Returns
        _______
        model_S: trained (pruned) keras model.
        """
        n_obs, dim_input = X_S.shape

        # construct the neural network model
        model_S = Model.construct_S(
            dim_input, n_obs,
            pruning_sparsity, validation_split, batch_size, epochs)

        # compile the neural network model (dt fixed to 1e-3 in the loss)
        model_S.compile(
            loss=Loss.loss_S(1e-3),
            optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4)
        )

        # set up I/O
        tag = out_dir + model_name + '_' + str(rand_seed)
        checkpoint_filepath_model_S = tag
        checkpoint_filepath_model_S_all = tag + '*'
        csv_fname = tag + '_history.csv'
        pruning_dir = out_dir + 'pruning_summary/'
        if not os.path.exists(pruning_dir):
            os.mkdir(pruning_dir)

        # train the pruned model (or load existing weights)
        tf.random.set_seed(rand_seed)
        if glob(checkpoint_filepath_model_S_all) and not force_fit:
            model_S.load_weights(checkpoint_filepath_model_S)
        else:
            model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=checkpoint_filepath_model_S,
                save_weights_only=True,
                monitor='loss',
                mode='min',
                save_best_only=True)
            csv_logger = tf.keras.callbacks.CSVLogger(
                filename=csv_fname,
                separator=',',
                append=False
            )
            history = model_S.fit(
                X_S, Y_S,
                epochs=epochs,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=True,
                verbose=True,
                callbacks=[
                    model_checkpoint_callback,
                    csv_logger,
                    tfmot.sparsity.keras.UpdatePruningStep(),
                    tfmot.sparsity.keras.PruningSummaries(log_dir=pruning_dir)]
            )

            # plot training loss history
            plot_fname = tag + '_history.png'
            utils.PlotLib.plot_loss_over_epochs(history, True, plot_fname)

        return model_S
    @staticmethod
    def train_mu(X_S, mu_base,
                 validation_split=0.1, batch_size=512,
                 epochs=200, rand_seed=0, force_fit=False,
                 model_name='model_mu', out_dir='output/checkpoint/'):
        """
        Train (with mean-absolute-error loss) the network that regresses the
        baseline drift `mu_base` on the inputs `X_S`, or load previously
        saved weights when a checkpoint exists and `force_fit` is False.

        Parameters
        __________
        X_S: numpy.ndarray
            Network inputs, shape (n_obs, dim_input).
        mu_base: numpy.ndarray
            Baseline drift targets (two columns, per `Model.construct_mu`).
        validation_split, batch_size, epochs, rand_seed, force_fit,
        model_name, out_dir:
            Same meaning as in `train_S`.

        Returns
        _______
        model_mu: trained keras model.
        """
        dim_input = X_S.shape[1]

        # construct the neural network model
        model_mu = Model.construct_mu(dim_input)
        model_mu.compile(loss='mean_absolute_error', optimizer='adam')

        # set up I/O
        tag = out_dir + model_name + '_' + str(rand_seed)
        checkpoint_filepath_model_mu = tag
        checkpoint_filepath_model_mu_all = tag + '*'
        csv_fname = tag + '_history.csv'

        # train the model (or load existing weights)
        if glob(checkpoint_filepath_model_mu_all) and not force_fit:
            model_mu.load_weights(checkpoint_filepath_model_mu)
        else:
            model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=checkpoint_filepath_model_mu,
                save_weights_only=True,
                monitor='loss',
                mode='min',
                save_best_only=True)
            csv_logger = tf.keras.callbacks.CSVLogger(
                filename=csv_fname,
                separator=',',
                append=False
            )
            history = model_mu.fit(
                X_S, mu_base,
                epochs=epochs,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=True, verbose=True,
                callbacks=[model_checkpoint_callback,
                           csv_logger]
            )

            # plot training loss history
            plot_fname = tag + '_history.png'
            utils.PlotLib.plot_loss_over_epochs(history, True, plot_fname)

        return model_mu
@staticmethod
def train_xi(X_xi, Y_xi, W, G,
lbd_penalty_eq, lbd_penalty_sz,
pruning_sparsity=0.5, validation_split=0.1,
batch_size=512, epochs=20000, rand_seed=0,
force_fit=False, model_name='model_xi',
out_dir='output/checkpoint/'):
n_bdy, n_dim = W.shape
n_varcov, mask_diagonal = Train._identify_diagonal_entries(n_dim)
# construct the neural network model
model_xi_pruning = Model.construct_xi(
n_dim + 1, n_dim + n_varcov, X_xi.shape[0],
pruning_sparsity, validation_split, batch_size, epochs)
model_xi_pruning.compile(
loss=Loss.loss_xi(1e-3, n_dim, n_varcov, mask_diagonal, W, G,
lbd_penalty_eq, lbd_penalty_sz),
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
)
# set up I/O
tag = out_dir + model_name + '_' + str(rand_seed)
checkpoint_filepath = tag
checkpoint_filepath_all = tag + '*'
csv_fname = tag + '_history.csv'
pruning_dir = out_dir + 'pruning_summary/'
if not os.path.exists(pruning_dir):
os.mkdir(pruning_dir)
# train the pruned model
tf.random.set_seed(rand_seed)
if glob(checkpoint_filepath_all) and not force_fit:
model_xi_pruning.load_weights(checkpoint_filepath)
else:
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='loss',
mode='min',
save_best_only=True)
csv_logger = tf.keras.callbacks.CSVLogger(
filename=csv_fname,
separator=',',
append=False
)
history = model_xi_pruning.fit(
X_xi, Y_xi,
epochs=epochs,
batch_size=batch_size,
validation_split=validation_split,
shuffle=True,
verbose=True,
callbacks=[
model_checkpoint_callback,
csv_logger,
tfmot.sparsity.keras.UpdatePruningStep(),
tfmot.sparsity.keras.PruningSummaries(log_dir=pruning_dir)]
)
# plot training loss history
plot_fname = tag + '_history.png'
utils.PlotLib.plot_loss_over_epochs(history, True, plot_fname)
return model_xi_pruning
    @staticmethod
    def predict_in_sample_model_xi(model_xi, X_xi, Y_xi, W, G):
        """Reconstruct drift and diffusion terms on the training inputs.

        Runs ``model_xi`` on ``X_xi`` and combines the raw network outputs
        with the auxiliary quantities packed into ``Y_xi`` (scaling matrices
        Omega, boundary correction directions, epsmu margins and the drift
        baselines) to produce the corrected drift and scaled diffusion.

        Returns (mu_tilde, sigma_tilde_T, mu, sigma_T, sigma_L), where
        sigma_L is the Cholesky factor of sigma_T' sigma_T.

        NOTE(review): the parameter ``G`` is never used in this body --
        confirm whether it can be dropped or should feed the computation.
        """
        # X_xi layout: first column is S, remaining columns are xi,
        # hence n_dim = columns - 1
        n_dim = X_xi.shape[1] - 1
        n_bdy = W.shape[0]
        n_varcov, mask_diagonal = Train._identify_diagonal_entries(n_dim)
        # predict underlying functions using the learnt NN
        y_pred_nn = model_xi.predict(X_xi)
        # get diffusion terms; diagonal entries are produced in log-space,
        # so exponentiate them back
        mask_diagonal_np = [m[0] for m in mask_diagonal]
        sigma_term = y_pred_nn.copy()
        sigma_term[:, mask_diagonal_np] = np.exp(sigma_term[:, mask_diagonal_np])
        sigma_term = sigma_term[:, :n_varcov]
        sigma_tilde_T = Train._fill_triu(sigma_term, n_dim)
        # get drift terms (multiplicative residuals on the baseline drift)
        mu_residuals = y_pred_nn[:, n_varcov:]
        # get inputs for scaling diffusions and correcting drifts,
        # unpacked from the columns of Y_xi
        ## regarding diffusions
        Omega = np.reshape(Y_xi[:, n_dim:n_dim+n_dim**2],
                           newshape=[-1, n_dim, n_dim])
        ## regarding drifts
        n1 = n_dim+n_dim**2+1
        corr_dirs = np.reshape(Y_xi[:, n1:n1+n_dim*n_bdy],
                               newshape=[-1, n_bdy, n_dim])
        epsmu = Y_xi[:, n1+n_dim*n_bdy:n1+n_dim*n_bdy+n_bdy]
        n2 = n1+n_dim*n_bdy+n_bdy
        mu_base = Y_xi[:, n2:n2+n_dim]
        # compute drift term
        mu_tilde = mu_base * mu_residuals
        # scale diffusion
        sigma_T = np.matmul(sigma_tilde_T, Omega)
        # correct drift: push it along corr_dirs wherever its projection on
        # a boundary normal falls below the -epsmu margin
        mu_tilde_inner_W = mu_tilde.dot(W.T)
        corr_dir_inner_W = np.sum(corr_dirs * W[None, :, :], axis=-1)
        gamma = np.maximum(- mu_tilde_inner_W - epsmu, 0.) / corr_dir_inner_W
        mu = mu_tilde + np.sum(gamma[:, :, None] * corr_dirs, axis=1)
        # Cholesky factorisation of the covariance sigma_T' sigma_T
        # (comment previously said "LU deconposition"; the code is Cholesky)
        mat_cov = np.matmul(np.transpose(sigma_T, axes=[0, 2, 1]), sigma_T)
        sigma_L = np.linalg.cholesky(mat_cov)
        return mu_tilde, sigma_tilde_T, mu, sigma_T, sigma_L
@staticmethod
def _identify_diagonal_entries(n_dim):
"""
Return the Boolean logical mask array that indicates diagonal terms in
a diffusion matrix.
"""
# get the number of unknowns in the diffusion matrix
n_varcov = int(n_dim*(n_dim+1)/2)
# construct the diagonal entry mask
x = np.arange(n_varcov)
xc = np.concatenate([x, x[n_dim:][::-1]])
idxs_diagonal = [xc[i * (n_dim + 1)] for i in range(n_dim)]
mask_diagonal = np.zeros(n_varcov + n_dim, dtype=bool)
mask_diagonal[idxs_diagonal] = True
mask_diagonal = [[m] for m in mask_diagonal]
return n_varcov, mask_diagonal
@staticmethod
def _fill_triu(arrs_sigma, n_dim):
"""
Return a list of upper triangular diffusion matrices, given a list of
flat arrays that contain non-zero elements of the diffusion matrices.
"""
n_obs = arrs_sigma.shape[0]
mats_sigma = np.zeros((n_obs, n_dim, n_dim))
for i in range(n_obs):
arr_sigma = arrs_sigma[i]
xc = np.concatenate([arr_sigma, arr_sigma[n_dim:][::-1]])
g = np.reshape(xc, [n_dim, n_dim])
mats_sigma[i] = np.triu(g, k=0)
return mats_sigma
class Simulate(object):
    """
    Library of forward-simulation methods.
    """
    @staticmethod
    def simulate_S_xi_lite(dt, N, model_S, model_xi, model_mu,
                           S0, X0, W, b, factor_multiplier,
                           dist_multiplier, proj_scale,
                           rho_star, epsmu_star, X_interior, reflect=False):
        """Euler-scheme simulation of (S, xi) for N steps of size dt.

        At each step the three networks provide the drift/diffusion of S,
        the baseline drift of xi and the xi residuals; the xi coefficients
        are then scaled and corrected via ``scale_drift_diffusion``. When
        ``reflect`` is set, simulated points crossing a boundary
        (W xi - b < 0) are reflected back inside via ``reflect_data``.

        Returns (st, xit, mus_sim, vols_sim, n_reflect). The trajectories
        keep their NaN initialisation after the step where the simulation
        aborted, if any.
        """
        # simulate innovations: one Brownian increment per dimension of
        # (S, xi) for every time step
        n_dim = X0.shape[0]
        dW = np.random.normal(0, np.sqrt(dt), (n_dim + 1, N + 1))
        # initialise trajectories with NaNs so an early abort stays visible
        st = np.ones(N+1) * np.nan
        xit = np.ones((n_dim, N+1)) * np.nan
        st[0] = S0
        xit[:, 0] = X0
        mus_sim = []
        vols_sim = []
        n_varcov, mask_diagonal = Train._identify_diagonal_entries(n_dim)
        n_reflect = 0
        for i in tqdm(range(1, N+1)):
            try:
                # get drift and diffusion of S; both are parameterised
                # relative to the current level st[i-1]
                xi = xit[:, i-1]
                x_S = np.hstack((st[i-1]/factor_multiplier, xi))
                pred_S = model_S.predict(x_S.reshape(1, -1))[0]
                vol_S = np.sqrt(np.exp(-pred_S[0])) * st[i-1]
                mu_S = pred_S[1] * st[i-1]
                # simulate S
                S_ = st[i-1] + mu_S * dt + vol_S * dW[0, i]
                # get baseline drift
                x_mu = np.hstack((st[i-1]/factor_multiplier, xi))
                pred_mu_base = model_mu.predict(x_mu.reshape(1, -1))[0]
                # get drift and diffusion of xi; diagonal diffusion entries
                # are produced in log-space, so exponentiate them back
                x_xi = np.hstack((st[i-1]/factor_multiplier, xi))
                gamma_nn = model_xi.predict(x_xi.reshape(1, -1))[0]
                gamma_nn[np.array(mask_diagonal).ravel()] = np.exp(
                    gamma_nn[np.array(mask_diagonal).ravel()])
                # unfold the flat diffusion entries into a triangular matrix
                # (same layout as Train._fill_triu)
                sigma_term = gamma_nn[:n_varcov]
                xc = np.concatenate([sigma_term, sigma_term[n_dim:][::-1]])
                g = np.reshape(xc, [n_dim, n_dim])
                sigma_tilde = np.triu(g, k=0).T
                mu_residual = gamma_nn[n_varcov:]
                # scale diffusion and correct drift
                mu, mat_vol = Simulate.scale_drift_diffusion(
                    xi, mu_residual, sigma_tilde, W, b,
                    dist_multiplier, proj_scale,
                    rho_star, epsmu_star, X_interior, pred_mu_base)
                # tame coefficients: the norm-dependent damping keeps the
                # explicit Euler step from exploding
                mu_norm = 1. + np.linalg.norm(mu) * np.sqrt(dt)
                vol_norm = 1. + np.linalg.norm(mat_vol) * np.sqrt(dt)
                # simulate xi using Euler-scheme
                xi_ = xi + mu / mu_norm * dt + \
                    mat_vol.dot(dW[1:, i].reshape((-1, 1))).flatten()/vol_norm
                if reflect:
                    if np.any(W.dot(xi_) - b < 0):
                        n_reflect += 1
                        print(f'Reflect simulated data point at index {i}.')
                        xi_ = Simulate.reflect_data(xi, xi_, W, b)
                st[i] = S_
                xit[:, i] = xi_
                mus_sim.append(mu)
                vols_sim.append(mat_vol)
            except Exception as err:
                # BUGFIX: the original bare `except:` silently swallowed
                # every error (including KeyboardInterrupt). Report the
                # failure before aborting the simulation.
                print(f'Simulation aborted at step {i}: {err}')
                break
        return st, xit, mus_sim, vols_sim, n_reflect
    @staticmethod
    def simulate_S_xi(dt, N,
                      model_S, model_xi, model_mu,
                      S, X, W, b, factor_multiplier,
                      dist_multiplier, proj_scale,
                      rho_star, epsmu_star, X_interior,
                      train_rand_seed, sim_rand_seed,
                      force_simulate=False, reflect=False,
                      out_dir='output/checkpoint/'):
        """Seeded wrapper around ``simulate_S_xi_lite`` with CSV/plot output.

        Starts from the first observation of (S, X). Returns None without
        re-simulating when the output CSV already exists and
        ``force_simulate`` is False; otherwise returns
        (st, xit, mus_sim, vols_sim) and writes the trajectories to CSV
        plus a comparison plot.
        """
        print(f'Simulation number: {str(train_rand_seed)}_{str(sim_rand_seed)}')
        # set I/O
        plot_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
            f'_{str(sim_rand_seed)}.png'
        data_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
            f'_{str(sim_rand_seed)}.csv'
        if os.path.exists(data_fname) and not force_simulate:
            return
        # simulate
        np.random.seed(sim_rand_seed)
        S0 = S[0]
        X0 = X[0, :]
        st, xit, mus_sim, vols_sim, n_reflect = Simulate.simulate_S_xi_lite(
            dt, N, model_S, model_xi, model_mu,
            S0, X0, W, b, factor_multiplier,
            dist_multiplier, proj_scale,
            rho_star, epsmu_star, X_interior, reflect)
        if reflect:
            # reflected runs record the reflection count in the file names
            plot_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
                f'_{str(sim_rand_seed)}_reflect_{str(n_reflect)}.png'
            data_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
                f'_{str(sim_rand_seed)}_reflect_{str(n_reflect)}.csv'
        # save simulated data
        out_data = np.vstack((st, xit))
        columns = ['S'] + ['xi' + str(i) for i in range(1, len(X0)+1)]
        out_data = pd.DataFrame(data=out_data.T, columns=columns)
        out_data.to_csv(data_fname, index=False)
        # plot
        utils.PlotLib.plot_simulated_xi(st, xit, X, plot_fname)
        return st, xit, mus_sim, vols_sim
    @staticmethod
    def scale_drift_diffusion(x, mu_residual, sigma_tilde, W, b,
                              dist_multiplier, proj_scale,
                              rho_star, epsmu_star, x_interior, mu_base):
        """
        Scale the diffusion and correct the drift at state ``x``.

        The diffusion is damped near the boundaries {w'x = b}: the n_dim
        closest boundaries define a local basis (via QR of their normals)
        and the normalised distances provide per-direction scalings. The
        drift is pushed towards the interior anchors ``x_interior``
        whenever its projection on a boundary normal falls below the
        -epsmu margin.

        Returns (mu, mat_vol): the corrected drift vector and the Cholesky
        factor of the scaled covariance.

        NOTE(review): when the anchor sits on a corner, Omega (and hence
        mat_a) is all zeros, so np.linalg.cholesky raises LinAlgError --
        confirm that the caller is meant to treat that as an abort.
        """
        n_dim = W.shape[1]
        # calculate the distance of the data point to each boundary
        dist_x = np.abs(W.dot(x) - b) / np.linalg.norm(W, axis=1)
        # calculate the normalised distance indicators
        epsilon_sigma = PrepTrainData.normalise_dist_diffusion(
            dist_x, dist_multiplier, proj_scale)
        # sort by distance and get first n_dim closest ones
        idxs_sorted_eps = np.argsort(epsilon_sigma)
        idxs_used_eps = idxs_sorted_eps[:n_dim]
        Wd = W[idxs_used_eps]
        epsilond_sigma = epsilon_sigma[idxs_used_eps]
        # scale the diffusions
        if np.max(epsilond_sigma) < 1e-8:  # if the anchor point is on a corner
            Omega = np.zeros((n_dim, n_dim))
        else:  # if the anchor point is not on the corner
            # compute new bases from the closest boundary normals
            V = np.linalg.qr(Wd.T)[0].T
            Omega = np.diag(np.sqrt(epsilond_sigma)).dot(V)
        mat_a = Omega.T.dot(sigma_tilde).dot(sigma_tilde.T).dot(Omega)
        mat_vol = np.linalg.cholesky(mat_a)
        # scale the drifts
        ## compute drift (multiplicative residual on the baseline drift)
        mu_tilde = mu_base * mu_residual
        ## compute correction directions, pointing from x to the anchors
        corr_dirs_x = x_interior - x[None, :]
        epsmu_x = PrepTrainData.normalise_dist_drift(
            dist_x, rho_star, epsmu_star)
        mu_tilde_inner_W = W.dot(mu_tilde)
        corr_dir_inner_W = np.sum(corr_dirs_x * W, axis=-1)
        weights_corr_dir = np.maximum(-mu_tilde_inner_W-epsmu_x, 0.) /\
            corr_dir_inner_W
        ## compute the corrected drift
        mu = mu_tilde + np.sum(corr_dirs_x * weights_corr_dir[:, None], axis=0)
        return mu, mat_vol
    @staticmethod
    def reflect_data(x0, x1, W, b):
        """Reflect a step endpoint ``x1`` back inside the region {W x >= b}.

        ``x0`` is the previous (feasible) point. If several boundaries are
        crossed, candidate points just inside each crossed boundary are
        generated along the segment x0 -> x1 and the first feasible one is
        returned; for a single crossing the point is mirrored across that
        boundary, falling back to a point on the segment when the mirror
        image is still infeasible. A feasible ``x1`` is returned untouched.
        """
        mask_arb = W.dot(x1) - b < 0
        # reflect data if there is arbitrage
        if np.any(mask_arb):
            if np.sum(mask_arb) > 1:
                print('Break more than one boundaries, move to the closest '
                      'boundary.')
                wi = W[mask_arb]
                bi = b[mask_arb]
                # points slightly (1e-6) inside each violated boundary,
                # parameterised along the segment x0 -> x1
                candidates = ((bi + 1e-6 - wi.dot(x0))/wi.dot((x1-x0))).\
                    reshape((-1, 1)) * (x1 - x0) + x0
                idx_first_qualified = np.where(
                    np.all(candidates.dot(W.T) - b[None,:] >= 0, axis=1))[0][0]
                x2 = candidates[idx_first_qualified]
            else:
                wi = W[mask_arb]
                bi = b[mask_arb]
                t = bi - wi.dot(x1)
                # NOTE(review): the mirror formula x1 + 2*t*wi assumes the
                # rows of W are unit-norm -- confirm W is normalised
                # upstream.
                x2 = x1 + 2 * t * wi
                # if the reflected data point breaks any arbitrage bounds
                if np.any(x2.dot(W.T) - b < 0):
                    print('Reflect failed, move back to boundary.')
                    # NOTE(review): the intersection parameter is usually
                    # (bi - wi.x0)/(wi.(x1-x0)); here the numerator uses x1
                    # -- verify this is intentional.
                    t = (bi - wi.dot(x1)) / (wi.dot(x1 - x0))
                    x2 = x0 + t * (x1-x0)
            return x2
        else:
            return x1
| 35.006477 | 81 | 0.559704 | 3,483 | 27,025 | 4.069767 | 0.127476 | 0.018624 | 0.009877 | 0.008466 | 0.526702 | 0.454392 | 0.417213 | 0.385608 | 0.350335 | 0.333616 | 0 | 0.01626 | 0.342313 | 27,025 | 771 | 82 | 35.051881 | 0.781254 | 0.11704 | 0 | 0.347193 | 0 | 0 | 0.033069 | 0.015384 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035343 | false | 0 | 0.02079 | 0 | 0.10395 | 0.008316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
304748bc2e685abf6332deae76ea083ce94212c7 | 2,641 | py | Python | Rock-paper-scissor/jokenpo.py | Thdahwache/python-learning-trail | 357d5d1c6cfa966e347cb5a06cb1f90e3a9ed81d | [
"MIT"
] | null | null | null | Rock-paper-scissor/jokenpo.py | Thdahwache/python-learning-trail | 357d5d1c6cfa966e347cb5a06cb1f90e3a9ed81d | [
"MIT"
] | null | null | null | Rock-paper-scissor/jokenpo.py | Thdahwache/python-learning-trail | 357d5d1c6cfa966e347cb5a06cb1f90e3a9ed81d | [
"MIT"
] | null | null | null | import random
pedra = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
papel = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
tesoura = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
# Write your code below this line 👇
# Player choice: starts as None, holds the raw input string, then is
# remapped to 0/1/2 inside the game loop.
escolha = None
# Running tallies updated by the outcome handlers below:
# v = wins, d = losses, e = draws.
v = 0
d = 0
e = 0
# Outcome handlers
# rock
def pedra_empate():
    """Rock vs rock: print the round summary and count a draw."""
    global e
    e = e + 1
    print(f"Você escolheu:\n\n {pedra} \n\n O jogo escolheu: {pedra} \n\n Vocês empataram!")
def pedra_derrota():
    """Rock vs paper: print the round summary and count a loss."""
    global d
    d = d + 1
    print(f"Você escolheu:\n\n {pedra} \n\n O jogo escolheu: {papel} \n\n Você perdeu!")
def pedra_vitoria():
    """Rock vs scissors: print the round summary and count a win."""
    global v
    v = v + 1
    print(f"Você escolheu:\n\n {pedra} \n\n O jogo escolheu: {tesoura} \n\n Você ganhou!")
# paper
def papel_empate():
    """Paper vs paper: print the round summary and count a draw."""
    global e
    e = e + 1
    print(f"Você escolheu:\n\n {papel} \n\n O jogo escolheu: {papel} \n\n Vocês empataram!")
def papel_derrota():
    """Paper vs scissors: print the round summary and count a loss."""
    global d
    d = d + 1
    print(f"Você escolheu:\n\n {papel} \n\n O jogo escolheu: {tesoura} \n\n Você perdeu!")
def papel_vitoria():
    """Paper vs rock: print the round summary and count a win."""
    global v
    v = v + 1
    print(f"Você escolheu:\n\n {papel} \n\n O jogo escolheu: {pedra} \n\n Você ganhou!")
# scissors
def tesoura_empate():
    """Scissors vs scissors: print the round summary and count a draw."""
    global e
    e = e + 1
    print(f"Você escolheu:\n\n {tesoura} \n\n O jogo escolheu: {tesoura} \n\n Vocês empataram!")
def tesoura_derrota():
    """Scissors vs rock: print the round summary and count a loss."""
    global d
    d = d + 1
    print(f"Você escolheu:\n\n {tesoura} \n\n O jogo escolheu: {pedra} \n\n Você perdeu!")
def tesoura_vitoria():
    """Scissors vs paper: print the round summary and count a win."""
    global v
    v = v + 1
    print(f"Você escolheu:\n\n {tesoura} \n\n O jogo escolheu: {papel} \n\n Você ganhou!")
# Dispatch tables: resultados[player][computer] -> outcome handler.
# Index convention everywhere: 0 = pedra (rock), 1 = papel (paper),
# 2 = tesoura (scissors). The tables are loop-invariant, so build them once.
pedra_resultados = [pedra_empate, pedra_derrota, pedra_vitoria]
# BUGFIX: paper vs scissors (index 2) is a LOSS; the original listed
# papel_vitoria twice and papel_derrota was never reachable.
papel_resultados = [papel_vitoria, papel_empate, papel_derrota]
tesoura_resultados = [tesoura_derrota, tesoura_vitoria, tesoura_empate]
resultados = [pedra_resultados, papel_resultados, tesoura_resultados]
while escolha != "sair":
    escolha = input(
        "Pedra, papel ou tesoura? Digite sair para terminar ").lower()
    computador = random.randint(0, 2)
    if escolha == "pedra":
        escolha = 0
    elif escolha == "papel":
        escolha = 1
    elif escolha == "tesoura":
        escolha = 2
    elif escolha == "sair":
        print(
            f"\nVocê ganhou {v} vezes, perdeu {d} vezes e empatou {e} com o computador, parabéns!")
        break
    else:
        # ROBUSTNESS: unrecognised input previously fell through and
        # crashed on resultados[escolha] with a string index.
        print("Opção inválida, tente novamente.")
        continue
    # Resolve the round: the selected handler prints the result and
    # updates the corresponding counter.
    fim = resultados[escolha][computador]()
print("")
| 19.857143 | 99 | 0.584248 | 338 | 2,641 | 4.156805 | 0.180473 | 0.038434 | 0.064057 | 0.115302 | 0.495374 | 0.495374 | 0.478292 | 0.414235 | 0.397865 | 0.260498 | 0 | 0.008915 | 0.277925 | 2,641 | 132 | 100 | 20.007576 | 0.72732 | 0.040894 | 0 | 0.489362 | 0 | 0.106383 | 0.44396 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095745 | false | 0 | 0.010638 | 0 | 0.106383 | 0.117021 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30508657d97ad92eed583819ab3856787d5dae52 | 1,061 | py | Python | entity/messagerepo.py | LordOfNightmares/virtual-client-assistant | 772282434472fc44caac0cd21f972d6a5cc5c5b8 | [
"Apache-2.0"
] | null | null | null | entity/messagerepo.py | LordOfNightmares/virtual-client-assistant | 772282434472fc44caac0cd21f972d6a5cc5c5b8 | [
"Apache-2.0"
] | null | null | null | entity/messagerepo.py | LordOfNightmares/virtual-client-assistant | 772282434472fc44caac0cd21f972d6a5cc5c5b8 | [
"Apache-2.0"
] | null | null | null | from entity.message import Message
from .databaserepo import DatabaseRepo
class MessageDbRepo(DatabaseRepo):
    """Repository of Message rows backed by the "Messages" table.

    Row layout assumed throughout: column 0 is the id, columns 3-5 hold
    created/modified/accessed timestamps, and the slice fed to the Message
    constructor excludes the last three columns -- TODO confirm against
    the actual table schema.

    NOTE(review): all three queries build SQL by string concatenation; if
    ``cid``/``id`` can ever come from untrusted input this is injectable.
    Switch to parameterised queries if ``self.db.select`` supports them.
    """
    def __init__(self):
        # Bind this repository to the "Messages" table.
        super().__init__("Messages")
    def all(self, cid):
        """Return the conversation's messages, or None when there are none.

        (Named ``all`` for the repo API even though it shadows the builtin.)
        """
        query = "SELECT * FROM " + self.table + " WHERE conversation_id = '" + str(cid) + "'"
        m_results = self.db.select(query)
        if m_results:
            # columns 1..-4 feed the Message constructor; column 0 is the id
            m = [Message(*m_res[1:-3], m_res[0]) for m_res in m_results]
            return m
        else:
            return None
    def get(self, id):
        """Fetch one message by id and attach its timestamp columns.

        NOTE(review): this passes ``current[:-3]`` (which still includes
        column 0) while ``all``/``last`` use ``[1:-3]`` -- one of these
        slices looks off by one; verify.
        """
        current = super().get(id)
        m = Message(*current[:-3], id)
        m.created, m.modified, m.accessed = current[3], current[4], current[5]
        return m
    def last(self, cid):
        """Return the newest message of the conversation, or None if empty."""
        query = "SELECT * FROM " + self.table + " WHERE conversation_id = '" + str(
            cid) + "' ORDER BY created DESC LIMIT 0,1"
        m_results = self.db.select(query)
        if m_results:
            m_results = m_results[0]
            m = Message(*m_results[1:-3], m_results[0])
            return m
        else:
            return None
| 31.205882 | 93 | 0.556079 | 137 | 1,061 | 4.145985 | 0.335766 | 0.126761 | 0.047535 | 0.06338 | 0.397887 | 0.323944 | 0.323944 | 0.323944 | 0.323944 | 0.323944 | 0 | 0.017956 | 0.317625 | 1,061 | 33 | 94 | 32.151515 | 0.766575 | 0 | 0 | 0.392857 | 0 | 0 | 0.114986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3052b87761414206a80d473cc44e97d3436908e3 | 5,043 | py | Python | src/utils/interface_audio_io.py | waverDeep/WaveBYOL | ab062c26598e0fa6ab8426498f9920048988b5c1 | [
"MIT"
] | 1 | 2022-03-15T00:00:57.000Z | 2022-03-15T00:00:57.000Z | src/utils/interface_audio_io.py | waverDeep/WaveBYOL | ab062c26598e0fa6ab8426498f9920048988b5c1 | [
"MIT"
] | null | null | null | src/utils/interface_audio_io.py | waverDeep/WaveBYOL | ab062c26598e0fa6ab8426498f9920048988b5c1 | [
"MIT"
] | null | null | null | import soundfile as sf
from tqdm import tqdm
import src.utils.interface_file_io as io
import librosa
import wave
import multiprocessing
import src.utils.interface_multiprocessing as mi
import torchaudio
import numpy as np
import torch.nn.functional as F
import torch
torchaudio.set_audio_backend("sox_io")
def audio_loader(audio_file):
    """Load an audio file via torchaudio, returning its (waveform, sample_rate) pair."""
    return torchaudio.load(audio_file)
def cutoff(waveform, sample_rate, start, end):
    """Slice channel 0 of ``waveform`` between ``start`` and ``end`` seconds.

    The upper bound includes one extra frame; the result keeps a leading
    channel dimension of size one.
    """
    first_frame = int(start * sample_rate)
    last_frame = int(end * sample_rate + 1)
    segment = waveform[0][first_frame:last_frame]
    return segment.unsqueeze(0)
def random_cutoff(waveform, audio_window, index=None):
    """Crop ``audio_window`` frames from ``waveform`` (channels, frames).

    Starts at ``index`` when given, otherwise at a uniformly random valid
    offset.
    """
    if index is None:
        start = np.random.randint(waveform.shape[1] - audio_window + 1)
    else:
        start = index
    return waveform[:, start:start + audio_window]
def audio_adjust_length(x, audio_window, fit=False):
    """Pad ``x`` (channels, frames) up to at least ``audio_window`` frames.

    Shorter signals are zero-padded symmetrically (the extra frame goes to
    the right when the deficit is odd). When ``fit`` is True a random
    ``audio_window``-frame crop is taken afterwards, forcing the output to
    exactly that length.
    """
    deficit = audio_window - len(x[0])
    if deficit > 0:
        left = deficit // 2
        x = F.pad(x, (left, deficit - left))
    if fit:
        offset = np.random.randint(len(x[0]) - audio_window + 1)
        x = x[:, offset:offset + audio_window]
    return x
def audio_auto_trim(waveform, vad, audio_window=None):
    """Trim silence from both ends with ``vad``, then tile to a minimum length.

    ``vad`` trims from the front only, so the signal is flipped, trimmed
    again and flipped back to handle the tail. When ``audio_window`` is
    given, the trimmed signal is repeatedly doubled (self-concatenation)
    until it reaches at least that many frames.
    """
    trimmed = torch.flip(vad(torch.flip(vad(waveform), [0, 1])), [0, 1])
    if audio_window is not None:
        while trimmed.shape[1] < audio_window:
            trimmed = torch.cat((trimmed, trimmed), 1)
    return trimmed
def resampling_audio(file, original_sampling_rate=44100, resampling_rate=16000):
    """Load ``file`` at ``original_sampling_rate`` and resample it.

    Returns the resampled waveform as a numpy array.
    """
    # BUGFIX: the module object was being called directly
    # (`librosa(file, ...)`), which raises TypeError; librosa.load is the
    # intended entry point. Keyword args keep this compatible with recent
    # librosa versions that made resample() keyword-only.
    waveform, sampling_rate = librosa.load(file, sr=original_sampling_rate)
    resample_waveform = librosa.resample(
        waveform, orig_sr=original_sampling_rate, target_sr=resampling_rate)
    return resample_waveform
def resampling_audio_list(directory_list, new_file_path, file_extension, original_sampling_rate, resampling_rate):
    """Resample every audio file found under each directory in ``directory_list``.

    Results are written (via soundfile) into ``new_file_path`` keeping the
    original file names and the new ``resampling_rate``.
    """
    # BUGFIX: both loops tried to unpack (index, value) pairs directly from
    # plain lists, which raises ValueError on the first iteration;
    # enumerate() provides the indices.
    for dir_index, directory in enumerate(directory_list):
        file_list = io.get_all_file_path(directory, file_extension=file_extension)
        for file_index, file in enumerate(tqdm(file_list, desc=directory)):
            resample_waveform = resampling_audio(file, original_sampling_rate=original_sampling_rate,
                                                 resampling_rate=resampling_rate)
            filename = io.get_pure_filename(file)
            file_path = "{}/{}".format(new_file_path, filename)
            sf.write(file_path, resample_waveform, resampling_rate)
def pcm2wav(pcm_file, wav_file=None, channels=1, bit_depth=16, sampling_rate=16000):
    """Wrap a headerless PCM file into a WAV container.

    The PCM format (channels, bit depth, sampling rate) cannot be inferred
    from the raw bytes and must be supplied by the caller. When
    ``wav_file`` is None the output path is derived from ``pcm_file``.

    Raises ValueError if ``bit_depth`` is not a multiple of 8.
    """
    # Check if the options are valid.
    if bit_depth % 8 != 0:
        raise ValueError("bit_depth " + str(bit_depth) + " must be a multiple of 8.")
    if wav_file is None:
        # NOTE(review): str.replace substitutes every "pcm" occurrence in
        # the path, not just the extension -- confirm no directory names
        # contain "pcm" before relying on this default.
        wav_file = pcm_file.replace("pcm", "wav")
    # Read the raw sample bytes; they are copied verbatim into the WAV
    # data chunk.
    with open(pcm_file, 'rb') as opened_pcm_file:
        pcm_data = opened_pcm_file.read()
    # ROBUSTNESS: a context manager guarantees the WAV handle is closed
    # (and the header finalised) even if writing fails; the original
    # leaked the handle on error.
    with wave.open(wav_file, 'wb') as obj2write:
        obj2write.setnchannels(channels)
        obj2write.setsampwidth(bit_depth // 8)
        obj2write.setframerate(sampling_rate)
        obj2write.writeframes(pcm_data)
def distributed_pcm2wav(pcm_file):
    """Convert every PCM path in ``pcm_file`` to WAV (worker entry point).

    Meant to run inside one multiprocessing worker over its slice of the
    file list; progress is reported on stdout.
    """
    print("start data distribution...")
    for path in pcm_file:
        pcm2wav(path)
    print("end data distribution...")
class MelSpectrogramLibrosa:
    """Mel power spectrogram computed with librosa, returned as a torch tensor."""

    def __init__(self, fs=16000, n_fft=1024, shift=160, n_mels=64, fmin=60, fmax=7800):
        self.fs = fs
        self.n_fft = n_fft
        self.shift = shift
        self.n_mels = n_mels
        self.fmin = fmin
        self.fmax = fmax
        # The mel filterbank is computed once here and reused on every call.
        self.mfb = librosa.filters.mel(sr=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)

    def __call__(self, audio):
        """Return the mel power spectrogram of ``audio``."""
        spectrum = librosa.stft(np.array(audio), n_fft=self.n_fft, hop_length=self.shift)
        power = np.abs(spectrum) ** 2 + np.finfo(float).eps
        return torch.tensor(np.matmul(self.mfb, power))
if __name__ == '__main__':
    # Manual task switch: set `task` to "resampling" or "pcm2wav" to run a
    # job; the default empty string makes the script a no-op.
    task = ""
    if task == "resampling":
        # Resample the UrbanSound8K corpus from 44.1 kHz down to 16 kHz.
        directory_path = ['../../dataset/UrbanSound8K/audio']
        new_save_directory = '../../dataset/UrbanSound8K/audio_16k/'
        resampling_audio_list(directory_path, new_save_directory, 'wav', 44100, 16000)
    elif task == 'pcm2wav':
        # Convert the KsponSpeech PCM corpus to WAV, using all cores but one.
        input_dir = "../../dataset/KsponSpeech/train"
        file_extension = "pcm"
        divide_num = multiprocessing.cpu_count() - 1
        file_list = io.get_all_file_path(input_dir, file_extension)
        # split the file list into one chunk per worker process
        file_list = io.list_divider(divide_num, file_list)
        print(len(file_list))
        processes = mi.setup_multiproceesing(distributed_pcm2wav, data_list=file_list)
        mi.start_multiprocessing(processes)
| 36.810219 | 114 | 0.691652 | 685 | 5,043 | 4.827737 | 0.281752 | 0.033263 | 0.036287 | 0.019958 | 0.166314 | 0.13547 | 0.095555 | 0.057454 | 0.057454 | 0.029634 | 0 | 0.021255 | 0.20702 | 5,043 | 136 | 115 | 37.080882 | 0.805701 | 0.052548 | 0 | 0.09901 | 0 | 0 | 0.049696 | 0.020969 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108911 | false | 0 | 0.108911 | 0.009901 | 0.29703 | 0.029703 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3052bcfb77f4439da2dc55fb76a58f27be220c78 | 6,318 | py | Python | diffusion_utils/utils.py | GallagherCommaJack/diffusion-utils | 407e350ec62e204c10366afe66b793f1d1276c1e | [
"MIT"
] | 2 | 2022-01-16T16:16:52.000Z | 2022-03-01T11:51:35.000Z | diffusion_utils/utils.py | GallagherCommaJack/diffusion-utils | 407e350ec62e204c10366afe66b793f1d1276c1e | [
"MIT"
] | null | null | null | diffusion_utils/utils.py | GallagherCommaJack/diffusion-utils | 407e350ec62e204c10366afe66b793f1d1276c1e | [
"MIT"
] | null | null | null | import math
from typing import MutableSequence, Optional, TypeVar, Union
import torch
from torch import nn
from torch import Tensor
from torch.types import Number
from einops import repeat
T = TypeVar("T")
def exists(val: Optional[T]) -> bool:
    """Report whether ``val`` carries a value (i.e. is not None)."""
    present = val is not None
    return present
def default(val: Optional[T], d: T) -> T:
    """Return ``val`` when it is not None, otherwise the fallback ``d``."""
    if val is None:
        return d
    return val
def cast_tuple(val, depth: int = 1):
    """Return ``val`` unchanged if it is already a tuple, else repeat it ``depth`` times."""
    if isinstance(val, tuple):
        return val
    return (val,) * depth
class DropKwargs(nn.Module):
    """Wrapper that forwards positional args to ``inner`` and discards all kwargs."""

    def __init__(self, inner: nn.Module):
        super().__init__()
        self.inner = inner

    def forward(self, *args, **kwargs):
        del kwargs  # intentionally ignored
        return self.inner(*args)
class SequentialKwargs(nn.Module):
    """Like ``nn.Sequential`` but threads the same ``**kwargs`` into every submodule."""

    def __init__(self, *modules: nn.Module):
        super().__init__()
        self.inner = nn.ModuleList(modules)

    def forward(self, x, **kwargs):
        result = x
        for stage in self.inner:
            result = stage(result, **kwargs)
        return result
TensorSeq = MutableSequence[Tensor]
class PushBack(nn.Module):
    """Run ``inner`` on the unpacked sequence, append the result, and make it the new head."""

    def __init__(self, inner: nn.Module):
        super().__init__()
        self.inner = inner

    def forward(
        self,
        xtup: TensorSeq,
    ) -> TensorSeq:
        out = self.inner(*xtup)
        xtup.append(out)
        xtup[0] = out
        return xtup
class PopBack(nn.Module):
    """Pop the last element and feed it to ``inner`` as the keyword argument ``key``."""

    def __init__(self, inner: nn.Module, key: str):
        super().__init__()
        self.inner = inner
        self.key = key

    def forward(self, xtup: TensorSeq) -> TensorSeq:
        popped = {self.key: xtup.pop()}
        xtup[0] = self.inner(*xtup, **popped)
        return xtup
class ApplyMods(nn.Module):
    """Apply the i-th stored module to the i-th sequence element, in place."""
    def __init__(self, *mods):
        super().__init__()
        self.inner = nn.ModuleList(mods)

    def forward(self, tup: TensorSeq) -> TensorSeq:
        # mutates and returns the same sequence object
        for i, mod in enumerate(self.inner):
            tup[i] = mod(tup[i])
        return tup
class ApplyMod(nn.Module):
    """Apply ``inner`` to the element at position ``ix`` of the sequence, in place."""

    def __init__(self, inner: nn.Module, ix: int = 0):
        super().__init__()
        self.inner = inner
        self.ix = ix

    def forward(self, tup: TensorSeq) -> TensorSeq:
        target = self.ix
        tup[target] = self.inner(tup[target])
        return tup
class RetIndex(nn.Module):
    """Select and return the element at position ``ix`` from the sequence."""

    def __init__(self, ix: int = 0):
        super().__init__()
        self.ix = ix

    def forward(self, tup: TensorSeq) -> Tensor:
        return tup[self.ix]
class ClampWithGrad(torch.autograd.Function):
    """Clamp whose backward lets gradients escape the clamped region.

    Forward behaves exactly like ``input.clamp(min, max)``. Backward keeps
    the incoming gradient wherever ``grad_in`` has the same sign as the
    clamping overshoot ``input - clamp(input)`` (the product is >= 0; note
    it is zero, hence kept, for in-range inputs) and zeroes it elsewhere.
    No gradients flow to the ``min``/``max`` arguments.
    """
    @staticmethod
    def forward(ctx, input, min, max):
        # stash the bounds and the raw input for use in backward
        ctx.min = min
        ctx.max = max
        ctx.save_for_backward(input)
        return input.clamp(min, max)
    @staticmethod
    def backward(ctx, grad_in):
        (input,) = ctx.saved_tensors
        return (
            grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0),
            None,
            None,
        )
# Functional alias used throughout the module.
clamp_with_grad = ClampWithGrad.apply
def clamp_exp(
    t: torch.Tensor,
    low: float = math.log(1e-2),
    high: float = math.log(100),
):
    """Exponentiate ``t`` after clamping it to [low, high] with gradient pass-through."""
    clamped = clamp_with_grad(t, low, high)
    return clamped.exp()
def mk_full(d: int, init: Union[torch.Tensor, Number]):
    """Return ``init`` itself if it is already a tensor, else a length-``d`` tensor filled with it."""
    if isinstance(init, torch.Tensor):
        return init
    return torch.full([d], init)
@torch.no_grad()
def ema_update(model, averaged_model, decay):
    """Incorporates updated model parameters into an exponential moving averaged
    version of a model. It should be called after each optimizer step."""
    src_params = dict(model.named_parameters())
    ema_params = dict(averaged_model.named_parameters())
    assert src_params.keys() == ema_params.keys()
    # parameters are blended: ema <- ema + (1 - decay) * (param - ema)
    for name, param in src_params.items():
        ema_params[name].lerp_(param, 1 - decay)
    src_buffers = dict(model.named_buffers())
    ema_buffers = dict(averaged_model.named_buffers())
    assert src_buffers.keys() == ema_buffers.keys()
    # buffers are not averaged, just copied over verbatim
    for name, buf in src_buffers.items():
        ema_buffers[name].copy_(buf)
def get_ddpm_schedule(t):
    """Returns log SNRs for the noise schedule from the DDPM paper."""
    exponent = 1e-4 + 10 * t ** 2
    return -torch.log(torch.expm1(exponent))
def get_alphas_sigmas(log_snrs):
    """Returns the scaling factors for the clean image and for the noise, given
    the log SNR for a timestep."""
    alpha_sq = torch.sigmoid(log_snrs)
    sigma_sq = 1 - alpha_sq
    return alpha_sq.sqrt(), sigma_sq.sqrt()
def calculate_stats(e):
    """Return (mean, variance, skewness, kurtosis) of tensor ``e``.

    The variance used in the skewness/kurtosis denominators is floored at
    1e-5 to avoid blow-ups on near-constant inputs.
    """
    mean = e.mean()
    centered = e - mean
    variance = centered.pow(2).mean()
    safe_var = max(variance, 1e-5)
    skewness = centered.pow(3).mean() / safe_var ** 1.5
    kurtosis = centered.pow(4).mean() / safe_var ** 2
    return mean, variance, skewness, kurtosis
def measure_perf(f):
    """Time the callable ``f`` on the GPU using CUDA events.

    Returns the elapsed time in milliseconds between the two recorded
    events. Requires a CUDA device; the synchronize() is needed because
    event timestamps are only valid once the stream has executed them.
    """
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)
    start_event.record()
    # the measured workload runs between the two event records
    f()
    end_event.record()
    torch.cuda.synchronize()  # Wait for the events to be recorded!
    elapsed_time_ms = start_event.elapsed_time(end_event)
    return elapsed_time_ms
def calc_delta(t_in, t_out):
    """Angle swept between timesteps ``t_in`` and ``t_out`` on the quarter-circle parameterisation."""
    quarter_turn = math.pi / 2
    return quarter_turn * (t_in - t_out)
def diffusion_step(z, v, t_in, t_out):
    """Advance ``z`` from ``t_in`` to ``t_out``: z' = cos(delta) * z - sin(delta) * v."""
    delta = calc_delta(t_in, t_out)
    return torch.cos(delta) * z - torch.sin(delta) * v
def calc_v_with_distillation_errors(net, z, t_in, t_out, *args, **kwargs):
    """Predict the velocity at (z, t_in) and its progressive-distillation error.

    The target is obtained (without gradients) by taking two half steps
    ``t_in -> t_mid -> t_out`` with the network and solving for the single
    step that lands on the same point; ``e`` is the per-sample squared
    error between the one-step prediction ``v`` and that target (mean over
    dims 1-3, so ``z`` is expected to be 4-D: batch first).

    Returns (v, e) with one error entry per batch element.
    """
    v = net(z, t_in, *args, **kwargs)
    with torch.no_grad():
        delta = calc_delta(t_in, t_out)
        t_mid = (t_in + t_out) / 2
        z_1 = diffusion_step(z, v, t_in, t_mid)
        v_2 = net(z_1, t_mid, *args, **kwargs)
        # BUGFIX: the second half step was called as
        # diffusion_step(z_1 < v_2, t_mid, t_out) -- a boolean comparison
        # in place of the (z, v) argument pair, which also dropped an
        # argument of the 4-parameter signature.
        z_2 = diffusion_step(z_1, v_2, t_mid, t_out)
        targets = z / torch.tan(delta) - z_2 / torch.sin(delta)
    e = v.sub(targets).pow(2).mean(dim=[1, 2, 3])
    return v, e
def factor_int(n):
    """Split ``n`` into two integer factors (a, b) with a * b == n.

    ``a`` starts at ceil(sqrt(n)) and decreases until an exact split is
    found, so the pair is as close to square as possible (worst case
    (1, n) for primes).
    """
    a = math.ceil(math.sqrt(n))
    b = int(n / a)
    while a * b != float(n):
        a -= 1
        b = int(n / a)
    return a, b
def compute_channel_change_mat(io_ratio):
    """Build the channel up/down-mixing matrix for a channel ratio.

    For ``io_ratio < 1`` the result is a (1, round(1/io_ratio)) row whose
    entries are ``io_ratio`` (each input channel weighted equally); for
    ``io_ratio >= 1`` it is an (int(io_ratio), 1) column of ones that
    duplicates the single input channel.
    """
    base = torch.eye(1)
    if io_ratio < 1:
        # reduce channels: tile the scaled 1x1 identity along columns.
        # Tensor.repeat replaces the einops.repeat call -- for a 1x1 base
        # the two are identical, and this drops the third-party dependency.
        c_in = int(1 / io_ratio)
        cmat = (base * io_ratio).repeat(1, c_in)
    else:
        # expand channels: tile the 1x1 identity along rows
        c_out = int(io_ratio)
        cmat = base.repeat(c_out, 1)
    return cmat
def max_neg_value(tensor):
    """Most negative finite value representable in ``tensor``'s floating dtype."""
    info = torch.finfo(tensor.dtype)
    return -info.max
| 25.893443 | 80 | 0.623932 | 915 | 6,318 | 4.102732 | 0.250273 | 0.038359 | 0.03463 | 0.02797 | 0.225892 | 0.191529 | 0.111881 | 0.071923 | 0.036761 | 0.036761 | 0 | 0.011809 | 0.249446 | 6,318 | 243 | 81 | 26 | 0.77984 | 0.059513 | 0 | 0.195266 | 0 | 0 | 0.006254 | 0 | 0 | 0 | 0 | 0 | 0.011834 | 1 | 0.189349 | false | 0 | 0.04142 | 0.047337 | 0.426036 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3053b63c3b7a677ef567ccc253454ec06d5b2791 | 828 | py | Python | FB.py/blog.py | attia7/Test | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | null | null | null | FB.py/blog.py | attia7/Test | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | 11 | 2020-03-24T17:40:26.000Z | 2022-01-13T01:42:38.000Z | FB.py/blog.py | attia7/AttiaGit | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | null | null | null | class Blog:
    def __init__(self, title, photo, name, date, content):
        """Store the post's title, cover-photo URL, author name, date string and body text."""
        self.title = title
        self.photo = photo
        self.name = name
        self.date = date
        self.content = content
# Demo data: three posts sharing the same stock-photo URL.
blog1 = Blog(title='python bisics', photo='https://images.pexels.com/photos/837140/pexels-photo-837140.jpeg',
             name='Yasser', date='10-06-2019', content='Hello How r u ?')
blog2 = Blog(title='python bisics', photo='https://images.pexels.com/photos/837140/pexels-photo-837140.jpeg',
             name='Mohammed', date='11-16-1979', content='Hello How r u ?')
blog3 = Blog(title='python bisics', photo='https://images.pexels.com/photos/837140/pexels-photo-837140.jpeg',
             name='Sara', date='10-06-2019', content='Hi ')
print(blog1.name)
print(blog2.date)
print(blog3.content)
# The list holds references: mutating blogs[2] also changes blog3.
blogs = [blog1, blog2, blog3]
blogs[2].name = 'Ali'
blogs.remove(blogs[0])
# Prints 'Ali' -- blog3 is the same object that was renamed above.
print(blog3.name)
3057879143c17360d6498f89a5f3db75d2469ccb | 4,075 | py | Python | modules/RetrieveResource.py | opentargets/platform-input-support | 555c3ed091a7a3a767dc0c37054dbcd369f02252 | [
"Apache-2.0"
] | 4 | 2019-03-26T15:54:35.000Z | 2021-05-27T13:18:43.000Z | modules/RetrieveResource.py | opentargets/platform-input-support | 555c3ed091a7a3a767dc0c37054dbcd369f02252 | [
"Apache-2.0"
] | 12 | 2019-04-23T14:45:04.000Z | 2022-03-17T09:40:04.000Z | modules/RetrieveResource.py | opentargets/platform-input-support | 555c3ed091a7a3a767dc0c37054dbcd369f02252 | [
"Apache-2.0"
] | 2 | 2019-06-15T17:21:14.000Z | 2021-05-14T18:35:18.000Z | import logging
from modules.common.GoogleBucketResource import GoogleBucketResource
from modules.common.Utils import Utils
from modules.common import create_output_dir, remove_output_dir
from yapsy.PluginManager import PluginManager
from definitions import PIS_OUTPUT_DIR
logger = logging.getLogger(__name__)
class RetrieveResource(object):
    """Orchestrates the resource-retrieval steps (yapsy plugins) and the GCS upload.

    ``args`` is the parsed CLI namespace, ``yaml`` the loaded configuration
    object; plugin outputs land under ``output_dir``.
    """
    def __init__(self, args, yaml):
        self.simplePluginManager = PluginManager()
        self.args = args
        # fall back to the package-wide default output location
        self.output_dir = args.output_dir if args.output_dir is not None else PIS_OUTPUT_DIR
        self.yaml = yaml
    def checks_gc_service_account(self):
        """Warn when no Google credential key was supplied, otherwise validate it."""
        if self.args.google_credential_key is None:
            logger.info("Some of the steps might be not work properly due the lack of permissions to access to GCS. "
                        "Eg. Evidence")
        else:
            GoogleBucketResource.has_valid_auth_key(self.args.google_credential_key)
    def copy_to_gs(self):
        """Copy the generated local files to the configured Google Storage bucket, if any."""
        if self.args.google_bucket is not None:
            Utils(self.yaml.config, self.yaml.outputs).gsutil_multi_copy_to(self.args.google_bucket)
        else:
            logger.error("Destination bucket info missing")
    def normalise_steps(self, steps, all_plugins_available):
        """Map user-supplied step names onto canonical plugin names.

        Matching is case-insensitive (SO/so/So all resolve to the SO
        plugin); inputs matching no plugin are logged as not found.
        """
        normalise_steps = []
        lowercase_steps = [each_step.lower() for each_step in steps]
        for plugin in all_plugins_available:
            if plugin.lower() in lowercase_steps:
                normalise_steps.append(plugin)
                lowercase_steps.remove(plugin.lower())
        # whatever is left did not match any known plugin
        logger.info("Steps not found:\n" + ','.join(lowercase_steps))
        return normalise_steps
    def steps(self):
        """Resolve which plugin steps to run.

        With no explicit steps requested, every available plugin runs
        except the excluded ones; otherwise exactly the requested steps
        run. Note the set operations mean the resulting order is
        unspecified, not the user-given order.
        """
        all_plugins_available = []
        for plugin in self.simplePluginManager.getAllPlugins():
            all_plugins_available.append(plugin.name)
        steps_requested = self.normalise_steps(self.args.steps, all_plugins_available)
        excluded_requested = self.normalise_steps(self.args.exclude, all_plugins_available)
        if len(self.args.steps) == 0:
            plugin_order = list(set(all_plugins_available) - set(excluded_requested))
        else:
            plugin_order = list(set(steps_requested))
        logger.info("Steps selected:\n" + ','.join(plugin_order))
        return plugin_order
    def init_plugins(self):
        """Discover and load all yapsy plugins from the local "plugins" directory."""
        # Tell it the default place(s) where to find plugins
        self.simplePluginManager.setPluginPlaces(["plugins"])
        # Load all plugins
        self.simplePluginManager.collectPlugins()
    # noinspection PyBroadException
    def run_plugins(self):
        """Execute each selected plugin with its YAML section.

        A failing plugin is logged and skipped so the remaining steps
        still run.
        """
        steps_to_execute = self.steps()
        for plugin_name in steps_to_execute:
            plugin = self.simplePluginManager.getPluginByName(plugin_name)
            try:
                plugin.plugin_object.process(self.yaml[plugin_name.lower()], self.yaml.outputs, self.yaml.config)
            except Exception as e:
                logger.info("WARNING Plugin not available {}".format(plugin_name))
                logger.info(e)
    def create_output_structure(self, output_dir):
        """By default the directories prod and staging are created"""
        # --force_clean wipes any previous run's output first
        remove_output_dir(output_dir) if self.args.force_clean else logger.info("Warning: Output not deleted.")
        self.yaml.outputs.prod_dir = create_output_dir(output_dir + '/prod')
        self.yaml.outputs.staging_dir = create_output_dir(output_dir + '/staging')
    def run(self):
        """Full pipeline: prepare dirs, load plugins, check credentials, run steps, upload."""
        self.create_output_structure(self.output_dir)
        self.init_plugins()
        self.checks_gc_service_account()
        self.run_plugins()
        self.copy_to_gs()
| 43.351064 | 117 | 0.690307 | 518 | 4,075 | 5.225869 | 0.301158 | 0.049871 | 0.049132 | 0.019948 | 0.118212 | 0.070927 | 0 | 0 | 0 | 0 | 0 | 0.000319 | 0.230184 | 4,075 | 93 | 118 | 43.817204 | 0.862608 | 0.142822 | 0 | 0.044776 | 0 | 0 | 0.071901 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134328 | false | 0 | 0.089552 | 0 | 0.268657 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3062f03e110c5a790e2aa16881b269fc800f6f09 | 2,513 | py | Python | baidu_API.py | spencerpomme/coconuts-on-fire | 407d61b3583c472707a4e7b077a9a3ab12743996 | [
"Apache-2.0"
] | 1 | 2015-04-23T11:43:26.000Z | 2015-04-23T11:43:26.000Z | baidu_API.py | spencerpomme/coconuts-on-fire | 407d61b3583c472707a4e7b077a9a3ab12743996 | [
"Apache-2.0"
] | null | null | null | baidu_API.py | spencerpomme/coconuts-on-fire | 407d61b3583c472707a4e7b077a9a3ab12743996 | [
"Apache-2.0"
] | null | null | null | #!python3
"""
A simple script that uses Baidu Place API to search certain kinds of place
in a range of circular space.
This API can be called maximum 2000 times per day.
"""
import requests, json
# import psycopg2
class ConvertFailure(Exception):
    """Raised when the Baidu geocoder cannot resolve an address."""

    # Fixed message; the [sic] spelling is kept for behavioural compatibility.
    _MESSAGE = "Convertion Failed."

    def __str__(self):
        return self._MESSAGE
# NOTE(review): API key is hard-coded in source — move it to configuration
# or an environment variable before sharing this script.
mykey = "IniXfqhsWAyZQpkmh5FtEVv0" # my developer key
city = "韶关"
place = "公园"
# Two corner coordinates (lat, lng) defining a rectangular search area.
coor1 = (39.915, 116.404)
coor2 = (39.975, 116.414)
radius = 500 # meters

city_params = {
    # parameters for place api: search by administrative region
    'ak': mykey,
    'output': 'json',
    'query': place,
    'page_size': 10,
    'page_num': 0,
    'scope': 2,
    'region': city
}

rect_params = {
    # parameters for place api: search within a bounding rectangle
    'ak': mykey,
    'output': 'json',
    'query': place,
    'page_size': 10,
    'page_num': 0,
    'scope': 2,
    'bounds': "%s, %s, %s, %s" % (*coor1, *coor2),
    'location': coor1,
    'radius': radius
}

circ_params = {
    # parameters for place api: circular search (centre/radius not yet set)
    'ak': mykey,
    'output': 'json',
    'query': place,
    'page_size': 10,
    'page_num': 0,
    'scope': 2,
}

geocoder_params = {
    # parameters for geocoder api; 'address' is filled in per request
    'ak': mykey,
    'output': 'json',
    'address': None
}

# Baidu service endpoints.
placeAPI = "http://api.map.baidu.com/place/v2/search"
geocoder = "http://api.map.baidu.com/geocoder/v2/"

# NOTE(review): these three HTTP requests run at import time, consuming
# quota (max 2000 calls/day) whenever the module is merely imported.
res_city = requests.get(placeAPI, params=city_params)
res_rect = requests.get(placeAPI, params=rect_params)
res_circ = requests.get(placeAPI, params=circ_params)
# print(res_city.url)
jsonobj = json.loads(res_city.text)
print(type(jsonobj))
print(type(res_city.text))
# print(json.dumps(jsonobj, sort_keys=False, indent=4))
# Below this line defines a series of Baidu geo-data API calling functions
def addr2coor(addresses):
    '''
    Yield an (address, longitude, latitude) tuple for each address in
    *addresses*.

    This is a generator; one Baidu geocoder request is issued per
    address.  (The former annotation ``addresses: str`` was wrong — an
    iterable of address strings is expected.)

    :param addresses: iterable of address strings
    :raises ConvertFailure: when the geocoder reports a non-zero status
    '''
    for address in addresses:
        geocoder_params['address'] = address
        res = requests.get(geocoder, params=geocoder_params)
        res.raise_for_status()
        # Reuse the response already fetched: the original issued a
        # second, identical HTTP request here, doubling quota usage.
        coor = json.loads(res.text)
        # print(coor)
        if coor['status'] == 0:
            location = coor['result']['location']
            yield (address, location['lng'], location['lat'])
        else:
            raise ConvertFailure
def rescounter(function)->tuple:
    """A addr2coor wraper"""
    # TODO: unimplemented stub — presumably intended to count successful
    # address conversions; confirm the intended behaviour before use.
    pass
if __name__ == '__main__':
    # Demo run: resolve a few well-known landmarks and print each result.
    landmarks = ["天安门", "故宫", "奥林匹克公园", "广州塔"]
    for record in addr2coor(landmarks):
        print(record)
| 23.707547 | 78 | 0.634302 | 322 | 2,513 | 4.829193 | 0.425466 | 0.054019 | 0.048875 | 0.041158 | 0.236656 | 0.200643 | 0.150482 | 0.150482 | 0.150482 | 0.150482 | 0 | 0.029141 | 0.221647 | 2,513 | 105 | 79 | 23.933333 | 0.765849 | 0.221647 | 0 | 0.289855 | 0 | 0 | 0.184664 | 0.01252 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.014493 | 0.014493 | 0.014493 | 0.086957 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3063873cf1d375e656a6b627f5a8d2ac1ba1cb4a | 2,293 | py | Python | src/models/evaluate_model.py | ThordurPall/MLOpsExercises- | 8714d83477f6132893b74675e529bfeef13ece85 | [
"MIT"
] | null | null | null | src/models/evaluate_model.py | ThordurPall/MLOpsExercises- | 8714d83477f6132893b74675e529bfeef13ece85 | [
"MIT"
] | null | null | null | src/models/evaluate_model.py | ThordurPall/MLOpsExercises- | 8714d83477f6132893b74675e529bfeef13ece85 | [
"MIT"
] | 1 | 2021-06-11T12:38:38.000Z | 2021-06-11T12:38:38.000Z | # -*- coding: utf-8 -*-
import logging
from pathlib import Path
import click
import torch
from classifier import Classifier
from torchvision import datasets, transforms
@click.command()
@click.argument('data_filepath', type=click.Path(), default='data')
@click.argument('trained_model_filepath', type=click.Path(),
                default='models/trained_model.pth')
def main(data_filepath, trained_model_filepath):
    """ Evaluates the neural network using MNIST test data

    :param data_filepath: folder (relative to the project root) holding
        the MNIST data
    :param trained_model_filepath: path (relative to the project root)
        to the serialized model state dict
    """
    logger = logging.getLogger(__name__)
    logger.info('Evaluating a neural network using MNIST test data')

    # Load the trained weights into a fresh classifier instance.
    model = Classifier()
    project_dir = Path(__file__).resolve().parents[2]
    state_dict = torch.load(project_dir.joinpath(trained_model_filepath))
    model.load_state_dict(state_dict)

    # Define a transform to normalize the data (same as training time).
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5,), (0.5,)), ])

    # Load the test data (assumes it was downloaded beforehand).
    test_set = datasets.MNIST(project_dir.joinpath(data_filepath),
                              download=False, train=False,
                              transform=transform)
    batch_size = 64
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                              shuffle=True)

    # Evaluate test performance.
    test_correct = 0
    # Turn off gradients for validation, saves memory and computations.
    with torch.no_grad():
        model.eval()  # Sets the model to evaluation mode

        # Run through all the test points.
        for images, labels in test_loader:
            # Forward pass
            log_ps = model(images)
            ps = torch.exp(log_ps)

            # Top-1 prediction per image; the probability values
            # themselves are not needed, only the predicted class.
            _, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            test_correct += equals.type(torch.FloatTensor).sum().item()

    test_accuracy = test_correct / len(test_set)
    # .format() already yields a str; the former str(...) wrapper was redundant.
    logger.info("Test Accuracy: {:.3f}".format(test_accuracy))
if __name__ == '__main__':
    # Configure root logging before handing control to the CLI entry point.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )
    main()
| 35.276923 | 78 | 0.645007 | 279 | 2,293 | 5.103943 | 0.476703 | 0.042135 | 0.042135 | 0.029494 | 0.082865 | 0.043539 | 0 | 0 | 0 | 0 | 0 | 0.006985 | 0.250763 | 2,293 | 64 | 79 | 35.828125 | 0.821886 | 0.163541 | 0 | 0 | 0 | 0 | 0.101526 | 0.024198 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.15 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3068043469fb1de1fd2079110d1f76aaf77142ca | 3,253 | py | Python | adminwindow.py | gaurav0810-ga/Spot-Counselling-Management-System | 34dffa34f1ffe016a912dc3cfcba6cd3f74eee18 | [
"Apache-2.0"
] | null | null | null | adminwindow.py | gaurav0810-ga/Spot-Counselling-Management-System | 34dffa34f1ffe016a912dc3cfcba6cd3f74eee18 | [
"Apache-2.0"
] | null | null | null | adminwindow.py | gaurav0810-ga/Spot-Counselling-Management-System | 34dffa34f1ffe016a912dc3cfcba6cd3f74eee18 | [
"Apache-2.0"
] | null | null | null | from tkinter import*
#=====importing self created module which will show the registartion form=======#
import registrationform
#=====importing self created module which will help in deleting student record from data base======#
import deletestudent
#=============importing selfcreated update student record ==============#
import updatestudent
#to import jpg image
import allotment #it will import module
#importing view database
import tables
from PIL import ImageTk #it will import Pillow librart
import smtplib
from email.message import EmailMessage
import sqlite3
import allotedstudentrecords
def student():
    """Build and run the admin main window of the spot-counselling
    system: registration, deletion, update, seat allotment and
    record-viewing actions."""
    admin_window = Tk()
    admin_window.iconbitmap("student.ico")

    # Keep a reference to the image object so it is not garbage
    # collected while the window is alive.
    bg = ImageTk.PhotoImage(file="./images/login.jpg")
    bg_image = Label(admin_window, image=bg)
    bg_image.place(x=0, y=0, relwidth=1, relheight=1)

    width = admin_window.winfo_screenwidth()
    height = admin_window.winfo_screenheight()
    admin_window.geometry(f'{width}x{height-100}+0+0')
    admin_window.resizable(FALSE, FALSE)
    admin_window.title("Student Registration system")

    # Banner title (the former unused 'admin_text' binding is dropped:
    # .pack() returns None anyway).
    Label(text="Spot Counslling Registration And Allotment System",
          font=("bold", 30)).pack(side='top', pady=40)

    def place_action_button(text, command, x, y):
        # All six action buttons share the same look; only the label,
        # callback and position differ.
        button = Button(admin_window, text=text, relief=GROOVE, width=15,
                        height=5, font=("bold", 10), command=command,
                        bg='#BB001B', fg='white')
        button.place(x=x, y=y)

    # Creation order preserved from the original implementation.
    place_action_button("Register Student", registrationform.register, 70, 150)
    place_action_button("Delete Student", deletestudent.delete_student, 70, 350)
    place_action_button("View Registerd \n\n Students Records",
                        tables.viewdatabase, 70, 550)
    place_action_button("Update Student", updatestudent.updatefunc, 1150, 350)
    place_action_button("Seat Allotment", allotment.selection, 1150, 150)
    place_action_button("View Alloted \n\n Students Records",
                        allotedstudentrecords.viewdatabase, 1150, 550)

    copy = Label(admin_window, text='Developed By Gaurav And Team ©',
                 font=('bold', 8), fg='white', bg='#01796F')
    copy.pack(side='bottom', fill='x')

    admin_window.mainloop()
| 61.377358 | 204 | 0.670765 | 411 | 3,253 | 5.209246 | 0.311436 | 0.087342 | 0.049043 | 0.058851 | 0.312471 | 0.236805 | 0.146193 | 0.146193 | 0.146193 | 0.146193 | 0 | 0.037509 | 0.106671 | 3,253 | 52 | 205 | 62.557692 | 0.698899 | 0.245927 | 0 | 0 | 0 | 0 | 0.173822 | 0.010101 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.289474 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3069d018acdd28b4231703721ede5dfa5cd4942f | 4,665 | py | Python | dnachisel/builtin_specifications/AvoidBlastMatches.py | simone-pignotti/DnaChisel | b7f0f925c9daefcc5fec903a13cfa74c3b726a7a | [
"MIT"
] | 124 | 2017-11-14T14:42:25.000Z | 2022-03-31T08:02:07.000Z | dnachisel/builtin_specifications/AvoidBlastMatches.py | simone-pignotti/DnaChisel | b7f0f925c9daefcc5fec903a13cfa74c3b726a7a | [
"MIT"
] | 65 | 2017-11-15T07:25:38.000Z | 2022-01-31T10:38:45.000Z | dnachisel/builtin_specifications/AvoidBlastMatches.py | simone-pignotti/DnaChisel | b7f0f925c9daefcc5fec903a13cfa74c3b726a7a | [
"MIT"
] | 31 | 2018-10-18T12:59:47.000Z | 2022-02-11T16:54:43.000Z | """Implementation of AvoidBlastMatches."""
from ..Specification import Specification, SpecEvaluation
# from .VoidSpecification import VoidSpecification
from ..biotools import blast_sequence
from ..Location import Location
class AvoidBlastMatches(Specification):
    """Enforce that the sequence has no BLAST matches with a given database.

    WARNING: try using AvoidMatches instead, it is much better!!

    Uses NCBI Blast+. Only local BLAST is supported/tested as for now

    Parameters
    ----------
    blast_db
      Path to a local BLAST database. These databases can be obtained with
      NCBI's `makeblastdb`. Omit the extension, e.g. `ecoli_db/ecoli_db`.

    word_size
      Word size used by the BLAST algorithm

    perc_identity
      Minimal percentage of identity for BLAST matches. 100 means that only
      perfect matches are considered.

    num_alignments
      Number alignments

    num_threads
      Number of threads/CPU cores to use for the BLAST algorithm.

    min_align_length
      Minimal length that an alignment should have to be considered.
    """

    priority = -2
    # 0 identities in any match is the best achievable outcome.
    best_possible_score = 0
    blasts_paths = {}

    def __init__(
        self,
        blast_db=None,
        sequences=None,
        word_size=4,
        perc_identity=100,
        num_alignments=100000,
        num_threads=3,
        min_align_length=20,
        ungapped=True,
        e_value=1e80,
        culling_limit=1,
        location=None,
    ):
        """Initialize."""
        self.blast_db = blast_db
        self.sequences = sequences
        self.word_size = word_size
        self.perc_identity = perc_identity
        self.num_alignments = num_alignments
        self.num_threads = num_threads
        self.min_align_length = min_align_length
        # Accepts a Location, tuple, or None (meaning "whole sequence").
        self.location = Location.from_data(location)
        self.e_value = e_value
        self.ungapped = ungapped
        self.culling_limit = culling_limit

    def initialized_on_problem(self, problem, role=None):
        # When no location was given, span the problem's full sequence.
        return self._copy_with_full_span_if_no_location(problem)

    def evaluate(self, problem):
        """Score as (-total number of blast identities in matches)."""
        location = self.location
        if location is None:
            location = Location(0, len(problem.sequence))
        sequence = location.extract_sequence(problem.sequence)
        # Run BLAST on the extracted subsequence with this spec's settings.
        blast_record = blast_sequence(
            sequence,
            blast_db=self.blast_db,
            subject_sequences=self.sequences,
            word_size=self.word_size,
            perc_identity=self.perc_identity,
            num_alignments=self.num_alignments,
            num_threads=self.num_threads,
            ungapped=self.ungapped,
            e_value=self.e_value,
            culling_limit=self.culling_limit,
            task="megablast"
        )
        # blast_sequence may return a single record or a list of records;
        # flatten either shape into one list of alignments.
        if isinstance(blast_record, list):
            alignments = [
                alignment
                for rec in blast_record
                for alignment in rec.alignments
            ]
        else:
            alignments = blast_record.alignments
        # For every HSP: (global start, global end, strand sign, #identities).
        # Query coordinates are shifted by location.start back into the
        # coordinates of the full problem sequence.
        query_hits = [
            (
                min(hit.query_start, hit.query_end) + location.start - 1,
                max(hit.query_start, hit.query_end) + location.start,
                1 - 2 * (hit.query_start > hit.query_end),
                hit.identities,
            )
            for alignment in alignments
            for hit in alignment.hsps
        ]
        # Keep only hits at least min_align_length long; the strand value
        # is dropped here (it is not used in the score or the locations).
        locations = sorted(
            [
                (start, end, ids)
                for (start, end, strand, ids) in query_hits
                if (end - start) >= self.min_align_length
            ]
        )
        # More identities found => lower (more negative) score.
        score = -sum([ids for start, end, ids in locations])
        locations = [Location(start, end) for start, end, ids in locations]
        if locations == []:
            # NOTE(review): score=1 here exceeds best_possible_score (0);
            # confirm whether the passing score should be 0 instead.
            return SpecEvaluation(
                self, problem, score=1, message="Passed: no BLAST match found"
            )
        return SpecEvaluation(
            self,
            problem,
            score=score,
            locations=locations,
            message="Failed - %s matches at %s" % (len(locations), locations),
        )

    def localized(self, location, problem=None, with_righthand=True):
        """Localize the evaluation."""
        new_location = self.location.overlap_region(location)
        if new_location is None:
            return None
        # NOTE(review): the overlap computed above is only used as an
        # existence check; the returned spec extends the *given* location
        # by min_align_length-1 — confirm this is intentional.
        new_location = location.extended(
            self.min_align_length - 1, right=with_righthand
        )
        return self.copy_with_changes(location=new_location)

    def feature_label_parameters(self):
        # Shown in exported/annotated features to identify this spec.
        return [self.blast_db]
| 30.690789 | 78 | 0.609003 | 523 | 4,665 | 5.24283 | 0.313576 | 0.01787 | 0.030635 | 0.019694 | 0.080963 | 0.054705 | 0.027717 | 0.027717 | 0.027717 | 0 | 0 | 0.008772 | 0.315756 | 4,665 | 151 | 79 | 30.89404 | 0.850251 | 0.196999 | 0 | 0.04 | 0 | 0 | 0.017019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.01 | 0.03 | 0.02 | 0.18 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3069d773ef41f354c54cc922824a8eac7764671a | 8,769 | py | Python | filesysdb/__init__.py | fictorial/filesysdb | bbf1e32218b71c7c15c33ada660433fffc6fa6ab | [
"MIT"
] | 2 | 2016-06-25T16:07:09.000Z | 2020-01-18T01:56:30.000Z | filesysdb/__init__.py | fictorial/filesysdb | bbf1e32218b71c7c15c33ada660433fffc6fa6ab | [
"MIT"
] | null | null | null | filesysdb/__init__.py | fictorial/filesysdb | bbf1e32218b71c7c15c33ada660433fffc6fa6ab | [
"MIT"
] | null | null | null | from aadict import aadict
from cachetools import LRUCache
import ujson as json
import regex
from shortuuid import uuid
from functools import wraps
from glob import glob
from time import time
import logging
import os
import shutil
import unicodedata
_logger = logging.getLogger(__name__)

# Module-level database state, populated by prepare()/add_collection().
_basepath = None      # root directory holding one sub-directory per collection
_serialize = None     # callable: object -> text (set in prepare())
_deserialize = None   # callable: text -> object (set in prepare())
_ext = None           # file extension of object files, e.g. 'json'
_db = aadict()        # collection name -> aadict(cache=..., indexes={})
class UniqueConstraintError(ValueError):
    """Raised when saving an object would violate a unique index."""
def normalize_text(text, lcase=True):
    """Return an ASCII-only, punctuation-free form of ``text``.

    The value is stripped, optionally lower-cased, NFKD-decomposed (so
    accented letters split into base letter + combining mark), has all
    Unicode punctuation removed, and is finally reduced to ASCII by
    dropping any remaining non-ASCII code points.

    :param text: value to normalise (coerced with ``str()``)
    :param lcase: lower-case the result when True (the default)
    """
    text = str(text).strip()
    if lcase:
        text = text.lower()
    text = unicodedata.normalize('NFKD', text)
    # Drop Unicode punctuation (general category P*).  Equivalent to the
    # previous third-party regex substitution r'\p{P}+' -> '', but uses
    # only the standard library.
    text = ''.join(
        ch for ch in text if not unicodedata.category(ch).startswith('P')
    )
    return text.encode('ascii', 'ignore').decode()
def bench(fn):
    """Decorator that logs ``fn``'s wall-clock duration at DEBUG level."""
    from time import perf_counter  # monotonic; time() can jump on clock adjust

    @wraps(fn)
    def wrapper(*args, **kwargs):
        start = perf_counter()
        ret = fn(*args, **kwargs)
        elapsed = perf_counter() - start
        _logger.debug('function %s took %g secs',
                      fn.__name__, elapsed)
        return ret
    return wrapper
def object_path(collection, id):
    """Returns path to the backing file of the object
    with the given ``id`` in the given ``collection``.

    ``id`` may be the id value itself or a dict/aadict carrying an
    ``'id'`` key.  The string form of the id is made filesystem-safe
    by "normalizing" it (without lower-casing).
    """
    # The two leftover _logger.debug(type(id))/_logger.debug(id) calls
    # were debug residue and have been removed.
    if isinstance(id, dict) and 'id' in id:
        id = id['id']
    normalized_id = normalize_text(str(id), lcase=False)
    return os.path.join(_basepath, collection,
                        '%s.%s' % (normalized_id, _ext))
def collection_path(collection):
    """Returns the base path to the ``collection``"""
    # Simply <_basepath>/<collection>; created by prepare() if missing.
    return os.path.join(_basepath, collection)
def load_object_at_path(path):
    """Read and deserialize the object stored in the file at ``path``."""
    with open(path, 'r') as handle:
        raw = handle.read()
    return aadict(_deserialize(raw))
def load_object(collection, id):
    """Load the object identified by ``collection``/``id`` from disk."""
    return load_object_at_path(object_path(collection, id))
def get_object(collection, id):
    """Get an object by its ``collection``-unique ``id``"""
    # Served from the collection's cache; a miss triggers a disk load
    # through the cache's `missing` hook (see add_collection).
    return _db[collection].cache[id]
def add_collection(collection,
                   cache_size=1000,
                   cache_cls=LRUCache,
                   **cache_args):
    """Register a collection named ``collection``.

    Objects are held in a ``cache_cls`` instance of at most
    ``cache_size`` entries; a cache miss falls back to loading the
    object from its backing file on disk.
    """
    assert collection not in _db

    def _load_missing(id):
        # Cache-miss hook: pull the object from disk.
        return load_object(collection, id)

    cache = cache_cls(maxsize=cache_size, missing=_load_missing, **cache_args)
    _db[collection] = aadict(cache=cache, indexes={})
def _clear():
    # Drop all registered collections, caches and indexes (test helper;
    # does not touch the files on disk).
    _db.clear()
def prepare(base_path='data',
            serialize=json.dumps,
            deserialize=json.loads,
            file_ext='json'):
    """After you have added your collections, prepare the database
    for use.

    Stores the (de)serialization hooks and file extension, creates each
    collection's directory and logs the current object counts.
    """
    global _basepath, _deserialize, _serialize, _ext
    _basepath = base_path
    assert callable(serialize)
    assert callable(deserialize)
    _serialize = serialize
    _deserialize = deserialize
    _ext = file_ext
    _logger.debug('preparing with base path %s and file ext %s',
                  _basepath, _ext)
    assert len(_db)
    for name in _db:
        os.makedirs(collection_path(name), exist_ok=True)
        _logger.info('collection "%s": %d objects',
                     name, object_count(name))
def object_count(collection):
    """Returns the number of objects in the given ``collection``."""
    pattern = '%s/*.%s' % (collection_path(collection), _ext)
    return len(glob(pattern))
def each_object(collection):
    """Yield every object in the given ``collection``.

    Each object is read straight from its backing file (the per-object
    cache is not consulted here)."""
    base = collection_path(collection)
    for path in glob('%s/*.%s' % (base, _ext)):
        yield load_object_at_path(path)
def each_object_id(collection):
    """Yields each object ID in the given ``collection``.

    The ids are recovered from the backing file names (``<id>.<ext>``);
    the objects themselves are not loaded.
    """
    c_path = collection_path(collection)
    for path in glob('%s/*.%s' % (c_path, _ext)):
        # os.path handles the platform's separators; the previous
        # r'.+/(.+)\.<ext>$' regex assumed POSIX-style '/' paths.
        yield os.path.splitext(os.path.basename(path))[0]
@bench
def save_object(collection, obj):
    """Save an object ``obj`` to the given ``collection``.

    ``obj.id`` must be unique across all other existing objects in the
    given collection; when ``id`` is absent, a fresh *UUID* is assigned.
    The object is written to a temporary file and then moved into place,
    so a crash mid-write never leaves a truncated object file.  Indexes
    already defined on the collection are updated afterwards.

    Returns the object.
    """
    if 'id' not in obj:
        obj.id = uuid()
    target = object_path(collection, obj.id)
    scratch = '%s.temp' % target
    with open(scratch, 'w') as handle:
        handle.write(_serialize(obj))
    shutil.move(scratch, target)
    cache = _db[collection].cache
    if obj.id in cache:
        # Refresh only if already cached; a plain miss would trigger a
        # needless disk load through the cache's `missing` hook.
        cache[obj.id] = obj
    _update_indexes_for_mutated_object(collection, obj)
    return obj
@bench
def delete_object(collection, obj):
    """Remove ``obj`` from ``collection``: backing file, cache, indexes.

    Missing files or cache entries are ignored, but unexpected errors
    are no longer silently swallowed — the former bare ``except: pass``
    could hide real bugs (e.g. a misnamed collection or permission
    problems).
    """
    try:
        os.remove(object_path(collection, obj))
    except OSError:
        # File already gone (or never persisted) — nothing to do.
        pass
    try:
        del _db[collection].cache[obj.id]
    except KeyError:
        pass
    _update_indexes_for_deleted_object(collection, obj)
def indexed_value(index, obj):
    """Compute the index key of ``obj`` under ``index``.

    The indexed fields' values are extracted (optionally passed through
    the index's transformer), JSON-encoded, and lower-cased when the
    index is case-insensitive.
    """
    extracted = [obj.get(field) for field in index.fields]
    if callable(index.transformer):
        extracted = index.transformer(extracted)
    key = json.dumps(extracted)
    if index.case_insensitive:
        key = key.lower()
    return key
@bench
def add_index(collection,
              name,
              fields,
              transformer=None,
              unique=False,
              case_insensitive=False):
    """
    Add a secondary index for a collection ``collection`` on one or
    more ``fields``.

    The values at each of the ``fields`` are loaded from existing
    objects and their object ids added to the index.  Iterate the
    indexed objects later via ``each_indexed_object``; ``save_object``
    and ``delete_object`` keep the index up to date.

    If a function is provided for ``transformer``, the values extracted
    from each object are passed through it and its return value goes
    into the index.

    If ``unique`` is true, at most one object in the collection may
    carry any given combination of values for ``fields``.

    If ``case_insensitive`` is true, index keys are lower-cased and
    comparisons are therefore case-insensitive.
    """
    # Validate with real exceptions: `assert` is stripped under -O.
    if not name:
        raise ValueError('index name must be non-empty')
    if not fields:
        raise ValueError('at least one indexed field is required')
    indexes = _db[collection].indexes
    index = indexes.setdefault(name, aadict())
    index.transformer = transformer
    index.value_map = {}  # json([value]) => set(object_id)
    index.unique = unique
    index.case_insensitive = case_insensitive
    index.fields = fields
    # Backfill the index from every object already on disk.
    for obj in each_object(collection):
        _add_to_index(index, obj)
    _logger.info('added %s, %s index to collection %s on fields: %s',
                 'unique' if unique else 'non-unique',
                 'case-insensitive' if case_insensitive else 'case-sensitive',
                 collection, ', '.join(fields))
def _add_to_index(index, obj):
    """Record ``obj`` in ``index``, enforcing uniqueness when required."""
    key = indexed_value(index, obj)
    id_set = index.value_map.setdefault(key, set())
    if index.unique and id_set:
        raise UniqueConstraintError()
    id_set.add(obj.id)
def _remove_from_index(index, obj):
    """Removes object ``obj`` from the ``index``."""
    key = indexed_value(index, obj)
    try:
        index.value_map[key].remove(obj.id)
    except KeyError:
        # Either the key or the id was never indexed; nothing to undo.
        pass
def each_indexed_object(collection, index_name, **where):
    """Yield every object of ``collection`` whose indexed field values
    match the ``where`` keyword arguments under the index named
    ``index_name``."""
    index = _db[collection].indexes[index_name]
    matching_ids = index.value_map.get(indexed_value(index, where), [])
    for object_id in matching_ids:
        yield get_object(collection, object_id)
def _update_indexes_for_mutated_object(collection, obj):
    """Re-index an updated object: drop the (possibly stale) entry from
    every index on the collection, then add the current values."""
    for idx in _db[collection].indexes.values():
        _remove_from_index(idx, obj)
        _add_to_index(idx, obj)
def _update_indexes_for_deleted_object(collection, obj):
    """Drop a deleted object from every index on its collection so it
    is no longer discoverable through indexed lookups."""
    for idx in _db[collection].indexes.values():
        _remove_from_index(idx, obj)
| 30.134021 | 73 | 0.655149 | 1,188 | 8,769 | 4.690236 | 0.207071 | 0.03733 | 0.022613 | 0.017947 | 0.174803 | 0.131371 | 0.092965 | 0.046662 | 0.046662 | 0.046662 | 0 | 0.001341 | 0.234462 | 8,769 | 290 | 74 | 30.237931 | 0.828691 | 0.294218 | 0 | 0.134503 | 0 | 0 | 0.046022 | 0 | 0 | 0 | 0 | 0 | 0.035088 | 1 | 0.134503 | false | 0.017544 | 0.070175 | 0 | 0.274854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
306d9087bb69542cb8af6dee2a42b1098927e7a0 | 1,192 | py | Python | packages/postgres-database/src/simcore_postgres_database/migration/versions/39fa67f45cc0_adds_table_for_scicrunch_rrids.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | packages/postgres-database/src/simcore_postgres_database/migration/versions/39fa67f45cc0_adds_table_for_scicrunch_rrids.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | packages/postgres-database/src/simcore_postgres_database/migration/versions/39fa67f45cc0_adds_table_for_scicrunch_rrids.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | """Adds table for scicrunch rrids
Revision ID: 39fa67f45cc0
Revises: 3452ca7b13e9
Create Date: 2020-12-15 18:16:03.581479+00:00
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "39fa67f45cc0"       # this migration's id
down_revision = "3452ca7b13e9"  # parent revision in the migration chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``scicrunch_resources`` table, keyed by RRID."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "scicrunch_resources",
        sa.Column("rrid", sa.String(), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("description", sa.String(), nullable=True),
        # Timestamps default to the database server's clock.
        sa.Column(
            "creation_date",
            sa.DateTime(),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        # NOTE(review): no onupdate trigger here — last_change_date is
        # presumably maintained by application code; confirm.
        sa.Column(
            "last_change_date",
            sa.DateTime(),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.PrimaryKeyConstraint("rrid"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``scicrunch_resources`` table (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("scicrunch_resources")
    # ### end Alembic commands ###
| 25.913043 | 65 | 0.605705 | 130 | 1,192 | 5.461538 | 0.492308 | 0.056338 | 0.084507 | 0.088732 | 0.349296 | 0.349296 | 0.267606 | 0.267606 | 0.143662 | 0.143662 | 0 | 0.061086 | 0.258389 | 1,192 | 45 | 66 | 26.488889 | 0.742081 | 0.266779 | 0 | 0.357143 | 0 | 0 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
307114b26313aa3265e1bd2c48447893e64378ae | 2,541 | py | Python | conexionbasedato.py | feedesa/MyPythonScripts | 66f06f9d44ea6c76cfadb1a620bb468176beefe0 | [
"MIT"
] | null | null | null | conexionbasedato.py | feedesa/MyPythonScripts | 66f06f9d44ea6c76cfadb1a620bb468176beefe0 | [
"MIT"
] | null | null | null | conexionbasedato.py | feedesa/MyPythonScripts | 66f06f9d44ea6c76cfadb1a620bb468176beefe0 | [
"MIT"
] | null | null | null | import pymysql
from tkinter import messagebox
class Socios():
    """Data-access helper for the ``socios`` table of the local
    ``ejemplo1`` MySQL database."""

    def abrir(self):
        """Open and return a new connection to the database.

        NOTE(review): credentials are hard-coded (root, empty password);
        move them to configuration before deploying.
        """
        bbdd = pymysql.connect(host="localhost", user="root", passwd="", db="ejemplo1")
        return bbdd

    def alta(self, datos):
        '''
        Insert a new member row and show a confirmation dialog.

        datos[0]: member name (NOMBRE)
        datos[1]: fee-paid flag (CUOTAPAGA)

        (The previous docstring wrongly described datos as id/nombre.)
        '''
        bbdd = self.abrir()
        cursor = bbdd.cursor()
        # Parameterised query: the former str.format() interpolation was
        # vulnerable to SQL injection through user-supplied values.
        sql = "INSERT INTO Socios (NOMBRE, CUOTAPAGA) values(%s, %s)"
        cursor.execute(sql, (datos[0], datos[1]))
        bbdd.commit()
        messagebox.showinfo(message="registro exitoso", title="Aviso")
        bbdd.close()

    def mostrarlistadosocio(self):
        """Return every row of the ``socios`` table as a tuple of tuples."""
        bbdd = self.abrir()
        cursor = bbdd.cursor()
        cursor.execute("SELECT * FROM socios")
        datoslistadocompleto = cursor.fetchall()
        bbdd.commit()
        bbdd.close()
        return datoslistadocompleto
# def editarTabla(self, a_editar):
# bbdd= pymysql.connect( host= "localhost", user= "root", passwd="", db= "ejemplo1")
# cursor= bbdd.cursor()
# sql="ALTER TABLE SOCIOS AUTO_INCREMENT = 1"
# bbdd.commit()
# cursor.execute(sql)
# print(sql)
# bbdd.close()
# sql = "INSERT INTO SOCIOS (nombre, sexo )\
# values( '{}','{}')".format(datos[0],datos[1] )
# print (sql)
# #sql="insert into articulos(descripcion, precio) values (%s,%s)"
# try:
# cursor.execute(sql)
# bbdd.commit()
# #messagebox.showinfo(message = "registro exitoso", title = "Aviso")
# except:
# bbdd.rollback()
# #messagebox.showinfo(message= "No registrado", title = "Aviso" )
# bbdd.close()
# bbdd= pymysql.connect( host= "localhost", user= "root", passwd="", db= "ejemplo1")
# cursor= bbdd.cursor()
# cursor.execute("DELETE FROM SOCIOS WHERE ID= 3")
# bbdd.commit()
# bbdd.close()
# bbdd= pymysql.connect( host= "localhost", user= "root", passwd="", db= "ejemplo1")
# cursor= bbdd.cursor()
# cursor.execute("ALTER TABLE SOCIOS AUTO_INCREMENT = 1")
# # "CREATE TABLE Socios (id INT PRIMARY KEY AUTO_INCREMENT, NOMBRE VARCHAR(50), CUOTAPAGA VARCHAR(2))")
# bbdd.commit()
# bbdd.close()
| 21 | 104 | 0.547816 | 255 | 2,541 | 5.443137 | 0.301961 | 0.056196 | 0.057637 | 0.063401 | 0.603746 | 0.569885 | 0.526657 | 0.480548 | 0.434438 | 0.434438 | 0 | 0.009045 | 0.303817 | 2,541 | 120 | 105 | 21.175 | 0.775579 | 0.536009 | 0 | 0.4 | 0 | 0 | 0.059621 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0.04 | 0.08 | 0 | 0.32 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30713a8fbba0af913226f90bffb00ec0ccd49f74 | 1,420 | py | Python | HLTrigger/Configuration/python/Tools/dasFileQuery.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | HLTrigger/Configuration/python/Tools/dasFileQuery.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | HLTrigger/Configuration/python/Tools/dasFileQuery.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import sys
import json
import das_client
def dasFileQuery(dataset):
    """Resolve a DAS dataset pattern and return the sorted list of its files.

    Queries the CMS DAS service for datasets matching *dataset*.  If the
    pattern matches exactly one dataset, a second query fetches its files and
    the sorted list of file names is returned.  If the pattern matches no
    dataset, or more than one, an error is written to stderr and the process
    exits with status 1.
    """
    query = 'dataset dataset=%s' % dataset
    host = 'https://cmsweb.cern.ch'    # default DAS endpoint
    idx = 0                            # default
    limit = 0                          # unlimited
    debug = 0                          # default
    thr = 300                          # default
    ckey = ""                          # default
    cert = ""                          # default
    jsondict = das_client.get_data(host, query, idx, limit, debug, thr, ckey, cert)

    # check if the pattern matches none, many, or one dataset
    if not jsondict['data'] or not jsondict['data'][0]['dataset']:
        sys.stderr.write('Error: the pattern "%s" does not match any dataset\n' % dataset)
        sys.exit(1)
    elif len(jsondict['data']) > 1:
        sys.stderr.write('Error: the pattern "%s" matches multiple datasets\n' % dataset)
        for d in jsondict['data']:
            sys.stderr.write('    %s\n' % d['dataset'][0]['name'])
        sys.exit(1)

    # exactly one match: expand the (possibly wildcarded) dataset name
    # (the unreachable `return []` statements that followed sys.exit(1)
    # have been removed)
    dataset = jsondict['data'][0]['dataset'][0]['name']

    query = 'file dataset=%s' % dataset
    jsondict = das_client.get_data(host, query, idx, limit, debug, thr, ckey, cert)

    # parse the results in JSON format, and extract the list of files
    files = sorted(f['file'][0]['name'] for f in jsondict['data'])
    return files
| 39.444444 | 86 | 0.554225 | 178 | 1,420 | 4.393258 | 0.38764 | 0.092072 | 0.053708 | 0.051151 | 0.222506 | 0.222506 | 0.222506 | 0.14578 | 0.14578 | 0.14578 | 0 | 0.014418 | 0.316197 | 1,420 | 35 | 87 | 40.571429 | 0.790937 | 0.141549 | 0 | 0.206897 | 0 | 0 | 0.18807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.103448 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3073c042c76656cc6f035106fb8e66424b847e5f | 477 | py | Python | 00-modules/builtin_modules/pickle_examples.py | cccaaannn/useful_functions | 1570cda8c642a39f04ed9f22ebeeab2bfb9e9424 | [
"MIT"
] | null | null | null | 00-modules/builtin_modules/pickle_examples.py | cccaaannn/useful_functions | 1570cda8c642a39f04ed9f22ebeeab2bfb9e9424 | [
"MIT"
] | null | null | null | 00-modules/builtin_modules/pickle_examples.py | cccaaannn/useful_functions | 1570cda8c642a39f04ed9f22ebeeab2bfb9e9424 | [
"MIT"
] | null | null | null | import pickle
# pickle can serialize python objects
# WARNING: never unpickle data from untrusted sources -- pickle.load /
# pickle.loads can execute arbitrary code while deserializing.
data = {1:"hi", 2: "there"}

# convert to byte (serialize in memory)
byte_data = pickle.dumps(data)

# convert back to python object
data2 = pickle.loads(byte_data)


# ----------using with files----------
filename = ""  # NOTE: set to a real path before running this section

# write to a file (simple form; the file handle is not explicitly closed)
pickle.dump(data, open(filename, "wb" ))

# preferred form: the context manager guarantees the file is closed, and
# HIGHEST_PROTOCOL selects the most efficient protocol this interpreter supports
with open(filename, "wb") as f:
    pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)

# read from file
unpickled_object = pickle.load(open(filename ,"rb"))
| 17.666667 | 58 | 0.677149 | 68 | 477 | 4.691176 | 0.558824 | 0.112853 | 0.087774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007481 | 0.159329 | 477 | 26 | 59 | 18.346154 | 0.78803 | 0.312369 | 0 | 0 | 0 | 0 | 0.040752 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
30750377e92de60cf64b84cb659e89e05ec986a3 | 4,676 | py | Python | appsite/scripts/cactus/loaddb.py | inchiresolver/inchiresolver | 6b3f080a4364ebe7499298e5a1b3cd4ed165322d | [
"BSD-3-Clause"
] | 3 | 2020-10-22T06:18:17.000Z | 2021-03-19T16:49:00.000Z | appsite/scripts/cactus/loaddb.py | inchiresolver/inchiresolver | 6b3f080a4364ebe7499298e5a1b3cd4ed165322d | [
"BSD-3-Clause"
] | 11 | 2019-11-01T23:04:31.000Z | 2022-02-10T12:32:11.000Z | appsite/scripts/cactus/loaddb.py | inchiresolver/inchiresolver | 6b3f080a4364ebe7499298e5a1b3cd4ed165322d | [
"BSD-3-Clause"
] | null | null | null | from os import sys, path
from resolver.models import *
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
sys.path.append('/home/app')
from client.lib.cactus_client import CactusClient
def run():
    """Reset the InChI-resolver database and seed it with demo data.

    Deletes every existing Organization, Inchi, Publisher, EntryPoint,
    EndPoint and MediaType, then recreates a fixed demo hierarchy
    (NIH -> NCI -> CADD group/person, entry points, URI-template endpoints)
    and finally loads InChI strings fetched through CactusClient.
    """
    # Wipe all existing rows so the seed is reproducible.
    Organization.objects.all().delete()
    Inchi.objects.all().delete()
    Publisher.objects.all().delete()
    EntryPoint.objects.all().delete()
    EndPoint.objects.all().delete()
    MediaType.objects.all().delete()

    client = CactusClient()

    # --- media types -------------------------------------------------------
    m1 = MediaType.create(
        name="text/plain",
        description="plain text media type"
    )
    m1.save()

    m2 = MediaType.create(
        name="image/gif",
        description="GIF image",
    )
    m2.save()

    # --- organizations (o2 is a child of o1) -------------------------------
    o1 = Organization.create(
        name="National Institutes of Health",
        abbreviation="NIH",
        href="https://www.nih.gov",
        category="government",
        parent=None
    )
    o1.save()

    o2 = Organization.create(
        name="National Cancer Institute",
        abbreviation="NCI",
        href="https://www.cancer.gov",
        category="government",
        parent=o1
    )
    o2.save()

    # --- publishers (p2 is a person within group p1) -----------------------
    p1 = Publisher.create(
        name="NCI Computer-Aided Drug Design (CADD) Group",
        category="group",
        organization=o2
    )
    p1.save()

    p2 = Publisher.create(
        name="Marc Nicklaus",
        category="person",
        email="marc.nicklaus@email.com",
        address="Frederick, MD 21702-1201, USA",
        href="https://ccr2.cancer.gov/resources/CBL/Scientists/Nicklaus.aspx",
        orcid="https://orcid.org/0000-0002-4775-7030",
        organization=o2,
        parent=p1
    )
    p2.save()

    # --- entry points ------------------------------------------------------
    # e0: this resolver itself; e1: the CIR API (child of e0); e2: root resolver.
    e0 = EntryPoint.create(
        name="NCI/CADD InChI Resolver",
        description="Demonstration InChI Resolver of the NCI/CADD group",
        category="self",
        href="https://cactus.inchi-resolver.org",
        entrypoint_href="https://cactus.inchi-resolver.org/_self",
        publisher=p1
    )
    e0.save()

    e1 = EntryPoint.create(
        name="Chemical Identifier Resolver",
        description="This service works as a resolver for different chemical structure identifiers and allows "
                    "the conversion of a given structure identifier into another representation or structure "
                    "identifier. It can be used via a web form or a simple URL API.",
        category="api",
        href="http://cactus.nci.nih.gov/chemical/structure",
        publisher=p2,
        parent=e0
    )
    e1.save()

    e2 = EntryPoint.create(
        name="InChI Trust Root Resolver",
        description="Root InChI Resolver at InChI Trust",
        category="resolver",
        href="http://root.inchi-resolver.org"
    )
    e2.save()

    # --- endpoints under e1 (URI templates, plain-text in/out) -------------
    x1 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+stdinchi|+stdinchikey}/smiles",
        description="Standard InChI to SMILES conversion",
        request_methods=['GET']
    )
    x1.save()
    x1.accept_header_media_types.add(m1)
    x1.content_media_types.add(m1)

    x2 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+stdinchi,+stdinchikey}/iupac_name",
        description="Standard InChI to IUPAC name conversion",
        request_methods=['GET']
    )
    x2.save()
    x2.accept_header_media_types.add(m1)
    x2.content_media_types.add(m1)

    # NOTE(review): uri targets /image but the description says "SMILES
    # conversion" -- looks like a copy-paste slip; confirm intended text.
    x3 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+stdinchi,+stdinchikey}/image",
        description="InChI to SMILES conversion",
        request_methods=['GET']
    )
    x3.save()
    x3.accept_header_media_types.add(m1)
    x3.content_media_types.add(m1)

    # NOTE(review): x4 has no request_methods, unlike x1-x3 -- confirm
    # whether GET should be listed here as well.
    x4 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+smiles}/stdinchi",
        description="SMILES to stdinchi conversion",
    )
    x4.save()
    x4.accept_header_media_types.add(m1)
    x4.content_media_types.add(m1)

    # x5 = EndPoint.create(
    #     entrypoint=e1,
    #     category="uritemplate",
    #     uri="{+smiles}/stdinchikey",
    #     description="SMILES to stdinchikey conversion",
    # )
    # x5.save()
    # x5.accept_header_media_types.add(m1)
    # x5.content_media_types.add(m1)

    # --- load InChI strings in batches of 10 (ids 10..99) ------------------
    for j in range(1, 10):
        ilist = client.fetch_inchi(range(j * 10, j * 10 + 10))
        for cid, i in ilist:
            print("Loading: %s" % (i,))
            try:
                inchi = Inchi.create(
                    string=i
                )
                print('{} {}'.format(inchi, inchi.added))
                inchi.save()
                inchi.entrypoints.add(e1)
            except Exception as e:
                # Best-effort load: report and continue with the next record.
                print(e)
| 27.833333 | 111 | 0.594311 | 523 | 4,676 | 5.24283 | 0.323136 | 0.03647 | 0.047411 | 0.054705 | 0.259664 | 0.213713 | 0.141867 | 0.112691 | 0.073304 | 0 | 0 | 0.028402 | 0.27716 | 4,676 | 167 | 112 | 28 | 0.78284 | 0.049829 | 0 | 0.096296 | 0 | 0 | 0.286262 | 0.026844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007407 | false | 0 | 0.022222 | 0 | 0.02963 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
307b75907e2ef43110ad5b30dc9de4dad44b596b | 1,454 | py | Python | misago/acl/admin.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | 1 | 2017-07-25T03:04:36.000Z | 2017-07-25T03:04:36.000Z | misago/acl/admin.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | misago/acl/admin.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from .views import DeleteRole, EditRole, NewRole, RolesList, RoleUsers
class MisagoAdminExtension(object):
    """Registers the permissions/role admin pages with the Misago admin site."""

    def register_urlpatterns(self, urlpatterns):
        """Register URL namespaces and the role-management routes."""
        # Permissions section
        urlpatterns.namespace(r'^permissions/', 'permissions')

        # Roles: CRUD views plus a per-role user listing.
        urlpatterns.namespace(r'^users/', 'users', 'permissions')
        urlpatterns.patterns(
            'permissions:users',
            url(r'^$', RolesList.as_view(), name='index'),
            url(r'^new/$', NewRole.as_view(), name='new'),
            url(r'^edit/(?P<pk>\d+)/$', EditRole.as_view(), name='edit'),
            url(r'^users/(?P<pk>\d+)/$', RoleUsers.as_view(), name='users'),
            url(r'^delete/(?P<pk>\d+)/$', DeleteRole.as_view(), name='delete'),
        )

    def register_navigation_nodes(self, site):
        """Add the "Permissions" section and its "User roles" child to the admin nav."""
        site.add_node(
            name=_("Permissions"),
            icon='fa fa-adjust',
            parent='misago:admin',
            after='misago:admin:users:accounts:index',
            namespace='misago:admin:permissions',
            link='misago:admin:permissions:users:index',
        )
        site.add_node(
            name=_("User roles"),
            icon='fa fa-th-large',
            parent='misago:admin:permissions',
            namespace='misago:admin:permissions:users',
            link='misago:admin:permissions:users:index',
        )
| 37.282051 | 79 | 0.586657 | 156 | 1,454 | 5.378205 | 0.365385 | 0.091776 | 0.059595 | 0.096544 | 0.085816 | 0.085816 | 0 | 0 | 0 | 0 | 0 | 0 | 0.253783 | 1,454 | 38 | 80 | 38.263158 | 0.773272 | 0.017194 | 0 | 0.129032 | 0 | 0 | 0.278401 | 0.143058 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.096774 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
307c7a8c88bf6ef5840be4d2b400a14178b101d4 | 5,138 | py | Python | src/visualization.py | DianaTaukin/DSD-SATN | 5a4ab5e3cfcb00e72ca27cf5ec10a8d8e29ef312 | [
"Apache-2.0"
] | 71 | 2020-04-06T08:23:30.000Z | 2022-03-21T03:40:11.000Z | src/visualization.py | DianaTaukin/DSD-SATN | 5a4ab5e3cfcb00e72ca27cf5ec10a8d8e29ef312 | [
"Apache-2.0"
] | 10 | 2020-04-11T14:45:52.000Z | 2021-08-19T04:44:13.000Z | src/visualization.py | DianaTaukin/DSD-SATN | 5a4ab5e3cfcb00e72ca27cf5ec10a8d8e29ef312 | [
"Apache-2.0"
] | 8 | 2020-05-19T12:18:49.000Z | 2022-03-22T08:04:27.000Z | from base import *
import utils.neuralrenderer_render as nr
class Visualizer(object):
    """Renders SMPL-style meshes over images and writes comparison images/videos.

    Wraps a neural-renderer instance (CUDA) and provides helpers to overlay
    rendered meshes, draw 2D keypoints, paste crops back into the original
    frames, and export an mp4.
    """

    def __init__(self,high_resolution=False):
        # high_resolution selects the renderer size (see nr.get_renderer).
        self.high_resolution = high_resolution
        self.renderer = nr.get_renderer(high_resolution=self.high_resolution).cuda()

    def visualize_renderer(self,verts,images):
        """Render `verts` and composite the result over `images` (BGR uint8)."""
        #verts = torch.from_numpy(verts).cuda()
        #verts = self.batch_orth_proj_verts(verts,cam)
        #verts = torch.cat((verts[:,:,1].unsqueeze(-1),\
        #    -verts[:,:,2].unsqueeze(-1),verts[:,:,0].unsqueeze(-1)),dim=-1)
        results = self.renderer.forward(verts)
        # NCHW float -> NHWC uint8, RGB -> BGR.
        renders = (results.detach().cpu().numpy().transpose((0,2,3,1))*256).astype(np.uint8)[:,:,:,::-1]
        # Pixels darker than the threshold are treated as background and
        # replaced by the source image; removes black jagged edges from the
        # render (when compositing on a white background).
        render_mask = ~(renders>100)#.astype(np.bool)
        renders[render_mask] = images[render_mask]
        return renders

    def visulize_result(self,outputs,kps,data,name,vnum = 6, white_background=False,rtype='',nokp=False,org_name=True,org_img=False,keep_name=False):
        """Write side-by-side images: input | (optional keypoints) | rendered mesh.

        outputs: model outputs tuple; only verts_camed is used here.
        kps: predicted keypoints in [-1, 1], reshaped to (N, 14, 2).
        vnum: max number of samples to visualize (capped by the batch size).
        """
        if not keep_name:
            if 'name' in data:
                img_names = data['name']
            else:
                img_names = data['imgpath']
        imgs = data['image_org'].contiguous().numpy().astype(np.uint8)[:vnum,:,:,::-1]
        vnum = imgs.shape[0]
        # Map normalized keypoints to pixel coordinates (500 px render at
        # high resolution, otherwise the input image side length).
        if self.high_resolution:
            kps = ((kps.detach().contiguous().cpu().numpy()+1)/2 * 500).reshape(-1,14,2)[:vnum]
        else:
            kps = ((kps.detach().contiguous().cpu().numpy()+1)/2 * imgs.shape[1]).reshape(-1,14,2)[:vnum]
        kp_imgs = []
        #white_background=False
        for idx in range(vnum):
            if white_background:
                kp_imgs.append(draw_lsp_14kp__bone(np.ones_like(imgs[idx])*255, kps[idx]))
            else:
                kp_imgs.append(draw_lsp_14kp__bone(imgs[idx].copy(), kps[idx]))

        ((cam,pose,shape), predict_verts, predict_j2d, predict_j3d, predict_Rs,verts_camed,j3d_camed) = outputs
        if white_background:
            rendered_imgs = self.visualize_renderer(verts_camed[:vnum], np.ones_like(imgs)*255)
        else:
            rendered_imgs = self.visualize_renderer(verts_camed[:vnum], imgs)

        if org_img:
            # Paste each rendered crop back into the full original frame
            # using the crop offsets recorded by the data loader.
            # NOTE(review): offsets columns appear to be
            # [w, h, y0, y1, x0, x1, ry0, rh, rx0, rw] -- confirm against the loader.
            offsets = data['offsets'].numpy()
            org_image_names = data['imgpath']
            #image_org = data['org_image'].numpy()
            imgs = []
            #imgs = data['orgimage'].numpy()
            org_image = []
            for n in range(rendered_imgs.shape[0]):
                org_imge = cv2.imread(org_image_names[n])#image_org[n].numpy().astype(np.uint8)
                imgs.append(org_imge.copy())
                resized_images = cv2.resize(rendered_imgs[n], (offsets[n,0]+1, offsets[n,1]+1), interpolation = cv2.INTER_CUBIC)
                #print(offsets[n,2],(offsets[n,3]-1),offsets[n,4],(offsets[n,5]-1))
                org_imge[offsets[n,2]:(offsets[n,3]-1),offsets[n,4]:(offsets[n,5]-1),:] = resized_images[offsets[n,6]:(offsets[n,7]-1+offsets[n,6]),offsets[n,8]:(offsets[n,9]+offsets[n,8]-1),:]
                org_image.append(org_imge)
            #imgs = np.array(imgs)
            #org_image = np.array(org_image)

        for idx in range(vnum):
            if nokp:
                if org_img:
                    if len(org_image[idx].shape)<3:
                        # cv2.imread returned an unexpected (non-color) image; skip it.
                        print(org_image_names[idx],org_image[idx].shape)
                        continue
                    result_img = np.hstack((imgs[idx], org_image[idx]))
                else:
                    result_img = np.hstack((imgs[idx], rendered_imgs[idx]))
            else:
                result_img = np.hstack((imgs[idx],kp_imgs[idx], rendered_imgs[idx]))
            #cv2.imwrite(name+'_{}_org_{}.jpg'.format(idx,rtype),imgs[idx])
            if keep_name:
                # `name` is a list of per-sample output paths in this mode.
                #print(name[idx])
                cv2.imwrite(name[idx],result_img)
            elif org_name:
                cv2.imwrite('{}{}-{}'.format(name.split(os.path.basename(name))[0],img_names[idx].split('/')[-2],os.path.basename(img_names[idx])),result_img)
            else:
                cv2.imwrite(name+'_{}_{}.jpg'.format(idx,rtype),result_img)

    def render_video(self,verts,params,images,org_image,offsets,name):
        """Render each frame's mesh, paste it into the full frames, write an mp4."""
        # NOTE(review): visualize_renderer takes (verts, images); this
        # three-argument call would raise TypeError -- confirm the intended
        # renderer signature before using this method.
        rendered_images = self.visualize_renderer(verts,params[:,:3],images)
        for n in range(verts.shape[0]):
            resized_images = cv2.resize(rendered_images[n], (offsets[n,0]+1, offsets[n,1]+1), interpolation = cv2.INTER_CUBIC)
            org_image[n,offsets[n,2]:(offsets[n,3]-1),offsets[n,4]:(offsets[n,5]-1),:] = resized_images[offsets[n,6]:(offsets[n,7]-1+offsets[n,6]),offsets[n,8]:(offsets[n,9]+offsets[n,8]-1),:]
        self.make_mp4(org_image,name)

    def make_mp4(self,images,name):
        """Write `images` (N,H,W,3 uint8 BGR) to `<name>.mp4` at 50 fps."""
        num = images.shape[0]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_movie = cv2.VideoWriter(name+'.mp4', fourcc, 50, (images.shape[2], images.shape[1]))
        for i in range(num):
            if i%100==0:
                print('Writing frame: ',i,'/',num)
            output_movie.write(images[i])
307f461b2850c76795bf48a0896004e467675d49 | 455 | py | Python | proxy/test/test_proxy.py | SFFReganDowling/test | c141daca395585710e5ff335e96ffd23ce9c71bb | [
"MIT"
] | 3 | 2015-11-26T11:44:57.000Z | 2021-12-07T18:08:53.000Z | proxy/test/test_proxy.py | SFFReganDowling/test | c141daca395585710e5ff335e96ffd23ce9c71bb | [
"MIT"
] | 5 | 2016-04-22T10:06:41.000Z | 2022-02-27T02:53:10.000Z | proxy/test/test_proxy.py | SFFReganDowling/test | c141daca395585710e5ff335e96ffd23ce9c71bb | [
"MIT"
] | null | null | null | import wsgiref.util
import flask
from proxy import proxy
# pylint: disable=W0212
def test_happy_path():
environ = {
"REQUEST_METHOD": "GET",
"PATH_INFO": "/locationforecast/1.9/",
"QUERY_STRING": "lat=59.31895603;lon=18.0517762",
"HTTP_REFERER": "https://walles.github.io/weatherclock",
}
wsgiref.util.setup_testing_defaults(environ)
request = flask.Request(environ)
proxy._proxy_request(request)
| 21.666667 | 64 | 0.674725 | 54 | 455 | 5.5 | 0.722222 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06812 | 0.193407 | 455 | 20 | 65 | 22.75 | 0.741144 | 0.046154 | 0 | 0 | 0 | 0 | 0.321759 | 0.12037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3080c9c3e2fa02a8d02d8a5c53a2bd5e45b99323 | 6,756 | py | Python | exps/jets/top_quark_gbdt.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | 17 | 2021-07-26T01:03:59.000Z | 2022-01-23T10:31:56.000Z | exps/jets/top_quark_gbdt.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | 2 | 2021-12-10T09:53:48.000Z | 2022-01-25T17:08:41.000Z | exps/jets/top_quark_gbdt.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | 3 | 2021-09-14T11:39:35.000Z | 2022-01-23T06:51:48.000Z | import os
import numpy as np
import argparse
import logging
import random
import pickle
from pprint import pformat
from exps.data import ParticleNetDataset
from settree.set_data import SetDataset, OPERATIONS, merge_init_datasets
import exps.eval_utils as eval
from exps.eval_utils import create_logger
data_root = '/home/royhir/projects/data/physics/top_quark/proc'
def pre_process(dataset, limit=None):
    """Convert a ParticleNetDataset into per-jet records and integer labels.

    Args:
        dataset: object with attributes ``X`` (dict holding 'points',
            'features' and 'mask' arrays of shape [N, P, C]) and ``y``
            (one-hot label array of shape [N, num_classes]).
        limit: optional number of samples to draw uniformly without
            replacement; ``None`` uses every sample (in random order).

    Returns:
        Tuple ``(records, labels)``: ``records`` is a list of 2-D arrays,
        one per jet, concatenating the point/feature/mask columns of the
        non-empty rows; ``labels`` is a 1-D numpy array of class indices.
        Jets whose point cloud is entirely zero are skipped.
    """
    x = dataset.X
    y = dataset.y

    if limit is None:
        limit = len(y)

    # Random subset (a random permutation when limit == len(y)).
    inds = random.sample(range(len(y)), limit)
    x_points = x['points'].take(inds, axis=0)
    x_features = x['features'].take(inds, axis=0)
    x_mask = x['mask'].take(inds, axis=0)
    y = y.take(inds, axis=0)
    y = y.argmax(1)  # one-hot -> class index

    records = []
    ys = []
    # `label` avoids shadowing the outer `y` array (was `y` before).
    for p, f, m, label in zip(x_points, x_features, x_mask, y):
        try:
            # Index of the last row containing any non-zero point.
            m_row = np.where(p.any(axis=1))[0].max()
            # NOTE(review): the slice [:m_row] excludes row m_row itself --
            # confirm whether the last valid row is meant to be dropped.
            records.append(np.concatenate((p[:m_row, :], f[:m_row, :], m[:m_row, :]), axis=1))
            ys.append(label)
        except ValueError:
            # All-zero point cloud: np.where(...) is empty so .max() raises
            # ValueError. Skip the jet (narrowed from a bare `except`).
            pass

    return records, np.array(ys)
def get_top_quark_datset(train=None, val=None, test=None):
    """Load the top-quark tagging splits from disk and build SetDatasets.

    Args:
        train, val, test: optional per-split sample limits forwarded to
            pre_process (None loads the full split).

    Returns:
        (train_ds, train_y, val_ds, val_y, test_ds, test_y) where each *_ds
        is an initialized SetDataset and each *_y a numpy array of labels.
    """
    train_dataset = ParticleNetDataset(os.path.join(data_root, 'train_file_0.awkd'), data_format='channel_last')
    val_dataset = ParticleNetDataset(os.path.join(data_root, 'val_file_0.awkd'), data_format='channel_last')
    test_dataset = ParticleNetDataset(os.path.join(data_root, 'test_file_0.awkd'), data_format='channel_last')
    logging.info('Loaded raw data')

    # Convert each split into per-jet records + integer labels.
    train_records, train_y = pre_process(train_dataset, limit=train)
    val_records, val_y = pre_process(val_dataset, limit=val)
    test_records, test_y = pre_process(test_dataset, limit=test)
    logging.info('Finish pre-processing')
    logging.info('train: {} val: {} test: {}'.format(len(train_y), len(val_y), len(test_y)))

    return SetDataset(records=train_records, is_init=True), train_y, \
           SetDataset(records=val_records, is_init=True), val_y, \
           SetDataset(records=test_records, is_init=True), test_y
if __name__ == '__main__':
    # Train and compare a flattened XGBoost model vs. a set-GBDT on the
    # top-quark tagging data.
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_name", type=str, default='test')
    parser.add_argument("--splits", type=int, nargs="+", default=[1200000, 400000, 400000])
    parser.add_argument("--attention_set_limit", type=int, default=6)
    # NOTE(review): --use_attention_set is parsed but set_params hard-codes
    # 'use_attention_set': True below -- confirm which should win.
    parser.add_argument("--use_attention_set", action='store_true')
    parser.add_argument('--save', action='store_true')
    parser.add_argument("--log", action='store_true')
    args = parser.parse_args()

    # Fix seeds for reproducible sampling in pre_process and model training.
    np.random.seed(42)
    random.seed(42)

    log_dir = os.path.join(os.path.abspath('__file__' + '/../'), 'outputs', 'top_quark')
    create_logger(log_dir=log_dir,
                  log_name=args.exp_name,
                  dump=args.log)
    logging.info(args)

    train, val, test = args.splits
    ds_train, y_train, ds_val, y_val, ds_test, y_test = get_top_quark_datset(train, val, test)

    # Hyper-parameters shared by all model variants below.
    shared_gbdt_params = {'n_estimators': 50,
                          'learning_rate': 0.1,
                          'max_depth': 8,
                          'max_features': None,
                          'subsample': 0.5,
                          'criterion': 'mse',
                          'early_stopping_rounds': 5,
                          'random_state': 42}
    logging.info('Shared params:\n' + pformat(shared_gbdt_params))

    set_params = {'n_estimators': shared_gbdt_params['n_estimators'],
                  'operations': OPERATIONS,
                  'splitter': 'sklearn',
                  'use_attention_set': True,
                  'use_attention_set_comp': False,
                  'attention_set_limit': args.attention_set_limit,
                  'max_depth': shared_gbdt_params['max_depth'],
                  'max_features': shared_gbdt_params['max_features'],
                  'subsample': shared_gbdt_params['subsample'],
                  'random_state': shared_gbdt_params['random_state'],
                  'save_path': None,
                  'validation_fraction': 0.25,
                  'tol': 1e-4,
                  'n_iter_no_change': shared_gbdt_params['early_stopping_rounds'],
                  'verbose': 3}

    # NOTE(review): sklearn_params is built but never used below -- possibly
    # vestigial from an earlier sklearn baseline.
    sklearn_params = {'n_estimators': shared_gbdt_params['n_estimators'],
                      'criterion': 'mse',
                      'learning_rate': shared_gbdt_params['learning_rate'],
                      'max_depth': shared_gbdt_params['max_depth'],
                      'max_features': shared_gbdt_params['max_features'],
                      'subsample': shared_gbdt_params['subsample'],
                      'validation_fraction': 0.25,
                      'tol': 1e-4,
                      'n_iter_no_change': shared_gbdt_params['early_stopping_rounds'],
                      'random_state': shared_gbdt_params['random_state']}

    xgboost_params = {#'tree_method': 'gpu_hist',
                      #'gpu_id': 7,
                      #'objective': 'binary:logistic',
                      'max_depth': shared_gbdt_params['max_depth'],
                      'n_jobs': 10,
                      'eval_metric': ['error'],
                      'learning_rate': shared_gbdt_params['learning_rate'],
                      'n_estimators': shared_gbdt_params['n_estimators'],
                      'colsample_bytree': shared_gbdt_params['max_features'],
                      'subsample': shared_gbdt_params['subsample'],
                      'reg_lambda': 0,
                      'verbosity': 0,
                      'random_state': shared_gbdt_params['random_state'],
                      'seed': shared_gbdt_params['random_state']}

    # Baseline: flatten each set into fixed-length features and run XGBoost.
    x_train, x_test, x_val = eval.flatten_datasets(ds_train, ds_test,
                                                   operations_list=set_params['operations'],
                                                   ds_val=ds_val)

    xgboost_gbtd = eval.train_and_predict_xgboost(xgboost_params,
                                                  x_train, y_train,
                                                  x_test, y_test,
                                                  val_x=None, val_y=None,
                                                  early_stopping_rounds=None)

    # Set-GBDT trains on train+val merged (it splits its own validation
    # fraction internally, per set_params['validation_fraction']).
    ds_train_val = merge_init_datasets(ds_train, ds_val)
    set_gbtd = eval.train_and_predict_set_gbdt(set_params,
                                               ds_train_val, np.concatenate([y_train, y_val]),
                                               ds_test, y_test,
                                               resume=None)

    if args.save:
        pkl_filename = os.path.join(log_dir, '{}_model.pkl'.format(args.exp_name))
        with open(pkl_filename, 'wb') as file:
            pickle.dump(set_gbtd, file)
| 43.87013 | 112 | 0.568532 | 789 | 6,756 | 4.532319 | 0.238276 | 0.061521 | 0.098434 | 0.031879 | 0.35962 | 0.311521 | 0.276846 | 0.141779 | 0.11717 | 0.11717 | 0 | 0.01313 | 0.312315 | 6,756 | 153 | 113 | 44.156863 | 0.756565 | 0.010213 | 0 | 0.15748 | 0 | 0 | 0.173399 | 0.02319 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015748 | false | 0.007874 | 0.086614 | 0 | 0.11811 | 0.007874 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
061a89369cd09d094301833ef16484b3a48346f9 | 2,801 | py | Python | wonk/config.py | cjduffett/wonk | 7ee7d6e444497cb91901ed4bd6de53d5aa574963 | [
"Apache-2.0"
] | 103 | 2021-09-25T03:03:32.000Z | 2022-03-20T19:13:48.000Z | wonk/config.py | cjduffett/wonk | 7ee7d6e444497cb91901ed4bd6de53d5aa574963 | [
"Apache-2.0"
] | null | null | null | wonk/config.py | cjduffett/wonk | 7ee7d6e444497cb91901ed4bd6de53d5aa574963 | [
"Apache-2.0"
] | 6 | 2021-09-27T17:50:23.000Z | 2022-02-15T22:44:12.000Z | """Manage Wonk's configuration."""
import pathlib
from typing import Any, Dict, List, Optional

import yaml
from pydantic import BaseModel
from toposort import toposort_flatten  # type: ignore

from wonk.exceptions import UnknownParentError
class PolicySet(BaseModel):
    """Describes a policy set."""

    name: str
    managed: List[str] = []
    local: List[str] = []
    inherits: List[str] = []

    def __ior__(self, other):
        """Append the values from another policy set onto this one's."""

        # Deliberately O(n*m): appending in order keeps the final output files
        # stable, and these lists are almost always very short, so a simple,
        # readable merge beats a clever one here.
        for attr in ("managed", "local"):
            ours = getattr(self, attr)
            for item in getattr(other, attr):
                if item not in ours:
                    ours.append(item)

        return self
class Config(BaseModel):
    """Describes a Wonk configuration file."""

    # Mapping of policy-set name to its parsed PolicySet definition.
    policy_sets: Dict[str, PolicySet]
def load_config(config_path: Optional[pathlib.Path] = None) -> Config:
    """Load a configuration file and return its parsed contents.

    Args:
        config_path: Path to the YAML configuration file. Defaults to
            ``wonk.yaml`` in the current working directory. (Annotation
            fixed: the default is None, so the type is Optional.)

    Returns:
        The parsed Config object.
    """

    if config_path is None:
        config_path = pathlib.Path("wonk.yaml")

    data = yaml.load(config_path.read_text(), Loader=yaml.SafeLoader)
    return parse_config(data)
def parse_config(block_all_config: Dict[str, Any]) -> Config:
    """Parse the dictionary containing all Wonk configuration into a Config object."""

    # A missing or empty/None "policy_sets" key yields an empty mapping;
    # otherwise hand the block to the policy-set parser.
    policy_set_block = block_all_config.get("policy_sets") or {}
    parsed = parse_policy_sets(policy_set_block) if policy_set_block else {}

    return Config(policy_sets=parsed)  # type: ignore
def parse_policy_sets(block_policy_sets: Dict[str, Any]) -> Dict[str, PolicySet]:
    """Parse the dictionary containing policy set definitions into a dict of PolicySets.

    Validates that every inherited parent exists (raising UnknownParentError
    otherwise), then merges each parent's policies into its children in
    topological order so that deep inheritance chains resolve correctly.
    """

    policy_sets = {}
    deps = {}
    for name, definition in block_policy_sets.items():
        # Inject the mapping key as the PolicySet's name field.
        with_name = {**definition, **{"name": name}}
        policy_set = PolicySet(**with_name)
        policy_sets[name] = policy_set
        for parent_name in policy_set.inherits:
            if parent_name not in block_policy_sets:
                raise UnknownParentError(name, parent_name)

        # Build a dependency graph from the set of inheritance definitions from the classes.
        deps[name] = set(policy_set.inherits)

    # Process parents before children so inherited policies accumulate down
    # the chain; PolicySet.__ior__ performs the ordered, de-duplicated merge.
    for name in toposort_flatten(deps):
        policy_set = policy_sets[name]
        for parent_name in policy_set.inherits:
            policy_set |= policy_sets[parent_name]

    return policy_sets
| 30.11828 | 98 | 0.672617 | 367 | 2,801 | 4.978202 | 0.326975 | 0.098522 | 0.041051 | 0.01642 | 0.085386 | 0.067871 | 0.03503 | 0 | 0 | 0 | 0 | 0 | 0.241342 | 2,801 | 92 | 99 | 30.445652 | 0.859765 | 0.265976 | 0 | 0.08 | 0 | 0 | 0.011917 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.42 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
061be587da4bb712ec3ad5fa16ba9684df1988f9 | 5,325 | py | Python | models/quantize_utils.py | ARM-software/sesr | 26dd727996809fe13efb0c0f137f259c1b2d0f6e | [
"Apache-2.0"
] | 25 | 2021-11-08T12:48:09.000Z | 2022-03-29T02:56:18.000Z | models/quantize_utils.py | ARM-software/sesr | 26dd727996809fe13efb0c0f137f259c1b2d0f6e | [
"Apache-2.0"
] | 12 | 2021-10-04T05:59:56.000Z | 2022-03-29T06:06:17.000Z | models/quantize_utils.py | ARM-software/sesr | 26dd727996809fe13efb0c0f137f259c1b2d0f6e | [
"Apache-2.0"
] | 6 | 2021-11-26T09:27:18.000Z | 2022-02-24T14:52:01.000Z | # Copyright 2021 Arm Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Callable, List, Tuple, Union
import tensorflow as tf
def compute_ranges(kernel: tf.Tensor, per_channel: bool, symmetric: bool) -> Tuple[tf.Tensor, tf.Tensor]:
    """Return (max, min) quantization ranges for `kernel`, detached from the graph.

    Per-channel ranges reduce over every axis except the last; symmetric
    ranges are centered on zero using the largest absolute value.
    """
    reduce_axes = None
    if per_channel:
        reduce_axes = tf.range(tf.rank(kernel) - 1)

    if symmetric:
        quant_max = tf.stop_gradient(tf.math.reduce_max(tf.math.abs(kernel), axis=reduce_axes))
        return quant_max, -quant_max

    quant_max = tf.stop_gradient(tf.math.reduce_max(kernel, axis=reduce_axes))
    quant_min = tf.stop_gradient(tf.math.reduce_min(kernel, axis=reduce_axes))
    return quant_max, quant_min
@tf.custom_gradient
def floor_ste(x: tf.Tensor) -> Tuple[tf.Tensor, Callable[[tf.Tensor], List[tf.Tensor]]]:
    """Floor with a straight-through estimator: identity gradient in backward."""
    y = tf.floor(x)

    def grad(dy: tf.Tensor) -> List[tf.Tensor]:
        # Straight-through: pass the incoming gradient unchanged, as if
        # floor were the identity.
        return [dy]

    return y, grad
def get_nudged_ranges_scale(
        min: tf.Tensor,
        max: tf.Tensor,
        num_bits: int,
        narrow_range: bool = False) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
    """Nudge (min, max) so the quantization grid contains an exact zero.

    Args:
        min: lower end of the real-valued range.
        max: upper end of the real-valued range.
        num_bits: bit width; the integer grid spans [0, 2^num_bits - 1].
        narrow_range: if True, reserve the lowest code (grid starts at 1).

    Returns:
        (nudged_min, nudged_max, scale) where scale is the step between
        adjacent quantized values.
    """
    quant_max = tf.math.pow(2., tf.cast(num_bits, dtype=tf.dtypes.float32)) - 1.
    quant_min = tf.constant(1.) if narrow_range else tf.constant(0.)
    scale = (max - min) / (quant_max - quant_min)

    # Rounding the zero-point to ensure one of the quantized values snap to zero
    zero_point_from_min = quant_min - min / scale
    nudged_zero_point = tf.round(zero_point_from_min)
    # Clamp the zero-point into the representable integer range.
    nudged_zero_point = tf.where(zero_point_from_min < quant_min,
                                 quant_min * tf.ones(shape=tf.shape(nudged_zero_point)),
                                 nudged_zero_point)
    nudged_zero_point = tf.where(zero_point_from_min > quant_max,
                                 quant_max * tf.ones(shape=tf.shape(nudged_zero_point)),
                                 nudged_zero_point)

    # adjust/nudge the min/max to ensure zero-point snaps to real zero.
    nudged_min = (quant_min - nudged_zero_point) * scale
    nudged_max = (quant_max - nudged_zero_point) * scale
    return nudged_min, nudged_max, scale
def fake_quant_with_min_max_vars(
        inputs: tf.Tensor,
        min: tf.Tensor,
        max: tf.Tensor,
        num_bits: int,
        narrow_range: bool = False) -> tf.Tensor:
    """
    This is differentiable equivalent of the utility in tf.quantization.
    tf.quantization.fake_quant* utilities only allows the min/max ranges
    to increase through gradients, but we would have to rely on l2_loss
    to decrease the min/max ranges. This updated utility allows the gradients
    to both increase and decrease the min/max ranges.

    Args:
        inputs: tensor to fake-quantize.
        min: lower end of the quantization range (trainable).
        max: upper end of the quantization range (trainable).
        num_bits: bit width of the simulated quantization.
        narrow_range: if True, reserve the lowest quantized value.

    Returns:
        A tensor shaped like `inputs`, snapped to the simulated grid.
    """
    nudged_min, nudged_max, scale = get_nudged_ranges_scale(min, max, num_bits, narrow_range)

    # Saturate to the nudged range, then quantize on the `scale` grid.
    clipped_data = tf.clip_by_value(inputs, nudged_min, nudged_max)
    shifted_data = clipped_data - nudged_min
    # floor(x + 0.5) rounds to nearest; floor_ste keeps an identity gradient.
    quant_data = floor_ste(shifted_data / scale + 0.5)
    quant_data = quant_data * scale + nudged_min
    return quant_data
# The per-channel variant shares the elementwise implementation above; the
# min/max tensors simply carry per-channel shapes and broadcast.
fake_quant_with_min_max_vars_per_channel = fake_quant_with_min_max_vars
class ActivationQuantizationBlock(tf.keras.layers.Layer):
    """Keras layer that fake-quantizes activations to 8 bits.

    In 'train' mode it uses the differentiable fake-quant above (min/max
    trainable in both directions) and initializes the ranges from the first
    batch; in 'infer' mode it uses the stock tf.quantization op. When
    `enabled` is False the layer is a pass-through.
    """

    def __init__(self,
                 enabled: bool,
                 mode: str):
        super().__init__()
        self.enabled = enabled
        self.mode = mode
        # Select the fake-quant implementation once, by mode.
        if self.mode == 'train':
            self.fake_quant_with_min_max_vars_fn = \
                fake_quant_with_min_max_vars
        elif self.mode == 'infer':
            self.fake_quant_with_min_max_vars_fn = \
                tf.quantization.fake_quant_with_min_max_vars

    def build(self, input_shape):
        # Scalar trainable range variables; filled in lazily from the first
        # batch (see init_quant_ranges) when training.
        if self.enabled:
            self.quant_min = self.add_weight(
                name='act_quant_min',
                trainable=True)
            self.quant_max = self.add_weight(
                name='act_quant_max',
                trainable=True)
            if self.mode == 'train':
                self.quant_initialized = tf.Variable(False, trainable=False)

    def init_quant_ranges(self, inputs: tf.Tensor) -> None:
        """Seed quant_min/quant_max from the observed activation range."""
        quant_max, quant_min = compute_ranges(inputs, per_channel=False, symmetric=False)
        self.quant_max.assign(quant_max)
        self.quant_min.assign(quant_min)
        self.quant_initialized.assign(True)

    def call(self, inputs):
        if self.enabled:
            # One-time range initialization on the first training batch.
            if self.mode == "train":
                if not self.quant_initialized:
                    self.init_quant_ranges(inputs)
            return self.fake_quant_with_min_max_vars_fn(
                inputs,
                min=self.quant_min,
                max=self.quant_max,
                num_bits=8,
                narrow_range=False)
        else:
            return inputs
061ff9f824e772b63623e52f1ce6ebb062cf98da | 3,192 | py | Python | sorting/sorting_train.py | soumen-chakrabarti/gumbel_sinkhorn | aedf8adbc7f123821374da84a23e51d3a0cf54c5 | [
"Apache-2.0"
] | 65 | 2017-09-24T19:38:34.000Z | 2022-01-18T16:07:05.000Z | sorting/sorting_train.py | soumen-chakrabarti/gumbel_sinkhorn | aedf8adbc7f123821374da84a23e51d3a0cf54c5 | [
"Apache-2.0"
] | null | null | null | sorting/sorting_train.py | soumen-chakrabarti/gumbel_sinkhorn | aedf8adbc7f123821374da84a23e51d3a0cf54c5 | [
"Apache-2.0"
] | 22 | 2017-10-01T12:55:38.000Z | 2022-01-13T19:33:15.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a model that sorts numbers, keeping loss summaries in tensorboard.
The flag hparam has to be passed as a string of comma separated statements of
the form hparam=value, where the hparam's are any of the listed in the
dictionary DEFAULT_HPARAMS.
See the README.md file for further compilation and running instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sorting_model
# TF1-style aliases for the flag/file utilities used below.
flags = tf.app.flags
gfile = tf.gfile
FLAGS = flags.FLAGS

# Command-line flags: hyperparameter overrides, training length, and
# checkpoint/summary cadence.
flags.DEFINE_string('hparams', '', 'Hyperparameters')
flags.DEFINE_integer('num_iters', 500, 'Number of iterations')
flags.DEFINE_integer(
    'save_summaries_secs', 30,
    'The frequency with which summaries are saved, in seconds.')
flags.DEFINE_integer(
    'save_interval_secs', 30,
    'The frequency with which the model is saved, in seconds.')
flags.DEFINE_string('exp_log_dir', '/tmp/sorting/',
                    'Directory where to write event logs.')
flags.DEFINE_integer('max_to_keep', 1, 'Maximum number of checkpoints to keep')

# Default hyperparameters; any of these can be overridden via the
# --hparams flag as comma-separated `name=value` pairs (see module docstring).
DEFAULT_HPARAMS = tf.contrib.training.HParams(n_numbers=50,
                                              lr=0.1,
                                              temperature=1.0,
                                              batch_size=10,
                                              prob_inc=1.0,
                                              samples_per_num=5,
                                              n_iter_sinkhorn=10,
                                              n_units=32,
                                              noise_factor=1.0,
                                              optimizer='adam',
                                              keep_prob=1.)
def main(_):
    """Build the sorting model graph and run TF1 slim training.

    Parses --hparams overrides on top of DEFAULT_HPARAMS, ensures the log
    directory exists, constructs the model (inputs, network, loss,
    optimizer, summaries) inside its own graph, and hands control to
    tf.contrib.slim.learning.train for the checkpoint/summary loop.
    """
    hparams = DEFAULT_HPARAMS
    hparams.parse(FLAGS.hparams)
    if not gfile.Exists(FLAGS.exp_log_dir):
        gfile.MakeDirs(FLAGS.exp_log_dir)
    tf.reset_default_graph()
    g = tf.Graph()
    model = sorting_model.SortingModel(g, hparams)
    with g.as_default():
        # Build order matters: inputs -> network -> loss -> optimizer,
        # then attach the training summaries.
        model.set_input()
        model.build_network()
        model.build_l2s_loss()
        model.build_optimizer()
        model.add_summaries_train()
        with tf.Session():
            # slim's train loop owns stepping, checkpointing and summaries.
            tf.contrib.slim.learning.train(
                train_op=model.train_op,
                logdir=FLAGS.exp_log_dir,
                global_step=model.global_step,
                saver=tf.train.Saver(max_to_keep=FLAGS.max_to_keep),
                number_of_steps=FLAGS.num_iters,
                save_summaries_secs=FLAGS.save_summaries_secs,
                save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
    # tf.app.run parses the command-line flags before dispatching to main.
    tf.app.run(main)
| 35.466667 | 79 | 0.635965 | 410 | 3,192 | 4.736585 | 0.456098 | 0.030896 | 0.037075 | 0.021627 | 0.053553 | 0.027806 | 0 | 0 | 0 | 0 | 0 | 0.015344 | 0.285401 | 3,192 | 89 | 80 | 35.865169 | 0.836037 | 0.274123 | 0 | 0.036364 | 0 | 0 | 0.139687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.090909 | 0 | 0.109091 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0624b48ea9d4dc4aaa2193d16b26d5a5663fbaf2 | 1,578 | py | Python | tests/cli_test.py | chineseluo/opensourcetest | b0d222c8b29ff8f70a740ac2b1588a437d41b761 | [
"Apache-2.0"
] | 69 | 2020-10-20T14:25:49.000Z | 2022-02-18T02:50:20.000Z | tests/cli_test.py | aoozoo/opensourcetest | 6eaff706c9397847834ef3eef7ad57d5b7f5c5a3 | [
"Apache-2.0"
] | 6 | 2020-11-23T06:56:09.000Z | 2022-03-16T04:33:53.000Z | tests/cli_test.py | aoozoo/opensourcetest | 6eaff706c9397847834ef3eef7ad57d5b7f5c5a3 | [
"Apache-2.0"
] | 8 | 2021-02-01T03:23:20.000Z | 2022-02-18T02:50:47.000Z | #!/user/bin/env python
# -*- coding: utf-8 -*-
"""
------------------------------------
@Project : opensourcetest
@Time : 2020/11/12 15:01
@Auth : chineseluo
@Email : 848257135@qq.com
@File : cli_test.py
@IDE : PyCharm
------------------------------------
"""
import os
import sys
import unittest
from opensourcetest.cli import main
class TestCli(unittest.TestCase):
    """Smoke tests for the OST command-line entry point.

    Every covered subcommand/flag is expected to finish by raising
    SystemExit with exit code 0. Each test patches sys.argv and invokes
    main() directly.
    """

    def _assert_exits_zero(self, *cli_args: str) -> None:
        # Shared driver for all tests: the original repeated this 4-line
        # pattern verbatim in six test methods.
        sys.argv = ["OST", *cli_args]
        with self.assertRaises(SystemExit) as cm:
            main()
        self.assertEqual(cm.exception.code, 0)

    def test_show_version(self):
        self._assert_exits_zero("-V")

    def test_show_help(self):
        self._assert_exits_zero("-h")

    def test_show_create_http_project(self):
        self._assert_exits_zero("start_http_project")

    def test_show_create_ui_project(self):
        self._assert_exits_zero("start_ui_project")

    def test_show_create_app_project(self):
        self._assert_exits_zero("start_app_project")

    def test_show_online_docs_address(self):
        self._assert_exits_zero("onlinedocs")
| 27.684211 | 49 | 0.591255 | 188 | 1,578 | 4.819149 | 0.335106 | 0.046358 | 0.072848 | 0.092715 | 0.646799 | 0.646799 | 0.560706 | 0.560706 | 0.560706 | 0.560706 | 0 | 0.023451 | 0.243346 | 1,578 | 56 | 50 | 28.178571 | 0.735343 | 0.166667 | 0 | 0.514286 | 0 | 0 | 0.063553 | 0 | 0 | 0 | 0 | 0 | 0.342857 | 1 | 0.171429 | false | 0 | 0.114286 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0625d68871943fd8a255cbe675fa3fb3bffccaf0 | 2,364 | py | Python | multicast_client.py | mmuravytskyi/multiprotocol-chat | 1a763c53c43d1d7e07ecf066bb0ed3d9dbc73af9 | [
"MIT"
] | null | null | null | multicast_client.py | mmuravytskyi/multiprotocol-chat | 1a763c53c43d1d7e07ecf066bb0ed3d9dbc73af9 | [
"MIT"
] | 2 | 2021-05-28T10:56:22.000Z | 2021-05-28T10:56:35.000Z | multicast_client.py | mmuravytskyi/multiprotocol-chat | 1a763c53c43d1d7e07ecf066bb0ed3d9dbc73af9 | [
"MIT"
] | 1 | 2021-12-31T15:08:27.000Z | 2021-12-31T15:08:27.000Z | import socket
import struct
import config
import json
import threading
import random
def multicast_handler(client_port: int):
    """Send a one-shot server-discovery datagram to the multicast group.

    Binds a UDP socket to `client_port` and sends the literal message
    'SERVER DISCOVERY' to config.MULTICAST_IP:config.MULTICAST_PORT, then
    closes the socket. No reply is read here — presumably the server
    answers by connecting back over TCP (see tcp_handler); verify.
    """
    # create the datagram socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', client_port))
    # set a timeout so the socket does not block indefinitely when trying to
    # receive data. NOTE(review): this function never receives — the timeout
    # appears to be a leftover; confirm it can be dropped.
    sock.settimeout(0.2)
    # Set the time-to-live for messages to 1 so they do not go past the
    # local network segment.
    ttl = struct.pack('b', 1)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    try:
        # send request to the multicast group
        print(f'CLIENT: Sending multicast message to {config.MULTICAST_IP}')
        message = 'SERVER DISCOVERY'
        multicast_group = (config.MULTICAST_IP, config.MULTICAST_PORT)
        sock.sendto(bytes(message, encoding='utf-8'), multicast_group)
    finally:
        # Always release the socket, even if sendto raises.
        sock.close()
def tcp_handler(port: int):
    """Accept one TCP connection on `port`, send a nickname, receive data.

    Listens on the given port, accepts a single connection, sends the
    user-entered nickname, then reads the server's client base into `buff`
    and decodes it as JSON before closing.
    """
    sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock_tcp.bind(('', port))
    sock_tcp.listen(5)
    # empty buffer
    buff = b''
    while True:
        print(f'CLIENT: Waiting for a TCP connection')
        connection, client_address = sock_tcp.accept()
        try:
            print(f'CLIENT: Connection from {client_address}')
            username = input('=== Provide Your Nickname === ')
            connection.sendall(bytes(username, encoding='utf8'))
            # receive the data in chunks and add to the buffer
            while True:
                print(f'CLIENT: Waiting for the server to send client base')
                data = connection.recv(512)
                buff += data
                if not data:
                    break
                # NOTE(review): this unconditional break exits after the
                # first recv, so at most 512 bytes are read despite the
                # "chunks" comment — confirm whether larger client bases
                # are expected (json.loads below would fail on truncation).
                break
        finally:
            print(f'CLIENT: Client base received')
            # NOTE(review): runs even when an exception occurred above, in
            # which case buff may be empty/partial and json.loads raises.
            res_dict = json.loads(buff.decode('utf-8'))
            # print(res_dict)
            print(f'CLIENT: Closing TCP connection')
            # clean up the connection
            connection.close()
            # break inside finally exits the accept loop after one client.
            break
if __name__ == '__main__':
    # Pick a random high port so multiple clients can run on one host.
    port = random.randint(50_000, 65_000)
    # Pass the selected port to the TCP thread so it listens on the same
    # port the discovery datagram was sent from.
    # Bug fix: the original passed the undefined name `client_port`
    # (args=(client_port,)), which raised NameError at startup; the value
    # intended — per the comment above — is `port`.
    # The TCP listener runs in the background as a daemon thread.
    th = threading.Thread(target=tcp_handler, args=(port,), daemon=True)
    th.start()
    multicast_handler(port)
    th.join()
| 32.383562 | 94 | 0.629019 | 304 | 2,364 | 4.769737 | 0.427632 | 0.024828 | 0.049655 | 0.027586 | 0.089655 | 0.089655 | 0.089655 | 0 | 0 | 0 | 0 | 0.012317 | 0.278765 | 2,364 | 72 | 95 | 32.833333 | 0.838123 | 0.189086 | 0 | 0.18 | 0 | 0 | 0.163255 | 0.011024 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.16 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |