hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0587d07321592ddb102cc4ed98640454fd0d67f7
| 4,589
|
py
|
Python
|
RockPaperScissors.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | null | null | null |
RockPaperScissors.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | null | null | null |
RockPaperScissors.py
|
andreimaftei28/projects-on-JetBrainAcademy
|
8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1
|
[
"MIT"
] | 3
|
2020-12-19T13:48:06.000Z
|
2021-08-12T18:36:33.000Z
|
"""Rock Paper Scisssors game using OOP"""
import random
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, remove
class RockPaperScissors:
    """Console rock-paper-scissors game with a persistent player rating.

    Attributes:
        defeat:  maps each move to the move(s) that beat it.
        choices: list of valid moves (may be replaced by custom options).
        score:   the player's current rating.
        name:    player name read from stdin at construction time.
    """
    def __init__(self):
        # Default win table: the value beats the key ("rock" loses to "paper").
        self.defeat = {"scissors": "rock", "paper" : "scissors", "rock" : "paper"}
        self.choices = ["rock", "paper", "scissors"]
        self.score = 0
        self.name = input("Enter your name: ")
    def file(self):
        """Look up the player's stored rating in 'rating.txt', run a game
        session, and write the updated rating back.

        NOTE(review): the file is written while it is still being iterated;
        print(..., file=file) writes at the current file position — confirm
        the resulting file layout is the intended one.
        """
        file = open("rating.txt", "r+", encoding="utf-8")
        for line in file:
            line1 = line.rstrip()
            if self.name == line1.split()[0]:
                score = line1.split()[1]
                self.score = int(score)
                self.play()
                print(line.replace(score, str(self.score)), file=file, flush=True)
                file.close()
                break
            else:
                # NOTE(review): this re-tests the condition just checked above,
                # so the inner else branch below is unreachable dead code.
                if self.name != line1.split()[0]:
                    continue
                else:
                    score = line1.split()[1]
                    self.play()
                    print(line.replace(score, str(self.score)), file=file, flush=True)
                    file.close()
                    break
        else:
            # for-else: player not found (or file empty) — play, then append
            # a brand-new "name score" entry.
            self.play()
            print(self.name, self.score, sep=" ", file=file, flush=True)
            file.close()
    def play(self):
        """Run the interactive game loop.

        Commands: '!rating' prints the current score, '!exit' ends the loop.
        A draw adds 50 points, a win adds 100, a loss adds nothing.
        """
        print(f"Hello, {self.name}")
        self.rewrite_options()
        print("Okay, let's start")
        while True:
            user_input = input("Enter your choice: ")
            if user_input == "!rating":
                print(f"Your rating: {self.score}")
                continue
            elif user_input == "!exit":
                print("Bye!")
                break
            else:
                choice = random.choice(self.choices)
                if user_input not in self.choices:
                    print("Invalid input")
                elif user_input == choice:
                    self.score += 50
                    print(f"There is a draw ({choice})")
                # NOTE(review): with default options self.defeat[choice] is a
                # string, so `in` is a substring test; after rewrite_options it
                # is a list, so `in` is membership — confirm both are intended.
                elif user_input in self.defeat[choice]:
                    self.score += 100
                    print(f"Well done. The computer chose {choice} and failed")
                else:
                    print(f"Sorry, but the computer chose {choice}")
    def rewrite_file(self):
        """Rewrite 'rating.txt' so each player appears once with their latest
        rating, via a temp file that then replaces the original."""
        names = []
        dict_ = {}
        # NOTE(review): both placeholders are immediately overwritten by
        # mkstemp() below; the hard-coded Windows path is dead code.
        fake_f = "rating.txt"
        abs_path = "C:/Users/dandei/Desktop/jetBrain_projects/rock_paper_scissors/rating.txt" #change this with your path
        fake_f, abs_path = mkstemp()
        with fdopen(fake_f, "w") as new_file:
            with open("rating.txt", "r+", encoding="utf-8") as file:
                content = file.read()
                content = content.split("\n")
                for element in content:
                    if len(element) > 1:
                        element = element.split()
                        names.append(element)
                # dict() over [name, score] pairs: later duplicates of a name
                # overwrite earlier ones.
                dict_ = dict(names)
                for key, value in dict_.items():
                    print(key, value, sep=" ", file=new_file)
        copymode("rating.txt", abs_path)
        remove("rating.txt")
        move(abs_path, "rating.txt")
    def rewrite_options(self):
        """Optionally replace the move set with user-supplied options.

        Reads a comma-separated list from stdin; empty input keeps the classic
        rock/paper/scissors setup.  For custom options, each move is beaten by
        the first half of the moves that follow it cyclically.
        """
        choice = input("Enter your game options: ")
        choices = choice.split(",")
        defeat_by = {}
        new_list = []
        if choice == "":
            return None
        else:
            self.choices = choices
            for i in range(len(choices)):
                # All other options, starting just after the current one.
                new_list = choices[i + 1:] + choices[:i]
                #wins_over
                defeat_by[choices[i]] = new_list[:(len(new_list)) // 2]
            self.defeat = defeat_by
# Ensure 'rating.txt' exists before playing (append mode creates it if absent).
with open("rating.txt", "a", encoding="utf-8"):
    pass
# Build the game object and run one full session, then compact the ratings file.
rps = RockPaperScissors()
rps.file()
rps.rewrite_file()
| 33.014388
| 123
| 0.504249
| 507
| 4,589
| 4.491124
| 0.319527
| 0.043478
| 0.022398
| 0.022398
| 0.153711
| 0.119455
| 0.089592
| 0.066755
| 0.066755
| 0.066755
| 0
| 0.007431
| 0.38418
| 4,589
| 138
| 124
| 33.253623
| 0.798301
| 0.134234
| 0
| 0.21875
| 0
| 0
| 0.127648
| 0.019068
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052083
| false
| 0
| 0.041667
| 0
| 0.114583
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0589b9d3ea2a64dcded6b8ab04bba1a44e732a41
| 2,813
|
py
|
Python
|
src/cbc_binary_toolkit/schemas.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 8
|
2020-05-12T18:08:52.000Z
|
2021-12-27T06:11:00.000Z
|
src/cbc_binary_toolkit/schemas.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 4
|
2020-05-13T16:07:49.000Z
|
2020-06-30T18:47:14.000Z
|
src/cbc_binary_toolkit/schemas.py
|
carbonblack/cbc-binary-toolkit
|
92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4
|
[
"MIT"
] | 3
|
2020-05-16T19:57:57.000Z
|
2020-11-01T08:43:31.000Z
|
# -*- coding: utf-8 -*-
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Schemas for Engine Results component"""
from schema import And, Or, Optional, Schema
# Schema for an IOC v2 record that still carries a per-IOC severity value.
IOCv2SEVSchema = Schema(
    {
        "id": And(str, len),
        "match_type": And(str, lambda value: value in ["query", "equality", "regex"]),
        "values": And([str], len),
        Optional("field"): And(str, len),
        Optional("link"): And(str, len),
        "severity": And(int, lambda n: 0 < n < 11)  # Needs stripped before sent to CBC
    }
)
# Schema for a plain IOC v2 record (no severity field).
IOCv2Schema = Schema(
    {
        "id": And(str, len),
        "match_type": And(str, lambda value: value in ["query", "equality", "regex"]),
        "values": And([str], len),
        Optional("field"): And(str, len),
        Optional("link"): And(str, len)
    }
)
# Schema for a report grouping IOCs; severity is an int in 1..10.
ReportSchema = Schema(
    {
        "id": And(str, len),
        "timestamp": And(int, lambda ts: ts > 0),
        "title": And(str, len),
        "description": And(str, len),
        "severity": And(int, lambda n: 0 < n < 11),
        Optional("link"): str,
        Optional("tags"): [str],
        "iocs_v2": [IOCv2Schema],
        Optional("visibility"): str
    }
)
# Schema for one engine's analysis response; binary_hash must be a 64-char
# hex digest (SHA-256 length).
EngineResponseSchema = Schema(
    {
        "iocs": [IOCv2SEVSchema],
        "engine_name": And(str, len),
        "binary_hash": And(str, lambda digest: len(digest) == 64),
        "success": bool
    }
)
# Schema for a binary's file metadata record; most fields are nullable.
BinaryMetadataSchema = Schema(
    {
        "sha256": And(str, lambda digest: len(digest) == 64),
        "url": And(str, len),
        "architecture": [str],
        "available_file_size": Or(int, None),
        "charset_id": Or(int, None),
        "comments": Or(str, None),
        "company_name": Or(str, None),
        "copyright": Or(str, None),
        "file_available": bool,
        "file_description": Or(str, None),
        "file_size": Or(int, None),
        "file_version": Or(str, None),
        "internal_name": Or(str, None),
        "lang_id": Or(int, None),
        "md5": And(str, lambda digest: len(digest) == 32),
        "original_filename": Or(str, None),
        "os_type": Or(str, None),
        "private_build": Or(str, None),
        "product_description": Or(str, None),
        "product_name": Or(str, None),
        "product_version": Or(str, None),
        "special_build": Or(str, None),
        "trademark": Or(str, None)
    }
)
| 31.606742
| 93
| 0.542126
| 328
| 2,813
| 4.582317
| 0.375
| 0.071856
| 0.083832
| 0.045243
| 0.290086
| 0.256154
| 0.234864
| 0.209581
| 0.209581
| 0.209581
| 0
| 0.014861
| 0.258443
| 2,813
| 88
| 94
| 31.965909
| 0.705657
| 0.218983
| 0
| 0.132353
| 0
| 0
| 0.205963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014706
| 0
| 0.014706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
058a7c137ede0bf5c3a55a3ce41c3dfb2936df30
| 2,079
|
py
|
Python
|
src/views/list.py
|
AllForJan/prizma-backend
|
fe866e74fa01e900cc7eab624bb5716a4bae056d
|
[
"MIT"
] | 2
|
2018-04-08T22:18:11.000Z
|
2018-04-26T08:12:46.000Z
|
src/views/list.py
|
AllForJan/prizma-backend
|
fe866e74fa01e900cc7eab624bb5716a4bae056d
|
[
"MIT"
] | null | null | null |
src/views/list.py
|
AllForJan/prizma-backend
|
fe866e74fa01e900cc7eab624bb5716a4bae056d
|
[
"MIT"
] | 2
|
2018-04-08T22:18:13.000Z
|
2018-04-08T22:18:18.000Z
|
from elasticsearch import Elasticsearch
from flask import request, jsonify
from flask_restful import Resource
from db.manager import get_conn
import settings
# Module-level DB connection created once at import time.
# NOTE(review): `conn` is never referenced in this module — presumably
# imported from elsewhere or leftover; confirm before removing.
conn = get_conn()
def append_range_filter(f, key, _from, to):
    """Append an Elasticsearch ``range`` clause for *key* to the filter list *f*.

    Only truthy bounds are emitted (``gte`` for *_from*, ``lte`` for *to*);
    when neither bound is given the list is left untouched.  The list is
    mutated in place and also returned for convenience.
    """
    if not (_from or to):
        return f
    bounds = {}
    if _from:
        bounds['gte'] = _from
    if to:
        bounds['lte'] = to
    f.append({'range': {key: bounds}})
    return f
class ListPO(Resource):
    """REST resource listing 'po' documents from the 'apa' Elasticsearch index."""
    def get(self):
        """Handle GET: match-all without ``q``; filtered name search with it.

        Optional query params rok_from/rok_to and suma_from/suma_to become
        range filters; filtered results are sorted by ``suma`` descending.
        """
        q = request.args.get('q', None)
        es = Elasticsearch(
            [settings.ELASTIC_HOST, ],
            timeout=30, max_retries=10, retry_on_timeout=True, port=settings.ELASTIC_PORT
        )
        if not q:
            # No search term: return every document.
            query = {'query': {'match_all': {}}}
            results = es.search(index='apa', doc_type='po', body=query)
            rows = [{
                'data': r['_source'], '_id': r['_id']
            } for r in results['hits']['hits']]
            return jsonify(rows)
        rok_from = request.args.get('rok_from', None)
        rok_to = request.args.get('rok_to', None)
        suma_from = request.args.get('suma_from', None)
        suma_to = request.args.get('suma_to')
        # append filters
        f = []
        append_range_filter(f, 'rok', rok_from, rok_to)
        append_range_filter(f, 'suma', suma_from, suma_to)
        # AND-match on the name field, sorted by amount descending.
        query = {
            "sort": [
                {"suma": {"order": "desc"}}
            ],
            "query": {
                "bool": {
                    "must": [
                        {
                            "match": {
                                "meno": {"query":q, "operator": "and"}
                            }
                        },
                    ],
                    # "filter": []
                }
            }
        }
        query['query']['bool']['must'].extend(f)
        results = es.search(index='apa', doc_type='po', body=query)
        rows = [{
            'data': r['_source'], '_id': r['_id']
        } for r in results['hits']['hits']]
        # NOTE(review): this branch returns the raw list while the match-all
        # branch returns jsonify(rows) — confirm both are intended.
        return rows
| 25.353659
| 89
| 0.457431
| 223
| 2,079
| 4.089686
| 0.340807
| 0.060307
| 0.076754
| 0.059211
| 0.192982
| 0.192982
| 0.192982
| 0.192982
| 0.192982
| 0.192982
| 0
| 0.00316
| 0.391053
| 2,079
| 81
| 90
| 25.666667
| 0.71722
| 0.012987
| 0
| 0.166667
| 0
| 0
| 0.100537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.083333
| 0
| 0.183333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
058f090a9e7707433a3105b87e3e591439fed2ac
| 8,377
|
py
|
Python
|
code/train/train_model.py
|
96jhwei/Genetic-U-Net
|
25116f01afcf8ed4386cd0fc258da15e1c982cb5
|
[
"MIT"
] | 14
|
2021-09-09T11:22:17.000Z
|
2022-03-14T10:06:36.000Z
|
code/train/train_model.py
|
96jhwei/Genetic-U-Net
|
25116f01afcf8ed4386cd0fc258da15e1c982cb5
|
[
"MIT"
] | 1
|
2021-11-24T10:30:36.000Z
|
2021-11-24T10:30:36.000Z
|
code/train/train_model.py
|
96jhwei/Genetic-U-Net
|
25116f01afcf8ed4386cd0fc258da15e1c982cb5
|
[
"MIT"
] | 5
|
2021-11-02T09:29:49.000Z
|
2022-03-25T09:44:25.000Z
|
import numpy
from torch.utils.data import DataLoader
from tqdm import tqdm
from loss.FocalLoss import FocalLossForSigmoid
import torch
from metrics.calculate_metrics import calculate_metrics
import shutil
from metrics.average_meter import AverageMeter
import torch.multiprocessing
from torch.nn.utils.clip_grad import clip_grad_norm_
import os
import sys
import numpy as np
import random
from thop import profile
from .util.get_optimizer import get_optimizer
from dataset.util.get_datasets import get_datasets
import multiprocessing as mp
sys.path.append('../')
def train_one_model(optimizer_name, learning_rate, l2_weight_decay, gen_num, ind_num, model, batch_size, epochs, device,
                    train_set_name, valid_set_name,
                    train_set_root, valid_set_root, exp_name,
                    mode='train'):
    """Train one candidate model of generation *gen_num* / individual *ind_num*.

    Trains with FocalLossForSigmoid and gradient clipping; from epoch
    ``valid_epoch`` (80) onward it validates every epoch, tracks the best
    F1 score, writes a per-individual CSV of metrics, and early-stops after
    70 stagnant epochs or a persistently low F1.

    Returns:
        (metrics, ok): dict of best metrics (flops/param plus quality
        metrics) and a bool that is False only when a RuntimeError aborted
        training.

    NOTE(review): the *mode* parameter is accepted but never used.
    """
    # Fixed seed so every evolved individual trains under identical conditions.
    seed = 12
    torch.cuda.empty_cache()
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.backends.cudnn.benchmark = True
    model.to(device)
    model.train()
    loss_func = FocalLossForSigmoid(reduction='mean').to(device)
    # Only parameters with requires_grad are handed to the optimizer.
    optimizer = get_optimizer(optimizer_name, filter(lambda p: p.requires_grad, model.parameters()), learning_rate, l2_weight_decay)
    train_set, num_return = get_datasets(train_set_name, train_set_root, True)
    valid_set, _ = get_datasets(valid_set_name, valid_set_root, False)
    train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, num_workers=3)
    valid_loader = DataLoader(dataset=valid_set, batch_size=1, shuffle=False, num_workers=1)
    best_f1_score = 0
    flag = 0          # epoch index of the best F1 seen so far
    count = 0         # epochs since the last F1 improvement (early-stop counter)
    valid_epoch = 80  # first epoch at which validation starts
    metrics_name = ['flops', 'param', 'accuracy', 'recall', 'specificity', 'precision', 'f1_score', 'auroc', 'iou']
    metrics = {}
    # Sentinel defaults: cost metrics start high (100), quality metrics at 0.
    for metric_name in metrics_name:
        if metric_name == 'flops' or metric_name == 'param':
            metrics.update({metric_name: 100})
        else:
            metrics.update({metric_name: 0})
    try:
        for i in range(epochs):
            train_tqdm_batch = tqdm(iterable=train_loader, total=numpy.ceil(len(train_set) / batch_size))
            for images, targets in train_tqdm_batch:
                images, targets = images.to(device), targets.to(device)
                optimizer.zero_grad()
                preds = model(images)
                loss = loss_func(preds, targets)
                loss.backward()
                # Clip to keep unstable evolved architectures from diverging.
                clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
            train_tqdm_batch.close()
            print('gens_{} individual_{}_epoch_{} train end'.format(gen_num, ind_num, i))
            epoch_acc = AverageMeter()
            epoch_recall = AverageMeter()
            epoch_precision = AverageMeter()
            epoch_specificity = AverageMeter()
            epoch_f1_score = AverageMeter()
            epoch_iou = AverageMeter()
            epoch_auroc = AverageMeter()
            if (i >= valid_epoch):
                with torch.no_grad():
                    # NOTE(review): model.eval() is never switched back to
                    # train() for the next epoch — confirm this is intended.
                    model.eval()
                    valid_tqdm_batch = tqdm(iterable=valid_loader, total=numpy.ceil(len(valid_set) / 1))
                    for images, targets in valid_tqdm_batch:
                        images = images.to(device)
                        targets = targets.to(device)
                        preds = model(images)
                        (acc, recall, specificity, precision,
                         f1_score, iou, auroc) = calculate_metrics(preds=preds, targets=targets, device=device)
                        epoch_acc.update(acc)
                        epoch_recall.update(recall)
                        epoch_precision.update(precision)
                        epoch_specificity.update(specificity)
                        epoch_f1_score.update(f1_score)
                        epoch_iou.update(iou)
                        epoch_auroc.update(auroc)
                        # Profile cost once, on the first validation epoch.
                        if i == valid_epoch:
                            flops, param = profile(model=model, inputs=(images,), verbose=False)
                            flops = flops / 1e11
                            param = param / 1e6
                    print('gens_{} individual_{}_epoch_{} validate end'.format(gen_num, ind_num, i))
                    print('acc:{} | recall:{} | spe:{} | pre:{} | f1_score:{} | auroc:{}'
                          .format(epoch_acc.val,
                                  epoch_recall.val,
                                  epoch_specificity.val,
                                  epoch_precision.val,
                                  epoch_f1_score.val,
                                  epoch_auroc.val))
                    if epoch_f1_score.val > best_f1_score:
                        # New best individual state: snapshot all metrics.
                        best_f1_score = epoch_f1_score.val
                        flag = i
                        count = 0
                        for key in list(metrics):
                            if key == 'flops':
                                metrics[key] = flops
                            elif key == 'param':
                                metrics[key] = param
                            elif key == 'accuracy':
                                metrics[key] = epoch_acc.val
                            elif key == 'recall':
                                metrics[key] = epoch_recall.val
                            elif key == 'specificity':
                                metrics[key] = epoch_specificity.val
                            elif key == 'precision':
                                metrics[key] = epoch_precision.val
                            elif key == 'f1_score':
                                metrics[key] = epoch_f1_score.val
                            elif key == 'auroc':
                                metrics[key] = epoch_auroc.val
                            elif key == 'iou':
                                metrics[key] = epoch_iou.val
                            else:
                                raise NotImplementedError
                        import pandas as pd
                        from os.path import join
                        # NOTE(review): the first column holds gen_num but is
                        # labelled 'epoch' — confirm the header is intended.
                        performance_df = pd.DataFrame(
                            data=[[gen_num, ind_num, epoch_acc.val, epoch_recall.val, epoch_specificity.val,
                                   epoch_precision.val,
                                   epoch_f1_score.val, epoch_iou.val, epoch_auroc.val]],
                            columns=['epoch', 'individual', 'acc', 'recall',
                                     'specificity', 'precision', 'f1_score', 'iou',
                                     'auroc', ]
                        )
                        performance_csv_path = join(os.path.abspath('.'), 'exps/{}/csv'.format(exp_name),
                                                    'gens_{} individual_{} performance.csv'.format(gen_num, ind_num))
                        performance_df.to_csv(performance_csv_path)
                    else:
                        if i >= valid_epoch:
                            count += 1
                    # Early stop: 70 epochs without improvement, or F1 still
                    # below 0.50 after 15 validation epochs.
                    end = None
                    if i > valid_epoch + 15 and best_f1_score < 0.50:
                        end = True
                    if (count >= 70) or end:
                        print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
                        print('gens_{} individual_{} train early stop'.format(gen_num, ind_num))
                        print('=======================================================================')
                        valid_tqdm_batch.close()
                        return metrics, True
                    print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
                    valid_tqdm_batch.close()
        print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
        print('=======================================================================')
    except RuntimeError as exception:
        # Best-effort cleanup (likely CUDA OOM); report failure to the caller.
        # NOTE(review): `images`/`targets` may be unbound if the error was
        # raised before the first batch — confirm.
        images.detach_()
        del images
        del model
        del targets
        return metrics, False
    return metrics, True
| 45.037634
| 133
| 0.493613
| 808
| 8,377
| 4.863861
| 0.209158
| 0.040967
| 0.02799
| 0.018321
| 0.183715
| 0.132061
| 0.132061
| 0.106616
| 0.084224
| 0.084224
| 0
| 0.01127
| 0.406828
| 8,377
| 185
| 134
| 45.281081
| 0.779634
| 0
| 0
| 0.1125
| 0
| 0
| 0.083374
| 0.022705
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00625
| false
| 0
| 0.125
| 0
| 0.15
| 0.05625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
059016200f557d7398f34c3a96008e7fee9686c3
| 961
|
py
|
Python
|
dataset/check_for_duplicates.py
|
mathildor/TF-SegNet
|
dff209c8174b5e8fa77b4c2644298f6903a09445
|
[
"MIT"
] | 98
|
2017-11-06T15:55:22.000Z
|
2022-03-22T11:29:47.000Z
|
dataset/check_for_duplicates.py
|
yingz9/TF-SegNet
|
dff209c8174b5e8fa77b4c2644298f6903a09445
|
[
"MIT"
] | 8
|
2017-11-15T06:05:41.000Z
|
2019-06-19T06:53:03.000Z
|
dataset/check_for_duplicates.py
|
yingz9/TF-SegNet
|
dff209c8174b5e8fa77b4c2644298f6903a09445
|
[
"MIT"
] | 34
|
2017-11-06T03:05:54.000Z
|
2022-01-25T16:00:09.000Z
|
import os
from PIL import Image
import numpy
from PIL import ImageChops
""" TESTED:
No duplicates in:
- within validation images first part (stopped because of training - took to much time)
"""
# Path to the directory of images to scan for exact duplicates.
image_path="../../IR_images/combined_dataset/val_images/images"
# image_path="../../IR_images/combined_dataset/val_images/images"
images = sorted(os.listdir(image_path))
# NOTE(review): this O(n^2) loop also pairs each file with ITSELF, so
# "Same image!!!" is printed for every file, and every distinct pair is
# compared twice (A,B and B,A) — confirm; itertools.combinations would fix both.
for image_file_1 in images:
    for image_file_2 in images:
        image1 = Image.open(os.path.join(image_path,image_file_1))
        image2 = Image.open(os.path.join(image_path,image_file_2))
        #pixels = image.load()
        # A None bounding box from ImageChops.difference means no pixel
        # differs, i.e. the two images are identical.
        if ImageChops.difference(image1, image2).getbbox() is None:
        # if(image1==image2):# and image_file_1 != image_file_2):
            print("Same image!!!")
            print(image_file_1)
            print(image_file_2)
        # else:
            # print("not same")
            # print(image_file_1)
            # print(image_file_2)
| 26.694444
| 95
| 0.648283
| 131
| 961
| 4.519084
| 0.389313
| 0.152027
| 0.084459
| 0.057432
| 0.385135
| 0.385135
| 0.385135
| 0.385135
| 0.283784
| 0
| 0
| 0.021798
| 0.236212
| 961
| 35
| 96
| 27.457143
| 0.784741
| 0.223725
| 0
| 0
| 0
| 0
| 0.105
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
059038232d1c85e48c2eed487377d93d1ad944f4
| 1,983
|
py
|
Python
|
_posts/import.py
|
suepeng/suepeng.github.io
|
844e0063e0604a77886aad5eaea588c4df2792a9
|
[
"MIT"
] | null | null | null |
_posts/import.py
|
suepeng/suepeng.github.io
|
844e0063e0604a77886aad5eaea588c4df2792a9
|
[
"MIT"
] | null | null | null |
_posts/import.py
|
suepeng/suepeng.github.io
|
844e0063e0604a77886aad5eaea588c4df2792a9
|
[
"MIT"
] | null | null | null |
import os, glob
from dateutil import parser
from bs4 import BeautifulSoup
# Strip a "KEY:" prefix (*cap*) from a metadata line and trim whitespace.
# NOTE(review): .replace("\s", "") removes the literal two characters
# backslash-s, not whitespace — looks like a regex habit; confirm intent.
ext = lambda line, cap: line.replace("\s", "").replace(cap, "").strip()
def write_post(doc):
    """Convert one exported blog document (a list of raw lines) to a markdown post.

    Assumes *doc* starts with TITLE:, ..., DATE:, PRIMARY CATEGORY:, STATUS:
    header lines — presumably a blog-export format; confirm against raw.txt.
    Writes '<tag>/<date>-<title>.md' with front matter and returns True.
    """
    meta = {
        'title' : ext(doc[0], "TITLE:"),
        'date' : parser.parse(ext(doc[2], "DATE:")).strftime("%Y-%m-%d"),
        'tag' : ext(doc[3], "PRIMARY CATEGORY:"),
        'status': ext(doc[4], "STATUS:"),
        # All <img> tags anywhere in the document, via BeautifulSoup.
        'imgs' : BeautifulSoup("".join(doc), features="html.parser").find_all('img'),
    }
    if not os.path.exists(meta['tag']):
        os.makedirs(meta['tag'])
    # Slashes in the title would create sub-directories, so replace them.
    fname = f"{meta['tag']}/{meta['date']}-{meta['title'].replace('/', ' ')}.md"
    publish = 'true' if meta['status'] == 'publish' else 'false'
    # First image in the document becomes the post's feature image, if any.
    feature = meta['imgs'][0].attrs['src'] if len(meta['imgs']) > 0 else None
    with open(fname, "wt") as f:
        # write meta
        f.write("---\n")
        f.write(f"layout: post\n")
        f.write(f"title: {meta['title']}\n")
        f.write(f"date: {meta['date']}\n")
        f.write(f"tag: {meta['tag']}\n")
        if feature:
            f.write(f"feature: \"{feature}\"\n")
        f.write(f"published: {publish} \n")
        f.write("---\n")
        # write body: lines after the BODY marker, skipping '---' separators.
        body = False
        for d in doc:
            if (d[:3] == '---'):
                continue
            if ('<!-- more -->' in d):
                d = d.replace('<!-- more -->', "").strip()
            if len(d) > 0 and body:
                f.write(d)
            # Latch on once a line mentions BODY; stays True afterwards.
            body = ('BODY' in d) or body
    print(f"done {fname}")
    return True
#------------------------------
# Main
#------------------------------
if __name__ == "__main__":
    # Split raw.txt into documents at each TITLE: marker and convert each
    # accumulated document to a markdown post.
    posts = 0
    doc = []
    for idx, line in enumerate(open("raw.txt").readlines()):
        if len(doc) and ('TITLE:' in line):
            # A new post starts here: flush the previous document first.
            posts += write_post(doc)
            # NOTE(review): `meta` is assigned but never used in this scope.
            doc, meta = [], {}
        doc.append(line)
    # latest post
    posts += write_post(doc)
    # NOTE(review): `idx` is unbound if raw.txt is empty — confirm acceptable.
    print(f"converted {posts} posts with {idx} lines")
| 31.983871
| 86
| 0.474534
| 248
| 1,983
| 3.745968
| 0.358871
| 0.064586
| 0.04521
| 0.043057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007092
| 0.288956
| 1,983
| 61
| 87
| 32.508197
| 0.651773
| 0.050429
| 0
| 0.083333
| 0
| 0
| 0.224533
| 0.029867
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.0625
| 0
| 0.104167
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
059744913f1e643dc9fe5a6332d2aff7847d00ed
| 3,342
|
py
|
Python
|
Project/checking_test2.py
|
mihdenis85/psycho_test
|
51bbe82043427d48e80ff36197815212c5c2a14c
|
[
"MIT"
] | null | null | null |
Project/checking_test2.py
|
mihdenis85/psycho_test
|
51bbe82043427d48e80ff36197815212c5c2a14c
|
[
"MIT"
] | null | null | null |
Project/checking_test2.py
|
mihdenis85/psycho_test
|
51bbe82043427d48e80ff36197815212c5c2a14c
|
[
"MIT"
] | null | null | null |
def specialization(a, spec, jobs):
    """Map a raw interest score *a* to a human-readable verdict for sphere *spec*.

    0-2: interest not expressed; 3-6: moderate interest (suggest *jobs*);
    7-8: strong interest.  Scores outside 0-8 fall through and yield None
    (original behaviour preserved).
    """
    if 0 <= a <= 2:
        return f'{spec}: интерес к данной профессиональной сфере не выражен'
    if 3 <= a <= 6:
        return (f'{spec}: профессиональная направленность и интерес выражены в средней степени. '
                f'Возможно вам будут интересны такие профессии будущего, как {jobs}')
    if 7 <= a <= 8:
        return (f'{spec}: профессиональная направленность выражена довольно ярко и отчетливо. '
                f'Вам будут интересны такие профессии, которые будут актуальны в будущем, как {jobs}')
def check_test2(answers):
    """Score the 20-question career test and return the combined verdict text.

    Each entry of *answers* is expected to be 1 or 2; any other value adds no
    points (exactly as before).  Every question credits one of five interest
    counters, and the answer->counter assignment repeats with period 10, so
    the original 100-line if/elif ladder is replaced by a lookup table.
    Raises IndexError if *answers* has fewer than 20 entries (original
    behaviour).
    """
    # (counter credited for answer 1, counter credited for answer 2),
    # indexed by question number modulo 10 — transcribed 1:1 from the
    # original per-question branches.
    credit = [
        ('p', 't'), ('chez1', 'z'), ('h', 'p'), ('t', 'chez1'), ('z', 'h'),
        ('p', 'chez1'), ('h', 't'), ('chez1', 'h'), ('t', 'z'), ('p', 'z'),
    ]
    scores = {'p': 0, 't': 0, 'chez1': 0, 'z': 0, 'h': 0}
    for question in range(20):
        answer = answers[question]
        on_one, on_two = credit[question % 10]
        if answer == 1:
            scores[on_one] += 1
        elif answer == 2:
            scores[on_two] += 1
    p = scores['p']
    t = scores['t']
    chez1 = scores['chez1']
    z = scores['z']
    h = scores['h']
    # Build one human-readable verdict per interest sphere.
    pechat1 = specialization(p, 'Природа', 'ИТ-генетик, Биофармаколог, Архитектор живых систем, Парковый эколог, ГМО-агроном, Портовый эколог, Сельскохозяйственный эколог, Космобиолог, Урбанист-эколог')
    pechat2 = specialization(t, 'Техника', 'Проектировщик композитных конструкций для транспортных средств, Проектировщик нанотехнологических материалов, Глазир, Архитектор территорий, Конструктор новых металлов')
    pechat3 = specialization(chez1, 'Сфера обслуживания', 'Врач, Генетический консультант, Молекулярный диетолог, Тренер творческих занятий, Личный тьютор по эстетическому развитию, Разработчик персональных пенсионных акладов')
    pechat4 = specialization(z, 'Точные науки и музыка(игра на музыкальных инструментах)', 'Музыкант, Танцор, Переводчик фильмов, Энергоаудитор, Оператор Многофункциональных технических комплексов, Агроном экономист, ')
    pechat5 = specialization(h, 'Творческие профессии', 'Создатель спецэффектов, Видеохудожник, Театральный художник, Аранжировщик, Шоураннер, Балетмейстер, Дирижёр, Живописец, Танцор, Режиссер, Художник-технолог, Science-художник, Видеограф, Специалист по озвучиванию и звуковым спецэффектам в кино, Инфо-стилист, Архитектор виртуальности')
    return pechat1 + ' ' + pechat2 + ' ' + pechat3 + ' ' + pechat4 + ' ' + pechat5
| 27.85
| 339
| 0.578995
| 470
| 3,342
| 4.114894
| 0.340426
| 0.08273
| 0.041365
| 0.072389
| 0.271975
| 0.239917
| 0.239917
| 0.239917
| 0.235781
| 0.235781
| 0
| 0.060144
| 0.293537
| 3,342
| 120
| 340
| 27.85
| 0.759
| 0
| 0
| 0.672269
| 0
| 0.02521
| 0.399641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016807
| false
| 0
| 0
| 0
| 0.02521
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05975def902880bc29f1fd9e4b623039913f810f
| 4,003
|
py
|
Python
|
src/upload/upload.py
|
alliance-genome/agr_ferret
|
e2ccef16308b1a8a6f1b2a3dde6e29e0530da721
|
[
"MIT"
] | 2
|
2020-07-22T14:25:00.000Z
|
2021-09-20T18:29:08.000Z
|
src/upload/upload.py
|
alliance-genome/agr_ferret
|
e2ccef16308b1a8a6f1b2a3dde6e29e0530da721
|
[
"MIT"
] | 6
|
2019-09-24T14:09:42.000Z
|
2021-06-07T15:27:55.000Z
|
src/upload/upload.py
|
alliance-genome/agr_ferret
|
e2ccef16308b1a8a6f1b2a3dde6e29e0530da721
|
[
"MIT"
] | 3
|
2020-12-19T08:57:51.000Z
|
2020-12-19T08:58:09.000Z
|
# Functions for use in downloading files.
import logging, os, requests, json, hashlib, urllib
from requests_toolbelt.utils import dump
from retry import retry
# Module-level logger named after this module, used by all functions below.
logger = logging.getLogger(__name__)
def create_md5(worker, filename, save_path):
    """Compute and return the hex MD5 digest of ``save_path/filename``.

    Reads the file in 4 KiB chunks so large files never sit wholly in
    memory; *worker* is only used to tag the log lines.
    """
    # Generate md5
    logger.info('{}: Generating md5 hash for {}.'.format(worker, filename))
    digest = hashlib.md5()
    with open(os.path.join(save_path, filename), 'rb') as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    logger.info('{}: Finished generating md5 hash: {}'.format(worker, digest.hexdigest()))
    return digest.hexdigest()
def upload_file(worker, filename, save_path, upload_file_prefix, config_info):
    """POST one data file to the FMS submit endpoint.

    The file is sent as a multipart upload under the field name
    *upload_file_prefix*, authenticated with the API key from *config_info*.
    Fixed: the file handle is now closed after the request instead of being
    leaked (the original opened it and never closed it).
    """
    file_path = os.path.join(save_path, filename)
    headers = {
        'Authorization': 'Bearer {}'.format(config_info.config['API_KEY'])
    }
    logger.debug('{}: Attempting upload of data file: {}'.format(worker, file_path))
    logger.debug('{}: Attempting upload with header: {}'.format(worker, headers))
    logger.info("{}: Uploading data to {}) ...".format(worker, config_info.config['FMS_API_URL']+'/api/data/submit/'))
    # Keep the handle open only for the duration of the request.
    with open(file_path, 'rb') as handle:
        response = requests.post(config_info.config['FMS_API_URL']+'/api/data/submit/',
                                 files={upload_file_prefix: handle}, headers=headers)
    logger.info(response.text)
@retry(tries=5, delay=5, logger=logger)
def upload_process(worker, filename, save_path, data_type, data_sub_type, config_info):
    """Upload a downloaded file to the FMS unless its MD5 matches the latest version.

    Generates an MD5 for the local file, queries the FMS ("chipmunk") for the
    latest datafile of this type/subtype, and uploads only when no prior file
    exists or the MD5s differ. Retried up to 5 times with a 5 s delay on any
    exception (network failures included) via @retry.
    """
    release = config_info.config['ALLIANCE_RELEASE']
    # Multipart field name encodes release, type, and subtype for the FMS.
    upload_file_prefix = '{}_{}_{}'.format(release, data_type, data_sub_type)
    generated_md5 = create_md5(worker, filename, save_path)
    # Attempt to grab MD5 for the latest version of the file.
    logger.debug(config_info.config['FMS_API_URL'] + '/api/datafile/by/{}/{}?latest=true'.format(data_type, data_sub_type))
    url_to_check = config_info.config['FMS_API_URL'] + '/api/datafile/by/{}/{}?latest=true'.format(data_type, data_sub_type)
    chip_response = urllib.request.urlopen(url_to_check)
    # Decode using the charset advertised by the server, defaulting to UTF-8.
    # NOTE(review): 'chip_data' and 'data' alias the same parsed JSON; 'data'
    # is never used afterwards.
    chip_data = data = json.loads(chip_response.read().decode(chip_response.info().get_param('charset') or 'utf-8'))
    logger.debug('{}: Retrieved API data from chipmunk: {}'.format(worker, chip_data))
    # Check for existing MD5
    logger.info('{}: Checking for existing MD5 from chipmunk.'.format(worker))
    # Logic for uploading new files based on existing and new MD5s.
    if not chip_data:
        # Empty response: nothing on record for this type/subtype yet.
        logger.info('{}: No response received from the FMS. A new file will be uploaded.'.format(worker))
        logger.info('{}: File: {}'.format(worker, filename))
        upload_file(worker, filename, save_path, upload_file_prefix, config_info)
    else:
        existing_md5 = chip_data[0].get('md5Sum')
        if existing_md5:
            logger.info('{}: Previous MD5 found: {}'.format(worker, existing_md5))
            if existing_md5 == generated_md5:
                # Identical content already on the FMS — skip the upload.
                logger.info('{}: Existing MD5 matches the newly generated MD5. The file will not be uploaded.'.format(worker))
                logger.info('{}: File: {}'.format(worker, filename))
                logger.info('{}: Existing: {} New: {}'.format(worker, existing_md5, generated_md5))
            else:
                logger.info('{}: Existing MD5 does not match the newly generated MD5. A new file will be uploaded.'.format(worker))
                logger.info('{}: File: {}'.format(worker, filename))
                logger.info('{}: Existing: {} New: {}'.format(worker, existing_md5, generated_md5))
                upload_file(worker, filename, save_path, upload_file_prefix, config_info)
        else:
            # Record exists but carries no md5Sum; upload to establish one.
            logger.info('{}: Existing MD5 not found. A new file will be uploaded.'.format(worker))
            logger.info('{}: File: {}'.format(worker, filename))
            logger.info('{}: Existing: {} New: {}'.format(worker, existing_md5, generated_md5))
            upload_file(worker, filename, save_path, upload_file_prefix, config_info)
| 51.320513
| 131
| 0.673995
| 526
| 4,003
| 4.937262
| 0.241445
| 0.087794
| 0.048518
| 0.059299
| 0.433577
| 0.401232
| 0.367347
| 0.367347
| 0.342703
| 0.313439
| 0
| 0.012515
| 0.181614
| 4,003
| 77
| 132
| 51.987013
| 0.78022
| 0.048214
| 0
| 0.236364
| 0
| 0
| 0.239222
| 0.017876
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.054545
| 0
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05995048419b1dbd1bd29b14c238cf37023f8b47
| 2,740
|
py
|
Python
|
lib/strider/virt/vagrantbox.py
|
jcftang/strider
|
432a68eb1303541b6d955bd6ecf7439d1f9b0d48
|
[
"Apache-2.0"
] | 16
|
2016-02-10T13:06:50.000Z
|
2021-02-28T06:21:16.000Z
|
lib/strider/virt/vagrantbox.py
|
jcftang/strider
|
432a68eb1303541b6d955bd6ecf7439d1f9b0d48
|
[
"Apache-2.0"
] | 4
|
2016-02-20T16:33:40.000Z
|
2016-05-28T10:46:06.000Z
|
lib/strider/virt/vagrantbox.py
|
jcftang/strider
|
432a68eb1303541b6d955bd6ecf7439d1f9b0d48
|
[
"Apache-2.0"
] | 1
|
2016-09-01T11:06:56.000Z
|
2016-09-01T11:06:56.000Z
|
import vagrant
import os
from subprocess import CalledProcessError
from strider.common.instance_data import InstanceData, SshData
import strider.common.logger
class Vagrantbox(object):
    """Vagrant-backed instance provider.

    Wraps python-vagrant to bring a box up, describe its SSH endpoint,
    destroy it, and bake (package) it into a reusable .box file.
    """

    def __init__(self,
                 name=None,
                 ssh=None,
                 basebox=None,
                 bake_name=None,
                 bake_description=None,
                 user_data=None):
        """Store instance settings and build the vagrant driver.

        ssh must be a dict; its 'username' / 'private_key_path' entries
        (which may be None) override values reported by vagrant itself.
        NOTE(review): bake_description and user_data are accepted for
        interface parity but are not used by this provider.
        """
        self.name = name
        self.bake_name = bake_name
        self.basebox = basebox
        self.ssh = ssh
        # Bug fix: only strider.common.logger is imported at the top of this
        # module; the original referenced strider.utils.logger, which raises
        # AttributeError unless strider.utils happens to be imported elsewhere.
        self.log = strider.common.logger.get_logger('Vagrant')
        if not isinstance(self.ssh, dict):
            raise Exception("expecting 'ssh' to be a dictionary")
        self.vagrant_instance = vagrant.Vagrant()

    def describe(self):
        """Return InstanceData for the box, with present=False when not running."""
        details = self._details()
        if details is None:
            return InstanceData(present=False)
        else:
            # Explicit ssh config wins over what `vagrant ssh-config` reports.
            if self.ssh['username'] is not None:
                username = self.ssh['username']
            else:
                username = "vagrant"
            if self.ssh['private_key_path'] is not None:
                private_key_path = self.ssh['private_key_path']
            else:
                private_key_path = details['IdentityFile']
            port = details['Port']
            host = details['HostName']
            ssh_data = SshData(keyfile=private_key_path,
                               user=username,
                               host=host,
                               port=port)
            return InstanceData(present=True,
                                provider_specific=details,
                                ssh=ssh_data)

    def destroy(self):
        """Destroy the instance and remove the generated Vagrantfile (idempotent)."""
        self.log("destroying instance")
        try:
            self.vagrant_instance.destroy()
        except CalledProcessError:
            self.log("already destroyed instance")
        try:
            os.remove("./Vagrantfile")
        except OSError:
            self.log("already removed Vagrantfile")

    def up(self):
        """Initialise the Vagrantfile from the base box and bring the box up (idempotent)."""
        self.log("determining if we need to create an instance")
        try:
            self.vagrant_instance.init(box_name=self.basebox)
        except CalledProcessError:
            self.log("already initialised instance")
        try:
            self.log("bring up instance")
            self.vagrant_instance.up()
        except CalledProcessError:
            self.log("already up")

    def _details(self):
        """Return `vagrant ssh-config` details, or None when no instance is running."""
        try:
            conf = self.vagrant_instance.conf()
            return conf
        except CalledProcessError:
            self.log("No instance running")
            return None

    def bake(self):
        """Package the box as <bake_name>.box, then ensure the instance is up again."""
        self.log("baking vagrant box")
        os.system("vagrant package --output {}.box".format(self.bake_name))
        self.up()
| 31.860465
| 75
| 0.550365
| 277
| 2,740
| 5.32491
| 0.310469
| 0.047458
| 0.064407
| 0.084068
| 0.146441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.362044
| 2,740
| 85
| 76
| 32.235294
| 0.843822
| 0
| 0
| 0.16
| 0
| 0
| 0.135766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.066667
| 0
| 0.213333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
552672dd092eb5fb84094dd67c6ad2cf6eb3df04
| 4,739
|
py
|
Python
|
python/aces/lutFormats/tests/UnitTestsLutFormats.py
|
aforsythe/clf
|
47ba8bee31bd13e4f23632c7b0a38293be31c019
|
[
"AMPAS"
] | 43
|
2015-07-09T23:13:41.000Z
|
2022-02-04T15:45:42.000Z
|
python/aces/lutFormats/tests/UnitTestsLutFormats.py
|
aforsythe/clf
|
47ba8bee31bd13e4f23632c7b0a38293be31c019
|
[
"AMPAS"
] | 1
|
2019-09-18T14:30:39.000Z
|
2019-09-18T14:30:39.000Z
|
python/aces/lutFormats/tests/UnitTestsLutFormats.py
|
aforsythe/clf
|
47ba8bee31bd13e4f23632c7b0a38293be31c019
|
[
"AMPAS"
] | 9
|
2015-07-10T15:26:55.000Z
|
2020-08-20T11:52:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The Academy / ASC Common LUT Format Sample Implementations are provided by the
Academy under the following terms and conditions:
Copyright © 2015 Academy of Motion Picture Arts and Sciences ("A.M.P.A.S.").
Portions contributed by others as indicated. All rights reserved.
A worldwide, royalty-free, non-exclusive right to copy, modify, create
derivatives, and use, in source and binary forms, is hereby granted, subject to
acceptance of this license. Performance of any of the aforementioned acts
indicates acceptance to be bound by the following terms and conditions:
* Copies of source code, in whole or in part, must retain the above copyright
notice, this list of conditions and the Disclaimer of Warranty.
* Use in binary form must retain the above copyright notice, this list of
conditions and the Disclaimer of Warranty in the documentation and/or other
materials provided with the distribution.
* Nothing in this license shall be deemed to grant any rights to trademarks,
copyrights, patents, trade secrets or any other intellectual property of
A.M.P.A.S. or any contributors, except as expressly stated herein.
* Neither the name "A.M.P.A.S." nor the name of any other contributors to this
software may be used to endorse or promote products derivative of or based on
this software without express prior written permission of A.M.P.A.S. or the
contributors, as appropriate.
This license shall be construed pursuant to the laws of the State of California,
and any disputes related thereto shall be subject to the jurisdiction of the
courts therein.
Disclaimer of Warranty: THIS SOFTWARE IS PROVIDED BY A.M.P.A.S. AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL A.M.P.A.S., OR ANY
CONTRIBUTORS OR DISTRIBUTORS, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, RESITUTIONARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, THE ACADEMY SPECIFICALLY
DISCLAIMS ANY REPRESENTATIONS OR WARRANTIES WHATSOEVER RELATED TO PATENT OR
OTHER INTELLECTUAL PROPERTY RIGHTS IN THE ACES CONTAINER REFERENCE
IMPLEMENTATION, OR APPLICATIONS THEREOF, HELD BY PARTIES OTHER THAN A.M.P.A.S.,
WHETHER DISCLOSED OR UNDISCLOSED.
"""
# Module authorship / status metadata.
__author__ = 'Haarm-Pieter Duiker'
__copyright__ = 'Copyright (C) 2015 Academy of Motion Picture Arts and Sciences'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = 'acessupport@oscars.org'
__status__ = 'Production'

# Semantic version assembled from the three components below.
__major_version__ = '1'
__minor_version__ = '0'
__change_version__ = '0'
__version__ = '.'.join((__major_version__,
                        __minor_version__,
                        __change_version__))

'''
Simple tests of the lutFormats module
Should be turned into a proper set of unit tests.
'''
import os
import sys

# Make sure we can import lutFormats
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))

import lutFormats

# Scratch directory for the LUTs written below.
tmpDir = "/tmp"

#aces1OCIOConfirDir = "/work/client/academy/ocio/hpd/OpenColorIO-Configs/aces_1.0.0"
# NOTE(review): placeholder — must point at a local ACES 1.0.0 OCIO config
# checkout before this script can run.
aces1OCIOConfirDir = "/path/to/OpenColorIO-Configs/aces_1.0.0"

# Source LUTs: a 1D .spi1d LUT and a baked .csp LUT from the config.
spiPath = "%s/luts/ACEScc_to_linear.spi1d" % aces1OCIOConfirDir
cspPath = "%s/baked/maya/sRGB (D60 sim.) for ACEScg Maya.csp" % aces1OCIOConfirDir

# Read both through the lutFormats registry.
spipl = lutFormats.Registry.read( spiPath )
csppl = lutFormats.Registry.read( cspPath )

# Round-trip the process lists back out in several formats and data layouts
# (spi1d, spi3d, csp, clf, ctl; 1D, 3D, and 1D_3D_1D shaper variants).
newSpiPath = "%s/ACEScc_to_linear_new.spi1d" % tmpDir
lutFormats.Registry.write(spipl, newSpiPath)

newSpi3dPath = "%s/srgb_new.spi3d" % tmpDir
lutFormats.Registry.write(csppl, newSpi3dPath, lutDataFormat="3D")

newCspPath = "%s/srgb_new_3d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCspPath, lutDataFormat="3D")

newCsp1DPath = "%s/srgb_new_1d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCsp1DPath)

newCsp1D3DPath = "%s/srgb_new_1d3d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCsp1D3DPath, lutDataFormat="1D_3D_1D")

newClf1D3DPath = "%s/srgb_new_1d3d.clf" % tmpDir
lutFormats.Registry.write(csppl, newClf1D3DPath, lutDataFormat="1D_3D_1D")

newCtl1DPath = "%s/srgb_new_1d.ctl" % tmpDir
lutFormats.Registry.write(csppl, newCtl1DPath)

newCtl1D3DPath = "%s/srgb_new_3d.ctl" % tmpDir
lutFormats.Registry.write(csppl, newCtl1D3DPath, lutDataFormat="3D")
| 40.853448
| 84
| 0.779067
| 687
| 4,739
| 5.256186
| 0.423581
| 0.049848
| 0.053171
| 0.064248
| 0.211299
| 0.15702
| 0.091941
| 0.078649
| 0.043755
| 0.043755
| 0
| 0.015222
| 0.140536
| 4,739
| 115
| 85
| 41.208696
| 0.871102
| 0.606246
| 0
| 0
| 0
| 0
| 0.265225
| 0.068298
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.081081
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
552683d69b93369ce9f2b67f499349c272254782
| 10,177
|
py
|
Python
|
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
########################################################################
# Hazard_CFW.py
#
#
##########################################################################
import GenericHazards
import string, time, re, os, types, copy
class TextProduct(GenericHazards.TextProduct):
    """CFW (Coastal/Lakeshore Flood Warning) hazard text product.

    Specialises the GenericHazards formatter with CFW-specific allowed
    hazards, bullet templates, and post-processing of the generated text.
    The <angle-bracket> tokens in Definition are site-substitution
    placeholders filled in when the template is localised.
    """
    Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
    # NOTE(review): the None assignment is immediately overwritten by the
    # baseline display name on the next line.
    Definition['displayName'] = None
    Definition['displayName'] = "BaselineHazard_CFW_<MultiPil> (Coastal/LakeShore Flooding)"
    Definition["defaultEditAreas"] = "EditAreas_PublicZones_<site>_<MultiPil>"
    Definition["mapNameForCombinations"] = "Zones_<site>" # Map background for creating Combinations

    # Header configuration items
    Definition["productName"] = "Coastal Hazard Message"  # Warning! DO NOT CHANGE.
    # The productName gets substituted later in the formatter!
    Definition["fullStationID"] = "<fullStationID>"  # full station identifier (4letter)
    Definition["wmoID"] = "<wmoID>"  # WMO ID
    Definition["pil"] = "<pil>"  # product pil
    #Definition["areaName"] = "Statename"  # Name of state, such as "Georgia"
    Definition["wfoCityState"] = "<wfoCityState>"  # Location of WFO - city state
    Definition["wfoCity"] = "<wfoCity>"  # WFO Name as it should appear in a text product
    Definition["textdbPil"] = "<textdbPil>"  # Product ID for storing to AWIPS text database.
    Definition["awipsWANPil"] = "<awipsWANPil>"  # Product ID for transmitting to AWIPS WAN.
    Definition["outputFile"] = "{prddir}/TEXT/CFW_<MultiPil>.txt"
    Definition["bulletProd"] = 1  # If 1, the product has a bullet format

    # OPTIONAL CONFIGURATION ITEMS
    #Definition["database"] = "Official"  # Source database. "Official", "Fcst", or "ISC"
    #Definition["displayOutputDialog"] = 0  # If 1 will display results when finished
    Definition["debug"] = 1
    #Definition["headlineEditAreaGroup"] = "Zones"  # Name of EditAreaGroup for sampling headlines
    Definition["purgeTime"] = 8  # Maximum hours for expireTime from issueTime
    Definition["includeCities"] = 0  # Cities included in area header
    Definition["accurateCities"] = 0  # If 1, cities are based on grids;
    # otherwise full list is included
    Definition["cityLocation"] = "CityLocation"  # City lat/lon dictionary to use
    #Definition["cityDescriptor"] = "Including the cities of"
    Definition["includeZoneNames"] = 1  # Zone names will be included in the area header
    Definition["lineLength"] = 66  # line length
    Definition["easPhrase"] = "URGENT - IMMEDIATE BROADCAST REQUESTED"
    Definition["includeOverviewHeadline"] = 1  # If 1, the overview header is templated
    Definition["includeOverview"] = 1  # If 1, the overview section is templated
    #Definition["hazardSamplingThreshold"] = (10, None)  # (%cov, #points)

    ###
    ### Text to insert below the last $$ of the product (WFO URL)
    ### use "" if you do not want text to appear
    ## Definition["urlText"] = "http://www.weather.gov/miami"
    ### no additional text example
    Definition["urlText"] = ""
    ### multiple line example
    ## Definition["urlText"] = "For more information from NOAA/s National Weather Service visit...\n" + \
    ##                         "http://weather.gov/saltlakecity"
    ###

    def __init__(self):
        GenericHazards.TextProduct.__init__(self)

    #
    # These are the products allowed in the Coastal Flood Products
    #
    def allowedHazards(self):
        """Return (phensig, allowed VTEC actions, hazard group) for each hazard this product carries."""
        allActions = ["NEW", "EXA", "EXB", "EXT", "CAN", "CON", "EXP"]
        return [
            ('CF.W', allActions, 'CoastalFlood'),           # COASTAL FLOOD WARNING
            ('CF.Y', allActions, 'CoastalFlood'),           # COASTAL FLOOD ADVISORY
            ('CF.A', allActions, 'CoastalFlood'),           # COASTAL FLOOD WATCH
            ('CF.S', allActions, 'CoastalFloodStatement'),  # COASTAL FLOOD STATEMENT
            ('LS.W', allActions, 'CoastalFlood'),           # LAKESHORE FLOOD WARNING
            ('LS.Y', allActions, 'CoastalFlood'),           # LAKESHORE FLOOD ADVISORY
            ('LS.A', allActions, 'CoastalFlood'),           # LAKESHORE FLOOD WATCH
            ('LS.S', allActions, 'CoastalFloodStatement'),  # LAKESHORE FLOOD STATEMENT
            ('SU.W', allActions, 'HighSurf'),               # HIGH SURF WARNING
            ('SU.Y', allActions, 'HighSurf'),               # HIGH SURF ADVISORY
            ('BH.S', allActions, 'BeachHaz'),               # Beach Hazards Statement
            ('RP.S', allActions, 'RipCurrent'),             # HIGH RIP CURRENT RISK
            ]

    def _bulletDict(self):
        """Map hazard phenomenon code to its comma-separated bullet-section template."""
        return {
            "CF" : ("Coastal Flooding,Timing,Impacts"),                ### coastal flood warning, advisory, watch
            "LS" : ("Lake Shore Flooding,Timing,Impacts"),             ### lake shore flood warning, advisory, watch
            "BH" : ("Hazards,Timing,Location,Potential Impacts"),      ### hazardous beach conditions
            "SU" : ("Waves and Surf,Timing,Impacts"),                  ### high surf warning, advisory
            "RP" : ("Timing,Impacts"),                                 ### high rip current risk
            }

    def _bulletOrder(self):
        """Return the canonical ordering of bullet section names within a segment."""
        return [
            "Coastal Flooding",
            "Lake Shore Flooding",
            "Waves and Surf",
            "Hazards",
            "Timing",
            "Location",
            "Potential Impacts",
            "Impacts",
            ]

    #
    # Overridden to allow for attribution statement
    #
    def _makeProduct(self, fcst, segmentAreas, argDict):
        """Build one segment: headline, attribution/calls-to-action, and overview.

        Appends to and returns the running forecast string `fcst`.
        """
        argDict["language"] = self._language
        #
        # This section generates the headline on the segment
        #
        # stuff argDict with the segmentAreas for DiscretePhrases
        argDict['segmentAreas'] = segmentAreas
        editArea = segmentAreas[0]
        areaLabel = editArea
        headlines = self.generateProduct("Hazards", argDict, area = editArea,
                                         areaLabel=areaLabel,
                                         timeRange = self._timeRange)
        fcst = fcst + headlines
        #
        # This section generates the attribution statements and calls-to-action
        #
        hazardsC = argDict['hazards']
        listOfHazards = hazardsC.getHazardList(segmentAreas)
        fcst = fcst + self.hazardBodyText(listOfHazards, argDict)
        #
        # If an overview exists for this product, calculate it
        #
        self.overviewText(listOfHazards, "CFW")
        #
        # Clean up and return
        #
        fcst = self.endline(fcst, linelength=self._lineLength, breakStr=[" ", "-", "..."])
        return fcst

    def _postProcessProduct(self, fcst, argDict):
        """Finalise the whole product: insert overview, trim the EAS line,
        rename for lakeshore-only products, and normalise whitespace/framing.

        NOTE(review): `followup` and `prodNameKey` are assigned but never used.
        """
        #
        # If an overview exists for this product, insert it
        #
        overview = self.finalOverviewText()
        overviewSearch = re.compile(r'Default overview section', re.DOTALL)
        fcst = overviewSearch.sub(overview, fcst)
        urgent = 0
        followup = 1
        prodNameKey = ''
        fullKeyList = []
        newList = ['NEW', 'EXA', 'EXB']
        hazardsC = argDict['hazards']
        segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
        # Collect every phensig in the product and mark it urgent when a
        # warning/watch is newly issued or expanded.
        for segmentAreas in segmentList:
            listOfHazards = hazardsC.getHazardList(segmentAreas)
            for eachHazard in listOfHazards:
                if eachHazard['phensig'] not in fullKeyList:
                    fullKeyList.append(eachHazard['phensig'])
                if eachHazard['phensig'] in ['CF.W', 'CF.A', 'LS.W', 'LS.A']:
                    if eachHazard['act'] in newList:
                        urgent = 1
        # remove eas line if not urgent
        if urgent == 0 and len(self._easPhrase):
            fcst = fcst.replace(self._easPhrase + '\n', '', 1)
        # rename the product if necessary based on VTEC codes
        for each in fullKeyList:
            if each in ['LS.W', 'LS.A', 'LS.Y', 'LS.S']:
                productName = "Lakeshore Hazard Message"
                fcst = fcst.replace(self._productName, productName, 1)
                break
        # Added to place line feeds in the CAP tags to keep separate from CTAs
        # NOTE(review): string.replace is a literal replacement, so the raw
        # backslash sequences here match/emit literal "\.\.\." text — confirm
        # this matches the upstream product text.
        fcst = string.replace(fcst, \
            r"PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", \
            r"\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n")
        fcst = string.replace(fcst, ".:", ".")
        fcst = string.replace(fcst, "\n ","\n")
        fcst = string.replace(fcst, "&&", "\n&&\n")
        # Prevent empty Call to Action Tags
        fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
            "", fcst)
        ### to remove any empty framing code
        fcst = re.sub("\|\*\s*\*\|", "", fcst)
        ### indent the bullet text
        fcst = self._indentBulletText(fcst)
        #
        # Clean up multiple line feeds
        #
        fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
        fcst = fixMultiLF.sub(r'\1', fcst)
        #
        # Finish Progress Meter
        #
        self.setProgressPercentage(100)
        self.progressMessage(0, 100, self._displayName + " Complete")
        ### add the url text from the configuration section
        fcst = fcst + "\n" + self._urlText
        return fcst
| 40.384921
| 105
| 0.584455
| 1,008
| 10,177
| 5.872024
| 0.385913
| 0.012164
| 0.011488
| 0.014192
| 0.049333
| 0.020949
| 0.013178
| 0.013178
| 0
| 0
| 0
| 0.00913
| 0.289673
| 10,177
| 251
| 106
| 40.545817
| 0.809656
| 0.34578
| 0
| 0.065041
| 0
| 0
| 0.218253
| 0.05476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.01626
| 0.01626
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
552924a7e504599cbe9d1cfc08f6a123e6773a8c
| 880
|
py
|
Python
|
setup.py
|
hubmapconsortium/python-sdk
|
17eaec434f1f65190a6e53d0055fe382841222de
|
[
"MIT"
] | null | null | null |
setup.py
|
hubmapconsortium/python-sdk
|
17eaec434f1f65190a6e53d0055fe382841222de
|
[
"MIT"
] | 8
|
2021-11-09T13:35:48.000Z
|
2022-03-04T15:56:52.000Z
|
setup.py
|
hubmapconsortium/python-sdk
|
17eaec434f1f65190a6e53d0055fe382841222de
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# Use the project README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata and pinned runtime dependencies for the HuBMAP SDK.
setup(
    name="hubmap-sdk",
    version="1.0.1",
    author="Hubmap",
    author_email="api-developers@hubmapconsortium.org",
    description="Python Client Libary to use HuBMAP web services",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=['hubmap_sdk'],
    keywords=[
        "HuBMAP Sdk",
        "python"
    ],
    install_requires=[
        "certifi==2021.10.8",
        "chardet==4.0.0",
        "idna==2.10",
        "requests==2.25.1",
        "urllib3==1.26.7"
    ],
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ],
    python_requires='>=3.6'
)
| 25.142857
| 66
| 0.606818
| 102
| 880
| 5.107843
| 0.705882
| 0.115163
| 0.072937
| 0.115163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 0.244318
| 880
| 34
| 67
| 25.882353
| 0.741353
| 0
| 0
| 0.096774
| 0
| 0
| 0.38339
| 0.039818
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032258
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5529c5dbc7514236bc8611211cfb848e2618a841
| 2,615
|
py
|
Python
|
bayarea_urbansim/data_regeneration/export_to_h5.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
bayarea_urbansim/data_regeneration/export_to_h5.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
bayarea_urbansim/data_regeneration/export_to_h5.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
from spandex import TableLoader
import pandas.io.sql as sql
loader = TableLoader()
def db_to_df(query):
    """Run a SQL query against the loader's database and return the result as a DataFrame."""
    # NOTE(review): sql.read_frame is a legacy pandas API — confirm the pinned
    # pandas version still provides it.
    return sql.read_frame(query, loader.database._connection)
## Export to HDF5- get path to output file
h5_path = loader.get_path('out/regeneration/summaries/bayarea_v3.h5')  ## Path to the output file

#Buildings
buildings = db_to_df('select * from building').set_index('building_id')
if 'id' in buildings.columns:
    del buildings['id']
# Translate development_type_id into the UrbanSim building_type_id taxonomy;
# anything unmapped stays 0.
buildings['building_type_id'] = 0
buildings.building_type_id[buildings.development_type_id == 1] = 1
buildings.building_type_id[buildings.development_type_id == 2] = 3
buildings.building_type_id[buildings.development_type_id == 5] = 12
buildings.building_type_id[buildings.development_type_id == 7] = 10
buildings.building_type_id[buildings.development_type_id == 9] = 5
buildings.building_type_id[buildings.development_type_id == 10] = 4
buildings.building_type_id[buildings.development_type_id == 13] = 8
buildings.building_type_id[buildings.development_type_id == 14] = 7
buildings.building_type_id[buildings.development_type_id == 15] = 9
# NOTE(review): this repeats the `== 13` mapping already applied above — it is
# a no-op duplicate, possibly intended for a different development_type_id.
buildings.building_type_id[buildings.development_type_id == 13] = 8
buildings.building_type_id[buildings.development_type_id == 17] = 6
buildings.building_type_id[buildings.development_type_id == 24] = 16

#Parcels
parcels = db_to_df('select * from parcel').set_index('parcel_id')
# 4046.86 converts acres to square meters.
parcels['shape_area'] = parcels.acres * 4046.86
if 'id' in parcels.columns:
    del parcels['id']
if 'geom' in parcels.columns:
    del parcels['geom']
if 'centroid' in parcels.columns:
    del parcels['centroid']

#Jobs
jobs = db_to_df('select * from jobs').set_index('job_id')
if 'id' in jobs.columns:
    del jobs['id']

#Households
hh = db_to_df('select * from households').set_index('household_id')
if 'id' in hh.columns:
    del hh['id']
hh = hh.rename(columns = {'hinc':'income'})
# Coerce every household column to int32 to shrink the HDF5 store.
for col in hh.columns:
    hh[col] = hh[col].astype('int32')

#Zones
zones_path = loader.get_path('juris/reg/zones/zones.csv')
zones = pd.read_csv(zones_path).set_index('zone_id')

#Putting tables in the HDF5 file
store = pd.HDFStore(h5_path)
store['parcels'] = parcels  # http://urbansim.org/Documentation/Parcel/ParcelTable
store['buildings'] = buildings  # http://urbansim.org/Documentation/Parcel/BuildingsTable
store['households'] = hh  # http://urbansim.org/Documentation/Parcel/HouseholdsTable
store['jobs'] = jobs  # http://urbansim.org/Documentation/Parcel/JobsTable
store['zones'] = zones  # http://urbansim.org/Documentation/Parcel/ZonesTable
store.close()
| 39.029851
| 97
| 0.757553
| 385
| 2,615
| 4.935065
| 0.280519
| 0.078947
| 0.143684
| 0.157368
| 0.476842
| 0.312632
| 0.312632
| 0.312632
| 0.106316
| 0.106316
| 0
| 0.021048
| 0.109751
| 2,615
| 67
| 98
| 39.029851
| 0.795103
| 0.169025
| 0
| 0.04
| 0
| 0
| 0.143918
| 0.030176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.06
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
552b355ab9a4608d3f4dc4d7df2c3b24e79e210d
| 7,060
|
py
|
Python
|
minder_utils/visualisation/feature_engineering.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | null | null | null |
minder_utils/visualisation/feature_engineering.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | null | null | null |
minder_utils/visualisation/feature_engineering.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | 1
|
2022-03-16T11:10:43.000Z
|
2022-03-16T11:10:43.000Z
|
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import pandas as pd
from minder_utils.formatting.label import label_by_week, label_dataframe
from minder_utils.feature_engineering import Feature_engineer
from minder_utils.feature_engineering.calculation import *
from minder_utils.util import formatting_plots
from minder_utils.formatting import Formatting
# Feature-engineering entry point backed by the project's data formatter.
fe = Feature_engineer(Formatting())
sns.set()

# Attribute of `fe` to visualise; figure_title keys are the supported options.
att = 'bathroom_night'
figure_title = {
    'bathroom_night': 'Bathroom activity during the night',
    'bathroom_daytime': 'Bathroom activity during the day',
}
# Patient to filter on in process_dataframe — empty by default; set before use.
patient_id = ''
def process_dataframe(df, week_shift=0):
    """Restrict df to the module-level patient_id and renumber its weeks.

    Weeks are relabelled 0, 1, 2, ... in sorted order, then shifted down
    by week_shift. Returns the filtered, renumbered frame.
    """
    patient_rows = df[df.id == patient_id]
    ordered_weeks = patient_rows.week.sort_values().unique()
    renumber = {week: position - week_shift
                for position, week in enumerate(ordered_weeks)}
    patient_rows.week = patient_rows.week.map(renumber)
    return patient_rows
def visualise_flags(df):
    """Overlay vertical UTI markers on the current matplotlib axes.

    Draws a red line at each week where valid == True (UTI) and a blue line
    where valid == False; only the first line of each colour is labelled so
    the legend gets one entry per class.
    """
    for v in [True, False]:
        data = df[df.valid == v]
        not_labelled = True
        for week in data.week.unique():
            if v is True:
                plt.axvline(week, 0, 0.17, color='red', label='UTI' if not_labelled else None)
                not_labelled = False
            elif v is False:
                plt.axvline(week, 0, 0.17, color='blue', label='not UTI' if not_labelled else None)
                not_labelled = False
@formatting_plots(figure_title[att])
def visualise_weekly_data(df):
    """Violin-plot weekly values for the selected patient, with UTI flag lines.

    Returns the processed (filtered/renumbered) dataframe.
    """
    df = process_dataframe(df)
    sns.violinplot(data=df, x='week', y='value')
    visualise_flags(df)
    return df
@formatting_plots('P value, ' + figure_title[att])
def visualise_weekly_statistical_analysis(df, results):
    """Line-plot per-week p-values for the selected patient, with UTI flag lines.

    `results[patient_id]` is expected to be a sequence of per-week statistics
    whose second element is the p-value.
    """
    # Weeks are shifted by 1 so flags align with the p-value indexing below.
    df = process_dataframe(df, 1)
    visualise_flags(df)
    data = results[patient_id]
    # NOTE(review): the `df` parameter is re-bound to a plain dict here, so the
    # incoming dataframe is only used for the flag overlay above.
    df = {'week': [], 'p_value': []}
    for idx, sta in enumerate(data):
        df['week'].append(idx + 1)
        df['p_value'].append(sta[1])
    sns.lineplot(df['week'], df['p_value'])
@formatting_plots('Body temperature')
def visualise_body_temperature(df):
    """Line-plot weekly body-temperature values for the selected patient,
    with UTI flag lines overlaid."""
    df = process_dataframe(df)
    visualise_flags(df)
    sns.lineplot(df.week, df.value)
def visualise_data_time_lineplot(time_array, values_array, name, fill_either_side_array=None, fig = None, ax = None):
    '''
    Plot values_array against time_array as a line, optionally shading a
    symmetric band of +/- fill_either_side_array around it.

    Arguments
    ---------
    - time_array: x values.
    - values_array: y values.
    - name: currently unused; kept for interface symmetry with the other
      visualise_data_time_* helpers.
    - fill_either_side_array: optional per-point half-width of the shaded band.
    - fig, ax: optional existing figure/axes; new ones are created when ax is None.

    Returns
    -------
    - fig, ax (fig is None when the caller supplied ax without fig).
    '''
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
    ax.plot(time_array, values_array)
    # Idiom fix: 'x is not None' replaces the original 'not x is None'.
    if fill_either_side_array is not None:
        ax.fill_between(time_array,
                        y1=values_array - fill_either_side_array,
                        y2=values_array + fill_either_side_array,
                        alpha=0.3)
    return fig, ax
def visualise_data_time_heatmap(data_plot, name, fig = None, ax = None):
    '''
    Draw a heatmap of a dataframe whose columns are days and whose rows are
    aggregated times of day, with the colour bar labelled `name`.

    Arguments
    ---------
    - data_plot: dataframe; columns are day labels, index holds times of day.
    - name: colour-bar label.
    - fig, ax: optional existing figure/axes; new ones are created when ax is None.

    Returns
    -------
    - fig, ax (fig is None when the caller supplied ax without fig).
    '''
    if ax is None:
        # Bug fix: the original assigned the new axes to an unused name
        # ('axes') and never passed them on.
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
    # Bug fix: draw on the resolved axes explicitly; the original let seaborn
    # use the "current" axes, silently ignoring a caller-supplied `ax`.
    ax = sns.heatmap(data_plot.values, cmap='Blues', cbar_kws={'label': name}, ax=ax)
    ax.invert_yaxis()
    # Label every 90th day and every 3rd time-of-day row; ticks are centred
    # on cells (+0.5).
    x_tick_loc = np.arange(0, data_plot.shape[1], 90)
    ax.set_xticks(x_tick_loc + 0.5)
    ax.set_xticklabels(data_plot.columns.astype(str)[x_tick_loc].values)
    y_tick_loc = np.arange(0, data_plot.shape[0], 3)
    ax.set_yticks(y_tick_loc + 0.5)
    ax.set_yticklabels([pd.to_datetime(time).strftime("%H:%M") for time in data_plot.index.values[y_tick_loc]], rotation=0)
    ax.set_xlabel('Day')
    ax.set_ylabel('Time of Day')
    return fig, ax
def visualise_activity_daily_data(fe):
    '''
    Box-plot daily activity values per location, split by UTI label.

    Arguments
    ---------
    - fe: class:
        The feature engineering class that produces the data.

    Returns
    -------
    - fig_list, axes_list: one figure/axes pair per activity location.
    '''
    activity_daily = fe.activity_specific_agg(agg='daily', load_smaller_aggs = True)
    # Label each day with its UTI validity flag (no widening of the window).
    activity_daily = label_dataframe(activity_daily, days_either_side=0)
    activity_daily = activity_daily.rename(columns = {'valid':'UTI Label'})
    activity_daily['Feature'] = activity_daily['location'].map(fe.info)
    sns.set_theme('talk')
    fig_list = []
    axes_list = []
    # One boxplot figure per sensor location, outliers hidden for readability.
    for feature in activity_daily['location'].unique():
        data_plot = activity_daily[activity_daily['location'].isin([feature])]
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
        ax = sns.boxplot(data=data_plot, x='value', y = 'Feature', hue='UTI Label', ax=ax, **{'showfliers':False})
        ax.set_ylabel(None)
        ax.set_yticks([])
        ax.set_title('{}'.format(fe.info[feature]))
        ax.set_xlabel('Value')
        fig_list.append(fig)
        axes_list.append(ax)
    return fig_list, axes_list
def visualise_activity_weekly_data(fe):
    '''
    Box-plot weekly activity values per location, split by UTI label.

    Arguments
    ---------
    - fe: class:
        The feature engineering class that produces the data.

    Returns
    -------
    - fig_list, axes_list: one figure/axes pair per activity location.
    '''
    activity_weekly = fe.activity_specific_agg(agg='weekly', load_smaller_aggs = True)
    # Weekly aggregation is labelled per week rather than per day.
    activity_weekly = label_by_week(activity_weekly)
    activity_weekly = activity_weekly.rename(columns = {'valid':'UTI Label'})
    activity_weekly['Feature'] = activity_weekly['location'].map(fe.info)
    sns.set_theme('talk')
    fig_list = []
    axes_list = []
    # One boxplot figure per sensor location, outliers hidden for readability.
    for feature in activity_weekly['location'].unique():
        data_plot = activity_weekly[activity_weekly['location'].isin([feature])]
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
        ax = sns.boxplot(data=data_plot, x='value', y = 'Feature', hue='UTI Label', ax=ax, **{'showfliers':False})
        ax.set_ylabel(None)
        ax.set_yticks([])
        ax.set_title('{}'.format(fe.info[feature]))
        ax.set_xlabel('Value')
        fig_list.append(fig)
        axes_list.append(ax)
    return fig_list, axes_list
def visualise_activity_evently_data(fe):
    '''
    Plot one boxplot per activity location, comparing the distribution of
    event-level ("evently") values between UTI-labelled and unlabelled periods.

    Arguments
    ---------
    - fe: class:
        The feature engineering class that produces the data.

    Returns
    -------
    - fig_list, axes_list: one matplotlib figure/axes pair per location.
    '''
    evently = fe.activity_specific_agg(agg='evently', load_smaller_aggs=True)
    evently = label_dataframe(evently, days_either_side=0)
    evently = evently.rename(columns={'valid': 'UTI Label'})
    evently['Feature'] = evently['location'].map(fe.info)
    sns.set_theme('talk')
    figures, axes = [], []
    for location in evently['location'].unique():
        subset = evently[evently['location'] == location]
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
        ax = sns.boxplot(data=subset, x='value', y='Feature',
                         hue='UTI Label', ax=ax, showfliers=False)
        ax.set_ylabel(None)
        ax.set_yticks([])
        ax.set_title(f'{fe.info[location]}')
        ax.set_xlabel('Value')
        figures.append(fig)
        axes.append(ax)
    return figures, axes
if __name__ == '__main__':
    # Ad-hoc demo of the weekly analysis pipeline.
    # NOTE(review): `fe` and `att` are not defined anywhere in this module's
    # visible scope — running this block as-is raises NameError. Presumably
    # they were defined interactively or in an earlier revision; confirm
    # before relying on this entry point.
    results = weekly_compare(getattr(fe, att), kolmogorov_smirnov)
    df = label_by_week(getattr(fe, att))
    visualise_weekly_data(df)
    visualise_weekly_statistical_analysis(df)
    visualise_body_temperature(label_by_week(fe.body_temperature))
| 28.699187
| 125
| 0.657507
| 973
| 7,060
| 4.538541
| 0.18705
| 0.02038
| 0.014946
| 0.02038
| 0.525362
| 0.397871
| 0.340353
| 0.328804
| 0.304348
| 0.26721
| 0
| 0.009331
| 0.210623
| 7,060
| 245
| 126
| 28.816327
| 0.783061
| 0.069688
| 0
| 0.347518
| 0
| 0
| 0.079168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070922
| false
| 0
| 0.06383
| 0
| 0.184397
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
552c410668701cd1585658195d593e1b5751e350
| 442
|
py
|
Python
|
code-everyday-challenge/n159_cyclically_rotate.py
|
ved93/deliberate-practice-challenges
|
2fccdbb9d2baaa16f888055c081a8d04804c0045
|
[
"MIT"
] | null | null | null |
code-everyday-challenge/n159_cyclically_rotate.py
|
ved93/deliberate-practice-challenges
|
2fccdbb9d2baaa16f888055c081a8d04804c0045
|
[
"MIT"
] | null | null | null |
code-everyday-challenge/n159_cyclically_rotate.py
|
ved93/deliberate-practice-challenges
|
2fccdbb9d2baaa16f888055c081a8d04804c0045
|
[
"MIT"
] | null | null | null |
# https://practice.geeksforgeeks.org/problems/cyclically-rotate-an-array-by-one2614/1
# Given an array, rotate the array by one position in clock-wise direction.
# Input:
# N = 5
# A[] = {1, 2, 3, 4, 5}
# Output:
# 5 1 2 3 4
def rotate_cycle(a):
    """Rotate the list ``a`` one position clockwise (to the right), in place.

    The last element becomes the first and every other element shifts one
    slot to the right, e.g. ``[1, 2, 3, 4, 5]`` -> ``[5, 1, 2, 3, 4]``.

    Arguments
    ---------
    a: list
        The list to rotate; it is mutated in place.

    Returns
    -------
    list
        The same list object, rotated.
    """
    # Guard: indexing a[-1] on an empty list would raise IndexError.
    if not a:
        return a
    tmp = a[-1]
    # Walk from the end, shifting each element one slot to the right.
    for i in range(1, len(a)):
        a[-i] = a[-i - 1]
    a[0] = tmp
    return a
if __name__ == "__main__":
    # Demo: rotate a sample array once and print the result.
    sample = [1, 2, 3, 4, 5]
    print(rotate_cycle(sample))
| 17.68
| 85
| 0.567873
| 79
| 442
| 3.050633
| 0.518987
| 0.024896
| 0.037344
| 0.049793
| 0.049793
| 0.049793
| 0
| 0
| 0
| 0
| 0
| 0.076453
| 0.260181
| 442
| 25
| 86
| 17.68
| 0.66055
| 0.475113
| 0
| 0
| 0
| 0
| 0.035714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
552d7c8af23d30920337cc95fa4d7065705c0c5f
| 10,800
|
py
|
Python
|
adamw_optimizer.py
|
pwldj/Bio_XLNet_CRF
|
536053e9d74abdb2ee56000a8a779ffc1c0dd0fc
|
[
"Apache-2.0"
] | null | null | null |
adamw_optimizer.py
|
pwldj/Bio_XLNet_CRF
|
536053e9d74abdb2ee56000a8a779ffc1c0dd0fc
|
[
"Apache-2.0"
] | 2
|
2022-03-07T07:27:13.000Z
|
2022-03-07T07:27:15.000Z
|
adamw_optimizer.py
|
pwldj/MTL-BioNER
|
3fb336f517346daeec6a716fa6a657a421754bdb
|
[
"Apache-2.0"
] | 1
|
2021-05-05T08:42:53.000Z
|
2021-05-05T08:42:53.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adamw for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
class AdamOptimizer(optimizer.Optimizer):
    """Adam with decoupled (AdamW-style) weight decay.

    Implemented against the low-level TF1 ``optimizer.Optimizer`` API so it
    supports both ref variables and resource variables (multi-GPU setups).
    Weight decay is added directly to the parameter update rather than to
    the gradient, and can be disabled per-variable by name pattern.
    """

    def __init__(self,
                 learning_rate=0.001,
                 weight_decay_rate=0.0,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 exclude_from_weight_decay=None,
                 include_in_weight_decay=None,
                 use_locking=False,
                 name="Adamw"):
        """
        This is a multi Gpu version of adamw.

        :param learning_rate: base step size; may be a callable returning it.
        :param weight_decay_rate: decoupled weight decay coefficient; 0 disables decay.
        :param beta1: exponential decay rate for the first-moment estimate.
        :param beta2: exponential decay rate for the second-moment estimate.
        :param epsilon: small constant added to the denominator for numerical stability.
        :param exclude_from_weight_decay: regex list; variables whose name matches skip decay.
        :param include_in_weight_decay: regex list; currently unused (see _do_use_weight_decay).
        :param use_locking: if True, use locks for the underlying assign ops.
        :param name: name prefix for the ops created by this optimizer.
        """
        super(AdamOptimizer, self).__init__(use_locking, name)

        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self._weight_decay_rate = weight_decay_rate
        self._exclude_from_weight_decay = exclude_from_weight_decay
        self._include_in_weight_decay = include_in_weight_decay

        # Tensor versions of the constructor arguments, created in _prepare().
        self._lr_t = None
        self._weight_decay_rate_t = None
        self._beta1_t = None
        self._beta2_t = None
        self._epsilon_t = None

    def _get_beta_accumulators(self):
        # Return the (beta1^t, beta2^t) power accumulators as non-slot
        # variables from the graph (or eager context).
        with ops.init_scope():
            if context.executing_eagerly():
                graph = None
            else:
                graph = ops.get_default_graph()
            return (self._get_non_slot_variable("beta1_power", graph=graph),
                    self._get_non_slot_variable("beta2_power", graph=graph))

    def _create_slots(self, var_list):
        # Create the beta1 and beta2 accumulators on the same device as the first
        # variable. Sort the var_list to make sure this device is consistent across
        # workers (these need to go on the same PS, otherwise some updates are
        # silently ignored).
        first_var = min(var_list, key=lambda x: x.name)
        self._create_non_slot_variable(
            initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
        self._create_non_slot_variable(
            initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
        # Create slots for the first and second moments.
        for v in var_list:
            self._zeros_slot(v, "adam_m", self._name)
            self._zeros_slot(v, "adam_v", self._name)

    def _prepare(self):
        # Resolve possibly-callable hyperparameters and convert them to tensors
        # once per apply_gradients call.
        lr = self._call_if_callable(self._lr)
        beta1 = self._call_if_callable(self._beta1)
        beta2 = self._call_if_callable(self._beta2)
        weight_decay_rate = self._call_if_callable(self._weight_decay_rate)
        epsilon = self._call_if_callable(self._epsilon)
        self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
        self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
        self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
        self._weight_decay_rate_t = ops.convert_to_tensor(
            weight_decay_rate, name="weight_decay_rate")
        self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")

    def _apply_dense(self, grad, var):
        # Dense update path for ref variables.
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        weight_decay_rate = math_ops.cast(
            self._weight_decay_rate_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        beta1_power, beta2_power = self._get_beta_accumulators()
        beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
        # Bias-corrected step size (standard Adam form).
        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        m = self.get_slot(var, "adam_m")
        v = self.get_slot(var, "adam_v")
        # m_t = beta1 * m + (1 - beta1) * g_t
        m_t = (tf.multiply(beta1_t, m) + tf.multiply(1.0 - beta1_t, grad))
        m_t = m.assign(m_t, use_locking=self._use_locking)
        # v_t = beta2 * v + (1 - beta2) * g_t^2
        v_t = (tf.multiply(beta2_t, v) + tf.multiply(1.0 - beta2_t, tf.square(grad)))
        v_t = v.assign(v_t, use_locking=self._use_locking)
        # NOTE(review): `lr` above already folds the bias-correction powers
        # in, and the hat terms below divide by the same powers again — this
        # looks like a double bias correction compared with canonical Adam;
        # confirm it is intentional before changing.
        m_t_hat = m_t / (1. - beta1_power)
        v_t_hat = v_t / (1. - beta2_power)
        update = m_t_hat / (tf.sqrt(v_t_hat) + epsilon_t)
        # Decoupled weight decay: added to the update, not the gradient.
        if self._do_use_weight_decay(var.name):
            update += weight_decay_rate * var
        var_update = var - lr * update
        var_update = var.assign(var_update, use_locking=self._use_locking)
        # Group so all three assigns form the returned update op.
        return tf.group(*[var_update, m_t, v_t])

    def _resource_apply_dense(self, grad, var):
        # Same math as _apply_dense, for resource variables.
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        weight_decay_rate = math_ops.cast(
            self._weight_decay_rate_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        beta1_power, beta2_power = self._get_beta_accumulators()
        beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        m = self.get_slot(var, "adam_m")
        v = self.get_slot(var, "adam_v")
        m_t = (tf.multiply(beta1_t, m) + tf.multiply(1.0 - beta1_t, grad))
        m_t = m.assign(m_t, use_locking=self._use_locking)
        v_t = (tf.multiply(beta2_t, v) + tf.multiply(1.0 - beta2_t, tf.square(grad)))
        v_t = v.assign(v_t, use_locking=self._use_locking)
        # NOTE(review): same apparent double bias correction as _apply_dense.
        m_t_hat = m_t / (1. - beta1_power)
        v_t_hat = v_t / (1. - beta2_power)
        update = m_t_hat / (tf.sqrt(v_t_hat) + epsilon_t)
        if self._do_use_weight_decay(var.name):
            update += weight_decay_rate * var
        var_update = var - lr * update
        var_update = var.assign(var_update, use_locking=self._use_locking)
        return tf.group(*[var_update, m_t, v_t])

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        # Sparse update shared by ref and resource paths.
        # NOTE(review): unlike the dense paths, this branch applies no weight
        # decay and no extra hat terms — confirm the asymmetry is intended.
        beta1_power, beta2_power = self._get_beta_accumulators()
        beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "adam_m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        # The scatter must run after the decay assign completes.
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)
        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "adam_v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)
        v_sqrt = math_ops.sqrt(v_t)
        var_update = state_ops.assign_sub(
            var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])

    def _apply_sparse(self, grad, var):
        # Ref-variable sparse path: wire IndexedSlices into the shared update.
        return self._apply_sparse_shared(
            grad.values,
            var,
            grad.indices,
            lambda x, i, v: state_ops.scatter_add(  # pylint: disable=g-long-lambda
                x,
                i,
                v,
                use_locking=self._use_locking))

    def _resource_scatter_add(self, x, i, v):
        # Resource-variable scatter-add that returns the updated value once
        # the in-place add has run.
        with ops.control_dependencies(
                [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
            return x.value()

    def _resource_apply_sparse(self, grad, var, indices):
        # Resource-variable sparse path, reusing the shared update.
        return self._apply_sparse_shared(grad, var, indices,
                                         self._resource_scatter_add)

    def _finish(self, update_ops, name_scope):
        # Update the power accumulators.
        # Runs after every variable update so beta1^t / beta2^t advance once
        # per step, not once per variable.
        with ops.control_dependencies(update_ops):
            beta1_power, beta2_power = self._get_beta_accumulators()
            with ops.colocate_with(beta1_power):
                update_beta1 = beta1_power.assign(
                    beta1_power * self._beta1_t, use_locking=self._use_locking)
                update_beta2 = beta2_power.assign(
                    beta2_power * self._beta2_t, use_locking=self._use_locking)
        return control_flow_ops.group(
            *update_ops + [update_beta1, update_beta2], name=name_scope)

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        # Decay is globally off when the rate is falsy (0 or None).
        if not self._weight_decay_rate:
            return False
        # NOTE(review): the include list is dead code here — every parameter
        # is decayed unless it matches an exclusion pattern.
        # for r in self._include_in_weight_decay:
        #     if re.search(r, param_name) is not None:
        #         return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    tf.logging.info('Adam WD excludes {}'.format(param_name))
                    return False
        return True
| 44.444444
| 85
| 0.64463
| 1,542
| 10,800
| 4.148508
| 0.141375
| 0.055026
| 0.034391
| 0.05315
| 0.518212
| 0.434579
| 0.404877
| 0.39159
| 0.372049
| 0.372049
| 0
| 0.017616
| 0.253611
| 10,800
| 242
| 86
| 44.628099
| 0.775958
| 0.144167
| 0
| 0.362069
| 0
| 0
| 0.01789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.068966
| 0.011494
| 0.206897
| 0.005747
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
552fdd4ea7856ad8f238ffba4056d7b666e1d19e
| 1,559
|
py
|
Python
|
backend/breach/helpers/injector.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 184
|
2016-03-31T04:19:42.000Z
|
2021-11-26T21:37:12.000Z
|
backend/breach/helpers/injector.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 212
|
2016-03-31T04:32:06.000Z
|
2017-02-26T09:34:47.000Z
|
backend/breach/helpers/injector.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 38
|
2016-03-31T09:09:44.000Z
|
2021-11-26T21:37:13.000Z
|
from backend.settings import BASE_DIR
import os
import subprocess
import stat
# Repository root (one directory above the backend package) and the client
# directory holding the build / injection shell scripts.
rupture_dir = os.path.abspath(os.path.join(BASE_DIR, os.pardir))
client_dir = os.path.join(rupture_dir, 'client')
def inject(victim):
    """Build the client for ``victim``, generate its injection script, run it."""
    # The three stages must execute in exactly this order.
    for stage in (_create_client, _create_injection, _run_injection):
        stage(victim)
def _create_client(victim):
    """Run the client build script for ``victim`` and return its exit code."""
    url = str(victim.realtimeurl)
    vid = str(victim.id)
    with open(os.devnull, 'w') as devnull:
        # Build output is discarded; stderr is captured via a pipe.
        proc = subprocess.Popen(
            [os.path.join(client_dir, 'build.sh'), url, vid],
            cwd=client_dir,
            stdout=devnull,
            stderr=subprocess.PIPE
        )
        return proc.wait()
def _create_injection(victim):
    """Write a per-victim inject.sh with the victim's source IP substituted in,
    then mark it owner-executable."""
    template_path = os.path.join(client_dir, 'inject.sh')
    with open(template_path, 'r') as f:
        script = f.read()
    # The template uses '$1' as the placeholder for the source IP.
    script = script.replace('$1', str(victim.sourceip))
    inject_file = os.path.join(client_dir, 'client_{}/inject.sh'.format(victim.id))
    with open(inject_file, 'w') as f:
        f.write(script)
    current_mode = os.stat(inject_file).st_mode
    os.chmod(inject_file, current_mode | stat.S_IEXEC)
def _run_injection(victim):
    # Launch the victim-specific inject.sh asynchronously (the child is not
    # waited on); stdout is discarded and stderr captured via a pipe.
    victimid = victim.id
    clientid_dir = os.path.join(client_dir, 'client_{}'.format(victimid))
    with open(os.devnull, 'w') as FNULL:
        # NOTE(review): shell=True with a path built from victim.id — safe
        # only if the id cannot contain shell metacharacters; confirm it is
        # validated upstream (e.g. a numeric primary key).
        subprocess.Popen(
            os.path.join(clientid_dir, 'inject.sh'),
            shell=True,
            cwd=client_dir,
            stdout=FNULL,
            stderr=subprocess.PIPE
        )
| 25.557377
| 84
| 0.645285
| 201
| 1,559
| 4.830846
| 0.288557
| 0.049434
| 0.072091
| 0.065911
| 0.313079
| 0.234809
| 0.140062
| 0.088568
| 0
| 0
| 0
| 0.000837
| 0.233483
| 1,559
| 60
| 85
| 25.983333
| 0.811715
| 0
| 0
| 0.25
| 0
| 0
| 0.042335
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5530fb74fc5655f0d169fed9774ccb03f4699d79
| 952
|
py
|
Python
|
wagtail_client/utils.py
|
girleffect/core-integration-demo
|
c37a0d5183d16bec6245a41e12dd90691ffa7138
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail_client/utils.py
|
girleffect/core-integration-demo
|
c37a0d5183d16bec6245a41e12dd90691ffa7138
|
[
"BSD-3-Clause"
] | 19
|
2018-02-06T08:56:24.000Z
|
2018-09-11T08:05:24.000Z
|
wagtail_client/utils.py
|
girleffect/core-integration-demo
|
c37a0d5183d16bec6245a41e12dd90691ffa7138
|
[
"BSD-3-Clause"
] | 2
|
2018-05-25T09:44:03.000Z
|
2021-08-18T12:07:47.000Z
|
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
def provider_logout_url(request):
    """
    Construct the logout URL for the Identity Provider (Authentication
    Service), including the post-logout redirect and — when it is stored in
    the session — the OIDC id token hint.

    :param request: the current request
    :return: the fully-encoded logout URL
    """
    site = get_current_site(request)
    if not hasattr(site, "oidcsettings"):
        raise RuntimeError(f"Site {site} has no settings configured.")
    query = {
        "post_logout_redirect_uri": site.oidcsettings.wagtail_redirect_url
    }
    # The OIDC_STORE_ID_TOKEN setting must be set to true if we want to be
    # able to read the id token from the session.
    if "oidc_id_token" in request.session:
        query["id_token_hint"] = request.session["oidc_id_token"]
    return settings.OIDC_OP_LOGOUT_URL + "?" + urlencode(query, doseq=True)
| 34
| 91
| 0.722689
| 133
| 952
| 4.984962
| 0.541353
| 0.042232
| 0.042232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20063
| 952
| 27
| 92
| 35.259259
| 0.871222
| 0.277311
| 0
| 0
| 0
| 0
| 0.174507
| 0.036419
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
553261313f73826b4fd76c66eae4be0cde9803af
| 978
|
py
|
Python
|
connectToProteusFromMongo.py
|
erentts/Ignite-Greenhouse
|
328730399328936332b5c6f3f8dcd18bf56369b9
|
[
"MIT"
] | 4
|
2021-02-22T21:19:28.000Z
|
2021-05-03T14:19:18.000Z
|
connectToProteusFromMongo.py
|
erentts/Ignite-Greenhouse
|
328730399328936332b5c6f3f8dcd18bf56369b9
|
[
"MIT"
] | null | null | null |
connectToProteusFromMongo.py
|
erentts/Ignite-Greenhouse
|
328730399328936332b5c6f3f8dcd18bf56369b9
|
[
"MIT"
] | null | null | null |
import pymongo
import dns
import serial
from pymongo import MongoClient
import struct

# Bridge between a MongoDB greenhouse document and a device on a serial port.
# NOTE(review): the MongoDB connection string is empty — it must be filled in
# (or loaded from configuration) before this script can connect.
cluster = MongoClient("")
# Serial link: COM1, 9600 baud, 8 data bits, no parity, 1 stop bit,
# blocking reads (timeout=None).
serialPort = serial.Serial(port= "COM1", baudrate=9600 ,bytesize =8 , timeout =None, parity='N',stopbits=1)
db=cluster["<greenHouse>"]
collection = db["greenhouses"]
# For each line arriving on the serial port: push the stored target
# temperature out to the device, then record the temperature it reports back.
while serialPort.readline():
    results = collection.find({"greenHouseName" : "SERA 1" })
    for result in results:
        targetTemperature = abs(int(result.get("targetTemperature")))
        # declaring an integer value
        int_val = targetTemperature
        # converting to string
        str_val = str(targetTemperature)
        # converting string to bytes
        byte_val = str_val.encode()
        serialPort.write(byte_val)
        getterThree = collection.update_one({"greenHouseName" : "SERA 1"},{"$set":{"targetTemperature" : targetTemperature }})
        # NOTE(review): concatenates two raw one-byte serial reads before
        # float() — this assumes the device sends the temperature as exactly
        # two ASCII bytes; verify against the device-side protocol.
        getter = collection.update_one({"greenHouseName" : "SERA 1"},{"$set":{"currentTemperature" : float(serialPort.read() + serialPort.read()) }})
| 31.548387
| 145
| 0.702454
| 105
| 978
| 6.47619
| 0.561905
| 0.079412
| 0.083824
| 0.097059
| 0.120588
| 0.120588
| 0.120588
| 0
| 0
| 0
| 0
| 0.012225
| 0.163599
| 978
| 30
| 146
| 32.6
| 0.819071
| 0.07771
| 0
| 0
| 0
| 0
| 0.164994
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.263158
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5539d275ebd36d43b5d44642306d4d9d488a83a3
| 961
|
py
|
Python
|
s3_file_uploads/serializers.py
|
dabapps/django-s3-file-uploads
|
17ed6b4e02bd43bc925af987ff5bf971a82da434
|
[
"BSD-3-Clause"
] | 5
|
2019-05-27T03:51:30.000Z
|
2021-03-19T11:24:09.000Z
|
s3_file_uploads/serializers.py
|
dabapps/django-s3-file-uploads
|
17ed6b4e02bd43bc925af987ff5bf971a82da434
|
[
"BSD-3-Clause"
] | 7
|
2019-12-04T22:38:13.000Z
|
2021-06-10T17:50:06.000Z
|
s3_file_uploads/serializers.py
|
dabapps/django-s3-file-uploads
|
17ed6b4e02bd43bc925af987ff5bf971a82da434
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import serializers
from s3_file_uploads.constants import ACCESS_CONTROL_TYPES, PRIVATE
from s3_file_uploads.models import UploadedFile
class UploadedFileSerializer(serializers.ModelSerializer):
    """Serializer for UploadedFile exposing the stored file's name and a
    read-only download URL alongside the model's metadata fields."""
    # Original file name, read from the underlying FileField.
    file_name = serializers.CharField(source='file.name', read_only=True)
    # Download URL produced by the model's get_download_url.
    file = serializers.URLField(source='get_download_url', read_only=True)

    class Meta:
        model = UploadedFile
        fields = [
            'id',
            'created',
            'modified',
            'file_key',
            'file',
            'filename',
            'file_name',
            'file_path',
            'user',
        ]
        # Fix: 'modfied' was a typo, so the 'modified' timestamp was not
        # actually marked read-only.
        read_only_fields = [
            'id',
            'modified',
            'created',
            'file_name',
            'file_path',
            'file_key'
        ]
class AccessControlListSerializer(serializers.Serializer):
    # Validates a requested access-control type; defaults to PRIVATE.
    acl = serializers.ChoiceField(choices=ACCESS_CONTROL_TYPES, default=PRIVATE)
| 27.457143
| 80
| 0.597294
| 88
| 961
| 6.272727
| 0.511364
| 0.057971
| 0.036232
| 0.061594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003017
| 0.310094
| 961
| 34
| 81
| 28.264706
| 0.829563
| 0
| 0
| 0.275862
| 0
| 0
| 0.131113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.103448
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
553a35ee3c9965503e444537543d6f056c2747c7
| 1,873
|
py
|
Python
|
vbts_webadmin/views/subscribers.py
|
pcarivbts/vbts-webadmin
|
0616eca6492daa3ebc26b442e8dbebda7ac06d51
|
[
"BSD-3-Clause"
] | null | null | null |
vbts_webadmin/views/subscribers.py
|
pcarivbts/vbts-webadmin
|
0616eca6492daa3ebc26b442e8dbebda7ac06d51
|
[
"BSD-3-Clause"
] | 3
|
2020-06-05T18:34:16.000Z
|
2021-06-10T20:31:18.000Z
|
vbts_webadmin/views/subscribers.py
|
pcarivbts/vbts-webadmin
|
0616eca6492daa3ebc26b442e8dbebda7ac06d51
|
[
"BSD-3-Clause"
] | 2
|
2018-07-04T00:54:50.000Z
|
2022-01-28T16:52:10.000Z
|
"""
Copyright (c) 2015-present, Philippine-California Advanced Research Institutes-
The Village Base Station Project (PCARI-VBTS). All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from django.contrib import messages as alerts
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.db.models import Q
from django.shortcuts import render
from django.utils.translation import ugettext as _
from vbts_subscribers.models import SipBuddies
from vbts_webadmin.forms import SearchForm
@login_required
def subscribers_list(request, template_name='subscribers/list.html'):
    """
    Render a paginated list of SIP subscribers (15 per page).

    When a 'search' query parameter is present, the queryset is narrowed to
    subscribers whose name or caller ID contains every whitespace-separated
    search term, and an info alert echoes the search back to the user.

    :param request: the current request
    :param template_name: template used to render the list
    :return: the rendered response
    """
    data = {}
    # Build the base queryset once; the search branch only filters it.
    subscribers = SipBuddies.objects.all()
    if 'search' in request.GET:
        for term in request.GET['search'].split():
            subscribers = subscribers.filter(Q(name__icontains=term) |
                                             Q(callerid__icontains=term))
        data['search'] = True
        alerts.info(request,
                    _("You've searched for: '%s'") % request.GET['search'])
    paginator = Paginator(subscribers, 15)
    page = request.GET.get('page')
    is_paginated = False
    if paginator.num_pages > 1:
        is_paginated = True
    try:
        subscribers = paginator.page(page)
    except PageNotAnInteger:
        # Non-integer (or missing) page: fall back to the first page.
        subscribers = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        subscribers = paginator.page(paginator.num_pages)
    form = SearchForm(form_action='subscribers')
    data['subscribers'] = subscribers
    data['is_paginated'] = is_paginated
    data['form'] = form
    return render(request, template_name, data)
| 33.446429
| 79
| 0.705286
| 224
| 1,873
| 5.808036
| 0.441964
| 0.061491
| 0.032283
| 0.053036
| 0.066872
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005384
| 0.20662
| 1,873
| 55
| 80
| 34.054545
| 0.870121
| 0.146289
| 0
| 0.051282
| 0
| 0
| 0.070396
| 0.013199
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.25641
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
553e5975ce3bca9dd2037d832b61d89b76e372a6
| 16,307
|
py
|
Python
|
examples/vq_rnn_fruit_joint/vq_fruit_joint.py
|
kastnerkyle/tfbldr
|
58ad1437d500924acd15d1c6eec4a864f57e9c7c
|
[
"BSD-3-Clause"
] | 4
|
2018-05-15T22:35:00.000Z
|
2019-02-22T01:40:49.000Z
|
examples/vq_rnn_fruit_joint/vq_fruit_joint.py
|
kastnerkyle/tfbldr
|
58ad1437d500924acd15d1c6eec4a864f57e9c7c
|
[
"BSD-3-Clause"
] | null | null | null |
examples/vq_rnn_fruit_joint/vq_fruit_joint.py
|
kastnerkyle/tfbldr
|
58ad1437d500924acd15d1c6eec4a864f57e9c7c
|
[
"BSD-3-Clause"
] | 2
|
2018-06-09T15:08:44.000Z
|
2018-11-20T10:13:48.000Z
|
from tfbldr.nodes import Conv2d
from tfbldr.nodes import ConvTranspose2d
from tfbldr.nodes import VqEmbedding
from tfbldr.nodes import BatchNorm2d
from tfbldr.nodes import Linear
from tfbldr.nodes import ReLU
from tfbldr.nodes import Sigmoid
from tfbldr.nodes import Tanh
from tfbldr.nodes import OneHot
from tfbldr.nodes import Softmax
from tfbldr.nodes import LSTMCell
from tfbldr.nodes import CategoricalCrossEntropyIndexCost
from tfbldr.nodes import CategoricalCrossEntropyLinearIndexCost
from tfbldr.nodes import BernoulliCrossEntropyCost
from tfbldr.datasets import ordered_list_iterator
from tfbldr.plot import get_viridis
from tfbldr.plot import autoaspect
from tfbldr.datasets import fetch_fruitspeech
from tfbldr import get_params_dict
from tfbldr import run_loop
from tfbldr import scan
import tensorflow as tf
import numpy as np
from collections import namedtuple, defaultdict
import itertools
viridis_cm = get_viridis()
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Load the fruitspeech dataset and find the global min/max of the
# mean-centered signals so every clip shares one normalisation range.
fruit = fetch_fruitspeech()
minmin = np.inf
maxmax = -np.inf
for s in fruit["data"]:
    si = s - s.mean()
    minmin = min(minmin, si.min())
    maxmax = max(maxmax, si.max())

train_data = []
valid_data = []
type_counts = defaultdict(lambda: 0)
# NOTE(review): final_audio is never appended to in this chunk — possibly
# leftover from an earlier revision.
final_audio = []
# Mean-center each clip, scale to [-1, 1], and hold out the 15th example
# of each target class for validation.
for n, s in enumerate(fruit["data"]):
    type_counts[fruit["target"][n]] += 1
    s = s - s.mean()
    n_s = (s - minmin) / float(maxmax - minmin)
    n_s = 2 * n_s - 1
    #n_s = mu_law_transform(n_s, 256)
    if type_counts[fruit["target"][n]] == 15:
        valid_data.append(n_s)
    else:
        train_data.append(n_s)
def _cuts(list_of_audio, cut, step):
# make many overlapping cuts
# 8k, this means offset is ~4ms @ step of 32
real_final = []
real_idx = []
for n, s in enumerate(list_of_audio):
# cut off the end
s = s[:len(s) - len(s) % step]
starts = np.arange(0, len(s) - cut + step, step)
for st in starts:
real_final.append(s[st:st + cut][None, :, None])
real_idx.append(n)
return real_final, real_idx
# Window extraction: 256-sample windows, maximally overlapped (step of 1).
cut = 256
step = 1
train_audio, train_audio_idx = _cuts(train_data, cut, step)
valid_audio, valid_audio_idx = _cuts(valid_data, cut, step)

random_state = np.random.RandomState(1999)

# Conv layer specs: (n_channels, kernel_h, kernel_w, strides).
l1_dim = (64, 1, 4, [1, 1, 2, 1])
l2_dim = (128, 1, 4, [1, 1, 2, 1])
l3_dim = (256, 1, 4, [1, 1, 2, 1])
# NOTE(review): this immediately overwrites l3_dim (256 -> 257), making the
# assignment above dead — confirm which channel count was intended.
l3_dim = (257, 1, 4, [1, 1, 2, 1])
l4_dim = (256, 1, 4, [1, 1, 2, 1])
l5_dim = (257, 1, 1, [1, 1, 1, 1])
embedding_dim = 512
vqvae_batch_size = 50
rnn_batch_size = 50
n_hid = 512
n_clusters = 64
# goes from 256 -> 16
hardcoded_z_len = 16
# reserve 0 for "start code"
n_inputs = embedding_dim + 1
switch_step = 10000
both = True
# reserve 0 for start code
rnn_init = "truncated_normal"
forward_init = "truncated_normal"
l_dims = [l1_dim, l2_dim, l3_dim, l4_dim, l5_dim]
# Total downsampling factor: product of every per-layer stride entry.
stride_div = np.prod([ld[-1] for ld in l_dims])
# Padding for the encoder / decoder convolutions (kernel width 4).
ebpad = [0, 0, 4 // 2 - 1, 0]
dbpad = [0, 0, 4 // 2 - 1, 0]

train_itr_random_state = np.random.RandomState(1122)
valid_itr_random_state = np.random.RandomState(12)
train_itr = ordered_list_iterator([train_audio], train_audio_idx, vqvae_batch_size, random_state=train_itr_random_state)
valid_itr = ordered_list_iterator([valid_audio], valid_audio_idx, vqvae_batch_size, random_state=valid_itr_random_state)
"""
for i in range(10000):
tt = train_itr.next_batch()
# tt[0][3][:, :16] == tt[0][2][:, 16:32]
"""
def create_encoder(inp, bn_flag):
    # Five Conv2d + BatchNorm stages (the first four strided and followed by
    # ReLU); the final stage is 1x1 with no nonlinearity and its batch-normed
    # output is returned as the latent z_e(x).
    l1 = Conv2d([inp], [1], l_dims[0][0], kernel_size=l_dims[0][1:3], name="enc1",
                strides=l_dims[0][-1],
                border_mode=ebpad,
                random_state=random_state)
    bn_l1 = BatchNorm2d(l1, bn_flag, name="bn_enc1")
    r_l1 = ReLU(bn_l1)
    l2 = Conv2d([r_l1], [l_dims[0][0]], l_dims[1][0], kernel_size=l_dims[1][1:3], name="enc2",
                strides=l_dims[1][-1],
                border_mode=ebpad,
                random_state=random_state)
    bn_l2 = BatchNorm2d(l2, bn_flag, name="bn_enc2")
    r_l2 = ReLU(bn_l2)
    l3 = Conv2d([r_l2], [l_dims[1][0]], l_dims[2][0], kernel_size=l_dims[2][1:3], name="enc3",
                strides=l_dims[2][-1],
                border_mode=ebpad,
                random_state=random_state)
    bn_l3 = BatchNorm2d(l3, bn_flag, name="bn_enc3")
    r_l3 = ReLU(bn_l3)
    l4 = Conv2d([r_l3], [l_dims[2][0]], l_dims[3][0], kernel_size=l_dims[3][1:3], name="enc4",
                strides=l_dims[3][-1],
                border_mode=ebpad,
                random_state=random_state)
    bn_l4 = BatchNorm2d(l4, bn_flag, name="bn_enc4")
    r_l4 = ReLU(bn_l4)
    # Final stage: no stride/padding arguments, no ReLU.
    l5 = Conv2d([r_l4], [l_dims[3][0]], l_dims[4][0], kernel_size=l_dims[4][1:3], name="enc5",
                random_state=random_state)
    bn_l5 = BatchNorm2d(l5, bn_flag, name="bn_enc5")
    return bn_l5
def create_decoder(latent, bn_flag):
    # Mirror of the encoder: one Conv2d followed by four strided
    # ConvTranspose2d upsampling stages, finishing with Tanh so the output
    # matches the [-1, 1] range of the normalised audio.
    l1 = Conv2d([latent], [l_dims[-1][0]], l_dims[-2][0], kernel_size=l_dims[-1][1:3], name="dec1",
                random_state=random_state)
    bn_l1 = BatchNorm2d(l1, bn_flag, name="bn_dec1")
    r_l1 = ReLU(bn_l1)
    l2 = ConvTranspose2d([r_l1], [l_dims[-2][0]], l_dims[-3][0], kernel_size=l_dims[-2][1:3], name="dec2",
                         strides=l_dims[-2][-1],
                         border_mode=dbpad,
                         random_state=random_state)
    bn_l2 = BatchNorm2d(l2, bn_flag, name="bn_dec2")
    r_l2 = ReLU(bn_l2)
    l3 = ConvTranspose2d([r_l2], [l_dims[-3][0]], l_dims[-4][0], kernel_size=l_dims[-3][1:3], name="dec3",
                         strides=l_dims[-3][-1],
                         border_mode=dbpad,
                         random_state=random_state)
    bn_l3 = BatchNorm2d(l3, bn_flag, name="bn_dec3")
    r_l3 = ReLU(bn_l3)
    l4 = ConvTranspose2d([r_l3], [l_dims[-4][0]], l_dims[-5][0], kernel_size=l_dims[-4][1:3], name="dec4",
                         strides=l_dims[-4][-1],
                         border_mode=dbpad,
                         random_state=random_state)
    bn_l4 = BatchNorm2d(l4, bn_flag, name="bn_dec4")
    r_l4 = ReLU(bn_l4)
    # Final upsample back to a single channel.
    l5 = ConvTranspose2d([r_l4], [l_dims[-5][0]], 1, kernel_size=l_dims[-5][1:3], name="dec5",
                         strides=l_dims[-5][-1],
                         border_mode=dbpad,
                         random_state=random_state)
    #s_l5 = Sigmoid(l5)
    t_l5 = Tanh(l5)
    return t_l5
def create_vqvae(inp, bn):
    # Full VQ-VAE: encode, vector-quantise the latent, then decode the
    # quantised codes back to a reconstruction x_tilde.
    z_e_x = create_encoder(inp, bn)
    z_q_x, z_i_x, z_nst_q_x, emb = VqEmbedding(z_e_x, l_dims[-1][0], embedding_dim, random_state=random_state, name="embed")
    x_tilde = create_decoder(z_q_x, bn)
    return x_tilde, z_e_x, z_q_x, z_i_x, z_nst_q_x, emb
def create_vqrnn(inp_tm1, inp_t, h1_init, c1_init, h1_q_init, c1_q_init):
    # Two stacked LSTMs scanned over time; the second LSTM's hidden state is
    # vector-quantised at every step, and the first LSTM's output feeds a
    # softmax over the n_inputs code alphabet.
    # NOTE(review): inp_t is accepted but never used in this function —
    # confirm whether the target sequence was meant to be consumed here.
    oh_tm1 = OneHot(inp_tm1, n_inputs)
    p_tm1 = Linear([oh_tm1], [n_inputs], n_hid, random_state=random_state, name="proj",
                   init=forward_init)

    def step(x_t, h1_tm1, c1_tm1, h1_q_tm1, c1_q_tm1):
        # First LSTM: plain recurrence over the projected inputs.
        output, s = LSTMCell([x_t], [n_hid], h1_tm1, c1_tm1, n_hid,
                             random_state=random_state,
                             name="rnn1", init=rnn_init)
        h1_t = s[0]
        c1_t = s[1]
        # Second LSTM: consumes the first LSTM's hidden state; its own
        # hidden state is then vector-quantised.
        output, s = LSTMCell([h1_t], [n_hid], h1_q_tm1, c1_q_tm1, n_hid,
                             random_state=random_state,
                             name="rnn1_q", init=rnn_init)
        h1_cq_t = s[0]
        c1_q_t = s[1]
        h1_q_t, h1_i_t, h1_nst_q_t, h1_emb = VqEmbedding(h1_cq_t, n_hid, n_clusters,
                                                         random_state=random_state,
                                                         name="h1_vq_emb")
        # not great
        h1_i_t = tf.cast(h1_i_t, tf.float32)
        return output, h1_t, c1_t, h1_q_t, c1_q_t, h1_nst_q_t, h1_cq_t, h1_i_t

    # Scan over time; None entries are outputs with no carried initial state.
    r = scan(step, [p_tm1], [None, h1_init, c1_init, h1_q_init, c1_q_init, None, None, None])
    out = r[0]
    hiddens = r[1]
    cells = r[2]
    q_hiddens = r[3]
    q_cells = r[4]
    q_nst_hiddens = r[5]
    q_nvq_hiddens = r[6]
    i_hiddens = r[7]
    pred = Linear([out], [n_hid], n_inputs, random_state=random_state, name="out",
                  init=forward_init)
    pred_sm = Softmax(pred)
    return pred_sm, pred, hiddens, cells, q_hiddens, q_cells, q_nst_hiddens, q_nvq_hiddens, i_hiddens, oh_tm1
def create_graph():
    """Build one tf.Graph holding both the VQ-VAE and the VQ-RNN prior,
    each with its own Adam optimizer, and return (graph, named-tuple model).
    """
    graph = tf.Graph()
    with graph.as_default():
        # vqvae part
        # define all the vqvae inputs and outputs
        vqvae_inputs = tf.placeholder(tf.float32, shape=[None, train_audio[0].shape[0],
                                                         train_audio[0].shape[1],
                                                         train_audio[0].shape[2]])
        bn_flag = tf.placeholder_with_default(tf.zeros(shape=[]), shape=[])
        x_tilde, z_e_x, z_q_x, z_i_x, z_nst_q_x, z_emb = create_vqvae(vqvae_inputs, bn_flag)
        #rec_loss = tf.reduce_mean(BernoulliCrossEntropyCost(x_tilde, images))
        # Standard VQ-VAE objective: reconstruction + codebook + commitment.
        vqvae_rec_loss = tf.reduce_mean(tf.square(x_tilde - vqvae_inputs))
        vqvae_vq_loss = tf.reduce_mean(tf.square(tf.stop_gradient(z_e_x) - z_nst_q_x))
        vqvae_commit_loss = tf.reduce_mean(tf.square(z_e_x - tf.stop_gradient(z_nst_q_x)))
        vqvae_alpha = 1.
        vqvae_beta = 0.25
        vqvae_loss = vqvae_rec_loss + vqvae_alpha * vqvae_vq_loss + vqvae_beta * vqvae_commit_loss
        vqvae_params = get_params_dict()
        # get vqvae keys now, dict is *dynamic* and shared
        vqvae_params_keys = [k for k in vqvae_params.keys()]
        vqvae_grads = tf.gradients(vqvae_loss, vqvae_params.values())
        learning_rate = 0.0002
        vqvae_optimizer = tf.train.AdamOptimizer(learning_rate, use_locking=True)
        assert len(vqvae_grads) == len(vqvae_params)
        j = [(g, p) for g, p in zip(vqvae_grads, vqvae_params.values())]
        vqvae_train_step = vqvae_optimizer.apply_gradients(j)
        # rnn part
        # ultimately we will use 2 calls to feed_dict to make lookup mappings easier, but could do it like this
        #rnn_inputs = tf.cast(tf.stop_gradient(tf.transpose(z_i_x, (2, 0, 1))), tf.float32)
        rnn_inputs = tf.placeholder(tf.float32, shape=[None, rnn_batch_size, 1])
        # Teacher forcing: shift by one step for input vs. target.
        rnn_inputs_tm1 = rnn_inputs[:-1]
        rnn_inputs_t = rnn_inputs[1:]
        init_hidden = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
        init_cell = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
        init_q_hidden = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
        init_q_cell = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
        r = create_vqrnn(rnn_inputs_tm1, rnn_inputs_t, init_hidden, init_cell, init_q_hidden, init_q_cell)
        pred_sm, pred, hiddens, cells, q_hiddens, q_cells, q_nst_hiddens, q_nvq_hiddens, i_hiddens, oh_tm1 = r
        rnn_rec_loss = tf.reduce_mean(CategoricalCrossEntropyIndexCost(pred_sm, rnn_inputs_t))
        #rnn_rec_loss = tf.reduce_mean(CategoricalCrossEntropyLinearIndexCost(pred, rnn_inputs_t))
        rnn_alpha = 1.
        rnn_beta = 0.25
        # Same VQ / commitment split as the VQ-VAE, applied to hidden states.
        rnn_vq_h_loss = tf.reduce_mean(tf.square(tf.stop_gradient(q_nvq_hiddens) - q_nst_hiddens))
        rnn_commit_h_loss = tf.reduce_mean(tf.square(q_nvq_hiddens - tf.stop_gradient(q_nst_hiddens)))
        rnn_loss = rnn_rec_loss + rnn_alpha * rnn_vq_h_loss + rnn_beta * rnn_commit_h_loss
        # Params dict is shared/dynamic: anything added since the VQ-VAE
        # snapshot above belongs to the RNN.
        rnn_params = {k:v for k, v in get_params_dict().items() if k not in vqvae_params_keys}
        rnn_grads = tf.gradients(rnn_loss, rnn_params.values())
        learning_rate = 0.0001
        rnn_optimizer = tf.train.AdamOptimizer(learning_rate, use_locking=True)
        assert len(rnn_grads) == len(rnn_params)
        rnn_grads = [tf.clip_by_value(g, -10., 10.) if g is not None else None for g in rnn_grads]
        j = [(g, p) for g, p in zip(rnn_grads, rnn_params.values())]
        rnn_train_step = rnn_optimizer.apply_gradients(j)
        things_names = ["vqvae_inputs",
                        "bn_flag",
                        "x_tilde",
                        "z_e_x",
                        "z_q_x",
                        "z_i_x",
                        "z_emb",
                        "vqvae_loss",
                        "vqvae_rec_loss",
                        "vqvae_train_step",
                        "rnn_inputs",
                        "rnn_inputs_tm1",
                        "rnn_inputs_t",
                        "init_hidden",
                        "init_cell",
                        "init_q_hidden",
                        "init_q_cell",
                        "hiddens",
                        "cells",
                        "q_hiddens",
                        "q_cells",
                        "q_nvq_hiddens",
                        "i_hiddens",
                        "pred",
                        "pred_sm",
                        "oh_tm1",
                        "rnn_loss",
                        "rnn_rec_loss",
                        "rnn_train_step"]
        # NOTE(review): eval() over a trusted literal list of local names;
        # fine here, but fragile if a name above is ever renamed.
        things_tf = [eval(name) for name in things_names]
        for tn, tt in zip(things_names, things_tf):
            graph.add_to_collection(tn, tt)
        # Expose everything as attribute access via a namedtuple.
        train_model = namedtuple('Model', things_names)(*things_tf)
    return graph, train_model
# Build the graph once; `vs` is the namedtuple of tensors/ops.
g, vs = create_graph()
# Mutable training-phase state, flipped by loop() after switch_step steps.
rnn_train = False
step = 0
def loop(sess, itr, extras, stateful_args):
    """One train/valid iteration: train the VQ-VAE first, then (after
    `switch_step` steps, or always when `both` is set) train the RNN prior
    on the VQ-VAE's discrete codes.  Returns [vqvae_loss, rnn_loss].

    Relies on module globals: rnn_train, step, switch_step, both,
    rnn_batch_size, n_hid — presumably defined earlier in the file.
    """
    x, = itr.next_batch()
    # Zero initial LSTM states for both the plain and quantized layers.
    init_h = np.zeros((rnn_batch_size, n_hid)).astype("float32")
    init_c = np.zeros((rnn_batch_size, n_hid)).astype("float32")
    init_q_h = np.zeros((rnn_batch_size, n_hid)).astype("float32")
    init_q_c = np.zeros((rnn_batch_size, n_hid)).astype("float32")
    global rnn_train
    global step
    if extras["train"]:
        step += 1
        if step > switch_step:
            rnn_train = True
        if both or not rnn_train:
            # Phase 1: update the VQ-VAE (bn_flag 0. == training mode).
            feed = {vs.vqvae_inputs: x,
                    vs.bn_flag: 0.}
            outs = [vs.vqvae_rec_loss, vs.vqvae_loss, vs.vqvae_train_step, vs.z_i_x]
            r = sess.run(outs, feed_dict=feed)
            vqvae_l = r[0]
            vqvae_t_l = r[1]
            vqvae_step = r[2]
        if rnn_train:
            # Phase 2: VQ-VAE frozen (bn_flag 1.), only fetch codes.
            feed = {vs.vqvae_inputs: x,
                    vs.bn_flag: 1.}
            outs = [vs.vqvae_rec_loss, vs.z_i_x]
            r = sess.run(outs, feed_dict=feed)
            vqvae_l = r[0]
            # NOTE(review): r[1] here is z_i_x (the code indices), not a
            # loss — vqvae_t_l is misleadingly named in this branch.
            vqvae_t_l = r[1]
        discrete_z = r[-1]
        #discrete_z[3][:, 2:-2] == discrete_z[4][:, 1:-3]
        #discrete_z = discrete_z[:, :, 1:-2]
        shp = discrete_z.shape
        # always start with 0
        # Prepend a zero start token; shift real codes up by 1 to free id 0.
        rnn_inputs = np.zeros((shp[2] + 1, shp[0], shp[1]))
        rnn_inputs[1:] = discrete_z.transpose(2, 0, 1) + 1.
        if both or rnn_train:
            feed = {vs.rnn_inputs: rnn_inputs,
                    vs.init_hidden: init_h,
                    vs.init_cell: init_c,
                    vs.init_q_hidden: init_q_h,
                    vs.init_q_cell: init_q_c}
            outs = [vs.rnn_rec_loss, vs.rnn_loss, vs.rnn_train_step]
            r = sess.run(outs, feed_dict=feed)
            rnn_l = r[0]
            rnn_t_l = r[1]
            rnn_step = r[2]
        if not rnn_train:
            # RNN not yet training: just monitor its loss, no update op.
            feed = {vs.rnn_inputs: rnn_inputs,
                    vs.init_hidden: init_h,
                    vs.init_cell: init_c,
                    vs.init_q_hidden: init_q_h,
                    vs.init_q_cell: init_q_c}
            outs = [vs.rnn_rec_loss]
            r = sess.run(outs, feed_dict=feed)
            rnn_l = r[0]
    else:
        # Validation: evaluate both losses, no train ops.
        feed = {vs.vqvae_inputs: x,
                vs.bn_flag: 1.}
        outs = [vs.vqvae_rec_loss, vs.z_i_x]
        r = sess.run(outs, feed_dict=feed)
        vqvae_l = r[0]
        discrete_z = r[-1]
        #discrete_z = discrete_z[:, :, 1:-2]
        shp = discrete_z.shape
        # always start with 0
        rnn_inputs = np.zeros((shp[2] + 1, shp[0], shp[1]))
        rnn_inputs[1:] = discrete_z.transpose(2, 0, 1) + 1.
        feed = {vs.rnn_inputs: rnn_inputs,
                vs.init_hidden: init_h,
                vs.init_cell: init_c,
                vs.init_q_hidden: init_q_h,
                vs.init_q_cell: init_q_c}
        outs = [vs.rnn_rec_loss]
        r = sess.run(outs, feed_dict=feed)
        rnn_l = r[0]
    return [vqvae_l, rnn_l], None, stateful_args
# Drive training/validation with the project's run_loop harness;
# the same loop() serves both phases via extras["train"].
with tf.Session(graph=g) as sess:
    run_loop(sess,
             loop, train_itr,
             loop, valid_itr,
             n_steps=75000,
             n_train_steps_per=5000,
             n_valid_steps_per=500)
| 38.189696
| 124
| 0.592506
| 2,493
| 16,307
| 3.560369
| 0.124749
| 0.048333
| 0.030644
| 0.039658
| 0.458427
| 0.403673
| 0.350721
| 0.319739
| 0.30836
| 0.268927
| 0
| 0.044752
| 0.286073
| 16,307
| 426
| 125
| 38.279343
| 0.71766
| 0.050714
| 0
| 0.229651
| 0
| 0
| 0.03218
| 0
| 0
| 0
| 0
| 0
| 0.005814
| 1
| 0.023256
| false
| 0
| 0.078488
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
553eb4733f79df133de3656ed4a77eb050d859d2
| 311
|
py
|
Python
|
scripts/poorscrum/poorscrum_tools.py
|
r09491/poorscrum
|
cdbbc0db03fde842f546093f46e70d03a105bbbd
|
[
"MIT"
] | null | null | null |
scripts/poorscrum/poorscrum_tools.py
|
r09491/poorscrum
|
cdbbc0db03fde842f546093f46e70d03a105bbbd
|
[
"MIT"
] | 7
|
2021-03-18T22:37:46.000Z
|
2022-03-11T23:41:39.000Z
|
scripts/poorscrum/poorscrum_tools.py
|
r09491/poorscrum
|
cdbbc0db03fde842f546093f46e70d03a105bbbd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def fibonacci(n):
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 1).

    Iterative implementation: O(n) time, O(1) space, replacing the naive
    double recursion which is exponential and overflows the recursion
    limit (or loops forever) for negative n.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def story_points(start):
    """Return the smallest Fibonacci number >= start.

    Rounds a raw estimate up to the nearest Fibonacci story-point value.
    Fixes the original hard cap at F(9) = 34 (range(10)), which silently
    returned 34 for any larger `start`; the loop now runs until the
    Fibonacci sequence reaches `start`.  Computed iteratively inline so
    this no longer pays the cost of the recursive fibonacci() per step.
    """
    a, b = 0, 1
    while a < start:
        a, b = b, a + b
    return a
| 17.277778
| 46
| 0.508039
| 42
| 311
| 3.738095
| 0.547619
| 0.191083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045918
| 0.369775
| 311
| 17
| 47
| 18.294118
| 0.755102
| 0.067524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
554005d26d7a3413df01a385a87bf09337208562
| 6,162
|
py
|
Python
|
cata/teachers/ensembles/both_rotation_ensemble.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | 2
|
2021-09-13T01:44:09.000Z
|
2021-12-11T11:56:49.000Z
|
cata/teachers/ensembles/both_rotation_ensemble.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | 8
|
2020-11-13T18:37:30.000Z
|
2022-02-15T15:11:51.000Z
|
cata/teachers/ensembles/both_rotation_ensemble.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | null | null | null |
from typing import List
from typing import Union
import numpy as np
import torch
from cata.teachers.ensembles import base_teacher_ensemble
from cata.utils import custom_functions
class BothRotationTeacherEnsemble(base_teacher_ensemble.BaseTeacherEnsemble):
    """Teacher ensemble (primarily for mean-field limit regime) in which both feature and
    readout similarities are tuned by rotation.
    """
    def __init__(
        self,
        input_dimension: int,
        hidden_dimensions: List[int],
        output_dimension: int,
        bias: bool,
        loss_type: str,
        nonlinearities: str,
        scale_hidden_lr: bool,
        forward_scaling: float,
        unit_norm_teacher_head: bool,
        weight_normalisation: bool,
        noise_stds: Union[int, float],
        num_teachers: int,
        initialisation_std: float,
        feature_rotation_alpha: float,
        readout_rotation_alpha: float,
    ):
        # Stash rotation strengths before calling the base constructor,
        # which presumably triggers _setup_teachers (TODO confirm against
        # BaseTeacherEnsemble) and therefore needs these attributes set.
        self._feature_rotation_alpha = feature_rotation_alpha
        self._readout_rotation_alpha = readout_rotation_alpha
        super().__init__(
            input_dimension=input_dimension,
            hidden_dimensions=hidden_dimensions,
            output_dimension=output_dimension,
            bias=bias,
            loss_type=loss_type,
            nonlinearities=nonlinearities,
            scale_hidden_lr=scale_hidden_lr,
            forward_scaling=forward_scaling,
            unit_norm_teacher_head=unit_norm_teacher_head,
            weight_normalisation=weight_normalisation,
            noise_stds=noise_stds,
            num_teachers=num_teachers,
            initialisation_std=initialisation_std,
        )
    def _setup_teachers(self) -> List:
        """Setup teachers with copies across input to hidden and rotations
        across hidden to output weights.
        Returns:
            The list of configured teacher networks.
        Raises:
            AssertionError: If more than 2 teachers are requested.
            AssertionError: If the network depth is greater than 1,
                i.e. more than one hidden layer requested.
            AssertionError: If the hidden dimension is not greater than 1,
                this is for the notion of rotation to have meaning.
        """
        assert (
            self._num_teachers
        ) == 2, "Both rotation teachers currently implemented for 2 teachers only."
        assert (
            len(self._hidden_dimensions) == 1
        ), "Both rotation teachers currently implemented for 1 hidden layer only."
        assert (
            self._hidden_dimensions[0] > 1
        ), "Both rotation teachers only valid for hidden dimensions > 1."
        teachers = [
            self._init_teacher(
                nonlinearity=self._nonlinearities[i], noise_std=self._noise_stds[i]
            )
            for i in range(self._num_teachers)
        ]
        with torch.no_grad():
            # Teacher 1's feature weights are a controlled rotation of
            # teacher 0's (overlap set by feature_rotation_alpha).
            (
                teacher_0_feature_weights,
                teacher_1_feature_weights,
            ) = self._get_rotated_weights(
                unrotated_weights=teachers[0].layers[0].weight.data.T,
                alpha=self._feature_rotation_alpha,
                normalisation=self._hidden_dimensions[0],
            )
            teachers[0].layers[0].weight.data = teacher_0_feature_weights.T
            teachers[1].layers[0].weight.data = teacher_1_feature_weights.T
            # (
            #     teacher_0_readout_weights,
            #     teacher_1_readout_weights,
            # ) = self._get_rotated_weights(
            #     unrotated_weights=teachers[0].head.weight.data.T,
            #     alpha=self._readout_rotation_alpha,
            #     normalisation=None,
            # )
            # Readout (head) weights are generated as a fresh rotated pair
            # rather than rotating teacher 0's existing head.
            (
                teacher_0_readout_weights,
                teacher_1_readout_weights,
            ) = self._get_rotated_readout_weights(teachers=teachers)
            teachers[0].head.weight.data = teacher_0_readout_weights
            teachers[1].head.weight.data = teacher_1_readout_weights
        return teachers
    def _feature_overlap(self, feature_1: torch.Tensor, feature_2: torch.Tensor):
        # Mean diagonal of the normalized cross-correlation matrix.
        alpha_matrix = torch.mm(feature_1, feature_2.T) / self._hidden_dimensions[0]
        alpha = torch.mean(alpha_matrix.diagonal())
        return alpha
    def _readout_overlap(self, feature_1: torch.Tensor, feature_2: torch.Tensor):
        # Cosine-similarity-style overlap between readout vectors.
        alpha = torch.mm(feature_1, feature_2.T) / (
            torch.norm(feature_1) * torch.norm(feature_2)
        )
        return alpha
    def _get_rotated_weights(
        self,
        unrotated_weights: torch.Tensor,
        alpha: float,
        normalisation: Union[None, int],
    ):
        """Return (orthonormalised weights, alpha-rotated copy with fresh noise)."""
        if normalisation is not None:
            # orthonormalise input to hidden weights of first teacher
            self_overlap = (
                torch.mm(unrotated_weights, unrotated_weights.T) / normalisation
            )
            # NOTE(review): torch.cholesky is deprecated in recent torch in
            # favour of torch.linalg.cholesky — confirm pinned torch version.
            L = torch.cholesky(self_overlap)
            orthonormal_weights = torch.mm(torch.inverse(L), unrotated_weights)
        else:
            orthonormal_weights = unrotated_weights
        # construct input to hidden weights of second teacher
        # alpha * W + sqrt(1 - alpha^2) * noise keeps the expected norm
        # while setting the overlap with W to alpha.
        second_teacher_rotated_weights = alpha * orthonormal_weights + np.sqrt(
            1 - alpha ** 2
        ) * torch.randn(orthonormal_weights.shape)
        return orthonormal_weights, second_teacher_rotated_weights
    def _get_rotated_readout_weights(self, teachers: List):
        """Generate a pair of readout vectors separated by angle arccos(alpha)."""
        theta = np.arccos(self._readout_rotation_alpha)
        # keep current norms
        current_norm = np.mean(
            [torch.norm(teacher.head.weight) for teacher in teachers]
        )
        rotated_weight_vectors = custom_functions.generate_rotated_vectors(
            dimension=self._hidden_dimensions[0],
            theta=theta,
            normalisation=current_norm,
        )
        teacher_0_rotated_weight_tensor = torch.Tensor(
            rotated_weight_vectors[0]
        ).reshape(teachers[0].head.weight.data.shape)
        teacher_1_rotated_weight_tensor = torch.Tensor(
            rotated_weight_vectors[1]
        ).reshape(teachers[1].head.weight.data.shape)
        return teacher_0_rotated_weight_tensor, teacher_1_rotated_weight_tensor
| 35.413793
| 89
| 0.636806
| 676
| 6,162
| 5.492604
| 0.213018
| 0.038783
| 0.026932
| 0.022623
| 0.244008
| 0.157824
| 0.123081
| 0.110154
| 0.083221
| 0.060329
| 0
| 0.011935
| 0.292924
| 6,162
| 173
| 90
| 35.618497
| 0.840257
| 0.140214
| 0
| 0.075
| 0
| 0
| 0.037286
| 0
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5542014f27e11156c75907e597b9852418147144
| 7,176
|
py
|
Python
|
scripts/admin/admin.py
|
starmarek/organize-me
|
710e7acd86e887b7e4379fde18e1f375846ea59e
|
[
"MIT"
] | null | null | null |
scripts/admin/admin.py
|
starmarek/organize-me
|
710e7acd86e887b7e4379fde18e1f375846ea59e
|
[
"MIT"
] | null | null | null |
scripts/admin/admin.py
|
starmarek/organize-me
|
710e7acd86e887b7e4379fde18e1f375846ea59e
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import shlex
import subprocess
from pathlib import Path
from types import SimpleNamespace
import coloredlogs
import fire
from .adminFiles import (
DockerComposeFile,
DotenvFile,
GitlabCIFile,
JsonFile,
PackageJsonFile,
Pipfile,
RuntimeTxtFile,
YarnRCFile,
)
# Module-level wiring: logger, yarn executable discovery, virtualenv path,
# and handles on every version-bearing project file.
log = logging.getLogger("admin")
coloredlogs.install(level="DEBUG")
yarn_dir = ".yarn/releases/"
# NOTE(review): if no file matches, yarn_executable stays unbound and later
# uses raise NameError; also os.getenv may return None, making `in` raise
# TypeError — confirm CORE_YARN_VER is always exported before this runs.
for file in os.listdir(".yarn/releases"):
    if os.getenv("CORE_YARN_VER") in file:
        yarn_executable = file
virtualenv_path = subprocess.run(["pipenv", "--venv"], capture_output=True, text=True, check=True).stdout.strip()
dotenv_file = DotenvFile(path=".env")
compose_file = DockerComposeFile(path="docker-compose.yml")
dotenv_template_file = DotenvFile(path=".template.env")
gitlab_ci_file = GitlabCIFile(path=".gitlab-ci.yml")
yarnrc_file = YarnRCFile(path=".yarnrc.yml")
runtime_txt_file = RuntimeTxtFile(path="runtime.txt")
pipfile_file = Pipfile(path="Pipfile")
package_json_file = PackageJsonFile(path="package.json")
# Files whose embedded core versions must agree with the dotenvs.
verifiable_files = [compose_file, gitlab_ci_file, pipfile_file, runtime_txt_file, package_json_file, yarnrc_file]
def _update_virtualenv_vscode_pythonpath():
    """Point vscode's python.pythonPath at the pipenv virtualenv interpreter.

    Creates the .vscode directory (and settings.json) if missing.
    Improvements over the original: the redundant
    json.loads(json.dumps({...})) round-trip is replaced by the identical
    plain dict, the interpreter path string is built once, and the
    duplicated dump() call is hoisted out of the branch.
    """
    settings_file = JsonFile(path=".vscode/settings.json")
    # Ensure the .vscode directory exists before writing into it.
    Path(settings_file.path.split("/")[-2]).mkdir(exist_ok=True)
    python_path = f"{virtualenv_path}/bin/python"
    if Path(settings_file.path).exists():
        settings_file["python.pythonPath"] = python_path
    else:
        # json.loads(json.dumps(d)) of a str->str dict is just the dict.
        settings_file.data = {"python.pythonPath": python_path}
    settings_file.dump()
    log.info(f"Setting vscode pythonpath to '{python_path}'")
def _install_pre_commit():
    """Install the repo's pre-commit hooks via the virtualenv's pre-commit binary."""
    log.info("Installing pre-commit hooks")
    install_cmd = shlex.split(f"{virtualenv_path}/bin/pre-commit install")
    subprocess.run(install_cmd, check=True)
    log.warning("You need to install shfmt and shellcheck on your computer in order to pre-commit hooks to work.")
def _verify_dotenvs():
    """Assert every CORE_* entry of .env matches the template dotenv."""
    log.info("Verifying dotenvs compatibility")
    core_entries = (
        (key, val) for key, val in dotenv_file.data.items() if key.startswith("CORE")
    )
    assert all(dotenv_template_file[key] == val for key, val in core_entries)
def _verify_yarn_executable():
    """Assert some release in .yarn/releases matches CORE_YARN_VER."""
    log.info("Verifying yarn compatibility")
    wanted_version = os.getenv("CORE_YARN_VER")
    candidates = os.listdir(".yarn/releases")
    assert any(wanted_version in candidate for candidate in candidates)
def _verify_versions():
    """Cross-check core tool versions across all tracked files; raise on mismatch.

    `curr` / `reference` are deliberately reassigned before each check so
    that the AssertionError handler can name the exact pair that disagreed.
    """
    curr = dotenv_file
    reference = dotenv_template_file
    try:
        _verify_dotenvs()
        # Dotenvs agree, so .env becomes the reference for the next checks.
        reference = dotenv_file
        curr = SimpleNamespace(name="files in .yarn/releases dir")
        _verify_yarn_executable()
        log.info("Verifying compatibility of core versions")
        for ver_file in verifiable_files:
            curr = ver_file
            assert ver_file.verify_core_versions()
    except AssertionError:
        log.error(
            f"There is a mismatch between {curr.name} and {reference.name}! Make sure that you are using admin script to bump versions of packages!"
        )
        raise
class CLI:
    """Fire-driven project admin interface: bump core tool versions, manage
    docker-compose containers, and install/remove packages, keeping every
    version-bearing file in sync.
    """
    def __init__(self, vscode=False):
        # Detect vscode terminal; the --vscode flag forces it on.
        try:
            self.running_in_vscode = os.environ["TERM_PROGRAM"] == "vscode"
        except KeyError:
            self.running_in_vscode = False
        if vscode:
            self.running_in_vscode = True
    def update_yarn(self, ver):
        """Bump yarn to `ver` everywhere and rebuild containers."""
        log.info("Upgrading yarn")
        subprocess.run([yarn_dir + yarn_executable, "set", "version", ver], check=True)
        dotenv_template_file["CORE_YARN_VER"] = ver
        dotenv_file["CORE_YARN_VER"] = ver
        dotenv_file.dump_to_env()
        package_json_file["engines"]["yarn"] = ver
        package_json_file.dump()
        self.containers_ground_up(cache=False)
    def update_postgres(self, ver):
        """Bump postgres version in dotenvs and rebuild containers."""
        dotenv_template_file["CORE_POSTGRES_VER"] = ver
        dotenv_file["CORE_POSTGRES_VER"] = ver
        dotenv_file.dump_to_env()
        self.containers_ground_up(cache=False)
    def update_compose(self, ver):
        """Bump the docker-compose file format version and rebuild."""
        # Fire may pass the version as a number; compose expects a string.
        ver = str(ver)
        dotenv_template_file["CORE_COMPOSE_VER"] = ver
        dotenv_file["CORE_COMPOSE_VER"] = ver
        dotenv_file.dump_to_env()
        compose_file["version"] = ver
        compose_file.dump()
        self.containers_ground_up(cache=False)
    def update_python(self, ver):
        """Bump python: recreate the pipenv, then sync every tracked file."""
        log.info("Reinstalling your pipenv")
        subprocess.run(["pipenv", "--rm"], check=True)
        pipfile_file["requires"]["python_version"] = ver
        pipfile_file.dump()
        subprocess.run(["pipenv", "update", "--keep-outdated", "--dev"], check=True)
        dotenv_template_file["CORE_PYTHON_VER"] = ver
        dotenv_file["CORE_PYTHON_VER"] = ver
        dotenv_file.dump_to_env()
        self.containers_ground_up(cache=False)
        gitlab_ci_file["variables"]["PYTHON_VERSION"] = ver
        gitlab_ci_file.dump()
        runtime_txt_file.data = [f"python-{ver}"]
        runtime_txt_file.dump()
        if self.running_in_vscode:
            _update_virtualenv_vscode_pythonpath()
    def update_node(self, ver):
        """Bump node version in dotenvs, CI config, and package.json."""
        dotenv_template_file["CORE_NODE_VER"] = ver
        dotenv_file["CORE_NODE_VER"] = ver
        dotenv_file.dump_to_env()
        self.containers_ground_up(cache=False)
        gitlab_ci_file["variables"]["NODE_VERSION"] = ver
        gitlab_ci_file.dump()
        package_json_file["engines"]["node"] = ver
        package_json_file.dump()
    def containers_build(self, cache=True):
        """docker-compose build, optionally without layer cache."""
        log.info(f"Building containers with 'cache={cache}'")
        subprocess.run(shlex.split(f"docker-compose build --force-rm {'' if cache else '--no-cache'}"), check=True)
    def containers_logs(self, container_name=""):
        """Tail container logs until the user hits Ctrl-C."""
        try:
            subprocess.run(shlex.split(f"docker-compose logs -f {container_name}"))
        except KeyboardInterrupt:
            pass
    def containers_up(self):
        """Start (recreating) all containers detached."""
        log.info("Running containers")
        subprocess.run(shlex.split("docker-compose up --detach --remove-orphans --force-recreate"), check=True)
    def containers_ground_up(self, cache=True):
        """Full rebuild-and-restart cycle."""
        self.containers_build(cache=cache)
        self.containers_up()
    def init(self):
        """First-time project setup: containers, hooks, optional vscode config."""
        self.containers_ground_up(cache=False)
        _install_pre_commit()
        if self.running_in_vscode:
            _update_virtualenv_vscode_pythonpath()
    def install_pip(self, package, dev=False):
        """pipenv-install a package (optionally as dev dep) and rebuild."""
        subprocess.run(shlex.split(f"pipenv install {package} {'--dev' if dev else ''}"), check=True)
        self.containers_ground_up(cache=False)
    def install_yarn(self, package, dev=False):
        """yarn-add a package (optionally as dev dep) and rebuild."""
        subprocess.run(
            shlex.split(f"sudo {yarn_dir + yarn_executable} add {package} {'--dev' if dev else ''}"), check=True
        )
        self.containers_ground_up(cache=False)
    def remove_pip(self, package):
        """pipenv-uninstall a package and rebuild."""
        subprocess.run(["pipenv", "uninstall", package], check=True)
        self.containers_ground_up(cache=False)
    def remove_yarn(self, package):
        """yarn-remove a package and rebuild."""
        subprocess.run(["sudo", yarn_dir + yarn_executable, "remove", package], check=True)
        self.containers_ground_up(cache=False)
# Entry point: verify version consistency before exposing the CLI via fire.
if __name__ == "__main__":
    log.info("Starting admin script")
    _verify_versions()
    fire.Fire(CLI)
| 32.324324
| 148
| 0.67879
| 904
| 7,176
| 5.130531
| 0.202434
| 0.030185
| 0.042691
| 0.034498
| 0.355326
| 0.318025
| 0.256361
| 0.186934
| 0.184994
| 0.154593
| 0
| 0.000175
| 0.20262
| 7,176
| 221
| 149
| 32.470588
| 0.810381
| 0
| 0
| 0.170732
| 0
| 0.006098
| 0.213907
| 0.01937
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.121951
| false
| 0.006098
| 0.060976
| 0
| 0.189024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5543d0392b1a991c4c0bc9b77494d93272ec2802
| 743
|
py
|
Python
|
tests/components/pages/ts.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 15
|
2019-12-19T11:57:30.000Z
|
2021-11-15T23:34:41.000Z
|
tests/components/pages/ts.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 196
|
2019-09-21T15:10:14.000Z
|
2022-03-31T11:07:48.000Z
|
tests/components/pages/ts.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 7
|
2019-10-30T19:38:15.000Z
|
2021-12-01T04:54:16.000Z
|
from dazzler.system import Page
from dazzler.components import core
from tests.components import ts_components as tsc
# Test-fixture page exercising every prop kind of the TypeScript-generated
# components (numbers, strings, arrays, objects, enums, unions, styles).
page = Page(
    __name__,
    core.Container([
        tsc.TypedComponent(
            'override',
            children=core.Container('foobar'),
            num=2,
            text='foobar',
            boo=True,
            arr=[1, 2, 'mixed'],
            arr_str=['foo', 'bar'],
            arr_num=[7, 8, 9],
            arr_obj_lit=[{'name': 'foo'}],
            obj={'anything': 'possible'},
            enumeration='foo',
            union=7,
            style={'border': '1px solid rgb(0,0,255)'},
            class_name='other'
        ),
        tsc.TypedClassComponent('class based', children='clazz')
    ])
)
| 27.518519
| 64
| 0.51144
| 78
| 743
| 4.74359
| 0.628205
| 0.059459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026639
| 0.343203
| 743
| 26
| 65
| 28.576923
| 0.731557
| 0
| 0
| 0
| 0
| 0
| 0.142665
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55454283c60ef0107317118c446ed4395d8f58a5
| 4,464
|
py
|
Python
|
src/gistsgetter/app.py
|
pmfrank/gistsgetter
|
a19f59604ebf1cb13c641d25c4461b4347bba58a
|
[
"MIT"
] | null | null | null |
src/gistsgetter/app.py
|
pmfrank/gistsgetter
|
a19f59604ebf1cb13c641d25c4461b4347bba58a
|
[
"MIT"
] | null | null | null |
src/gistsgetter/app.py
|
pmfrank/gistsgetter
|
a19f59604ebf1cb13c641d25c4461b4347bba58a
|
[
"MIT"
] | null | null | null |
"""
An application dedicated to creating, editing, and deleting Gists in GitHub
"""
from __future__ import absolute_import
import toga
import pyperclip
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from .common.Search import search
from functools import partial
class GistsGetter(toga.App):
    """Toga app for searching GitHub gists and copying their contents.

    Bug fixed vs. original: the all-fields-required check tested
    `not self.pw_input` — the widget object, which is always truthy — so an
    empty password slipped through validation.  It now tests
    `self.pw_input.value` like the other two fields.
    """
    def startup(self):
        """
        Construct and show the Toga application.
        Usually, you would add your application to a main content box.
        We then create a main window (with a name matching the app), and
        show the main window.
        """
        main_box = toga.Box(style=Pack(direction=COLUMN))
        top_box = toga.Box(style=Pack(direction=ROW, padding=5, alignment='top'))
        middle_box = toga.Box(style=Pack(direction=ROW,padding=5, alignment='center', flex=1))
        button_box = toga.Box(style=Pack(padding=5, alignment='right'))
        bottom_box = toga.Box(style=Pack(direction=ROW, padding=(5,5,20,5), alignment='bottom')) # Padding - Top, Right, Botom, Left
        select_label = toga.Label('Search By', style=Pack(padding=5, alignment='center'))
        self.select = toga.Selection(items=['UserID','GistID'])
        self.select_input = toga.TextInput(style=Pack(padding=5, flex=1),placeholder='User or Gist ID')
        # Line preserved for prostarity will be using helper functions to do search with externale functions
        # select_button = toga.Button('Search',style=Pack(padding=5),on_press=partial(search,string = 'x'))
        select_button = toga.Button('Search', style=Pack(padding=5), on_press=self.search_by)
        self.results = toga.MultilineTextInput(style=Pack(padding=(0,5), flex=1),readonly = True)
        copy_button = toga.Button('Copy to Clipboard', style=Pack(padding=5),on_press=self.copy_to_clipboard)
        button_box.add(copy_button)
        middle_box.add(self.results)
        middle_box.add(button_box)
        top_box.add(select_label)
        top_box.add(self.select)
        top_box.add(self.select_input)
        top_box.add(select_button)
        login_label = toga.Label('Username', style=Pack(padding=5, alignment='left'))
        self.login_input = toga.TextInput(style=Pack(padding=5,alignment='left',flex=1))
        pw_label = toga.Label('Password', style=Pack(padding=5, alignment='right'))
        self.pw_input = toga.PasswordInput(style=Pack(padding=4,alignment='right',flex=1))
        bottom_box.add(login_label)
        bottom_box.add(self.login_input)
        bottom_box.add(pw_label)
        bottom_box.add(self.pw_input)
        main_box.add(top_box)
        main_box.add(middle_box)
        main_box.add(bottom_box)
        self.main_window = toga.MainWindow(title=self.formal_name, size=(640,480))
        self.main_window.content = main_box
        self.main_window.show()
    def search_by(self, widget):
        """Handle the Search button: validate inputs and run the gist lookup."""
        global results
        # Fix: check the password *value*, not the widget object.
        if not self.select_input.value or not self.login_input.value or not self.pw_input.value:
            self.results.value = 'All fields required'
            return
        if self.select.value == 'UserID':
            self.results.value = 'Feature not implemented'
            return
        else:
            # NOTE(review): module-level globals feed __get_token's
            # globals() lookup — fragile, kept for compatibility.
            global gist_id
            gist_id = self.select_input.value
            url = self.__get_token('https://api.github.com/gists{/gist_id}','{')
            results = search(url, self.login_input.value,self.pw_input.value)
            for filename in results:
                print(results[filename])
            self.results.value = results
    def copy_to_clipboard(self, widget):
        """Copy each fetched gist file's content to the system clipboard."""
        global results
        # NOTE(review): raises NameError if pressed before any search — confirm intended.
        for filename in results:
            pyperclip.copy(results[filename])
    def __get_token(self, string, delim):
        """Expand `{token}` placeholders in a URL template from globals().

        Tokens present in globals() are substituted (joined with '/' or '&='
        depending on position); unknown tokens are dropped.
        """
        tokens = string.split(delim)
        url = tokens[0]
        for token in tokens[1:]:
            token = token[:-1]
            if '/' in token : token = token[1:]
            if token in globals():
                if '=' in url:
                    url = url + globals()[token]
                else:
                    url = url + '/' + globals()[token]
            if ',' in token:
                token = token[1:]
                print(token)
                multitokens = token.split(',')
                for multitoken in multitokens:
                    if multitoken in globals():
                        url = url + '&' + multitoken + '=' + globals()[multitoken]
        return url
def main():
    """Factory entry point expected by the toga/briefcase launcher."""
    app = GistsGetter()
    return app
| 38.817391
| 132
| 0.622536
| 573
| 4,464
| 4.720768
| 0.242583
| 0.053235
| 0.065065
| 0.056562
| 0.24695
| 0.190018
| 0.126063
| 0.088355
| 0.088355
| 0.073937
| 0
| 0.010632
| 0.262545
| 4,464
| 114
| 133
| 39.157895
| 0.811057
| 0.111783
| 0
| 0.097561
| 0
| 0
| 0.054504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060976
| false
| 0.02439
| 0.085366
| 0.012195
| 0.207317
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5549b2fc2c6d6a256c772a1fa6b1cb0ba16583fe
| 7,401
|
py
|
Python
|
src/qcar/src/qcar/q_essential.py
|
bchampp/scylla
|
6ec27877cc03c200a874cd0eb25a36c866471250
|
[
"MIT"
] | null | null | null |
src/qcar/src/qcar/q_essential.py
|
bchampp/scylla
|
6ec27877cc03c200a874cd0eb25a36c866471250
|
[
"MIT"
] | null | null | null |
src/qcar/src/qcar/q_essential.py
|
bchampp/scylla
|
6ec27877cc03c200a874cd0eb25a36c866471250
|
[
"MIT"
] | null | null | null |
from quanser.hardware import HIL, HILError, PWMMode
from quanser.multimedia import Video3D, VideoCapture, Video3DStreamType, MediaError, ImageFormat, ImageDataType
from quanser.devices import RPLIDAR, RangingMeasurements, RangingMeasurementMode, DeviceError, RangingDistance
from .q_misc import Utilities
import numpy as np
import pygame
import time
# Re-export the saturation helper so callers can use q_essential.saturate().
saturate = Utilities.saturate
# region: Cameras
class Camera3D():
    """Wrapper around the Intel RealSense RGB + depth streams (Quanser Video3D).

    Frames are read into the pre-allocated image_buffer_* arrays; read_*
    methods return the frame timestamp (or -1 on failure).
    """
    def __init__(self, mode='RGB&DEPTH', frame_width_RGB=1920, frame_height_RGB=1080, frame_rate_RGB=30.0, frame_width_depth=1280, frame_height_depth=720, frame_rate_depth=15.0, device_id='0'):
        '''This function configures the Intel Realsense RGB and depth cameras for use.
        Outputs:
        video3d - video3d object, you must call video3d.start_streaming() before your main loop
        stream_RGB - stream object to be passed to the read method
        image_buffer_RGB - buffer array that will be updated by the read method
        stream_depth - stream object to be passed to the read method
        image_buffer_depth - buffer array that will be updated by the read method'''
        self.mode = mode
        self.stream_index = 0
        # Pre-allocated destination buffers filled in-place by the reads.
        self.image_buffer_RGB = np.zeros((frame_height_RGB, frame_width_RGB, 3), dtype=np.uint8)
        self.image_buffer_depth_px = np.zeros((frame_height_depth, frame_width_depth, 1), dtype=np.uint8)
        self.image_buffer_depth_m = np.zeros((frame_height_depth, frame_width_depth, 1), dtype=np.float32)
        try:
            self.video3d = Video3D(device_id)
            # Open only the streams the requested mode needs.
            if mode == 'RGB':
                self.stream_RGB = self.video3d.stream_open(Video3DStreamType.COLOR, self.stream_index, frame_rate_RGB, frame_width_RGB, frame_height_RGB, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8)
            elif mode == 'DEPTH':
                self.stream_depth = self.video3d.stream_open(Video3DStreamType.DEPTH, self.stream_index, frame_rate_depth, frame_width_depth, frame_height_depth, ImageFormat.ROW_MAJOR_GREYSCALE, ImageDataType.UINT8)
            else:
                self.stream_RGB = self.video3d.stream_open(Video3DStreamType.COLOR, self.stream_index, frame_rate_RGB, frame_width_RGB, frame_height_RGB, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8)
                self.stream_depth = self.video3d.stream_open(Video3DStreamType.DEPTH, self.stream_index, frame_rate_depth, frame_width_depth, frame_height_depth, ImageFormat.ROW_MAJOR_GREYSCALE, ImageDataType.UINT8)
            self.video3d.start_streaming()
        except MediaError as me:
            print(me.get_error_message())
    def terminate(self):
        '''This function terminates the RGB and depth video and stream objects correctly.
        Inputs:
        video3d - video object from the configure method
        stream_RGB - RGB stream object from the configure method
        stream_depth - depth stream object from the configure method '''
        try:
            self.video3d.stop_streaming()
            if self.mode == 'RGB':
                self.stream_RGB.close()
            elif self.mode == 'DEPTH':
                self.stream_depth.close()
            else:
                self.stream_RGB.close()
                self.stream_depth.close()
            self.video3d.close()
        except MediaError as me:
            print(me.get_error_message())
    def read_RGB(self):
        '''This function reads an image from the RGB camera for use.
        Outputs:
        timestamp - timestamp corresponding to the frame read '''
        timestamp = -1
        try:
            # Busy-wait until a frame is available.
            frame = self.stream_RGB.get_frame()
            while not frame:
                frame = self.stream_RGB.get_frame()
            frame.get_data(self.image_buffer_RGB)
            timestamp = frame.get_timestamp()
            frame.release()
        except KeyboardInterrupt:
            pass
        except MediaError as me:
            print(me.get_error_message())
        finally:
            return timestamp
    def read_depth(self, dataMode='px'):
        '''This function reads an image from the depth camera for use.
        dataMode is 'px' for pixels or 'm' for meters. Use corresponding image buffer.
        Outputs:
        timestamp - timestamp corresponding to the frame read '''
        timestamp = -1
        try:
            frame = self.stream_depth.get_frame()
            while not frame:
                frame = self.stream_depth.get_frame()
            # 'px' fills the uint8 buffer, 'm' the float32 (meters) buffer.
            if dataMode == 'px':
                frame.get_data(self.image_buffer_depth_px)
            elif dataMode == 'm':
                frame.get_meters(self.image_buffer_depth_m)
            timestamp = frame.get_timestamp()
            frame.release()
        except KeyboardInterrupt:
            pass
        except MediaError as me:
            print(me.get_error_message())
        finally:
            return timestamp
class Camera2D():
    """Wrapper around a local 2D USB camera stream (Quanser VideoCapture).

    read() fills self.image_data in place with the latest BGR frame.
    """
    def __init__(self, camera_id="0", frame_width=640, frame_height=480, frame_rate=30.0):
        '''This function configures the 2D camera for use based on the camera_id provided.'''
        self.url = "video://localhost:"+camera_id
        # Pre-allocated BGR frame buffer updated in-place by read().
        self.image_data = np.zeros((frame_height, frame_width, 3), dtype=np.uint8)
        try:
            # self.capture = VideoCapture(self.url, frame_rate, frame_width, frame_height, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8, self.image_data, None, 0)
            self.capture = VideoCapture(self.url, frame_rate, frame_width, frame_height, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8, None, 0)
            self.capture.start()
        except MediaError as me:
            print(me.get_error_message())
    def read(self):
        '''This function reads a frame, updating the corresponding image buffer.'''
        try:
            # self.capture.read()
            self.capture.read(self.image_data)
        except MediaError as me:
            print(me.get_error_message())
        except KeyboardInterrupt:
            print('User Interupted')
    def reset(self):
        '''This function resets the 2D camera stream by stopping and starting the capture service.'''
        try:
            self.capture.stop()
            self.capture.start()
        except MediaError as me:
            print(me.get_error_message())
    def terminate(self):
        '''This function terminates the 2D camera operation.'''
        try:
            self.capture.stop()
            self.capture.close()
        except MediaError as me:
            print(me.get_error_message())
# endregion
# region: LIDAR
class LIDAR():
    '''Wrapper around the RPLIDAR ranging sensor.

    read() refreshes the `distances` and `angles` arrays from the most
    recent scan of `num_measurements` samples.
    '''

    # DeviceError code that is deliberately silenced on open/close/read.
    # NOTE(review): presumably "device already open/closed"; confirm against
    # the DeviceError code table before relying on it.
    _IGNORED_ERROR_CODE = -34

    def __init__(self, num_measurements=720):
        '''Open the LIDAR over the serial port, taking num_measurements samples per scan.'''
        self.num_measurements = num_measurements
        self.measurements = RangingMeasurements(num_measurements)
        self.distances = np.zeros((num_measurements,1), dtype=np.float64)
        self.angles = np.zeros((num_measurements,1), dtype=np.float64)
        self.lidar = RPLIDAR()
        try:
            self.lidar.open("serial-cpu://localhost:2?baud='115200',word='8',parity='none',stop='1',flow='none',dsr='on'", RangingDistance.LONG)
        except DeviceError as de:
            self._print_device_error(de)

    @staticmethod
    def _print_device_error(de):
        '''Report a DeviceError unless its code is the one we silently ignore.'''
        if de.error_code != LIDAR._IGNORED_ERROR_CODE:
            print(de.get_error_message())

    def terminate(self):
        '''Close the LIDAR device.'''
        try:
            self.lidar.close()
        except DeviceError as de:
            self._print_device_error(de)

    def read(self):
        '''Take one scan and refresh the distance and heading arrays.'''
        try:
            self.lidar.read(RangingMeasurementMode.NORMAL, 0, 0, self.measurements)
            self.distances = np.array(self.measurements.distance)
            self.angles = np.array(self.measurements.heading)
        except DeviceError as de:
            self._print_device_error(de)
# endregion
| 37.190955
| 211
| 0.740576
| 1,026
| 7,401
| 5.154971
| 0.190058
| 0.032142
| 0.031197
| 0.030251
| 0.587068
| 0.551333
| 0.485536
| 0.463036
| 0.426168
| 0.418227
| 0
| 0.01822
| 0.154574
| 7,401
| 199
| 212
| 37.190955
| 0.827074
| 0.284961
| 0
| 0.616541
| 0
| 0.007519
| 0.03
| 0.0175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082707
| false
| 0.037594
| 0.052632
| 0
| 0.172932
| 0.090226
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
554a7b61e03b3173856a7a579bde9d2c36a7f575
| 1,689
|
py
|
Python
|
ex071.py
|
cristianoandrad/ExerciciosPythonCursoEmVideo
|
362603436b71c8ef8386d7a9ab3c5fed0b8d63f7
|
[
"MIT"
] | null | null | null |
ex071.py
|
cristianoandrad/ExerciciosPythonCursoEmVideo
|
362603436b71c8ef8386d7a9ab3c5fed0b8d63f7
|
[
"MIT"
] | null | null | null |
ex071.py
|
cristianoandrad/ExerciciosPythonCursoEmVideo
|
362603436b71c8ef8386d7a9ab3c5fed0b8d63f7
|
[
"MIT"
] | null | null | null |
'''ex071 - ATM simulator (CursoEmVideo exercise).

Ask the user for an integer amount to withdraw and report how many bills
of each denomination (R$50, R$20, R$10 and R$1) are dispensed.
'''

# Denominations handled by the ATM, from largest to smallest.
CEDULAS = (50, 20, 10, 1)


def contar_cedulas(valor):
    '''Return a list of (denomination, count) pairs covering *valor*.

    Mirrors the original loop's output: denominations are reported in
    descending order and the list stops at the denomination that brings
    the remaining amount to zero (so trailing denominations are omitted,
    while intermediate zero counts are still reported).
    '''
    # Robustness fix: the original loop never terminated for negative
    # amounts; treat them as a zero withdrawal instead.
    restante = max(valor, 0)
    resultado = []
    for cedula in CEDULAS:
        quantidade, restante = divmod(restante, cedula)
        resultado.append((cedula, quantidade))
        if restante == 0:
            break
    return resultado


def main():
    '''Prompt for an amount and print the bill breakdown.'''
    print('--' * 15)
    print('{:^30}'.format('Banco CEV'))
    print('--' * 15)
    valor = int(input('Qual o valor que você quer sacar R$ '))
    for cedula, quantidade in contar_cedulas(valor):
        # Typo fix: the original printed "céluldas"; the correct word is "cédulas".
        print(f'O total de {quantidade} cédulas de R$ {cedula}.')


if __name__ == '__main__':
    main()
| 27.241935
| 227
| 0.587922
| 282
| 1,689
| 3.521277
| 0.297872
| 0.054381
| 0.04431
| 0.052367
| 0.2286
| 0.151057
| 0.151057
| 0.151057
| 0.151057
| 0.151057
| 0
| 0.119462
| 0.251628
| 1,689
| 61
| 228
| 27.688525
| 0.666139
| 0.170515
| 0
| 0.181818
| 0
| 0
| 0.190196
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
554c5ff1d984eee7cf69842945a06a7b43f122ff
| 919
|
py
|
Python
|
common.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 23
|
2016-09-07T06:13:37.000Z
|
2022-02-17T23:49:03.000Z
|
common.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | null | null | null |
common.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 12
|
2016-06-30T17:27:39.000Z
|
2021-12-12T07:54:27.000Z
|
import itertools
import math
import simulate
import harvesting
import plot
from decimal import setcontext, ExtendedContext
# Don't raise exception when we divide by zero
#setcontext(ExtendedContext)
#getcontext().prec = 5
def compare_prime_vs_rebalancing(series, years=30, title=''):
    """Plot withdrawals under Prime Harvesting vs annual rebalancing.

    *series* (an iterable of market returns) is consumed twice via tee:
    once with the default harvesting strategy and once with 60/40 annual
    rebalancing. The y-axis ceiling is rounded up to a tidy figure.
    """
    prime_returns, rebal_returns = itertools.tee(series)
    prime = [yr.withdraw_r for yr in simulate.withdrawals(prime_returns, years=years)]
    rebal = [yr.withdraw_r
             for yr in simulate.withdrawals(rebal_returns, years=years,
                                            harvesting=harvesting.N_60_RebalanceHarvesting)]
    peak = max(max(prime), max(rebal))
    # Round the ceiling up to the nearest 10k (or 100k for large values).
    rounding = 10000 if peak < 100000 else 100000
    ceiling = int(math.ceil(peak / rounding) * rounding)
    plot.plot_two(prime, rebal, s1_title='Prime Harvesting', s2_title='Annual Rebalancing',
                  y_lim=[0, ceiling],
                  x_label='Year of Retirement', title=title)
| 31.689655
| 93
| 0.677911
| 124
| 919
| 4.927419
| 0.5
| 0.081833
| 0.032733
| 0.042553
| 0.134206
| 0.052373
| 0
| 0
| 0
| 0
| 0
| 0.063624
| 0.213275
| 919
| 28
| 94
| 32.821429
| 0.781466
| 0.100109
| 0
| 0
| 0
| 0
| 0.063183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.3
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
554ef62e12daf1b4dd0a910c08086098d9a39602
| 769
|
py
|
Python
|
tests/hdx/scraper/test_utils.py
|
mcarans/hdx-python-scraper
|
ce17c672591979d4601bd125a38b86ea81a9f3c4
|
[
"MIT"
] | null | null | null |
tests/hdx/scraper/test_utils.py
|
mcarans/hdx-python-scraper
|
ce17c672591979d4601bd125a38b86ea81a9f3c4
|
[
"MIT"
] | null | null | null |
tests/hdx/scraper/test_utils.py
|
mcarans/hdx-python-scraper
|
ce17c672591979d4601bd125a38b86ea81a9f3c4
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from hdx.data.dataset import Dataset
from hdx.scraper.utilities import (
get_isodate_from_dataset_date,
string_params_to_dict,
)
class TestUtils:
    """Unit tests for the hdx.scraper.utilities helpers."""

    def test_string_params_to_dict(self):
        """Both spaced and compact "k: v" strings parse to the same dict."""
        expected = {"a": "123", "b": "345"}
        for params in ("a: 123, b: 345", "a:123,b:345"):
            assert string_params_to_dict(params) == expected

    def test_get_isodate_from_dataset_date(self, configuration):
        """The start of a dataset's date range is returned as YYYY-MM-DD."""
        date_range = "[2022-01-11T02:24:08.241 TO 2022-01-11T02:24:08.241]"
        dataset = Dataset({"dataset_date": date_range})
        assert get_isodate_from_dataset_date(dataset, datetime.now()) == "2022-01-11"
| 29.576923
| 86
| 0.629389
| 103
| 769
| 4.436893
| 0.339806
| 0.09628
| 0.122538
| 0.157549
| 0.47046
| 0.306346
| 0.227571
| 0.227571
| 0.227571
| 0.227571
| 0
| 0.113597
| 0.244473
| 769
| 25
| 87
| 30.76
| 0.672978
| 0
| 0
| 0.2
| 0
| 0.05
| 0.149545
| 0.062419
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
554fb560fa2735d2073c8f53fb708577f43575e0
| 3,796
|
py
|
Python
|
store/models.py
|
Dokeey/Buy-Sell
|
9d70eb8649d79962657cc4be896e437908de537b
|
[
"MIT"
] | 7
|
2019-03-25T14:43:41.000Z
|
2021-09-16T01:44:41.000Z
|
store/models.py
|
Dokeey/Buy-Sell
|
9d70eb8649d79962657cc4be896e437908de537b
|
[
"MIT"
] | 80
|
2019-03-25T09:25:00.000Z
|
2020-02-09T01:01:09.000Z
|
store/models.py
|
Dokeey/Buy-Sell
|
9d70eb8649d79962657cc4be896e437908de537b
|
[
"MIT"
] | 4
|
2019-03-25T13:58:07.000Z
|
2021-11-26T09:12:32.000Z
|
from random import randrange
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from hitcount.models import HitCountMixin, HitCount
from imagekit.models import ProcessedImageField
from pilkit.processors import ResizeToFill
from django_cleanup import cleanup
from store.fields import DefaultStaticProcessedImageField
def get_random():
    """Return the static path of one of the nine default profile images, chosen at random."""
    index = randrange(1, 10)  # 1..9 inclusive
    return f'/static/profile/{index}.png'
# @cleanup.ignore
class StoreProfile(models.Model, HitCountMixin):
    """Per-user store profile: one store per user account, with hit counting.

    HitCountMixin plus the hit_count_generic relation wire this model into
    django-hitcount's view tracking.
    """
    # Exactly one store per user; deleting the user deletes the store.
    user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name="유저", on_delete=models.CASCADE)
    # Store name, globally unique.
    name = models.CharField(max_length=20, verbose_name="가게명", unique=True)
    # Store photo, resized to 200x200 JPEG on upload; falls back to a
    # static default (see DefaultStaticProcessedImageField in store.fields).
    photo = DefaultStaticProcessedImageField(
        verbose_name="가게 사진",
        null=True,
        upload_to='profile/storephoto',
        processors=[ResizeToFill(200, 200)],
        format='JPEG',
        options={'quality': 60}
    )
    # Free-text introduction shown on the store page.
    comment = models.TextField(max_length=200, blank=True, verbose_name="소개", default="반갑습니다.")
    created_at = models.DateTimeField(verbose_name="생성일", auto_now_add=True)
    # Generic relation used by django-hitcount to attach view counts.
    hit_count_generic = GenericRelation(HitCount, object_id_field='object_pk',
                                        related_query_name='hit_count_generic_relation')

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = "가게"
        verbose_name_plural = "가게"
        # Newest stores first.
        ordering = ['-id']
from django.contrib.auth import get_user_model
User = get_user_model()

# Resolve the fallback "deleteuser" account once at import time; it is used
# below as the SET_DEFAULT author for comments whose real author is deleted.
# The except is broad (but no longer bare: a bare 'except:' also swallows
# SystemExit/KeyboardInterrupt) because this runs at import time, when the
# auth table may not exist yet (e.g. during initial migrations).
try:
    user_pk = User.objects.get(username='deleteuser').id
except Exception:
    user_pk = None
class QuestionComment(models.Model):
    """Customer question/comment on a store page, with one level of replies."""
    store_profile = models.ForeignKey(StoreProfile, verbose_name="가게", on_delete=models.CASCADE)
    # Author field chosen at import time: if the fallback "deleteuser"
    # account exists (user_pk above), comments are reassigned to it when
    # their author is deleted; otherwise they are cascade-deleted.
    if user_pk:
        author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.SET_DEFAULT, default=user_pk)
    else:
        author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.CASCADE)
    comment = models.TextField(verbose_name="문의글", max_length=1000)
    created_at = models.DateTimeField(verbose_name="작성일", auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name="최근 업데이트", auto_now=True)
    # Self-reference for threaded replies; top-level comments have parent=None.
    parent = models.ForeignKey('self', verbose_name="상위 댓글", null=True, blank=True, related_name='replies', on_delete=models.CASCADE)

    def __str__(self):
        # NOTE(review): assumes the author still has a StoreProfile; raises
        # RelatedObjectDoesNotExist otherwise -- confirm callers guard this.
        return self.author.storeprofile.name

    class Meta:
        # Newest comments first.
        ordering = ('-created_at',)
        verbose_name = "가게 문의"
        verbose_name_plural = "가게 문의"
from trade.models import Item
class StoreGrade(models.Model):
    """Buyer's 1-5 star review of a store, optionally tied to a purchased item."""
    store_profile = models.ForeignKey(StoreProfile, verbose_name="가게", on_delete=models.CASCADE)
    # Same import-time fallback as QuestionComment: reassign reviews to the
    # "deleteuser" account on author deletion when it exists, else cascade.
    if user_pk:
        author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.SET_DEFAULT ,default=user_pk)
    else:
        author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.CASCADE)
    # The reviewed item; kept (as NULL) even if the item itself is deleted.
    store_item = models.ForeignKey(Item, verbose_name="구매한 물품", on_delete=models.SET_NULL, null=True)
    grade_comment = models.TextField(verbose_name="물품평", max_length=250)
    # Star rating 1-5, displayed as filled/empty stars; default 0 means unrated.
    rating = models.PositiveIntegerField(
        verbose_name="점수",
        choices=(
            (1, '★☆☆☆☆'),
            (2, '★★☆☆☆'),
            (3, '★★★☆☆'),
            (4, '★★★★☆'),
            (5, '★★★★★')
        ),
        default=0,
        db_index=True
    )
    created_at = models.DateTimeField(verbose_name="작성일", auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name="최근 업데이트", auto_now=True)

    def __str__(self):
        # NOTE(review): assumes the author still has a StoreProfile -- see
        # QuestionComment.__str__.
        return self.author.storeprofile.name

    class Meta:
        # Newest reviews first.
        ordering = ('-created_at',)
        verbose_name = "가게 평점"
        verbose_name_plural = "가게 평점"
| 36.5
| 133
| 0.692308
| 469
| 3,796
| 5.420043
| 0.30064
| 0.11251
| 0.049567
| 0.049567
| 0.425256
| 0.391424
| 0.363493
| 0.363493
| 0.363493
| 0.363493
| 0
| 0.00944
| 0.190727
| 3,796
| 104
| 134
| 36.5
| 0.809896
| 0.003952
| 0
| 0.285714
| 0
| 0
| 0.067989
| 0.012698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.130952
| 0.035714
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5555b6c3e07de5a90e04d4e0ebe99f3c40e0594c
| 1,587
|
py
|
Python
|
experts/siamdw.py
|
songheony/AAA-journal
|
4306fac0afe567269b8d2f1cbef2a1c398fdde82
|
[
"MIT"
] | 9
|
2020-07-07T09:03:07.000Z
|
2021-04-22T03:38:49.000Z
|
experts/siamdw.py
|
songheony/AAA-journal
|
4306fac0afe567269b8d2f1cbef2a1c398fdde82
|
[
"MIT"
] | null | null | null |
experts/siamdw.py
|
songheony/AAA-journal
|
4306fac0afe567269b8d2f1cbef2a1c398fdde82
|
[
"MIT"
] | 1
|
2021-07-31T19:26:52.000Z
|
2021-07-31T19:26:52.000Z
|
import sys
import numpy as np
import cv2
from easydict import EasyDict as edict
from base_tracker import BaseTracker
import path_config
sys.path.append("external/SiamDW/lib")
from tracker.siamrpn import SiamRPN
import models.models as models
from utils.utils import load_pretrain
class SiamDW(BaseTracker):
    """SiamRPN-based tracker ("SiamDW") wrapping the external SiamDW library.

    Loads the pretrained SiamRPNRes22 network onto the GPU and exposes the
    BaseTracker initialize/track interface, operating on image file paths
    and (x, y, w, h) bounding boxes.
    """

    def __init__(self):
        super().__init__("SiamDW")
        net_file = path_config.SIAMDW_MODEL
        # Tracker configuration expected by SiamRPN.
        info = edict()
        info.arch = "SiamRPNRes22"
        info.dataset = "OTB2015"
        info.epoch_test = False
        info.cls_type = "thinner"
        self.tracker = SiamRPN(info)
        self.net = models.__dict__[info.arch](anchors_nums=5, cls_type=info.cls_type)
        self.net = load_pretrain(self.net, net_file)
        self.net.eval()
        self.net = self.net.cuda()

    @staticmethod
    def _read_image(image_file):
        """Read *image_file* with OpenCV, promoting grayscale frames to 3-channel BGR.

        Extracted to remove the duplicated load-and-convert code that was in
        both initialize() and track().
        """
        image = cv2.imread(image_file)
        if len(image.shape) == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        return image

    def initialize(self, image_file, box):
        """Start tracking *box* = (x, y, w, h) in the first frame *image_file*."""
        image = self._read_image(image_file)
        # SiamRPN expects centre point + size rather than a corner-anchored box.
        center = np.array([box[0] + (box[2] - 1) / 2, box[1] + (box[3] - 1) / 2])
        size = np.array([box[2], box[3]])
        self.state = self.tracker.init(image, center, size, self.net)

    def track(self, image_file):
        """Advance tracking into *image_file*; return the (x, y, w, h) box."""
        image = self._read_image(image_file)
        self.state = self.tracker.track(self.state, image)
        center = self.state["target_pos"]
        size = self.state["target_sz"]
        # Convert centre + size back to a top-left-anchored box.
        bbox = (center[0] - size[0] / 2, center[1] - size[1] / 2, size[0], size[1])
        return bbox
| 34.5
| 85
| 0.628859
| 221
| 1,587
| 4.366516
| 0.325792
| 0.050777
| 0.022798
| 0.039378
| 0.157513
| 0.157513
| 0.157513
| 0.157513
| 0.157513
| 0.157513
| 0
| 0.02995
| 0.242596
| 1,587
| 45
| 86
| 35.266667
| 0.772879
| 0
| 0
| 0.15
| 0
| 0
| 0.044108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.225
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
555695e92a72c35957e937841df7b620e7484601
| 3,346
|
py
|
Python
|
serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py
|
DylanSpicker/SerpentAI
|
c48c4b072e0d1084a52eac569ad1c7fa02ac7348
|
[
"MIT"
] | null | null | null |
serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py
|
DylanSpicker/SerpentAI
|
c48c4b072e0d1084a52eac569ad1c7fa02ac7348
|
[
"MIT"
] | null | null | null |
serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py
|
DylanSpicker/SerpentAI
|
c48c4b072e0d1084a52eac569ad1c7fa02ac7348
|
[
"MIT"
] | null | null | null |
import math
import torch
class DQN(torch.nn.Module):
    """Dueling DQN head with noisy linear layers for a Rainbow-style agent.

    forward() returns per-action value distributions of shape
    (batch, action_space, atoms): raw values in quantile mode, otherwise a
    softmax over the atoms dimension.
    """
    def __init__(self, action_space, history=4, hidden_size=512, noisy_std=0.1, quantile=True):
        super().__init__()
        # 200 quantiles in quantile mode, otherwise 51 distribution atoms.
        self.atoms = 200 if quantile else 51
        self.action_space = action_space
        self.quantile = quantile
        # Conv trunk over `history` stacked frames.
        self.conv1 = torch.nn.Conv2d(history, 32, 8, stride=4, padding=1)
        self.conv2 = torch.nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = torch.nn.Conv2d(64, 64, 3)
        # Dueling streams: value (v) and advantage (a), each a 2-layer noisy MLP.
        # 5184 is the flattened conv output size; it depends on the input
        # resolution -- TODO confirm against the frame preprocessing.
        self.fc_h_v = NoisyLinear(5184, hidden_size, std_init=noisy_std)
        self.fc_h_a = NoisyLinear(5184, hidden_size, std_init=noisy_std)
        self.fc_z_v = NoisyLinear(hidden_size, self.atoms, std_init=noisy_std)
        self.fc_z_a = NoisyLinear(hidden_size, action_space * self.atoms, std_init=noisy_std)

    def forward(self, x):
        """Map a (batch, history, H, W) frame stack to (batch, action_space, atoms)."""
        x = torch.nn.functional.relu(self.conv1(x))
        x = torch.nn.functional.relu(self.conv2(x))
        x = torch.nn.functional.relu(self.conv3(x))
        x = x.view(-1, 5184)
        v = self.fc_z_v(torch.nn.functional.relu(self.fc_h_v(x)))
        a = self.fc_z_a(torch.nn.functional.relu(self.fc_h_a(x)))
        v, a = v.view(-1, 1, self.atoms), a.view(-1, self.action_space, self.atoms)
        # Dueling combination: Q = V + A - mean(A), per atom.
        q = v + a - a.mean(1, keepdim=True)
        if not self.quantile:
            # Distributional (non-quantile) mode: normalize over atoms.
            q = torch.nn.functional.softmax(q, dim=2)
        return q

    def reset_noise(self):
        """Resample the noise of every NoisyLinear child (names all contain "fc")."""
        for name, module in self.named_children():
            if "fc" in name:
                module.reset_noise()
class NoisyLinear(torch.nn.Module):
    """Linear layer with factorised Gaussian parameter noise (NoisyNet).

    Learnable means and standard deviations are kept for both weight and
    bias; in training mode the effective parameters are mu + sigma * epsilon,
    while evaluation uses the means alone.
    """

    def __init__(self, in_features, out_features, std_init=0.4):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        shape = (out_features, in_features)
        self.weight_mu = torch.nn.Parameter(torch.empty(*shape))
        self.weight_sigma = torch.nn.Parameter(torch.empty(*shape))
        # Epsilon tensors are buffers: part of the state, but not trained.
        self.register_buffer("weight_epsilon", torch.empty(*shape))
        self.bias_mu = torch.nn.Parameter(torch.empty(out_features))
        self.bias_sigma = torch.nn.Parameter(torch.empty(out_features))
        self.register_buffer("bias_epsilon", torch.empty(out_features))
        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        """Initialise means uniformly in +/- 1/sqrt(fan_in); sigmas to std_init/sqrt(fan)."""
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def reset_noise(self):
        """Draw fresh factorised noise: weight_eps = f(eps_out) outer f(eps_in)."""
        eps_in = self._scale_noise(self.in_features)
        eps_out = self._scale_noise(self.out_features)
        self.weight_epsilon.copy_(eps_out.ger(eps_in))
        self.bias_epsilon.copy_(eps_out)

    def forward(self, input):
        if not self.training:
            # Deterministic evaluation: use the learned means only.
            return torch.nn.functional.linear(input, self.weight_mu, self.bias_mu)
        noisy_weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        noisy_bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return torch.nn.functional.linear(input, noisy_weight, noisy_bias)

    def _scale_noise(self, size):
        """Return sign(x) * sqrt(|x|) for x ~ N(0, 1), the NoisyNet scaling f."""
        sample = torch.randn(size)
        return sample.sign().mul_(sample.abs().sqrt_())
| 35.978495
| 162
| 0.661686
| 495
| 3,346
| 4.212121
| 0.187879
| 0.057074
| 0.065228
| 0.060432
| 0.42542
| 0.402878
| 0.344844
| 0.251319
| 0.17554
| 0.141007
| 0
| 0.021723
| 0.21578
| 3,346
| 92
| 163
| 36.369565
| 0.772866
| 0
| 0
| 0.0625
| 0
| 0
| 0.008368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.03125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55572056018bf803954acf22ae96913928e3246d
| 1,479
|
py
|
Python
|
src/modules/base/url_helper.py
|
yakii9/artificial-programmer
|
a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1
|
[
"MIT"
] | 1
|
2018-10-21T22:46:27.000Z
|
2018-10-21T22:46:27.000Z
|
src/modules/base/url_helper.py
|
yakii9/artificial-programmer
|
a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1
|
[
"MIT"
] | 1
|
2018-10-29T04:34:13.000Z
|
2018-11-01T14:32:23.000Z
|
src/modules/base/url_helper.py
|
yakii9/artificial-programmer
|
a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1
|
[
"MIT"
] | 1
|
2018-10-21T22:46:48.000Z
|
2018-10-21T22:46:48.000Z
|
import urllib.request
from html.parser import HTMLParser
from urllib import parse
from modules.base.handle_timeout import timeout
class ElementsFinder(HTMLParser):
    """HTML parser that collects the absolute URLs of every <a href> link fed to it."""

    def __init__(self, base_url, page_url):
        super().__init__()
        self.base_url = base_url
        self.page_url = page_url
        self.links = set()

    def handle_starttag(self, tag, attrs):
        """HTMLParser hook: resolve and record href targets of anchor tags."""
        if tag != 'a':
            return
        for attribute, value in attrs:
            if attribute == 'href':
                # Relative hrefs are resolved against the crawl's base URL.
                self.links.add(parse.urljoin(self.base_url, value))

    def page_links(self):
        """Return the set of absolute links found so far."""
        return self.links

    def error(self, message):
        """Ignore malformed markup (override required by older HTMLParser APIs)."""
        pass
class UrlHelper:
    """Static helpers for fetching pages and dissecting URLs."""

    def __init__(self):
        pass

    @staticmethod
    @timeout(6)
    def get_html(url):
        """Fetch *url* and return the raw response bytes, or None on failure."""
        try:
            with urllib.request.urlopen(url) as response:
                return response.read()
        except Exception as e:
            # Best-effort fetch: report the problem and return None explicitly
            # (the original fell off the end, returning None implicitly).
            print(e)
            return None

    def get_domain_name(self, url):
        """Return the registered domain ("example.com") of *url*, or '' on failure.

        Fix: the original used a bare 'except:', which also swallows
        SystemExit/KeyboardInterrupt; only parsing failures are caught now.
        IndexError covers hosts with fewer than two dot-separated labels.
        """
        try:
            results = self.get_sub_domain_name(url).split('.')
            return results[-2] + '.' + results[-1]
        except (IndexError, ValueError):
            return ''

    # Get sub domain name (name.example.com)
    @staticmethod
    def get_sub_domain_name(url):
        """Return the network-location part of *url*, or '' if it cannot be parsed."""
        try:
            return parse.urlparse(url).netloc
        except ValueError:
            # urlparse raises ValueError on malformed input (e.g. bad port).
            return ''
| 25.067797
| 98
| 0.577417
| 176
| 1,479
| 4.676136
| 0.426136
| 0.034022
| 0.040097
| 0.058323
| 0.046173
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003027
| 0.329953
| 1,479
| 58
| 99
| 25.5
| 0.827447
| 0.088573
| 0
| 0.25
| 0
| 0
| 0.005204
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0.045455
| 0.090909
| 0.022727
| 0.454545
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5557b931f8213b68a545c1e272d7bfa56dc0f55f
| 7,460
|
py
|
Python
|
trainer/trainer.py
|
iprapas/dl-continuous-deployment
|
bcee578a8ae3aa74e4ede00d125cb456f6a3010e
|
[
"MIT"
] | null | null | null |
trainer/trainer.py
|
iprapas/dl-continuous-deployment
|
bcee578a8ae3aa74e4ede00d125cb456f6a3010e
|
[
"MIT"
] | null | null | null |
trainer/trainer.py
|
iprapas/dl-continuous-deployment
|
bcee578a8ae3aa74e4ede00d125cb456f6a3010e
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from utils import inf_loop, MetricTracker, confusion_matrix_image
import copy
import sys
import time
from model.metric import Accuracy, TopkAccuracy
def get_top_k(x, ratio):
    """Select the (1 - ratio) fraction of entries of *x* largest in magnitude.

    Returns (values, indices) into the flattened tensor; at least one
    element is always kept, even for ratio == 1.
    """
    flat = x.view(-1)
    keep = max(1, int(flat.nelement() * (1 - ratio)))
    if keep == 1:
        # torch.max is cheaper than topk when only one element is kept.
        _, chosen = torch.max(flat.abs(), dim=0, keepdim=True)
    else:
        _, chosen = torch.topk(flat.abs(), keep, largest=True, sorted=False)
    return flat[chosen], chosen
def get_mask(flatten_arr, indices):
    """Return complementary float masks over *flatten_arr*.

    The first mask is 1.0 at *indices* (0.0 elsewhere); the second is its
    element-wise inverse.
    """
    hit = torch.zeros_like(flatten_arr, dtype=torch.bool)
    hit[indices] = True
    return hit.float(), (~hit).float()
class Trainer(BaseTrainer):
    """
    Trainer class: runs epoch-based or iteration-based training of a single
    model, tracks train/validation metrics via MetricTracker, and logs to
    the writer inherited from BaseTrainer.

    Also keeps two frozen copies of the model: `init_model` (the weights at
    construction) and `deployed_model` (intended as the currently-deployed
    snapshot, used by _valid_deployed).
    """
    def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
                 valid_data_loader=None, lr_scheduler=None, len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training: wrap the loader so it repeats forever
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        # Log roughly sqrt(batch_size) times less often than every batch.
        self.log_step = int(np.sqrt(data_loader.batch_size))
        # Frozen snapshots of the model; both kept in eval mode.
        self.deployed_model = copy.deepcopy(self.model)
        self.init_model = copy.deepcopy(self.model)
        self.init_model.eval()
        self.deployed_model.eval()
        self.accuracy = Accuracy()
        self.topkaccuracy = TopkAccuracy()
        self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
        self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)

    def _train_epoch(self, epoch):
        """
        Training logic for an epoch
        :param epoch: Integer, current training epoch.
        :return: A log that contains average loss and metric in this epoch,
                 validation results (prefixed 'val_'), wall-clock 'time (sec)'
                 and the pure optimisation time 'training_time'.
        """
        start = time.time()
        self.model.train()
        total_batch = 0
        self.train_metrics.reset()
        training_time = 0
        for batch_idx, (data, target) in enumerate(self.data_loader):
            data, target = data.to(self.device), target.to(self.device)
            batch_start = time.time()
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            # training_time counts only forward/backward/step; total_batch
            # additionally includes the metric updates below.
            training_time += time.time() - batch_start
            self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
            self.train_metrics.update('loss', loss.item())
            for met in self.metric_ftns:
                self.train_metrics.update(met.__name__, met(output, target))
            total_batch += time.time() - batch_start
            if batch_idx % self.log_step == 0:
                self.logger.info('Train Epoch: {} {} Loss: {:.6f} Time per batch (ms) {}'.format(
                    epoch,
                    self._progress(batch_idx),
                    loss.item(), total_batch * 1000 / (batch_idx + 1)))
                # self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
                # self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
                # valid_log = self._valid_deployed(batch_idx)
                # print logged informations to the screen
                # for key, value in valid_log.items():
                #     self.logger.info('Valid deployed {:15s}: {}'.format(str(key), value))
            # Needed for iteration-based training, where the loader is infinite.
            if batch_idx == self.len_epoch:
                break
        log = self.train_metrics.result()
        if self.do_validation:
            val_log = self._valid_epoch(epoch)
            log.update(**{'val_' + k: v for k, v in val_log.items()})
        log['time (sec)'] = time.time() - start
        log['training_time'] = training_time
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        return log

    def _valid_epoch(self, epoch):
        """
        Validate after training an epoch
        :param epoch: Integer, current training epoch.
        :return: A log that contains information about validation
        """
        self.model.eval()
        self.valid_metrics.reset()
        avg_loss =0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(self.valid_data_loader):
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)
                avg_loss += loss.item()/len(self.valid_data_loader)
                # Top-1 accuracy accumulated over the whole validation set.
                pred = torch.argmax(output, dim=1)
                correct += torch.sum(pred == target).item()
                total += len(target)
        self.writer.set_step(epoch, 'valid')
        self.writer.add_scalar('loss', avg_loss)
        self.writer.add_scalar('accuracy', correct/total)
        # self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
        # self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
        # add histogram of model parameters to the tensorboard
        # for name, p in self.model.named_parameters():
        #     self.writer.add_histogram(name, p, bins='auto')
        # NOTE(review): valid_metrics is reset but never updated in this
        # method, so the returned result carries no values from this pass --
        # loss/accuracy only reach the writer. Confirm whether this is intended.
        return self.valid_metrics.result()

    def _valid_deployed(self, batch):
        """
        Validate after training a batch
        :param epoch: Integer, current training epoch.
        :return: A log that contains information about validation
        """
        self.deployed_model.eval()
        self.valid_metrics.reset()
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(self.valid_data_loader):
                data, target = data.to(self.device), target.to(self.device)
                # NOTE(review): evaluates self.model although deployed_model
                # was just put in eval mode above -- possibly should be
                # self.deployed_model(data). Confirm before relying on this.
                output = self.model(data)
                loss = self.criterion(output, target)
                self.writer.set_step((batch - 1) * len(self.valid_data_loader) + batch_idx*len(target), 'valid')
                self.valid_metrics.update('loss', loss.item())
                for met in self.metric_ftns:
                    # NOTE(review): passes the metric function object itself,
                    # unlike _train_epoch which passes met(output, target) --
                    # looks like a bug; confirm MetricTracker's expectations.
                    self.valid_metrics.update(met.__name__, met)
                # self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
                # self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
        return self.valid_metrics.result()

    def _progress(self, batch_idx):
        """Return a '[current/total (pct%)]' progress string for logging."""
        base = '[{}/{} ({:.0f}%)]'
        if hasattr(self.data_loader, 'n_samples'):
            current = batch_idx * self.data_loader.batch_size
            total = self.data_loader.n_samples
        else:
            current = batch_idx
            total = self.len_epoch
        return base.format(current, total, 100.0 * current / total)
| 38.061224
| 112
| 0.604826
| 922
| 7,460
| 4.684382
| 0.199566
| 0.043992
| 0.02709
| 0.026395
| 0.441769
| 0.376013
| 0.318592
| 0.318592
| 0.293355
| 0.282241
| 0
| 0.005999
| 0.284987
| 7,460
| 195
| 113
| 38.25641
| 0.803712
| 0.187802
| 0
| 0.192
| 0
| 0
| 0.024551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056
| false
| 0
| 0.072
| 0
| 0.184
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
555e8fe1a5ae17b4fbc51d4ad0090a37d1dc68ba
| 3,520
|
py
|
Python
|
pycba/utils.py
|
mayermelhem/pycba
|
8f6a0da12629bac2ad1c6c8e113357f96931ef17
|
[
"Apache-2.0"
] | 10
|
2022-02-07T01:16:02.000Z
|
2022-03-12T07:56:43.000Z
|
pycba/utils.py
|
mayermelhem/pycba
|
8f6a0da12629bac2ad1c6c8e113357f96931ef17
|
[
"Apache-2.0"
] | 5
|
2022-02-08T07:42:53.000Z
|
2022-03-31T21:33:42.000Z
|
pycba/utils.py
|
mayermelhem/pycba
|
8f6a0da12629bac2ad1c6c8e113357f96931ef17
|
[
"Apache-2.0"
] | 1
|
2022-02-12T04:33:38.000Z
|
2022-02-12T04:33:38.000Z
|
"""
PyCBA - Utility functions for interacting with PyCBA
"""
import re
import numpy as np
from typing import Tuple
def parse_beam_string(
    beam_string: str,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Parse a beam descriptor string into CBA input vectors.

    The descriptor alternates terminal characters with span lengths, e.g.
    "P40R20R" or "E20H30R10F". Terminal characters (case-insensitive):

    - P - pinned (same as roller, kept for visualisations)
    - R - roller (any terminal)
    - E - encastre (fully fixed) - extremities only
    - F - free (cantilever end) - extremities only
    - H - hinge - internal only

    Parameters
    ----------
    beam_string :
        The string to be parsed.

    Raises
    ------
    ValueError
        When the beam string does not meet basic structural requirements.

    Returns
    -------
    (L, EI, R, eType) : tuple
        Span lengths, member flexural rigidities (prismatic, arbitrary
        value), support-condition vector, and member-type vector, as
        expected by :class:`pycba.analysis.BeamAnalysis`.
    """
    descriptor = beam_string.lower()
    support_matches = list(re.finditer(r"[efhpr]", descriptor))
    terminals = [m.group() for m in support_matches]
    ends = [m.end() for m in support_matches]

    if len(terminals) < 2:
        raise ValueError("At least two terminals must be defined")
    if "h" in (terminals[0], terminals[-1]):
        raise ValueError("Cannot have a hinge at an extremity")
    if len(terminals) > 2 and any(t in "fe" for t in terminals[1:-1]):
        raise ValueError("Do not define internal free or encastre terminals")

    # Span lengths are the numeric substrings between consecutive terminals.
    L = [
        float(descriptor[ends[k]: ends[k + 1] - 1])
        for k in range(len(ends) - 1)
    ]
    if len(terminals) - 1 != len(L):
        raise ValueError("Inconsistent terminal count and span count")

    EI = 30 * 1e10 * np.ones(len(L)) * 1e-6  # kNm2 - arbitrary value

    # Vertical/rotational restraint pair contributed by each terminal type.
    restraints = {
        "p": [-1, 0],   # pin
        "r": [-1, 0],   # roller
        "e": [-1, -1],  # encastre
        "f": [0, 0],    # free
        "h": [0, 0],    # hinge
    }
    R = []
    eType = [1] * len(L)
    for idx, terminal in enumerate(terminals):
        R.extend(restraints[terminal])
        if terminal == "h":
            # The member ending at a hinge becomes a hinged-end member.
            eType[idx - 1] = 2
    return (L, EI, R, eType)
| 34.174757
| 86
| 0.605682
| 503
| 3,520
| 4.200795
| 0.353877
| 0.052059
| 0.031235
| 0.051112
| 0.064363
| 0.064363
| 0.038807
| 0.038807
| 0.038807
| 0
| 0
| 0.02253
| 0.28125
| 3,520
| 102
| 87
| 34.509804
| 0.812648
| 0.546023
| 0
| 0.054054
| 0
| 0
| 0.132343
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.081081
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
555f6946d9a27cac92dae44e27d4220ecfaf6269
| 10,363
|
py
|
Python
|
models/dcase2020_fuss_baseline/evaluate_lib.py
|
marciopuga/sound-separation
|
0b23ae22123b041b9538295f32a92151cb77bff9
|
[
"Apache-2.0"
] | 412
|
2020-03-03T05:55:53.000Z
|
2022-03-29T20:49:11.000Z
|
models/dcase2020_fuss_baseline/evaluate_lib.py
|
marciopuga/sound-separation
|
0b23ae22123b041b9538295f32a92151cb77bff9
|
[
"Apache-2.0"
] | 12
|
2020-04-09T17:47:01.000Z
|
2022-03-22T06:07:04.000Z
|
models/dcase2020_fuss_baseline/evaluate_lib.py
|
marciopuga/sound-separation
|
0b23ae22123b041b9538295f32a92151cb77bff9
|
[
"Apache-2.0"
] | 89
|
2020-03-06T08:26:44.000Z
|
2022-03-31T11:36:23.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate separated audio from a DCASE 2020 task 4 separation model."""
import os
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import inference
from train import data_io
from train import metrics
from train import permutation_invariant
def _weights_for_nonzero_refs(source_waveforms):
  """Return boolean (source,) weights marking references with nonzero energy."""
  squared = tf.square(source_waveforms)
  mean_power = tf.reduce_mean(squared, axis=-1)
  rms = tf.sqrt(mean_power)
  return tf.greater(rms, 1e-8)
def _weights_for_active_seps(power_sources, power_separated):
  """Return boolean (source,) weights for separated signals that are active.

  A separated signal counts as active when its power exceeds 1% (-20 dB)
  of the quietest reference source's power.
  """
  quietest = tf.reduce_min(power_sources, axis=-1, keepdims=True)
  threshold = 0.01 * quietest
  return tf.greater(power_separated, threshold)
def compute_metrics(source_waveforms, separated_waveforms, mixture_waveform):
  """Permutation-invariant SI-SNR, powers, and under/equal/over-separation.

  Args:
    source_waveforms: Reference sources; the leading axis indexes sources
      (a batch axis is added and removed internally via tf.newaxis).
    separated_waveforms: Estimated sources; aligned to the references by
      the permutation-invariant matching step below.
    mixture_waveform: The mixture signal; tiled across sources to compute
      the mixture SI-SNR baseline.

  Returns:
    Dict of tensors: per-source SI-SNR for separated signals and mixture,
    the SI-SNR improvement, signal powers, activity weights, active
    ref/sep/pair counts, and scalar under/equal/over-separation flags.
  """
  # Align separated sources to reference sources.
  perm_inv_loss = permutation_invariant.wrap(
      lambda tar, est: -metrics.signal_to_noise_ratio_gain_invariant(est, tar))
  _, separated_waveforms = perm_inv_loss(source_waveforms[tf.newaxis],
                                         separated_waveforms[tf.newaxis])
  separated_waveforms = separated_waveforms[0]  # Remove batch axis.
  # Compute separated and source powers.
  power_separated = tf.reduce_mean(separated_waveforms ** 2, axis=-1)
  power_sources = tf.reduce_mean(source_waveforms ** 2, axis=-1)
  # Compute weights for active (separated, source) pairs where source is nonzero
  # and separated power is above threshold of quietest source power - 20 dB.
  weights_active_refs = _weights_for_nonzero_refs(source_waveforms)
  weights_active_seps = _weights_for_active_seps(
      tf.boolean_mask(power_sources, weights_active_refs), power_separated)
  weights_active_pairs = tf.logical_and(weights_active_refs,
                                        weights_active_seps)
  # Compute SI-SNR.
  sisnr_separated = metrics.signal_to_noise_ratio_gain_invariant(
      separated_waveforms, source_waveforms)
  num_active_refs = tf.reduce_sum(tf.cast(weights_active_refs, tf.int32))
  num_active_seps = tf.reduce_sum(tf.cast(weights_active_seps, tf.int32))
  num_active_pairs = tf.reduce_sum(tf.cast(weights_active_pairs, tf.int32))
  # Baseline: SI-SNR of the raw mixture against each reference source.
  sisnr_mixture = metrics.signal_to_noise_ratio_gain_invariant(
      tf.tile(mixture_waveform[tf.newaxis], (source_waveforms.shape[0], 1)),
      source_waveforms)
  # Compute under/equal/over separation.
  under_separation = tf.cast(tf.less(num_active_seps, num_active_refs),
                             tf.float32)
  equal_separation = tf.cast(tf.equal(num_active_seps, num_active_refs),
                             tf.float32)
  over_separation = tf.cast(tf.greater(num_active_seps, num_active_refs),
                            tf.float32)
  return {'sisnr_separated': sisnr_separated,
          'sisnr_mixture': sisnr_mixture,
          'sisnr_improvement': sisnr_separated - sisnr_mixture,
          'power_separated': power_separated,
          'power_sources': power_sources,
          'under_separation': under_separation,
          'equal_separation': equal_separation,
          'over_separation': over_separation,
          'weights_active_refs': weights_active_refs,
          'weights_active_seps': weights_active_seps,
          'weights_active_pairs': weights_active_pairs,
          'num_active_refs': num_active_refs,
          'num_active_seps': num_active_seps,
          'num_active_pairs': num_active_pairs}
def _report_score_stats(metric_per_source_count, label='', counts=None):
"""Report mean and std dev for specified counts."""
values_all = []
if counts is None:
counts = metric_per_source_count.keys()
for count in counts:
values = metric_per_source_count[count]
values_all.extend(list(values))
return '%s for count(s) %s = %.1f +/- %.1f dB' % (
label, counts, np.mean(values_all), np.std(values_all))
def _flatten_metrics_row(metrics_dict, max_count):
  """Flatten a metrics dict into one scalar-per-column row for the dataframe."""
  row = {}
  for metric_name, metric_value in metrics_dict.items():
    if metric_value.shape:
      # Per-source metric: one column per source slot.
      for i_src in range(1, max_count + 1):
        row[metric_name + '_source%d' % i_src] = metric_value[i_src - 1]
    else:
      # Scalar metric.
      row[metric_name] = metric_value
  return row


def _summary_lines(header, sisnr_per_source_count, sisnri_per_source_count,
                   under_seps, equal_seps, over_seps):
  """Build the human-readable summary block reported during evaluation."""
  return [
      header,
      _report_score_stats(sisnr_per_source_count, 'SI-SNR', counts=[1]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi', counts=[2]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi', counts=[3]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi', counts=[4]),
      _report_score_stats(sisnri_per_source_count, 'SI-SNRi', counts=[2, 3, 4]),
      'Under separation: %.2f' % np.mean(under_seps),
      'Equal separation: %.2f' % np.mean(equal_seps),
      'Over separation: %.2f' % np.mean(over_seps),
  ]


def evaluate(checkpoint_path, metagraph_path, data_list_path, output_path):
  """Evaluate a model on FUSS data and write per-example scores.

  Args:
    checkpoint_path: Path to the model checkpoint.
    metagraph_path: Path to the inference metagraph.
    data_list_path: File listing the input wavs (first field skipped).
    output_path: Either a .csv path, or a directory in which scores.csv is
      created. A *_summary.txt with aggregate stats is written alongside.
  """
  model = inference.SeparationModel(checkpoint_path, metagraph_path)
  file_list = data_io.read_lines_from_file(data_list_path, skip_fields=1)
  with model.graph.as_default():
    dataset = data_io.wavs_to_dataset(file_list, batch_size=1,
                                      num_samples=160000,
                                      repeat=False)
    # Strip batch and mic dimensions.
    dataset['receiver_audio'] = dataset['receiver_audio'][0, 0]
    dataset['source_images'] = dataset['source_images'][0, :, 0]

  # Determine the csv path up front so it is defined even when the dataset
  # yields no usable examples (previously a NameError at the final write).
  if output_path.endswith('.csv'):
    csv_path = output_path
  else:
    csv_path = os.path.join(output_path, 'scores.csv')

  # Separate with a trained model.
  i = 1
  max_count = 4
  sisnr_per_source_count = {c: [] for c in range(1, max_count + 1)}
  sisnri_per_source_count = {c: [] for c in range(1, max_count + 1)}
  under_seps = []
  equal_seps = []
  over_seps = []
  df = None
  while True:
    try:
      waveforms = model.sess.run(dataset)
    except tf.errors.OutOfRangeError:
      break
    separated_waveforms = model.separate(waveforms['receiver_audio'])
    source_waveforms = waveforms['source_images']
    if np.allclose(source_waveforms, 0):
      print('WARNING: all-zeros source_waveforms tensor encountered. '
            'Skipping this example...')
      continue
    metrics_dict = compute_metrics(source_waveforms, separated_waveforms,
                                   waveforms['receiver_audio'])
    metrics_dict = {k: v.numpy() for k, v in metrics_dict.items()}
    sisnr_sep = metrics_dict['sisnr_separated']
    sisnr_mix = metrics_dict['sisnr_mixture']
    sisnr_imp = metrics_dict['sisnr_improvement']
    weights_active_pairs = metrics_dict['weights_active_pairs']

    row_dict = _flatten_metrics_row(metrics_dict, max_count)
    # Create and initialize the dataframe lazily, once columns are known.
    if df is None:
      df = pd.DataFrame(columns=sorted(row_dict.keys()))
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    df = pd.concat([df, pd.DataFrame([row_dict])], ignore_index=True)

    # Store metrics per source count and report results so far.
    under_seps.append(metrics_dict['under_separation'])
    equal_seps.append(metrics_dict['equal_separation'])
    over_seps.append(metrics_dict['over_separation'])
    sisnr_per_source_count[metrics_dict['num_active_refs']].extend(
        sisnr_sep[weights_active_pairs].tolist())
    sisnri_per_source_count[metrics_dict['num_active_refs']].extend(
        sisnr_imp[weights_active_pairs].tolist())
    print('Example %d: SI-SNR sep = %.1f dB, SI-SNR mix = %.1f dB, '
          'SI-SNR imp = %.1f dB, ref count = %d, sep count = %d' % (
              i, np.mean(sisnr_sep), np.mean(sisnr_mix),
              np.mean(sisnr_sep - sisnr_mix), metrics_dict['num_active_refs'],
              metrics_dict['num_active_seps']))
    if not i % 20:
      # Report mean statistics and save csv every so often.
      lines = _summary_lines(
          'Metrics after %d examples:' % i, sisnr_per_source_count,
          sisnri_per_source_count, under_seps, equal_seps, over_seps)
      print('')
      for line in lines:
        print(line)
      with open(csv_path.replace('.csv', '_summary.txt'), 'w+') as f:
        f.writelines([line + '\n' for line in lines])
      print('\nWriting csv to %s.\n' % csv_path)
      df.to_csv(csv_path)
    i += 1

  if df is None:
    # No usable examples were seen; nothing to summarize or write.
    print('No examples evaluated; no scores written.')
    return

  # Report final mean statistics.
  lines = _summary_lines(
      'Final statistics:', sisnr_per_source_count, sisnri_per_source_count,
      under_seps, equal_seps, over_seps)
  print('')
  for line in lines:
    print(line)
  with open(csv_path.replace('.csv', '_summary.txt'), 'w+') as f:
    f.writelines([line + '\n' for line in lines])
  # Write final csv.
  print('\nWriting csv to %s.' % csv_path)
  df.to_csv(csv_path)
| 41.618474
| 80
| 0.666795
| 1,365
| 10,363
| 4.770696
| 0.212454
| 0.031787
| 0.045147
| 0.030713
| 0.323864
| 0.288084
| 0.24309
| 0.195485
| 0.179361
| 0.179361
| 0
| 0.011509
| 0.228602
| 10,363
| 248
| 81
| 41.78629
| 0.803102
| 0.154009
| 0
| 0.284153
| 0
| 0.005464
| 0.122817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027322
| false
| 0
| 0.043716
| 0
| 0.092896
| 0.043716
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55652d01d18ec68adf27b069baae8bf7ed3db2f4
| 1,705
|
py
|
Python
|
python/domain/compliance/model/measure.py
|
ICTU/document-as-code
|
e65fddb94513e7c2f54f248b4ce69e9e10ce42f5
|
[
"Apache-2.0"
] | 2
|
2021-01-09T17:00:51.000Z
|
2021-02-19T09:35:26.000Z
|
python/domain/compliance/model/measure.py
|
ICTU/document-as-code
|
e65fddb94513e7c2f54f248b4ce69e9e10ce42f5
|
[
"Apache-2.0"
] | null | null | null |
python/domain/compliance/model/measure.py
|
ICTU/document-as-code
|
e65fddb94513e7c2f54f248b4ce69e9e10ce42f5
|
[
"Apache-2.0"
] | 1
|
2020-02-24T15:50:05.000Z
|
2020-02-24T15:50:05.000Z
|
"""
BIO measure - defines and describes a measure for BIO compliance
"""
from domain.base import Base
class Measure(Base):
    """A measure that helps achieve BIO compliance."""

    # Class-level registrations for the two special measures.
    _explain = None
    _not_applicable = None

    def __init__(self, identifier, description, identifiers, url=None, done=False):
        super().__init__(identifier)
        self.description = description
        self.identifiers = identifiers
        self.url = url
        self.done = done

    def set_explain(self):
        """Register this measure as the 'explain' measure; returns self."""
        self.register_explain(self)
        return self

    def set_not_applicable(self):
        """Register this measure as the 'not applicable' measure; returns self."""
        self.register_not_applicable(self)
        return self

    # ---

    @classmethod
    def all_applicable_to_fragment(cls, fragment_identifier):
        """Collect measures whose identifier prefixes match the fragment."""
        applicable = []
        for measure in cls.all:
            for prefix in measure.identifiers:
                if fragment_identifier.startswith(prefix):
                    applicable.append(measure)
        return applicable

    # --- class property explain (rw) ---

    @classmethod
    def register_explain(cls, explain):
        if isinstance(explain, cls):
            cls._explain = explain
        else:
            raise TypeError(f"explain should be {cls.__name__}, not {explain.__class__.__name__}")

    @classmethod
    def explain(cls):
        return cls._explain

    # --- class property not_applicable (rw) ---

    @classmethod
    def register_not_applicable(cls, not_applicable):
        if isinstance(not_applicable, cls):
            cls._not_applicable = not_applicable
        else:
            raise TypeError(f"not_applicable should be {cls.__name__}, not {not_applicable.__class__.__name__}")

    @classmethod
    def not_applicable(cls):
        return cls._not_applicable
| 26.640625
| 112
| 0.652199
| 190
| 1,705
| 5.510526
| 0.268421
| 0.161414
| 0.045845
| 0.045845
| 0.034384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.262757
| 1,705
| 63
| 113
| 27.063492
| 0.832936
| 0.111437
| 0
| 0.175
| 0
| 0
| 0.097659
| 0.04214
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.025
| 0.075
| 0.425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
556657f3480d4123e6f0535b01c6ed2f5345122d
| 615
|
py
|
Python
|
week_06/readibility.py
|
fentybit/cs50
|
a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3
|
[
"CNRI-Python"
] | null | null | null |
week_06/readibility.py
|
fentybit/cs50
|
a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3
|
[
"CNRI-Python"
] | null | null | null |
week_06/readibility.py
|
fentybit/cs50
|
a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3
|
[
"CNRI-Python"
] | null | null | null |
from cs50 import get_string
text = get_string("Text: ")
text_length = len(text)
letters = 0
sentences = 0
words = 1
for i in range(text_length):
if text[i].isalpha():
letters += 1
for i in range(text_length):
if ord(text[i]) == 46 or ord(text[i]) == 33 or ord(text[i]) == 63:
sentences += 1
for i in range(text_length):
if ord(text[i]) == 32:
words += 1
L = 100 * (letters / words)
S = 100 * (sentences / words)
grade = round(0.0588 * L - 0.296 * S - 15.8)
if 16 <= grade:
print("Grade 16+")
elif grade < 1:
print("Before Grade 1")
else:
print(f"Grade {grade}")
| 20.5
| 70
| 0.588618
| 103
| 615
| 3.456311
| 0.38835
| 0.070225
| 0.089888
| 0.058989
| 0.247191
| 0.247191
| 0.247191
| 0.247191
| 0.179775
| 0.179775
| 0
| 0.08658
| 0.24878
| 615
| 30
| 71
| 20.5
| 0.683983
| 0
| 0
| 0.125
| 0
| 0
| 0.068182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.041667
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55686a8be609e908e7580542f40aa36255c8c155
| 12,532
|
py
|
Python
|
functions.py
|
flyingmat/pyfactorizer
|
6e607408bc21d04b09ecabfc6a579ad4058965f5
|
[
"MIT"
] | null | null | null |
functions.py
|
flyingmat/pyfactorizer
|
6e607408bc21d04b09ecabfc6a579ad4058965f5
|
[
"MIT"
] | null | null | null |
functions.py
|
flyingmat/pyfactorizer
|
6e607408bc21d04b09ecabfc6a579ad4058965f5
|
[
"MIT"
] | null | null | null |
from math import floor
def remove_spaces(inlst):
    """Return a copy of *inlst* with all space characters removed."""
    # Was a lambda assignment; PEP 8 (E731) prefers a named def.
    return [ch for ch in inlst if ch != ' ']
def sf2i(inp):
    """Render a number as a string, collapsing integral floats (2.0 -> '2')."""
    if not float(inp).is_integer():
        return str(inp)
    return str(int(inp))
def fix_signs(inlst):
i = 0
while i < len(inlst):
if inlst[i] in '+-': # first sign is detected
sign = -1 if inlst[i] == '-' else 1 # sign variable assigned
while i+1 < len(inlst) and inlst[i+1] in '+-': # while more signs are present
if inlst[i+1] == '-': # invert the sign if a minus is detected
sign *= -1
del inlst[i+1] # delete each excessive sign
inlst[i] = '-' if sign == -1 else '+' # change the only sign left's value accordingly
i += 1 # keep checking for other signs
return inlst
def fix_dict(indict):
    """Return a hashable form of *indict*.

    Dicts become frozensets of their items (usable as dict keys); any
    other value passes through unchanged.
    """
    # isinstance (instead of type() == dict) also accepts dict subclasses.
    if isinstance(indict, dict):
        return frozenset(indict.items())
    return indict
def get_coefficient(inlst, i):
    """Scan backwards from the 'x' at index *i* for its numeric coefficient.

    Returns (coefficient, start_index) where start_index is where the
    x-term begins (clamped to 0 for convert()'s slice deletion).
    """
    digits = '1234567890.'
    pos = i - 1
    text = ''
    while pos >= 0 and inlst[pos] in digits:
        text = inlst[pos] + text
        pos -= 1
    if not text:
        text = '1'  # a bare 'x' means coefficient 1
    if pos >= 0 and inlst[pos] == '-':
        text = '-' + text
    start = max(pos, 0)  # value correction for convert()
    return (float(text), start)
def get_exponent(inlst, i):
    """Scan forward from the 'x' at index *i* for a '^<digits>' exponent.

    Returns (exponent, end_index); exponents are assumed to be positive
    integers, and a missing exponent means 1.
    """
    if i + 1 < len(inlst) and inlst[i + 1] == '^':
        pos = i + 2
        text = ''
        while pos < len(inlst) and inlst[pos] in '1234567890':
            text += inlst[pos]
            pos += 1
        exp = int(text) if text else 1
    else:
        pos = i + 1  # value correction for convert()
        exp = 1
    return (exp, pos)
def convert(inlst):
    """Extract every x-term from *inlst* into an {exponent: coefficient} dict.

    NOTE: *inlst* is edited destructively - each x-term (with its
    coefficient and exponent) is deleted as it is consumed.
    """
    exps = {}
    i = 0
    while i < len(inlst):
        if inlst[i] == 'x':
            coeff, start = get_coefficient(inlst, i)
            exp, end = get_exponent(inlst, i)
            exps[exp] = exps.get(exp, 0) + coeff
            del inlst[start:end]
            i = start
        i += 1
    return exps
def solve_x0_terms(inlst):
    """Consume *inlst* and return the sum of its remaining constant terms.

    Intended to run after convert() has stripped the x-terms; '#', '+' and
    '-' act as term separators.
    """
    total = 0
    term = ''
    while inlst:
        ch = inlst.pop(0)
        if ch in '#+-':
            if term:
                total += float(term)
            term = '-' if ch == '-' else ''
        elif ch in '1234567890.':
            term += ch
    if term:
        total += float(term)
    return total
def divide_func(exps, div): # uses polynomial long division
    """Divide polynomial *exps* by *div* (both {exponent: coefficient} dicts).

    Returns the quotient dict, or {} when the division leaves a remainder.
    NOTE: *exps* is mutated in place - terms are cancelled and deleted as
    the division proceeds, so max(exps) shrinks on each pass.
    """
    newexps = {}
    for current_exp in range(max(exps)-max(div), -1, -1):
        # Only the exponent matching the current leading term is processed;
        # the loop re-enters after exps has been reduced.
        if max(exps) - max(div) != current_exp: # bugfix: FOR Loop coud be changed to something more efficient (needs testing with high exponents)
            continue
        newexps[current_exp] = exps[max(exps)] / div[max(div)]
        # Subtract (quotient term * divisor) from the dividend.
        for exp, coeff in div.items():
            m_coeff = exp + current_exp
            if m_coeff not in exps:
                exps[m_coeff] = 0
            exps[m_coeff] -= (newexps[current_exp] * coeff)
            if exps[m_coeff] == 0:
                del exps[m_coeff] # deletion required because of max() in the main loop that could return a coeff with value 0
    if 0 not in newexps:
        newexps[0] = 0
    return newexps if not exps or not exps[0] else {} # if there is a reminder, return an empty dict; could be changed to return reminder
def n_factors(n):
    """Yield factor pairs (a, b) with a * b == n, starting with (n, 1).

    Non-integral floats yield nothing. For even n every divisor up to
    |n|/2 is tried (descending); for odd n only odd candidates are tried.
    """
    if type(n) == float and not n.is_integer():
        # Fix: `raise StopIteration` inside a generator has been a
        # RuntimeError since Python 3.7 (PEP 479); a plain return ends
        # the generator cleanly with no values.
        return
    n = int(n)
    yield (n, 1)
    if n % 2 == 0:
        for i in range(floor(abs(n / 2)), 0, -1):
            if n % i == 0:
                yield (i, int(n / i))
    else:
        tn = floor(abs(n / 2))
        # Start from the largest odd candidate <= |n|/2.
        for i in range((tn - 1 if tn % 2 == 0 else tn), 0, -2):
            if n % i == 0:
                yield (i, int(n / i))
def x2terms(exps):
    """Return the (a, b, c) quadratic coefficients from an exponent dict."""
    return tuple(exps.get(power, 0) for power in (2, 1, 0))
def delta_calc(a, b, c):
    """Return the discriminant b^2 - 4ac of a quadratic."""
    return b * b - 4 * a * c
def pow_diff(poly):
    """Factor a two-term polynomial as a difference/sum of powers.

    Returns a sequence of (factor_dict, exponent) pairs when both roots
    are integral, else an empty tuple.
    """
    out = ()
    top = max(poly)
    root_exp = (1.0 / 2) if top % 2 == 0 else (1.0 / top)
    lead, const = poly[top], poly[0]
    root1 = (abs(lead) ** root_exp) * (-1 if lead < 0 else 1)
    root2 = (abs(const) ** root_exp) * (-1 if const < 0 else 1)
    if root1.is_integer() and root2.is_integer():
        root1, root2 = int(root1), int(root2)
        if top % 2 == 0:
            # Even power: only a genuine difference (opposite signs) splits.
            if const * lead < 0:
                xm, x0 = root1, root2
                half = int(top / 2)
                out = (
                    ({half: xm, 0: x0}, 1),
                    ({half: (xm if xm > 0 else -xm),
                      0: (x0 if xm < 0 else -x0)}, 1),
                )
        else:
            out = [({1: root1, 0: root2}, 1)]
    return out
def binomial_mult_3(poly, expsort):
    """Try to split a 3-term polynomial into two binomial factors.

    Searches factor pairs of the constant and leading coefficients whose
    cross products sum to the middle coefficient (trinomial factoring).
    """
    out = ()
    mid = expsort[1]
    for const_a, const_b in n_factors(poly[0]):
        for lead_a, lead_b in n_factors(poly[expsort[0]]):
            # Cross products must add up to the middle coefficient.
            if lead_a * const_b + lead_b * const_a == poly[mid]:
                out = (({mid: lead_a, 0: const_a}, 1),
                       ({mid: lead_b, 0: const_b}, 1))
    return out
def binomial_pow3(poly, expsort):
    """Detect a perfect binomial cube: a^3 + 3a^2b + 3ab^2 + b^3 -> (a+b)^3."""
    out = ()
    if expsort[0] % 3 != 0:
        return out
    lead, const = poly[expsort[0]], poly[0]
    root1 = (abs(lead) ** (1.0 / 3)) * (-1 if lead < 0 else 1)
    root2 = (abs(const) ** (1.0 / 3)) * (-1 if const < 0 else 1)
    if root1.is_integer() and root2.is_integer():
        # The two middle coefficients must match the cube expansion exactly.
        if (poly[expsort[1]] == 3 * (root1 ** 2) * root2
                and poly[expsort[2]] == 3 * (root2 ** 2) * root1):
            out = [({expsort[2]: root1, 0: root2}, 3)]
    return out
def binomial_mult_4(poly, expsort):
    """Try to factor a 4-term polynomial by grouping into two binomials."""
    out = ()
    ratio_high = poly[expsort[0]] / poly[expsort[2]]
    ratio_low = poly[expsort[1]] / poly[expsort[3]]
    if ratio_high == ratio_low:
        cfs = [poly[e] for e in expsort]
        span = max(abs(cfs[0]), abs(cfs[1])) - min(abs(cfs[0]), abs(cfs[1]))
        for n3, _ in n_factors(span):
            # n3 must divide both leading coefficients evenly.
            if cfs[0] % n3 == 0 == cfs[1] % n3:
                n1 = int(cfs[0] / n3)
                n2 = int(cfs[1] / n3)
                if cfs[3] % n2 == 0:
                    n4 = int(cfs[3] / n2)
                    out = [
                        ({min(expsort[1], expsort[2]): n1, 0: n2}, 1),
                        ({max(expsort[1], expsort[2]): n3, 0: n4}, 1),
                    ]
                    break
    return out
def bf_int_coordinates(exps):
    """Brute-force rational and integer roots of the polynomial *exps*.

    Yields each candidate x with p(x) == 0: first the fractions +-1/i for
    i in 2..100, then the integers +-i for i in 1..1000.
    """
    # (Removed dead local `out_cord`: it was assigned and never used.)
    for i in range(2, 101):
        k = 1 / i
        if check_fact(exps, k):
            yield k
        if check_fact(exps, -k):
            yield -k
    for i in range(1, 1001):
        if check_fact(exps, i):
            yield i
        if check_fact(exps, -i):
            yield -i
def check_fact(exps, fact):
    """Return True when *fact* is a root of the polynomial given by *exps*.

    Evaluation is rounded to 15 decimal places to absorb float noise.
    """
    value = 0
    for power in exps:
        value += exps[power] * (fact ** power)
    return round(value, 15) == 0
def factorize(poly_stack, func):
    """Pop one polynomial off *poly_stack*, peel off one layer of factors,
    and recurse until the stack is empty.

    Fully factored pieces are handed to func.add(); pieces that may factor
    further are pushed back onto the stack. NOTE(review): the popped poly
    dict is mutated by divide_func() during the division steps.
    """
    poly = poly_stack.pop()
    tmexp = max(poly)  # highest exponent present
    div_polys = []     # candidate factors found in this pass
    common_factor = 1
    # {True} iff every coefficient is negative (pull out a negative factor).
    checknegative = set([c < 0 for c in poly.values()])
    # factorizing checks
    for (i, _) in n_factors(min([abs(v) for v in poly.values() if v != 0])): # if common factor in poly, divide e.g. 2x^2+4 -> 2(x^2+2)
        checkmult = set() # check performed on every iteration because of coeffs changing with division
        for coeff in poly.values():
            checkmult.add(coeff % i)
        if len(checkmult) == 1 and 0 in checkmult:
            common_factor = i if checknegative != set([True]) else -i
            break
    if common_factor != 1:
        div_polys = [ ({ 0:common_factor }, 1) ]
    elif len(poly) > 2 and tmexp and poly[0] == 0: # x^5 + x^3 -> x^3(x^2 + 1)
        div_polys = [ ({ 1:1, 0:0 }, min([e if e > 0 else tmexp for e in poly])) ]
    elif len(poly) == 2 and max(poly) > 1 and poly[0]: # x^2 - 1 -> (x + 1)(x - 1), x^3 - 1, x^3 + 1, etc.
        div_polys = pow_diff(poly)
    elif len(poly) == 3 and poly[0]: # x^2 + 2x + 1 -> (x + 1)^2, 3x^2 + 7x + 2 -> (3x + 1)(x + 2), etc. max exp can be > 2
        expsort = sorted(poly)[::-1]
        if expsort[0] % 2 == 0 and expsort[0]-expsort[1] == expsort[1]-expsort[2]:
            div_polys = binomial_mult_3(poly, expsort)
    elif len(poly) == 4 and poly[0]:
        expsort = sorted(poly)[::-1]
        if expsort[0]-expsort[1] == expsort[1]-expsort[2] == expsort[2]-expsort[3]:
            div_polys = binomial_pow3(poly, expsort)
        if not div_polys: # 6x^6 + 4x^4 + 15x^2 + 10 would trigger the first check but not the second when using ELIF (one doesn't exlude the other)
            if expsort[0]-expsort[2] == expsort[1]-expsort[3]:
                div_polys = binomial_mult_4(poly,expsort)
    if not div_polys and tmexp > 2:
        # bruteforce: peel off (x - root) factors for each root found,
        # at most tmexp of them.
        div_count = tmexp
        for xv in bf_int_coordinates(poly):
            div_polys.append(({ 1:1, 0:-xv }, 1))
            div_count -= 1
            if div_count == 0:
                break
    # Divide out each found factor; factors (and the remaining quotient)
    # that may still factor further go back on the stack.
    for p, e in div_polys:
        for div_i in range(e):
            poly = divide_func(poly, p)
        if (max(p) > 2) or (max(p) == 2 and p[0] and delta_calc(*x2terms(p)) >= 0):
            poly_stack.append(p)
        else:
            func.add(p, 1)
    if div_polys and ((max(poly) > 2) or (max(poly) == 2 and poly[0] and delta_calc(*x2terms(poly)) >= 0)):
        poly_stack.append(poly)
    else:
        if len(poly) == 2 and not poly[0]: # fix for ax^2 -> x^2 divided by a -> poly = {2:1,0:0}:1, should be {1:1,0:0}:2
            func.add({ 1:1, 0:0 }, max(poly))
        else:
            func.add(poly, 1)
    if poly_stack:
        factorize(poly_stack, func)
def polyformat(polys, x0t):
    """Render the factor mapping *polys* (frozen factor dict -> exponent)
    and the numeric factor *x0t* as a product string, e.g. '2(x + 1)^2'.
    """
    # out[0] holds the leading numeric factor, out[1] a bare x^n factor.
    out = ['','']
    brackets = False
    if len(polys) > 1 or x0t != 1:
        brackets = True
    # A leading 1/-1 collapses to ''/'-' unless it is the only factor.
    out[0] += sf2i(x0t) if x0t not in (1,-1) or len(polys) == 0 else '-' if x0t == -1 else ''
    for poly, exp in polys.items():
        poly = dict(poly)
        if len(poly) == 2 and not poly[0]:  # pure x^n factor ({1:1, 0:0})
            out[1] = 'x'
            if exp > 1:
                out[1] += '^' + str(exp)
        else:
            current_poly = ''
            if exp > 1:
                brackets = True
            expsort = sorted(poly)[::-1]
            for e in expsort:
                # Sign separator; zero coefficients contribute nothing.
                current_poly += '- ' if poly[e] < 0 else '+ ' if poly[e] > 0 else ''
                if e != 0:
                    current_poly += sf2i(abs(poly[e])) if poly[e] not in (1,-1) else ''
                    current_poly += 'x'
                    current_poly += '^' + sf2i(e) + ' ' if e != 1 else ' '
                else:
                    current_poly += sf2i(abs(poly[e])) if poly[e] else ''
            # Strip the leading '+ ' (or fold '- ' into a plain '-').
            if current_poly[0] == '+':
                current_poly = current_poly[2:]
            elif current_poly[0] == '-' and brackets:
                current_poly = '-' + current_poly[2:]
            current_poly = '(' + current_poly + ')' if brackets else current_poly
            current_poly += '^' + sf2i(exp) if exp != 1 else ''
            out.append(current_poly)
    return ''.join(out)
class Function():
    """A polynomial together with its parsed terms and collected factors.

    *data* may be an equation string (parsed via remove_spaces/fix_signs/
    convert) or an already-built {exponent: coefficient} dict.
    """
    def __init__(self, data):
        # Maps fix_dict()-frozen factor dicts to their exponents.
        self.data = {}
        # Accumulated constant (x^0) multiplier of the factorization.
        self.x0t = 1
        if type(data) == dict:
            self.exps = data
        else:
            self.eqt = remove_spaces(data)
            self.eqt = fix_signs(self.eqt)
            self.exps = convert(self.eqt) # self.eqt is referenced and edited directly by convert()
            if 0 not in self.exps: # 0 may already be in exps because of x^0 terms
                self.exps[0] = 0
            self.exps[0] += solve_x0_terms(self.eqt) # x-terms have already been removed from self.eqt
        self.out = ""
    def __repr__(self):
        return repr(self.data)
    def add(self, indict, exp):
        """Record factor *indict* raised to *exp* in the result."""
        if len(dict(indict)) == 1:
            self.x0t *= ((dict(indict))[0] ** exp) # number-only terms (x^0) are managed separately
        else:
            self.indict = fix_dict(indict)
            if self.indict in self.data:
                self.data[self.indict] += exp
            else:
                self.data[self.indict] = exp
    def factorize(self):
        """Factorize the polynomial and return the formatted product string."""
        if set(self.exps.values()) != set([0]):
            factorize([self.exps], self)
            #print(self.data)
            return polyformat(self.data, self.x0t)
        else:
            return '0'
| 39.040498
| 151
| 0.514682
| 1,866
| 12,532
| 3.383708
| 0.136656
| 0.009978
| 0.01663
| 0.006969
| 0.207159
| 0.127336
| 0.111182
| 0.071904
| 0.04878
| 0.014888
| 0
| 0.053719
| 0.343441
| 12,532
| 320
| 152
| 39.1625
| 0.713661
| 0.129828
| 0
| 0.189189
| 0
| 0
| 0.006349
| 0
| 0.003378
| 0
| 0
| 0
| 0
| 1
| 0.077703
| false
| 0
| 0.003378
| 0.006757
| 0.155405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
556d8216ffbaa6f7a0d0816c6b1ba9baa984c1a1
| 381
|
py
|
Python
|
Problems/14.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | 2
|
2021-07-14T11:01:58.000Z
|
2021-07-14T11:02:01.000Z
|
Problems/14.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | null | null | null |
Problems/14.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | null | null | null |
def isPerCube(n):
    """Return True if *n* is a perfect cube.

    Fixes two defects in the original: negative inputs crashed (a
    negative base with a fractional exponent yields a complex number),
    and large values could misclassify due to float cube-root error.
    Candidate roots are verified with exact integer arithmetic.
    """
    m = abs(n)
    root = int(round(m ** (1 / 3)))
    # Float cube roots can be off by one; check the neighbourhood exactly.
    for candidate in (root - 1, root, root + 1):
        if candidate >= 0 and candidate ** 3 == m:
            return True
    return False
""" x = 2
while True:
y = n / (x * x)
if (x == y):
print(x)
if x == int(x):
return True
else:
return False
x = (y + x + x) / 3
print(x)"""
print(isPerCube())
| 19.05
| 28
| 0.351706
| 53
| 381
| 2.528302
| 0.339623
| 0.044776
| 0.089552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036458
| 0.496063
| 381
| 20
| 29
| 19.05
| 0.661458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
556f083296f917021fc8c5ac171cde72ce1bed3a
| 1,690
|
py
|
Python
|
backend/health/health_check.py
|
threefoldtech/zeroCI
|
851def4cbaebba681641ecb24c731de56277d6ed
|
[
"Apache-2.0"
] | null | null | null |
backend/health/health_check.py
|
threefoldtech/zeroCI
|
851def4cbaebba681641ecb24c731de56277d6ed
|
[
"Apache-2.0"
] | 52
|
2019-11-14T09:39:04.000Z
|
2021-03-16T10:15:55.000Z
|
backend/health/health_check.py
|
AhmedHanafy725/0-CI
|
ce73044eea2c15bcbb161a1d6f23e75e4f8d53a0
|
[
"Apache-2.0"
] | 1
|
2019-10-30T09:51:25.000Z
|
2019-10-30T09:51:25.000Z
|
import sys
sys.path.append("/sandbox/code/github/threefoldtech/zeroCI/backend")
from redis import Redis
from health_recover import Recover
from utils.utils import Utils
recover = Recover()  # single shared recovery handler used by all checks below
class Health(Utils):
    """Health checks for zeroCI services.

    Each failing check triggers the matching action on the module-level
    `recover` handler.
    """

    def get_process_pid(self, name):
        """Return the PIDs (as strings) of processes matching *name*."""
        cmd = f"ps -aux | grep -v grep | grep '{name}' | awk '{{print $2}}'"
        response = self.execute_cmd(cmd=cmd, timeout=5)
        pids = response.stdout.split()
        return pids

    def test_zeroci_server(self):
        """Check zeroci server is still running."""
        pid = self.get_process_pid("python3 zeroci")
        if not pid:
            recover.zeroci()

    def test_redis(self):
        """Check redis is still running and answering commands."""
        pid = self.get_process_pid("redis")
        if not pid:
            recover.redis()
        try:
            r = Redis()
            r.set("test_redis", "test")
            r.get("test_redis")
            r.delete("test_redis")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any Redis failure still recovers.
            recover.redis()

    def test_workers(self):
        """Check the five rq workers are up; restart any missing one."""
        pids = self.get_process_pid("python3 worker")
        workers = len(pids)
        if workers < 5:
            for i in range(1, 6):
                pid = self.get_process_pid(f"python3 worker{i}")
                if not pid:
                    recover.worker(i)

    def test_schedule(self):
        """Check the rq scheduler is up."""
        pid = self.get_process_pid("rqscheduler")
        if not pid:
            recover.scheduler()
if __name__ == "__main__":
health = Health()
health.test_zeroci_server()
health.test_redis()
health.test_workers()
health.test_schedule()
| 25.606061
| 76
| 0.562722
| 205
| 1,690
| 4.468293
| 0.336585
| 0.065502
| 0.085153
| 0.092795
| 0.151747
| 0.074236
| 0.074236
| 0.074236
| 0
| 0
| 0
| 0.006975
| 0.321302
| 1,690
| 65
| 77
| 26
| 0.79163
| 0.084615
| 0
| 0.133333
| 0
| 0
| 0.13909
| 0.032301
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.088889
| 0
| 0.244444
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5570f5a350941f5510b456b02cd8353c974ae345
| 13,284
|
py
|
Python
|
vesper/command/recording_importer.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | null | null | null |
vesper/command/recording_importer.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | null | null | null |
vesper/command/recording_importer.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | null | null | null |
"""Module containing class `RecordingImporter`."""
from pathlib import Path
import itertools
import logging
import os
from django.db import transaction
from vesper.command.command import CommandExecutionError
from vesper.django.app.models import (
DeviceConnection, Job, Recording, RecordingChannel, RecordingFile)
from vesper.singletons import recording_manager
import vesper.command.command_utils as command_utils
import vesper.command.recording_utils as recording_utils
import vesper.util.audio_file_utils as audio_file_utils
import vesper.util.signal_utils as signal_utils
import vesper.util.time_utils as time_utils
class RecordingImporter:

    """
    Importer for recordings already stored in files on the Vesper server.

    The recordings to be imported are specified in the `paths` argument
    as server-side directory and file paths. Files from directories can
    be imported either recursively or non-recursively according to the
    `recursive` argument. The import does not copy or move recordings:
    it stores the existing paths of their files for future reference.

    The importer obtains recording metadata for imported files with the
    aid of a recording file parser extension, specified by the
    `recording_file_parser` argument.
    """

    extension_name = 'Recording Importer'

    def __init__(self, args):
        """Initialize the importer from a command argument mapping."""
        self.paths = command_utils.get_required_arg('paths', args)
        self.recursive = command_utils.get_optional_arg(
            'recursive', args, True)
        spec = command_utils.get_optional_arg('recording_file_parser', args)
        self.file_parser = recording_utils.create_recording_file_parser(spec)

    def execute(self, job_info):
        """
        Run the import for the job identified by `job_info`.

        Finds recording files at `self.paths`, groups them into
        recordings, and imports those not already archived inside a
        single database transaction. Returns `True` on success; on
        failure logs the exception and re-raises, leaving the archive
        unmodified.
        """
        self._job = Job.objects.get(id=job_info.job_id)
        self._logger = logging.getLogger()
        try:
            recordings = self._get_recordings()
            new_recordings, old_recordings = \
                self._partition_recordings(recordings)
            self._log_header(new_recordings, old_recordings)
            # Import atomically so a failure leaves the archive unchanged.
            with transaction.atomic():
                self._import_recordings(new_recordings)
        except Exception as e:
            self._logger.error((
                'Recording import failed with an exception.\n'
                'The exception message was:\n'
                '    {}\n'
                'The archive was not modified.\n'
                'See below for exception traceback.').format(str(e)))
            raise
        else:
            self._log_imports(new_recordings)
        return True

    def _get_recordings(self):
        """Collect files from all paths and group them into recordings."""
        files = list(itertools.chain.from_iterable(
            self._get_path_recording_files(path) for path in self.paths))
        return recording_utils.group_recording_files(files)

    def _get_path_recording_files(self, path):
        """Return the recording files for one directory or file path."""
        if os.path.isdir(path):
            return self._get_dir_recording_files(path)
        else:
            file = self._get_recording_file(path)
            return [] if file is None else [file]

    def _get_dir_recording_files(self, path):
        """Return recording files within a directory, honoring `recursive`."""
        files = []
        for (dir_path, dir_names, file_names) in os.walk(path):
            for file_name in file_names:
                file_path = os.path.join(dir_path, file_name)
                file = self._get_recording_file(Path(file_path))
                if file is not None:
                    files.append(file)
            if not self.recursive:
                # Stop `os.walk` from descending into subdirectories.
                del dir_names[:]
        return files

    def _get_recording_file(self, file_path):
        """Parse one file; return its metadata, or `None` for non-WAV files."""
        if not audio_file_utils.is_wave_file_path(file_path):
            return None
        else:
            rel_path, abs_path = self._get_recording_file_paths(file_path)
            file = self._parse_recording_file(abs_path)
            file.path = rel_path
            _set_recording_file_channel_info(file)
            return file

    def _get_recording_file_paths(self, file_path):
        """Return a (relative, absolute) path pair for a recording file."""
        if file_path.is_absolute():
            if not file_path.exists():
                # Bug fix: the message previously lacked its `.format`
                # call, so a literal "{}" appeared in the error text.
                raise CommandExecutionError(
                    'Purported recording file "{}" does not exist.'.format(
                        file_path))
            rel_path = self._get_relative_path(file_path)
            return rel_path, file_path
        else:
            # path is relative
            abs_path = self._get_absolute_path(file_path)
            return file_path, abs_path

    def _get_relative_path(self, file_path):
        """Return `file_path` relative to its containing recording dir."""
        manager = recording_manager.instance
        try:
            _, rel_path = manager.get_relative_recording_file_path(file_path)
        except ValueError:
            # Always raises `CommandExecutionError`.
            self._handle_bad_recording_file_path(
                file_path, 'is not in', manager)
        return rel_path

    def _handle_bad_recording_file_path(self, file_path, condition, manager):
        """Raise a `CommandExecutionError` describing a bad file path."""
        dir_paths = manager.recording_dir_paths
        if len(dir_paths) == 1:
            s = 'the recording directory "{}"'.format(dir_paths[0])
        else:
            path_list = str(list(dir_paths))
            s = 'any of the recording directories {}'.format(path_list)
        raise CommandExecutionError(
            'Recording file "{}" {} {}.'.format(file_path, condition, s))

    def _get_absolute_path(self, file_path):
        """Resolve a relative file path against the recording directories."""
        manager = recording_manager.instance
        try:
            return manager.get_absolute_recording_file_path(file_path)
        except ValueError:
            # Always raises `CommandExecutionError`.
            self._handle_bad_recording_file_path(
                file_path, 'could not be found in', manager)

    def _parse_recording_file(self, file_path):
        """Parse file metadata, inferring the recorder when unspecified."""
        try:
            file = self.file_parser.parse_file(str(file_path))
        except ValueError as e:
            raise CommandExecutionError(
                'Error parsing recording file "{}": {}'.format(
                    file_path, str(e)))
        if file.recorder is None:
            file.recorder = _get_recorder(file)
        return file

    def _partition_recordings(self, recordings):
        """Split recordings into (new, already-archived) lists."""
        new_recordings = []
        old_recordings = []
        for r in recordings:
            if self._recording_exists(r):
                old_recordings.append(r)
            else:
                new_recordings.append(r)
        return (new_recordings, old_recordings)

    def _recording_exists(self, recording):
        """Return `True` if a recording with the same station, recorder,
        and start time is already archived."""
        try:
            Recording.objects.get(
                station=recording.station,
                recorder=recording.recorder,
                start_time=recording.start_time)
        except Recording.DoesNotExist:
            return False
        else:
            return True

    def _log_header(self, new_recordings, old_recordings):
        """Log a summary of how many new and old recordings were found."""
        log = self._logger.info
        new_count = len(new_recordings)
        old_count = len(old_recordings)
        if new_count == 0 and old_count == 0:
            log('Found no recordings at the specified paths.')
        else:
            new_text = self._get_num_recordings_text(new_count, 'new')
            old_text = self._get_num_recordings_text(old_count, 'old')
            log('Found {} and {} at the specified paths.'.format(
                new_text, old_text))
        if len(new_recordings) == 0:
            log('No recordings will be imported.')
        else:
            log('The new recordings will be imported.')

    def _get_num_recordings_text(self, count, description):
        """Return e.g. '2 new recordings' with correct pluralization."""
        suffix = '' if count == 1 else 's'
        return '{} {} recording{}'.format(count, description, suffix)

    def _import_recordings(self, recordings):
        """Create Recording, RecordingChannel, and RecordingFile rows
        for each recording, and attach the saved model to `r.model`."""
        for r in recordings:
            end_time = signal_utils.get_end_time(
                r.start_time, r.length, r.sample_rate)
            creation_time = time_utils.get_utc_now()
            recording = Recording(
                station=r.station,
                recorder=r.recorder,
                num_channels=r.num_channels,
                length=r.length,
                sample_rate=r.sample_rate,
                start_time=r.start_time,
                end_time=end_time,
                creation_time=creation_time,
                creating_job=self._job)
            recording.save()
            r.model = recording
            for channel_num in range(r.num_channels):
                recorder_channel_num = r.recorder_channel_nums[channel_num]
                mic_output = r.mic_outputs[channel_num]
                channel = RecordingChannel(
                    recording=recording,
                    channel_num=channel_num,
                    recorder_channel_num=recorder_channel_num,
                    mic_output=mic_output)
                channel.save()
            start_index = 0
            for file_num, f in enumerate(r.files):
                # We store all paths in the archive database as POSIX
                # paths, even on Windows, for portability, since Python's
                # `pathlib` module recognizes the slash as a path separator
                # on all platforms, but not the backslash.
                path = f.path.as_posix()
                file = RecordingFile(
                    recording=recording,
                    file_num=file_num,
                    start_index=start_index,
                    length=f.length,
                    path=path)
                file.save()
                start_index += f.length

    def _log_imports(self, recordings):
        """Log each imported recording and its file paths."""
        for r in recordings:
            log = self._logger.info
            log('Imported recording {} with files:'.format(str(r.model)))
            for f in r.files:
                log('    {}'.format(f.path.as_posix()))
def _get_recorder(file):
    """Return the single recorder active at the file's station during the file.

    Raises `CommandExecutionError` when zero or more than one candidate
    recorder is found.
    """
    end_time = signal_utils.get_end_time(
        file.start_time, file.length, file.sample_rate)
    station_recorders = file.station.get_station_devices(
        'Audio Recorder', file.start_time, end_time)
    num_recorders = len(station_recorders)
    if num_recorders == 0:
        raise CommandExecutionError(
            'Could not find recorder for recording file "{}".'.format(
                file.path))
    if num_recorders > 1:
        raise CommandExecutionError(
            'Found more than one possible recorder for file "{}".'.format(
                file.path))
    return station_recorders[0].device
def _set_recording_file_channel_info(file):
    """Fill in `file.recorder_channel_nums` and `file.mic_outputs`.

    When the file name did not specify recorder channel numbers, they
    are inferred from the recorder's connected microphone outputs, whose
    count must match the file's channel count.
    """
    mic_outputs = _get_recorder_mic_outputs(file.recorder, file.start_time)
    if file.recorder_channel_nums is None:
        if len(mic_outputs) == file.num_channels:
            # We assume that recorder inputs map to file channel numbers
            # in increasing order.
            file.recorder_channel_nums = tuple(sorted(mic_outputs.keys()))
        else:
            raise CommandExecutionError((
                'Could not infer recorder channel numbers for '
                'recording file "{}".').format(file.path))
    file.mic_outputs = tuple(
        _get_mic_output(mic_outputs, i, file.path)
        for i in file.recorder_channel_nums)
def _get_recorder_mic_outputs(recorder, time):
    """
    Gets a mapping from recorder input channel numbers to connected
    microphone outputs for the specified recorder and time.
    """
    connections = DeviceConnection.objects.filter(
        input__device=recorder,
        output__device__model__type='Microphone',
        start_time__lte=time,
        end_time__gt=time)
    return {c.input.channel_num: c.output for c in connections}
def _get_mic_output(mic_outputs, channel_num, file_path):
try:
return mic_outputs[channel_num]
except KeyError:
raise CommandExecutionError((
'Could not find microphone output connected to recorder input '
'{} for recording file "{}".').format(channel_num, file_path))
| 32.479218
| 77
| 0.564664
| 1,410
| 13,284
| 5.052482
| 0.18227
| 0.042673
| 0.016844
| 0.013476
| 0.165076
| 0.085345
| 0.044076
| 0.036216
| 0.036216
| 0.022179
| 0
| 0.001188
| 0.366531
| 13,284
| 408
| 78
| 32.558824
| 0.845395
| 0.101551
| 0
| 0.189076
| 0
| 0
| 0.074987
| 0.001773
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.096639
| 0
| 0.273109
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5576c4dbc04cfe8f5be4007143719bb7a25f5574
| 2,033
|
py
|
Python
|
Quotebot/utils.py
|
musawakiliML/Whatsapp-Bots
|
29fe6c645010ddedac1424b22c842b3e61511644
|
[
"MIT"
] | null | null | null |
Quotebot/utils.py
|
musawakiliML/Whatsapp-Bots
|
29fe6c645010ddedac1424b22c842b3e61511644
|
[
"MIT"
] | null | null | null |
Quotebot/utils.py
|
musawakiliML/Whatsapp-Bots
|
29fe6c645010ddedac1424b22c842b3e61511644
|
[
"MIT"
] | null | null | null |
import requests
def random_quote(type=''):
    '''Return a quote from the ZenQuotes API.

    Parameters:
        type: "today" for the quote of the day, "quote" for a random
            quote; anything else yields "Invalid Request!".

    Returns a formatted quote string, or an error message containing
    the HTTP status code when the request fails.
    '''
    # NOTE(review): the API key is hard-coded in these URLs; consider
    # moving it to configuration.
    endpoints = {
        "today": "https://zenquotes.io/api/today/ff5e73b15a05ca51951b758bd7943ce803d71772",
        "quote": "https://zenquotes.io/api/random/ff5e73b15a05ca51951b758bd7943ce803d71772",
    }
    url = endpoints.get(type)
    if url is None:
        return "Invalid Request!"
    # The two original branches were byte-identical except for the URL;
    # deduplicated into a single request/format path.
    response_quote = requests.get(url)
    if response_quote.status_code != 200:
        return f"Invalid Request {response_quote.status_code}"
    quote_data = response_quote.json()
    quote = quote_data[0]['q']
    quote_author = quote_data[0]['a']
    return f"'{quote_author.title()}' Said:{quote}"
def jokes():
    '''Fetch a random joke from some-random-api.ml.'''
    response_joke = requests.get("https://some-random-api.ml/joke")
    if response_joke.status_code != 200:
        return f"Invalid Request {response_joke.status_code}"
    return response_joke.json()['joke']
def cat_dog(input_message):
    '''Return a cat or dog image URL based on keywords in the message.'''
    if "cat" in input_message:
        # A "gif" request uses the generic cat endpoint; otherwise the
        # "cute" variant.
        if "gif" in input_message:
            return requests.get("https://cataas.com/cat").url
        return requests.get("https://cataas.com/cat/cute").url
    if "dog" in input_message:
        response_dog = requests.get("https://dog.ceo/api/breeds/image/random")
        return response_dog.json()['message']
    return "Invalid Request!"
| 31.765625
| 113
| 0.624693
| 246
| 2,033
| 4.963415
| 0.223577
| 0.085176
| 0.078624
| 0.075348
| 0.550369
| 0.529894
| 0.457002
| 0.457002
| 0.386568
| 0.386568
| 0
| 0.044342
| 0.256763
| 2,033
| 64
| 114
| 31.765625
| 0.763733
| 0.028037
| 0
| 0.413043
| 0
| 0
| 0.272635
| 0.066633
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.021739
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
557a41cb5f2fe81007b03e1796d482334c493ead
| 3,401
|
py
|
Python
|
src/day16.py
|
dcbriccetti/advent-of-code-2021-python
|
65958fb256234cf882714d3c3306cdbf60bcc0ae
|
[
"Unlicense"
] | 4
|
2021-12-10T22:47:56.000Z
|
2021-12-26T21:35:58.000Z
|
src/day16.py
|
dcbriccetti/advent-of-code-2021-python
|
65958fb256234cf882714d3c3306cdbf60bcc0ae
|
[
"Unlicense"
] | null | null | null |
src/day16.py
|
dcbriccetti/advent-of-code-2021-python
|
65958fb256234cf882714d3c3306cdbf60bcc0ae
|
[
"Unlicense"
] | null | null | null |
from math import prod
from pathlib import Path
class BitStream:
    'Deliver integers from a stream of bits created from a hexadecimal string'

    bit_str: str
    pos: int

    def __init__(self, hex_nibbles_str: str) -> None:
        # Expand each hex digit into its zero-padded 4-bit binary form,
        # e.g. 'e' -> '1110', '0' -> '0000'.
        self.bit_str = ''.join(
            format(int(nibble, 16), '04b') for nibble in hex_nibbles_str)
        self.pos = 0

    def next_int(self, num_bits: int) -> int:
        'Get the next `num_bits` bits and return them parsed as a binary number'
        return int(self._next_str(num_bits), 2)

    def _next_str(self, num_bits) -> str:
        'Return the next `num_bits` bits as a string'
        taken, self.bit_str = self.bit_str[:num_bits], self.bit_str[num_bits:]
        self.pos += num_bits
        return taken


class Decoder:
    'Decode the BITS packet and its nested contained packets'

    bits: BitStream
    versions_sum: int

    # Operations for packet types 0-3 and 5-7; type 4 is a literal and
    # has no operator.
    operators = [
        sum,
        prod,
        min,
        max,
        None,
        lambda vals: int(vals[0] > vals[1]),
        lambda vals: int(vals[0] < vals[1]),
        lambda vals: int(vals[0] == vals[1]),
    ]

    def __init__(self, packet_hex):
        self.bits = BitStream(packet_hex)
        print(f'Decoder started for {len(self.bits.bit_str)} bits {packet_hex} {self.bits.bit_str}')
        self.versions_sum = 0

    def parse(self, level=0) -> int:
        'Parse one packet (recursively, with its subpackets) and return its value'
        next_int = self.bits.next_int

        def parse_literal() -> int:
            # Groups of 5 bits: a continuation flag followed by a nibble.
            value = 0
            while True:
                more = bool(next_int(1))
                value = (value << 4) + next_int(4)  # Slide over and drop in new bits
                if not more:
                    break
            print(f'{value=}')
            return value

        def parse_operator(type: int) -> int:
            length_type_id = next_int(1)
            if length_type_id == 0:
                # Subpackets delimited by total bit length.
                packets_length = next_int(15)
                print(f'{packets_length=}')
                stop_pos = self.bits.pos + packets_length
                values = []
                while self.bits.pos < stop_pos:
                    values.append(self.parse(level + 1))
            else:
                # Subpackets delimited by count.
                packet_count = next_int(11)
                print(f'{packet_count=}')
                values = [self.parse(level + 1) for _ in range(packet_count)]
            return Decoder.operators[type](values)

        ver = next_int(3)
        self.versions_sum += ver
        type = next_int(3)
        print(' ' * level + f'{ver=}, {type=}, ', end='')
        return parse_literal() if type == 4 else parse_operator(type)
if __name__ == '__main__':
    # Read the puzzle input (one hex string) and decode the outer packet.
    decoder = Decoder(Path('../data/16.txt').read_text().strip())
    print(f'Result: {decoder.parse()}, versions sum: {decoder.versions_sum}')
| 36.180851
| 100
| 0.586004
| 456
| 3,401
| 4.129386
| 0.265351
| 0.033457
| 0.025491
| 0.020712
| 0.121083
| 0.090813
| 0.062135
| 0.036644
| 0.036644
| 0.036644
| 0
| 0.017759
| 0.304616
| 3,401
| 93
| 101
| 36.569892
| 0.77759
| 0.104381
| 0
| 0
| 0
| 0.013333
| 0.15509
| 0.013772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.026667
| 0
| 0.36
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
557ac6c635a14924685b462c2a901a11408e15a1
| 6,328
|
py
|
Python
|
Santander-spyder.py
|
Herikc2/Santander-Customer-Satisfaction
|
c868538ab06c252b2f9e51bac384b0f6e48efd70
|
[
"MIT"
] | null | null | null |
Santander-spyder.py
|
Herikc2/Santander-Customer-Satisfaction
|
c868538ab06c252b2f9e51bac384b0f6e48efd70
|
[
"MIT"
] | null | null | null |
Santander-spyder.py
|
Herikc2/Santander-Customer-Satisfaction
|
c868538ab06c252b2f9e51bac384b0f6e48efd70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 17:13:15 2021
Database: https://www.kaggle.com/c/santander-customer-satisfaction
@author: Herikc Brecher
"""
# Import from libraries
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import seaborn as sns
import pickle
import warnings
warnings.filterwarnings("ignore")
# Loading the training dataset in CSV format
training_file = 'data/train.csv'
test_file = 'data/test.csv'
data_training = pd.read_csv(training_file)
test_data = pd.read_csv (test_file)
print(data_training.shape)
print(test_data.shape)
# Viewing the first 20 lines
data_training.head (20)
# Data type of each attribute
data_training.dtypes
# Statistical Summary
data_training.describe()
# Distribution of classes
data_training.groupby("TARGET").size()
# Dividing by class
data_class_0 = data_training[data_training['TARGET'] == 0]
data_class_1 = data_training[data_training['TARGET'] == 1]
counter_class_0 = data_class_0.shape[0]
contador_classe_1 = data_class_1.shape[0]
# NOTE(review): sampling `counter_class_0` rows from class 0 just reorders
# the full class-0 set; for under-sampling one would expect
# `contador_classe_1` here — confirm the intent.
data_class_0_sample = data_class_0.sample(counter_class_0)
training_data = pd.concat([data_class_0_sample, data_class_1], axis = 0)
# Pearson correlation
data_training.corr(method = 'pearson')
# Finding the correlation between the target variable and the predictor variables
corr = training_data[training_data.columns [1:]].corr()['TARGET'][:].abs()
minimal_correlation = 0.02
corr2 = corr[corr > minimal_correlation]
corr2.shape
corr2
corr_keys = corr2.index.tolist()
data_filter = data_training[corr_keys]
data_filter.head(20)
data_filter.dtypes
# Filtering only the columns that have a correlation above the minimum variable
# NOTE(review): the filter keys come from `training_data`, but they are
# applied to the original `data_training` — verify which dataset should
# feed the models.
array_treino = data_training[corr_keys].values
# Separating the array into input and output components for training data
X = array_treino[:, 0:array_treino.shape[1] - 1]
Y = array_treino[:, array_treino.shape[1] - 1]
# Creating the training and test dataset
test_size = 0.30
X_training, X_testing, Y_training, Y_testing = train_test_split(X, Y, test_size = test_size)
# Generating normalized data
scaler = Normalizer (). fit (X_training)
normalizedX_treino = scaler.transform(X_training)
# NOTE(review): a second normalizer is fit on the test features; the usual
# practice is to reuse the scaler fit on the training data — confirm.
scaler = Normalizer().fit(X_testing)
normalizedX_teste = scaler.transform(X_testing)
Y_training = Y_training.astype('int')
Y_testing = Y_testing.astype('int')
'''
Execution of a series of classification algorithms is based on those that have the best result.
For this test, the training base is used without any treatment or data selection.
'''
# Setting the number of folds for cross validation
num_folds = 10
# Preparing the list of models
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('NB', GaussianNB()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('SVM', SVC()))
results = []
names = []
# Cross-validate each candidate model and report mean/std accuracy.
for name, model in models:
    kfold = KFold (n_splits = num_folds)
    cv_results = cross_val_score (model, X_training, Y_training, cv = kfold, scoring = 'accuracy')
    results.append (cv_results)
    names.append (name)
    msg = "% s:% f (% f)"% (name, cv_results.mean (), cv_results.std ())
    print (msg)
# Boxplot to compare the algorithms
fig = plt.figure ()
fig.suptitle ('Comparison of Classification Algorithms')
ax = fig.add_subplot (111)
plt.boxplot (results)
ax.set_xticklabels (names)
plt.show ()
# Function to evaluate the performance of the model and save it in a pickle format for future reuse.
def model_report(model_name):
    """Report metrics for the most recently trained model and pickle it.

    Relies on the module-level globals `score`, `result`, `model`,
    `X_testing` and `Y_testing` being set by the caller before invocation.

    Parameters:
        model_name: short name appended to the saved pickle file name.
    """
    # Print result
    print("Accuracy:% .3f"% score)
    # Making predictions and building the Confusion Matrix
    predictions = result.predict(X_testing)
    matrix = confusion_matrix(Y_testing, predictions)
    print(matrix)
    report = classification_report(Y_testing, predictions)
    print(report)
    # The precision matrix is created to visualize the number of correct cases
    labels = ['SATISFIED', 'UNSATISFIED']
    cm = confusion_matrix(Y_testing, predictions)
    cm = pd.DataFrame(cm, index = ['0', '1'], columns = ['0', '1'])
    # Bug fix: (10.10) was a single float 10.1, not a 2-tuple, which
    # matplotlib rejects for `figsize`; a 10x10 figure was clearly intended.
    plt.figure(figsize = (10, 10))
    sns.heatmap(cm, cmap = "Blues", linecolor = 'black', linewidth = 1, annot = True, fmt = '', xticklabels = labels, yticklabels = labels)
    # Saving the model (with-statement closes the file handle, which the
    # original `pickle.dump(model, open(...))` leaked)
    file = 'models/final_classifier_model' + model_name + '.sav'
    with open(file, 'wb') as fid:
        pickle.dump(model, fid)
    print("Saved Model!")
# Train each final model and report on it.
# Bug fix: every fit/score below previously paired training features with
# `Y_testing` (the 30% split), whose row count and row identity do not
# match the training features; `Y_training` is the correct target.
# Linear Regression
model = LogisticRegression()
result = model.fit(normalizedX_treino, Y_training)
score = result.score(normalizedX_treino, Y_training)
model_report("LR")
# Linear Discriminant Analysis
model = LinearDiscriminantAnalysis()
result = model.fit(X_training, Y_training)
score = result.score(X_training, Y_training)
model_report("LDA")
# KNN
model = KNeighborsClassifier()
result = model.fit(normalizedX_treino, Y_training)
score = result.score(normalizedX_treino, Y_training)
model_report("KNN")
# CART
model = DecisionTreeClassifier()
result = model.fit(X_training, Y_training)
score = result.score(X_training, Y_training)
model_report("CART")
# XGBOOST
model = XGBClassifier()
result = model.fit(X_training, Y_training)
score = result.score(X_training, Y_training)
model_report("XGBOOST")
# Loading the model
# Bug fix: the load path was 'models model_classifier_final_XGBOOST.sav',
# which is neither a valid relative path nor the name used when saving
# ('models/final_classifier_model<NAME>.sav').
file = 'models/final_classifier_modelXGBOOST.sav'
with open(file, 'rb') as fid:
    model_classifier = pickle.load(fid)
model_prod = model_classifier.score(X_testing, Y_testing)
print("Uploaded Model")
# Print Result
# Bug fix: `score` returns a plain float, which has no `.mean()` method.
print("Accuracy:% .3f"% (model_prod * 100))
| 30.423077
| 140
| 0.733881
| 838
| 6,328
| 5.361575
| 0.317422
| 0.030269
| 0.024928
| 0.022702
| 0.1785
| 0.101714
| 0.090585
| 0.090585
| 0.090585
| 0.090585
| 0
| 0.013039
| 0.163717
| 6,328
| 207
| 141
| 30.570048
| 0.835979
| 0.175727
| 0
| 0.084034
| 0
| 0
| 0.069616
| 0.01321
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008403
| false
| 0
| 0.151261
| 0
| 0.159664
| 0.07563
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
557b0f82fa2e590f23c344cfc48bb3aef2ee423d
| 4,502
|
py
|
Python
|
Memorization Tool/task/tool.py
|
soukalli/jetbrain-accademy
|
fc486d439b4b54a58956e1186eb69c56b85f85f1
|
[
"MIT"
] | null | null | null |
Memorization Tool/task/tool.py
|
soukalli/jetbrain-accademy
|
fc486d439b4b54a58956e1186eb69c56b85f85f1
|
[
"MIT"
] | null | null | null |
Memorization Tool/task/tool.py
|
soukalli/jetbrain-accademy
|
fc486d439b4b54a58956e1186eb69c56b85f85f1
|
[
"MIT"
] | null | null | null |
# write your code here
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
# SQLite database stored next to the script; `check_same_thread=False`
# lets the connection be used outside the creating thread.
engine = create_engine('sqlite:///flashcard.db?check_same_thread=False')
Base = declarative_base()
Session = sessionmaker(bind=engine)
# Single module-wide session used by all helper functions below.
session = Session()
# Box promotion order on a correct answer: A -> B -> C (cards answered
# correctly in box C are deleted; see update_card_status).
successor = {'A': 'B', 'B': 'C'}
class FlashCard(Base):
    # ORM model for one stored flashcard.
    __tablename__ = 'flashcard'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    question = Column(String(255))
    answer = Column(String(255))
    # Spaced-repetition box: 'A', 'B' or 'C'.
    box = Column(String(1))
# Create the flashcard table if it does not already exist.
Base.metadata.create_all(engine)
def print_main_menu():
    """Display the three top-level menu options."""
    for line in ("1. Add flashcards", "2. Practice flashcards", "3. Exit"):
        print(line)
def process_menu_1():
    """Run the "add flashcards" submenu until the user chooses Exit."""

    def prompt_non_blank(label):
        # Re-prompt until the user enters something other than whitespace.
        print(label)
        text = input()
        while text.strip() == "":
            print(label)
            text = input()
        return text

    while True:
        print("1. Add a new flashcard")
        print("2. Exit")
        choice = input()
        if choice == "2":
            break
        if choice == "1":
            question = prompt_non_blank("Question:")
            answer = prompt_non_blank("Answer:")
            session.add(FlashCard(question=question, answer=answer, box='A'))
            session.commit()
        else:
            print("{0} is not an option".format(choice))
def update_card_status(flashcard, is_success):
    """Advance the card's box on success; reset it to box 'A' on failure."""
    if is_success:
        if flashcard.box == 'C':
            # Cards answered correctly in the last box are retired.
            session.delete(flashcard)
        else:
            flashcard.box = successor.get(flashcard.box)
    else:
        flashcard.box = 'A'
    session.commit()
def process_confirmation_flashcard(flashcard):
    """Reveal the stored answer for the given flashcard."""
    message = "Answer: {}".format(flashcard.answer)
    print(message)
def process_answer_flashcard(flashcard):
    """Ask whether the user's answer was right and update the card's box."""
    print('press "y" if your answer is correct:')
    print('press "n" if your answer is wrong:')
    while True:
        choice = input()
        if choice in ("y", "n"):
            update_card_status(flashcard, choice == "y")
            break
        print("{0} is not an option".format(choice))
def process_update_flashcard(flashcard):
    """Let the user delete the flashcard or edit its question and answer."""
    print('press "d" to delete the flashcard:')
    print('press "e" to edit the flashcard:')
    while True:
        choice = input()
        if choice == "e":
            print("current question: {0}".format(flashcard.question))
            flashcard.question = input("please write a new question:\n")
            print("current answer: {0}".format(flashcard.answer))
            flashcard.answer = input("please write a new answer:\n")
            global session
            session.commit()
            break
        if choice == "d":
            session.delete(flashcard)
            break
        print("{0} is not an option".format(choice))
def process_flashcard(flashcard):
    """Show one flashcard's question and dispatch on the user's choice."""
    print("Question: {}".format(flashcard.question))
    print('press "y" to see the answer:')
    print('press "n" to skip:')
    print('press "u" to update:')
    while True:
        choice = input()
        if choice == "y":
            process_confirmation_flashcard(flashcard)
            process_answer_flashcard(flashcard)
            break
        if choice == "n":
            process_answer_flashcard(flashcard)
            break
        if choice == "u":
            process_update_flashcard(flashcard)
            break
        print("{0} is not an option".format(choice))
def process_menu_2():
    """Practice every stored flashcard, or report that none exist."""
    flashcards = session.query(FlashCard).all()
    if not flashcards:
        print('There is no flashcard to practice!')
        return
    for flashcard in flashcards:
        process_flashcard(flashcard)
def process_main_menu(choice):
    """Dispatch one main-menu selection ('3' is handled by the caller)."""
    actions = {"1": process_menu_1, "2": process_menu_2}
    action = actions.get(choice)
    if action is not None:
        action()
    elif choice != "3":
        print("{} is not an option".format(choice))
def main_loop():
    """Show the main menu repeatedly until the user chooses '3' (Exit)."""
    while True:
        print_main_menu()
        choice = input()
        process_main_menu(choice)
        if choice == "3":
            break
    print("Bye!")
# Start the interactive tool.
main_loop()
| 28.675159
| 72
| 0.589294
| 517
| 4,502
| 4.976789
| 0.201161
| 0.066071
| 0.070735
| 0.025262
| 0.212204
| 0.188885
| 0.188885
| 0.153906
| 0.111932
| 0.070735
| 0
| 0.00934
| 0.286539
| 4,502
| 156
| 73
| 28.858974
| 0.791719
| 0.004442
| 0
| 0.314961
| 0
| 0
| 0.144866
| 0.010268
| 0
| 0
| 0
| 0.00641
| 0
| 1
| 0.07874
| false
| 0
| 0.031496
| 0
| 0.15748
| 0.228346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
557b20fb22a3ac884a03a5ffa7db1db58d06ea7c
| 9,862
|
py
|
Python
|
src/compass/utils/geo_metadata.py
|
vbrancat/COMPASS
|
285412ac2fc474e789e255dae16eba4485017c07
|
[
"Apache-2.0"
] | 11
|
2021-11-24T07:24:11.000Z
|
2022-03-23T16:40:13.000Z
|
src/compass/utils/geo_metadata.py
|
vbrancat/COMPASS
|
285412ac2fc474e789e255dae16eba4485017c07
|
[
"Apache-2.0"
] | 6
|
2021-12-15T16:45:58.000Z
|
2022-03-24T23:36:16.000Z
|
src/compass/utils/geo_metadata.py
|
LiangJYu/COMPASS
|
459f5d6cf05c2b7c9013f0d862bfef22af280fa6
|
[
"Apache-2.0"
] | 4
|
2021-12-07T19:45:26.000Z
|
2022-02-28T23:05:37.000Z
|
from dataclasses import dataclass
from datetime import datetime
import json
from types import SimpleNamespace
import isce3
from isce3.core import LUT2d, Poly1d, Orbit
from isce3.product import GeoGridParameters
import numpy as np
from ruamel.yaml import YAML
from shapely.geometry import Point, Polygon
from compass.utils.geo_runconfig import GeoRunConfig
from compass.utils.raster_polygon import get_boundary_polygon
from compass.utils.wrap_namespace import wrap_namespace, unwrap_to_dict
def _poly1d_from_dict(poly1d_dict) -> Poly1d:
    """Rebuild an isce3 Poly1d from its serialized dict form."""
    coeffs = poly1d_dict['coeffs']
    mean = poly1d_dict['mean']
    std = poly1d_dict['std']
    return Poly1d(coeffs, mean, std)
def _lut2d_from_dict(lut2d_dict) -> LUT2d:
    """Rebuild an isce3 LUT2d from its serialized dict form."""
    shape = (lut2d_dict['length'], lut2d_dict['width'])
    data = np.array(lut2d_dict['data']).reshape(shape)
    return LUT2d(lut2d_dict['x_start'], lut2d_dict['y_start'],
                 lut2d_dict['x_spacing'], lut2d_dict['y_spacing'],
                 data)
def _orbit_from_dict(orbit_dict) -> Orbit:
    """Rebuild an isce3 Orbit (state vectors + reference epoch) from a dict."""
    ref_epoch = isce3.core.DateTime(orbit_dict['ref_epoch'])
    # Reconstruct the evenly spaced state-vector times.
    time_info = orbit_dict['time']
    dt = float(time_info['spacing'])
    t0 = ref_epoch + isce3.core.TimeDelta(float(time_info['first']))
    n_pts = int(time_info['size'])
    orbit_sv = []
    for i in range(n_pts):
        t = t0 + isce3.core.TimeDelta(i * dt)
        pos = [float(orbit_dict[f'position_{xyz}'][i]) for xyz in 'xyz']
        vel = [float(orbit_dict[f'velocity_{xyz}'][i]) for xyz in 'xyz']
        orbit_sv.append(isce3.core.StateVector(t, pos, vel))
    return Orbit(orbit_sv, ref_epoch)
@dataclass(frozen=True)
class GeoCslcMetadata():
# subset of burst class attributes
sensing_start: datetime
sensing_stop: datetime
radar_center_frequency: float
wavelength: float
azimuth_steer_rate: float
azimuth_time_interval: float
slant_range_time: float
starting_range: float
range_sampling_rate: float
range_pixel_spacing: float
azimuth_fm_rate: Poly1d
doppler: Poly1d
range_bandwidth: float
polarization: str # {VV, VH, HH, HV}
burst_id: str # t{track_number}_iw{1,2,3}_b{burst_index}
platform_id: str # S1{A,B}
center: Point # {center lon, center lat} in degrees
border: Polygon # list of lon, lat coordinate tuples (in degrees) representing burst border
orbit: isce3.core.Orbit
orbit_direction: str
# VRT params
tiff_path: str # path to measurement tiff in SAFE/zip
i_burst: int
# window parameters
range_window_type: str
range_window_coefficient: float
runconfig: SimpleNamespace
geogrid: GeoGridParameters
nodata: str
input_data_ipf_version: str
isce3_version: str
@classmethod
def from_georunconfig(cls, cfg: GeoRunConfig):
'''Create GeoBurstMetadata class from GeoRunConfig object
Parameter:
---------
cfg : GeoRunConfig
GeoRunConfig containing geocoded burst metadata
'''
burst = cfg.bursts[0]
burst_id = burst.burst_id
geogrid = cfg.geogrids[burst_id]
# get boundary from geocoded raster
burst_id = burst.burst_id
date_str = burst.sensing_start.strftime("%Y%m%d")
pol = burst.polarization
geo_raster_path = f'{cfg.output_dir}/{burst_id}_{date_str}_{pol}.slc'
geo_boundary = get_boundary_polygon(geo_raster_path, np.nan)
center = geo_boundary.centroid
# place holders
nodata_val = '?'
ipf_ver = '?'
isce3_ver = '?'
return cls(burst.sensing_start, burst.sensing_stop,
burst.radar_center_frequency, burst.wavelength,
burst.azimuth_steer_rate, burst.azimuth_time_interval,
burst.slant_range_time, burst.starting_range,
burst.range_sampling_rate, burst.range_pixel_spacing,
burst.azimuth_fm_rate, burst.doppler.poly1d,
burst.range_bandwidth, burst.polarization, burst_id,
burst.platform_id, center, geo_boundary, burst.orbit,
burst.orbit_direction, burst.tiff_path, burst.i_burst,
burst.range_window_type, burst.range_window_coefficient,
cfg.groups, geogrid, nodata_val, ipf_ver, isce3_ver)
@classmethod
def from_file(cls, file_path: str, fmt: str):
    '''Create GeoBurstMetadata from a YAML or JSON metadata file.

    Parameters
    ----------
    file_path : str
        File containing geocoded burst metadata
    fmt : str
        'yaml' or 'json'; anything else raises ValueError
    '''
    if fmt == 'yaml':
        yaml = YAML(typ='safe')
        load = yaml.load
    elif fmt == 'json':
        load = json.load
    else:
        raise ValueError(f'{fmt} unsupported. Only "json" or "yaml" supported')
    with open(file_path, 'r') as fid:
        meta_dict = load(fid)
    # Timestamps were serialized with str(datetime); parse them back.
    datetime_fmt = "%Y-%m-%d %H:%M:%S.%f"
    sensing_start = datetime.strptime(meta_dict['sensing_start'],
    datetime_fmt)
    sensing_stop = datetime.strptime(meta_dict['sensing_stop'],
    datetime_fmt)
    # Rebuild isce3 objects from their dict representations.
    azimuth_fm_rate = _poly1d_from_dict(meta_dict['azimuth_fm_rate'])
    dopp_poly1d = _poly1d_from_dict(meta_dict['doppler'])
    orbit = _orbit_from_dict(meta_dict['orbit'])
    # init geo_runconfig
    cfg = wrap_namespace(meta_dict['runconfig'])
    # init geogrid
    grid_dict = meta_dict['geogrid']
    geogrid = GeoGridParameters(grid_dict['start_x'], grid_dict['start_y'],
    grid_dict['spacing_x'],
    grid_dict['spacing_y'],
    grid_dict['length'], grid_dict['width'],
    grid_dict['epsg'])
    # Recompute boundary/center from the geocoded raster rather than
    # trusting the serialized values.
    # NOTE(review): this path layout (geo_{burst_id}_{pol}, no date in the
    # stem) differs from the one written by from_georunconfig — confirm
    # which convention the products on disk actually use.
    product_path = cfg.product_path_group.product_path
    date_str = sensing_start.strftime("%Y%m%d")
    burst_id = meta_dict['burst_id']
    pol = meta_dict['polarization']
    output_dir = f'{product_path}/{burst_id}/{date_str}'
    file_stem = f'geo_{burst_id}_{pol}'
    geo_raster_path = f'{output_dir}/{file_stem}'
    geo_boundary = get_boundary_polygon(geo_raster_path, np.nan)
    center = geo_boundary.centroid
    return cls(sensing_start, sensing_stop,
    meta_dict['radar_center_frequency'],
    meta_dict['wavelength'], meta_dict['azimuth_steer_rate'],
    meta_dict['azimuth_time_interval'],
    meta_dict['slant_range_time'], meta_dict['starting_range'],
    meta_dict['range_sampling_rate'],
    meta_dict['range_pixel_spacing'], azimuth_fm_rate,
    dopp_poly1d, meta_dict['range_bandwidth'], pol,
    meta_dict['burst_id'], meta_dict['platform_id'],
    center, geo_boundary, orbit, meta_dict['orbit_direction'],
    meta_dict['tiff_path'], meta_dict['i_burst'],
    meta_dict['range_window_type'],
    meta_dict['range_window_coefficient'], cfg, geogrid,
    meta_dict['nodata'], meta_dict['input_data_ipf_version'],
    meta_dict['isce3_version'])
def as_dict(self):
    '''Return a plain-dict view of this object for YAML/JSON serialization.

    Datetimes and geometries become strings, numpy scalars become floats,
    and isce3/runconfig objects are expanded into nested dicts.
    '''
    converted = {}
    for attr_name, attr_val in self.__dict__.items():
        if attr_name in ('border', 'center', 'sensing_start', 'sensing_stop'):
            out_val = str(attr_val)
        elif isinstance(attr_val, np.float64):
            out_val = float(attr_val)
        elif attr_name in ('azimuth_fm_rate', 'doppler'):
            # Poly1d -> dict of its defining scalars/coefficients.
            out_val = {'order': attr_val.order,
                       'mean': attr_val.mean,
                       'std': attr_val.std,
                       'coeffs': attr_val.coeffs}
        elif attr_name == 'orbit':
            # Orbit -> reference epoch, time grid, and per-axis state vectors.
            out_val = {'ref_epoch': str(attr_val.reference_epoch),
                       'time': {'first': attr_val.time.first,
                                'spacing': attr_val.time.spacing,
                                'last': attr_val.time.last,
                                'size': attr_val.time.size}}
            for col, axis in enumerate('xyz'):
                out_val[f'position_{axis}'] = attr_val.position[:, col].tolist()
            for col, axis in enumerate('xyz'):
                out_val[f'velocity_{axis}'] = attr_val.velocity[:, col].tolist()
        elif attr_name == 'runconfig':
            out_val = unwrap_to_dict(attr_val)
        elif attr_name == 'geogrid':
            out_val = {field: getattr(attr_val, field)
                       for field in ('start_x', 'start_y', 'spacing_x',
                                     'spacing_y', 'length', 'width', 'epsg')}
        else:
            # Everything else is assumed to be JSON/YAML-safe as-is.
            out_val = attr_val
        converted[attr_name] = out_val
    return converted
def to_file(self, dst, fmt: str):
    '''Serialize this metadata object to an open file handle.

    Parameters
    ----------
    dst : file pointer
        Writable file object to receive the serialized metadata.
    fmt : {'yaml', 'json'}
        Output format; any other value raises ValueError.
    '''
    serializable = self.as_dict()
    if fmt == 'yaml':
        YAML(typ='safe').dump(serializable, dst)
    elif fmt == 'json':
        json.dump(serializable, dst, indent=4)
    else:
        raise ValueError(f'{fmt} unsupported. Only "json" or "yaml" supported')
| 37.930769
| 95
| 0.58548
| 1,164
| 9,862
| 4.690722
| 0.190722
| 0.042491
| 0.012821
| 0.007692
| 0.139927
| 0.078388
| 0.056044
| 0.047253
| 0.047253
| 0.047253
| 0
| 0.008589
| 0.303488
| 9,862
| 259
| 96
| 38.07722
| 0.786286
| 0.088623
| 0
| 0.135417
| 0
| 0
| 0.124135
| 0.022353
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036458
| false
| 0.015625
| 0.067708
| 0.005208
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
557fbf2a8059c9beebbcd0bd1552ded759c8e7f0
| 2,227
|
py
|
Python
|
tests/test_db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from methinks.db import Entry
import pytest
from server.app import create_app
from server.app import db as _db
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
@pytest.fixture(scope="session")
def app(request):
    """Session-wide Flask application fixture."""
    return create_app()
@pytest.fixture(scope="session")
def db(app, request):
    """
    Session-wide initialised database.

    Recreates the schema from scratch and returns the database handle so
    dependent fixtures/tests can use it (previously nothing was returned,
    contradicting the docstring).
    """
    with app.app_context():
        _db.drop_all()
        _db.create_all()
    return _db
@pytest.fixture(scope="function", autouse=True)
def session(app, db, request):
    """
    Function-scoped SQLAlchemy session wrapped in a transaction.

    Everything a test does (including commits) happens inside an outer
    connection-level transaction that is rolled back on teardown, so tests
    never leave data behind.
    """
    with app.app_context():
        conn = _db.engine.connect()
        txn = conn.begin()
        options = dict(bind=conn, binds={})
        sess = _db.create_scoped_session(options=options)
        # establish a SAVEPOINT just before beginning the test
        # (http://docs.sqlalchemy.org/en/latest/orm/session_transaction.html#using-savepoint)
        sess.begin_nested()
        @event.listens_for(sess(), 'after_transaction_end')
        def restart_savepoint(sess2, trans):
            # Detecting whether this is indeed the nested transaction of the test
            if trans.nested and not trans._parent.nested:
                # The test should have normally called session.commit(),
                # but to be safe we explicitly expire the session
                sess2.expire_all()
                # Re-open a fresh SAVEPOINT so the next commit is also contained.
                sess.begin_nested()
        _db.session = sess
        yield sess
        # Cleanup
        sess.remove()
        # This rolls back any commit that was executed in the tests.
        txn.rollback()
        conn.close()
def test_insert(session):
    """An Entry can be added and committed without error."""
    entry = Entry(text='My example', date=datetime.date.today())
    session.add(entry)
    session.commit()
def test_delete(session):
    """A committed Entry can subsequently be deleted and committed."""
    entry = Entry(text='My example', date=datetime.date.today())
    session.add(entry)
    session.commit()
    session.delete(entry)
    session.commit()
def test_find_by_hash(session):
    """A committed Entry is retrievable by querying hexid against its hash."""
    entry = Entry(text='My example', date=datetime.date.today())
    session.add(entry)
    session.commit()
    fetched = Entry.query.filter(Entry.hexid == entry.hash).first()
    assert fetched == entry
| 26.831325
| 93
| 0.64661
| 279
| 2,227
| 5.060932
| 0.430108
| 0.046034
| 0.03966
| 0.036119
| 0.25
| 0.15085
| 0.15085
| 0.15085
| 0.15085
| 0.15085
| 0
| 0.001186
| 0.242479
| 2,227
| 82
| 94
| 27.158537
| 0.835803
| 0.22317
| 0
| 0.326531
| 0
| 0
| 0.04353
| 0.012522
| 0
| 0
| 0
| 0
| 0.020408
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.306122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5581ae54a36323a4a46f3383645e34f4c26755e1
| 2,891
|
py
|
Python
|
bin/simple_log_server.py
|
kr0nt4b/ctrl_my_home
|
fd86b479d78f94aaa5d6cc92f0f49399aaef0733
|
[
"Apache-2.0"
] | null | null | null |
bin/simple_log_server.py
|
kr0nt4b/ctrl_my_home
|
fd86b479d78f94aaa5d6cc92f0f49399aaef0733
|
[
"Apache-2.0"
] | null | null | null |
bin/simple_log_server.py
|
kr0nt4b/ctrl_my_home
|
fd86b479d78f94aaa5d6cc92f0f49399aaef0733
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
"""
Simple socket server using threads
"""
import socket
import sys
from thread import *
import os
import logging
HOST = ''   # Symbolic name meaning all available interfaces
PORT = 9998 # Arbitrary non-privileged port
# logging format: timestamp padded to 15 chars, then the message
LOG_FORMAT = '%(asctime)-15s %(message)s'
# destination log file; its directory is created by init_logging()
SMART_LOG = '/var/log/smart/smarthome.log'
def init_logging():
    """Configure file logging to SMART_LOG and return the 'log_server' logger.

    Creates the log directory when missing. The previous version tested
    os.path.dirname(smart_log_path) — the *parent* (/var/log) — instead of
    the log directory itself, so /var/log/smart was never created when only
    /var/log existed.
    """
    smart_log_path = os.path.dirname(SMART_LOG)
    if not os.path.exists(smart_log_path):
        # makedirs also covers missing intermediate directories
        os.makedirs(smart_log_path)
    logging.basicConfig(filename=SMART_LOG, level=logging.DEBUG, format=LOG_FORMAT)
    return logging.getLogger('log_server')
class LogServer:
    """TCP server that appends messages received from clients to the
    smart-home log, dispatching on an optional level token in the payload."""

    def __init__(self):
        self.logger = init_logging()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.logger.info('Socket created')
        # Bind socket to local host and port
        try:
            self.sock.bind((HOST, PORT))
        except socket.error as msg:
            self.logger.info('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
            sys.exit()
        self.logger.info('Socket bind complete')
        # Start listening on socket
        self.sock.listen(10)
        self.logger.info('Socket now listening')

    # Function for handling connections. This will be used to create threads
    def client_thread(self, connection):
        """Handle one client connection; runs until the peer disconnects."""
        # Sending message to connected client
        connection.send('Welcome to the logserver')  # send only takes string
        # Loop so the thread keeps serving until recv() returns empty data.
        while True:
            # Receiving from client
            data = connection.recv(1024)
            reply = 'OK\n'
            if not data:
                break
            tokens = data.split(' ')
            if len(tokens) > 1:
                # Reuse the already-split tokens instead of re-splitting.
                level = tokens[1]
                # BUG FIX: these checks were independent `if`s, so a message
                # whose level was not 'ERROR:' also fell into the trailing
                # else and was logged a second time at info level. A single
                # elif chain logs each message exactly once.
                if level == 'DEBUG:':
                    self.logger.debug(data)
                elif level == 'INFO:':
                    self.logger.info(data)
                elif level == 'ERROR:':
                    self.logger.error(data)
                else:
                    # Unknown level token: keep the message at info level.
                    self.logger.info(data)
            else:
                # No level token present at all.
                self.logger.info(data)
            connection.sendall(reply)
        # came out of loop
        connection.close()

    def start(self):
        """Accept connections forever, handling each one in a new thread."""
        # now keep talking with the client
        while True:
            # wait to accept a connection - blocking call
            conn, addr = self.sock.accept()
            self.logger.info('Connected with ' + addr[0] + ':' + str(addr[1]))
            # start new thread takes 1st argument as a function name to be run, second
            # is the tuple of arguments to the function.
            start_new_thread(self.client_thread, (conn,))
        # NOTE: unreachable after `while True`; kept for parity with the original.
        self.sock.close()
if __name__ == "__main__":
    log_server = LogServer()
    try:
        log_server.start()
    except KeyboardInterrupt as e:
        # BUG FIX: BaseException.message was deprecated in Python 2.6 and
        # removed in Python 3; printing the exception itself is equivalent
        # and portable.
        print(e)
| 28.91
| 95
| 0.590107
| 353
| 2,891
| 4.736544
| 0.419263
| 0.059809
| 0.058612
| 0.035885
| 0.02512
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010015
| 0.309236
| 2,891
| 99
| 96
| 29.20202
| 0.827241
| 0.215496
| 0
| 0.1
| 0
| 0
| 0.099911
| 0.012489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.083333
| 0
| 0.183333
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5581eb881f3ca5ddfe7fd5be0a7447ea5b604281
| 1,348
|
py
|
Python
|
utils/calc_drh.py
|
leogoesger/func-flow
|
c81f73998df9b02c04c19a6beae463121d5a8898
|
[
"MIT"
] | 11
|
2018-04-14T00:34:34.000Z
|
2021-05-04T17:23:50.000Z
|
utils/calc_drh.py
|
Yesicaleon/func-flow
|
c81f73998df9b02c04c19a6beae463121d5a8898
|
[
"MIT"
] | 15
|
2019-04-02T03:35:22.000Z
|
2022-02-12T13:17:11.000Z
|
utils/calc_drh.py
|
Yesicaleon/func-flow
|
c81f73998df9b02c04c19a6beae463121d5a8898
|
[
"MIT"
] | 9
|
2018-12-01T19:46:11.000Z
|
2022-03-31T17:18:15.000Z
|
import numpy as np
from utils.helpers import *
# Percentile levels computed per row of the normalized flow matrix.
percentiles = [10, 25, 50, 75, 90]
# Output dict keys, index-aligned with `percentiles` ('ninty' [sic] kept
# for compatibility with downstream consumers).
percentile_keys = ["ten", "twenty_five", "fifty", "seventy_five", "ninty"]
def calc_drh(flow_matrix):
    """Dimensionless Hydrograph Plotter.

    Normalizes each column of `flow_matrix` by its average annual flow and
    returns per-row percentiles of the normalized values.

    Parameters
    ----------
    flow_matrix : np.ndarray
        2-D flow array; columns are presumably years and rows days —
        TODO confirm orientation against callers.

    Returns
    -------
    dict
        Keys 'ten', 'twenty_five', 'fifty', 'seventy_five', 'ninty',
        'min', 'max'; each maps to a list with one rounded value per row.
    """
    average_annual_flow = calculate_average_each_column(flow_matrix)
    # Vectorized column-wise normalization (replaces the original
    # element-by-element double loop; identical values via broadcasting).
    normalized_matrix = flow_matrix / np.asarray(average_annual_flow)

    """Initiating the DRH object with desired keys"""
    drh = {key: [] for key in percentile_keys}
    drh["min"] = []
    drh["max"] = []

    for row in normalized_matrix:
        for key, percentile in zip(percentile_keys, percentiles):
            drh[key].append(round(np.nanpercentile(row, percentile), 2))
        drh["min"].append(round(np.nanmin(row), 2))
        drh["max"].append(round(np.nanmax(row), 2))
    return drh
| 39.647059
| 116
| 0.647626
| 163
| 1,348
| 5.07362
| 0.374233
| 0.084643
| 0.101572
| 0.116082
| 0.333736
| 0.14994
| 0.14994
| 0.14994
| 0.14994
| 0.14994
| 0
| 0.014368
| 0.225519
| 1,348
| 33
| 117
| 40.848485
| 0.777778
| 0.023739
| 0
| 0.083333
| 0
| 0
| 0.038065
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
558930319f7b3b786028343bb2be22080c9650c4
| 14,091
|
py
|
Python
|
src/icaltool/icaltool.py
|
randomchars42/icaltool
|
acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac
|
[
"Unlicense"
] | null | null | null |
src/icaltool/icaltool.py
|
randomchars42/icaltool
|
acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac
|
[
"Unlicense"
] | null | null | null |
src/icaltool/icaltool.py
|
randomchars42/icaltool
|
acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import csv
import logging
import logging.config
import re
import argparse
import json
import sys
from .log import log
from . import datatypes
logger = logging.getLogger(__name__)

# Default CSV column index for each supported ical property.
default_column_mapping = {
    'DTSTART': 0,
    'DTEND': 1,
    'DTSTAMP': 2,
    'UID': 3,
    'CREATED': 4,
    'DESCRIPTION': 5,
    'LAST-MODIFIED': 6,
    'LOCATION': 7,
    'SEQUENCE': 8,
    'SUMMARY': 9,
    'CATEGORIES': 10,
    'CLASS': 11,
    'ATTACH': 12,
    'TRANSP': 13,
    'RRULE': 14,
    'EXDATE': 15,
    'STATUS': 16
}

# Identity mapping from CSV header names to ical property names; callers
# may override entries to translate custom headers.
custom_column_names = {
    'DTSTART': 'DTSTART',
    'DTEND': 'DTEND',
    'DTSTAMP': 'DTSTAMP',
    'UID': 'UID',
    'CREATED': 'CREATED',
    'DESCRIPTION': 'DESCRIPTION',
    'LAST-MODIFIED': 'LAST-MODIFIED',
    'LOCATION': 'LOCATION',
    'SEQUENCE': 'SEQUENCE',
    'SUMMARY': 'SUMMARY',
    'CATEGORIES': 'CATEGORIES',
    'CLASS': 'CLASS',
    'ATTACH': 'ATTACH',
    'TRANSP': 'TRANSP',
    'RRULE': 'RRULE',
    'EXDATE': 'EXDATE',
    'STATUS': 'STATUS'
}

# Component names defined by RFC 2445 that `setup` accepts options for.
standard_components = [
    'VCALENDAR',
    'STANDARD',
    'DAYLIGHT',
    'VEVENT',
    'VTODO',
    'VJOURNAL',
    'VALARM',
    'VFREEBUSY'
]


class ICalTool:
    """
    Tool for handling calendar data (ical) as defined in:
    RFC 2445 (https://datatracker.ietf.org/doc/html/rfc2445)
    """

    def __init__(self):
        self._reset()

    def _reset(self):
        # Discard any previously loaded calendar.
        self.vcalendar = None

    def setup(self, options):
        """Apply per-component property configuration.

        Currently only understands:
        {
            "COMPONENTNAME": {
                "defined_properties": {
                    "PROPERTY": [(-1|0|1), "NAMEOFCLASS"],
                }
            },
            ...
        }
        """
        for key, value in options.items():
            if key in standard_components:
                class_object = getattr(datatypes, key)
                try:
                    for prop, values in value['defined_properties'].items():
                        if not len(values) == 2:
                            # BUG FIX: .format() previously bound only to the
                            # second string literal (no placeholder), so the
                            # '{}' was never substituted. Format the whole
                            # message instead.
                            logger.warning(
                                'illegal value for property {} in '
                                'defined_properties'.format(prop))
                            continue
                        class_object.defined_properties[prop] = values
                except KeyError:
                    # 'unterstand' (sic) kept to preserve log output verbatim.
                    logger.warning('did not unterstand option "{}"'.format(
                        key))

    def load(self, file_name, component='VEVENT',
             has_header=True, custom_column_names=custom_column_names,
             column_mapping=default_column_mapping,
             delimiter=',', quotechar='"'):
        """Load a .csv or .ics file, dispatching on the file extension."""
        if file_name[-3:] == 'csv':
            self.csv_load(file_name, component, has_header, custom_column_names,
                          column_mapping, delimiter, quotechar)
        elif file_name[-3:] == 'ics':
            self.ical_load(file_name)
        else:
            logger.error('invalid file given ("{}")'.format(file_name))
            sys.exit()

    def csv_load(self, file_name, component='VEVENT',
                 has_header=True, custom_column_names=custom_column_names,
                 column_mapping=default_column_mapping,
                 delimiter=',', quotechar='"'):
        """Parse a CSV file of one component type into a new VCALENDAR."""
        with open(file_name, 'r', newline='', encoding='utf-8-sig') as \
                file_handle:
            logger.info('opening {}'.format(file_name))
            data = csv.reader(
                file_handle, delimiter=delimiter, quotechar=quotechar)
            if has_header:
                header = next(data)
            column_mapping = self._csv_get_column_mapping(
                default_column_mapping, has_header, header, custom_column_names)
            self.vcalendar = datatypes.VCALENDAR()
            self.vcalendar.csv_parse(component, data, column_mapping)
            logger.info('loaded {}'.format(file_name))

    def _csv_get_column_mapping(self, default_column_mapping, has_header,
                                header, custom_column_names):
        """Map property names to column indices, honoring custom names.

        Returns `default_column_mapping` when there is no header row;
        otherwise builds the mapping from the header, translating names
        through `custom_column_names` where possible.
        """
        if not has_header:
            # no headers to parse, so use the default column mapping
            return default_column_mapping
        # get headers from file
        column_mapping = {column: index
                          for index, column in enumerate(header)}
        if len(custom_column_names) == 0:
            # BUG FIX: previously returned the undefined name
            # `parsed_columns`, raising NameError on this path.
            return column_mapping
        # the user provided custom column names in a dictionary
        new_mapping = {}
        for column_name in column_mapping.keys():
            # so go through every available column
            try:
                # 1. the parsed column name exists in the user
                #    provided dictionary
                new_mapping[custom_column_names[column_name]] = \
                    column_mapping[column_name]
            except KeyError:
                # 2. the name cannot be translated so copy it
                new_mapping[column_name] = \
                    column_mapping[column_name]
        return new_mapping

    def ical_load(self, file_name):
        """Read an .ics file, unfold continuation lines, and parse it."""
        with open(file_name, 'r', newline='', encoding='utf-8-sig') as \
                file_handle:
            logger.info('opening {}'.format(file_name))
            raw = file_handle.readlines()
        lines = []
        vcalendar = False
        # clean up
        for line in raw:
            # remove the trailing "\n"
            line = line.rstrip("\r\n")
            # do not use empty lines
            if not line == '':
                if not vcalendar and line == 'BEGIN:VCALENDAR':
                    vcalendar = True
                    logger.debug('recording new VCALENDAR')
                elif vcalendar:
                    if line == 'END:VCALENDAR':
                        vcalendar = False
                        logger.debug('finished recording VCALENDAR')
                    # unfold lines (folded lines begin with a single
                    # whitespace or tab)
                    elif line[0] == ' ' or line[0] == "\t":
                        # append to previous line
                        lines[len(lines) - 1] += line[1:]
                    else:
                        lines.append(line)
        self.vcalendar = datatypes.VCALENDAR()
        self.vcalendar.ical_parse(lines)
        logger.info('loaded {}'.format(file_name))

    def write(self, file_name, component):
        """Write the calendar to .csv or .ics, dispatching on extension."""
        if file_name[-3:] == 'csv':
            self.csv_write(file_name, component)
        elif file_name[-3:] == 'ics':
            self.ical_write(file_name)
        else:
            logger.error('invalid file given ("{}")'.format(file_name))
            sys.exit()

    def csv_write(self, file_name, component='VEVENT'):
        """Write all components of one type as CSV (header + data rows)."""
        lines = []
        # can only write components of one type
        with open(file_name, 'w') as file_handle:
            logger.info('writing to {}'.format(file_name))
            # get a list of known properties to use as column names;
            # properties flagged 2 are skipped — presumably "do not
            # export" markers, confirm against datatypes.
            class_object = getattr(datatypes, component)
            properties = [prop
                          for prop, attributes
                          in class_object.defined_properties.items()
                          if attributes[0] != 2]
            # build header
            lines.append('"' + '","'.join(properties) + '"')
            # fill with data
            lines.extend(self.vcalendar.csv_write(component))
            file_handle.write("\r\n".join(lines))
            logger.info('finished writing to {}'.format(file_name))

    def ical_write(self, file_name):
        """Write the calendar as .ics, folding lines at 74 characters."""
        with open(file_name, 'w') as file_handle:
            logger.info('writing to {}'.format(file_name))
            lines = self.vcalendar.ical_write()
            for line in lines:
                text = ''
                # RFC 2445 line folding: continuation lines start with a
                # single space.
                while True:
                    text += line[:74] + "\r\n"
                    line = ' ' + line[74:]
                    if line == ' ':
                        break
                file_handle.write(text)
            logger.info('finished writing to {}'.format(file_name))

    def filter(self, rules):
        """Apply component- and property-level filter rules.

        example component rule:
        - keep only events:
          COMPONENT:+VEVENT
        - filter out all events:
          COMPONENT:-VEVENT
        - filter out all events and alarms
          COMPONENT:-VEVENT,VALARM
        example property rules:
        - filter out all components with a start date between 2015 and 2017:
          DTSTART:-2015to2017
        - keep only components with a start date between 2015-10 and 2017-11:
          DTSTART:+2015-10to2017-11
        - ... attended by john.doe@mail.domain:
          DTSTART:+2015-10to2017-11;ATTENDEE:+john.doe@mail.domain
        - ... but not by jane.doe@mail.domain:
          ...;ATTENDEE:+john.doe@mail.domain|-jane.doe@mail.domain
        """
        if self.vcalendar is None:
            logger.warning('cannot apply rules before calendar data has been '+
                           'loaded')
            return
        raw_rules = rules.split(';')
        parsed_rules = {}
        for raw_rule in raw_rules:
            try:
                name, rule = raw_rule.split(':')
            except ValueError:
                # no ':'
                logger.warning('malformed rule {}'.format(raw_rule))
                continue
            logger.info('found rule for {}: "{}"'.format(name, rule))
            parsed_rules[name] = rule.split('|')
        try:
            component_rule = parsed_rules['COMPONENT'][0]
            logger.debug('found component rule: "{}"'.format(component_rule))
            # sanity check: one leading +/- followed by component names
            if not re.match('[+-]{1}[A-Z,]+', component_rule):
                logger.error('component filter cannot have inclusion and ' +
                             'exclusion criteria, "{}" given'.format(component_rule))
                return
            components_keep = component_rule[0] == '+'
            components = component_rule[1:].split(',')
            del parsed_rules['COMPONENT']
        except KeyError:
            # no component rule:
            # create an empty list of components to remove
            components = []
            components_keep = False
        self.vcalendar.filter(components, components_keep,
                              parsed_rules)
# taken from:
# https://stackoverflow.com/questions/9027028/argparse-argument-order
class CustomAction(argparse.Action):
    """argparse action recording (dest, value) pairs in command-line order
    under namespace.ordered_args, so flags can be processed sequentially."""

    def __call__(self, parser, namespace, values, option_string=None):
        # Lazily create the shared ordered list on first use
        # ('x not in y' idiom instead of 'not x in y').
        if 'ordered_args' not in namespace:
            setattr(namespace, 'ordered_args', [])
        previous = namespace.ordered_args
        previous.append((self.dest, values))
        setattr(namespace, 'ordered_args', previous)
def main():
    """CLI entry point: parse args, configure logging, load a calendar file,
    then apply --filter / --output actions in the order given."""
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Tool to work with calendar data. It can read .ics ' +
        '(preferred) and .csv files. You can filter the compontents ' +
        '(events, todos, alarms, journals, freebusy-indicators) by their ' +
        'type or the value of their properties, e.g. start date ' +
        '(DTSTART) or organiser (ORGANIZER). The result can be written ' +
        'back to a file, again either .ics (preferred) or .csv.',
        epilog='')
    parser.add_argument(
        'file',
        help='the file to load, either .csv or .ics (preferred)',
        type=str)
    parser.add_argument(
        '-o',
        '--output',
        help='the file to write to, either .csv or .ics (preferred)',
        type=str,
        action=CustomAction)
    parser.add_argument(
        '-f',
        '--filter',
        help='rules to filter which component types (events, todos, alarms, ' +
        'journals, freebusy-indicators) to keep / sort out',
        type=str,
        action=CustomAction)
    parser.add_argument(
        '-s',
        '--setup',
        help='json-string containing options, e.g. ' +
        '{"VEVENT": {"defined_properties": ' +
        '{"ATTENDEE": [-1, "Property"]}}} ' +
        'to ignore the ATTENDEE property when parsing',
        type=str)
    parser.add_argument(
        '-c',
        '--component',
        help='component type stored in the .csv-file (one of: events ' +
        '[VEVENT], todos [VTODO], alarms [VALARM], journals [VJOURNAL], ' +
        'freebusy-indicators [VFREEBUSY]); if no component is specified ' +
        'events [VEVENT] are assumed to be the input / desired output',
        type=str,
        default='VEVENT')
    parser.add_argument(
        '-v',
        '--verbosity',
        action='count',
        help='increase verbosity',
        default=0)
    args = parser.parse_args()
    # setup logging: each -v raises console verbosity one level
    logging_config = log.config
    if args.verbosity >= 3:
        logging_config['handlers']['console']['level'] = 'DEBUG'
    elif args.verbosity == 2:
        logging_config['handlers']['console']['level'] = 'INFO'
    elif args.verbosity == 1:
        logging_config['handlers']['console']['level'] = 'WARNING'
    else:
        logging_config['handlers']['console']['level'] = 'ERROR'
    logging.config.dictConfig(logging_config)
    # setup ICalTool ('is not None' idiom instead of 'not ... is None')
    tool = ICalTool()
    if args.setup is not None:
        tool.setup(json.loads(args.setup))
    # load file
    tool.load(args.file, component=args.component)
    # ordered_args only exists when at least one -o/-f flag was given
    if 'ordered_args' not in args:
        logger.error('nothing to do with the loaded data - exiting')
        return
    # process actions in order of flags
    for arg, value in args.ordered_args:
        if arg == 'output':
            if value == args.file:
                logger.error('please don\'t attempt to overwrite your input ' +
                             'file - while it is technically possible it seems unwise ' +
                             "\n cancelling")
                continue
            tool.write(value, component=args.component)
        elif arg == 'filter':
            tool.filter(value)


if __name__ == '__main__':
    main()
| 33.630072
| 83
| 0.544461
| 1,477
| 14,091
| 5.065674
| 0.240352
| 0.029939
| 0.022721
| 0.011227
| 0.277332
| 0.219861
| 0.177626
| 0.116279
| 0.116279
| 0.090617
| 0
| 0.013544
| 0.339791
| 14,091
| 418
| 84
| 33.710526
| 0.790713
| 0.122631
| 0
| 0.225914
| 0
| 0
| 0.194612
| 0.001791
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043189
| false
| 0
| 0.0299
| 0
| 0.099668
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
558cbd4a7ce3e41aaed8e2b86ecb2cf3f058fd07
| 20,998
|
py
|
Python
|
script.py
|
kenneth2001/Virus
|
e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1
|
[
"MIT"
] | null | null | null |
script.py
|
kenneth2001/Virus
|
e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1
|
[
"MIT"
] | null | null | null |
script.py
|
kenneth2001/Virus
|
e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1
|
[
"MIT"
] | null | null | null |
import asyncio
import requests
from bs4 import BeautifulSoup
from datetime import date, datetime
import discord
import numpy as np
from urllib.error import HTTPError
import yt_dlp as youtube_dl
from discord.ext import commands
import os
from pytz import timezone
from yt_dlp.utils import DownloadError, ExtractorError
from util.log import pretty_output, pretty_print
from util.preprocessing import load_config, load_gif, load_user
import secrets
# --- Configuration loading ---
# NOTE(review): the bare excepts below only print; if config.txt fails to
# load, TOKEN/TIMEZONE/MODE stay undefined and the lines after the
# try/except raise NameError anyway — consider re-raising instead.
try:
print('LOADING config.txt')
TOKEN, TIMEZONE, MODE = load_config('config/config.txt')
print('LOADED config.txt\n')
except:
print('ERROR LOADING config.txt\n')
tz = timezone(TIMEZONE)
token = TOKEN #os.environ['token']
# 0: local, 1: repl.it
# For setting up bot on replit.com
if MODE == 1:
from util.keep_alive import keep_alive
os.environ['MPLCONFIGDIR'] = '/tmp/' #"/home/runner/Virus-demo/tmp"
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
elif MODE == 0:
import matplotlib.pyplot as plt
import sympy
else:
print('UNDEFINED MODE')
exit()
# gif.json maps trigger messages to gif links (see send_gif)
try:
print('LOADING gif.json')
gif = load_gif('config/gif.json')
print('LOADED gif.json\n')
except:
print('ERROR LOADING gif.json\n')
try:
print('LOADING user.json')
user = load_user('config/user.json')
print('LOADED user.json\n')
except:
print('ERROR LOADING user.json\n')
# yt-dlp options: best audio only, single video, quiet operation
ytdl_format_options = {
'format': 'bestaudio/best',
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
# ffmpeg: no video; auto-reconnect on dropped streams
ffmpeg_options = {
'options': '-vn',
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5"
}
# channel_var stores all variable for differnet channels
# key: serverid
# value: 1. activated[bool] - indicate whether the music playing function is activated
# 2. bully[dict] - list of user being bullied
# 3. ctx[object]
# 4. log[list] - log of user entering / leaving voice channels
# 5. playing[bool] - indicate whether the bot is playing music
# 6. queue[list] - list of music to be played
channel_var = {}
# return gif link
def send_gif(msg):
    """Return the gif URL mapped to *msg*, or None when *msg* is unknown.

    dict.get has the same semantics as the original membership check plus
    indexing (implicit None on a miss) with a single lookup.
    """
    return gif.get(msg)
# Wong Tai Sin Fortune Sticks (黃大仙求籤)
def get_stick(tag):
    """Scrape a random fortune stick from andy.hk and return a formatted
    reply string starting with *tag* (the requesting user's mention).

    NOTE(review): the parsing below depends on the exact HTML structure of
    the remote page (element classes, '仙機:' / '解說及記載:' markers) and
    will break silently if the site changes — confirm periodically.
    """
    # Stick numbers run 1..100 (randint upper bound is exclusive).
    num = np.random.randint(1, 101)
    URL = f'https://andy.hk/divine/wongtaisin/{num}'
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, "html.parser")
    result = soup.find(id='content')
    job_elements = result.find("div", class_="inner-padding col-md-5 col-md-offset-7")
    stick_no = job_elements.find('h2', class_='id-color text-center').text
    stick_author = job_elements.find_all('h4', class_='text-center')[0].text
    stick_content = job_elements.find_all('h4', class_='text-center')[1].text
    # Split the free text on the section markers to isolate each part.
    stick_explain = job_elements.text.split('仙機:')[1].split('解說及記載:')[0]
    stick_story = job_elements.text.split('仙機:')[1].split('解說及記載:')[1].split('■')[0]
    text = tag + '求得' + stick_no + '\n' + stick_author + '\n\n籤文:\n' + stick_content + '\n\n仙機:' + stick_explain + '\n解說及記載' + stick_story
    return text
# Discord bot: commands are prefixed with '#', default help command disabled.
client = commands.Bot(command_prefix='#', help_command=None)
@client.event
async def on_connect():
# Fired once the websocket connection to Discord is established.
print("Bot activated successfully")
async def initialize(server_id: int, ctx: object = None):
    """Ensure channel_var has an entry for *server_id*.

    Creates the default per-server state on first sight; on later calls,
    backfills the stored ctx if one was not available before.

    Args:
        server_id (int)
        ctx (object, optional): Defaults to None.
    """
    global channel_var
    # Membership test replaces the original `.get(server_id, -1)` sentinel;
    # values are always dicts, so -1 can never be a stored value.
    if server_id in channel_var:
        # `is None` / `is not None` instead of ==/!= None.
        if channel_var[server_id]['ctx'] is None and ctx is not None:
            channel_var[server_id]['ctx'] = ctx
        return
    channel_var[server_id] = {'ctx': ctx, 'queue': [], 'activated': False,
                              'playing': True, 'log': [], 'bully': {}}
@client.event
async def on_voice_state_update(member, before, after):
    """Append a join/leave/move record to the per-server voice log."""
    server_id = member.guild.id
    await initialize(server_id)
    global channel_var
    stamp = datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S')
    records = channel_var[server_id]['log']
    was_connected = before.channel is not None
    is_connected = after.channel is not None
    if not was_connected and is_connected:
        records.append([stamp, '*' + str(member) + '* Entered `' + str(after.channel) + '`'])
    if was_connected and not is_connected:
        records.append([stamp, '*' + str(member) + '* Leaved `' + str(before.channel) + '`'])
    if was_connected and is_connected:
        records.append([stamp, '*' + str(member) + '* Leaved `' + str(before.channel)+ '`, Joined `' + str(after.channel) + '`'])
@client.command(name='log')
async def log(ctx):
    """Send an embed with the 20 most recent voice-channel events."""
    await initialize(ctx.guild.id, ctx)
    global channel_var
    records = channel_var[ctx.guild.id]['log']
    if not records:
        return
    embed = discord.Embed(color=discord.Colour.red())
    embed.set_author(name='Log (Recent 20 records)')
    for stamp, message in records[-20:]:
        embed.add_field(name=stamp, value=message, inline=False)
    await ctx.send(embed=embed)
async def play_music(ctx):
    """Background loop: while the bot is up, poll the server's queue once a
    second and start the next track when the voice client is idle.

    NOTE(review): `title` is referenced in the DownloadError handler; it is
    assigned just before FFmpegPCMAudio is built, so the handler assumes the
    error can only come from player construction/playback — confirm.
    """
    while not client.is_closed():
        global channel_var
        if not len(channel_var[ctx.guild.id]['queue']) == 0 and ctx is not None:
            server = ctx.message.guild
            voice_channel = server.voice_client
            # Only start a track when connected, idle, and not paused.
            if (voice_channel and voice_channel.is_connected() and not voice_channel.is_playing() and channel_var[ctx.guild.id]['playing']) == True:
                server = ctx.message.guild
                voice_channel = server.voice_client
                try:
                    link = channel_var[ctx.guild.id]['queue'][0][1]
                    title = channel_var[ctx.guild.id]['queue'][0][2]
                    player = discord.FFmpegPCMAudio(link, **ffmpeg_options)
                    voice_channel.play(player)
                    await ctx.send(f'**Now playing:** {title}')
                except DownloadError:
                    await ctx.send(f'**Download error:** {title}')
                # Pop the track we just started (or failed to start).
                del(channel_var[ctx.guild.id]['queue'][0])
        # Busy-poll with a 1 s sleep to yield control to the event loop.
        await asyncio.sleep(1)
@client.command(name='play')
async def play(ctx, *url):
    """Queue a YouTube URL/search term and start playback.

    With no argument, just (re)starts the playback loop if the queue is
    non-empty. Joins (or moves to) the author's voice channel first.
    """
    url = ' '.join(url)
    await initialize(ctx.guild.id, ctx)
    global channel_var

    def music(link):
        # Resolve the stream URL and title via yt-dlp without downloading.
        with youtube_dl.YoutubeDL(ytdl_format_options) as ydl:
            info = ydl.extract_info(link, download=False)
            # Handle if the url is a playlist: take its first entry.
            if 'entries' in info:
                info = info['entries'][0]
            return info['webpage_url'], info['url'], info['title']

    if not ctx.message.author.voice:
        # handle if message author is not inside any voice channel
        await ctx.send("**You are not connected to a voice channel**")
        return
    elif ctx.message.guild.voice_client:
        # bot is already in a voice channel
        if ctx.message.guild.voice_client.channel != ctx.message.author.voice.channel:
            # ...but not the author's: pause, move over, resume
            channel = ctx.message.author.voice.channel
            user = await ctx.guild.fetch_member(client.user.id)
            ctx.voice_client.pause()
            await user.move_to(channel)
            ctx.voice_client.resume()
    else:
        # bot is not inside any voice channel: join the author's channel
        channel = ctx.message.author.voice.channel
        await channel.connect()

    server = ctx.message.guild
    voice_channel = server.voice_client
    if url is None or url == '':
        # No track requested: nothing to enqueue; bail out on empty queue.
        if len(channel_var[ctx.guild.id]['queue']) == 0:
            return
    else:
        try:
            link, player_link, title = music(url)
            channel_var[ctx.guild.id]['queue'].append([link, player_link, title])
        except (ExtractorError, HTTPError, DownloadError):
            # The three previously-duplicated handlers (identical bodies)
            # are merged into a single tuple catch.
            await ctx.send('**Error:** ' + url)

    # activate music playing function
    if channel_var[ctx.guild.id]['activated'] == False:
        channel_var[ctx.guild.id]['activated'] = True
        await play_music(ctx)
@client.command(name='debug')
async def debug(ctx):
    """Dump channel_var to chat after the caller echoes a one-time token
    that is only printed to the host console (poor-man's admin check)."""
    def from_author(m):
        return m.author == ctx.message.author

    challenge = secrets.token_hex(10)
    print("Token:", challenge)
    await ctx.send('**Please type in the token displayed in console**')
    msg = await client.wait_for("message", check=from_author)
    if msg.content != challenge:
        await ctx.send("**Only admin can use this command**")
        return
    pretty_print(channel_var)
    pretty_output(channel_var, filename='tmp.json')
    await ctx.send(file=discord.File('tmp.json'))
@client.command(name='queue')
async def queue_(ctx):
    """Show the current song queue for this guild."""
    await initialize(ctx.guild.id, ctx)
    global channel_var
    song_queue = channel_var[ctx.guild.id]['queue']
    if not song_queue:
        await ctx.send('**Queue is empty!**')
        return
    async with ctx.typing():
        lines = []
        for position, entry in enumerate(song_queue, start=1):
            # entry layout: [page url, stream url, title]
            lines.append(f'{position}. {entry[2]}\n{entry[0]}')
        await ctx.send('\n'.join(lines))
@client.command(name='stop')
async def stop(ctx):
    """Disconnect the bot from the guild's voice channel.

    Guards against the bot not being connected: the original dereferenced
    voice_client unconditionally and raised AttributeError on None.
    """
    voice_client = ctx.message.guild.voice_client
    if voice_client is not None:
        await voice_client.disconnect()
@client.command(name='gpa')
async def gpa(ctx):
    """Reply with a random 'predicted GPA', capped at 4.0, mentioning the caller."""
    x = round(np.random.uniform(3, 4) - np.random.normal(0, 1), 2)
    # Cap at the 4.0 maximum. The original if/elif ladder assigned the exact
    # same string in every branch, so it collapses to one assignment.
    value = 4.0 if x > 4 else x
    text = "Predicted GPA: " + str(value)
    tag = "<@" + str(ctx.message.author.id) + ">"
    await ctx.message.channel.send(text + tag)
@client.command(name='pause')
async def pause(ctx):
    """Pause playback and record the paused state for this guild."""
    await initialize(ctx.guild.id, ctx)
    global channel_var
    channel_var[ctx.guild.id]['playing'] = False
    voice = ctx.voice_client
    if voice is not None:
        voice.pause()
        await ctx.send('**Paused**')
@client.command(name='resume')
async def resume(ctx):
    """Resume playback and record the playing state for this guild."""
    await initialize(ctx.guild.id, ctx)
    global channel_var
    channel_var[ctx.guild.id]['playing'] = True
    voice = ctx.voice_client
    if voice is not None:
        voice.resume()
        await ctx.send('**Resumed**')
@client.command(name='skip')
async def skip(ctx):
    """Stop the current track so the player loop advances to the next song."""
    await initialize(ctx.guild.id, ctx)
    global channel_var
    voice = ctx.voice_client
    if voice is not None:
        voice.stop()
        await ctx.send('**Skipped**')
@client.listen()
async def on_message(message):
    """Per-message hooks: daily greeting, #hello reply, and keyword GIFs."""
    if message.author == client.user:
        return  # never react to our own messages
    author_id = str(message.author.id)
    tag = "<@" + author_id + ">"
    msg = message.content.lower()
    today = date.today()
    # Registered users get their greeting text once per day.
    if author_id in user:
        if user[author_id]['date'] != today:
            user[author_id]['date'] = today
            await message.channel.send(user[author_id]['text'] + tag)
    if message.content.startswith('#hello'):
        await message.channel.send("Hello World!")
    gif_url = send_gif(msg)
    if gif_url is not None:
        await message.channel.send(gif_url)
@client.command(name='help')
async def help(ctx):
    """Send an embed listing every command, grouped by category."""
    # NOTE: this overrides discord.py's default help command (same name).
    embed = discord.Embed(title="Virus", url="https://github.com/kenneth2001/virus", description="Discord Bot developed by YeeKiiiiii 2021", color=discord.Colour.blue())
    embed.set_author(name="Virus", url="https://github.com/kenneth2001/virus", icon_url="https://user-images.githubusercontent.com/24566737/132656284-f0ff6571-631c-4cef-bed7-f575233cbf5f.png")
    embed.add_field(name=':musical_note: __Music__', value="""1. `#play [url]` Play music, tested platform: Youtube, Soundcloud
2. `#pause` Pause music
3. `#resume` Resume music
4. `#skip` Play next song
5. `#queue` Display the queue
6. `#stop` Kick the bot from voice channel""", inline=False)
    embed.add_field(name=':pencil2: __Graph (Developing)__', value="""1. `#plot` Create simple scatter/line plot""", inline=False)
    embed.add_field(name=':black_joker: __Kidding__', value="""1. `#joke [userid] [times] [duration]`
Move a specified user into random voice channels randomly and repeatly
2. `#leavemealone` Stop yourself from being bullied
3. `#save [userid]` Recuse your friend from cyber-bullying""", inline=False)
    embed.add_field(name=':man_office_worker: __Other__', value="""1. `#stick` Fortune sticks from Wong Tai Sin
2. `#gpa` Get prediction of your GPA (Maximum: 4.0)
3. `#help` Display a list of all commands aviliable
4. `#credit` Display information of the bot developer
5. `#hello` Return 'hello world'
6. `#ping` Return latency
7. `#log` Display the previous 20 in/out user
8. `#clear` Delete previous 30 messages sent by this bot / started with '#'
9. `#debug` Check parameters (for debugging)""", inline=False)
    embed.add_field(name=':new: __New Features (Experimental)__', value="""1. `#when` Return the start time of the bot
2. `#dm [userid] [message]` Send message to any user privately""" )
    # `gif` here is the module-level keyword->GIF mapping (distinct from the
    # local variable of the same name inside on_message).
    embed.add_field(name=':frame_with_picture: __GIF__', value="Automatically return GIF if the message matches the following keywords\n`" + '` `'.join(gif.keys()) +'`', inline=False)
    embed.set_footer(text="Last updated on 25 December 2021")
    await ctx.send(embed=embed)
@client.command(name='ping')
async def ping(ctx):
    """Report the websocket latency in milliseconds."""
    latency_ms = round(client.latency * 1000)
    await ctx.send('In {}ms'.format(latency_ms))
@client.command(name='stick')
async def stick(ctx):
    """Draw a fortune stick for the invoking user."""
    mention = f"<@{ctx.message.author.id}>"
    await ctx.send(get_stick(mention))
@client.command(name='credit')
async def credit(ctx):
    """Show developer credits."""
    lines = ('Created By kenneth', 'Last Update On 18/9/2021', 'https://github.com/kenneth2001')
    await ctx.send('\n'.join(lines))
@client.command(name='clear')
async def clear(ctx):
    """Purge up to 30 recent messages sent by this bot or starting with '#'."""
    def is_bot(m):
        # startswith handles empty content safely, which is what the
        # original's bare `except: return False` was papering over.
        return m.author == client.user or m.content.startswith('#')
    deleted = await ctx.message.channel.purge(limit=30, check=is_bot)
    await ctx.send('Deleted {} message(s)'.format(len(deleted)), delete_after=10)
@client.command(name='joke')
async def joke(ctx, userid=None, n: int = 10, sleep_time: float = 0.5):
    """Prank: repeatedly move `userid` into random voice channels.

    n          -- number of successful moves to perform
    sleep_time -- delay in seconds between move attempts
    The int/float annotations make discord.py convert string arguments, so
    `#joke <id> 5 1` no longer leaves `n` as the string "5".
    """
    await initialize(ctx.guild.id, ctx)
    global channel_var
    try:
        userid = int(userid)
        user = await ctx.guild.fetch_member(userid)
        # Mark the target as bullied. The original set this flag twice and
        # guarded a value it immediately overwrote; one assignment suffices.
        channel_var[ctx.guild.id]['bully'][userid] = True
        tag1 = "<@" + str(ctx.message.author.id) + ">"
        tag2 = "<@" + str(userid) + ">"
        await ctx.send(tag1 + " is pranking " + tag2)
        await ctx.send('To stop, type #leavemealone')
    except (TypeError, ValueError, discord.HTTPException):
        # userid missing / non-numeric, or not a member of this guild.
        tag = "<@" + str(ctx.message.author.id) + ">"
        await ctx.send('Please provide a valid user id!' + tag)
        return
    while n > 0:
        if not channel_var[ctx.guild.id]['bully'][userid]:
            return  # target solved the #leavemealone captcha
        try:
            if user.voice is not None:
                await user.move_to(np.random.choice(ctx.guild.voice_channels))
                n -= 1
        except discord.HTTPException:
            pass  # best-effort: a failed move simply retries after the sleep
        await asyncio.sleep(sleep_time)
def generate_question():
    """Build a random arithmetic captcha string, e.g. '3+17*5-...'.

    Emits six '<number><operator>' pairs followed by a final number, so the
    expression has seven operands in [1, 20] joined by six random * + -
    operators. (Layout of the final append relative to the loop assumed from
    the conventional pattern — confirm against the original indentation.)
    """
    question = ""
    for i in range(6):
        question += str(np.random.randint(1, 21))
        question += np.random.choice(['*', '+', '-'])
    question += str(np.random.randint(1, 21))
    return question
@client.command(name='leavemealone')
async def leavemealone(ctx):
    """Let the invoking user end their own prank by solving a math captcha."""
    await initialize(ctx.guild.id, ctx)
    global channel_var
    author_id = ctx.message.author.id
    if author_id not in channel_var[ctx.guild.id]['bully']:
        channel_var[ctx.guild.id]['bully'][author_id] = True

    def check(m):
        return m.author == ctx.message.author

    question = generate_question()
    await ctx.send('Question: `'+question+'`\nType your answer:')
    answer = int(sympy.sympify(question))
    print('Answer:', answer)
    msg = await client.wait_for("message", check=check)
    tag = "<@" + str(ctx.message.author.id) + ">"
    # A non-numeric reply previously crashed the command with an unhandled
    # ValueError; treat it as a wrong answer instead.
    try:
        correct = int(msg.content) == answer
    except ValueError:
        correct = False
    if correct:
        channel_var[ctx.guild.id]['bully'][ctx.message.author.id] = False
        await ctx.send("Good Job" + tag)
    else:
        await ctx.send("on9" + tag)
@client.command(name='save')
async def save(ctx, id=None):
    """Free the user with the given id from the prank via a math captcha."""
    if id is None:
        await ctx.send("You must specify an id")
        return
    await initialize(ctx.guild.id, ctx)
    global channel_var
    # A non-numeric id previously crashed with an unhandled ValueError.
    try:
        userid = int(id)
    except ValueError:
        await ctx.send("You must specify an id")
        return

    def check(m):
        return m.author == ctx.message.author

    if channel_var[ctx.guild.id]['bully'].get(userid, -1) == -1:
        await ctx.send("This user is not under bully list")
    elif not channel_var[ctx.guild.id]['bully'][userid]:
        await ctx.send("This user is not being bullied")
    else:
        question = generate_question()
        await ctx.send('Question: `'+question+'`\nType your answer:')
        if MODE == 0:
            answer = int(sympy.sympify(question))
        elif MODE == 1:
            # eval() is acceptable here: `question` is generated locally from
            # digits and +-* only, never from user input.
            answer = int(eval(question))
        print('Answer:', answer)
        msg = await client.wait_for("message", check=check)
        tag = "<@" + str(ctx.message.author.id) + ">"
        # Treat a non-numeric reply as a wrong answer instead of crashing.
        try:
            correct = int(msg.content) == answer
        except ValueError:
            correct = False
        if correct:
            channel_var[ctx.guild.id]['bully'][userid] = False
            await ctx.send("Good Job" + tag)
        else:
            await ctx.send("Be careful" + tag)
# experimental
@client.command(name='plot')
async def plot(ctx):
    """Interactively collect points and labels, then post a plot image."""
    def check(m):
        return m.author == ctx.message.author

    async def ask(prompt):
        # Send one prompt and return the author's next message content.
        await ctx.send(prompt)
        reply = await client.wait_for("message", check=check)
        return reply.content

    await ctx.send("1. Please Enter The Type of The Plot")
    graph_type = await ask("a: scatter plot, b: line plot")
    # 'seperated' typo in the prompts fixed to 'separated'.
    x = [int(i) for i in (await ask("2. Please enter the x-coordinate for all points (separated by comma)")).split(',')]
    y = [int(i) for i in (await ask("3. Please enter the y-coordinate for all points (separated by comma)")).split(',')]
    title = await ask("4. Please enter the title of the plot")
    x_name = await ask("5. Please enter the name of x-axis")
    y_name = await ask("6. Please enter the name of y-axis")
    plt.plot(x, y, linestyle="-" if graph_type == 'b' else 'none', marker='.')
    plt.title(title)
    plt.xlabel(x_name)
    plt.ylabel(y_name)
    plt.savefig('plot.png')
    try:
        await ctx.send(file=discord.File('plot.png'))
    finally:
        # Always clean up the temp file and the global pyplot state, even if
        # sending fails (the original leaked both on error).
        os.remove('plot.png')
        plt.clf()
# experimental
@client.command(name='when')
async def when(ctx):
    """Report when this bot process was started."""
    started = start_time.strftime("**Bot started from %Y-%m-%d %I-%M %p**")
    await ctx.send(started)
# experimental
@client.command(name='dm')
async def dm(ctx, userid, *message):
    """DM the joined `message` words to the user with the given id."""
    try:
        user = await client.fetch_user(int(userid))
        await user.send(' '.join(message))
    except (ValueError, discord.DiscordException):
        # Narrowed from a bare except: bad id, unknown user, or DM failure.
        await ctx.send("**Message is not sent**")
    else:
        await ctx.send("**Message sent successfully**")
# Startup: on replit hosting (MODE == 1) spin up the keep-alive web server
# first, record the launch time (read by the #when command), then block in
# the Discord event loop.
if MODE == 1:
    keep_alive() # For setting up bot on replit.com
start_time = datetime.now(tz)
client.run(token)
| 37.563506
| 194
| 0.595247
| 2,744
| 20,998
| 4.469023
| 0.172376
| 0.039958
| 0.042078
| 0.036696
| 0.385795
| 0.328468
| 0.271956
| 0.234364
| 0.200114
| 0.156405
| 0
| 0.013435
| 0.266263
| 20,998
| 558
| 195
| 37.630824
| 0.782437
| 0.045147
| 0
| 0.29638
| 0
| 0.002262
| 0.251256
| 0.001055
| 0.002262
| 0
| 0
| 0
| 0
| 1
| 0.020362
| false
| 0.002262
| 0.045249
| 0.00905
| 0.106335
| 0.036199
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
558d879413f6f88e3c45e2ca06534a675e1043f9
| 480
|
py
|
Python
|
solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py
|
lk-hang/leetcode
|
4c8735463bdcb9f48666e03a39eb03ee9f625cec
|
[
"MIT"
] | null | null | null |
solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py
|
lk-hang/leetcode
|
4c8735463bdcb9f48666e03a39eb03ee9f625cec
|
[
"MIT"
] | null | null | null |
solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py
|
lk-hang/leetcode
|
4c8735463bdcb9f48666e03a39eb03ee9f625cec
|
[
"MIT"
] | null | null | null |
"""
Given an integer number n, return the difference between the product of its digits and the sum of its digits.
"""
class Solution:
    """LeetCode 1281: difference between digit product and digit sum."""

    def subtractProductAndSum(self, n: int) -> int:
        """Return product(digits of n) minus sum(digits of n)."""
        # Single-digit shortcut: product and sum are both n, so the
        # difference is zero.
        if n < 10:
            return 0
        product, total = 1, 0
        while n:
            n, digit = divmod(n, 10)
            product *= digit
            total += digit
        return product - total
| 25.263158
| 109
| 0.522917
| 59
| 480
| 4.152542
| 0.491525
| 0.036735
| 0.089796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035336
| 0.410417
| 480
| 19
| 110
| 25.263158
| 0.830389
| 0.227083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
558e58ba058923b58851710da67bc2d4ad87a57f
| 1,031
|
py
|
Python
|
VideoIndexerDemo/VideoIndexer/application.py
|
microsoft/ai4accessibility
|
4c13d006f285e31f01d1bc71a55c20e9234713a5
|
[
"MIT"
] | 2
|
2021-07-11T06:03:43.000Z
|
2021-10-09T23:37:21.000Z
|
VideoIndexerDemo/VideoIndexer/application.py
|
microsoft/ai4accessibility
|
4c13d006f285e31f01d1bc71a55c20e9234713a5
|
[
"MIT"
] | 6
|
2021-09-08T03:07:13.000Z
|
2022-03-12T00:57:07.000Z
|
VideoIndexerDemo/VideoIndexer/application.py
|
microsoft/ai4accessibility
|
4c13d006f285e31f01d1bc71a55c20e9234713a5
|
[
"MIT"
] | 3
|
2021-02-14T18:51:31.000Z
|
2021-02-14T18:51:41.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from dotenv import load_dotenv
load_dotenv()  # load .env configuration before anything reads os.environ
import os
import json
import requests
from concurrent.futures import ThreadPoolExecutor
from flask import Flask, flash, request, redirect, url_for, session
from video_captioning.main import upload_video, video_callback, train_custom_speech
# NOTE(review): `executor` is not referenced anywhere in this file —
# presumably used by imported modules or leftover; confirm before removing.
executor = ThreadPoolExecutor(max_workers=20)
app = Flask("layout_detection")
@app.route('/api/v1/vc', methods=['POST'])
def vc_upload():
    """Accept a JSON payload, start a video upload, return the result as JSON."""
    payload = request.get_json()
    result = upload_video(payload)
    return json.dumps(result)
@app.route('/api/v1/customspeech', methods=['POST'])
def customspeech_train():
    """Accept a JSON payload, start custom-speech training, return JSON."""
    payload = request.get_json()
    result = train_custom_speech(payload)
    return json.dumps(result)
@app.route('/api/v1/vc/callback', methods=['POST'])
def vc_callback():
    """Video Indexer callback: forward the 'id' query parameter.

    The original parsed the JSON body into an unused local; the handler only
    consumes the query string, so the dead statement is removed.
    """
    return video_callback(request.args.get('id'))
# Run the development server when executed directly.
# NOTE(review): debug=True combined with host='0.0.0.0' exposes the Werkzeug
# debugger to the network — not safe for production.
if __name__ == "__main__":
    app.run(port=5000, debug=True, host='0.0.0.0')
| 29.457143
| 83
| 0.747818
| 143
| 1,031
| 5.167832
| 0.454545
| 0.054127
| 0.044655
| 0.052774
| 0.258457
| 0.200271
| 0.11908
| 0.11908
| 0.11908
| 0.11908
| 0
| 0.014412
| 0.125121
| 1,031
| 35
| 84
| 29.457143
| 0.804878
| 0.065955
| 0
| 0.192308
| 0
| 0
| 0.097815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.269231
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559154d893c3d43225a58bc587edd3aa01dea828
| 5,154
|
py
|
Python
|
code/tests/unit/api/test_enrich.py
|
CiscoSecurity/tr-05-serverless-cybercrime-tracker
|
28fcfaa220025c9e8523633a4a9a04f319656756
|
[
"MIT"
] | 3
|
2020-04-28T08:53:14.000Z
|
2020-12-17T14:25:32.000Z
|
code/tests/unit/api/test_enrich.py
|
CiscoSecurity/tr-05-serverless-cybercrime-tracker
|
28fcfaa220025c9e8523633a4a9a04f319656756
|
[
"MIT"
] | 2
|
2020-03-06T15:00:22.000Z
|
2020-06-26T11:21:52.000Z
|
code/tests/unit/api/test_enrich.py
|
CiscoSecurity/tr-05-serverless-cybercrime-tracker
|
28fcfaa220025c9e8523633a4a9a04f319656756
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from requests.exceptions import SSLError
from pytest import fixture
from unittest import mock
from tests.unit.mock_for_tests import (
CYBERCRIME_RESPONSE_MOCK,
EXPECTED_DELIBERATE_RESPONSE,
EXPECTED_OBSERVE_RESPONSE,
EXPECTED_RESPONSE_500_ERROR,
EXPECTED_RESPONSE_404_ERROR,
CYBERCRIME_ERROR_RESPONSE_MOCK,
EXPECTED_RESPONSE_SSL_ERROR
)
def routes():
    """Yield every enrichment endpoint the suite must exercise."""
    yield from ('/deliberate/observables', '/observe/observables')
# Parametrized fixture: each test using `route` runs once per enrichment
# endpoint, with a readable test id such as "POST /observe/observables".
@fixture(scope='module', params=routes(), ids=lambda route: f'POST {route}')
def route(request):
    """Return the endpoint path for the current parametrization."""
    return request.param
@fixture(scope='function')
def cybercrime_api_request():
    """Patch requests.get for one test; yields the mock for configuration."""
    with mock.patch('requests.get') as mock_request:
        yield mock_request
def cybercrime_api_response(*, ok, payload=None, status_error=None):
    """Build a MagicMock mimicking a requests.Response from the upstream API.

    A successful response without an explicit payload falls back to the
    canned CYBERCRIME_RESPONSE_MOCK; any other combination exposes
    `status_error` as the response status code.
    """
    response = mock.MagicMock()
    response.ok = ok
    if ok and not payload:
        payload = CYBERCRIME_RESPONSE_MOCK
    else:
        response.status_code = status_error
    response.json = lambda: payload
    return response
@fixture(scope='module')
def invalid_json():
    """Observable of an unsupported type — the API must still answer 200."""
    return [dict(type='unknown', value='')]
def test_enrich_call_with_invalid_json_failure(route, client, invalid_json):
    """An unsupported observable type must not break the endpoint (still 200)."""
    response = client.post(route, json=invalid_json)
    assert response.status_code == HTTPStatus.OK
@fixture(scope='module')
def valid_json():
    """Single well-formed IP observable."""
    return [dict(type='ip', value='104.24.123.62')]
@fixture(scope='module')
def valid_json_multiple():
    """Two IP observables — used to mix one success with one upstream error."""
    return [
        dict(type='ip', value='104.24.123.62'),
        dict(type='ip', value='0.0.0.0'),
    ]
def test_enrich_call_success(route, client, valid_json,
                             cybercrime_api_request):
    """Happy path: a successful upstream call yields the expected payloads."""
    cybercrime_api_request.return_value = cybercrime_api_response(ok=True)
    response = client.post(route, json=valid_json)
    assert response.status_code == HTTPStatus.OK
    data = response.get_json()
    if route == '/observe/observables':
        verdicts = data['data']['verdicts']
        # pop() both asserts the field is present/truthy and strips the
        # non-deterministic values so the equality check below is exact.
        assert verdicts['docs'][0].pop('valid_time')
        judgements = data['data']['judgements']
        assert judgements['docs'][0].pop('id')
        assert judgements['docs'][0].pop('valid_time')
        assert data == EXPECTED_OBSERVE_RESPONSE
    if route == '/deliberate/observables':
        verdicts = data['data']['verdicts']
        assert verdicts['docs'][0].pop('valid_time')
        assert data == EXPECTED_DELIBERATE_RESPONSE
def test_enrich_error_with_data(route, client, valid_json_multiple,
                                cybercrime_api_request):
    """One success plus one upstream 500: data AND errors in one response."""
    # side_effect returns one mock per call, in order: first observable
    # succeeds, the second hits an internal server error upstream.
    cybercrime_api_request.side_effect = (
        cybercrime_api_response(ok=True),
        cybercrime_api_response(
            ok=False,
            payload=CYBERCRIME_ERROR_RESPONSE_MOCK,
            status_error=HTTPStatus.INTERNAL_SERVER_ERROR)
    )
    response = client.post(route, json=valid_json_multiple)
    assert response.status_code == HTTPStatus.OK
    data = response.get_json()
    if route == '/observe/observables':
        verdicts = data['data']['verdicts']
        # Strip non-deterministic fields before exact comparison.
        assert verdicts['docs'][0].pop('valid_time')
        judgements = data['data']['judgements']
        assert judgements['docs'][0].pop('id')
        assert judgements['docs'][0].pop('valid_time')
        # The expected body is the success payload merged with the error part.
        expected_response = {}
        expected_response.update(EXPECTED_OBSERVE_RESPONSE)
        expected_response.update(EXPECTED_RESPONSE_500_ERROR)
        assert data == expected_response
    if route == '/deliberate/observables':
        verdicts = data['data']['verdicts']
        assert verdicts['docs'][0].pop('valid_time')
        expected_response = {}
        expected_response.update(EXPECTED_DELIBERATE_RESPONSE)
        expected_response.update(EXPECTED_RESPONSE_500_ERROR)
        assert data == expected_response
def test_enrich_call_404(route, client, valid_json, cybercrime_api_request):
    """An upstream 404 is translated into the canned 404 error body (HTTP 200)."""
    cybercrime_api_request.return_value = cybercrime_api_response(
        ok=False,
        payload=CYBERCRIME_ERROR_RESPONSE_MOCK,
        status_error=HTTPStatus.NOT_FOUND
    )
    response = client.post(route, json=valid_json)
    assert response.status_code == HTTPStatus.OK
    assert response.get_json() == EXPECTED_RESPONSE_404_ERROR
def test_enrich_call_500(route, client, valid_json, cybercrime_api_request):
    """An upstream 500 is translated into the canned 500 error body (HTTP 200)."""
    cybercrime_api_request.return_value = cybercrime_api_response(
        ok=False,
        payload=CYBERCRIME_ERROR_RESPONSE_MOCK,
        status_error=HTTPStatus.INTERNAL_SERVER_ERROR
    )
    response = client.post(route, json=valid_json)
    assert response.status_code == HTTPStatus.OK
    assert response.get_json() == EXPECTED_RESPONSE_500_ERROR
def test_enrich_call_with_ssl_error(route, client,
                                    valid_json, cybercrime_api_request):
    """An SSLError from requests maps to the canned SSL error body."""
    mock_exc = mock.MagicMock()
    # Shape the mock like requests' SSLError cause:
    # exc.reason.args[0].verify_message holds the certificate message.
    mock_exc.reason.args.__getitem__().verify_message \
        = 'self signed certificate'
    cybercrime_api_request.side_effect = SSLError(mock_exc)
    response = client.post(route, json=valid_json)
    assert response.status_code == HTTPStatus.OK
    assert response.get_json() == EXPECTED_RESPONSE_SSL_ERROR
| 29.451429
| 76
| 0.695188
| 608
| 5,154
| 5.587171
| 0.161184
| 0.065057
| 0.064763
| 0.040624
| 0.669709
| 0.607006
| 0.577568
| 0.554018
| 0.527524
| 0.527524
| 0
| 0.013546
| 0.197905
| 5,154
| 174
| 77
| 29.62069
| 0.808176
| 0
| 0
| 0.408333
| 0
| 0
| 0.089639
| 0.013388
| 0
| 0
| 0
| 0
| 0.175
| 1
| 0.108333
| false
| 0
| 0.041667
| 0.033333
| 0.191667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5593fe3d21ad82b5382d08854df0a8f99eec0ed9
| 1,900
|
py
|
Python
|
src/ensae_teaching_cs/tests/american_cities.py
|
Jerome-maker/ensae_teaching_cs
|
43ea044361ee60c00c85aea354a7b25c21c0fd07
|
[
"MIT"
] | 73
|
2015-05-12T13:12:11.000Z
|
2021-12-21T11:44:29.000Z
|
src/ensae_teaching_cs/tests/american_cities.py
|
Jerome-maker/ensae_teaching_cs
|
43ea044361ee60c00c85aea354a7b25c21c0fd07
|
[
"MIT"
] | 90
|
2015-06-23T11:11:35.000Z
|
2021-03-31T22:09:15.000Z
|
src/ensae_teaching_cs/tests/american_cities.py
|
Jerome-maker/ensae_teaching_cs
|
43ea044361ee60c00c85aea354a7b25c21c0fd07
|
[
"MIT"
] | 65
|
2015-01-13T08:23:55.000Z
|
2022-02-11T22:42:07.000Z
|
"""
@file
@brief Function to test others functionalities
"""
import os
import pandas
from pyquickhelper.loghelper import fLOG
from ..faq.faq_matplotlib import graph_cities
from ..special import tsp_kruskal_algorithm, distance_haversine
def american_cities(df_or_filename, nb_cities=-1, img=None, fLOG=fLOG):
    """
    Computes the :epkg:`TSP` for american cities.

    @param      df_or_filename      dataframe (or path to a CSV file)
    @param      nb_cities           number of cities to keep (<= 0 keeps all)
    @param      img                 image to produce (None skips plotting)
    @param      fLOG                logging function
    @return                         dataframe (results)
    """
    def haversine(p1, p2):
        # Great-circle distance between two (latitude, longitude) points.
        return distance_haversine(p1[0], p1[1], p2[0], p2[1])
    if isinstance(df_or_filename, str):
        df = pandas.read_csv(df_or_filename)
    else:
        df = df_or_filename
    # NOTE(review): negates the column in place — this mutates the caller's
    # dataframe when one is passed directly; confirm that is intended.
    df["Longitude"] = -df["Longitude"]
    # Bounding box — presumably restricts to the contiguous US (drops
    # far-north/far-west points); verify against the input data.
    df = df[df.Latitude < 52]
    df = df[df.Longitude > -130].copy()
    fLOG(df.columns)
    df = df.dropna()
    if nb_cities > 0:
        df = df[:nb_cities].copy()
    fLOG(df.shape)
    # Columns 1..3 of each row are used as (latitude, longitude, city name)
    # — assumes that column order in the input; TODO confirm.
    points = [(row[1], row[2], row[3])
              for row in df.itertuples(index=False)]
    fLOG("number of cities:", len(points))
    trip = tsp_kruskal_algorithm(
        points, distance=haversine, fLOG=fLOG, max_iter=10)
    # trip
    dftrip = pandas.DataFrame(
        trip, columns=["Latitude", "Longitude", "City"])
    # graph
    for i in range(0, dftrip.shape[0]):
        # Keep only every 10th label so the plot stays readable.
        if i % 10 != 0:
            dftrip.loc[i, "City"] = ""
    if img is not None:
        import matplotlib.pyplot as plt
        fig, ax = graph_cities(dftrip, markersize=3, linked=True, fLOG=fLOG,
                               fontcolor="red", fontsize='16', loop=True, figsize=(32, 32))
        assert ax is not None
        fig.savefig(img)
        assert os.path.exists(img)
        plt.close('all')
    fLOG("end")
    return dftrip
| 29.6875
| 91
| 0.596316
| 250
| 1,900
| 4.428
| 0.424
| 0.025294
| 0.054201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024982
| 0.283684
| 1,900
| 63
| 92
| 30.15873
| 0.788391
| 0.178947
| 0
| 0
| 0
| 0
| 0.046834
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.05
| false
| 0
| 0.15
| 0.025
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5594b24c92581e7c3ba26f490dea8b770f2cf8fd
| 2,049
|
py
|
Python
|
tools/ntp_spoofer.py
|
dschoonwinkel/pypacker
|
58c833f40207db746b0b2995ca3835a533e0258e
|
[
"BSD-3-Clause"
] | null | null | null |
tools/ntp_spoofer.py
|
dschoonwinkel/pypacker
|
58c833f40207db746b0b2995ca3835a533e0258e
|
[
"BSD-3-Clause"
] | null | null | null |
tools/ntp_spoofer.py
|
dschoonwinkel/pypacker
|
58c833f40207db746b0b2995ca3835a533e0258e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Simple NTP spoofing tool."""
from pypacker.layer12.ethernet import Ethernet
from pypacker.layer3 import ip
from pypacker.layer4.udp import UDP
from pypacker.layer567 import ntp
from pypacker import psocket
# interface to listen on
IFACE = "wlan0"
# source address which commits a NTP request and we send a wrong answer
IP_SRC = "192.168.178.27"
#
# normal NTP request
#
"""
psock_req = psocket.SocketHndl(iface_name=IFACE, mode=psocket.SocketHndl.MODE_LAYER_3)
ntp_req = ip.IP(src_s=IP_SRC, dst_s="188.138.9.208", p=ip.IP_PROTO_UDP) +\
	UDP(sport=1234, dport=123) +\
	ntp.NTP(li=ntp.NO_WARNING, v=3, mode=ntp.CLIENT)
print("sending NTP request and waiting for answer..")
answer = psock_req.sr(ntp_req)[0][ntp.NTP]
"""
# print("answer is: %s" % answer)
#unpack_I = struct.Struct(">I").unpack
# print("seconds since 1.1.1900: %d" % unpack_I(answer.transmit_time[0:4])[0])
# psock_req.close()
#
# spoof NTP response
#
print("waiting for NTP request")
psock = psocket.SocketHndl(iface_name=IFACE, timeout=600)


def is_target_ntp_request(pkt):
	"""Match NTP packets originating from the spoofing target IP_SRC."""
	# Fix: the original bound a lambda to the name `filter`, which shadows
	# the builtin and violates PEP 8 (E731: use a def instead of a lambda).
	return pkt[ntp.NTP] is not None and pkt[ip.IP].src_s == IP_SRC


answer = psock.recvp(filter_match_recv=is_target_ntp_request)[0]
answer_ntp = answer[ntp.NTP]
print("got NTP packet: %s" % answer_ntp)
# Build the spoofed response: swap MACs/IPs/ports of the request and echo the
# client's transmit time back as originate/update time.
ntp_answer_send = Ethernet(dst=answer[Ethernet].src, src=answer[Ethernet].dst) +\
	ip.IP(src=answer[ip.IP].dst, dst_s=IP_SRC, p=ip.IP_PROTO_UDP) +\
	UDP(sport=answer[UDP].dport, dport=answer[UDP].sport) +\
	ntp.NTP(li=ntp.NO_WARNING, v=3, mode=ntp.SERVER, stratum=2, interval=4,
		update_time=answer_ntp.transmit_time,
		originate_time=answer_ntp.transmit_time,
		receive_time=b"\x00" * 4 + answer_ntp.transmit_time[4:],
		transmit_time=b"\x00" * 4 + answer_ntp.transmit_time[4:])
# alternative packet creation
"""
ntp_answer_send = answer.create_reverse()
layer_ntp = ntp_answer_send[ntp.NTP]
layer_ntp.mode = ntp.SERVER
layer_ntp.originate_time = answer_ntp.transmit_time
layer_ntp.receive_time = layer_ntp.transmit_time = b"\x00"*4 + answer_ntp.transmit_time[4:]
"""
psock.send(ntp_answer_send.bin())
psock.close()
| 32.52381
| 91
| 0.736945
| 342
| 2,049
| 4.230994
| 0.315789
| 0.074637
| 0.072564
| 0.087077
| 0.269523
| 0.209399
| 0.144437
| 0.115411
| 0.115411
| 0.093988
| 0
| 0.037017
| 0.116642
| 2,049
| 62
| 92
| 33.048387
| 0.762431
| 0.170327
| 0
| 0
| 0
| 0
| 0.063551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5594c3feafec578628223eff5ebd91b66138d3a5
| 7,524
|
py
|
Python
|
motsfinder/exprs/test_basics.py
|
daniel-dpk/distorted-motsfinder-public
|
8c2eec174c755c55b26b568243e58c2956a35257
|
[
"MIT"
] | 4
|
2019-08-26T09:50:26.000Z
|
2022-03-02T16:11:17.000Z
|
motsfinder/exprs/test_basics.py
|
daniel-dpk/distorted-motsfinder-public
|
8c2eec174c755c55b26b568243e58c2956a35257
|
[
"MIT"
] | 5
|
2021-03-31T19:55:34.000Z
|
2021-04-01T08:29:53.000Z
|
motsfinder/exprs/test_basics.py
|
daniel-dpk/distorted-motsfinder-public
|
8c2eec174c755c55b26b568243e58c2956a35257
|
[
"MIT"
] | 1
|
2019-09-18T14:15:33.000Z
|
2019-09-18T14:15:33.000Z
|
#!/usr/bin/env python3
from __future__ import print_function
from builtins import range, map
import unittest
import sys
import pickle
import numpy as np
from mpmath import mp
from testutils import DpkTestCase
from .numexpr import NumericExpression
from .numexpr import isclose
from .basics import OffsetExpression, DivisionExpression, SimpleSinExpression
from .basics import SimpleCoshExpression
class _TestExpr1(NumericExpression):
    """Test expression a*x**2 with analytic derivatives up to third order."""
    def __init__(self, a=1, **kw):
        super(_TestExpr1, self).__init__(**kw)
        self.a = a
    def _expr_str(self): return "a x**2, where a=%r" % self.a
    def _evaluator(self, use_mp):
        # Bind `a` locally so the lambdas do not re-read the attribute.
        a = self.a
        # (f, f', f'', f''') — the third derivative is identically zero.
        return (lambda x: a*x**2, lambda x: 2*a*x, lambda x: 2*a, self.zero)
class _TestExpr2(NumericExpression):
    """Test expression a/x(t), wrapping a child expression x."""
    def __init__(self, expr, a=1):
        super(_TestExpr2, self).__init__(x=expr)
        self.a = a
    def _expr_str(self):
        return "a/x, where a=%r, x=%s" % (self.a, self.x.str())
    def _evaluator(self, use_mp):
        a = self.a
        x = self.x.evaluator(use_mp)
        def f(t):
            return a/x(t)
        def df(t):
            # Chain rule: d/dt [a/x] = -a x' / x^2.
            return -x.diff(t)*a/x(t)**2
        def ddf(t):
            xt = x(t)
            dxt = x.diff(t, 1)
            ddxt = x.diff(t, 2)
            # Quotient rule applied twice.
            return a*(-ddxt/xt**2 + 2*dxt**2/xt**3)
        return (f, df, ddf)
class _TestExpr3(NumericExpression):
    """Linear combination a*x1 + b*x2 of two child expressions."""
    def __init__(self, expr1, expr2, a=1, b=1):
        super(_TestExpr3, self).__init__(x1=expr1, x2=expr2)
        self.a = a
        self.b = b
    def _expr_str(self):
        return ("a x1 + b x2, where a=%r, b=%r, x1=%s, x2=%s"
                % (self.a, self.b, self.x1.str(), self.x2.str()))
    def _evaluator(self, use_mp):
        a, b = self.a, self.b
        x1, x2 = self.x1.evaluator(use_mp), self.x2.evaluator(use_mp)
        # Only the value and first derivative are supplied here.
        return (lambda t: a*x1(t) + b*x2(t), lambda t: a*x1.diff(t) + b*x2.diff(t))
class _TestExprDomain(NumericExpression):
    """Identity expression carrying an explicit domain, for domain tests."""
    def __init__(self, domain):
        super(_TestExprDomain, self).__init__(domain=domain)
        self.__domain = domain
    def get_domain(self): return self.__domain
    def _expr_str(self): return "id"
    def _evaluator(self, use_mp): return (lambda x: x, lambda x: 1, self.zero)
class TestIsclose(DpkTestCase):
    """Tests for the isclose() helper in float and mpmath modes."""
    def test_float(self):
        """Relative/absolute tolerance behaviour for plain floats."""
        self.assertTrue(isclose(1e7+1, 1e7+1, rel_tol=0, abs_tol=0))
        self.assertTrue(isclose(1e7+1, 1e7, rel_tol=1e-6))
        self.assertFalse(isclose(1e7+1, 1e7, rel_tol=1e-8))
        self.assertTrue(isclose(1e7+1, 1e7, rel_tol=0, abs_tol=2.0))
        self.assertFalse(isclose(1e7+1, 1e7, rel_tol=0, abs_tol=0.5))
    def test_mpmath(self):
        """Tolerance behaviour for mpmath numbers at varying working precision."""
        with mp.workdps(30):
            # The 1e-20 perturbation is representable at 30 digits...
            a = mp.mpf('1e7') + mp.mpf('1e-20')
            b = mp.mpf('1e7')
            self.assertTrue(isclose(a, a, rel_tol=0, abs_tol=0, use_mp=True))
            self.assertFalse(isclose(a, b, use_mp=True))
            with mp.workdps(26):
                # ...but indistinguishable at 26 digits of precision.
                self.assertTrue(isclose(a, b, use_mp=True))
            self.assertTrue(isclose(a, b, rel_tol=1e-26, abs_tol=0, use_mp=True))
            self.assertFalse(isclose(a, b, rel_tol=1e-28, abs_tol=0, use_mp=True))
            self.assertTrue(isclose(a, b, rel_tol=0, abs_tol=1e-19, use_mp=True))
            self.assertFalse(isclose(a, b, rel_tol=0, abs_tol=1e-21, use_mp=True))
class TestNumexpr(DpkTestCase):
    """Tests for NumericExpression construction, pickling, and evaluation."""
    def test_expressions(self):
        """repr() reflects nested expressions; attributes stay mutable."""
        expr = _TestExpr2(_TestExpr1())
        self.assertEqual(repr(expr), "<_TestExpr2(a/x, where a=1, x=(a x**2, where a=1))>")
        self.assertEqual(expr.a, 1)
        expr.a = 5
        self.assertEqual(expr.a, 5)
    def test_name(self):
        """The default name is the class name and can be overridden."""
        expr = _TestExpr1()
        self.assertEqual(expr.name, "_TestExpr1")
        expr.name = "foo"
        self.assertEqual(expr.name, "foo")
    def test_pickle(self):
        """A pickle round-trip preserves type, parameters, children, name."""
        a = 1.5
        expr = _TestExpr2(_TestExpr1(-1), a=a)
        expr.name = "foo"
        s = pickle.dumps(expr)
        expr = pickle.loads(s)
        self.assertIs(type(expr), _TestExpr2)
        self.assertEqual(expr.a, 1.5)
        self.assertIs(type(expr.x), _TestExpr1)
        self.assertEqual(expr.x.a, -1)
        self.assertEqual(expr.name, "foo")
    def test_pickle_domain(self):
        """Domains survive pickling, including mpmath endpoints."""
        expr = _TestExpr1(domain=(0, 1))
        s = pickle.dumps(expr)
        expr = pickle.loads(s)
        self.assertEqual(expr.domain, (0, 1))
        expr = _TestExpr1(domain=(0, mp.pi))
        s = pickle.dumps(expr)
        expr = pickle.loads(s)
        self.assertEqual(expr.domain, (0, mp.pi))
    def test_evaluators(self):
        """Evaluators compute value/derivatives; missing orders raise."""
        a = 1.5
        expr = _TestExpr2(_TestExpr1(), a=a)
        f = expr.evaluator()
        # f(t) = a / t^2, so f' = -2a/t^3 and f'' = 6a/t^4.
        for t in np.linspace(0.1, 2, 4):
            self.assertAlmostEqual(f(t), a/t**2)
        for t in np.linspace(0.1, 2, 4):
            self.assertAlmostEqual(f.diff(t), -2*a/t**3)
        for t in np.linspace(0.1, 2, 4):
            self.assertAlmostEqual(f.diff(t, 2), 6*a/t**4)
        with self.assertRaises(NotImplementedError):
            f.diff(0, 3)
    def test_string_clashing(self):
        """Distinct parameterizations must not produce identical strings."""
        expr1 = _TestExpr1(a=1)
        expr2 = _TestExpr2(2, a=3)
        comp1 = _TestExpr3(expr1, expr2)
        expr2 = _TestExpr2(2, a=1)
        expr1 = _TestExpr1(a=3)
        comp2 = _TestExpr3(expr1, expr2)
        e1 = comp1.evaluator()
        e2 = comp2.evaluator()
        # The expressions are different:
        self.assertNotEqual(e1(.5), e2(.5))
        # Their string are different too:
        self.assertNotEqual(repr(comp1), repr(comp2))
    def test_domain(self):
        """1-D and 2-D domains are exposed as domain / domainX / domainY."""
        expr = _TestExprDomain([-1, 1])
        e = expr.evaluator()
        self.assertTrue(hasattr(e, 'domain'))
        self.assertFalse(hasattr(e, 'domainX'))
        self.assertEqual(e.domain[0], -1)
        self.assertEqual(e.domain[1], 1)
        expr = _TestExprDomain(([-1, 1], [0, 10]))
        e = expr.evaluator()
        self.assertTrue(hasattr(e, 'domain'))
        self.assertEqual(e.domainX[0], -1)
        self.assertEqual(e.domainX[1], 1)
        self.assertEqual(e.domainY[0], 0)
        self.assertEqual(e.domainY[1], 10)
        f = e.function()
        self.assertTrue(hasattr(f, 'domain'))
        self.assertTrue(hasattr(f, 'domainX'))
        self.assertTrue(hasattr(f, 'domainY'))
        self.assertFalse(hasattr(f, 'domainZ'))
class TestOffsetExpression(DpkTestCase):
    """Tests for OffsetExpression (adds a constant to a child expression)."""
    def test_offset(self):
        """x^2 + 1: value shifts by the offset, derivatives are unchanged."""
        expr = OffsetExpression(_TestExpr1(), 1.0)
        e = expr.evaluator()
        self.assertAlmostEqual(e(0), 1.0)
        self.assertAlmostEqual(e(1), 2.0)
        self.assertAlmostEqual(e(2), 5.0)
        self.assertAlmostEqual(e.diff(0), 0.0)
        self.assertAlmostEqual(e.diff(1), 2.0)
        self.assertAlmostEqual(e.diff(2), 4.0)
        self.assertAlmostEqual(e.diff(1, 2), 2.0)
class TestDivisionExpression(DpkTestCase):
    """Tests for DivisionExpression (quotient of two child expressions)."""
    def test_division(self):
        """Analytic derivatives of sin(x)/(cosh(x)+2) match mp.diff numerically."""
        expr = DivisionExpression(
            SimpleSinExpression(),
            OffsetExpression(SimpleCoshExpression(), 2),
        )
        with mp.workdps(30):
            f = expr.evaluator(use_mp=True)
            space = mp.linspace(0, mp.pi, 10)
            # Compare orders 1..4 against mpmath's numerical differentiation.
            for n in range(1, 5):
                self.assertListAlmostEqual(
                    [f.diff(x, n) for x in space],
                    [mp.diff(f, x, n) for x in space],
                    delta=1e-28,
                )
def run_tests():
    """Run all tests of this module verbosely; return the number of failures."""
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromModule(sys.modules[__name__])
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    return len(result.failures)
if __name__ == '__main__':
    # Allow executing this test module directly.
    unittest.main()
| 34.356164
| 91
| 0.591042
| 1,049
| 7,524
| 4.102955
| 0.1449
| 0.055762
| 0.03973
| 0.013941
| 0.335967
| 0.293448
| 0.254647
| 0.218634
| 0.165195
| 0.118727
| 0
| 0.045504
| 0.263955
| 7,524
| 218
| 92
| 34.513761
| 0.731672
| 0.011164
| 0
| 0.175824
| 0
| 0.010989
| 0.029851
| 0
| 0
| 0
| 0
| 0
| 0.28022
| 1
| 0.153846
| false
| 0
| 0.065934
| 0.043956
| 0.313187
| 0.005495
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559516145d3a91e65f7eba170cf38f3e8329840b
| 468
|
py
|
Python
|
python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py
|
gauravssnl/Data-Structures-and-Algorithms
|
1c335c72ce514d4f95090241bbd6edf01a1141a8
|
[
"MIT"
] | 7
|
2020-05-10T09:57:23.000Z
|
2021-03-27T11:55:07.000Z
|
python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py
|
gauravssnl/Data-Structures-and-Algorithms
|
1c335c72ce514d4f95090241bbd6edf01a1141a8
|
[
"MIT"
] | null | null | null |
python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py
|
gauravssnl/Data-Structures-and-Algorithms
|
1c335c72ce514d4f95090241bbd6edf01a1141a8
|
[
"MIT"
] | 3
|
2021-03-27T03:42:57.000Z
|
2021-08-09T12:03:41.000Z
|
from progression import Progression
class FibonacciProgression(Progression):
    """Progression producing Fibonacci-style numbers from two seed values."""
    def __init__(self, first=0, second=1):
        """Start the progression at *first*; *second* becomes the next term."""
        super().__init__(start=first)
        # Store second - first so that the first _advance() yields `second`.
        self._previous = second - first
    def _advance(self):
        # Next term is the sum of the previous two; then shift the window.
        upcoming = self._previous + self._current
        self._previous = self._current
        self._current = upcoming
if __name__ == "__main__":
    # Demo: Fibonacci-like sequence seeded with 1, 2 -> 1, 2, 3, 5, 8, ...
    # (fixed misspelled variable name and keyword-argument spacing)
    fibonacci_progression = FibonacciProgression(first=1, second=2)
    fibonacci_progression.print_progression(20)
| 29.25
| 85
| 0.713675
| 50
| 468
| 6.16
| 0.5
| 0.116883
| 0.103896
| 0.149351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015831
| 0.190171
| 468
| 15
| 86
| 31.2
| 0.796834
| 0
| 0
| 0
| 0
| 0
| 0.017094
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.4
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55966e42aa982766be05f8a6dbd86f8df5f992eb
| 18,587
|
py
|
Python
|
openamundsen/modules/snow/multilayermodel.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 3
|
2021-05-28T06:46:36.000Z
|
2021-06-14T13:39:25.000Z
|
openamundsen/modules/snow/multilayermodel.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 22
|
2021-04-28T12:31:58.000Z
|
2022-03-09T18:29:12.000Z
|
openamundsen/modules/snow/multilayermodel.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 1
|
2021-06-01T12:48:54.000Z
|
2021-06-01T12:48:54.000Z
|
import numpy as np
from numba import njit, prange
from openamundsen import constants, constants as c, heatconduction
from openamundsen.snowmodel import SnowModel
from . import snow
class MultilayerSnowModel(SnowModel):
    """Multi-layer snowpack model operating on the gridded model state.

    Registers its per-layer state variables on construction and delegates
    the physics to the numba kernels (_melt, _sublimation, _runoff,
    _heat_conduction, _update_layers) and the `snow` helper module.
    """
    def __init__(self, model):
        # The number of layers is fixed by the configured per-layer
        # minimum thicknesses.
        self.model = model
        s = model.state.snow
        num_snow_layers = len(model.config.snow.min_thickness)
        s.add_variable('num_layers', '1', 'Number of snow layers', dtype=int, retain=True)
        s.add_variable('thickness', 'm', 'Snow thickness', dim3=num_snow_layers, retain=True)
        s.add_variable('density', 'kg m-3', 'Snow density', 'snow_density', dim3=num_snow_layers)
        s.add_variable('ice_content', 'kg m-2', 'Ice content of snow', dim3=num_snow_layers, retain=True)
        s.add_variable('liquid_water_content', 'kg m-2', 'Liquid water content of snow', 'liquid_water_content_of_snow_layer', dim3=num_snow_layers, retain=True)
        s.add_variable('temp', 'K', 'Snow temperature', dim3=num_snow_layers, retain=True)
        s.add_variable('therm_cond', 'W m-1 K-1', 'Thermal conductivity of snow', dim3=num_snow_layers, retain=True)
        s.add_variable('heat_cap', 'J K-1 m-2', 'Areal heat capacity of snow', dim3=num_snow_layers)
    def initialize(self):
        """Reset all snow state variables over the region of interest (ROI)."""
        roi = self.model.grid.roi
        s = self.model.state.snow
        s.swe[roi] = 0
        s.depth[roi] = 0
        s.area_fraction[roi] = 0
        s.num_layers[roi] = 0
        s.sublimation[roi] = 0
        s.therm_cond[:, roi] = self.model.config.snow.thermal_conductivity
        s.thickness[:, roi] = 0
        s.ice_content[:, roi] = 0
        s.liquid_water_content[:, roi] = 0
        s.temp[:, roi] = constants.T0
    def albedo_aging(self):
        """Delegate albedo aging to the snow helper module."""
        snow.albedo(self.model)
    def compaction(self):
        """Delegate snow compaction to the snow helper module."""
        snow.compaction(self.model)
    def accumulation(self):
        """Add fresh snowfall (where positive) to the top of the snowpack."""
        model = self.model
        s = model.state
        pos = s.meteo.snowfall > 0
        self.add_snow(
            pos,
            s.meteo.snowfall[pos],
            density=snow.fresh_snow_density(s.meteo.wet_bulb_temp[pos]),
        )
    def heat_conduction(self):
        """Update snow layer temperatures via the heat conduction kernel."""
        model = self.model
        s = model.state
        _heat_conduction(
            model.grid.roi_idxs,
            s.snow.num_layers,
            s.snow.thickness,
            s.soil.thickness,
            model.timestep,
            s.snow.temp,
            s.snow.therm_cond,
            s.soil.therm_cond,
            s.surface.heat_flux,
            s.snow.heat_cap,
        )
    def melt(self):
        """Apply the melt kernel, converting ice to liquid water in place."""
        model = self.model
        s = model.state
        _melt(
            model.grid.roi_idxs,
            model.timestep,
            s.snow.num_layers,
            s.snow.melt,
            s.snow.thickness,
            s.snow.temp,
            s.snow.ice_content,
            s.snow.liquid_water_content,
            s.snow.heat_cap,
        )
    def sublimation(self):
        """Handle resublimation (frost deposition) first, then sublimation losses."""
        model = self.model
        s = model.state
        # First resublimation
        frost = -np.minimum(s.snow.sublimation, 0)
        pos = frost > 0
        self.add_snow(
            pos,
            frost[pos],
            density=snow.fresh_snow_density(s.meteo.wet_bulb_temp[pos]),
        )
        # Then sublimation
        _sublimation(
            model.grid.roi_idxs,
            model.timestep,
            s.snow.num_layers,
            s.snow.ice_content,
            s.snow.thickness,
            s.snow.sublimation,
        )
    def runoff(self):
        """Route rain and meltwater through the layers via the runoff kernel."""
        model = self.model
        s = model.state
        _runoff(
            model.grid.roi_idxs,
            snow.max_liquid_water_content(model),
            s.meteo.rainfall,
            s.snow.num_layers,
            s.snow.thickness,
            s.snow.temp,
            s.snow.ice_content,
            s.snow.liquid_water_content,
            s.snow.runoff,
            s.snow.heat_cap,
        )
    def update_layers(self):
        """Rebuild the layer structure and invalidate albedo on snow-free pixels."""
        model = self.model
        s = model.state
        _update_layers(
            model.grid.roi_idxs,
            s.snow.num_layers,
            np.array(model.config.snow.min_thickness),
            s.snow.thickness,
            s.snow.ice_content,
            s.snow.liquid_water_content,
            s.snow.heat_cap,
            s.snow.temp,
            s.snow.density,
            s.snow.depth,
        )
        # Pixels that lost all their layers no longer have a meaningful albedo.
        s.snow.albedo[s.snow.num_layers == 0] = np.nan
    def update_properties(self):
        """Delegate derived snow property updates to the snow helper module."""
        snow.snow_properties(self.model)
    def add_snow(
        self,
        pos,
        ice_content,
        liquid_water_content=0,
        density=None,
        albedo=None,
    ):
        """
        Add snow to the top of the snowpack.

        Parameters: `pos` is a boolean mask of pixels to update;
        `ice_content` and `liquid_water_content` are the masses to add
        (kg m-2); `density` is the density of the added snow (kg m-3);
        `albedo`, if given, overrides the albedo of the updated pixels.

        NOTE(review): with the default density=None, the thickness update
        at the bottom would fail whenever snow is actually added —
        presumably callers always pass a density; confirm.
        """
        model = self.model
        s = model.state
        ice_content = np.nan_to_num(ice_content, nan=0., copy=True)
        pos_init = (s.snow.num_layers[pos] == 0) & (ice_content > 0)
        pos_init_global = model.global_mask(pos_init, pos)
        # If albedo is None, set it to the maximum albedo for currently snow-free pixels and keep
        # the current albedo for the other pixels
        if albedo is None:
            albedo = s.snow.albedo[pos]
            albedo[pos_init] = model.config.snow.albedo.max
        s.snow.albedo[pos] = albedo
        # Initialize first snow layer where necessary
        s.snow.num_layers[pos_init_global] = 1
        s.snow.temp[0, pos_init_global] = np.minimum(s.meteo.temp[pos_init_global], constants.T0)
        # Add snow to first layer
        s.snow.ice_content[0, pos] += ice_content
        s.snow.liquid_water_content[0, pos] += liquid_water_content
        s.snow.thickness[0, pos] += ice_content / density
@njit(cache=True, parallel=True)
def _melt(
    roi_idxs,
    timestep,
    num_layers,
    melt,
    thickness,
    temp,
    ice_content,
    liquid_water_content,
    heat_cap,
):
    """
    Calculate snowmelt following [1].
    Parameters
    ----------
    roi_idxs : ndarray(int, ndim=2)
        (N, 2)-array specifying the (row, col) indices within the data arrays
        that should be considered.
    timestep : float
        Model timestep (s).
    num_layers : ndarray(float, ndim=2)
        Number of snow layers.
    melt : ndarray(float, ndim=2)
        Snowmelt (kg m-2).
    thickness : ndarray(float, ndim=3)
        Snow thickness (m).
    temp : ndarray(float, ndim=3)
        Snow temperature (K).
    ice_content : ndarray(float, ndim=3)
        Ice content of snow (kg m-2).
    liquid_water_content : ndarray(float, ndim=3)
        Liquid water content of snow (kg m-2).
    heat_cap : ndarray(float, ndim=3)
        Areal heat capacity of snow (J K-1 m-2).
    References
    ----------
    .. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
       Geoscientific Model Development, 8(12), 3867–3876.
       https://doi.org/10.5194/gmd-8-3867-2015
    """
    num_pixels = len(roi_idxs)
    for idx_num in prange(num_pixels):
        i, j = roi_idxs[idx_num]
        # Total ice mass (kg m-2) to be melted in this column, consumed
        # layer by layer from the top down.
        ice_content_change = melt[i, j]
        for k in range(num_layers[i, j]):
            cold_content = heat_cap[k, i, j] * (c.T0 - temp[k, i, j])
            # Negative cold content means the layer is above the melting
            # point: the excess energy melts additional ice and the layer
            # is reset to the melting temperature.
            if cold_content < 0:
                ice_content_change -= cold_content / c.LATENT_HEAT_OF_FUSION
                temp[k, i, j] = c.T0
            if ice_content_change > 0:
                if ice_content_change > ice_content[k, i, j]:  # layer melts completely
                    ice_content_change -= ice_content[k, i, j]
                    thickness[k, i, j] = 0.
                    liquid_water_content[k, i, j] += ice_content[k, i, j]
                    ice_content[k, i, j] = 0.
                else:  # layer melts partially
                    # Thickness shrinks proportionally to the melted ice fraction.
                    thickness[k, i, j] *= (1 - ice_content_change / ice_content[k, i, j])
                    ice_content[k, i, j] -= ice_content_change
                    liquid_water_content[k, i, j] += ice_content_change
                    ice_content_change = 0.
@njit(cache=True, parallel=True)
def _sublimation(
    roi_idxs,
    timestep,
    num_layers,
    ice_content,
    thickness,
    sublimation,
):
    """
    Calculate snow sublimation following [1].
    Parameters
    ----------
    roi_idxs : ndarray(int, ndim=2)
        (N, 2)-array specifying the (row, col) indices within the data arrays
        that should be considered.
    timestep : float
        Model timestep (s).
    num_layers : ndarray(float, ndim=2)
        Number of snow layers.
    ice_content : ndarray(float, ndim=3)
        Ice content of snow (kg m-2).
    thickness : ndarray(float, ndim=3)
        Snow thickness (m).
    sublimation : ndarray(float, ndim=2)
        Snow sublimation (kg m-2).
    References
    ----------
    .. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
       Geoscientific Model Development, 8(12), 3867–3876.
       https://doi.org/10.5194/gmd-8-3867-2015
    """
    num_pixels = len(roi_idxs)
    for idx_num in prange(num_pixels):
        i, j = roi_idxs[idx_num]
        # Only a positive (mass-removing) flux is applied here; negative
        # values (frost deposition) are clamped to zero.
        ice_content_change = max(sublimation[i, j], 0.)
        if ice_content_change > 0:
            # Remove ice from the top layer downwards.
            for k in range(num_layers[i, j]):
                if ice_content_change > ice_content[k, i, j]:  # complete sublimation of layer
                    ice_content_change -= ice_content[k, i, j]
                    thickness[k, i, j] = 0.
                    ice_content[k, i, j] = 0.
                else:  # partial sublimation
                    thickness[k, i, j] *= (1 - ice_content_change / ice_content[k, i, j])
                    ice_content[k, i, j] -= ice_content_change
                    ice_content_change = 0.
@njit(cache=True, parallel=True)
def _runoff(
    roi_idxs,
    max_liquid_water_content,
    rainfall,
    num_layers,
    thickness,
    temp,
    ice_content,
    liquid_water_content,
    runoff,
    heat_cap,
):
    """
    Calculate snowmelt runoff following [1].
    Parameters
    ----------
    roi_idxs : ndarray(int, ndim=2)
        (N, 2)-array specifying the (row, col) indices within the data arrays
        that should be considered.
    max_liquid_water_content : ndarray(float, ndim=3)
        Maximum liquid water content (kg m-2).
    rainfall : ndarray(float, ndim=2)
        Rainfall amount (kg m-2).
    num_layers : ndarray(float, ndim=2)
        Number of snow layers.
    thickness : ndarray(float, ndim=3)
        Snow thickness (m).
    temp : ndarray(float, ndim=3)
        Snow temperature (K).
    ice_content : ndarray(float, ndim=3)
        Ice content of snow (kg m-2).
    liquid_water_content : ndarray(float, ndim=3)
        Liquid water content of snow (kg m-2).
    runoff : ndarray(float, ndim=2)
        Snow runoff (kg m-2).
    heat_cap : ndarray(float, ndim=3)
        Areal heat capacity of snow (J K-1 m-2).
    References
    ----------
    .. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
       Geoscientific Model Development, 8(12), 3867–3876.
       https://doi.org/10.5194/gmd-8-3867-2015
    """
    num_pixels = len(roi_idxs)
    for idx_num in prange(num_pixels):
        i, j = roi_idxs[idx_num]
        # Water entering the top layer is the rain falling onto the snowpack.
        runoff[i, j] = rainfall[i, j]
        # Treat missing rainfall values as zero input.
        if np.isnan(runoff[i, j]):
            runoff[i, j] = 0.
        for k in range(num_layers[i, j]):
            liquid_water_content[k, i, j] += runoff[i, j]
            # Water exceeding the layer's retention capacity percolates
            # onward; runoff[i, j] carries it to the next layer (and out of
            # the snowpack after the last one).
            if liquid_water_content[k, i, j] > max_liquid_water_content[k, i, j]:
                runoff[i, j] = liquid_water_content[k, i, j] - max_liquid_water_content[k, i, j]
                liquid_water_content[k, i, j] = max_liquid_water_content[k, i, j]
            else:
                runoff[i, j] = 0.
            # Refreeze liquid water
            cold_content = heat_cap[k, i, j] * (c.T0 - temp[k, i, j])
            if cold_content > 0:
                # Refreezing is limited by both the available water and the
                # energy deficit of the layer; it releases latent heat,
                # warming the layer accordingly.
                ice_content_change = min(
                    liquid_water_content[k, i, j],
                    cold_content / c.LATENT_HEAT_OF_FUSION,
                )
                liquid_water_content[k, i, j] -= ice_content_change
                ice_content[k, i, j] += ice_content_change
                temp[k, i, j] += c.LATENT_HEAT_OF_FUSION * ice_content_change / heat_cap[k, i, j]
@njit(parallel=True, cache=True)
def _heat_conduction(
    roi_idxs,
    num_layers,
    snow_thickness,
    soil_thickness,
    timestep,
    temp,
    therm_cond_snow,
    therm_cond_soil,
    heat_flux,
    heat_cap,
):
    """
    Update snow layer temperatures.
    Parameters
    ----------
    roi_idxs : ndarray(int, ndim=2)
        (N, 2)-array specifying the (row, col) indices within the data arrays
        that should be considered.
    num_layers : ndarray(float, ndim=2)
        Number of snow layers.
    snow_thickness : ndarray(float, ndim=3)
        Snow thickness (m).
    soil_thickness : ndarray(float, ndim=3)
        Soil thickness (m).
    timestep : float
        Model timestep (s).
    temp : ndarray(float, ndim=3)
        Snow temperature (K).
    therm_cond_snow : ndarray(float, ndim=3)
        Snow thermal conductivity (W m-1 K-1).
    therm_cond_soil : ndarray(float, ndim=3)
        Soil thermal conductivity (W m-1 K-1).
    heat_flux : ndarray(float, ndim=2)
        Surface heat flux (W m-2).
    heat_cap : ndarray(float, ndim=3)
        Areal heat capacity of snow (J K-1 m-2).
    References
    ----------
    .. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
       Geoscientific Model Development, 8(12), 3867–3876.
       https://doi.org/10.5194/gmd-8-3867-2015
    """
    num_pixels = len(roi_idxs)
    for idx_num in prange(num_pixels):
        i, j = roi_idxs[idx_num]
        ns = num_layers[i, j]
        if ns > 0:
            # NOTE(review): temp[-1, i, j] is the bottom-most slot of the
            # snow temperature array — presumably used as the lower boundary
            # temperature of the conduction solve; confirm against the
            # signature of heatconduction.temp_change.
            temp[:ns, i, j] += heatconduction.temp_change(
                snow_thickness[:ns, i, j],
                timestep,
                temp[:ns, i, j],
                therm_cond_snow[:ns, i, j],
                temp[-1, i, j],
                soil_thickness[0, i, j],
                therm_cond_soil[0, i, j],
                heat_flux[i, j],
                heat_cap[:ns, i, j],
            )
@njit(cache=True, parallel=True)
def _update_layers(
    roi_idxs,
    num_layers,
    min_thickness,
    thickness,
    ice_content,
    liquid_water_content,
    heat_cap,
    temp,
    density,
    depth,
):
    """
    Update snow layers.
    Parameters
    ----------
    roi_idxs : ndarray(int, ndim=2)
        (N, 2)-array specifying the (row, col) indices within the data arrays
        that should be considered.
    num_layers : ndarray(float, ndim=2)
        Number of snow layers.
    min_thickness : ndarray(float, ndim=1)
        Minimum snow layer thicknesses (m).
    thickness : ndarray(float, ndim=3)
        Snow thickness (m).
    ice_content : ndarray(float, ndim=3)
        Ice content of snow (kg m-2).
    liquid_water_content : ndarray(float, ndim=3)
        Liquid water content of snow (kg m-2).
    heat_cap : ndarray(float, ndim=3)
        Areal heat capacity of snow (J K-1 m-2).
    temp : ndarray(float, ndim=3)
        Snow temperature (K).
    density : ndarray(float, ndim=3)
        Snow density (kg m-3).
    depth : ndarray(float, ndim=2)
        Snow depth (m).
    References
    ----------
    .. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
       Geoscientific Model Development, 8(12), 3867–3876.
       https://doi.org/10.5194/gmd-8-3867-2015
    """
    max_num_layers = len(min_thickness)
    # Snapshot the old layer structure before rebuilding it in place.
    num_layers_prev = num_layers.copy()
    thickness_prev = thickness.copy()
    ice_content_prev = ice_content.copy()
    liquid_water_content_prev = liquid_water_content.copy()
    energy_prev = heat_cap * (temp - c.T0)  # energy content (J m-2)
    num_pixels = len(roi_idxs)
    for idx_num in prange(num_pixels):
        i, j = roi_idxs[idx_num]
        # Reset the column; it is rebuilt below from the stored snapshot.
        num_layers[i, j] = 0
        thickness[:, i, j] = 0.
        ice_content[:, i, j] = 0.
        liquid_water_content[:, i, j] = 0.
        temp[:, i, j] = c.T0
        density[:, i, j] = np.nan
        internal_energy = np.zeros(max_num_layers)
        if depth[i, j] > 0:
            new_thickness = depth[i, j]
            # Update thicknesses and number of layers
            for k in range(max_num_layers):
                thickness[k, i, j] = min_thickness[k]
                new_thickness -= min_thickness[k]
                if new_thickness <= min_thickness[k] or k == max_num_layers - 1:
                    # Remaining depth is absorbed by the lowest created layer.
                    thickness[k, i, j] += new_thickness
                    break
            # Set thin snow layers to 0 to avoid numerical artifacts
            # TODO should this be done at some other location?
            for k in range(max_num_layers):
                if thickness[k, i, j] < 1e-6:
                    thickness[k, i, j] = 0.
            ns = (thickness[:, i, j] > 0).sum()  # new number of layers
            new_thickness = thickness[0, i, j]
            k_new = 0
            # Redistribute mass and energy from the old layers onto the new
            # layer grid; old layers straddling a new-layer boundary are
            # split proportionally by thickness (`weight`).
            # TODO optimize this loop
            for k_old in range(num_layers_prev[i, j]):
                while True:  # TODO replace with normal loop
                    weight = min(new_thickness / thickness_prev[k_old, i, j], 1.)
                    ice_content[k_new, i, j] += weight * ice_content_prev[k_old, i, j]
                    liquid_water_content[k_new, i, j] += weight * liquid_water_content_prev[k_old, i, j]
                    internal_energy[k_new] += weight * energy_prev[k_old, i, j]
                    if weight == 1.:
                        # Old layer fully consumed; move on to the next one.
                        new_thickness -= thickness_prev[k_old, i, j]
                        break
                    # Keep the unconsumed remainder of the old layer and
                    # continue filling the next new layer with it.
                    thickness_prev[k_old, i, j] *= 1 - weight
                    ice_content_prev[k_old, i, j] *= 1 - weight
                    liquid_water_content_prev[k_old, i, j] *= 1 - weight
                    energy_prev[k_old, i, j] *= 1 - weight
                    k_new += 1
                    if k_new >= ns:
                        break
                if weight < 1:
                    new_thickness = thickness[k_new, i, j]
            num_layers[i, j] = ns
            # Update areal heat capacity and snow temperature
            heat_cap[:ns, i, j] = (  # TODO use snow_heat_capacity() for this
                ice_content[:ns, i, j] * c.SPEC_HEAT_CAP_ICE
                + liquid_water_content[:ns, i, j] * c.SPEC_HEAT_CAP_WATER
            )
            temp[:ns, i, j] = c.T0 + internal_energy[:ns] / heat_cap[:ns, i, j]
            # Update density
            density[:ns, i, j] = (
                (liquid_water_content[:ns, i, j] + ice_content[:ns, i, j])
                / thickness[:ns, i, j]
            )
| 30.470492
| 161
| 0.559907
| 2,487
| 18,587
| 4.008042
| 0.086047
| 0.019663
| 0.075843
| 0.042636
| 0.632624
| 0.55327
| 0.514346
| 0.45325
| 0.408106
| 0.375201
| 0
| 0.026072
| 0.327272
| 18,587
| 609
| 162
| 30.520525
| 0.770713
| 0.285845
| 0
| 0.431548
| 0
| 0
| 0.026039
| 0.002691
| 0
| 0
| 0
| 0.001642
| 0
| 1
| 0.050595
| false
| 0
| 0.014881
| 0
| 0.068452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5598bbdfc235215336c94064608a0db8ff763655
| 3,961
|
py
|
Python
|
bpmn/urls.py
|
VSSantana/SFDjango-BPMN
|
e5a3fb8da9282fd88f72a85a4b34d89d38391e36
|
[
"MIT"
] | 1
|
2021-09-21T00:02:10.000Z
|
2021-09-21T00:02:10.000Z
|
bpmn/urls.py
|
VSSantana/SFDjango-BPMN
|
e5a3fb8da9282fd88f72a85a4b34d89d38391e36
|
[
"MIT"
] | 5
|
2021-09-22T13:54:06.000Z
|
2021-09-22T14:05:56.000Z
|
bpmn/urls.py
|
marcelobbfonseca/SFDjango-BPMN
|
50565763414f52d9e84004494cf550c6fe2358fa
|
[
"MIT"
] | 1
|
2021-09-18T01:22:25.000Z
|
2021-09-18T01:22:25.000Z
|
from django.urls import path
from django.contrib.auth.views import LoginView
from .views.activity_view import *
from .views.activity_type_view import *
from .views.event_view import *
from .views.flow_view import *
from .views.lane_view import *
from .views.pool_view import *
from .views.process_type_view import *
from .views.process_view import *
from .views.sequence_view import *
# URL routes for the BPMN app: a login page plus list/create/update/delete
# views for each BPMN element type, and two special-purpose views.
urlpatterns = [
    # Login page (site root).
    path('', LoginView.as_view(template_name='accounts/login.html'), name="login"),
    # Activity types (list / create / update / delete).
    path('activity_type_list/', ActivityTypeView.as_view(), name='activity_type_list'),
    path('activity_type_create_form/', ActivityTypeCreate.as_view(), name='activity_type_create_form'),
    path('activity_type_update_form/<int:pk>', ActivityTypeUpdate.as_view(), name='activity_type_update_form'),
    path('activity_type_delete_confirmation/<int:pk>', ActivityTypeDelete.as_view(), name='activity_type_delete_confirmation'),
    # Process types.
    path('process_type_list/', ProcessTypeView.as_view(), name='process_type_list'),
    path('process_type_create_form/', ProcessTypeCreate.as_view(), name='process_type_create_form'),
    path('process_type_update_form/<int:pk>', ProcessTypeUpdate.as_view(), name='process_type_update_form'),
    path('process_type_delete_confirmation/<int:pk>', ProcessTypeDelete.as_view(), name='process_type_delete_confirmation'),
    # Pools.
    path('pool_list/', PoolView.as_view(), name='pool_list'),
    path('pool_create_form/', PoolCreate.as_view(), name='pool_create_form'),
    path('pool_update_form/<int:pk>', PoolUpdate.as_view(), name='pool_update_form'),
    path('pool_delete_confirmation/<int:pk>', PoolDelete.as_view(), name='pool_delete_confirmation'),
    # Lanes.
    path('lane_list/', LaneView.as_view(), name='lane_list'),
    path('lane_create_form/', LaneCreate.as_view(), name='lane_create_form'),
    path('lane_update_form/<int:pk>', LaneUpdate.as_view(), name='lane_update_form'),
    path('lane_delete_confirmation/<int:pk>', LaneDelete.as_view(), name='lane_delete_confirmation'),
    # Events.
    path('event_list/', EventView.as_view(), name='event_list'),
    path('event_create_form/', EventCreate.as_view(), name='event_create_form'),
    path('event_update_form/<int:pk>', EventUpdate.as_view(), name='event_update_form'),
    path('event_delete_confirmation/<int:pk>', EventDelete.as_view(), name='event_delete_confirmation'),
    # Activities.
    path('activity_list/', ActivityView.as_view(), name='activity_list'),
    path('activity_create_form/', ActivityCreate.as_view(), name='activity_create_form'),
    path('activity_update_form/<int:pk>', ActivityUpdate.as_view(), name='activity_update_form'),
    path('activity_delete_confirmation/<int:pk>', ActivityDelete.as_view(), name='activity_delete_confirmation'),
    # Sequences.
    path('sequence_list/', SequenceView.as_view(), name='sequence_list'),
    path('sequence_create_form/', SequenceCreate.as_view(), name='sequence_create_form'),
    path('sequence_update_form/<int:pk>', SequenceUpdate.as_view(), name='sequence_update_form'),
    path('sequence_delete_confirmation/<int:pk>', SequenceDelete.as_view(), name='sequence_delete_confirmation'),
    # Flows.
    path('flow_list/', FlowView.as_view(), name='flow_list'),
    path('flow_create_form/', FlowCreate.as_view(), name='flow_create_form'),
    path('flow_update_form/<int:pk>', FlowUpdate.as_view(), name='flow_update_form'),
    path('flow_delete_confirmation/<int:pk>', FlowDelete.as_view(), name='flow_delete_confirmation'),
    # Processes.
    path('process_list/', ProcessView.as_view(), name='process_list'),
    path('process_create_form/', ProcessCreate.as_view(), name='process_create_form'),
    path('process_update_form/<int:pk>', ProcessUpdate.as_view(), name='process_update_form'),
    path('process_delete_confirmation/<int:pk>', ProcessDelete.as_view(), name='process_delete_confirmation'),
    # Special-purpose views.
    path('process-modeling/', ProcessModelingView.as_view(), name="process_modeling"),
    # NOTE(review): unlike every other route, this one has no trailing
    # slash — confirm whether that is intentional.
    path('ontology-suggestion', OntologySuggestionView.as_view(), name="ontology_suggestion")
]
| 73.351852
| 127
| 0.757637
| 519
| 3,961
| 5.406551
| 0.142582
| 0.083393
| 0.135424
| 0.048111
| 0.122238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081293
| 3,961
| 53
| 128
| 74.735849
| 0.771091
| 0
| 0
| 0
| 0
| 0
| 0.423378
| 0.256501
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.211538
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559adf86675fc57065409a6e9ac6154669c807e5
| 3,404
|
py
|
Python
|
edwin/__init__.py
|
AlanSwenson/edwin
|
94f62a4db6cc5123224607f92a1f552be072c708
|
[
"MIT"
] | null | null | null |
edwin/__init__.py
|
AlanSwenson/edwin
|
94f62a4db6cc5123224607f92a1f552be072c708
|
[
"MIT"
] | 8
|
2019-03-13T13:39:00.000Z
|
2019-04-02T14:58:21.000Z
|
edwin/__init__.py
|
AlanSwenson/edwin
|
94f62a4db6cc5123224607f92a1f552be072c708
|
[
"MIT"
] | null | null | null |
import eventlet
eventlet.monkey_patch()
import time
from datetime import datetime, timedelta, timezone
import pytz
from email.utils import parsedate_tz
import json
from flask import Flask, request, render_template
from threading import Thread
from tweepy import OAuthHandler, API, Stream, Cursor
from flask_socketio import (
SocketIO,
emit,
join_room,
leave_room,
close_room,
rooms,
disconnect,
)
from darksky import forecast
# Module-level singletons: the Socket.IO server and the two background
# worker thread handles (created lazily on the first page request).
socketio = SocketIO()
thread = None
thread2 = None
from edwin.tweets import StdOutListener
def create_app():
    """Application factory for the dashboard.

    Builds the Flask app, attaches Socket.IO, connects to the Twitter and
    Dark Sky APIs, and registers the index route, which lazily starts the
    two background streaming threads.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)
    app.config.from_object("config")
    app.config["SECRET_KEY"] = "secret!"
    with app.app_context():
        socketio.init_app(app, async_mode="eventlet")
        # These config variables come from 'config.py'
        CONSUMER_KEY = app.config["TWITTER_CONSUMER_KEY"]
        CONSUMER_SECRET = app.config["TWITTER_CONSUMER_SECRET"]
        ACCESS_TOKEN = app.config["TWITTER_ACCESS_TOKEN"]
        ACCESS_TOKEN_SECRET = app.config["TWITTER_ACCESS_TOKEN_SECRET"]
        TWITTER_SCREEN_NAME = app.config["TWITTER_SCREEN_NAME"]
        DARKSKY_KEY = app.config["DARKSKY_KEY"]
        auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
        auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
        api = API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
        ids = api.friends_ids(screen_name=TWITTER_SCREEN_NAME, stringify_ids="true")
        try:
            dc = forecast(DARKSKY_KEY, 38.9159, -77.0446)
        except Exception:  # narrowed from bare `except:` — don't mask SystemExit/KeyboardInterrupt
            print("failed connection to darksky")

        @app.route("/", methods=["GET"])
        def index():
            """Serve the dashboard; start the background threads exactly once."""
            global thread
            global thread2
            if thread is None:
                thread = Thread(target=twitter_thread, daemon=True)
                thread.start()
            if thread2 is None:
                thread2 = Thread(target=darksky_thread, daemon=True)
                thread2.start()
            return render_template("index.html")

        def twitter_thread():
            """Connect to the Twitter streaming API and send data to the client."""
            stream = Stream(auth, listener)
            stream.filter(follow=ids, filter_level="low")

        def darksky_thread():
            """Poll Dark Sky every two minutes and emit the current conditions."""
            while True:
                try:
                    dc.refresh(extend='daily')
                    sunrise = convert_unix_ts(dc['daily']['data'][0]['sunriseTime'])
                    sunset = convert_unix_ts(dc['daily']['data'][0]['sunsetTime'])
                    # convert to int for a nice round whole number temperature
                    temp = int(dc.temperature)
                except Exception:  # narrowed from bare `except:`; also covers `dc` being unbound if the initial connection failed
                    print("break")
                    sunrise = "_"
                    sunset = "-"
                    temp = "Connection Lost"
                socketio.emit(
                    "darksky_channel",
                    {"temp": temp,
                     "sunrise": sunrise,
                     "sunset": sunset},
                    namespace="/darksky_streaming",
                )
                time.sleep(120)
    listener = StdOutListener()
    return app
def convert_unix_ts(ts):
ts= int(ts)
return datetime.fromtimestamp(ts).strftime('%-I:%M')
| 30.392857
| 84
| 0.574031
| 357
| 3,404
| 5.268908
| 0.40056
| 0.038278
| 0.042531
| 0.035088
| 0.079213
| 0.026582
| 0.026582
| 0
| 0
| 0
| 0
| 0.013632
| 0.331962
| 3,404
| 112
| 85
| 30.392857
| 0.813544
| 0.046122
| 0
| 0.047619
| 0
| 0
| 0.102129
| 0.015427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059524
| false
| 0
| 0.142857
| 0
| 0.238095
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559ae7307b62942efd1983a817dbb736879880c0
| 2,255
|
py
|
Python
|
troop/admin.py
|
packmas13/registration
|
bfb42c5479d59494b59e7c656cb04826e110e8d2
|
[
"MIT"
] | 1
|
2020-08-12T09:51:42.000Z
|
2020-08-12T09:51:42.000Z
|
troop/admin.py
|
packmas13/registration
|
bfb42c5479d59494b59e7c656cb04826e110e8d2
|
[
"MIT"
] | 46
|
2020-01-24T16:51:41.000Z
|
2022-03-29T16:03:12.000Z
|
troop/admin.py
|
packmas13/registration
|
bfb42c5479d59494b59e7c656cb04826e110e8d2
|
[
"MIT"
] | 1
|
2020-01-28T21:25:06.000Z
|
2020-01-28T21:25:06.000Z
|
from django import forms
from django.contrib import admin
from .models import Attendance, Diet, Participant, Troop
from payment.admin import DiscountInline, PaymentInline
class AttendanceInline(admin.TabularInline):
    # Read-only rows of the participant<->attendance through table.
    model = Participant.attendance.through
    readonly_fields = ("participant",)
    can_delete = False
    def has_add_permission(self, request, obj=None):
        # Attendance links cannot be added from this inline.
        return False
class AttendanceAdmin(admin.ModelAdmin):
    # Show the linked participants inline on each attendance record.
    inlines = [
        AttendanceInline,
    ]
    list_display = (
        "date",
        "is_main",
    )
class DietInline(admin.TabularInline):
    # Read-only rows of the participant<->diet through table.
    model = Participant.diet.through
    readonly_fields = ("participant",)
    can_delete = False
    def has_add_permission(self, request, obj=None):
        # Diet links cannot be added from this inline.
        return False
class DietAdmin(admin.ModelAdmin):
    # Show the linked participants inline on each diet record.
    inlines = [
        DietInline,
    ]
class ParticipantAdmin(admin.ModelAdmin):
    """Admin for participants with their payment discounts inline."""
    inlines = [
        DiscountInline,
    ]
    list_display = (
        "troop",
        "first_name",
        "last_name",
        "birthday",
        "age_section",
        "is_leader",
    )
    list_display_links = (
        "first_name",
        "last_name",
        "birthday",
    )
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Render the 'comment' field with a multi-line textarea widget."""
        # Modernized: zero-argument super() (Python 3) instead of
        # super(ParticipantAdmin, self).
        formfield = super().formfield_for_dbfield(db_field, **kwargs)
        if db_field.name == "comment":
            formfield.widget = forms.Textarea(attrs=formfield.widget.attrs)
        return formfield
class ParticipantInline(admin.TabularInline):
    # Read-only participant rows with a change link, shown on the troop page.
    model = Participant
    fields = (
        "first_name",
        "last_name",
        "birthday",
    )
    readonly_fields = (
        "first_name",
        "last_name",
        "birthday",
    )
    can_delete = False
    show_change_link = True
    def has_add_permission(self, request, obj=None):
        # Participants cannot be added from the troop page.
        return False
class TroopAdmin(admin.ModelAdmin):
    # Show each troop's participants and payments inline.
    inlines = [
        ParticipantInline,
        PaymentInline,
    ]
    list_display = (
        "number",
        "name",
    )
    list_display_links = ("name",)
# Register all models with their customized admin classes.
admin.site.register(Attendance, AttendanceAdmin)
admin.site.register(Diet, DietAdmin)
admin.site.register(Participant, ParticipantAdmin)
admin.site.register(Troop, TroopAdmin)
| 21.47619
| 75
| 0.632373
| 217
| 2,255
| 6.391705
| 0.336406
| 0.039654
| 0.063446
| 0.049027
| 0.261716
| 0.225667
| 0.180966
| 0.180966
| 0.180966
| 0.180966
| 0
| 0
| 0.267849
| 2,255
| 104
| 76
| 21.682692
| 0.840097
| 0
| 0
| 0.365854
| 0
| 0
| 0.082927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.04878
| 0.036585
| 0.47561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559b8b906411edd79ce8b01d4b0d9cdea4c7292c
| 829
|
py
|
Python
|
demo_snippets/11_Datenvisualisierung/main.py
|
fabod/pro2
|
69b1015fa789ef05bf9b514d94b231f76bdf5e29
|
[
"MIT"
] | 2
|
2020-03-03T14:57:40.000Z
|
2020-03-20T10:59:47.000Z
|
demo_snippets/11_Datenvisualisierung/main.py
|
fabod/pro2
|
69b1015fa789ef05bf9b514d94b231f76bdf5e29
|
[
"MIT"
] | null | null | null |
demo_snippets/11_Datenvisualisierung/main.py
|
fabod/pro2
|
69b1015fa789ef05bf9b514d94b231f76bdf5e29
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import render_template
import plotly.express as px
from plotly.offline import plot
app = Flask("Datenvisualisierung")
def data():
    """Return the gapminder rows for Switzerland.

    Fixed: the local variable no longer shadows the function's own name.
    """
    gapminder = px.data.gapminder()
    return gapminder[gapminder.country == 'Switzerland']
def viz():
    """Build the population bar chart and return it as an embeddable HTML div."""
    swiss = data()
    figure = px.bar(
        swiss,
        x='year', y='pop',
        hover_data=['lifeExp', 'gdpPercap'],
        color='lifeExp',
        labels={
            'pop': 'Einwohner der Schweiz',
            'year': 'Jahrzehnt'
        },
        height=400
    )
    return plot(figure, output_type="div")
@app.route("/")
def index():
    """Render the start page with the embedded Plotly visualization."""
    # Removed dead commented-out debugging code.
    return render_template('index.html', viz_div=viz())
if __name__ == '__main__':
    # Development server only — not suitable for production use.
    app.run(debug=True, port=5000)
| 18.422222
| 53
| 0.587455
| 105
| 829
| 4.47619
| 0.533333
| 0.051064
| 0.06383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01157
| 0.270205
| 829
| 44
| 54
| 18.840909
| 0.765289
| 0.043426
| 0
| 0
| 0
| 0
| 0.150442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559c155e6e0b7efb591c20bbc5e5237149bd61eb
| 2,940
|
py
|
Python
|
data_analysis/get_model_statistics.py
|
fluTN/influenza
|
40cbede52bc4e95d52369eebe4a50ad4b71369d1
|
[
"MIT"
] | 1
|
2020-10-29T09:56:31.000Z
|
2020-10-29T09:56:31.000Z
|
data_analysis/get_model_statistics.py
|
fluTN/influenza
|
40cbede52bc4e95d52369eebe4a50ad4b71369d1
|
[
"MIT"
] | null | null | null |
data_analysis/get_model_statistics.py
|
fluTN/influenza
|
40cbede52bc4e95d52369eebe4a50ad4b71369d1
|
[
"MIT"
] | 1
|
2022-01-22T11:34:29.000Z
|
2022-01-22T11:34:29.000Z
|
# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
import numpy as np
from scipy import stats
from docopt import docopt
import os
import glob
from sklearn.metrics import mean_squared_error
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
def get_results_filename(basepath):
    """Return the season range "<start>-<end>" encoded in a prediction file name.

    Looks for files named ``<start>-<end>-prediction.csv`` inside *basepath*
    and extracts the two year fields from the first match.

    Parameters
    ----------
    basepath: str
        Directory containing the ``*-prediction.csv`` result files.

    Returns
    -------
    str
        The "<start>-<end>" prefix, e.g. ``"2010-2011"``.

    Raises
    ------
    FileNotFoundError
        If no prediction file is present in *basepath* (the original code
        crashed here with an opaque ``IndexError``).
    """
    files = glob.glob(os.path.join(basepath, "*-prediction.csv"))
    if not files:
        raise FileNotFoundError(
            "No *-prediction.csv file found in {}".format(basepath))
    # File names look like "<start>-<end>-prediction.csv".
    parts = os.path.basename(files[0]).split("-")
    return "{}-{}".format(parts[0], parts[1])
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Get only the weeks we care for
start_year = "2007-42" if not args["--start-year"] else args["--start-year"]
end_year = "2019-15" if not args["--end-year"] else args["--end-year"]
start_season = data["week"] >= start_year
end_season = data["week"] <= str(int(end_year.split("-")[0]) + 1) + "-" + end_year.split("-")[1]
total = start_season & end_season
data = data[total]
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
total_pearson = 0
for i in np.arange(0, len(data["prediction"]), 26):
total_pearson += stats.pearsonr(data["prediction"][i:i+26], data["incidence"][i:i+26])[0]
print("Pearson Correlation (value/p): ", total_pearson/(len(data["prediction"])/26))
print("")
print("Mean Squared Error: ", mean_squared_error(data["prediction"], data["incidence"]))
print("")
if not args["--no-graph"]:
ax = sns.distplot(residuals, label="Residual")
plt.figure()
ax = sns.distplot(data["incidence"], label="Incidence")
ax = sns.distplot(data["prediction"], label="Prediction")
plt.legend()
plt.show()
| 33.409091
| 172
| 0.644558
| 399
| 2,940
| 4.62406
| 0.358396
| 0.026558
| 0.019512
| 0.017344
| 0.0271
| 0.0271
| 0
| 0
| 0
| 0
| 0
| 0.013779
| 0.185374
| 2,940
| 87
| 173
| 33.793103
| 0.756576
| 0.197619
| 0
| 0.09434
| 0
| 0
| 0.196505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.169811
| 0
| 0.207547
| 0.207547
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559f3ab5a294666e58af2d7a21dc2e34d7f16b41
| 21,887
|
py
|
Python
|
sisu/summarizer.py
|
balouf/sisu
|
07541e6a02e545372452b33f7df056331397001f
|
[
"BSD-3-Clause"
] | null | null | null |
sisu/summarizer.py
|
balouf/sisu
|
07541e6a02e545372452b33f7df056331397001f
|
[
"BSD-3-Clause"
] | null | null | null |
sisu/summarizer.py
|
balouf/sisu
|
07541e6a02e545372452b33f7df056331397001f
|
[
"BSD-3-Clause"
] | null | null | null |
from scipy.sparse import vstack
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from sisu.preprocessing.tokenizer import is_relevant_sentence, make_sentences, sanitize_text
from gismo.gismo import Gismo, covering_order
from gismo.common import auto_k
from gismo.parameters import Parameters
from gismo.corpus import Corpus
from gismo.embedding import Embedding
from sisu.embedding_idf import IdfEmbedding
def cosine_order(projection, sentences, query):
    """
    Sort the relevant sentences by decreasing cosine similarity with the query.

    Parameters
    ----------
    projection: callable
        Function mapping a text to a tuple whose first entry is an embedding
        (typically a Gismo :meth:`~gismo.embedding.Embedding.query_projection`).
    sentences: :class:`list` of :class:`dict`
        Sentences as output by :func:`~sisu.summarizer.extract_sentences`.
    query: :class:`str`
        Target query.

    Returns
    -------
    :class:`list` of :class:`int`
        Indexes of the relevant sentences, most similar first.
    """
    kept = [s['index'] for s in sentences if s['relevant']]
    query_vector = projection(query)[0]
    sentence_vectors = vstack([projection(sentences[idx]['sanitized'])[0] for idx in kept])
    similarities = cosine_similarity(sentence_vectors, query_vector)[:, 0]
    best_first = np.argsort(-similarities)
    return [kept[rank] for rank in best_first]
def extract_sentences(source, indices, getter=None, tester=None):
    """
    Pick up the entries of the source corresponding to indices and build a list of sentences out of that.

    Each sentence is a dictionary with the following keys:

    - `index`: position of the sentence in the returned list
    - `sentence`: the actual sentence
    - `relevant`: a boolean that tells if the sentence is eligible for being part of the summary
    - `sanitized`: for relevant sentences, a simplified version to be fed to the embedding

    A sentence that already appeared earlier in the list is demoted to not
    relevant, so the summary never repeats itself.

    Parameters
    ----------
    source: :class:`list`
        list of objects
    indices: iterable of :class:`int`
        Indexes of the source items to select
    getter: callable, optional
        Tells how to convert a source entry into text.
    tester: callable, optional
        Tells if the sentence is eligible for being part of the summary.

    Returns
    -------
    list of dict

    Examples
    --------
    >>> doc1 = ("This is a short sentence! This is a sentence with reference to the url http://www.ix.com! "
    ... "This sentence is not too short and not too long, without URL and without citation. "
    ... "I have many things to say in that sentence, to the point "
    ... "I do not know if I will stop anytime soon but don't let it stop "
    ... "you from reading this meaninless garbage and this goes on and "
    ... "this goes on and this goes on and this goes on and this goes on and "
    ... "this goes on and this goes on and this goes on and this goes on "
    ... "and this goes on and this goes on and this goes on and this goes "
    ... "on and this goes on and this goes on and this goes on and this goes "
    ... "on and this goes on and that is all.")
    >>> doc2 = ("This is a a sentence with some citations [3, 7]. "
    ... "This sentence is not too short and not too long, without URL and without citation. "
    ... "Note that the previous sentence is already present in doc1. "
    ... "The enzyme cytidine monophospho-N-acetylneuraminic acid hydroxylase (CMAH) catalyzes "
    ... "the synthesis of Neu5Gc by hydroxylation of Neu5Ac (Schauer et al. 1968).")
    >>> extract_sentences([doc1, doc2], [1, 0]) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    [{'index': 0, 'sentence': 'This is a a sentence with some citations [3, 7].', 'relevant': False, 'sanitized': ''},
    {'index': 1, 'sentence': 'This sentence is not too short and not too long, without URL and without citation.',
    'relevant': True, 'sanitized': 'This sentence is not too short and not too long without URL and without citation'},
    {'index': 2, 'sentence': 'Note that the previous sentence is already present in doc1.',
    'relevant': True, 'sanitized': 'Note that the previous sentence is already present in doc'},
    {'index': 3, 'sentence': 'The enzyme cytidine monophospho-N-acetylneuraminic acid hydroxylase (CMAH) catalyzes
    the synthesis of Neu5Gc by hydroxylation of Neu5Ac (Schauer et al. 1968).',
    'relevant': False, 'sanitized': ''},
    {'index': 4, 'sentence': 'This is a short sentence!', 'relevant': False, 'sanitized': ''},
    {'index': 5, 'sentence': 'This is a sentence with reference to the url http://www.ix.com!',
    'relevant': False, 'sanitized': ''},
    {'index': 6, 'sentence': 'This sentence is not too short and not too long, without URL and without citation.',
    'relevant': False, 'sanitized': ''},
    {'index': 7, 'sentence': "I have many things to say in that sentence...",
    'relevant': False, 'sanitized': ''}]
    """
    if getter is None:
        getter = str
    if tester is None:
        tester = is_relevant_sentence
    # Flatten the selected documents into one ordered list of raw sentences.
    raw = [sent for j in indices for sent in make_sentences(getter(source[j]))]
    seen = set()
    result = []
    for pos, sent in enumerate(raw):
        entry = {'index': pos, 'sentence': sent, 'relevant': tester(sent)}
        if entry['relevant'] and sent in seen:
            # Duplicate of an earlier sentence: keep it, but drop relevance.
            entry['relevant'] = False
        else:
            seen.add(sent)
        entry['sanitized'] = sanitize_text(sent) if entry['relevant'] else ""
        result.append(entry)
    return result
default_summarizer_parameters = {
'order': 'rank',
'text_getter': None,
'sentence_tester': is_relevant_sentence,
'itf': True,
'post_processing': lambda summa, i: summa.sentences_[i]['sentence'],
'sentence_gismo_parameters': {'post': False, 'resolution': .99},
'num_documents': None,
'num_query': None,
'num_sentences': None,
'max_chars': None}
"""
List of parameters for the summarizer with their default values.
Parameters
-----------
order: :class:`str`
Sorting function.
text_getter: callable
Extraction of text from corpus item. If not specify, the to_text of the :class:`~gismo.corpus.Corpus` will be used.
sentence_tester: callable
Function that estimates if a sentence is eligible to be part of the summary
itf: :class:`bool`
Use of ITF normalization in the sentence-level Gismo
post_processing: callable
post_processing transformation. Signature is (:class:`~sisu.summarizer.Summarizer`, :class:`int`) -> :class:`str`
sentence_gismo_parameters: :class:`dict`
Tuning of sentence-level gismo. `post` MUST be set to False.
num_documents: :class:`int` or None
Number of documents to pre-select
num_query: :class:`int` or None
Number of features to use in generic query
num_sentences: :class:`int` or None
Number of sentences to return
max_chars: :class:`int` or None
Maximal number of characters to return
"""
class Summarizer:
    """
    Summarizer class.

    Parameters
    ----------
    gismo: :class:`~gismo.gismo.Gismo`
        Gismo of the documents to analyze.
    kwargs: :class:`dict`
        Parameters of the summarizer (see :obj:`~sisu.summarizer.default_summarizer_parameters` for details).

    Attributes
    ----------
    query_: :class:`str`
        Query used to summarize.
    sentences_: :class:`list` of :class:`dict`
        Selected sentences. Each sentence is a dictionary with the following keys:

        - `index`: position of the sentence in the returned list
        - `sentence`: the actual sentence
        - `relevant`: a boolean that tells if the sentence is eligible for being part of the summary
        - `sanitized`: for relevant sentences, a simplified version to be fed to the embedding
    order_: :class:`numpy.ndarray`
        Proposed incomplete ordering of the :class:`~sisu.summarizer.Summarizer.sentences_`
    sentence_gismo_: :class:`~gismo.gismo.Gismo`
        Gismo running at sentence level.
    parameters: :class:`~gismo.parameters.Parameters`
        Handler of parameters.

    Examples
    --------
    The package contains a data folder with a toy gismo with articles related to Covid-19. We load it.

    >>> gismo = Gismo(filename="toy_gismo", path="data")

    Then we build a summarizer out of it. We tell to fetch the sentences from the content of the articles.

    >>> summa = Summarizer(gismo, text_getter = lambda d: d['content'])

    Ask for a summary on *bat* with a maximal budget of 500 characters, using pure TF-IDF sentence embedding.

    >>> summa('bat', max_chars=500, itf=False) # doctest: +NORMALIZE_WHITESPACE
    ['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3) with
    Bat SARS-like coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate Bat-SL-CoVZXC21,
    the latter two were shown to share 89.1% and 88.6% sequence identity to 2019-nCoV S-protein
    (supplementary figure 1) .',
    'Within our bat-hemoplasma network, genotype sharing was restricted to five host communities,
    380 whereas six genotypes were each restricted to a single bat species (Fig. 5A ).']

    Now a summary based on the *cosine* ordering, using the content of abstracts and pure TF-IDF sentence embedding.

    >>> summa('bat', max_chars=500, order='cosine', text_getter = lambda d: d['abstract']) # doctest: +NORMALIZE_WHITESPACE
    ['Bat dipeptidyl peptidase 4 (DPP4) sequences were closely related to 38 those of human and non-human
    primates but distinct from dromedary DPP4 sequence.',
    'The multiple sequence alignment data correlated with already published reports on SARS-CoV-2
    indicated that it is closely related to Bat-Severe Acute Respiratory Syndrome like coronavirus
    (Bat CoV SARS-like) and wellstudied Human SARS.',
    '(i.e., hemoplasmas) across a species-rich 40 bat community in Belize over two years.']

    Now 4 sentences using a *coverage* ordering.

    >>> summa('bat', num_sentences=4, order='coverage') # doctest: +NORMALIZE_WHITESPACE
    ['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3)
    with Bat SARS-like coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate
    Bat-SL-CoVZXC21, the latter two were shown to share 89.1% and 88.6% sequence identity
    to 2019-nCoV S-protein (supplementary figure 1) .',
    'However, we have not done the IDPs analysis for ORF10 from the Bat-SL-CoVZC45 strain since we
    have taken different strain of Bat CoV (reviewed strain HKU3-1) in our study.',
    'To test the dependence of the hemoplasma 290 phylogeny upon the bat phylogeny and thus assess
    evidence of evolutionary codivergence, we 291 applied the Procrustes Approach to Cophylogeny
    (PACo) using distance matrices and the paco 292 We used hemoplasma genotype assignments to
    create a network, with each node representing a 299 bat species and edges representing shared
    genotypes among bat species pairs.',
    'However, these phylogenetic patterns in prevalence were decoupled from those describing bat
    526 species centrality in sharing hemoplasmas, such that genotype sharing was generally
    restricted 527 by bat phylogeny.']

    As you can see, there are some ``However, '' in the answers.
    A bit of NLP post_processing can take care of those.

    >>> import spacy
    >>> nlp = spacy.load("en_core_web_sm")
    >>> post_nlp = PostNLP(nlp)
    >>> summa('bat', num_sentences=4, order='coverage', post_processing=post_nlp) # doctest: +NORMALIZE_WHITESPACE
    ['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3)
    with Bat SARS-like coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate
    Bat-SL-CoVZXC21, the latter two were shown to share 89.1% and 88.6% sequence identity
    to 2019-nCoV S-protein (supplementary figure 1) .',
    'We have not done the IDPs analysis for ORF10 from the Bat-SL-CoVZC45 strain since we
    have taken different strain of Bat CoV (reviewed strain HKU3-1) in our study.',
    'To test the dependence of the hemoplasma 290 phylogeny upon the bat phylogeny and thus assess
    evidence of evolutionary codivergence, we 291 applied the Procrustes Approach to Cophylogeny
    (PACo) using distance matrices and the paco 292 We used hemoplasma genotype assignments to
    create a network, with each node representing a 299 bat species and edges representing shared
    genotypes among bat species pairs.',
    'These phylogenetic patterns in prevalence were decoupled from those describing bat
    526 species centrality in sharing hemoplasmas, such that genotype sharing was generally
    restricted 527 by bat phylogeny.']
    """
    def __init__(self, gismo, **kwargs):
        # Document-level Gismo used for ranking and embeddings.
        self.gismo = gismo
        self.query_ = None           # last query actually ranked
        self.sentences_ = None       # extracted sentence dicts
        self.order_ = None           # proposed ordering of sentences_
        self.sentence_gismo_ = None  # sentence-level Gismo (built lazily)
        self.parameters = Parameters(parameter_list=default_summarizer_parameters, **kwargs)
        if self.parameters.text_getter is None:
            # Fall back to the corpus' own text extractor.
            self.parameters.text_getter = self.gismo.corpus.to_text

    def rank_documents(self, query, num_query=None):
        """
        Perform a Gismo query at document-level. If the query fails, builds a generic query instead.
        The :attr:`~sisu.summarizer.Summarizer.gismo` and
        :attr:`~sisu.summarizer.Summarizer.query_` attributes are updated.

        Parameters
        ----------
        query: :class:`str`
            Input text
        num_query: :class:`int`
            Number of words of the generic query, if any

        Returns
        -------
        None
        """
        if num_query is None:
            num_query = self.parameters.num_query
        success = self.gismo.rank(query)
        if success:
            self.query_ = query
        else:
            # Fallback: build a generic query from the top-ranked features.
            self.query_ = " ".join(self.gismo.get_features_by_rank(k=num_query))
            self.gismo.rank(self.query_)

    def build_sentence_source(self, num_documents=None, getter=None, tester=None):
        """
        Creates the corpus of sentences (:attr:`~sisu.summarizer.Summarizer.sentences_`)

        Parameters
        ----------
        num_documents: :class:`int`, optional
            Number of documents to select (if not, Gismo will automatically decide).
        getter: callable
            Extraction of text from corpus item. If not specified, the to_text of the :class:`~gismo.corpus.Corpus` will be used.
        tester: callable
            Function that estimates if a sentence is eligible to be part of the summary.

        Returns
        -------
        None
        """
        if num_documents is None:
            num_documents = self.parameters.num_documents
        if getter is None:
            getter = self.parameters.text_getter
        if tester is None:
            tester = self.parameters.sentence_tester
        self.sentences_ = extract_sentences(source=self.gismo.corpus,
                                            indices=self.gismo.get_documents_by_rank(k=num_documents,
                                                                                     post=False),
                                            getter=getter,
                                            tester=tester)

    def build_sentence_gismo(self, itf=None, s_g_p=None):
        """
        Creates the Gismo of sentences (:attr:`~sisu.summarizer.Summarizer.sentence_gismo_`)

        Parameters
        ----------
        itf: :class:`bool`, optional
            Applies TF-IDTF embedding. If False, TF-IDF embedding is used.
        s_g_p: :class:`dict`
            Parameters for the sentence Gismo.

        Returns
        -------
        None
        """
        if itf is None:
            itf = self.parameters.itf
        if s_g_p is None:
            s_g_p = self.parameters.sentence_gismo_parameters
        sentence_corpus = Corpus(source=self.sentences_, to_text=lambda s: s['sanitized'])
        # TF-IDTF (Embedding) vs plain TF-IDF (IdfEmbedding), fitted on the
        # document-level embedding so both share the same feature space.
        sentence_embedding = Embedding() if itf else IdfEmbedding()
        sentence_embedding.fit_ext(embedding=self.gismo.embedding)
        sentence_embedding.transform(sentence_corpus)
        self.sentence_gismo_ = Gismo(sentence_corpus, sentence_embedding, **s_g_p)

    def build_coverage_order(self, k):
        """
        Populate :attr:`~sisu.summarizer.Summarizer.order_` with a covering order with
        target number of sentences *k*. The actual number of indices is stretched
        by the sentence Gismo stretch factor.

        Parameters
        ----------
        k: :class:`int`
            Number of optimal covering sentences.

        Returns
        -------
        :class:`numpy.ndarray`
            Covering order.
        """
        p = self.sentence_gismo_.parameters(post=False)
        cluster = self.sentence_gismo_.get_documents_by_cluster(k=int(k * p['stretch']), **p)
        return covering_order(cluster, wide=p['wide'])

    def summarize(self, query="", **kwargs):
        """
        Performs a full run of all summary-related operations:

        - Rank a query at document level, fallback to a generic query if the query fails;
        - Extract sentences from the top documents
        - Order sentences by one of the three methods proposed, *rank*, *coverage*, and *cosine*
        - Apply post-processing and return list of selected sentences.

        Note that calling a :class:`~sisu.summarizer.Summarizer` will call its
        :meth:`~sisu.summarizer.Summarizer.summarize` method.

        Parameters
        ----------
        query: :class:`str`
            Query to run.
        kwargs: :class:`dict`
            Runtime specific parameters
            (see :obj:`~sisu.summarizer.default_summarizer_parameters` for possible arguments).

        Returns
        -------
        :class:`list` of :class:`str`
            Summary.
        """
        # Instantiate parameters for the call
        p = self.parameters(**kwargs)
        # Perform query, fallback to generic query in case of failure
        self.rank_documents(query=query, num_query=p['num_query'])
        # Extract and preprocess sentences
        self.build_sentence_source(num_documents=p['num_documents'], getter=p['text_getter'],
                                   tester=p['sentence_tester'])
        # Order sentences
        if p['order'] == 'cosine':
            self.order_ = cosine_order(self.gismo.embedding.query_projection, self.sentences_, self.query_)
        elif p['order'] in {'rank', 'coverage'}:
            self.build_sentence_gismo(itf=p['itf'], s_g_p=p['sentence_gismo_parameters'])
            # NOTE(review): ranks with the raw `query`, not the possibly
            # rewritten self.query_ fallback — confirm this asymmetry is intended.
            self.sentence_gismo_.rank(query)
            if p['num_sentences'] is None:
                p['num_sentences'] = auto_k(data=self.sentence_gismo_.diteration.x_relevance,
                                            order=self.sentence_gismo_.diteration.x_order,
                                            max_k=self.sentence_gismo_.parameters.max_k,
                                            target=self.sentence_gismo_.parameters.target_k)
            if p['order'] == 'rank':
                self.order_ = self.sentence_gismo_.diteration.x_order
            else:
                self.order_ = self.build_coverage_order(p['num_sentences'])
        if p['max_chars'] is None:
            results = [p['post_processing'](self, i) for i in self.order_[:p['num_sentences']]]
            # Post-processing may blank out a sentence; drop empty results.
            return [txt for txt in results if len(txt)>0]
        else:
            results = []
            length = 0
            # Maximal number of sentences that will be processed
            max_sentences = int(p['max_chars']/50)
            for i in self.order_[:max_sentences]:
                txt = p['post_processing'](self, i)
                l = len(txt)
                if l>0 and length+l < p['max_chars']:
                    results.append(txt)
                    length += l
                    # Stop early once the budget is nearly exhausted.
                    if length > .98*p['max_chars']:
                        break
            return results

    def __call__(self, query="", **kwargs):
        # Convenience alias: summa(query, ...) == summa.summarize(query, ...)
        return self.summarize(query, **kwargs)
class PostNLP:
    """
    Post-processor for the :class:`~sisu.summarizer.Summarizer` that leverages a spacy NLP engine.

    - Discard sentences with no verb.
    - Remove adverbs and punctuations that start a sentence (e.g. "However, we ..." -> "We ...").
    - Optionally, if the engine supports co-references, resolve them.

    Parameters
    ----------
    nlp: callable
        A Spacy nlp engine.
    coref: :class:`bool`
        Resolve co-references if the nlp engine supports it.
    """
    def __init__(self, nlp, coref=False):
        self.nlp = nlp
        self.coref = coref

    def __call__(self, summa, i):
        """
        Clean sentence *i* of *summa* in place and return the cleaned text.

        Returns "" (and flags the sentence as not relevant) when the sentence
        contains no verb or becomes empty after stripping.
        """
        nlp_sent = self.nlp(summa.sentences_[i]['sentence'])
        tags = {token.tag_ for token in nlp_sent}
        # A sentence without any verb tag (VB*) cannot stand on its own.
        if not any(t.startswith("VB") for t in tags):
            summa.sentences_[i]['relevant'] = False
            return ""
        # Strip leading adverbs, then at most one leading punctuation token.
        # Fix: the original tested nlp_sent[0] BEFORE checking the length,
        # raising IndexError when every token had been stripped.
        while len(nlp_sent) > 0 and nlp_sent[0].pos_ == "ADV":
            nlp_sent = nlp_sent[1:]
        if len(nlp_sent) > 0 and nlp_sent[0].pos_ == "PUNCT":
            nlp_sent = nlp_sent[1:]
        txt = nlp_sent.text if len(nlp_sent) > 0 else ""
        if not txt:
            # Nothing left after stripping: demote the sentence.
            summa.sentences_[i]['relevant'] = False
            return ""
        # Re-capitalize the first remaining character.
        summa.sentences_[i]['sentence'] = f"{txt[0].upper()}{txt[1:]}"
        if "PRP" in tags and self.coref and hasattr(nlp_sent._, 'has_coref'):
            # Resolve pronouns against a window of up to two previous sentences.
            extract_str = " ".join([s['sentence'] for s in summa.sentences_[max(0, i - 2): i + 1]])
            extract = self.nlp(extract_str)
            if extract._.has_coref:
                resolved_extract = extract._.coref_resolved
                summa.sentences_[i]['sentence'] = make_sentences(resolved_extract)[-1]
        return summa.sentences_[i]['sentence']
| 46.077895
| 161
| 0.646
| 2,836
| 21,887
| 4.892454
| 0.184767
| 0.018739
| 0.01427
| 0.016865
| 0.388685
| 0.355243
| 0.333766
| 0.323387
| 0.31618
| 0.308036
| 0
| 0.012698
| 0.255174
| 21,887
| 474
| 162
| 46.175105
| 0.838425
| 0.558596
| 0
| 0.067568
| 0
| 0
| 0.080032
| 0.010139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074324
| false
| 0
| 0.067568
| 0.006757
| 0.209459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
559fa91e2cb3fcb7a60d3f0698d9ba9ef4cfe606
| 4,482
|
py
|
Python
|
automr/bridge.py
|
hebrewsnabla/pyAutoMR
|
8e81ed7fd780abd94f8b51e48ee4b980a868c204
|
[
"Apache-2.0"
] | 5
|
2021-06-03T07:49:02.000Z
|
2022-02-21T11:35:20.000Z
|
automr/bridge.py
|
hebrewsnabla/pyAutoMR
|
8e81ed7fd780abd94f8b51e48ee4b980a868c204
|
[
"Apache-2.0"
] | 2
|
2022-01-20T08:33:59.000Z
|
2022-03-26T12:21:15.000Z
|
automr/bridge.py
|
hebrewsnabla/pyAutoMR
|
8e81ed7fd780abd94f8b51e48ee4b980a868c204
|
[
"Apache-2.0"
] | 1
|
2022-02-21T11:35:34.000Z
|
2022-02-21T11:35:34.000Z
|
import numpy as np
import os
from automr import dump_mat
from functools import partial, reduce
# Make every print flush immediately (useful when output is piped to a log).
print = partial(print, flush=True)
# einsum with contraction-path optimization enabled by default.
einsum = partial(np.einsum, optimize=True)
def print_mol(mol):
    """Dump basis-set and geometry information of a PySCF-style molecule.

    Prints the raw basis/atom attributes to stdout, then writes a formatted
    ``[INPUT]`` basis-set table (one row per shell) to ``mol.stdout``.

    Parameters
    ----------
    mol : Mole-like object
        Must expose ``_basis``, ``atom``, ``_atom``, ``aoslice_by_atom()``,
        ``ao_labels()`` and a writable ``stdout``.
    """
    print(mol._basis)
    print(mol.atom)
    print(mol._atom)
    print(mol.aoslice_by_atom())
    print(mol.ao_labels())
    #if mol.verbose >= logger.DEBUG:
    mol.stdout.write('[INPUT] ---------------- BASIS SET ---------------- \n')
    mol.stdout.write('[INPUT] l, kappa, [nprim/nctr], '
                     'expnt, c_1 c_2 ...\n')
    for atom, basis_set in mol._basis.items():
        mol.stdout.write('[INPUT] %s\n' % atom)
        for b in basis_set:
            if isinstance(b[1], int):
                # Shell spec carries an explicit kappa value before the primitives.
                kappa = b[1]
                b_coeff = b[2:]
            else:
                kappa = 0
                b_coeff = b[1:]
            nprim = len(b_coeff)
            nctr = len(b_coeff[0])-1
            if nprim < nctr:
                # Fix: the original called the undefined name `logger`
                # (NameError); emit the warning on mol.stdout instead.
                mol.stdout.write('WARN: num. primitives smaller than num. contracted basis\n')
            mol.stdout.write('[INPUT] %d %2d [%-5d/%-4d] '
                             % (b[0], kappa, nprim, nctr))
            for k, x in enumerate(b_coeff):
                if k == 0:
                    mol.stdout.write('%-15.12g ' % x[0])
                else:
                    # Align continuation rows under the first exponent column.
                    mol.stdout.write(' '*32+'%-15.12g ' % x[0])
                for c in x[1:]:
                    mol.stdout.write(' %4.12g' % c)
                mol.stdout.write('\n')
def py2qchem(mf, basename, is_uhf=False):
    """Export SCF orbitals as a Q-Chem scratch file plus a matching input file.

    The alpha/beta MO coefficients and zeroed orbital energies are written in
    Q-Chem's ``53.0`` scratch-file layout under ``/tmp/qchem/<basename>/``,
    then an input file is generated via :func:`create_qchem_in`.

    Parameters
    ----------
    mf : SCF object
        Converged mean-field object providing ``mo_coeff`` and ``get_ovlp()``.
    basename : str
        Base name used for the scratch directory and the ``.in`` file.
    is_uhf : bool
        Whether ``mf.mo_coeff`` holds separate alpha/beta coefficient sets.
    """
    if is_uhf:
        mo_coeffa = mf.mo_coeff[0]
        mo_coeffb = mf.mo_coeff[1]
    else:
        # RHF: the same coefficients serve both spin channels.
        mo_coeffa = mf.mo_coeff
        mo_coeffb = mf.mo_coeff
    # Orbital energies are not used on read-in; write zeros as placeholders.
    mo_enea = np.zeros(len(mo_coeffa))
    mo_eneb = np.zeros(len(mo_coeffa))
    # Scale each AO row by the square root of the overlap diagonal, then
    # transpose to Q-Chem's row-per-MO layout.
    Sdiag = mf.get_ovlp().diagonal()**(0.5)
    mo_coeffa = einsum('ij,i->ij', mo_coeffa, Sdiag).T
    mo_coeffb = einsum('ij,i->ij', mo_coeffb, Sdiag).T
    guess_file = np.vstack([mo_coeffa, mo_coeffb, mo_enea, mo_eneb]).flatten()
    tmpbasename = '/tmp/qchem/' + basename
    # Fix: replaced the fragile shell-out `os.system('mkdir -p ...')` with
    # the portable stdlib call.
    os.makedirs(tmpbasename, exist_ok=True)
    # Fix: 53.0 is a raw binary dump (tofile with sep=''), so open the file
    # in binary mode instead of text mode.
    with open(tmpbasename + '/53.0', 'wb') as f:
        guess_file.tofile(f, sep='')
    create_qchem_in(mf, basename)
def create_qchem_in(mf, basename, uhf=False, sph=True):
    """Generate a Q-Chem GVB perfect-pairing input file ``<basename>.in``.

    Parameters
    ----------
    mf : SCF object
        Mean-field object providing ``mol`` (geometry, charge, spin).
    basename : str
        The input deck is written to ``basename + '.in'``.
    uhf : bool
        Request an unrestricted calculation.
    sph : bool
        Spherical (True) vs cartesian (False) basis functions.
    """
    geometry = mf.mol.format_atom(mf.mol.atom, unit=1)
    # $molecule section: charge/multiplicity followed by the coordinates.
    lines = ['$molecule\n']
    lines.append(' %d %d\n' % (mf.mol.charge, mf.mol.spin+1))
    for entry in geometry:
        lines.append(' %s %12.6f %12.6f %12.6f\n'
                     % (entry[0], entry[1][0], entry[1][1], entry[1][2]))
    lines.append('$end\n\n')
    # $rem section: GVB-PP settings with orbital read-in from scratch.
    lines.append('$rem\n')
    lines.append(' correlation = pp\n')
    lines.append(' gvb_local = 0\n')
    lines.append(' gvb_n_pairs = 2\n')
    lines.append(' gvb_print = 1\n')
    if uhf:
        lines.append(' unrestricted = true\n')
    lines.append(' basis = cc-pvdz\n')
    lines.append(' print_orbitals = true\n')
    lines.append(' sym_ignore = true\n')
    # purecart: 1111 selects spherical harmonics, 2222 cartesian functions.
    lines.append(' purecart = 1111\n' if sph else ' purecart = 2222\n')
    lines.append(' scf_guess_print = 2\n')
    lines.append(' scf_guess = read\n')
    lines.append(' thresh = 12\n')
    lines.append('$end\n\n')
    with open(basename + '.in', 'w') as handle:
        handle.writelines(lines)
def qchem2py(basename):
    """Read Q-Chem scratch file 53.0 and rebuild the (alpha, beta) MO matrices.

    Parameters
    ----------
    basename : str
        Name of the scratch sub-directory under ``/tmp/qchem``.

    Returns
    -------
    tuple
        ``(moa, mob)`` MO coefficient matrices, each of shape (nmo, nmo).
    """
    with open('/tmp/qchem/' + basename + '/53.0', 'r') as handle:
        raw = np.fromfile(handle)
    print(raw.shape)
    total = raw.shape[0]
    # The file holds two nmo x nmo coefficient blocks plus two nmo-long
    # energy vectors: 2*nmo*(nmo+1) doubles in total, solved here for nmo.
    nmo = int(np.sqrt(total/2.0+0.25)-0.5)
    block = nmo*nmo
    alpha = raw[:block].reshape(nmo, nmo).T
    beta = raw[block:2*block].reshape(nmo, nmo).T
    return (alpha, beta)
| 35.015625
| 86
| 0.51071
| 669
| 4,482
| 3.312407
| 0.234679
| 0.100181
| 0.088448
| 0.022563
| 0.38222
| 0.259477
| 0.222473
| 0.222473
| 0.219314
| 0.219314
| 0
| 0.032612
| 0.309014
| 4,482
| 128
| 87
| 35.015625
| 0.682919
| 0.055556
| 0
| 0.065217
| 0
| 0
| 0.172684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.097826
| 0.119565
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55a1b6b516c4d12eb63cdf47d747201063521f8c
| 487
|
py
|
Python
|
Example/playstore.py
|
goodop/api-imjustgood.com
|
6406b531c4393fa8a4ace3c206d23895da915caf
|
[
"MIT"
] | 4
|
2021-01-01T10:20:13.000Z
|
2021-11-08T09:32:54.000Z
|
Example/playstore.py
|
goodop/api-imjustgood.com
|
6406b531c4393fa8a4ace3c206d23895da915caf
|
[
"MIT"
] | null | null | null |
Example/playstore.py
|
goodop/api-imjustgood.com
|
6406b531c4393fa8a4ace3c206d23895da915caf
|
[
"MIT"
] | 25
|
2021-01-09T18:22:32.000Z
|
2021-05-29T07:42:06.000Z
|
from justgood import imjustgood

# Initialise the API client with your personal key.
media = imjustgood("YOUR_APIKEY_HERE")
query = "gojek"  # example query
data = media.playstore(query)

# Build a human-readable listing of the results.
pieces = ["Playstore :"]
for number, item in enumerate(data["result"], start=1):
    pieces.append("\n\n{}. {}".format(number, item["title"]))
    pieces.append("\nDeveloper : {}".format(item["developer"]))
    pieces.append("\nThumbnail : {}".format(item["thumbnail"]))
    pieces.append("\nURL : {}".format(item["pageUrl"]))
print("".join(pieces))

# Raw JSON results
print(data)
| 24.35
| 55
| 0.63655
| 59
| 487
| 5.220339
| 0.576271
| 0.068182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004938
| 0.168378
| 487
| 19
| 56
| 25.631579
| 0.755556
| 0.092402
| 0
| 0
| 0
| 0
| 0.273973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55a63e41c61dfc7f2803753c38bd275ef075fcb4
| 10,272
|
py
|
Python
|
codes/3_derive_elementary_effects.py
|
aviolinist/EEE
|
032e2029815229875048cc92dd7da24ff3f71e93
|
[
"MIT"
] | 6
|
2019-09-27T15:38:37.000Z
|
2021-02-03T13:58:01.000Z
|
codes/3_derive_elementary_effects.py
|
aviolinist/EEE
|
032e2029815229875048cc92dd7da24ff3f71e93
|
[
"MIT"
] | null | null | null |
codes/3_derive_elementary_effects.py
|
aviolinist/EEE
|
032e2029815229875048cc92dd7da24ff3f71e93
|
[
"MIT"
] | 5
|
2019-09-27T15:38:52.000Z
|
2022-03-22T17:24:37.000Z
|
#!/usr/bin/env python
from __future__ import print_function
# Copyright 2019 Juliane Mai - juliane.mai(at)uwaterloo.ca
#
# License
# This file is part of the EEE code library for "Computationally inexpensive identification
# of noninformative model parameters by sequential screening: Efficient Elementary Effects (EEE)".
#
# The EEE code library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The MVA code library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with The EEE code library.
# If not, see <https://github.com/julemai/EEE/blob/master/LICENSE>.
#
# If you use this method in a publication please cite:
#
# M Cuntz & J Mai et al. (2015).
# Computationally inexpensive identification of noninformative model parameters by sequential screening.
# Water Resources Research, 51, 6417-6441.
# https://doi.org/10.1002/2015WR016907.
#
#
#
# python 3_derive_elementary_effects.py \
# -i example_ishigami-homma/model_output.pkl \
# -d example_ishigami-homma/parameters.dat \
# -m example_ishigami-homma/parameter_sets_1_para3_M.dat \
# -v example_ishigami-homma/parameter_sets_1_para3_v.dat \
# -o example_ishigami-homma/eee_results.dat
"""
Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i)
using specified model parameters (option -d). The model parameters were sampled beforehand as Morris
trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The
Elementary Effects are stored in a file (option -o).
History
-------
Written, JM, Mar 2019
"""
# -------------------------------------------------------------------------
# Command line arguments (defaults; overridable via the optparse options below)
#
modeloutputs = 'example_ishigami-homma/model_output.pkl'    # -i: pickled model outputs
modeloutputkey = 'All'                                      # -k: key into the output dict ('All' = multi-objective)
maskfile = 'example_ishigami-homma/parameters.dat'          # -d: parameter specification file
morris_M = 'example_ishigami-homma/parameter_sets_1_para3_M.dat'  # -m: unscaled Morris parameter sets
morris_v = 'example_ishigami-homma/parameter_sets_1_para3_v.dat'  # -v: changed-parameter indicators
outfile = 'example_ishigami-homma/eee_results.dat'          # -o: Elementary Effects output file
skip = None # number of lines to skip in Morris files
import optparse
parser = optparse.OptionParser(usage='%prog [options]',
description="Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i) using specified model parameters (option -d). The model parameters were sampled beforehand as Morris trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The Elementary Effects are stored in a file (option -o).")
parser.add_option('-i', '--modeloutputs', action='store',
default=modeloutputs, dest='modeloutputs', metavar='modeloutputs',
help="Name of file used to save (scalar) model outputs in a pickle file (default: 'model_output.pkl').")
parser.add_option('-k', '--modeloutputkey', action='store',
default=modeloutputkey, dest='modeloutputkey', metavar='modeloutputkey',
help="Key of model output dictionary stored in pickle output file. If 'All', all model outputs are taken into account and multi-objective EEE is applied. (default: 'All').")
parser.add_option('-d', '--maskfile', action='store', dest='maskfile', type='string',
default=maskfile, metavar='File',
help='Name of file where all model parameters are specified including their distribution, distribution parameters, default value and if included in analysis or not. (default: maskfile=parameters.dat).')
parser.add_option('-m', '--morris_M', action='store', dest='morris_M', type='string',
default=morris_M, metavar='morris_M',
help="Morris trajectory information: The UNSCALED parameter sets. (default: 'parameter_sets_1_para3_M.dat').")
parser.add_option('-v', '--morris_v', action='store', dest='morris_v', type='string',
default=morris_v, metavar='morris_v',
help="Morris trajectory information: The indicator which parameter changed between subsequent sets in a trajectory. (default: 'parameter_sets_1_para3_v.dat').")
parser.add_option('-s', '--skip', action='store',
default=skip, dest='skip', metavar='skip',
help="Number of lines to skip in Morris output files (default: None).")
parser.add_option('-o', '--outfile', action='store', dest='outfile', type='string',
default=outfile, metavar='File',
help='File containing Elementary Effect estimates of all model parameters listed in parameter information file. (default: eee_results.dat).')
(opts, args) = parser.parse_args()
modeloutputs = opts.modeloutputs
modeloutputkey = opts.modeloutputkey
maskfile = opts.maskfile
morris_M = opts.morris_M
morris_v = opts.morris_v
outfile = opts.outfile
skip = opts.skip
del parser, opts, args
# -----------------------
# add subfolder scripts/lib to search path
# -----------------------
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))  # directory containing this script
sys.path.append(dir_path+'/lib')  # make the bundled helper modules importable

import numpy as np
import pickle
from fsread import fsread # in lib/
from autostring import astr # in lib/
# -------------------------
# read parameter info file
# -------------------------
# parameter info file has following header:
#       # para   dist    lower  upper  default  informative(0)_or_noninformative(1)
#       #                mean   stddev
# fsread returns numeric columns (nc) and string columns (snc) separately.
nc,snc = fsread(maskfile, comment="#",cskip=1,snc=[0,1],nc=[2,3,4,5])
snc = np.array(snc)
para_name   = snc[:,0]   # parameter names
para_dist   = snc[:,1]   # distribution names
lower_bound = nc[:,0]
upper_bound = nc[:,1]
initial     = nc[:,2]    # default parameter values

# if informative(0)    -> maskpara=False
# if noninformative(1) -> maskpara=True
mask_para = np.where((nc[:,3].flatten())==1.,True,False)
dims_all  = np.shape(mask_para)[0]          # total number of parameters in the file
idx_para  = np.arange(dims_all)[mask_para]  # indexes of parameters which will be changed [0,npara-1]
dims      = np.sum(mask_para)               # number of analysed (non-masked) parameters

# pick only non-masked bounds
lower_bound_mask = lower_bound[np.where(mask_para)]
upper_bound_mask = upper_bound[np.where(mask_para)]
para_dist_mask   = para_dist[np.where(mask_para)]
para_name_mask   = para_name[np.where(mask_para)]
# -------------------------
# read model outputs
# -------------------------
# The pickle file holds a dict {output_name: values}; either every key or a
# single user-selected key is analysed.
model_output = pickle.load( open( modeloutputs, "rb" ) )
if modeloutputkey == 'All':
    keys = list(model_output.keys())
else:
    keys = [ modeloutputkey ]
model_output = [ np.array(model_output[ikey]) for ikey in keys ]  # list of arrays, one per key
nkeys = len(model_output)
# -------------------------
# read Morris M
# -------------------------
# Reads the UNSCALED Morris parameter sets. If the number of header lines to
# skip was not given on the command line, it is taken from the first line of
# the file itself (format "...: <nskip>").
with open(morris_M, "r") as ff:
    parasets = ff.readlines()
if skip is None:
    skip = int(parasets[0].strip().split(':')[1])
else:
    # np.int was removed from modern NumPy; the builtin int is its exact
    # equivalent (it also converts the CLI string form).
    skip = int(skip)
parasets = parasets[skip:]
for iparaset, paraset in enumerate(parasets):
    parasets[iparaset] = list(map(float, paraset.strip().split()))
parasets = np.array(parasets)  # shape: (nsets, npara)
# -------------------------
# read Morris v
# -------------------------
# Reads, for every parameter set, the index of the parameter that changed
# relative to the previous set in the trajectory (-1 marks a trajectory start).
# NOTE(review): if 'skip' started as None it was already set while reading the
# Morris M file, so this file's own header value is only read when the M file
# was not -- this assumes both files share the same header length; confirm.
with open(morris_v, "r") as ff:
    parachanged = ff.readlines()
if skip is None:
    skip = int(parachanged[0].strip().split(':')[1])
else:
    # np.int was removed from modern NumPy; builtin int is equivalent.
    skip = int(skip)
parachanged = parachanged[skip:]
for iparachanged, parachan in enumerate(parachanged):
    parachanged[iparachanged] = int(parachan.strip())
parachanged = np.array(parachanged)
# -------------------------
# calculate Elementary Effects
# -------------------------
# ee[ipara, ikey]         accumulates |dY| / |dx_ipara| over all trajectory steps
# ee_counter[ipara, ikey] counts how many steps contributed, for averaging below
ee         = np.zeros([dims_all, nkeys], dtype=float)
ee_counter = np.zeros([dims_all, nkeys], dtype=int)
# number of Morris trajectories (np.int removed from NumPy -> builtin int);
# kept for reference, not used below
ntraj = int(np.shape(parasets)[0] / (dims + 1))
nsets = np.shape(parasets)[0]

for ikey in range(nkeys):
    for iset in range(nsets):
        ipara_changed = parachanged[iset]
        if ipara_changed != -1:
            ee_counter[ipara_changed, ikey] += 1
            if len(np.shape(model_output[ikey])) == 1:
                # scalar model output: one elementary effect per trajectory step
                ee[ipara_changed, ikey] += np.abs(model_output[ikey][iset] - model_output[ikey][iset+1]) / np.abs(parasets[iset, ipara_changed] - parasets[iset+1, ipara_changed])
            elif len(np.shape(model_output[ikey])) == 2:
                # 1D model output: average the elementary effect over the output dimension
                ee[ipara_changed, ikey] += np.mean(np.abs(model_output[ikey][iset, :] - model_output[ikey][iset+1, :]) / np.abs(parasets[iset, ipara_changed] - parasets[iset+1, ipara_changed]))
            else:
                raise ValueError('Only scalar and 1D model outputs are supported!')

# average accumulated effects over the number of contributing steps
for ikey in range(nkeys):
    for ipara in range(dims_all):
        if ee_counter[ipara, ikey] > 0:
            ee[ipara, ikey] /= ee_counter[ipara, ikey]
# -------------------------
# write final file
# -------------------------
# format:
#     # model output #1: 'out1'
#     # model output #2: 'out2'
#     # ii para_name elemeffect(ii),ii=1:3,jj=1:1 counter(ii),ii=1:3,jj=1:1
#     1 'x_1' 0.53458196335158181 5
#     2 'x_2' 7.0822368906630215 5
#     3 'x_3' 3.5460086652980554 5
f = open(outfile, 'w')
# header: one line per analysed model-output key
for ikey in range(nkeys):
    f.write('# model output #'+str(ikey+1)+': '+keys[ikey]+'\n')
f.write('# ii para_name elemeffect(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' counter(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' \n')
# one row per parameter: index, name, averaged effects, contribution counters
for ipara in range(dims_all):
    f.write(str(ipara)+' '+para_name[ipara]+' '+' '.join(astr(ee[ipara,:],prec=8))+' '+' '.join(astr(ee_counter[ipara,:]))+'\n')
f.close()
print("wrote: '"+outfile+"'")
| 43.897436
| 405
| 0.633178
| 1,336
| 10,272
| 4.760479
| 0.255988
| 0.034591
| 0.031447
| 0.017925
| 0.343553
| 0.30739
| 0.245283
| 0.207233
| 0.207233
| 0.198113
| 0
| 0.020887
| 0.207652
| 10,272
| 233
| 406
| 44.085837
| 0.760536
| 0.292445
| 0
| 0.125
| 0
| 0.05
| 0.287762
| 0.045078
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55a8a143755092a98ad8640901e8dbdb8d58845f
| 9,439
|
py
|
Python
|
install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | 1
|
2020-02-15T10:42:56.000Z
|
2020-02-15T10:42:56.000Z
|
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import subprocess
from threading import Thread
from Queue import Queue
import tempfile
import sys
import traceback
from .logger import get_logger
logger = get_logger(__name__)
class ReadThread(Thread):
    """
    Worker thread that drains a pipe into a queue, line by line.
    """

    def __init__(self, p_out, target_queue):
        """
        Constructor.

        :param p_out: Pipe to read.
        :param target_queue: Queue that will accumulate the pipe output.
        """
        super(ReadThread, self).__init__()
        self.pipe = p_out
        self.target_queue = target_queue

    def run(self):
        """
        Pushes every line from the pipe onto the queue until the pipe is
        closed (readline returns an empty string).
        """
        for line in iter(self.pipe.readline, ''):
            self.target_queue.put(line)
class Command(object):
    """
    Helper that runs external commands in a subprocess and captures their
    exit code, stdout and stderr.
    """

    @staticmethod
    def _create_temp_file():
        """
        :returns: Returns the path to a temporary file.
        """
        handle, path = tempfile.mkstemp(prefix="desktop_server")
        os.close(handle)
        return path

    @staticmethod
    def call_cmd(args):
        """
        Runs a command in a separate process.

        :param args: Command line tokens.

        :returns: A tuple containing (exit code, stdout, stderr).
        """
        # The commands that are being run are probably being launched from Desktop, which would
        # have a TANK_CURRENT_PC environment variable set to the site configuration. Since we
        # preserve that value for subprocesses (which is usually the behavior we want), the DCCs
        # being launched would try to run in the project environment and would get an error due
        # to the conflict.
        #
        # Clean up the environment to prevent that from happening.
        env = os.environ.copy()
        vars_to_remove = ["TANK_CURRENT_PC"]
        for var in vars_to_remove:
            if var in env:
                del env[var]

        # Launch the child process
        # Due to discrepencies on how child file descriptors and shell=True are
        # handled on Windows and Unix, we'll provide two implementations. See the Windows
        # implementation for more details.
        if sys.platform == "win32":
            ret, stdout_lines, stderr_lines = Command._call_cmd_win32(args, env)
        else:
            ret, stdout_lines, stderr_lines = Command._call_cmd_unix(args, env)

        out = ''.join(stdout_lines)
        err = ''.join(stderr_lines)

        return ret, out, err

    @staticmethod
    def _call_cmd_unix(args, env):
        """
        Runs a command in a separate process. Implementation for Unix based OSes.

        :param args: Command line tokens.
        :param env: Environment variables to set for the subprocess.

        :returns: A tuple containing (exit code, stdout, stderr).
        """
        # Note: Tie stdin to a PIPE as well to avoid this python bug on windows
        # http://bugs.python.org/issue3905
        # Queue code taken from: http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
        stdout_lines = []
        stderr_lines = []
        try:
            process = subprocess.Popen(
                args,
                stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                env=env
            )
            process.stdin.close()

            # Drain both pipes on background threads so neither can fill up
            # and deadlock the child.
            stdout_q = Queue()
            stderr_q = Queue()

            stdout_t = ReadThread(process.stdout, stdout_q)
            stdout_t.setDaemon(True)
            stdout_t.start()

            stderr_t = ReadThread(process.stderr, stderr_q)
            stderr_t.setDaemon(True)
            stderr_t.start()

            # Popen.communicate() doesn't play nicely if the stdin pipe is closed
            # as it tries to flush it causing an 'I/O error on closed file' error
            # when run from a terminal
            #
            # to avoid this, lets just poll the output from the process until
            # it's finished
            process.wait()
            try:
                process.stdout.flush()
                process.stderr.flush()
            except IOError:
                # This fails on OSX 10.7, but it looks like there's no ill side effect
                # from failing on that platform so we can ignore it.
                logger.exception("Error while flushing file descriptor:")
            stdout_t.join()
            stderr_t.join()

            while not stdout_q.empty():
                stdout_lines.append(stdout_q.get())

            while not stderr_q.empty():
                stderr_lines.append(stderr_q.get())

            ret = process.returncode
        # Catch Exception instead of the Python-2-only StandardError so the
        # module also works on Python 3.
        except Exception:
            # Do not log the command line, it might contain sensitive information!
            logger.exception("Error running subprocess:")

            ret = 1
            stderr_lines = traceback.format_exc().split()
            stderr_lines.append("%s" % args)

        return ret, stdout_lines, stderr_lines

    @staticmethod
    def _call_cmd_win32(args, env):
        """
        Runs a command in a separate process. Implementation for Windows.

        :param args: Command line tokens.
        :param env: Environment variables to set for the subprocess.

        :returns: A tuple containing (exit code, stdout, stderr).
        """
        stdout_lines = []
        stderr_lines = []
        # Track the temp-file paths outside the try block so the finally
        # clause can clean up even if creation itself fails partway through.
        stdout_path = None
        stderr_path = None
        try:
            stdout_path = Command._create_temp_file()
            stderr_path = Command._create_temp_file()

            # On Windows, file descriptors like sockets can be inherited by child
            # process and are only closed when the main process and all child
            # processes are closed. This is bad because it means that the port
            # the websocket server uses will never be released as long as any DCCs
            # or tank commands are running. Therefore, closing the Desktop and
            # restarting it for example wouldn't free the port and would give the
            # "port 9000 already in use" error we've seen before.

            # To avoid this, close_fds needs to be specified when launching a child
            # process. However, there's a catch. On Windows, specifying close_fds
            # also means that you can't share stdout, stdin and stderr with the child
            # process, which is required here because we want to capture the output
            # of the process.

            # Therefore on Windows we'll invoke the code in a shell environment. The
            # output will be redirected to two temporary files which will be read
            # when the child process is over.

            # Ideally, we'd be using this implementation on Unix as well. After all,
            # the syntax of the command line is the same. However, specifying shell=True
            # on Unix means that the following ["ls", "-al"] would be invoked like this:
            # ["/bin/sh", "-c", "ls", "-al"]. This means that only ls is sent to the
            # shell and -al is considered to be an argument of the shell and not part
            # of what needs to be launched. The naive solution would be to quote the
            # argument list and pass ["\"ls -al \""] to Popen, but that would ignore
            # the fact that there could already be quotes on that command line and
            # they would need to be escaped as well. Python 2's only utility to
            # escape strings for the command line is pipes.quote, which is deprecated.

            # Because of these reasons, we'll keep both implementations for now.

            args = args + ["1>", stdout_path, "2>", stderr_path]

            # Prevents the cmd.exe dialog from appearing on Windows.
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

            process = subprocess.Popen(
                args,
                close_fds=True,
                startupinfo=startupinfo,
                env=env,
                shell=True
            )
            process.wait()

            # Read back the output from the two.
            with open(stdout_path) as stdout_file:
                stdout_lines = [l for l in stdout_file]

            with open(stderr_path) as stderr_file:
                stderr_lines = [l for l in stderr_file]

            # Track the result code.
            ret = process.returncode

        except Exception:
            # Do not log the command line, it might contain sensitive information!
            logger.exception("Error running subprocess:")

            ret = 1
            # Bug fix: the original wrapped the token list in another list
            # ([...format_exc().split()]), which made ''.join() in call_cmd
            # fail on the nested list. Keep it flat, like the Unix path.
            stderr_lines = traceback.format_exc().split()
            stderr_lines.append("%s" % args)
        finally:
            # Bug fix: the temp files used to be deleted only on the error
            # path, leaking two files on every successful call. Clean up
            # unconditionally, but don't lose any sleep over temporary files
            # that can't be deleted.
            for path in (stdout_path, stderr_path):
                if path is not None:
                    try:
                        os.remove(path)
                    except OSError:
                        pass

        return ret, stdout_lines, stderr_lines
| 36.727626
| 123
| 0.604831
| 1,186
| 9,439
| 4.721754
| 0.306914
| 0.025536
| 0.018214
| 0.023571
| 0.19875
| 0.149821
| 0.13875
| 0.133393
| 0.096071
| 0.096071
| 0
| 0.005035
| 0.32673
| 9,439
| 256
| 124
| 36.871094
| 0.876161
| 0.469753
| 0
| 0.304348
| 0
| 0
| 0.027441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052174
| false
| 0.017391
| 0.069565
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55ae9ba4b65519bc33be7de8562a205f27c9a655
| 745
|
py
|
Python
|
brilws/cli/briltag_insertdata.py
|
xiezhen/brilws
|
e3652dd4506dff9d713184ff623b59bc11fbe2c7
|
[
"MIT"
] | 1
|
2017-03-23T16:26:06.000Z
|
2017-03-23T16:26:06.000Z
|
brilws/cli/briltag_insertdata.py
|
xiezhen/brilws
|
e3652dd4506dff9d713184ff623b59bc11fbe2c7
|
[
"MIT"
] | 1
|
2017-03-24T15:02:20.000Z
|
2017-10-02T13:43:26.000Z
|
brilws/cli/briltag_insertdata.py
|
xiezhen/brilws
|
e3652dd4506dff9d713184ff623b59bc11fbe2c7
|
[
"MIT"
] | 1
|
2019-12-06T09:23:01.000Z
|
2019-12-06T09:23:01.000Z
|
"""
Usage:
briltag insertdata [options]
Options:
-h --help Show this screen.
-c CONNECT Service name [default: onlinew]
-p AUTHPATH Authentication file
--name TAGNAME Name of the data tag
--comments COMMENTS Comments on the tag
"""
from docopt import docopt
from schema import Schema
from brilws.cli import clicommonargs
def validate(optdict):
    """Validate *optdict* against the subset of shared CLI validators this command accepts."""
    accepted = ['-c', '-p', '--name', '--comments', str]
    schema_spec = {
        key: checker
        for key, checker in clicommonargs.argvalidators.items()
        if key in accepted
    }
    return Schema(schema_spec).validate(optdict)
if __name__ == '__main__':
    # Parse the command line according to the module docstring and echo the result.
    print (docopt(__doc__,options_first=True))
| 25.689655
| 93
| 0.625503
| 88
| 745
| 5.147727
| 0.625
| 0.07064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.271141
| 745
| 28
| 94
| 26.607143
| 0.834254
| 0.421477
| 0
| 0
| 0
| 0
| 0.067308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55b3f38a36b36ad5c48a9910aaae79865f7775ae
| 17,152
|
py
|
Python
|
techniques/volumerec.py
|
lleonart1984/rendezvous
|
f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da
|
[
"MIT"
] | null | null | null |
techniques/volumerec.py
|
lleonart1984/rendezvous
|
f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da
|
[
"MIT"
] | null | null | null |
techniques/volumerec.py
|
lleonart1984/rendezvous
|
f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da
|
[
"MIT"
] | null | null | null |
from rendering.manager import *
from rendering.scenes import *
from rendering.training import *
import random
import glm
import os
import numpy as np
import math

# Directory holding the volume-reconstruction compute shaders; they are
# compiled once at import time.
__VOLUME_RECONSTRUCTION_SHADERS__ = os.path.dirname(__file__)+"/shaders/VR"
compile_shader_sources(__VOLUME_RECONSTRUCTION_SHADERS__)
class RayGenerator(RendererModule):
    """
    Renderer module that generates camera rays on the GPU: one full image of
    rays (output_dim[0] x output_dim[1]) per camera origin/target pair.
    """

    def __init__(self, device, output_dim: (int, int), mode: int, *args, **kwargs):
        # output_dim looks like (height, width): the aspect ratio below is
        # output_dim[1] / output_dim[0] -- TODO confirm against callers.
        self.output_dim = output_dim
        self.mode = mode  # forwarded verbatim as a push constant to the shader
        self.camera_buffer = None  # created lazily in setup()
        super().__init__(device, *args, **kwargs)

    def setup(self):
        # Uniform buffer holding the inverse view-projection matrix.
        self.camera_buffer = self.device.create_uniform_buffer(
            ProjToWorld=glm.mat4
        )
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+"/raygen.comp.spv")
        # Bindings resolve through lambdas so the buffers can be reassigned
        # on every forward pass.
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.pipeline.rays)
        pipeline.bind_uniform(1, ShaderStage.COMPUTE, lambda: self.camera_buffer)
        pipeline.bind_constants(
            0, ShaderStage.COMPUTE,
            dim=glm.ivec2,
            mode=int,
            seed=int
        )
        pipeline.close()
        self.pipeline = pipeline

    def forward_render(self, inputs):
        # inputs: (origins, targets); both reshaped to (-1, 3) camera points.
        origins, targets = inputs
        origins = origins.reshape(-1, 3)
        targets = targets.reshape(-1, 3)
        # 6 floats per ray (presumably origin xyz + direction xyz -- TODO confirm).
        full_rays = torch.zeros(len(origins) * self.output_dim[0] * self.output_dim[1], 6, device=origins.device)
        for i, (o, t) in enumerate(zip(origins, targets)):
            # Fresh zero-filled ray buffer for this camera.
            self.pipeline.rays = self.wrap_tensor(torch.zeros(self.output_dim[0] * self.output_dim[1], 6, device=origins.device), False)
            # Setup camera
            proj = glm.perspective(45, self.output_dim[1] / self.output_dim[0], 0.01, 1000)
            view = glm.lookAt(glm.vec3(*o), glm.vec3(*t), glm.vec3(0, 1, 0))
            proj_to_model = glm.inverse(proj * view)
            self.camera_buffer.ProjToWorld = proj_to_model
            with self.device.get_compute() as man:
                man.set_pipeline(self.pipeline)
                man.update_sets(0)
                man.update_constants(
                    ShaderStage.COMPUTE,
                    dim=glm.ivec2(self.output_dim[1], self.output_dim[0]),
                    mode=self.mode,
                    seed=np.random.randint(0, 10000000)  # fresh RNG seed per dispatch
                )
                man.dispatch_threads_2D(self.output_dim[1], self.output_dim[0])
            # NOTE(review): 't' shadows the targets loop variable here (pre-existing).
            t = self.get_tensor(self.pipeline.rays)
            # Copy this camera's rays into its slice of the batched output.
            full_rays[i*self.output_dim[0]*self.output_dim[1]:(i+1)*self.output_dim[0]*self.output_dim[1]] = t
        return [full_rays]
class TransmittanceRenderer(RendererModule):
    """
    Differentiable transmittance renderer. The forward pass accumulates a
    per-ray transmittance through a density grid; the backward pass runs a
    mirrored compute shader producing gradients w.r.t. the grid densities.
    """

    def __init__(self, device, *args, **kwargs):
        super().__init__(device, *args, **kwargs)

    def setup(self):
        # Medium description shared by the forward and backward pipelines.
        self.medium_buffer = self.device.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        # --- forward pipeline ---
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/forward.comp.spv')
        # Lambdas defer buffer resolution until dispatch, so the buffers can
        # be swapped per call in forward_render/backward_render.
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.forward_pipeline.grid)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.forward_pipeline.rays)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.forward_pipeline.transmittances)
        pipeline.bind_uniform(3, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
                                grid_dim=glm.ivec3,
                                number_of_rays=int
                                )
        pipeline.close()
        self.forward_pipeline = pipeline
        # --- backward pipeline ---
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/backward.comp.spv')
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.backward_pipeline.grid_gradients)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.backward_pipeline.rays)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.backward_pipeline.transmittances)
        pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.backward_pipeline.transmittance_gradients)
        pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
                                grid_dim=glm.ivec3,
                                number_of_rays=int
                                )
        pipeline.close()
        self.backward_pipeline = pipeline

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        """Update the homogeneous-medium properties used by both passes."""
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def forward_render(self, inputs):
        # inputs: (rays, grid); rays are flat records of 6 floats each.
        rays, grid = inputs
        grid_dim = grid.shape
        ray_count = torch.numel(rays) // 6
        self.forward_pipeline.rays = self.wrap_tensor(rays)
        self.forward_pipeline.grid = self.wrap_tensor(grid)
        # RGB transmittance per ray, zero-initialised.
        self.forward_pipeline.transmittances = self.wrap_tensor(torch.zeros(ray_count, 3, device=rays.device), False)
        with self.device.get_compute() as man:
            man.set_pipeline(self.forward_pipeline)
            man.update_sets(0)
            # grid.shape is (depth, height, width); the shader expects ivec3
            # in reversed (x, y, z) order.
            man.update_constants(ShaderStage.COMPUTE,
                                 grid_dim=glm.ivec3(grid_dim[2], grid_dim[1], grid_dim[0]),
                                 number_of_rays=ray_count
                                 )
            man.dispatch_threads_1D(ray_count)
        return [self.get_tensor(self.forward_pipeline.transmittances)]

    def backward_render(self, inputs, outputs, output_gradients):
        # Mirrors forward_render. Returns gradients for (rays, grid): rays get
        # None, only the grid densities receive gradients.
        rays, grid = inputs
        transmittances, = outputs
        transmittance_gradients, = output_gradients
        grid_dim = grid.shape
        ray_count = torch.numel(rays) // 6
        self.backward_pipeline.rays = self.wrap_tensor(rays)
        self.backward_pipeline.transmittances = self.wrap_tensor(transmittances)
        self.backward_pipeline.transmittance_gradients = self.wrap_tensor(transmittance_gradients)
        self.backward_pipeline.grid_gradients = self.wrap_tensor(torch.zeros_like(grid))
        with self.device.get_compute() as man:
            man.set_pipeline(self.backward_pipeline)
            man.update_sets(0)
            man.update_constants(ShaderStage.COMPUTE,
                                 grid_dim=glm.ivec3(grid_dim[2], grid_dim[1], grid_dim[0]),
                                 number_of_rays=ray_count
                                 )
            man.dispatch_threads_1D(ray_count)
        return [None, self.get_tensor(self.backward_pipeline.grid_gradients)]
class ResampleGrid(RendererModule):
    """
    Renderer module that resamples a 3D density grid to a new resolution on
    the GPU.
    """

    def __init__(self, device: DeviceManager, output_dim: (int, int, int), *args, **kwargs):
        # output_dim: destination grid shape in (depth, height, width) order,
        # judging by the ivec3(dim[2], dim[1], dim[0]) swizzle below.
        self.output_dim = output_dim
        super().__init__(device, *args, **kwargs)

    def setup(self):
        pipeline = self.device.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + "/resampling.comp.spv")
        # Lambdas defer buffer resolution to dispatch time so forward_render
        # can swap buffers per call.
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.pipeline.dst_grid)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.pipeline.src_grid)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
                                dst_grid_dim=glm.ivec3, rem0=float,
                                src_grid_dim=glm.ivec3, rem1=float
                                )
        pipeline.close()
        self.pipeline = pipeline

    def forward_render(self, inputs: List[torch.Tensor]):
        src_grid, = inputs
        self.pipeline.src_grid = self.wrap_tensor(src_grid)
        self.pipeline.dst_grid = self.wrap_tensor(torch.zeros(self.output_dim, device=src_grid.device))
        src_grid_dim = src_grid.shape
        dst_grid_dim = self.output_dim
        with self.device.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            man.update_constants(ShaderStage.COMPUTE,
                                 dst_grid_dim=glm.ivec3(dst_grid_dim[2], dst_grid_dim[1], dst_grid_dim[0]),
                                 src_grid_dim=glm.ivec3(src_grid_dim[2], src_grid_dim[1], src_grid_dim[0])
                                 )
            # Bug fix: dispatch one thread per destination voxel. The original
            # computed dim[0] * dim[1] * dim[0] (index 0 used twice), which is
            # the wrong thread count for any non-cubic grid.
            man.dispatch_threads_1D(dst_grid_dim[0] * dst_grid_dim[1] * dst_grid_dim[2])
        return [self.get_tensor(self.pipeline.dst_grid)]
class TransmittanceGenerator(Technique):
    """
    Technique that renders the transmittance of a density grid into an output
    image with a single generator compute shader (one thread per pixel).
    """

    def __init__(self, grid, output_image):
        super().__init__()
        self.grid = grid                  # 3D density grid (bound as storage image)
        self.output_image = output_image  # destination image
        self.width, self.height = output_image.width, output_image.height

    def __setup__(self):
        # rays: 6 floats of 4 bytes per pixel (sizes below are in bytes)
        self.rays = self.create_buffer(6 * 4 * self.width * self.height,
                                       BufferUsage.STORAGE | BufferUsage.TRANSFER_SRC | BufferUsage.TRANSFER_DST,
                                       MemoryProperty.GPU)
        # Transmittance: 3 floats (RGB) of 4 bytes per pixel
        self.transmittances = self.create_buffer(3 * 4 * self.width * self.height,
                                                 BufferUsage.STORAGE | BufferUsage.TRANSFER_SRC | BufferUsage.TRANSFER_DST,
                                                 MemoryProperty.GPU)
        # camera buffer (inverse view-projection matrix)
        self.camera_buffer = self.create_uniform_buffer(
            ProjToWorld=glm.mat4
        )
        # medium properties
        self.medium_buffer = self.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+'/generator.comp.spv')
        pipeline.bind_storage_image(0, ShaderStage.COMPUTE, lambda: self.output_image)
        pipeline.bind_storage_image(1, ShaderStage.COMPUTE, lambda: self.grid)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.rays)
        pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.transmittances)
        pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.camera_buffer)
        pipeline.bind_uniform(5, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.close()
        self.pipeline = pipeline
        # Defaults so the technique renders something without extra calls.
        self.set_camera(glm.vec3(0,0,-3), glm.vec3(0,0,0))
        self.set_medium(glm.vec3(1,1,1), 10, 0.875)

    def set_camera(self, look_from: glm.vec3, look_to: glm.vec3):
        """Update the camera uniform from a look-from / look-to pair."""
        # Setup camera
        proj = glm.perspective(45, self.width / self.height, 0.01, 1000)
        view = glm.lookAt(look_from, look_to, glm.vec3(0, 1, 0))
        proj_to_model = glm.inverse(proj * view)
        self.camera_buffer.ProjToWorld = proj_to_model

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        """Update the homogeneous-medium uniform."""
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def __dispatch__(self):
        # One thread per output pixel.
        with self.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            man.dispatch_threads_2D(self.width, self.height)
class TransmittanceForward(Technique):
    """
    Forward transmittance pass driven by externally resolved buffers:
    rays (input), grid densities (parameters), transmittances (output).
    """

    def __init__(self, rays_resolver, grid_dim: (int, int, int), grid_resolver, transmittance_resolver):
        super().__init__()
        self.rays_resolver = rays_resolver  # input
        self.grid_resolver = grid_resolver  # params
        self.transmittance_resolver = transmittance_resolver  # output
        self.grid_dim = glm.ivec3(grid_dim)

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        """Update the homogeneous-medium uniform."""
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def __setup__(self):
        # medium properties
        self.medium_buffer = self.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/forward.comp.spv')
        # The resolvers are callables supplied by the owner, evaluated at
        # dispatch time.
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, self.grid_resolver)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, self.rays_resolver)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, self.transmittance_resolver)
        pipeline.bind_uniform(3, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
                                grid_dim = glm.ivec3,
                                number_of_rays = int
                                )
        pipeline.close()
        self.pipeline = pipeline
        # Default medium so the pass runs without extra configuration.
        self.set_medium(glm.vec3(1, 1, 1), 10, 0.875)

    def __dispatch__(self):
        rays = self.rays_resolver()
        with self.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            # Presumably rays.size is in bytes and each ray record is
            # 2 vec3s of 4-byte floats (hence // (4*3*2)) -- TODO confirm.
            ray_count = rays.size // (4*3*2)
            man.update_constants(ShaderStage.COMPUTE,
                                 grid_dim=self.grid_dim,
                                 number_of_rays=ray_count
                                 )
            man.dispatch_threads_1D(ray_count)
class TransmittanceBackward(Technique):
    """
    Backward transmittance pass: accumulates gradients of the grid densities
    from per-ray transmittance gradients using a mirrored compute shader.
    """

    def __init__(self, rays, grid_dim, gradient_densities, transmittances, gradient_transmittances):
        super().__init__()
        self.grid_dim = grid_dim
        self.rays = rays  # buffer with rays configurations (origin, direction)
        self.gradient_densities = gradient_densities  # Flatten grid 512x512x512 used as parameters
        self.transmittances = transmittances  # Float with transmittance for each ray
        self.gradient_transmittances = gradient_transmittances  # incoming gradients w.r.t. transmittances
        self.pipeline = None  # created in __setup__

    def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
        """Update the homogeneous-medium uniform."""
        self.medium_buffer.scatteringAlbedo = scattering_albedo
        self.medium_buffer.density = density
        self.medium_buffer.phase_g = phase_g

    def __setup__(self):
        # medium properties
        self.medium_buffer = self.create_uniform_buffer(
            scatteringAlbedo=glm.vec3,
            density=float,
            phase_g=float
        )
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/backward.comp.spv')
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.gradient_densities)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.rays)
        pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.transmittances)
        pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.gradient_transmittances)
        pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.medium_buffer)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
                                grid_dim=glm.ivec3,
                                number_of_rays=int
                                )
        pipeline.close()
        self.pipeline = pipeline
        # Default medium, matching the forward pass defaults.
        self.set_medium(glm.vec3(1, 1, 1), 10, 0.875)

    def __dispatch__(self):
        with self.get_compute() as man:
            man.clear_buffer(self.gradient_densities)  # Zero grad
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            # Presumably rays.size is in bytes and each ray record is
            # 2 vec3s of 4-byte floats (hence // (4*3*2)) -- TODO confirm.
            ray_count = self.rays.size // (4 * 3 * 2)
            man.update_constants(ShaderStage.COMPUTE,
                                 grid_dim=self.grid_dim,
                                 number_of_rays=ray_count
                                 )
            man.dispatch_threads_1D(ray_count)
class UpSampleGrid(Technique):
    """
    Technique that resamples a source grid buffer into a destination grid
    buffer of a different resolution and copies the result back to the CPU.
    """

    def __init__(self):
        # Bug fix: the base Technique constructor was never invoked here,
        # unlike every other Technique subclass in this module.
        super().__init__()
        self.src_grid = None
        self.dst_grid = None
        self.src_grid_dim = glm.ivec3(0,0,0)
        self.dst_grid_dim = glm.ivec3(0,0,0)

    def set_src_grid(self, grid_dim, grid):
        """Set the source buffer and its ivec3 resolution."""
        self.src_grid = grid
        self.src_grid_dim = grid_dim

    def set_dst_grid(self, grid_dim, grid):
        """Set the destination buffer and its ivec3 resolution."""
        self.dst_grid = grid
        self.dst_grid_dim = grid_dim

    def __setup__(self):
        pipeline = self.create_compute_pipeline()
        pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+"/initialize.comp.spv")
        # Lambdas defer resolution so buffers set via set_src_grid/set_dst_grid
        # after setup are still picked up.
        pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.dst_grid)
        pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.src_grid)
        pipeline.bind_constants(0, ShaderStage.COMPUTE,
                                dst_grid_dim=glm.ivec3, rem0=float,
                                src_grid_dim=glm.ivec3, rem1=float
                                )
        pipeline.close()
        self.pipeline = pipeline

    def __dispatch__(self):
        with self.get_compute() as man:
            man.set_pipeline(self.pipeline)
            man.update_sets(0)
            man.update_constants(ShaderStage.COMPUTE,
                                 dst_grid_dim=self.dst_grid_dim,
                                 src_grid_dim=self.src_grid_dim
                                 )
            # One thread per destination voxel.
            man.dispatch_threads_1D(self.dst_grid_dim.x * self.dst_grid_dim.y * self.dst_grid_dim.z)
            # Copy the result back to CPU-visible memory.
            man.gpu_to_cpu(self.dst_grid)
| 45.983914
| 136
| 0.65007
| 2,028
| 17,152
| 5.193787
| 0.08925
| 0.037216
| 0.061521
| 0.071774
| 0.743093
| 0.670179
| 0.640748
| 0.605241
| 0.564606
| 0.528719
| 0
| 0.017657
| 0.257055
| 17,152
| 373
| 137
| 45.983914
| 0.808915
| 0.016091
| 0
| 0.490741
| 0
| 0
| 0.009252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089506
| false
| 0
| 0.024691
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55b6264d004418dd7f3a7bb277c12e4c208f7910
| 868
|
py
|
Python
|
basics/merge_sort.py
|
zi-NaN/algorithm_exercise
|
817916a62774145fe6387b715f76c5badbf99197
|
[
"MIT"
] | null | null | null |
basics/merge_sort.py
|
zi-NaN/algorithm_exercise
|
817916a62774145fe6387b715f76c5badbf99197
|
[
"MIT"
] | null | null | null |
basics/merge_sort.py
|
zi-NaN/algorithm_exercise
|
817916a62774145fe6387b715f76c5badbf99197
|
[
"MIT"
] | 1
|
2018-11-21T05:14:07.000Z
|
2018-11-21T05:14:07.000Z
|
def _merge_sort(arr:'list'):
if len(arr) <= 1:
return arr
begin = 0
end = len(arr)-1
middle = (begin+end)//2
first = _merge_sort(arr[begin:middle+1])
second = _merge_sort(arr[middle+1:end+1])
# merge
ptr1 = begin
ptr2 = middle+1
ptr = 0
while(ptr1<middle+1 and ptr2<end+1):
if first[ptr1] < second[ptr2-middle-1]:
arr[ptr] = first[ptr1]
ptr1 += 1
else:
arr[ptr] = second[ptr2-middle-1]
ptr2 += 1
ptr += 1
# print(ptr1, ptr2)
while(ptr1 < middle+1):
arr[ptr] = first[ptr1]
ptr1 += 1
ptr += 1
while(ptr2 < end+1):
arr[ptr] = second[ptr2-middle-1]
ptr2 += 1
ptr += 1
return arr
# test: smoke-check the sort on a tiny list when run as a script
if __name__ == '__main__':
    print(_merge_sort([1, 3, 2]))
| 24.111111
| 48
| 0.483871
| 117
| 868
| 3.452991
| 0.230769
| 0.138614
| 0.108911
| 0.126238
| 0.292079
| 0.292079
| 0.292079
| 0.292079
| 0.158416
| 0.158416
| 0
| 0.080882
| 0.373272
| 868
| 36
| 49
| 24.111111
| 0.661765
| 0.032258
| 0
| 0.433333
| 0
| 0
| 0.014981
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0
| 0
| 0.1
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55b7410f25633189b2b806b878e6eeb2f52c7ecc
| 679
|
py
|
Python
|
Data_Science/Python-Estatistica/stats-ex8.py
|
maledicente/cursos
|
00ace48da7e48b04485e4ca97b3ca9ba5f33a283
|
[
"MIT"
] | 1
|
2021-05-03T22:59:38.000Z
|
2021-05-03T22:59:38.000Z
|
Data_Science/Python-Estatistica/stats-ex8.py
|
maledicente/cursos
|
00ace48da7e48b04485e4ca97b3ca9ba5f33a283
|
[
"MIT"
] | null | null | null |
Data_Science/Python-Estatistica/stats-ex8.py
|
maledicente/cursos
|
00ace48da7e48b04485e4ca97b3ca9ba5f33a283
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def cinematica(t, s0, v0, a):
    """Uniformly accelerated motion: s(t) = s0 + v0*t + a*t^2/2."""
    return s0 + v0*t + (a*t*t/2.0)

# sample times and ground-truth kinematic parameters
t = np.linspace(0, 5, 500)
s0, v0, a = 0.5, 2.0, 1.5

# synthetic measurements: true trajectory plus gaussian noise
s_noise = 0.5 * np.random.normal(size=t.size)
s = cinematica(t, s0, v0, a)
sdata = s + s_noise

# recover (s0, v0, a) by least-squares fit to the noisy data
coefs, pcov = curve_fit(cinematica, t, sdata)

# plot measurements against the fitted curve
plt.plot(t, sdata, 'b-', label='Deslocamento')
plt.plot(t, cinematica(t, *coefs), 'r-',label='Função ajustada')
plt.xlabel('Tempo')
plt.ylabel('Deslocamento')
plt.title('Ajuste de curva')
plt.legend()
plt.show()

print("Espaço inicial= %f" %coefs[0])
print("Velocidade inicial= %f" %coefs[1])
print("Aceleração= %f" %coefs[2])
| 20.575758
| 64
| 0.673049
| 123
| 679
| 3.682927
| 0.447154
| 0.09713
| 0.057395
| 0.066225
| 0.07064
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044369
| 0.136966
| 679
| 33
| 65
| 20.575758
| 0.728669
| 0
| 0
| 0
| 0
| 0
| 0.172059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.208333
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55b93809c23b2f231b7acf1f7f0608d40af2f69c
| 1,828
|
py
|
Python
|
run.py
|
Gandor26/covid-open
|
50dcb773160edc16b107785a6bb32ae6f82fc9a7
|
[
"MIT"
] | 12
|
2020-10-29T20:52:26.000Z
|
2021-11-10T14:11:59.000Z
|
run.py
|
Gandor26/covid-open
|
50dcb773160edc16b107785a6bb32ae6f82fc9a7
|
[
"MIT"
] | 1
|
2021-02-16T09:48:39.000Z
|
2021-03-20T04:21:54.000Z
|
run.py
|
Gandor26/covid-open
|
50dcb773160edc16b107785a6bb32ae6f82fc9a7
|
[
"MIT"
] | 1
|
2020-12-05T15:51:43.000Z
|
2020-12-05T15:51:43.000Z
|
from typing import Optional, Dict
from pathlib import Path
from copy import deepcopy
from tqdm import tqdm
import torch as pt
from torch import Tensor, nn
from torch.optim import Adam
def train(
        train_data: Dict[str, Tensor],
        valid_data: Dict[str, Tensor],
        model: nn.Module,
        optimizer: Adam,
        model_path: Path,
        n_epochs: int,
        test_size: Optional[int] = None,
        log_step: int = 10,
        patience: int = 10,
) -> None:
    """Train *model* with periodic validation and early stopping.

    Every `log_step` epochs the model is evaluated on `valid_data`; when the
    validation loss fails to improve for `patience` consecutive evaluations,
    training stops. The best-performing weights are saved to `model_path`.
    (Previously the *final* weights were saved even though early stopping
    tracked the best validation loss — and `deepcopy` was imported but never
    used; this restores the evident intent.)

    :param train_data: keyword tensors forwarded to ``model(**train_data)``
    :param valid_data: keyword tensors for validation, or None to skip it
    :param model: module returning ``(loss, predictions)``
    :param optimizer: optimizer over the model's parameters
    :param model_path: where the chosen state dict is written
    :param n_epochs: maximum number of training epochs
    :param test_size: forwarded to the model during training steps
    :param log_step: validate (and refresh the progress bar) every N epochs
    :param patience: failed validations tolerated before stopping
    """
    prog_bar = tqdm(total=n_epochs, unit='epoch')
    best_valid = float('inf')
    best_state = None  # snapshot of the best weights seen so far
    stop_counter = patience
    for epoch in range(n_epochs):
        prog_bar.update()
        model = model.train()
        loss_train, _ = model(**train_data, test_size=test_size)
        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()
        postfix = {'train_loss': loss_train.item()}
        if (epoch+1) % log_step == 0:
            if valid_data is not None:
                model = model.eval()
                with pt.no_grad():
                    loss_valid, _ = model(**valid_data)
                loss_valid = loss_valid.item()
                postfix['valid_loss'] = loss_valid
                if loss_valid < best_valid:
                    best_valid = loss_valid
                    best_state = deepcopy(model.state_dict())
                    stop_counter = patience
                else:
                    stop_counter -= 1
                    if stop_counter == 0:
                        break
        prog_bar.set_postfix(**postfix)
    prog_bar.close()
    # Fall back to the final weights if validation never produced a snapshot.
    pt.save(best_state if best_state is not None else model.state_dict(),
            model_path)
def inference(
        data: Dict[str, Tensor],
        model: nn.Module,
        model_path: Path,
):
    """Restore weights from *model_path* and run the model on *data*.

    Returns the model's predictions, clamped to be non-negative.
    """
    state = pt.load(model_path)
    model.load_state_dict(state)
    model = model.eval()
    with pt.no_grad():
        _, predictions = model(**data, test_size=0)
        predictions = predictions.clamp_min_(0.0)
    return predictions
| 29.967213
| 64
| 0.565646
| 228
| 1,828
| 4.307018
| 0.328947
| 0.05499
| 0.033605
| 0.051935
| 0.114053
| 0.114053
| 0.114053
| 0
| 0
| 0
| 0
| 0.009031
| 0.333698
| 1,828
| 61
| 65
| 29.967213
| 0.797209
| 0
| 0
| 0.175439
| 0
| 0
| 0.015309
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.122807
| 0
| 0.175439
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55bb1301f3cfe948295e5ac6f60a5f73e88c2c17
| 975
|
py
|
Python
|
python/StatsUtil.py
|
cbaldassano/Parcellating-connectivity
|
a98142a6b0dc10e9cb6f6e603cb5334996d018ec
|
[
"Unlicense"
] | 2
|
2020-08-17T21:06:28.000Z
|
2021-05-10T14:37:16.000Z
|
python/StatsUtil.py
|
cbaldassano/Parcellating-connectivity
|
a98142a6b0dc10e9cb6f6e603cb5334996d018ec
|
[
"Unlicense"
] | null | null | null |
python/StatsUtil.py
|
cbaldassano/Parcellating-connectivity
|
a98142a6b0dc10e9cb6f6e603cb5334996d018ec
|
[
"Unlicense"
] | 3
|
2018-07-06T17:08:47.000Z
|
2019-10-09T18:58:31.000Z
|
import numpy as np
# Compute normalized mutual information between two parcellations z1 and z2
def NMI(z1, z2):
    """Normalized mutual information between integer parcellations z1 and z2.

    Returns MI(z1, z2) / sqrt(H(z1) * H(z2)), in [0, 1]; 0 if MI is 0.

    :param z1: 1-D integer label array
    :param z2: 1-D integer label array, same length as z1
    """
    N = len(z1)
    assert N == len(z2)
    p1 = np.bincount(z1)/N
    p1[p1 == 0] = 1          # avoid log(0); 1*log(1) contributes nothing
    H1 = (-p1*np.log(p1)).sum()
    p2 = np.bincount(z2)/N
    p2[p2 == 0] = 1
    H2 = (-p2*np.log(p2)).sum()
    # Joint label distribution. `density=True` replaces `normed=True`,
    # which was removed from numpy in 1.24; with unit-width bins the
    # density values are exactly the joint probabilities.
    joint = np.histogram2d(z1,z2,[range(0,z1.max()+2), range(0,z2.max()+2)],
            density=True)
    joint_p = joint[0]
    pdiv = joint_p/np.outer(p1,p2)
    pdiv[joint_p == 0] = 1   # zero cells contribute 0 to the MI sum
    MI = (joint_p*np.log(pdiv)).sum()
    if MI == 0:
        NMI = 0
    else:
        NMI = MI/np.sqrt(H1*H2)
    return NMI
# (Approximately) return whether an array is symmetric
def CheckSymApprox(D):
    """Spot-check symmetry of square matrix D at 1000 random index pairs."""
    # Sample random (row, col) pairs and compare D[r, c] against D[c, r].
    pairs = np.random.randint(D.shape[0], size=(1000, 2))
    rows, cols = pairs[:, 0], pairs[:, 1]
    forward = np.ravel_multi_index((rows, cols), dims=np.shape(D))
    backward = np.ravel_multi_index((cols, rows), dims=np.shape(D))
    return np.all(D.flat[forward] == D.flat[backward])
| 24.375
| 75
| 0.610256
| 177
| 975
| 3.288136
| 0.40113
| 0.051546
| 0.034364
| 0.058419
| 0.079038
| 0.079038
| 0
| 0
| 0
| 0
| 0
| 0.066581
| 0.198974
| 975
| 40
| 76
| 24.375
| 0.678617
| 0.167179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
55c01bcc5785d0af3f6437a91b853450fda2bb63
| 2,531
|
py
|
Python
|
gdesk/panels/imgview/quantiles.py
|
thocoo/gamma-desk
|
9cb63a65fe23e30e155b3beca862f369b7fa1b7e
|
[
"Apache-2.0"
] | null | null | null |
gdesk/panels/imgview/quantiles.py
|
thocoo/gamma-desk
|
9cb63a65fe23e30e155b3beca862f369b7fa1b7e
|
[
"Apache-2.0"
] | 8
|
2021-04-09T11:31:43.000Z
|
2021-06-09T09:07:18.000Z
|
gdesk/panels/imgview/quantiles.py
|
thocoo/gamma-desk
|
9cb63a65fe23e30e155b3beca862f369b7fa1b7e
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from .fasthist import hist2d
# Standard-normal CDF values used as the default quantile levels.
# (The original built this via np.ndarray(13) — uninitialized memory —
# and then filled it cell by cell; a literal array is safer and clearer.)
stdquant = np.array([
    0.0000316712418331200,   # -4 sdev
    0.0013498980316301000,   # -3 sdev
    0.0227501319481792000,   # -2 sdev
    0.05,
    0.1586552539314570000,   # -1 sdev or lsdev
    0.25,                    # first quartile
    0.50,                    # median
    0.75,                    # third quartile
    0.8413447460685430000,   # +1 sdev or usdev
    0.95,
    0.9772498680518210000,   # +2 sdev
    0.9986501019683700000,   # +3 sdev
    0.9999683287581670000,   # +4 sdev
])
def get_standard_quantiles(arr, bins=64, step=None, quantiles=None):
    """Approximate value thresholds of *arr* at the given quantile levels.

    Builds a histogram of arr and walks its cumulative sum to find the bin
    start corresponding to each requested quantile.

    :param arr: input array
    :param bins: histogram bin count passed to hist2d
    :param step: histogram step passed to hist2d
    :param quantiles: quantile levels in (0, 1); defaults to `stdquant`
    :return: list of threshold values, one per quantile
    """
    hist, starts, stepsize = hist2d(arr, bins, step, plot=False)
    cumhist = np.cumsum(hist)
    # BUG FIX: always work on a fresh array. The original did
    # `quantiles = stdquant` followed by `quantiles *= npix`, which
    # scaled the module-level `stdquant` IN PLACE and corrupted every
    # subsequent call that relied on the defaults.
    if quantiles is None:
        quantiles = stdquant.copy()
    else:
        quantiles = np.array(quantiles, dtype=float)
    n = len(quantiles)
    npix = np.multiply.reduce(arr.shape)
    quantiles *= npix
    thresh = [0] * n
    #TO DO: speed up by using interpolation function of numpy
    for ind in range(n):
        thresh[ind] = starts[(cumhist < quantiles[ind]).sum()]
    return thresh
def get_sigma_range(arr, sigma=1, bins=64, step=None):
    """Value thresholds of *arr* at +/- `sigma` standard deviations.

    :param arr: input array
    :param sigma: 1, 2, 3 or 4
    :param bins: histogram bin count passed through to get_standard_quantiles
    :param step: histogram step passed through to get_standard_quantiles
    :return: [lower, upper] thresholds
    :raises ValueError: if sigma is not 1-4 (previously returned None silently)
    """
    if sigma == 1:
        q = (stdquant[4], stdquant[8])
    elif sigma == 2:
        q = (stdquant[2], stdquant[10])
    elif sigma == 3:
        q = (stdquant[1], stdquant[11])
    elif sigma == 4:
        q = (stdquant[0], stdquant[12])
    else:
        raise ValueError('sigma must be 1, 2, 3 or 4, got %r' % (sigma,))
    return get_standard_quantiles(arr, bins, step, q)
def get_sigma_range_for_hist(starts, hist, sigma):
    """Thresholds at +/- `sigma` standard deviations from a precomputed histogram.

    :param starts: bin start values of the histogram
    :param hist: per-bin counts
    :param sigma: 1, 2, 3 or 4
    :return: [lower, upper] thresholds
    :raises ValueError: if sigma is not 1-4 (previously this crashed with an
        UnboundLocalError because `quantiles` was never assigned)
    """
    cumhist = np.cumsum(hist)
    if sigma == 1:
        quantiles = np.array((stdquant[4], stdquant[8]))
    elif sigma == 2:
        quantiles = np.array((stdquant[2], stdquant[10]))
    elif sigma == 3:
        quantiles = np.array((stdquant[1], stdquant[11]))
    elif sigma == 4:
        quantiles = np.array((stdquant[0], stdquant[12]))
    else:
        raise ValueError('sigma must be 1, 2, 3 or 4, got %r' % (sigma,))
    n = len(quantiles)
    npix = cumhist[-1]  # total pixel count is the last cumulative value
    quantiles *= npix
    thresh = [0] * n
    #TO DO: speed up by using interpolation function of numpy
    for ind in range(n):
        thresh[ind] = starts[(cumhist < quantiles[ind]).sum()]
    return thresh
| 34.671233
| 83
| 0.590281
| 317
| 2,531
| 4.662461
| 0.277603
| 0.028417
| 0.067659
| 0.077808
| 0.44452
| 0.397835
| 0.397835
| 0.2977
| 0.175913
| 0.175913
| 0
| 0.1322
| 0.279731
| 2,531
| 73
| 84
| 34.671233
| 0.678552
| 0.086922
| 0
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.035714
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e94dc72d516776aab0f1e035f052d60121476db1
| 1,981
|
py
|
Python
|
create_h5ad.py
|
xmuyulab/DAISM-XMBD
|
916e18a1f111789a1c0bd3c1209d5a73813f3d3a
|
[
"MIT"
] | 2
|
2021-11-05T00:43:16.000Z
|
2021-12-14T08:39:29.000Z
|
create_h5ad.py
|
biosyy/DAISM-XMBD
|
a76f976db8c33ef33f78533a5a2be50a85148e79
|
[
"MIT"
] | 2
|
2021-01-14T19:40:46.000Z
|
2021-01-14T19:41:14.000Z
|
create_h5ad.py
|
biosyy/DAISM-XMBD
|
a76f976db8c33ef33f78533a5a2be50a85148e79
|
[
"MIT"
] | 1
|
2021-08-30T15:11:45.000Z
|
2021-08-30T15:11:45.000Z
|
##############################
## cread purified h5ad file ##
##############################
# input: annotation table and the whole expression profile
# output: purified h5ad file
import os
import pandas as pd
import anndata
import argparse
import gc
import numpy as np
# Command-line interface: an annotation table plus a whole expression
# profile in, one purified .h5ad file out.
parser = argparse.ArgumentParser(description='cread purified h5ad file for DAISM-XMBD')
parser.add_argument("-anno", type=str, help="annotation table (contains 'sample.name' and 'cell.type' two columns)", default=None)
parser.add_argument("-exp", type=str, help="the whole expression profile (sample.name in column and gene symbol in row)", default=None)
parser.add_argument("-outdir", type=str, help="the directory to store h5ad file", default="example/")
parser.add_argument("-prefix",type=str,help="the prefix of h5ad file",default= "purified")
def main():
    """Build one purified AnnData .h5ad file from an annotation table and a
    whole expression profile (CLI arguments parsed from `parser`)."""
    inputArgs = parser.parse_args()
    # Create the output directory on first use (idiomatic truthiness check
    # instead of the original `== False` comparison).
    if not os.path.exists(inputArgs.outdir):
        os.mkdir(inputArgs.outdir)
    anno_table = pd.read_csv(inputArgs.anno)
    cell_list = list(anno_table['cell.type'].unique())
    exp = pd.read_csv(inputArgs.exp,sep="\t",index_col=0)
    adata = []
    for cell in cell_list:
        # Samples annotated with this cell type that also appear as columns
        # in the expression matrix.
        tmp = anno_table[anno_table['cell.type']==cell]
        sample_list = tmp['sample.name']
        sample_list_inter = list(set(sample_list).intersection(list(exp.columns)))
        exp_select=exp[sample_list_inter]
        anno = pd.DataFrame(np.repeat(cell,exp_select.shape[1]),columns=['cell.type'])
        adata.append(anndata.AnnData(X=exp_select.T.values,
                 obs=anno,
                 var=pd.DataFrame(columns=[],index=list(exp_select.index))))
    # Pairwise concatenation, deleting each consumed AnnData and collecting
    # garbage to keep peak memory bounded for large datasets.
    for i in range(1, len(adata)):
        print("Concatenating " + str(i))
        adata[0] = adata[0].concatenate(adata[1])
        del adata[1]
        gc.collect()
    print(len(adata))
    adata = adata[0]
    adata.write(inputArgs.outdir+'/'+inputArgs.prefix+'.h5ad')

if __name__ == "__main__":
    main()
| 34.155172
| 135
| 0.649167
| 265
| 1,981
| 4.732075
| 0.373585
| 0.031898
| 0.054226
| 0.033493
| 0.044657
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008679
| 0.185765
| 1,981
| 58
| 136
| 34.155172
| 0.768754
| 0.055023
| 0
| 0
| 0
| 0
| 0.19103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.157895
| 0
| 0.184211
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e94e1af31de28cb3ee32e1feeddbef4991bf43d4
| 1,424
|
py
|
Python
|
FM_Tuning.py
|
RomanGutin/GEMSEC
|
cb2c26d4747cbd3d4c048787ca41665ef0e64155
|
[
"MIT"
] | null | null | null |
FM_Tuning.py
|
RomanGutin/GEMSEC
|
cb2c26d4747cbd3d4c048787ca41665ef0e64155
|
[
"MIT"
] | null | null | null |
FM_Tuning.py
|
RomanGutin/GEMSEC
|
cb2c26d4747cbd3d4c048787ca41665ef0e64155
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 13:56:44 2018
@author: RomanGutin
"""
import pandas as pd
import numpy as np
#Frequency Tuning Loop
amino_letter = ['A','R','D','N','C','E','Q','G','H','I','L','K','M','F','P','S','T','W','Y','V']
length_scores =[4,8,6,6,5,7,7,4,7,5,6,8,7,8,5,5,5,9,8,5]
FM_df = pd.DataFrame(0, index= just_let.index, columns= range(0,81))
FM_score_dict = dict(zip(amino_letter,length_scores))
#splitting amino letter into new independent variables based on its length score#
fm_letter_dict ={}
for letter in amino_letter:
new_vars =[]
for i in range(FM_score_dict[letter]):
new_vars.append(letter+str(i+1))
fm_letter_dict[letter]=new_vars
#generate new FM_tuned dataframe
for seq in FM_df.index:
letter_list= list(seq)
for letter in letter_list:
for var in fm_letter_dict[letter]:
row= FM_df.loc[seq,:]
spot= row[row==0].index[0]
FM_df.loc[seq,spot]= var
FM_df= pd.read_csv('Frequency Tuned Dataset') #data after frequency tuning wit
FM_df.set_index('sequence', inplace= True)
FM_df_arr = np.array(FM_df.values, dtype=[('O', np.float)]).astype(np.float)
#New letter to weight holding the new FM tuned variables
ltw_fm_MLE={}
for amino in amino_letter:
for var in fm_letter_dict[amino]:
ltw_fm_MLE[var]= ltw_AM_n[amino]
ltw_fm_MLE = np.load('ltw_fm_MLE.npy').item()
| 30.297872
| 96
| 0.656601
| 256
| 1,424
| 3.472656
| 0.441406
| 0.035996
| 0.053993
| 0.038245
| 0.07649
| 0.044994
| 0
| 0
| 0
| 0
| 0
| 0.034305
| 0.18118
| 1,424
| 47
| 97
| 30.297872
| 0.72813
| 0.213483
| 0
| 0
| 0
| 0
| 0.059621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e94e9483c973c25abe2c71d5816ab7d9b774441e
| 692
|
py
|
Python
|
unified_api/brokers/kafka/consumer.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | 1
|
2021-04-06T00:43:26.000Z
|
2021-04-06T00:43:26.000Z
|
unified_api/brokers/kafka/consumer.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | null | null | null |
unified_api/brokers/kafka/consumer.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | null | null | null |
from kafka import KafkaConsumer
class Consumer:
    """Thin wrapper around KafkaConsumer that buffers received messages
    in a local FIFO list."""

    def __init__(self, config):
        # "host:port" endpoint assembled from the config mapping
        bootstrap_server = config.get(
            "bootstrap_server") + ":" + config.get("port")
        self.consumer = KafkaConsumer(config.get(
            "subscription_id_2"), bootstrap_servers=bootstrap_server, api_version=(0, 10),
            auto_offset_reset='earliest', enable_auto_commit=True, group_id="test")
        self.messages = []

    def get_message(self):
        # Pop the oldest buffered message; None when the buffer is empty.
        try:
            return self.messages.pop(0)
        except IndexError:
            return None

    def listen(self):
        # Blocks indefinitely, buffering the value of every delivered record.
        for record in self.consumer:
            self.messages.append(record.value)
| 34.6
| 109
| 0.601156
| 77
| 692
| 5.181818
| 0.558442
| 0.120301
| 0.105263
| 0.120301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012245
| 0.291908
| 692
| 19
| 110
| 36.421053
| 0.802041
| 0
| 0
| 0
| 0
| 0
| 0.072254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.0625
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e94ef8f2fd09f77bca0e59bab465fb16e55c0ca1
| 2,159
|
py
|
Python
|
utils.py
|
mino2401200231/File-convertor
|
6fb438dc5f37bf0efd78e18e4848b4cdb0331343
|
[
"MIT"
] | null | null | null |
utils.py
|
mino2401200231/File-convertor
|
6fb438dc5f37bf0efd78e18e4848b4cdb0331343
|
[
"MIT"
] | null | null | null |
utils.py
|
mino2401200231/File-convertor
|
6fb438dc5f37bf0efd78e18e4848b4cdb0331343
|
[
"MIT"
] | 2
|
2021-08-12T06:37:52.000Z
|
2021-09-05T13:03:36.000Z
|
# utilities
import os
from re import sub
import uuid
import subprocess
# Image To Pdf
import img2pdf
# PDF To Images
from pdf2image import convert_from_path
# PDF To Word
from pdf2docx import parse
_BASE_DIR = os.getcwd()  # process working directory at import time
_BASE_DIR_FILE = os.path.join(_BASE_DIR, "files")  # where input/output files live
def process_image_to_pdf(files, pdf_name):
    """Bundle the named image files into <pdf_name>.pdf under the files
    directory and return pdf_name."""
    image_paths = [os.path.join(_BASE_DIR_FILE, fname) for fname in files]
    with open(f"{_BASE_DIR_FILE}/{pdf_name}.pdf","wb") as out:
        out.write(img2pdf.convert(image_paths))
    return pdf_name
def process_word_to_pdf(file):
    """Convert a Word document to PDF via LibreOffice; return the PDF
    file name on success, or -1 when the converter exits non-zero."""
    source = os.path.join(_BASE_DIR_FILE, file)
    result = subprocess.run(
        ['lowriter', '--convert-to', 'pdf', source, "--outdir", _BASE_DIR_FILE])
    if result.returncode != 0:
        return -1
    stem = ".".join(file.split(".")[:-1])
    return stem + ".pdf"
def process_pdf_to_images(file):
    """Render each page of a PDF into JPEGs inside a freshly created folder.

    :param file: PDF file name relative to the files directory
    :return: the new folder's path, or -1 on failure (the folder is removed)
    """
    file_address = os.path.join(_BASE_DIR_FILE, file)
    folder_name = str(uuid.uuid1())
    folder_address = os.path.join(_BASE_DIR_FILE, folder_name)
    os.mkdir(folder_address)
    try:
        convert_from_path(file_address, output_folder=folder_address, fmt="jpeg", thread_count=10, jpegopt="quality")
        return folder_address
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any conversion failure cleans up the folder.
        import shutil
        shutil.rmtree(folder_address)
        return -1
def process_pdf_to_word(file):
    """Convert a PDF into a uniquely named .docx in the files directory.

    :param file: PDF file name relative to the files directory
    :return: the new .docx path, or -1 if conversion fails
    """
    file_address = os.path.join(_BASE_DIR_FILE, file)
    word_file = str(uuid.uuid1()) + ".docx"
    word_file_address = os.path.join(_BASE_DIR_FILE, word_file)
    try:
        parse(file_address, word_file_address, multi_processing=True)
        return word_file_address
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C still propagates.
        return -1
def del_user_files(list):
    """Best-effort removal of the given file names from the files directory.

    NOTE: the parameter name shadows the builtin `list`; it is kept
    unchanged for backward compatibility with existing callers.
    """
    for file in list:
        file_address = os.path.join(_BASE_DIR_FILE, file)
        try:
            os.remove(file_address)
        except OSError:
            # Missing or locked files are skipped (narrowed from a bare
            # `except:` that swallowed every exception type).
            pass
def del_one_file(file):
    """Delete *file*, trying the path as given and then relative to the
    files directory; failures are ignored. Always returns 1."""
    try:
        os.remove(file)
    except OSError:
        # Fall back to treating the name as relative to the files directory;
        # both excepts are narrowed from bare `except:` clauses.
        try:
            os.remove(os.path.join(_BASE_DIR_FILE, file))
        except OSError:
            pass
    return 1
| 26.329268
| 117
| 0.656322
| 304
| 2,159
| 4.355263
| 0.253289
| 0.068731
| 0.09139
| 0.095166
| 0.282477
| 0.269637
| 0.190332
| 0.169184
| 0.145015
| 0.090634
| 0
| 0.008526
| 0.239463
| 2,159
| 82
| 118
| 26.329268
| 0.797808
| 0.022233
| 0
| 0.34375
| 0
| 0
| 0.043189
| 0.014713
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0.046875
| 0.125
| 0
| 0.328125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e950fb1913401e7e3634e1210cfe24f9fddcf950
| 2,026
|
py
|
Python
|
screens/tasks/tasks.py
|
athrn/kognitivo
|
15822338778213c09ea654ec4e06a300129f9478
|
[
"Apache-2.0"
] | 80
|
2017-11-13T21:58:55.000Z
|
2022-01-03T20:10:42.000Z
|
screens/tasks/tasks.py
|
athrn/kognitivo
|
15822338778213c09ea654ec4e06a300129f9478
|
[
"Apache-2.0"
] | null | null | null |
screens/tasks/tasks.py
|
athrn/kognitivo
|
15822338778213c09ea654ec4e06a300129f9478
|
[
"Apache-2.0"
] | 21
|
2017-11-14T09:47:41.000Z
|
2021-11-23T06:44:31.000Z
|
from kivy.uix.screenmanager import Screen
from kivy.properties import StringProperty, ObjectProperty, NumericProperty, ListProperty, BooleanProperty
from kivy.app import App
from kivy.logger import Logger
from library_widgets import TrackingScreenMixin
from utils import import_kv
import_kv(__file__)  # presumably loads this module's companion .kv layout — see utils.import_kv
class TasksScreen(TrackingScreenMixin, Screen):
    """Screen hosting the task flow: either a quick test or the task-set menu."""
    # Bindable Kivy properties: task family shown, play counter, task list,
    # lazily built inner screen manager, loading overlay, and quick-test flag.
    family = StringProperty(None, allownone=True)
    played_times = NumericProperty()
    tasks = ListProperty()
    _main_manager = ObjectProperty()
    loading = ObjectProperty()
    quick_test = BooleanProperty(False)

    def on_quick_test(self, *args):
        # Property observer: refresh content when the flag flips, but only
        # once the inner manager has been created.
        if self._main_manager:
            self.update_content()

    @property
    def main_manager(self):
        """Lazily create and cache the inner TaskScreenManager."""
        if not self._main_manager:
            from .content import TaskScreenManager
            self._main_manager = TaskScreenManager()
        return self._main_manager

    def update_content(self, *args, **kwargs):
        """Show the appropriate inner screen and record session statistics."""
        if self.quick_test:
            self.main_manager.start_test(self.family, self.tasks)
            self.main_manager.current = 'test'
        else:
            self.main_manager.task_sets_screen.fill()
            self.main_manager.current = 'task_sets'
        app = App.get_running_app()
        # Bump the started-sessions counter and report it to the tracker.
        sessions_starts = app.storage['sessions']['started']
        app.tracker.send_event('tasks', 'sessions', label='started', value=sessions_starts + 1)
        app.storage['sessions'] = {"started": sessions_starts + 1,
                                   "finished": app.storage['sessions']['finished']}
        self.played_times += 1
        Logger.info("Tasks: playing %s times" % self.played_times)
        # Unlock the "addicted" achievement exactly on the 10th play.
        if self.played_times == 10:
            App.get_running_app().google_client.unlock_achievement("addicted")
        if self.main_manager.parent != self:
            self.loading.hide(self._main_manager)

    def on_enter(self, *args):
        # Screen lifecycle hook: kick off billing init, then refresh content.
        super(TasksScreen, self).on_enter(*args)
        app = App.get_running_app()
        app.initialize_billing(self.update_content)
| 35.54386
| 106
| 0.673248
| 230
| 2,026
| 5.691304
| 0.352174
| 0.10084
| 0.114591
| 0.036669
| 0.02903
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003197
| 0.228036
| 2,026
| 56
| 107
| 36.178571
| 0.83376
| 0
| 0
| 0.044444
| 0
| 0
| 0.058243
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.177778
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e954754c8db1dbc45662c97eec7de33aed7d3e19
| 1,240
|
py
|
Python
|
imclassify/train_model.py
|
AdamSpannbauer/imclassify
|
27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd
|
[
"MIT"
] | null | null | null |
imclassify/train_model.py
|
AdamSpannbauer/imclassify
|
27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd
|
[
"MIT"
] | null | null | null |
imclassify/train_model.py
|
AdamSpannbauer/imclassify
|
27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd
|
[
"MIT"
] | null | null | null |
"""Train logistic regression model on hdf5 features for classification
Modified from:
https://gurus.pyimagesearch.com/topic/transfer-learning-example-dogs-and-cats/
"""
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
def train_model(h5py_db, model_output='model.pickle', percent_train=1.0):
    """Train logistic regression classifier

    :param h5py_db: path to HDF5 database containing 'features', 'labels', & 'label_names'
    :param model_output: path to save trained model to using pickle
    :param percent_train: percent of images to be used for training (instead of testing)
    :return: None; output is written to `model_output`
    """
    features = h5py_db['features']
    labels = h5py_db['labels']
    n_train = int(labels.shape[0] * percent_train)

    # C decided with sklearn.model_selection.GridSearchCV
    classifier = LogisticRegression(C=0.1)
    classifier.fit(features[:n_train], labels[:n_train])

    if percent_train < 1.0:
        # the held-out remainder serves as the evaluation split
        predictions = classifier.predict(features[n_train:])
        print(classification_report(labels[n_train:], predictions,
                                    target_names=h5py_db['label_names']))

    with open(model_output, 'wb') as f:
        f.write(pickle.dumps(classifier))
| 37.575758
| 90
| 0.704839
| 167
| 1,240
| 5.08982
| 0.473054
| 0.056471
| 0.042353
| 0.032941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016732
| 0.180645
| 1,240
| 32
| 91
| 38.75
| 0.819882
| 0.43871
| 0
| 0
| 0
| 0
| 0.088989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.307692
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e955b53af943d2f078f97e589977586caea5ae03
| 1,760
|
py
|
Python
|
Test/final/V5_baseline_CC_ref/aggregate.py
|
WangWenhao0716/ISC-Track1-Submission
|
3484142c0550262c90fc229e5e0ba719c58c592d
|
[
"MIT"
] | 46
|
2021-10-31T08:02:51.000Z
|
2022-03-11T08:42:30.000Z
|
Test/final/V5_baseline_CC_ref/aggregate.py
|
WangWenhao0716/ISC-Track1-Submission
|
3484142c0550262c90fc229e5e0ba719c58c592d
|
[
"MIT"
] | 3
|
2021-11-18T09:35:45.000Z
|
2022-03-31T01:20:34.000Z
|
Test/final/V5_baseline_CC_ref/aggregate.py
|
WangWenhao0716/ISC-Track1-Submission
|
3484142c0550262c90fc229e5e0ba719c58c592d
|
[
"MIT"
] | 8
|
2021-12-01T08:02:08.000Z
|
2022-02-26T13:29:36.000Z
|
import pandas as pd
def _load_swapped(path):
    """Read a prediction CSV and swap its query_id/reference_id columns."""
    df = pd.read_csv(path)
    tmp = list(df['query_id'])
    df['query_id'] = list(df['reference_id'])
    df['reference_id'] = tmp
    return df

def _id_pairs(df):
    """(query_id, reference_id) tuples of a prediction frame, for set ops."""
    return list(zip(df['query_id'], df['reference_id']))

# The original repeated the load/swap/pair-building logic verbatim for each
# of the three model outputs; it is factored into the helpers above.
v_4 = _load_swapped('50/predictions_dev_queries_50k_normalized_exp.csv')
v_5 = _load_swapped('ibn/predictions_dev_queries_50k_normalized_exp.csv')
v_6 = _load_swapped('152/predictions_dev_queries_50k_normalized_exp.csv')

# Keep only the (query, reference) pairs predicted by all three models.
inter_456 = list(set(_id_pairs(v_4)) & set(_id_pairs(v_5)) & set(_id_pairs(v_6)))

new_456 = pd.DataFrame()
new_456['query_id'] = [pair[0] for pair in inter_456]
new_456['reference_id'] = [pair[1] for pair in inter_456]

# Inner-merge the consensus pairs back against each model to recover their
# scores, then stack the three result frames.
frames = [pd.merge(new_456, v, on=['query_id', 'reference_id'], how='inner')
          for v in (v_4, v_5, v_6)]
fast_456 = pd.concat(frames)
fast_456.to_csv('R-baseline-CC-234-50k.csv', index=False)
| 31.428571
| 74
| 0.710795
| 360
| 1,760
| 3.088889
| 0.161111
| 0.030576
| 0.043165
| 0.04946
| 0.557554
| 0.519784
| 0.519784
| 0.236511
| 0.132194
| 0.132194
| 0
| 0.075567
| 0.097727
| 1,760
| 55
| 75
| 32
| 0.624685
| 0
| 0
| 0.044444
| 0
| 0
| 0.255114
| 0.098864
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022222
| 0
| 0.022222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9569e3a4e8763ed40f2c7965c464907cae6ec57
| 744
|
py
|
Python
|
tutorial/flask-api-mongo/app/services/mail_service.py
|
carrenolg/python
|
7c1f0013d911177ce3bc2c5ea58b8e6e562b7282
|
[
"Apache-2.0"
] | null | null | null |
tutorial/flask-api-mongo/app/services/mail_service.py
|
carrenolg/python
|
7c1f0013d911177ce3bc2c5ea58b8e6e562b7282
|
[
"Apache-2.0"
] | null | null | null |
tutorial/flask-api-mongo/app/services/mail_service.py
|
carrenolg/python
|
7c1f0013d911177ce3bc2c5ea58b8e6e562b7282
|
[
"Apache-2.0"
] | null | null | null |
from threading import Thread
from flask_mail import Mail, Message
from resources.errors import InternalServerError
mail = Mail(app=None)  # rebound to the real app by initialize_mail_service()
app = None  # Flask application instance, set by initialize_mail_service()
def initialize_mail_service(appiclation):
    """Bind the module-level Mail instance and app reference to the Flask app.

    NOTE(review): the parameter name is a typo for "application"; renaming it
    would break keyword-argument callers, so it is left as-is.
    """
    global mail
    global app
    mail = Mail(app=appiclation)
    app = appiclation
def send_async_email(app, msg, mail):
    """Send *msg* through *mail* inside *app*'s application context.

    Intended to run on a worker thread (see send_email). A refused SMTP
    connection is surfaced as an InternalServerError.
    """
    with app.app_context():
        try:
            mail.send(msg)
        except ConnectionRefusedError:
            raise InternalServerError("[MAIL SERVER] not working")
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a message and dispatch it asynchronously on a background thread."""
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    worker = Thread(target=send_async_email, args=(app, message, mail))
    worker.start()
| 25.655172
| 66
| 0.711022
| 94
| 744
| 5.489362
| 0.414894
| 0.089147
| 0.042636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201613
| 744
| 28
| 67
| 26.571429
| 0.868687
| 0
| 0
| 0
| 0
| 0
| 0.033602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9570255d9896891bde513fb7630bb22b041b8d0
| 18,541
|
py
|
Python
|
vxsandbox/resources/tests/test_http.py
|
praekeltfoundation/vumi-sandbox
|
1e2dfca8325ce98e52fe32a072749fe4cf7f448d
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T08:38:28.000Z
|
2021-05-26T08:38:28.000Z
|
vxsandbox/resources/tests/test_http.py
|
praekelt/vumi-sandbox
|
1e2dfca8325ce98e52fe32a072749fe4cf7f448d
|
[
"BSD-3-Clause"
] | 24
|
2015-03-04T08:33:12.000Z
|
2016-08-18T07:57:12.000Z
|
vxsandbox/resources/tests/test_http.py
|
praekeltfoundation/vumi-sandbox
|
1e2dfca8325ce98e52fe32a072749fe4cf7f448d
|
[
"BSD-3-Clause"
] | null | null | null |
import base64
import json
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_NONE,
SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD)
from twisted.web.http_headers import Headers
from twisted.internet.defer import inlineCallbacks, fail, succeed
from vxsandbox.resources.http import (
HttpClientContextFactory, HttpClientPolicyForHTTPS, make_context_factory,
HttpClientResource)
from vxsandbox.resources.tests.utils import ResourceTestCaseBase
class DummyResponse(object):
    """Minimal stand-in for a twisted.web response: starts with empty
    headers; `code` and `content` are attached by DummyHTTPClient."""
    def __init__(self):
        self.headers = Headers({})
class DummyHTTPClient(object):
    """Test double that records HTTP requests and replays canned results."""

    def __init__(self):
        self._next_http_request_result = None  # Deferred returned by request()
        self.http_requests = []                # recorded (args, kwargs) pairs

    def set_agent(self, agent):
        self.agent = agent

    def get_context_factory(self):
        # We need to dig around inside our Agent to find the context factory.
        # Since this involves private attributes that have changed a few times
        # recently, we need to try various options.
        if hasattr(self.agent, "_contextFactory"):
            # For Twisted 13.x
            return self.agent._contextFactory
        elif hasattr(self.agent, "_policyForHTTPS"):
            # For Twisted 14.x
            return self.agent._policyForHTTPS
        elif hasattr(self.agent, "_endpointFactory"):
            # For Twisted 15.0.0 (and possibly newer)
            return self.agent._endpointFactory._policyForHTTPS
        else:
            raise NotImplementedError(
                "I can't find the context factory on this Agent. This seems"
                " to change every few versions of Twisted.")

    def fail_next(self, error):
        """Queue a failing Deferred as the next request() result."""
        self._next_http_request_result = fail(error)

    def succeed_next(self, body, code=200, headers=None):
        """Queue a successful response with *body*, *code* and extra headers.

        `headers` defaults to None instead of a mutable `{}` shared between
        calls (the original default was never mutated, but the pattern is a
        latent bug).
        """
        default_headers = {
            'Content-Length': str(len(body)),
        }
        if headers is not None:
            default_headers.update(headers)
        response = DummyResponse()
        response.code = code
        for header, value in default_headers.items():
            response.headers.addRawHeader(header, value)
        response.content = lambda: succeed(body)
        self._next_http_request_result = succeed(response)

    def request(self, *args, **kw):
        # Record the call and hand back whatever result was queued last.
        self.http_requests.append((args, kw))
        return self._next_http_request_result
class TestHttpClientResource(ResourceTestCaseBase):
    """Tests for HttpClientResource across several Twisted versions.

    Several assertions branch on ``HttpClientPolicyForHTTPS is None``,
    which the imports use to distinguish Twisted<14.0.0 from newer
    releases. (Python 2 file: see the ``unicode`` check below.)
    """

    resource_cls = HttpClientResource

    @inlineCallbacks
    def setUp(self):
        super(TestHttpClientResource, self).setUp()
        yield self.create_resource({})
        self.dummy_client = DummyHTTPClient()
        self.patch(self.resource_cls,
                   'http_client_class', self.get_dummy_client)

    def get_dummy_client(self, agent):
        # Installed as http_client_class so the resource uses our fake client.
        self.dummy_client.set_agent(agent)
        return self.dummy_client

    def http_request_fail(self, error):
        self.dummy_client.fail_next(error)

    def http_request_succeed(self, body, code=200, headers=None):
        # ``headers`` previously defaulted to a shared mutable ``{}``;
        # ``None`` avoids cross-call aliasing while keeping behaviour.
        self.dummy_client.succeed_next(body, code, headers or {})

    def assert_not_unicode(self, arg):
        # Python 2: request pieces must be byte strings, not unicode.
        self.assertFalse(isinstance(arg, unicode))

    def get_context_factory(self):
        return self.dummy_client.get_context_factory()

    def get_context(self, context_factory=None):
        if context_factory is None:
            context_factory = self.get_context_factory()
        if hasattr(context_factory, 'creatorForNetloc'):
            # This context_factory is a new-style IPolicyForHTTPS
            # implementation, so we need to get a context from through its
            # client connection creator. The creator could either be a wrapper
            # around a ClientContextFactory (in which case we treat it like
            # one) or a ClientTLSOptions object (which means we have to grab
            # the context from a private attribute).
            creator = context_factory.creatorForNetloc('example.com', 80)
            if hasattr(creator, 'getContext'):
                return creator.getContext()
            else:
                return creator._ctx
        else:
            # This context_factory is an old-style WebClientContextFactory and
            # will build us a context object if we ask nicely.
            return context_factory.getContext('example.com', 80)

    def assert_http_request(self, url, method='GET', headers=None, data=None,
                            timeout=None, files=None):
        """Assert exactly one request was made with the given parameters."""
        timeout = (timeout if timeout is not None
                   else self.resource.timeout)
        args = (method, url,)
        kw = dict(headers=headers, data=data,
                  timeout=timeout, files=files)
        [(actual_args, actual_kw)] = self.dummy_client.http_requests
        # NOTE: Files are handed over to treq as file pointer-ish things
        # which in our case are `StringIO` instances.
        actual_kw_files = actual_kw.get('files')
        if actual_kw_files is not None:
            # Compare file payloads by value, then drop them from both dicts
            # so the plain equality check below can cover everything else.
            actual_kw_files = actual_kw.pop('files', None)
            kw_files = kw.pop('files', {})
            for name, file_data in actual_kw_files.items():
                kw_file_data = kw_files[name]
                file_name, content_type, sio = file_data
                self.assertEqual(
                    (file_name, content_type, sio.getvalue()),
                    kw_file_data)
        self.assertEqual((actual_args, actual_kw), (args, kw))
        self.assert_not_unicode(actual_args[0])
        self.assert_not_unicode(actual_kw.get('data'))
        headers = actual_kw.get('headers')
        if headers is not None:
            for key, values in headers.items():
                self.assert_not_unicode(key)
                for value in values:
                    self.assert_not_unicode(value)

    def test_make_context_factory_no_method_verify_none(self):
        context_factory = make_context_factory(verify_options=VERIFY_NONE)
        self.assertIsInstance(context_factory, HttpClientContextFactory)
        self.assertEqual(context_factory.verify_options, VERIFY_NONE)
        self.assertEqual(context_factory.ssl_method, None)
        self.assertEqual(
            self.get_context(context_factory).get_verify_mode(), VERIFY_NONE)

    def test_make_context_factory_no_method_verify_peer(self):
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory(verify_options=VERIFY_PEER)
        context = self.get_context(context_factory)
        self.assertEqual(context_factory.ssl_method, None)
        self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(context_factory.verify_options, VERIFY_PEER)
            self.assertEqual(context.get_verify_mode(), VERIFY_PEER)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)

    def test_make_context_factory_no_method_verify_peer_or_fail(self):
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory(
            verify_options=(VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT))
        context = self.get_context(context_factory)
        self.assertEqual(context_factory.ssl_method, None)
        self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(
                context_factory.verify_options,
                VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
            self.assertEqual(
                context.get_verify_mode(),
                VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)

    def test_make_context_factory_no_method_no_verify(self):
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory()
        self.assertEqual(context_factory.ssl_method, None)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(context_factory.verify_options, None)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)

    def test_make_context_factory_sslv3_no_verify(self):
        # This test's behaviour depends on the version of Twisted being used.
        context_factory = make_context_factory(ssl_method=SSLv3_METHOD)
        self.assertEqual(context_factory.ssl_method, SSLv3_METHOD)
        if HttpClientPolicyForHTTPS is None:
            # We have Twisted<14.0.0
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(context_factory.verify_options, None)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)

    @inlineCallbacks
    def test_handle_get(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command('get',
                                            url='http://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('http://www.example.com', method='GET')

    @inlineCallbacks
    def test_handle_post(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command('post',
                                            url='http://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('http://www.example.com', method='POST')

    @inlineCallbacks
    def test_handle_patch(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command('patch',
                                            url='http://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('http://www.example.com', method='PATCH')

    @inlineCallbacks
    def test_handle_head(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command('head',
                                            url='http://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('http://www.example.com', method='HEAD')

    @inlineCallbacks
    def test_handle_delete(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command('delete',
                                            url='http://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('http://www.example.com', method='DELETE')

    @inlineCallbacks
    def test_handle_put(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command('put',
                                            url='http://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('http://www.example.com', method='PUT')

    @inlineCallbacks
    def test_failed_get(self):
        self.http_request_fail(ValueError("HTTP request failed"))
        reply = yield self.dispatch_command('get',
                                            url='http://www.example.com')
        self.assertFalse(reply['success'])
        self.assertEqual(reply['reason'], "HTTP request failed")
        self.assert_http_request('http://www.example.com', method='GET')

    @inlineCallbacks
    def test_null_url(self):
        reply = yield self.dispatch_command('get')
        self.assertFalse(reply['success'])
        self.assertEqual(reply['reason'], "No URL given")

    @inlineCallbacks
    def test_https_request(self):
        # This test's behaviour depends on the version of Twisted being used.
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command('get',
                                            url='https://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('https://www.example.com', method='GET')
        context_factory = self.get_context_factory()
        self.assertEqual(context_factory.ssl_method, None)
        if HttpClientPolicyForHTTPS is None:
            self.assertIsInstance(context_factory, HttpClientContextFactory)
            self.assertEqual(context_factory.verify_options, None)
        else:
            self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)

    @inlineCallbacks
    def test_https_request_verify_none(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com',
            verify_options=['VERIFY_NONE'])
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('https://www.example.com', method='GET')
        context = self.get_context()
        self.assertEqual(context.get_verify_mode(), VERIFY_NONE)

    @inlineCallbacks
    def test_https_request_verify_peer_or_fail(self):
        # This test's behaviour depends on the version of Twisted being used.
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com',
            verify_options=['VERIFY_PEER', 'VERIFY_FAIL_IF_NO_PEER_CERT'])
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('https://www.example.com', method='GET')
        context = self.get_context()
        # We don't control verify mode in newer Twisted.
        self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
        if HttpClientPolicyForHTTPS is None:
            self.assertEqual(
                context.get_verify_mode(),
                VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)

    @inlineCallbacks
    def test_handle_post_files(self):
        self.http_request_succeed('')
        reply = yield self.dispatch_command(
            'post', url='https://www.example.com', files={
                'foo': {
                    'file_name': 'foo.json',
                    'content_type': 'application/json',
                    'data': base64.b64encode(json.dumps({'foo': 'bar'})),
                }
            })
        self.assertTrue(reply['success'])
        self.assert_http_request(
            'https://www.example.com', method='POST', files={
                'foo': ('foo.json', 'application/json',
                        json.dumps({'foo': 'bar'})),
            })

    @inlineCallbacks
    def test_data_limit_exceeded_using_head_method(self):
        # HEAD has no body, so an over-limit Content-Length must not fail.
        self.http_request_succeed('', headers={
            'Content-Length': str(self.resource.DEFAULT_DATA_LIMIT + 1),
        })
        reply = yield self.dispatch_command(
            'head', url='https://www.example.com',)
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "")
        self.assert_http_request('https://www.example.com', method='HEAD')

    @inlineCallbacks
    def test_data_limit_exceeded_using_header(self):
        self.http_request_succeed('', headers={
            'Content-Length': str(self.resource.DEFAULT_DATA_LIMIT + 1),
        })
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com',)
        self.assertFalse(reply['success'])
        self.assertEqual(
            reply['reason'],
            'Received %d bytes, maximum of %s bytes allowed.' % (
                self.resource.DEFAULT_DATA_LIMIT + 1,
                self.resource.DEFAULT_DATA_LIMIT,))

    @inlineCallbacks
    def test_data_limit_exceeded_inferred_from_body(self):
        self.http_request_succeed('1' * (self.resource.DEFAULT_DATA_LIMIT + 1))
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com',)
        self.assertFalse(reply['success'])
        self.assertEqual(
            reply['reason'],
            'Received %d bytes, maximum of %s bytes allowed.' % (
                self.resource.DEFAULT_DATA_LIMIT + 1,
                self.resource.DEFAULT_DATA_LIMIT,))

    @inlineCallbacks
    def test_https_request_method_default(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('https://www.example.com', method='GET')
        context_factory = self.get_context_factory()
        self.assertEqual(context_factory.ssl_method, None)

    @inlineCallbacks
    def test_https_request_method_SSLv3(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com', ssl_method='SSLv3')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('https://www.example.com', method='GET')
        context_factory = self.get_context_factory()
        self.assertEqual(context_factory.ssl_method, SSLv3_METHOD)

    @inlineCallbacks
    def test_https_request_method_SSLv23(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com', ssl_method='SSLv23')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('https://www.example.com', method='GET')
        context_factory = self.get_context_factory()
        self.assertEqual(context_factory.ssl_method, SSLv23_METHOD)

    @inlineCallbacks
    def test_https_request_method_TLSv1(self):
        self.http_request_succeed("foo")
        reply = yield self.dispatch_command(
            'get', url='https://www.example.com', ssl_method='TLSv1')
        self.assertTrue(reply['success'])
        self.assertEqual(reply['body'], "foo")
        self.assert_http_request('https://www.example.com', method='GET')
        context_factory = self.get_context_factory()
        self.assertEqual(context_factory.ssl_method, TLSv1_METHOD)
| 42.138636
| 79
| 0.644356
| 2,092
| 18,541
| 5.476577
| 0.118069
| 0.085537
| 0.038579
| 0.031422
| 0.680981
| 0.626167
| 0.607052
| 0.585668
| 0.552675
| 0.541067
| 0
| 0.004762
| 0.252521
| 18,541
| 439
| 80
| 42.234624
| 0.821921
| 0.073782
| 0
| 0.514368
| 0
| 0
| 0.102887
| 0.001575
| 0
| 0
| 0
| 0
| 0.278736
| 1
| 0.112069
| false
| 0
| 0.020115
| 0.002874
| 0.16954
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e95a4fa6b39694c0762d544398c6a91dc4eb000f
| 722
|
py
|
Python
|
soundDB/__init__.py
|
gjoseph92/soundDB2
|
4d9cc93cc596a5089233f17b0b8be252f73e1224
|
[
"CC0-1.0"
] | 3
|
2017-05-16T19:37:32.000Z
|
2020-03-29T21:54:33.000Z
|
soundDB/__init__.py
|
gjoseph92/soundDB2
|
4d9cc93cc596a5089233f17b0b8be252f73e1224
|
[
"CC0-1.0"
] | 19
|
2016-12-02T20:47:24.000Z
|
2021-10-05T19:01:01.000Z
|
soundDB/__init__.py
|
gjoseph92/soundDB2
|
4d9cc93cc596a5089233f17b0b8be252f73e1224
|
[
"CC0-1.0"
] | 2
|
2017-05-10T23:01:06.000Z
|
2019-12-27T19:49:29.000Z
|
from .accessor import Accessor
from . import parsers
import inspect
def populateAccessors():
    """
    Find the filetype-specific Accessor subclasses defined in the parsers
    module (i.e. NVSPL, SRCID, etc.) and return a dict mapping each class's
    ``endpointName`` to the class, so callers can expose one entry per
    Endpoint in the soundDB namespace.
    """
    def _is_concrete_accessor(obj):
        # Proper subclasses only; exclude the Accessor base class itself.
        return (inspect.isclass(obj)
                and issubclass(obj, Accessor)
                and obj is not Accessor)

    return {
        cls.endpointName: cls
        for _name, cls in inspect.getmembers(parsers, _is_concrete_accessor)
    }
# Expose one package-level name per Accessor subclass (keyed by endpoint
# name), then scrub the temporary names so they don't leak into the
# public namespace.  Note: ``accessor`` is bound here because importing
# a submodule sets it as an attribute of the package (= these globals).
globals().update(populateAccessors())
del inspect, accessor, parsers, populateAccessors
| 34.380952
| 118
| 0.756233
| 89
| 722
| 6.134831
| 0.617978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17313
| 722
| 20
| 119
| 36.1
| 0.914573
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e95c5e6fc88c9d5b12bafc54c0d0afb1690c36cf
| 556
|
py
|
Python
|
tests/testLoadMapFromString.py
|
skowronskij/OGCServer
|
3fd11438180944ffa43e315c6390e89437a28f4e
|
[
"BSD-3-Clause"
] | 90
|
2015-04-30T22:13:14.000Z
|
2022-02-16T17:30:11.000Z
|
tests/testLoadMapFromString.py
|
skowronskij/OGCServer
|
3fd11438180944ffa43e315c6390e89437a28f4e
|
[
"BSD-3-Clause"
] | 6
|
2019-09-09T06:07:27.000Z
|
2020-06-17T09:52:49.000Z
|
tests/testLoadMapFromString.py
|
skowronskij/OGCServer
|
3fd11438180944ffa43e315c6390e89437a28f4e
|
[
"BSD-3-Clause"
] | 28
|
2015-05-12T09:08:17.000Z
|
2021-07-02T11:53:29.000Z
|
import nose
import os
from ogcserver.WMS import BaseWMSFactory
def test_wms_capabilities():
    """Load the mapfile XML that sits next to this test file and verify the
    factory ends up with exactly one layer and one style."""
    base_path = os.path.split(__file__)[0]
    xml_path = os.path.join(base_path, 'mapfile_encoding.xml')
    factory = BaseWMSFactory()
    with open(xml_path) as handle:
        xml_settings = handle.read()
    factory.loadXML(xmlstring=xml_settings, basepath=base_path)
    factory.finalize()
    if len(factory.layers) != 1:
        raise Exception('Incorrect number of layers')
    if len(factory.styles) != 1:
        raise Exception('Incorrect number of styles')
    return True
| 27.8
| 63
| 0.676259
| 74
| 556
| 4.918919
| 0.554054
| 0.065934
| 0.043956
| 0.131868
| 0.175824
| 0.175824
| 0
| 0
| 0
| 0
| 0
| 0.004608
| 0.219424
| 556
| 19
| 64
| 29.263158
| 0.834101
| 0
| 0
| 0
| 0
| 0
| 0.129496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e95f809c079ce79cbabf21b0bd9fca926c8f6149
| 864
|
py
|
Python
|
setup.py
|
mikemalinowski/insomnia
|
ea637e5eba608eacd1731239f7ddf6bb91aacc9e
|
[
"MIT"
] | 2
|
2019-02-28T09:58:55.000Z
|
2020-03-06T05:03:34.000Z
|
setup.py
|
mikemalinowski/insomnia
|
ea637e5eba608eacd1731239f7ddf6bb91aacc9e
|
[
"MIT"
] | null | null | null |
setup.py
|
mikemalinowski/insomnia
|
ea637e5eba608eacd1731239f7ddf6bb91aacc9e
|
[
"MIT"
] | null | null | null |
import setuptools

# Long description comes from the README when present; fall back to an
# empty string (e.g. when building from a tree without README.md).
try:
    with open('README.md', 'r') as fh:
        long_description = fh.read()
except (IOError, OSError):
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # and genuine programming errors are no longer silently swallowed.
    long_description = ''

setuptools.setup(
    name='blackout',
    version='1.0.4',
    author='Mike Malinowski',
    author_email='mike@twisted.space',
    description='A python package making it easy to drop a multi-module package from sys.modules',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/mikemalinowski/blackout',
    packages=setuptools.find_packages(),
    entry_points="""
        [console_scripts]
        blackout = blackout:blackout
    """,
    py_modules=["blackout"],
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
)
| 28.8
| 99
| 0.635417
| 92
| 864
| 5.836957
| 0.728261
| 0.139665
| 0.070764
| 0.111732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004566
| 0.239583
| 864
| 29
| 100
| 29.793103
| 0.812785
| 0
| 0
| 0
| 0
| 0
| 0.440719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e96093e48bfaf833c59e3c55fbafb9b3d90f3407
| 710
|
py
|
Python
|
src/hypermd/html/html.py
|
Riib11/HyperMD
|
d6921b701635356236b00d0a8794ab68d733ad59
|
[
"MIT"
] | null | null | null |
src/hypermd/html/html.py
|
Riib11/HyperMD
|
d6921b701635356236b00d0a8794ab68d733ad59
|
[
"MIT"
] | null | null | null |
src/hypermd/html/html.py
|
Riib11/HyperMD
|
d6921b701635356236b00d0a8794ab68d733ad59
|
[
"MIT"
] | null | null | null |
class Element:
    """A minimal HTML element: tag name, attributes and raw string content.

    ``single`` marks void elements (e.g. ``<br>``) that render without a
    closing tag and ignore ``content``.
    """

    def __init__(self, name, single):
        self.name = name
        self.single = single
        self.attrs = {}
        self.content = ""

    def set_attr(self, k, v):
        """Set attribute ``k`` to value ``v``."""
        self.attrs[k] = v

    def get_attr(self, v):
        """Return the value stored for attribute ``v``.

        Bug fix: the original body was ``return self.attrs[k]`` where ``k``
        is undefined (NameError on every call); it now looks up the
        requested key.
        """
        return self.attrs[v]

    def tohtml(self):
        """Render this element (attributes and content are not escaped)."""
        attrs = (" " + " ".join(["%s=\"%s\"" % (k, v)
                                 for k, v in self.attrs.items()])
                 if len(self.attrs) > 0
                 else "")
        if self.single:
            s = "<%s%s>" % (self.name, attrs)
            return s
        else:
            s = "<%s%s>" % (self.name, attrs)
            s += self.content
            s += "</%s>" % self.name
            return s

    # str()/repr() both render the element as HTML.
    __str__ = tohtml
    __repr__ = tohtml
| 29.583333
| 53
| 0.453521
| 89
| 710
| 3.460674
| 0.303371
| 0.175325
| 0.058442
| 0.097403
| 0.103896
| 0.103896
| 0
| 0
| 0
| 0
| 0
| 0.002278
| 0.38169
| 710
| 24
| 54
| 29.583333
| 0.699317
| 0
| 0
| 0.181818
| 0
| 0
| 0.032349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0
| 0.045455
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e960b0fabb4246bd94bb826b4cf1e4c34f2696b5
| 2,590
|
py
|
Python
|
vk_music/__main__.py
|
w1r2p1/vk_music
|
066fa623f87a6351846011c477cff2aad2943bc5
|
[
"MIT"
] | 7
|
2015-01-26T08:46:12.000Z
|
2020-08-29T13:07:07.000Z
|
vk_music/__main__.py
|
w1r2p1/vk_music
|
066fa623f87a6351846011c477cff2aad2943bc5
|
[
"MIT"
] | 3
|
2015-04-29T20:34:53.000Z
|
2015-07-08T08:43:47.000Z
|
vk_music/__main__.py
|
sashasimkin/vk_music
|
3814909ffd914103e80734e51b01dddb458b1bfe
|
[
"MIT"
] | 4
|
2016-04-24T14:09:48.000Z
|
2019-11-23T14:50:46.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
from subprocess import call
from .vk_music import VkMusic
from .exceptions import AlreadyRunningError
from .defaults import SafeFsStorage
def main():
    """Parse CLI arguments and synchronize a VK user's music into a directory.

    Python 2 script (note the ``.decode('utf-8')`` on the dir argument).
    Shows a desktop notification via ``notify-send`` on success when
    available; failures to notify are deliberately ignored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', type=str, nargs='?', help="Directory for synchronization")
    parser.add_argument("-uid", type=int, default=60411837, help="Vk user id")  # Default is my VK id :-)
    parser.add_argument("-client_id", type=int, default=2970439, help="Application id")  # Application ID from VK
    parser.add_argument("--threads", "-t", type=int, default=2, help="Number of threads to use")
    parser.add_argument("-token", type=str, help="access token to use")
    parser.add_argument("-token_dir", type=str, help="Directory where script will save token and temp data")
    parser.add_argument("-f", dest='force', default=False, action='store_true', help="Ignore already running error")
    parser.add_argument("-from", type=int, default=0, help="Start downloading from position")
    parser.add_argument("-to", type=int, help="End downloading on position")
    parser.add_argument("-redirect_url", type=str, help="Redirect url after getting token")
    args = vars(parser.parse_args())

    # Drop arguments that were not passed. The original deleted keys while
    # iterating items(), which is only safe on Python 2 because items()
    # returns a list; building a new dict is correct on both versions.
    args = {k: v for k, v in args.items() if v is not None}

    workdir = args.get('dir', '').decode('utf-8') or os.getcwd() + '/Music'
    try:
        # Try to create directory if not exists
        if not os.path.isdir(workdir):
            os.makedirs(workdir)
        # Need write access to that dir
        os.chmod(workdir, 0o755)
        if not os.access(workdir, os.W_OK):
            raise Exception('Permission denied for dir %s' % workdir)
    except Exception as e:
        exit("Problem with directory '%s': %s" % (workdir, e))

    storage = SafeFsStorage(workdir)
    try:
        with VkMusic(storage, **args) as manager:
            # Start working
            result = manager.synchronize()
            try:
                call(['notify-send',
                      'Vk Music',
                      'Saved: %(saved)s\n'
                      'Skipped: %(skipped)s\n'
                      'Removed: %(removed)s\n'
                      'Not removed: %(not_removed)s' % result])
            except Exception:
                pass
    except AlreadyRunningError:
        # If is running - terminate
        print('Other sync process is running. Please wait')


if __name__ == '__main__':
    main()
| 39.846154
| 116
| 0.622008
| 329
| 2,590
| 4.802432
| 0.449848
| 0.056962
| 0.107595
| 0.017722
| 0.034177
| 0.034177
| 0
| 0
| 0
| 0
| 0
| 0.011886
| 0.252896
| 2,590
| 64
| 117
| 40.46875
| 0.804651
| 0.088803
| 0
| 0.061224
| 0
| 0
| 0.247129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0.020408
| 0.142857
| 0
| 0.163265
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e962ef78829cd251169298d5da18fd8a33cb94ba
| 950
|
py
|
Python
|
misc/convert.py
|
Fusion-Goettingen/ExtendedTargetTrackingToolbox
|
945ede661e9258a8f1ca8abc00e25727fedf3ac7
|
[
"MIT"
] | 40
|
2018-07-30T13:07:23.000Z
|
2021-08-30T05:53:29.000Z
|
misc/convert.py
|
GitRooky/ExtendedTargetTrackingToolbox
|
945ede661e9258a8f1ca8abc00e25727fedf3ac7
|
[
"MIT"
] | null | null | null |
misc/convert.py
|
GitRooky/ExtendedTargetTrackingToolbox
|
945ede661e9258a8f1ca8abc00e25727fedf3ac7
|
[
"MIT"
] | 21
|
2018-10-03T11:50:00.000Z
|
2022-01-11T06:41:24.000Z
|
# Module metadata for the tracking toolbox.
__author__ = "Jens Honer"
__copyright__ = "Copyright 2018, Jens Honer Tracking Toolbox"
__email__ = "-"
__license__ = "mit"
__version__ = "1.0"
__status__ = "Prototype"
import numpy as np
_bbox_sign_factors = np.asarray(
[
[1.0, 1.0],
[0.0, 1.0],
[-1.0, 1.0],
[-1.0, 0.0],
[-1.0, -1.0],
[0.0, -1.0],
[1.0, -1.0],
[1.0, 0.0],
], dtype='f4')
def convert_rectangle_to_eight_point(bboxes):
pt_set = np.zeros((len(bboxes), 8, 2))
pt_set[:] = bboxes['center_xy'][:, None, :]
for i, bbox in enumerate(bboxes):
s_phi_offset, c_phi_offset = np.sin(bbox['orientation']), np.cos(bbox['orientation'])
rot = np.array([[c_phi_offset, - s_phi_offset], [s_phi_offset, c_phi_offset]])
offset_xy = np.dot(_bbox_sign_factors * 0.5 * bbox['dimension'], rot.T)
pt_set[i, :, :] += offset_xy
return pt_set
| 27.142857
| 93
| 0.548421
| 138
| 950
| 3.391304
| 0.427536
| 0.055556
| 0.070513
| 0.068376
| 0.194444
| 0.153846
| 0.068376
| 0.068376
| 0.068376
| 0.068376
| 0
| 0.062409
| 0.274737
| 950
| 34
| 94
| 27.941176
| 0.616836
| 0
| 0
| 0
| 0
| 0
| 0.116842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9667bd424694f5af16378d0dfcd7bc9fa58a7a6
| 3,356
|
py
|
Python
|
src/base/local_dataset.py
|
wenyushi451/Deep-SAD-PyTorch
|
168d31f538a50fb029739206994ea5517d907853
|
[
"MIT"
] | null | null | null |
src/base/local_dataset.py
|
wenyushi451/Deep-SAD-PyTorch
|
168d31f538a50fb029739206994ea5517d907853
|
[
"MIT"
] | null | null | null |
src/base/local_dataset.py
|
wenyushi451/Deep-SAD-PyTorch
|
168d31f538a50fb029739206994ea5517d907853
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from sklearn.model_selection import train_test_split
import os
import glob
import torch
import numpy as np
from PIL import Image
import pdb
class LocalDataset(Dataset):
    """Image dataset rooted at ``root`` with one sub-directory per class index.

    Files are discovered via the glob ``*/*.[jp][pn][g]`` under ``root``;
    the parent directory name is parsed as the integer label. Label 0 is
    treated as the normal class, any other label as an outlier.
    Returns ``(sample, target, semi_target, index)`` tuples.
    """

    def __init__(
        self,
        root: str,
        dataset_name: str,  # NOTE(review): unused except in the commented-out path below
        target_transform,
        train=True,
        random_state=None,
        split=True,
        random_effect=True,
    ):
        super(Dataset, self).__init__()
        self.target_transform = target_transform
        # Binary setting: 0 = normal, 1 = anomalous.
        self.classes = [0, 1]
        self.root = root
        self.train = train  # training set or test set
        # self.dataset_path = os.path.join(self.root, self.dataset_name)
        # class_idx/image
        X = np.array(glob.glob(os.path.join(self.root, "*/*.[jp][pn][g]")))
        # Label = name of the parent directory (assumes '/' separators,
        # i.e. POSIX paths — TODO confirm on Windows).
        y = [int(i.split("/")[-2]) for i in X]
        y = np.array(y)
        if split:
            idx_norm = y == 0
            idx_out = y != 0
            # 80% data for training and 20% for testing; keep outlier ratio
            # NOTE(review): comment above says 80/20 but test_size=0.1 below
            # produces a 90/10 split — confirm which is intended.
            # pdb.set_trace()
            # Normals and outliers are split separately (both stratified) so
            # the train/test outlier ratio matches the full dataset.
            X_train_norm, X_test_norm, y_train_norm, y_test_norm = train_test_split(
                X[idx_norm], y[idx_norm], test_size=0.1, random_state=random_state, stratify=y[idx_norm]
            )
            X_train_out, X_test_out, y_train_out, y_test_out = train_test_split(
                X[idx_out], y[idx_out], test_size=0.1, random_state=random_state, stratify=y[idx_out]
            )
            X_train = np.concatenate((X_train_norm, X_train_out))
            X_test = np.concatenate((X_test_norm, X_test_out))
            y_train = np.concatenate((y_train_norm, y_train_out))
            y_test = np.concatenate((y_test_norm, y_test_out))
            if self.train:
                self.data = X_train
                self.targets = torch.tensor(y_train, dtype=torch.int64)
            else:
                self.data = X_test
                self.targets = torch.tensor(y_test, dtype=torch.int64)
        else:
            self.data = X
            self.targets = torch.tensor(y, dtype=torch.int64)
        # All zeros initially; presumably the semi-supervised labels are
        # assigned later by the caller — TODO confirm.
        self.semi_targets = torch.zeros_like(self.targets)
        # for training we will add brightness variance
        if random_effect:
            self.transform = transforms.Compose(
                [
                    # transforms.ColorJitter(
                    #     brightness=0.5 + int(np.random.rand(1)), contrast=0.5 + int(np.random.rand(1))
                    # ),
                    # saturation=0.5 + int(np.random.rand(1)),
                    # hue=0.5 + int(np.random.rand(1))),
                    transforms.Resize((224, 224)),
                    transforms.ToTensor(),
                ]
            )
        # for testing
        else:
            self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target, semi_target, index)
        """
        data = Image.open(self.data[index])
        data = self.transform(data)
        # target collapses all non-zero labels to 1 (anomalous).
        sample, target, semi_target = data, 0 if self.targets[index] == 0 else 1, int(self.semi_targets[index])
        return sample, target, semi_target, index

    def __len__(self):
        # Number of discovered image files (in the selected split).
        return len(self.data)
| 35.326316
| 111
| 0.56615
| 425
| 3,356
| 4.249412
| 0.244706
| 0.016611
| 0.011074
| 0.015504
| 0.355482
| 0.183832
| 0.119601
| 0.048726
| 0.048726
| 0.048726
| 0
| 0.020318
| 0.325387
| 3,356
| 94
| 112
| 35.702128
| 0.777385
| 0.152563
| 0
| 0.044776
| 0
| 0
| 0.005745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.134328
| 0.014925
| 0.223881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e96b4f43c95a1b4ce5857c21e88b3785232408aa
| 9,142
|
py
|
Python
|
main.py
|
Lmy0217/Flight
|
faf5045712c4d28e0ca3df408308a5e3b9bf8038
|
[
"MIT"
] | 2
|
2019-03-31T01:42:29.000Z
|
2019-05-16T06:31:50.000Z
|
main.py
|
Lmy0217/Flight
|
faf5045712c4d28e0ca3df408308a5e3b9bf8038
|
[
"MIT"
] | 1
|
2019-03-31T01:45:25.000Z
|
2019-04-17T05:46:35.000Z
|
main.py
|
Lmy0217/Flight
|
faf5045712c4d28e0ca3df408308a5e3b9bf8038
|
[
"MIT"
] | 1
|
2019-03-31T01:42:34.000Z
|
2019-03-31T01:42:34.000Z
|
#coding=utf-8
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox as mBox
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import datetime
import threading
import flight
import outlier
import analytics
# 标题
win = tk.Tk()
win.title("机票数据爬取分析预测")
win.resizable(0, 0)
# 三个页面
tabControl = ttk.Notebook(win)
tab1 = ttk.Frame(tabControl)
tabControl.add(tab1, text='爬取')
tab2 = ttk.Frame(tabControl)
tabControl.add(tab2, text='分析')
tab3 = ttk.Frame(tabControl)
tabControl.add(tab3, text='预测')
tabControl.pack(expand=1, fill="both")
# 参数框
monty = ttk.LabelFrame(tab1, text='')
monty.grid(column=0, row=0, padx=8, pady=4)
labelsFrame = ttk.LabelFrame(monty, text=' 参数 ')
labelsFrame.grid(column=0, row=0)
# 城市标签
ttk.Label(labelsFrame, text="城市:").grid(column=0, row=0, sticky='W')
# 城市输入框
city = tk.Text(labelsFrame, width=20, height=10)
city.insert(tk.END, "'SHA', 'SIA', 'BJS', 'CAN', 'SZX', 'CTU', 'HGH', 'WUH', 'CKG', 'TAO', 'CSX', 'NKG', 'XMN', 'KMG', 'DLC', 'TSN', 'CGO', 'SYX', 'TNA', 'FOC'")
city.grid(column=1, row=0, sticky='W')
# 起始日期标签
ttk.Label(labelsFrame, text="起始日期:").grid(column=0, row=1, sticky='W')
# 起始日期输入框
date1 = tk.StringVar()
da_days = datetime.datetime.now() + datetime.timedelta(days=1)
date1.set(da_days.strftime('%Y-%m-%d'))
date1Entered = ttk.Entry(labelsFrame, textvariable=date1)
date1Entered.grid(column=1, row=1, sticky='W')
# 截止日期标签
ttk.Label(labelsFrame, text="截止日期:").grid(column=0, row=2, sticky='W')
# 截止日期输入框
date2 = tk.StringVar()
da_days2 = datetime.datetime.now() + datetime.timedelta(days=1)
date2.set(da_days2.strftime('%Y-%m-%d'))
date2Entered = ttk.Entry(labelsFrame, textvariable=date2)
date2Entered.grid(column=1, row=2, sticky='W')
# Log框
scrolW = 91;
scrolH = 37;
scr = scrolledtext.ScrolledText(monty, width=scrolW, height=scrolH, wrap=tk.WORD)
scr.grid(column=3, row=0, sticky='WE', rowspan=5)
# Scrape flight data; the result is stashed on the function object itself
# (spider_flight.flight) so the save callback can reach it later.
def spider_flight():
    # NOTE(review): "0.0" is tolerated by Tk (clamped to the first index);
    # flight.spider presumably parses the raw Text contents itself — confirm.
    spider_flight.flight = flight.spider(city.get("0.0", "end"), date1.get(), date2.get(), scr)
spider_flight.flight = None  # no scrape has completed yet
def run_spider_flight():
    # Log the request parameters, then run the scraper in a worker thread so
    # the UI stays responsive.
    scr.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n爬取数据:\n城市:'
               + str(city.get("0.0", "end")) + '\n日期:' + str(date1.get()) + ' 至 ' + str(date2.get()) + '\n\n')
    t = threading.Thread(target=spider_flight)
    t.start()
# "Scrape" button.
spider = ttk.Button(labelsFrame, text="爬取", width=10, command=run_spider_flight)
spider.grid(column=0, row=4, sticky='W')
# Save the scraped data to a user-chosen JSON file.
def save_file():
    if spider_flight.flight is not None:
        fname = filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        # An empty name means the user cancelled the dialog.  (The original
        # used "fname is not ''" — an identity comparison with a literal,
        # which is unreliable and raises SyntaxWarning on Python >= 3.8.)
        if fname:
            spider_flight.flight.save(fname)
            scr.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n数据保存到 ' + fname + '\n\n')
    else:
        mBox.showwarning('Python Message Warning Box', '请先爬取数据!')
# "Save" button.
save = ttk.Button(labelsFrame, text="保存", width=10, command=save_file)
save.grid(column=1, row=4, sticky='E')
# Apply uniform padding to everything in the scraping tab.
for child in labelsFrame.winfo_children():
    child.grid_configure(padx=8, pady=4)
for child in monty.winfo_children():
    child.grid_configure(padx=3, pady=1)
# Parameter frame for the analysis tab.
monty2 = ttk.LabelFrame(tab2, text='')
monty2.grid(column=0, row=0, padx=8, pady=4)
labelsFrame2 = ttk.LabelFrame(monty2, text=' 参数 ')
labelsFrame2.grid(column=0, row=0)
# Scrolled log area for analysis messages.
scrolW = 34;
scrolH = 25;
scr2 = scrolledtext.ScrolledText(monty2, width=scrolW, height=scrolH, wrap=tk.WORD)
scr2.grid(column=0, row=3, sticky='WE')
# "Data" label.
ttk.Label(labelsFrame2, text="数据:").grid(column=0, row=0, sticky='W')
# Open a previously saved JSON data file and build the outlier analyzer,
# stashed on the function object (data_file.outlier) for other callbacks.
def data_file():
    fname = filedialog.askopenfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
    # Empty string => the dialog was cancelled.  (The original compared with
    # "fname is not ''", an identity test that warns on Python >= 3.8.)
    if fname:
        data_file.outlier = outlier.Outlier(fname)
        scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n打开文件 ' + fname + '\n\n')
data_file.outlier = None  # no file opened yet
# "Open file" button.
data = ttk.Button(labelsFrame2, text="打开文件", width=10, command=data_file)
data.grid(column=1, row=0, sticky='E')
# "Outlier count" label.
ttk.Label(labelsFrame2, text="异常数:").grid(column=0, row=1, sticky='W')
# Outlier-count entry, default 5.
diff = tk.IntVar()
diff.set(5)
diffEntered = ttk.Entry(labelsFrame2, textvariable=diff)
diffEntered.grid(column=1, row=1, sticky='W')
# Analyze the opened data and draw the outlier chart on the embedded canvas.
def drawdiff():
    # Parse the requested number of outliers; fall back to 5 on bad input.
    try:
        num_diff = int(diffEntered.get())
    except ValueError:  # was a bare except: — only int() conversion can fail here
        num_diff = 5
        diffEntered.delete(0, tk.END)
        diffEntered.insert(0, 5)
    # Clear the figure so successive runs do not draw on top of each other.
    drawdiff.f.clf()
    drawdiff.out = data_file.outlier.extreme(drawdiff.f, scr2, num_diff)
    # FigureCanvasTkAgg.show() was deprecated and later removed from
    # Matplotlib; draw() is the supported way to refresh the canvas.
    drawdiff.canvas.draw()
drawdiff.out = None  # result of the last analysis, consumed by save_file2
drawdiff.f = plt.figure()
drawdiff.canvas = FigureCanvasTkAgg(drawdiff.f, master=monty2)
drawdiff.canvas.draw()
drawdiff.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)
def run_drawdiff():
    # Require an opened data file; the analysis runs in a worker thread so
    # the UI stays responsive while output streams into the log widget.
    if data_file.outlier is not None:
        scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n分析数据(设定 '
                    + str(diffEntered.get()) + ' 个异常值)...\n\n异常值:\n')
        t = threading.Thread(target=drawdiff)
        t.start()
    else:
        mBox.showwarning('Python Message Warning Box', '请先打开文件!')
# "Analyze" button.
da = ttk.Button(labelsFrame2, text="分析", width=10, command=run_drawdiff)
da.grid(column=0, row=2, sticky='W')
# Save the detected outliers to a user-chosen JSON file.
def save_file2():
    if drawdiff.out is not None:
        fname = filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        # Empty name => cancelled dialog.  (The original used the fragile
        # "fname is not ''" identity comparison.)
        if fname:
            with open(fname, 'w') as f1:
                f1.write(str(drawdiff.out))
            scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n异常值保存到 ' + fname + '\n\n')
    else:
        mBox.showwarning('Python Message Warning Box', '请先分析数据!')
# "Save" button.
save2 = ttk.Button(labelsFrame2, text="保存", width=10, command=save_file2)
save2.grid(column=1, row=2, sticky='E')
# Apply uniform padding to everything in the analysis tab.
for child in labelsFrame2.winfo_children():
    child.grid_configure(padx=8, pady=4)
for child in monty2.winfo_children():
    child.grid_configure(padx=8, pady=4)
# Parameter frame for the prediction tab.
monty3 = ttk.LabelFrame(tab3, text='')
monty3.grid(column=0, row=0, padx=8, pady=4)
labelsFrame3 = ttk.LabelFrame(monty3, text=' 参数 ')
labelsFrame3.grid(column=0, row=0)
# Scrolled log area for training/prediction messages.
scrolW = 34;
scrolH = 25;
scr3 = scrolledtext.ScrolledText(monty3, width=scrolW, height=scrolH, wrap=tk.WORD)
scr3.grid(column=0, row=3, sticky='WE')
# "Data" label.
ttk.Label(labelsFrame3, text="数据:").grid(column=0, row=0, sticky='W')
# Open a saved JSON data file and build the analytics/prediction model,
# stashed on the function object (data_file2.analytics) for other callbacks.
def data_file2():
    fname = filedialog.askopenfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
    # Empty string => cancelled dialog.  (The original compared with
    # "fname is not ''", an identity test that warns on Python >= 3.8.)
    if fname:
        data_file2.analytics = analytics.Analytics(fname)
        scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n打开文件 ' + fname + '\n\n')
data_file2.analytics = None  # no file opened yet
# "Open file" button.
data2 = ttk.Button(labelsFrame3, text="打开文件", width=10, command=data_file2)
data2.grid(column=1, row=0, sticky='E')
# "Days to predict" label.
ttk.Label(labelsFrame3, text="预测天数:").grid(column=0, row=1, sticky='W')
# Days-to-predict entry, default 30.
days = tk.IntVar()
days.set(30)
daysEntered = ttk.Entry(labelsFrame3, textvariable=days)
daysEntered.grid(column=1, row=1, sticky='W')
# Train on the opened data and draw the prediction chart on the canvas.
def drawpredict():
    # Parse the requested horizon; fall back to 30 days on bad input.
    try:
        num_day = int(daysEntered.get())
    except ValueError:  # was a bare except: — only int() conversion can fail here
        num_day = 30
        daysEntered.delete(0, tk.END)
        daysEntered.insert(0, 30)
    # Clear the figure so successive runs do not draw on top of each other.
    drawpredict.f.clf()
    drawpredict.out = data_file2.analytics.predict(num_day, scr3)
    # FigureCanvasTkAgg.show() was deprecated and later removed from
    # Matplotlib; draw() is the supported way to refresh the canvas.
    drawpredict.canvas.draw()
drawpredict.out = None  # result of the last run, consumed by save_file3
drawpredict.f = plt.figure()
drawpredict.canvas = FigureCanvasTkAgg(drawpredict.f, master=monty3)
drawpredict.canvas.draw()
drawpredict.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)
def run_drawpredict():
    # Require an opened data file; training runs in a worker thread so the
    # UI stays responsive while progress streams into the log widget.
    if data_file2.analytics is not None:
        scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n分析数据(设定预测 '
                    + str(daysEntered.get()) + ' 天)...\n\n训练过程:\n轮次/总轮次 [损失]\n')
        t = threading.Thread(target=drawpredict)
        t.start()
    else:
        mBox.showwarning('Python Message Warning Box', '请先打开文件!')
# "Predict" button.
pr = ttk.Button(labelsFrame3, text="预测", width=10, command=run_drawpredict)
pr.grid(column=0, row=2, sticky='W')
# Save the training log and prediction results to a user-chosen JSON file.
def save_file3():
    if drawpredict.out is not None:
        fname = filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        # Guard against a cancelled dialog: the other save handlers check for
        # an empty name, but this one originally passed '' straight to open(),
        # which raises FileNotFoundError.
        if fname:
            with open(fname, 'w') as f1:
                f1.write(str(drawpredict.out))
            scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n训练过程和预测结果保存到 ' + fname + '\n\n')
    else:
        mBox.showwarning('Python Message Warning Box', '请先预测数据!')
# "Save" button (renamed save3 so it no longer shadows tab1's `save` button,
# matching the save2 convention of the analysis tab).
save3 = ttk.Button(labelsFrame3, text="保存", width=10, command=save_file3)
save3.grid(column=1, row=2, sticky='E')
# Apply uniform padding to everything in the prediction tab.
for child in labelsFrame3.winfo_children():
    child.grid_configure(padx=8, pady=4)
for child in monty3.winfo_children():
    child.grid_configure(padx=8, pady=4)
# Start the Tk event loop when run as a script.
if __name__ == "__main__":
    win.mainloop()
| 27.371257
| 161
| 0.669438
| 1,311
| 9,142
| 4.617086
| 0.194508
| 0.051214
| 0.032711
| 0.041632
| 0.45845
| 0.415827
| 0.376673
| 0.317694
| 0.317694
| 0.282835
| 0
| 0.028774
| 0.152264
| 9,142
| 333
| 162
| 27.453453
| 0.752258
| 0.025487
| 0
| 0.191919
| 0
| 0.005051
| 0.092334
| 0.00248
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.065657
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e96debb65a28b71e00c0a2a49cd0ca34ceacdd69
| 449
|
py
|
Python
|
api/compat.py
|
fancystats/api
|
298ae6d71fa37f649bbd61ad000767242f49a698
|
[
"MIT"
] | 1
|
2015-03-20T20:35:22.000Z
|
2015-03-20T20:35:22.000Z
|
api/compat.py
|
fancystats/api
|
298ae6d71fa37f649bbd61ad000767242f49a698
|
[
"MIT"
] | null | null | null |
api/compat.py
|
fancystats/api
|
298ae6d71fa37f649bbd61ad000767242f49a698
|
[
"MIT"
] | null | null | null |
"""
Python 2/3 Compatibility
========================
Not sure we need to support anything but Python 2.7 at this point , but copied
this module over from flask-peewee for the time being.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
reduce = reduce
else:
text_type = str
string_types = (str, )
unichr = chr
from functools import reduce
| 17.96
| 78
| 0.639198
| 64
| 449
| 4.40625
| 0.6875
| 0.049645
| 0.099291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023324
| 0.23608
| 449
| 24
| 79
| 18.708333
| 0.798834
| 0.4098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e97022aba46b50c4fc79f34b4e0641ec360d25a6
| 3,254
|
bzl
|
Python
|
infra-sk/karma_test/index.bzl
|
bodymovin/skia-buildbot
|
1570e4e48ecb330750264d4ae6a875b5e49a37fe
|
[
"BSD-3-Clause"
] | null | null | null |
infra-sk/karma_test/index.bzl
|
bodymovin/skia-buildbot
|
1570e4e48ecb330750264d4ae6a875b5e49a37fe
|
[
"BSD-3-Clause"
] | null | null | null |
infra-sk/karma_test/index.bzl
|
bodymovin/skia-buildbot
|
1570e4e48ecb330750264d4ae6a875b5e49a37fe
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module defines the karma_test rule."""
load("@infra-sk_npm//@bazel/typescript:index.bzl", "ts_library")
load("@infra-sk_npm//@bazel/rollup:index.bzl", "rollup_bundle")
load("@infra-sk_npm//karma:index.bzl", _generated_karma_test = "karma_test")
def karma_test(name, srcs, deps, entry_point = None):
    """Runs unit tests in a browser with Karma and the Mocha test runner.

    Under `bazel test` the suite runs in headless Chrome, so multiple
    karma_test targets run in parallel and on RBE. Under `bazel run` it
    prints a URL to open in a browser (with source maps) for debugging with
    the browser's developer tools. Under `ibazel test` the runner stays
    alive and reruns tests on every source change; `ibazel run` behaves like
    `bazel run` but rebuilds on change — reload the page to pick it up.

    Args:
      name: The name of the target.
      srcs: The *.ts test files.
      deps: The ts_library dependencies for the source files.
      entry_point: File in srcs used as the bundle entry point for the test
        runner. Optional if srcs contains only one file.
    """
    if len(srcs) > 1 and not entry_point:
        fail("An entry_point must be specified when srcs contains more than one file.")
    if entry_point and entry_point not in srcs:
        fail("The entry_point must be included in srcs.")
    if len(srcs) == 1:
        entry_point = srcs[0]

    lib_name = name + "_lib"
    bundle_name = name + "_bundle"

    ts_library(
        name = lib_name,
        srcs = srcs,
        # Common test dependencies are appended so callers don't have to.
        deps = deps + [
            "@infra-sk_npm//@types/mocha",
            "@infra-sk_npm//@types/chai",
            "@infra-sk_npm//@types/sinon",
        ],
    )

    rollup_bundle(
        name = bundle_name,
        entry_point = entry_point,
        deps = [
            lib_name,
            "@infra-sk_npm//@rollup/plugin-node-resolve",
            "@infra-sk_npm//@rollup/plugin-commonjs",
            "@infra-sk_npm//rollup-plugin-sourcemaps",
        ],
        format = "umd",
        config_file = "//infra-sk:rollup.config.js",
    )

    # _generated_karma_test is the rule rules_nodejs generates from Karma's
    # package.json file (see the load() at the top of this module).
    _generated_karma_test(
        name = name,
        size = "large",
        data = [
            bundle_name,
            "//infra-sk/karma_test:karma.conf.js",
            "@infra-sk_npm//karma-chrome-launcher",
            "@infra-sk_npm//karma-sinon",
            "@infra-sk_npm//karma-mocha",
            "@infra-sk_npm//karma-chai",
            "@infra-sk_npm//karma-chai-dom",
            "@infra-sk_npm//karma-spec-reporter",
            "@infra-sk_npm//mocha",
        ],
        templated_args = [
            "start",
            "$(execpath //infra-sk/karma_test:karma.conf.js)",
            "$$(rlocation $(location %s_bundle))" % name,
        ],
        tags = [
            "ibazel_notify_changes",  # required for ibazel support
        ],
    )
| 36.977273
| 100
| 0.609711
| 440
| 3,254
| 4.388636
| 0.35
| 0.068876
| 0.082859
| 0.054376
| 0.101502
| 0.027965
| 0.027965
| 0
| 0
| 0
| 0
| 0.001279
| 0.279348
| 3,254
| 87
| 101
| 37.402299
| 0.822175
| 0.374309
| 0
| 0.092593
| 0
| 0
| 0.433231
| 0.307849
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0
| 0
| 0.018519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e970a8957b84490bbe0b79a62e25d6fddc55f490
| 5,894
|
py
|
Python
|
stats/ClassicAnalyzerStats.py
|
arndff/fpl-rivals-tracker
|
311b932ab7c07b03c1676e5a971df13e652a1b7b
|
[
"Apache-2.0"
] | 4
|
2019-02-06T10:42:50.000Z
|
2021-02-17T21:09:26.000Z
|
stats/ClassicAnalyzerStats.py
|
arndff/fpl-rivals-tracker
|
311b932ab7c07b03c1676e5a971df13e652a1b7b
|
[
"Apache-2.0"
] | null | null | null |
stats/ClassicAnalyzerStats.py
|
arndff/fpl-rivals-tracker
|
311b932ab7c07b03c1676e5a971df13e652a1b7b
|
[
"Apache-2.0"
] | 1
|
2021-02-17T21:09:27.000Z
|
2021-02-17T21:09:27.000Z
|
from fileutils.fileutils import save_output_to_file, select_option_from_menu
class ClassicAnalyzerStats:
    """Interactive statistics menu over a sample of FPL managers.

    Every line shown to the user is also collected in an internal buffer so
    the whole session can be appended to a report file afterwards via
    save_stats_output_to_file().
    """

    def __init__(self, data, current_event, output_file_name):
        # data: sequence of manager objects; the stats methods read their
        # captain_name, vice_captain_name, gw_points(), gw_hits, gw_transfers,
        # used_chips_by_gw, active_chip, team_value and manager_name members.
        self.__data = data
        self.__current_event = current_event  # current gameweek number
        self.__output_file_name = output_file_name  # report destination
        self.__output = []  # everything printed, mirrored for the report
        self.__options = self.__init_options()
        self.__append_options_to_output()

    def save_stats_output_to_file(self):
        """Append the buffered session output to the configured file."""
        save_output_to_file(self.__output_file_name, "a+", self.__output)

    def stats_menu(self):
        """Prompt for menu options in a loop until the user picks 10) Exit."""
        while True:
            exception_msg = "\n[!] Please enter an integer from 1 to 10."
            option = select_option_from_menu(self.__options, exception_msg)
            self.__output.append("Selected option: {}".format(option))

            # -1 signals unparsable input, already reported by the helper.
            if option == -1:
                continue

            if option == 1:
                self.__calculate_average_points()
            elif option == 2:
                self.__print_captains((list(map(lambda x: x.captain_name, self.__data))))
            elif option == 3:
                self.__print_captains((list(map(lambda x: x.vice_captain_name, self.__data))))
            elif option == 4:
                self.__print_chip_usage_whole_season()
            elif option == 5:
                self.__print_chip_usage_current_event()
            elif option == 6:
                self.__count_managers_made_transfer()
            elif option == 7:
                self.__count_managers_took_hit()
            elif option == 8:
                self.__print_team_value(max)
            elif option == 9:
                self.__print_team_value(min)
            elif option == 10:
                self.__output.append("")
                break
            else:
                print("\n[!] Invalid option. Try again!")

    @staticmethod
    def init_a_dict(key, dictionary):
        """Increment dictionary[key], creating the entry on first sight."""
        # dict.get with a default replaces the original if/else double lookup.
        dictionary[key] = dictionary.get(key, 0) + 1

    def print_chips(self, chips):
        """Print chip usage counts on one line and log them to the buffer."""
        for chip in chips:
            string = "{}({})".format(chip, chips[chip])
            print(string, end=" ")
            self.__output.append(string)

        print()
        self.__output.append("")

    def __init_options(self):
        # Menu text; option 5 embeds the current gameweek number.
        options = ["\n* Please choose an option from 1 to 10:",
                   "1) Sample's average score",
                   "2) Most captained players",
                   "3) Most vice-captained players",
                   "4) Chips usage during the whole season",
                   "5) Chips usage during GW{}".format(self.__current_event),
                   "6) Count of managers made at least one transfer",
                   "7) Count of managers took at least one hit",
                   "8) Richest manager(s)",
                   "9) Poorest manager(s)",
                   "10) Exit"]
        return options

    def __calculate_average_points(self):
        # Average net gameweek score over the sample: hits are subtracted
        # from each manager's raw points before averaging.
        managers_count = len(self.__data)

        total_points = 0
        for manager in self.__data:
            total_points += manager.gw_points()
            total_points -= manager.gw_hits

        average_points = total_points / managers_count
        result = "{:.2f} points".format(average_points)

        print(result)
        self.__output.append(result)
        self.__output.append("")

    def __print_captains(self, list_of_captains):
        # Count picks per (vice-)captain and print them most-picked first.
        captains = {}
        for captain in list_of_captains:
            self.init_a_dict(captain, captains)

        captains_sorted = [(captain, captains[captain]) for captain in sorted(captains, key=captains.get, reverse=True)]

        for key, value in captains_sorted:
            captain = "{}({})".format(key, value)
            print(captain, end=" ")
            self.__output.append(captain)

        print()
        self.__output.append("")

    def __print_chip_usage_whole_season(self):
        # Aggregate every chip played by every manager across the season.
        chips = {}
        for manager in self.__data:
            for chip in manager.used_chips_by_gw:
                self.init_a_dict(chip, chips)

        self.print_chips(chips)

    def __print_chip_usage_current_event(self):
        # Only chips active in the current gameweek; the string "None" marks
        # a manager who played no chip.
        active_chips = {}
        for manager in self.__data:
            active_chip = manager.active_chip
            if active_chip != "None":
                self.init_a_dict(active_chip, active_chips)

        if len(active_chips) < 1:
            result = "No manager has used any chip in GW{}".format(self.__current_event)
            self.__log_string(result)
        else:
            self.print_chips(active_chips)

    def __count_managers_made_transfer(self):
        result = len(list(filter(lambda x: x.gw_transfers > 0, self.__data)))

        # Singular/plural phrasing for the report line.
        if result == 1:
            managers_count = "1 manager"
        else:
            managers_count = "{} managers".format(result)

        self.__log_string(managers_count)

    def __count_managers_took_hit(self):
        result = len(list(filter(lambda x: x.gw_hits > 0, self.__data)))
        managers_count = "{} managers".format(result)
        self.__log_string(managers_count)

    def __print_team_value(self, extremum):
        # extremum is max (richest) or min (poorest); report every manager
        # whose team value matches that extreme.  Locals renamed from the
        # original "richest_*", which was wrong for the min case.
        team_values = list(map(lambda x: x.team_value, self.__data))
        extreme_value = extremum(team_values)

        extreme_managers = list(filter(lambda x: x.team_value == extreme_value, self.__data))
        extreme_managers_names = (list(map(lambda x: x.manager_name, extreme_managers)))

        result = ", ".join(extreme_managers_names)
        result_string = "{} ({}M)".format(result, format(extreme_value, ".1f"))
        self.__log_string(result_string)

    def __append_options_to_output(self):
        # extend() replaces the original list comprehension that was used
        # purely for its append side effects.
        self.__output.append("")
        self.__output.extend(self.__options)
        self.__output.append("")

    def __log_string(self, string):
        print(string)
        self.__output.append(string)
        self.__output.append("")
| 33.68
| 120
| 0.588904
| 685
| 5,894
| 4.670073
| 0.191241
| 0.053142
| 0.06502
| 0.017505
| 0.20944
| 0.11316
| 0.0794
| 0.0794
| 0.059394
| 0.038762
| 0
| 0.009575
| 0.308958
| 5,894
| 174
| 121
| 33.873563
| 0.775841
| 0
| 0
| 0.165414
| 0
| 0
| 0.090092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112782
| false
| 0
| 0.007519
| 0
| 0.135338
| 0.150376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9736a918f48d6f382688f91eb8391428a99f968
| 2,893
|
py
|
Python
|
sarpy/io/product/base.py
|
spacefan/sarpy
|
2791af86b568c8a8560275aee426a4718d5a4606
|
[
"MIT"
] | 119
|
2018-07-12T22:08:17.000Z
|
2022-03-24T12:11:39.000Z
|
sarpy/io/product/base.py
|
spacefan/sarpy
|
2791af86b568c8a8560275aee426a4718d5a4606
|
[
"MIT"
] | 72
|
2018-03-29T15:57:37.000Z
|
2022-03-10T01:46:21.000Z
|
sarpy/io/product/base.py
|
spacefan/sarpy
|
2791af86b568c8a8560275aee426a4718d5a4606
|
[
"MIT"
] | 54
|
2018-03-27T19:57:20.000Z
|
2022-03-09T20:53:11.000Z
|
"""
Base common features for product readers
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from typing import Sequence, List, Tuple, Union
from sarpy.io.general.base import AbstractReader
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.complex.sicd_elements.SICD import SICDType
class SIDDTypeReader(AbstractReader):
    def __init__(self, sidd_meta, sicd_meta):
        """
        Parameters
        ----------
        sidd_meta : None|SIDDType1|SIDDType2|Sequence[SIDDType1]|Sequence[SIDDType2]
            The SIDD metadata object(s), if provided
        sicd_meta : None|SICDType|Sequence[SICDType]
            the SICD metadata object(s), if provided
        """

        # None and a single SIDD instance are stored unchanged; a collection
        # is validated element-by-element and frozen into a tuple.
        if sidd_meta is None or isinstance(sidd_meta, (SIDDType1, SIDDType2)):
            self._sidd_meta = sidd_meta
        else:
            checked = []  # type: List[Union[SIDDType1, SIDDType2]]
            for entry in sidd_meta:
                if not isinstance(entry, (SIDDType1, SIDDType2)):
                    raise TypeError(
                        'Got a collection for sidd_meta, and all elements are required '
                        'to be instances of SIDDType.')
                checked.append(entry)
            self._sidd_meta = tuple(checked)

        # A single SICD is normalized to a one-element tuple; collections are
        # validated and frozen as above.
        if sicd_meta is None:
            self._sicd_meta = None
        elif isinstance(sicd_meta, SICDType):
            self._sicd_meta = (sicd_meta, )
        else:
            checked = []  # type: List[SICDType]
            for entry in sicd_meta:
                if not isinstance(entry, SICDType):
                    raise TypeError(
                        'Got a collection for sicd_meta, and all elements are required '
                        'to be instances of SICDType.')
                checked.append(entry)
            self._sicd_meta = tuple(checked)

    @property
    def sidd_meta(self):
        # type: () -> Union[None, SIDDType1, SIDDType2, Tuple[SIDDType1], Tuple[SIDDType2]]
        """
        None|SIDDType1|SIDDType2|Tuple[SIDDType1]|Tuple[SIDDType2]: the sidd meta_data collection.
        """

        return self._sidd_meta

    @property
    def sicd_meta(self):
        # type: () -> Union[None, Tuple[SICDType]]
        """
        None|Tuple[SICDType]: the sicd meta_data collection.
        """

        return self._sicd_meta

    def get_sidds_as_tuple(self):
        """
        Get the sidd collection as a tuple - for simplicity and consistency of use.

        Returns
        -------
        None|Tuple[SIDDType1]|Tuple[SIDDType2]
        """

        meta = self._sidd_meta
        if meta is None:
            return None
        return meta if isinstance(meta, tuple) else (meta, )
| 31.445652
| 98
| 0.59281
| 324
| 2,893
| 5.089506
| 0.243827
| 0.082474
| 0.065494
| 0.05094
| 0.351728
| 0.180716
| 0.114008
| 0.053366
| 0.053366
| 0.053366
| 0
| 0.011651
| 0.317663
| 2,893
| 91
| 99
| 31.791209
| 0.823708
| 0.248531
| 0
| 0.270833
| 0
| 0
| 0.104187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.104167
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|