hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42b27e1114addb6efa22983ea1b8536333e5b90e
| 3,096
|
py
|
Python
|
datar/forcats/misc.py
|
stjordanis/datar
|
4e2b5db026ad35918954576badef9951928c0cb1
|
[
"MIT"
] | 110
|
2021-03-09T04:10:40.000Z
|
2022-03-13T10:28:20.000Z
|
datar/forcats/misc.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 54
|
2021-06-20T18:53:44.000Z
|
2022-03-29T22:13:07.000Z
|
datar/forcats/misc.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 11
|
2021-06-18T03:03:14.000Z
|
2022-02-25T11:48:26.000Z
|
"""Provides other helper functions for factors"""
from typing import Any, Iterable
import numpy
from pandas import Categorical, DataFrame
from pipda import register_verb
from pipda.utils import CallingEnvs
from ..core.types import ForcatsRegType, ForcatsType, is_null, is_scalar
from ..core.utils import Array
from ..core.contexts import Context
from ..core.defaults import f
from ..base import (
factor,
tabulate,
prop_table,
nlevels,
levels,
NA,
setdiff,
is_ordered,
)
from ..dplyr import arrange, desc, mutate
from .utils import check_factor
from .lvl_order import fct_inorder
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_count(_f: ForcatsType, sort: bool = False, prop=False) -> DataFrame:
    """Count entries in a factor.

    Args:
        _f: A factor
        sort: If True, sort the result so that the most common values float to
            the top
        prop: If True, compute the fraction of marginal table.

    Returns:
        A data frame with columns `f`, `n` and `p`, if prop is True
    """
    # Local import so the module import block stays untouched.
    from pandas import concat

    f2 = check_factor(_f)
    n_na = sum(is_null(f2))

    df = DataFrame(
        {
            "f": fct_inorder(
                levels(f2, __calling_env=CallingEnvs.REGULAR),
                __calling_env=CallingEnvs.REGULAR,
            ),
            "n": tabulate(
                f2,
                nlevels(f2, __calling_env=CallingEnvs.REGULAR),
                __calling_env=CallingEnvs.REGULAR,
            ),
        }
    )

    if n_na > 0:
        # Bug fix: DataFrame.append was deprecated in pandas 1.4 and removed
        # in pandas 2.0; concat is the supported replacement.
        df = concat([df, DataFrame([{"f": NA, "n": n_na}])], ignore_index=True)

    if sort:
        df = arrange(
            df,
            desc(f.n, __calling_env=CallingEnvs.PIPING),
            __calling_env=CallingEnvs.REGULAR,
        )

    if prop:
        df = mutate(
            df,
            p=prop_table(f.n, __calling_env=CallingEnvs.PIPING),
            __calling_env=CallingEnvs.REGULAR,
        )

    return df
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_match(_f: ForcatsType, lvls: Any) -> Iterable[bool]:
    """Check which entries of a factor match any of the given levels.

    Do any of `lvls` occur in `_f`?

    Args:
        _f: A factor
        lvls: A vector specifying levels to look for.

    Returns:
        A logical factor

    Raises:
        ValueError: If any non-NA level in `lvls` is absent from `_f`.
    """
    _f = check_factor(_f)
    lvls = [lvls] if is_scalar(lvls) else lvls

    missing = setdiff(
        lvls,
        levels(_f, __calling_env=CallingEnvs.REGULAR),
        __calling_env=CallingEnvs.REGULAR,
    )
    if len(missing) > 0:
        # NA levels are allowed to be absent; drop them before complaining.
        missing = Array(missing)[~is_null(missing)]
    if len(missing) > 0:
        raise ValueError(f"Levels not present in factor: {missing}.")

    return numpy.isin(_f, lvls)
@register_verb(ForcatsRegType)
def fct_unique(_f: ForcatsType) -> Categorical:
    """Build a factor containing only the unique values of a factor.

    Args:
        _f: A factor

    Returns:
        The factor with the unique values in `_f`
    """
    unique_lvls = levels(_f, __calling_env=CallingEnvs.REGULAR)
    ordered = is_ordered(_f, __calling_env=CallingEnvs.REGULAR)
    return factor(unique_lvls, unique_lvls, exclude=None, ordered=ordered)
| 25.377049
| 78
| 0.622739
| 393
| 3,096
| 4.687023
| 0.300254
| 0.065147
| 0.136808
| 0.152009
| 0.295874
| 0.259501
| 0.238871
| 0.211726
| 0.157438
| 0.061889
| 0
| 0.0036
| 0.2823
| 3,096
| 121
| 79
| 25.586777
| 0.825383
| 0.194121
| 0
| 0.175676
| 0
| 0
| 0.018821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.175676
| 0
| 0.256757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42b603082633608e2a31d1e0d368cdcfc8b30d98
| 6,585
|
py
|
Python
|
qucumber/utils/training_statistics.py
|
silky/QuCumber
|
f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-06-27T11:26:29.000Z
|
2019-06-27T11:26:29.000Z
|
qucumber/utils/training_statistics.py
|
silky/QuCumber
|
f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
qucumber/utils/training_statistics.py
|
silky/QuCumber
|
f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 PIQuIL - All Rights Reserved
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import torch
import qucumber.utils.cplx as cplx
import qucumber.utils.unitaries as unitaries
def fidelity(nn_state, target_psi, space, **kwargs):
    r"""Calculates the square of the overlap (fidelity) between the reconstructed
    wavefunction and the true wavefunction (both in the computational basis).

    :param nn_state: The neural network state (i.e. complex wavefunction or
                     positive wavefunction).
    :type nn_state: WaveFunction
    :param target_psi: The true wavefunction of the system.
    :type target_psi: torch.Tensor
    :param space: The hilbert space of the system.
    :type space: torch.Tensor
    :param \**kwargs: Extra keyword arguments that may be passed. Will be ignored.

    :returns: The fidelity.
    :rtype: torch.Tensor
    """
    Z = nn_state.compute_normalization(space)
    target_psi = target_psi.to(nn_state.device)

    # Accumulate <target|psi> as a (real, imag) pair.
    overlap = torch.tensor([0.0, 0.0], dtype=torch.double, device=nn_state.device)
    for idx in range(len(space)):
        amp = nn_state.psi(space[idx]) / Z.sqrt()
        overlap[0] += target_psi[0, idx] * amp[0] + target_psi[1, idx] * amp[1]
        overlap[1] += target_psi[0, idx] * amp[1] - target_psi[1, idx] * amp[0]

    # Fidelity is the squared magnitude of the overlap.
    return cplx.norm_sqr(overlap)
def rotate_psi(nn_state, basis, space, unitaries, psi=None):
    r"""A function that rotates the reconstructed wavefunction to a different
    basis.

    :param nn_state: The neural network state (i.e. complex wavefunction or
                     positive wavefunction).
    :type nn_state: WaveFunction
    :param basis: The basis to rotate the wavefunction to.
    :type basis: str
    :param space: The hilbert space of the system.
    :type space: torch.Tensor
    :param unitaries: A dictionary of (2x2) unitary operators.
    :type unitaries: dict
    :param psi: A wavefunction that the user can input to override the neural
                network state's wavefunction.
    :type psi: torch.Tensor
    :returns: A wavefunction in a new basis.
    :rtype: torch.Tensor
    """
    N = nn_state.num_visible
    # Scratch vector: one full basis state of the N-site system.
    v = torch.zeros(N, dtype=torch.double, device=nn_state.device)
    # Output: rotated wavefunction, one (re, im) column per basis state.
    psi_r = torch.zeros(2, 1 << N, dtype=torch.double, device=nn_state.device)

    for x in range(1 << N):
        # Accumulator for the rotated amplitude of basis state x.
        Upsi = torch.zeros(2, dtype=torch.double, device=nn_state.device)
        # Sites whose basis character is not "Z" need an actual rotation;
        # "Z" sites are left untouched.
        num_nontrivial_U = 0
        nontrivial_sites = []
        for jj in range(N):
            if basis[jj] != "Z":
                num_nontrivial_U += 1
                nontrivial_sites.append(jj)
        # Enumerate all configurations of the rotated sites only.
        sub_state = nn_state.generate_hilbert_space(num_nontrivial_U)
        for xp in range(1 << num_nontrivial_U):
            # Assemble the full state v: rotated sites take values from
            # sub_state, "Z" sites keep their value from space[x].
            cnt = 0
            for j in range(N):
                if basis[j] != "Z":
                    v[j] = sub_state[xp][cnt]
                    cnt += 1
                else:
                    v[j] = space[x, j]
            # U starts as complex 1 + 0i and collects the product of the
            # relevant unitary matrix elements over the rotated sites.
            U = torch.tensor([1.0, 0.0], dtype=torch.double, device=nn_state.device)
            for ii in range(num_nontrivial_U):
                tmp = unitaries[basis[nontrivial_sites[ii]]]
                tmp = tmp[
                    :, int(space[x][nontrivial_sites[ii]]), int(v[nontrivial_sites[ii]])
                ].to(nn_state.device)
                U = cplx.scalar_mult(U, tmp)
            if psi is None:
                # Use the amplitude from the neural-network state.
                Upsi += cplx.scalar_mult(U, nn_state.psi(v))
            else:
                # Use the caller-supplied wavefunction; convert the bit
                # vector v to its integer index (big-endian).
                index = 0
                for k in range(len(v)):
                    index = (index << 1) | int(v[k].item())
                Upsi += cplx.scalar_mult(U, psi[:, index])
        psi_r[:, x] = Upsi
    return psi_r
def KL(nn_state, target_psi, space, bases=None, **kwargs):
    r"""A function for calculating the total KL divergence.

    :param nn_state: The neural network state (i.e. complex wavefunction or
                     positive wavefunction).
    :type nn_state: WaveFunction
    :param target_psi: The true wavefunction of the system.
    :type target_psi: torch.Tensor
    :param space: The hilbert space of the system.
    :type space: torch.Tensor
    :param bases: An array of unique bases.
    :type bases: np.array(dtype=str)
    :param \**kwargs: Extra keyword arguments that may be passed. Will be ignored.

    :returns: The KL divergence.
    :rtype: torch.Tensor
    """
    # Buffer for rotated wavefunctions (overwritten for each basis).
    psi_r = torch.zeros(
        2, 1 << nn_state.num_visible, dtype=torch.double, device=nn_state.device
    )
    # Running total; this local deliberately shadows the function name.
    KL = 0.0
    unitary_dict = unitaries.create_dict()
    target_psi = target_psi.to(nn_state.device)
    Z = nn_state.compute_normalization(space)
    # Small constant to avoid log(0).
    eps = 0.000001
    if bases is None:
        # Computational basis only: standard KL between probability
        # distributions |target_psi|^2 and |psi|^2 / Z.
        num_bases = 1
        for i in range(len(space)):
            KL += (
                cplx.norm_sqr(target_psi[:, i])
                * (cplx.norm_sqr(target_psi[:, i]) + eps).log()
            )
            KL -= (
                cplx.norm_sqr(target_psi[:, i])
                * (cplx.norm_sqr(nn_state.psi(space[i])) + eps).log()
            )
            # Z.log() corrects for the unnormalized nn_state amplitudes.
            KL += cplx.norm_sqr(target_psi[:, i]) * Z.log()
    else:
        num_bases = len(bases)
        # NOTE(review): iteration starts at index 1, so bases[0] never
        # contributes to the sum even though it counts in num_bases —
        # presumably bases[0] is the computational basis handled
        # elsewhere; confirm against callers.
        for b in range(1, len(bases)):
            psi_r = rotate_psi(nn_state, bases[b], space, unitary_dict)
            target_psi_r = rotate_psi(
                nn_state, bases[b], space, unitary_dict, target_psi
            )
            for ii in range(len(space)):
                # Skip zero-probability entries (0 * log 0 -> 0 by
                # convention; avoids NaN from log of 0).
                if cplx.norm_sqr(target_psi_r[:, ii]) > 0.0:
                    KL += (
                        cplx.norm_sqr(target_psi_r[:, ii])
                        * cplx.norm_sqr(target_psi_r[:, ii]).log()
                    )
                    KL -= (
                        cplx.norm_sqr(target_psi_r[:, ii])
                        * cplx.norm_sqr(psi_r[:, ii]).log().item()
                    )
                    KL += cplx.norm_sqr(target_psi_r[:, ii]) * Z.log()
    return KL / float(num_bases)
| 39.431138
| 88
| 0.603037
| 909
| 6,585
| 4.244224
| 0.213421
| 0.050804
| 0.034215
| 0.039658
| 0.450233
| 0.399171
| 0.361586
| 0.333593
| 0.289787
| 0.289787
| 0
| 0.011578
| 0.291724
| 6,585
| 166
| 89
| 39.668675
| 0.815609
| 0.3918
| 0
| 0.184783
| 0
| 0
| 0.000525
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032609
| false
| 0
| 0.032609
| 0
| 0.097826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42b7e07ad45d9d0be2cad9161c36276cb3b1762f
| 1,433
|
py
|
Python
|
14.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
14.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
14.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
import copy
def next10(i):
    """Find how many recipe scores precede the first occurrence of the
    score sequence `i` on the recipe board (Advent of Code 2018, day 14).

    Args:
        i: The target score sequence, as a list of single digits.

    Returns:
        The number of recipes before `i` first appears.
    """
    # Starting condition: two recipes, scores 3 and 7.
    board = [3, 7]
    # Current positions of the two elves.
    elves = [0, 1]
    target_len = len(i)
    # Bug fix: use None (not 0/False) as the sentinel — a match whose
    # prefix length is 0 is a valid answer, and `while (not found)` would
    # loop forever on it.
    found = None
    while found is None:
        total = board[elves[0]] + board[elves[1]]
        if total < 10:
            board.append(total)
            if board[-target_len:] == i:
                found = len(board) - target_len
        else:
            board.append(1)
            board.append(total % 10)
            # Two digits were appended, so the target can end at either of
            # the last two positions.
            if board[-target_len:] == i:
                found = len(board) - target_len
            elif board[-target_len - 1:-1] == i:
                found = len(board) - target_len - 1
        elves[0] = (elves[0] + board[elves[0]] + 1) % len(board)
        elves[1] = (elves[1] + board[elves[1]] + 1) % len(board)
    print(found)
    return found
# Part-1-era test cases (next10 formerly took a count and returned the next
# 10 scores); kept for reference.
# assert next10(5) == [0,1,2,4,5,1,5,8,9,1]
# assert next10(9) == [5,1,5,8,9,1,6,7,7,9]
# assert next10(18) == [9,2,5,1,0,7,1,0,8,5]
# assert next10(2018) == [5,9,4,1,4,2,9,8,8,2]
# print(next10(760221))
# Part 2 test cases: next10 now takes a digit sequence and returns the
# number of recipes that precede its first appearance.
assert next10([0,1,2,4,5]) == 5
assert next10([5,1,5,8,9]) == 9
assert next10([9,2,5,1,0]) == 18
assert next10([5,9,4,1,4]) == 2018
# Puzzle input.
print(next10([7,6,0,2,2,1]))
| 31.844444
| 66
| 0.491975
| 244
| 1,433
| 2.848361
| 0.159836
| 0.046043
| 0.077698
| 0.086331
| 0.2
| 0.133813
| 0.116547
| 0.089209
| 0.089209
| 0.089209
| 0
| 0.142582
| 0.275646
| 1,433
| 44
| 67
| 32.568182
| 0.526975
| 0.316818
| 0
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.111111
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42b83fe05de3f7690454c9ae7844d6d5c0896fb0
| 892
|
py
|
Python
|
rpc inv matriz/ServerRPC.py
|
Aldair47x/DISTRIBUIDOS-UTP
|
182f143b3a5d73744f78eb4fe1428cbca22387c2
|
[
"MIT"
] | null | null | null |
rpc inv matriz/ServerRPC.py
|
Aldair47x/DISTRIBUIDOS-UTP
|
182f143b3a5d73744f78eb4fe1428cbca22387c2
|
[
"MIT"
] | null | null | null |
rpc inv matriz/ServerRPC.py
|
Aldair47x/DISTRIBUIDOS-UTP
|
182f143b3a5d73744f78eb4fe1428cbca22387c2
|
[
"MIT"
] | null | null | null |
import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
import numpy as np
from io import StringIO
from numpy.linalg import inv
from scipy.linalg import *
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC request handler that only serves the /RPC2 path."""

    # Requests to any other path are rejected by the server.
    rpc_paths = ('/RPC2',)
# Create server — binds localhost:9000 at import time.
server = SimpleXMLRPCServer(("localhost", 9000),
                            requestHandler=RequestHandler)
server.register_introspection_functions()
# Client proxy pointing back at this same server.
s = xmlrpclib.ServerProxy('http://localhost:9000')
def operacion(name):
    """Read a whitespace-separated matrix from the file `name` and return
    the string form of its inverse.

    Args:
        name: Path to a text file with one matrix row per line.

    Returns:
        str: `str()` of the inverted matrix.
    """
    matriz = []
    print ("Franquito")
    # Use a context manager so the file is closed even if parsing fails.
    with open(name) as archivo:
        for linea in archivo:
            # Bug fix: split() yields strings; inv() needs numeric entries.
            matriz.append([float(v) for v in linea.strip().split()])
    matrizInv = inv(matriz)
    return str(matrizInv)
# Expose operacion() to XML-RPC clients under the name 'operacion'.
server.register_function(operacion, 'operacion')
# Run the server's main loop (blocks forever).
server.serve_forever()
| 27.030303
| 59
| 0.734305
| 96
| 892
| 6.770833
| 0.604167
| 0.067692
| 0.086154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012228
| 0.174888
| 892
| 33
| 60
| 27.030303
| 0.870924
| 0.079596
| 0
| 0
| 0
| 0
| 0.06743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.291667
| 0
| 0.458333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42ba676a4b1855f63fba242958ff64fc7b10d468
| 1,524
|
py
|
Python
|
damq/api/management/commands/check_settings.py
|
zhanghui9700/clouddam
|
18c7c7578fb727bcab50737b51b8fb5c09070b48
|
[
"Apache-2.0"
] | null | null | null |
damq/api/management/commands/check_settings.py
|
zhanghui9700/clouddam
|
18c7c7578fb727bcab50737b51b8fb5c09070b48
|
[
"Apache-2.0"
] | null | null | null |
damq/api/management/commands/check_settings.py
|
zhanghui9700/clouddam
|
18c7c7578fb727bcab50737b51b8fb5c09070b48
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
from smtplib import SMTPException
from django.conf import settings
from django.core.management import BaseCommand
from django.core.mail import send_mail
class Command(BaseCommand):
    """Management command that smoke-tests the project's email and RPC
    settings by sending a probe message through each channel."""

    def _log(self, tag, result):
        # Print a fixed-width status line: ":-)" on success, "XXX" on failure.
        label = self.style.ERROR("XXX")
        if result:
            label = self.style.MIGRATE_SUCCESS(":-)")
        self.stdout.write("{:<30}{:<5}".format(tag, label))

    def _check_mail(self):
        # Send a probe email to the first configured admin.
        if len(settings.ADMINS) < 1:
            self._log("CHECK_MAIL No Admin", False)
            return
        try:
            title = "%sCheck Settings" % settings.EMAIL_SUBJECT_PREFIX
            msg = "This message used for checking email settings."
            result = send_mail(title, msg,
                settings.EMAIL_FROM, [settings.ADMINS[0]])
        except SMTPException as e:
            # NOTE(review): this assignment is a dead store — the re-raise
            # below means _log is never reached on failure; confirm intent.
            result = False
            raise e
        self._log("CHECK_MAIL", result)

    def _check_rpc_send(self):
        # Push a probe message through the project's RPC notify channel.
        try:
            from rpc import notify
            msg = "{'test': 'This message used for checking email settings.'}"
            notify(msg, routing="transResponse")
        except Exception as e:
            raise e
        self._log("CHECK_RPC_SEND", True)

    def handle(self, *args, **kwargs):
        # Entry point: run both checks, bracketed by banner lines.
        self.stdout.write(self.style.WARNING("************CHECK START************"))
        self._check_mail()
        self._check_rpc_send()
        self.stdout.write(self.style.WARNING("************CHECK END*************"))
| 30.48
| 84
| 0.57874
| 177
| 1,524
| 4.847458
| 0.429379
| 0.041958
| 0.052448
| 0.04662
| 0.216783
| 0.174825
| 0.174825
| 0
| 0
| 0
| 0
| 0.005479
| 0.281496
| 1,524
| 49
| 85
| 31.102041
| 0.778082
| 0.021654
| 0
| 0.111111
| 0
| 0
| 0.176629
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.138889
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42ba9ea7e400e5ef293ccdc589dfbbce586a2405
| 4,113
|
py
|
Python
|
sidomo/sidomo.py
|
noajshu/sdpm
|
b70825d9017eb0c2c6b6389345cccbcbd52cf669
|
[
"Unlicense"
] | 358
|
2016-02-24T01:36:55.000Z
|
2022-02-20T00:10:22.000Z
|
sidomo/sidomo.py
|
noajshu/sdpm
|
b70825d9017eb0c2c6b6389345cccbcbd52cf669
|
[
"Unlicense"
] | 5
|
2016-02-24T22:50:25.000Z
|
2017-01-30T07:58:00.000Z
|
sidomo/sidomo.py
|
noajshu/sdpm
|
b70825d9017eb0c2c6b6389345cccbcbd52cf669
|
[
"Unlicense"
] | 27
|
2016-02-24T13:40:22.000Z
|
2021-06-30T12:04:41.000Z
|
"""Manages the lifecycle of a docker container.
Use via the with statement:
with Container(some_image) as c:
for line in c.run("some_command"):
print line
"""
import docker
import click, os
# sets the docker host from your environment variables
client = docker.Client(
**docker.utils.kwargs_from_env(assert_hostname=False))
class Container:
    """
    Represents a single docker container on the host.
    Volumes should be a list of mapped paths, e.g. ['/var/log/docker:/var/log/docker'].
    """

    def __init__(self, image, memory_limit_gb=4, stderr=True, stdout=True,
                 volumes=None, cleanup=False, environment=None):
        """Configure (but do not start) a container.

        Args:
            image: Docker image name.
            memory_limit_gb: Memory cap in gigabytes (decimal, 1e9 bytes/GB).
            stderr: Whether run() captures the container's stderr.
            stdout: Whether run() captures the container's stdout.
            volumes: List of 'host:container' bind specs; defaults to none.
            cleanup: If True, remove the container on __exit__.
            environment: List of 'KEY=value' environment entries.
        """
        # Bug fix: the defaults were mutable lists ([]), shared across every
        # Container instance; use None sentinels instead.
        volumes = [] if volumes is None else volumes
        environment = [] if environment is None else environment
        self.image = image
        self.memory_limit_bytes = int(memory_limit_gb * 1e9)
        self.stderr = stderr
        self.stdout = stdout
        # Container-side mount points: the part after ':' in each spec.
        self.volumes = [x[1] for x in map(lambda vol: vol.split(':'), volumes)]
        self.binds = volumes
        self.cleanup = cleanup
        self.environment = environment

    def __enter__(self):
        """Power on: create and start the container."""
        self.container_id = client.create_container(
            image=self.image,
            volumes=self.volumes,
            host_config=client.create_host_config(
                mem_limit=self.memory_limit_bytes,
                binds=self.binds,
            ),
            environment=self.environment,
            stdin_open=True
        )['Id']
        client.start(self.container_id)
        return self

    def __exit__(self, type, value, traceback):
        """Power off: stop (and optionally remove) the container."""
        client.stop(self.container_id)
        if self.cleanup:
            client.remove_container(self.container_id)

    def run(self, command):
        """Just like 'docker run CMD'.

        This is a generator that yields lines of container output.
        """
        exec_id = client.exec_create(
            container=self.container_id,
            cmd=command,
            stdout=self.stdout,
            stderr=self.stderr
        )['Id']
        for line in client.exec_start(exec_id, stream=True):
            yield line
@click.command()
@click.argument('do', nargs=-1)
@click.option('--image', '-i', help='Image name in which to run do', default=None)
@click.option('--sharedir', '-s', help='Directory on host machine to mount to docker.', default=os.path.abspath(os.getcwd()))
@click.option('--display', '-d', help='Display variable to set for X11 forwarding.', default=None)
def dodo(do, image, sharedir, display):
    """ dodo (like sudo but for docker) runs argument in a docker image.

    do is the command to run in the image.

    image taken from (1) command-line, (2) "DODOIMAGE" environment variable, or (3) first built image.

    sharedir (e.g., to pass data to command) is mounted (default: current directory). empty string does no mounting.

    display is environment variable to set in docker image that allows X11 forwarding.
    """
    # try to set image three ways: CLI flag, DODOIMAGE env var, or the
    # first locally-built image reported by the docker client.
    if not image:
        if 'DODOIMAGE' in os.environ:
            image = os.environ['DODOIMAGE']
        else:
            ims = client.images()
            if len(ims) >= 1:
                image = [im['RepoTags'][0] for im in client.images()][0]
    assert image, 'No image given or found locally.'
    # get image if not available locally (pull from the docker hub).
    imnames = [im['RepoTags'][0] for im in client.images()]
    if (not any([image in imname for imname in imnames])) and client.search(image):
        print('Image {} not found locally. Pulling from docker hub.'.format(image))
        client.pull(image)
    # mount directory in docker (host dir is mapped to /home inside).
    if sharedir:
        volumes = ['{}:/home'.format(sharedir)]
    else:
        volumes = []
    # set docker environment to display X11 locally; explicit flag wins
    # over the DODODISPLAY environment variable.
    if display:
        environment = ['DISPLAY={}'.format(display)]
    elif 'DODODISPLAY' in os.environ:
        environment = ['DISPLAY={}'.format(os.environ['DODODISPLAY'])]
    else:
        environment = []
    # Run the command and stream its output, prefixed with the image name.
    with Container(image, volumes=volumes, cleanup=True, environment=environment) as c:
        for output_line in c.run(do):
            print('{}:\t {}'.format(image, output_line.decode('utf-8')))
| 33.713115
| 125
| 0.623389
| 529
| 4,113
| 4.765595
| 0.344045
| 0.025783
| 0.02975
| 0.007933
| 0.0238
| 0.0238
| 0.0238
| 0.0238
| 0
| 0
| 0
| 0.006219
| 0.257233
| 4,113
| 121
| 126
| 33.991736
| 0.818985
| 0.242645
| 0
| 0.070423
| 0
| 0
| 0.111516
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 1
| 0.070423
| false
| 0
| 0.028169
| 0
| 0.126761
| 0.028169
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42bb4531b3deb62a4952ce2f40bb5fa396ce9810
| 4,321
|
py
|
Python
|
scripts/utils/prepare_data.py
|
Harshs27/mGLAD
|
f85d5a7cb2091a4528c762dc550d8c9b35d190b1
|
[
"MIT"
] | null | null | null |
scripts/utils/prepare_data.py
|
Harshs27/mGLAD
|
f85d5a7cb2091a4528c762dc550d8c9b35d190b1
|
[
"MIT"
] | null | null | null |
scripts/utils/prepare_data.py
|
Harshs27/mGLAD
|
f85d5a7cb2091a4528c762dc550d8c9b35d190b1
|
[
"MIT"
] | null | null | null |
import networkx as nx
import numpy as np
from sklearn import covariance
import torch
def convertToTorch(data, req_grad=False, use_cuda=False):
    """Convert data from numpy to a torch tensor; if the req_grad flag is
    on then gradient calculation is turned on.

    Args:
        data: A numpy array, or an existing torch tensor (returned with
            the requested grad flag).
        req_grad (bool): Enable gradient tracking on the result.
        use_cuda (bool): Place the tensor on the GPU.

    Returns:
        torch.Tensor: A float tensor (cuda variant if use_cuda).
    """
    if not torch.is_tensor(data):
        dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        # Bug fix: np.float was removed in NumPy 1.24; the builtin float is
        # the equivalent (64-bit) scalar type.
        data = torch.from_numpy(data.astype(float, copy=False)).type(dtype)
    data.requires_grad = req_grad
    return data
def eigVal_conditionNum(A):
    """Return the (real parts of the) eigenvalues of A together with its
    condition number, defined as max(|eig|) / min(|eig|).
    """
    eigenvalues = [value.real for value in np.linalg.eigvals(A)]
    magnitudes = np.abs(eigenvalues)
    return eigenvalues, max(magnitudes) / min(magnitudes)
def getCovariance(Xb, offset = 0.1):
    """Calculate the batch covariance matrix, nudging the spectrum of any
    matrix whose smallest eigenvalue is too close to zero.

    Args:
        Xb (3D np array): The input sample matrices (B x M x D)
        offset (float): The eigenvalue offset in case of bad
            condition number

    Returns:
        Sb (3D np array): Covariance matrices (B x D x D)
    """
    covariances = []
    for sample in Xb:
        cov_mat = covariance.empirical_covariance(sample, assume_centered=False)
        # Inspect the spectrum of the empirical covariance.
        eig, con = eigVal_conditionNum(cov_mat)
        if min(eig) <= 1e-3:
            # Shift the diagonal so the smallest eigenvalue becomes `offset`.
            print(f'Adjust the eval: min {min(eig)}, con {con}')
            cov_mat += np.eye(cov_mat.shape[-1]) * (offset - min(eig))
        covariances.append(cov_mat)
    return np.array(covariances)
def generateRandomGraph(num_nodes, sparsity, seed=None):
    """Generate a random erdos-renyi graph with a given sparsity.

    Args:
        num_nodes (int): The number of nodes in the DAG
        sparsity (float): = #edges-present/#total-edges
        seed (int, optional): set the numpy random seed

    Returns:
        edge_connections (2D np array (float)): Adj matrix
    """
    # Bug fix: `if seed:` silently skipped seeding when seed == 0;
    # compare against None so 0 is a valid seed.
    if seed is not None:
        np.random.seed(seed)
    G = nx.generators.random_graphs.gnp_random_graph(
        num_nodes,
        sparsity,
        seed=seed,
        directed=False
    )
    edge_connections = nx.adjacency_matrix(G).todense()
    return edge_connections
def simulateGaussianSamples(
    num_nodes,
    edge_connections,
    num_samples,
    seed=None,
    u=0.1,
    w_min=0.5,
    w_max=1.0,
):
    """Simulating num_samples from a Gaussian distribution. The
    precision matrix of the Gaussian is determined using the
    edge_connections.

    Args:
        num_nodes (int): The number of nodes in the DAG
        edge_connections (2D np array (float)): Adj matrix
        num_samples (int): The number of samples
        seed (int, optional): set the numpy random seed
        u (float): Min eigenvalue offset for the precision matrix
        w_min (float): Precision matrix entries ~Unif[w_min, w_max]
        w_max (float): Precision matrix entries ~Unif[w_min, w_max]

    Returns:
        X (2D np array (float)): num_samples x num_nodes
        precision_mat (2D np array (float)): num_nodes x num_nodes
    """
    # zero mean of Gaussian distribution
    mean_value = 0
    mean_normal = np.ones(num_nodes) * mean_value

    # Bug fix: `if seed:` ignored seed == 0; compare against None so any
    # integer seed (including 0) is honored.
    if seed is not None:
        np.random.seed(seed)

    # uniform entry matrix with entries in [w_min, w_max]
    U = np.matrix(np.random.random((num_nodes, num_nodes))
                  * (w_max - w_min) + w_min)
    theta = np.multiply(edge_connections, U)
    # making it symmetric, with identity added for diagonal dominance
    theta = (theta + theta.T) / 2 + np.eye(num_nodes)

    smallest_eigval = np.min(np.linalg.eigvals(theta))
    # Just in case: discard an epsilon-sized complex component introduced
    # by numerical error.
    smallest_eigval = smallest_eigval.real
    # shift the spectrum so the minimum eigenvalue equals u
    precision_mat = theta + np.eye(num_nodes) * (u - smallest_eigval)

    # getting the covariance matrix via exact inverse (avoid pinv)
    cov = np.linalg.inv(precision_mat)

    # re-seed so the sampling stage is reproducible independently
    if seed is not None:
        np.random.seed(seed)

    # Sampling data from multivariate normal distribution
    data = np.random.multivariate_normal(
        mean=mean_normal,
        cov=cov,
        size=num_samples
    )
    return data, precision_mat  # MxD, DxD
| 33.757813
| 78
| 0.649618
| 611
| 4,321
| 4.479542
| 0.288052
| 0.037998
| 0.013153
| 0.02046
| 0.162221
| 0.135185
| 0.111071
| 0.111071
| 0.056997
| 0.028498
| 0
| 0.005919
| 0.257116
| 4,321
| 128
| 79
| 33.757813
| 0.846729
| 0.449664
| 0
| 0.081967
| 0
| 0
| 0.0191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.065574
| 0
| 0.229508
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42bc62f46cb6d0412a2527cc37f497de098a673f
| 1,475
|
py
|
Python
|
Exercicios/multplica_matriz.py
|
eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_2_Coursera
|
b5b9198e16b4b67894b85766eb521ae96010accf
|
[
"MIT"
] | 1
|
2020-08-28T20:29:23.000Z
|
2020-08-28T20:29:23.000Z
|
Exercicios/multplica_matriz.py
|
eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_2_Coursera
|
b5b9198e16b4b67894b85766eb521ae96010accf
|
[
"MIT"
] | null | null | null |
Exercicios/multplica_matriz.py
|
eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_2_Coursera
|
b5b9198e16b4b67894b85766eb521ae96010accf
|
[
"MIT"
] | null | null | null |
def multiplica_matrizes(m1, m2):
    '''My solution for matrix multiplication.'''
    resultado = []
    for lin in range(len(m1)):          # rows of m1
        nova_linha = []
        for col in range(len(m2[0])):   # columns of m2
            soma = 0
            for k in range(len(m2)):
                soma += m1[lin][k] * m2[k][col]
            nova_linha.append(soma)
        resultado.append(nova_linha)
    return resultado
def mat_mul(A, B):
    """Return the matrix product of A and B as nested lists.

    Requires the number of columns of A to equal the number of rows of B.
    """
    rows_a, cols_a = len(A), len(A[0])
    rows_b, cols_b = len(B), len(B[0])
    assert cols_a == rows_b
    return [
        [sum(A[r][k] * B[k][c] for k in range(cols_a)) for c in range(cols_b)]
        for r in range(rows_a)
    ]
# Alternative test inputs (kept for reference):
# mat1 = [[2,3,1], [-1, 0, 2]]
# mat2 = [[1, -2], [0, 5],[4, 1]]
# mat1 = [[5, 8, -4], [6, 9, -5],[4, 7, -2]]
# mat2 = [[2], [-3], [1]]
# mat1 = [[2,5,9], [3, 6, 8]]
# Active test input: a 2x3 times 3x2 product.
mat1 = [[1, 2, 3], [4, 5, 6]]
mat2 = [[1, 2],[3, 4],[5, 6]]
# mat2 = [[2,7],[4,3],[5,2]]
# Reference on matrix multiplication (in Portuguese):
#https://brasilescola.uol.com.br/matematica/multiplicacao-matrizes.htm
#
print(multiplica_matrizes(mat1, mat2))
# print(mat_mul (mat1, mat2))
| 28.921569
| 70
| 0.492203
| 222
| 1,475
| 3.171171
| 0.265766
| 0.049716
| 0.046875
| 0.025568
| 0.071023
| 0.071023
| 0.028409
| 0
| 0
| 0
| 0
| 0.091
| 0.322034
| 1,475
| 51
| 71
| 28.921569
| 0.613
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.068966
| false
| 0
| 0
| 0
| 0.137931
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42c012688f03cf2033f2ea77e4e8d937fb973de4
| 996
|
py
|
Python
|
bifacialvf/tests/test_vf.py
|
shirubana/bifacialvf
|
7cd1c4c658bb7a68f0815b2bd1a6d5c492ca7300
|
[
"BSD-3-Clause"
] | 16
|
2018-01-17T06:03:23.000Z
|
2021-11-08T18:54:20.000Z
|
bifacialvf/tests/test_vf.py
|
shirubana/bifacialvf
|
7cd1c4c658bb7a68f0815b2bd1a6d5c492ca7300
|
[
"BSD-3-Clause"
] | 36
|
2018-03-16T15:17:58.000Z
|
2022-03-18T17:54:49.000Z
|
bifacialvf/tests/test_vf.py
|
shirubana/bifacialvf
|
7cd1c4c658bb7a68f0815b2bd1a6d5c492ca7300
|
[
"BSD-3-Clause"
] | 15
|
2018-01-11T09:11:13.000Z
|
2022-03-21T11:37:42.000Z
|
"""
Tests of the view factors module
"""
import pytest
import numpy as np
from bifacialvf.vf import getSkyConfigurationFactors
from bifacialvf.tests import (
SKY_BETA160_C05_D1, SKY_BETA20_C05_D1, SKY_BETA20_C0_D1, SKY_BETA160_C0_D1,
SKY_BETA160_C1_D1, SKY_BETA20_C1_D1, SKY_BETA20_C1_D0, SKY_BETA160_C1_D0,
SKY_BETA160_C05_D0, SKY_BETA20_C05_D0)
@pytest.mark.parametrize('beta, C, D, expected',
    [(160, 0.5, 1, SKY_BETA160_C05_D1), (20, 0.5, 1, SKY_BETA20_C05_D1),
     (20, 0, 1, SKY_BETA20_C0_D1), (160, 0, 1, SKY_BETA160_C0_D1),
     (160, 1, 1, SKY_BETA160_C1_D1), (20, 1, 1, SKY_BETA20_C1_D1),
     (20, 1, 0, SKY_BETA20_C1_D0), (160, 1, 0, SKY_BETA160_C1_D0),
     (160, 0.5, 0, SKY_BETA160_C05_D0), (20, 0.5, 0, SKY_BETA20_C05_D0)])
def test_getSkyConfigurationFactors(beta, C, D, expected):
    """
    Regression test for getSkyConfigurationFactors("interior", beta, C, D).

    Benchmark against the master branch on 2018-08-20 at 91e785d: the
    `expected` arrays were captured from that commit, so any numeric
    drift in the view-factor calculation fails this test.
    """
    # beta/C/D are geometry parameters passed straight through; the exact
    # physical meaning is defined in bifacialvf.vf, not visible here.
    assert np.allclose(
        getSkyConfigurationFactors("interior", beta, C, D), expected)
| 39.84
| 79
| 0.715863
| 170
| 996
| 3.835294
| 0.288235
| 0.153374
| 0.079755
| 0.064417
| 0.042945
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191847
| 0.162651
| 996
| 24
| 80
| 41.5
| 0.589928
| 0.09739
| 0
| 0
| 0
| 0
| 0.031963
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.0625
| false
| 0
| 0.25
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42c0646e767e46f559cbd944cee5d0ed57e7deeb
| 732
|
py
|
Python
|
test_horovod.py
|
lu-wang-dl/test-horovod
|
0b1699057fe03f84bbea46c3e63197a6c9e21c14
|
[
"Apache-2.0"
] | null | null | null |
test_horovod.py
|
lu-wang-dl/test-horovod
|
0b1699057fe03f84bbea46c3e63197a6c9e21c14
|
[
"Apache-2.0"
] | null | null | null |
test_horovod.py
|
lu-wang-dl/test-horovod
|
0b1699057fe03f84bbea46c3e63197a6c9e21c14
|
[
"Apache-2.0"
] | null | null | null |
# Databricks notebook source
import horovod.tensorflow.keras as hvd
def run_training_horovod():
    """Worker entry point: initialize Horovod and print environment/rank info.

    No training happens here -- this is a smoke test of the Horovod setup,
    run once per worker by HorovodRunner.
    """
    # Horovod: initialize Horovod.
    hvd.init()
    import os
    print(os.environ.get('PYTHONPATH'))
    print(os.environ.get('PYTHONHOME'))
    print(f"Rank is: {hvd.rank()}")
    print(f"Size is: {hvd.size()}")
# COMMAND ----------
from sparkdl import HorovodRunner
# NOTE(review): np is negated here -- in Databricks HorovodRunner a negative
# np presumably means local-mode with |np| subprocesses; confirm against the
# HorovodRunner documentation before relying on it.
hr = HorovodRunner(np=-spark.sparkContext.defaultParallelism, driver_log_verbosity="all")
hr.run(run_training_horovod)
# COMMAND ----------
from sparkdl import HorovodRunner
# Positive np: distribute across the cluster's task slots.
hr = HorovodRunner(np=spark.sparkContext.defaultParallelism, driver_log_verbosity="all")
hr.run(run_training_horovod) # manually stopping b/c it's just hanging
# COMMAND ----------
| 24.4
| 89
| 0.715847
| 91
| 732
| 5.648352
| 0.494505
| 0.064202
| 0.105058
| 0.066148
| 0.51751
| 0.51751
| 0.51751
| 0.51751
| 0.51751
| 0.51751
| 0
| 0
| 0.132514
| 732
| 29
| 90
| 25.241379
| 0.809449
| 0.20765
| 0
| 0.285714
| 0
| 0
| 0.119089
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.357143
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42c3250899086a2d423b9d8448bed7aa2e3d35b4
| 1,832
|
py
|
Python
|
datasets.py
|
Liuhongzhi2018/Car_detection
|
f32fea9c348c691ccc30b9804a4f3fa32732bbae
|
[
"MIT"
] | 1
|
2022-03-05T04:20:46.000Z
|
2022-03-05T04:20:46.000Z
|
datasets.py
|
Liuhongzhi2018/Car_detection
|
f32fea9c348c691ccc30b9804a4f3fa32732bbae
|
[
"MIT"
] | null | null | null |
datasets.py
|
Liuhongzhi2018/Car_detection
|
f32fea9c348c691ccc30b9804a4f3fa32732bbae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 10:11:09 2020
@author: NAT
"""
import torch
from torch.utils.data import Dataset
import json
import os
from PIL import Image
from utils import transform
class VOCDataset(Dataset):
    """Detection dataset backed by pre-built JSON index files per split."""

    def __init__(self, DataFolder, split):
        """
        DataFolder: folder where data files are stored
        split: split {"TRAIN", "TEST"}
        """
        self.split = str(split.upper())
        if self.split not in {"TRAIN", "TEST"}:
            print("Param split not in {TRAIN, TEST}")
        assert self.split in {"TRAIN", "TEST"}
        self.DataFolder = DataFolder
        # Load the image paths and the per-image object annotations.
        images_file = os.path.join(DataFolder, self.split + '_images.json')
        objects_file = os.path.join(DataFolder, self.split + '_objects.json')
        with open(images_file, 'r') as fp:
            self.images = json.load(fp)
        with open(objects_file, 'r') as fp:
            self.objects = json.load(fp)
        # One annotation record per image is required.
        assert len(self.images) == len(self.objects)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, i):
        # Decode the image and fetch its annotation record.
        image = Image.open(self.images[i], mode="r").convert("RGB")
        annotations = self.objects[i]
        boxes = torch.FloatTensor(annotations["boxes"])
        labels = torch.LongTensor(annotations['labels'])
        difficulties = torch.ByteTensor(annotations['difficulties'])
        # Apply the split-dependent transform pipeline from utils.
        new_image, new_boxes, new_labels, new_difficulties = transform(
            image, boxes, labels, difficulties, self.split)
        return new_image, new_boxes, new_labels, new_difficulties
| 33.309091
| 105
| 0.543668
| 203
| 1,832
| 4.79803
| 0.374384
| 0.055441
| 0.033881
| 0.030801
| 0.221766
| 0.158111
| 0.158111
| 0.158111
| 0
| 0
| 0
| 0.010879
| 0.347707
| 1,832
| 55
| 106
| 33.309091
| 0.804184
| 0.120633
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.1
| false
| 0
| 0.2
| 0.033333
| 0.4
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42c34955df6c0e7aa377ac0cc57e813a5826e6fa
| 7,564
|
py
|
Python
|
roles/gitlab_runner/filter_plugins/from_toml.py
|
wikimedia/operations-gitlab-ansible
|
f6433674ff812ea6e07ee192ff6fd848ba252aaa
|
[
"MIT"
] | 17
|
2019-03-08T15:33:46.000Z
|
2021-11-02T18:22:47.000Z
|
roles/gitlab_runner/filter_plugins/from_toml.py
|
wikimedia/operations-gitlab-ansible
|
f6433674ff812ea6e07ee192ff6fd848ba252aaa
|
[
"MIT"
] | 8
|
2018-12-23T21:17:36.000Z
|
2019-12-10T13:52:13.000Z
|
roles/gitlab_runner/filter_plugins/from_toml.py
|
wikimedia/operations-gitlab-ansible
|
f6433674ff812ea6e07ee192ff6fd848ba252aaa
|
[
"MIT"
] | 12
|
2019-01-26T15:00:32.000Z
|
2022-03-15T08:04:17.000Z
|
#!/usr/bin/python
DOCUMENTATION = '''
---
module: to_toml, from_toml
version_added: "2.8"
short_description: Converts Python data to TOML and TOML to Python data.
author:
- "Samy Coenen (contact@samycoenen.be)"
'''
import datetime
import sys
from collections import OrderedDict
#pip3 install python-toml
def to_toml(data):
    """Serialize a Python dict to a TOML string."""
    return dumps(data)
def from_toml(data):
    """Parse a TOML string into Python data."""
    return loads(data)
class FilterModule(object):
    """Ansible filter plugin exposing the TOML jinja2 filters."""

    def filters(self):
        # Map jinja2 filter names to their implementations.
        mapping = {
            'to_toml': to_toml,
            'from_toml': from_toml,
        }
        return mapping
def loads(s, *args, **kwargs):
    """Parse a TOML string (str or UTF-8 bytes) into nested dicts."""
    if not isinstance(s, basestring):
        raise TypeError("It's not a string.")
    try:
        # bytes input: decode first; str has no .decode and falls through.
        s = s.decode('utf-8')
    except AttributeError:
        pass
    return _loads(_clear_r_n_t(s))
def load(file, *args, **kwargs):
    """Read *file* from disk and parse its contents as TOML."""
    contents = _read(file, *args, **kwargs)
    return loads(contents)
def dumps(s, *args, **kwargs):
    """Serialize a dict to a TOML string."""
    if not isinstance(s, dict):
        raise TypeError("It's not a dict.")
    rendered = _json_transition_str(s)
    return un_utf_8(rendered)
def dump(file, s, *args, **kwargs):
    """Serialize *s* to TOML and write the result to *file*."""
    rendered = dumps(s)
    _write(file, rendered)
def _clear_r_n_t(v):
return v.replace('\r', '').replace('\t', '').split('\n')
def _clear_empty_l_r(v):
return v.rstrip(' ').lstrip(' ')
def _clear_empty(v):
return v.replace(' ', '')
def _is_empty(v):
return v[0] if v else v
def _get_key(v):
    """Extract the table name parts from a TOML header line.

    Returns ``(key, True)`` for an array-of-tables header ``[[name]]`` and
    ``(key, False)`` for a plain table header ``[name]``; *key* is the
    dot-split name list produced by ``_re``.
    """
    # Raw strings: '\[' is an invalid escape sequence in a plain literal
    # (DeprecationWarning today, an error in future CPython versions).
    key = _re(r'\[\[(.*?)\]\]', v)
    if key:
        return key, True
    return _re(r'\[(.*?)\]', v), False
def _loads(s):
    """Core TOML parser: fold a list of pre-split lines into nested dicts.

    *s* is the output of ``_clear_r_n_t`` (CR/TAB-free lines).  Parser state
    carried across lines: ``key`` is the current table path, ``key_status``
    marks an array-of-tables header, and ``it``/``fg`` buffer a value that
    spans multiple lines (a multi-line array).
    """
    items, nd, it, fg = ordict(), ordict(), [], False
    key_status = False
    for v in s:
        # Skip blank lines and full-line comments.
        if not v or _is_empty(_clear_empty(v)) == '#':
            continue
        if '[' == _is_empty(_clear_empty(v)) and ']' in v:
            # Table header: remember the key path, start a fresh section dict.
            key, key_status = _get_key(v)
            nd = ordict()
        else:
            _it = v.split('=')
            _it[0] = _clear_empty(_is_empty(_it))
            """
            arr_arr = [
                'zbc',
                'sdf',
            ]
            """
            try:
                # Detect the end/start of a multi-line array value.
                if '[' not in _it[0] and _it[0][-1] == ']':
                    it.append(_it[0])
                    fg = False
                elif _it[1].replace(' ', '')[0] == '[' and ']' not in _it[1]:
                    it.append(_it[0])
                    fg = True
            except Exception as e:
                pass
            if fg:
                # Still inside a multi-line array: keep buffering pieces.
                it.append(_it[1] if len(_it) > 1 else _it[0])
            elif not fg and it:
                # Array finished: collapse the buffered pieces into one pair.
                _it = [it[0], ''.join(it[1:])]
                it = []
            nd.update(_str_transition_json(_it))
            ite = items
            try:
                # Walk/create the nested tables down to the current key path.
                # [1][:-1] = []
                for k in key[:-1]:
                    try:
                        ite = ite[k]
                    except Exception as e:
                        ite[k] = ordict()
                        ite = ite[k]
                    if isinstance(ite, list):
                        # Array-of-tables: descend into the latest element.
                        ite = ite[-1]
                try:
                    ite[key[-1]]
                    if key_status:
                        ite[key[-1]].append(nd)
                except Exception as e:
                    ite[key[-1]] = [nd] if key_status else nd
                finally:
                    key_status = False
            except Exception as e:
                # No header seen yet (NameError on ``key``): top-level pairs.
                ite.update(nd)
                pass
    return items
def _str_transition_json(v):
    """Convert a split ``key = value`` pair (list/tuple) into a one-entry dict."""
    item = ordict()
    if not isinstance(v, (list, tuple)):
        raise TypeError("It's not a list/tuple.")
    if (len(v) == 2):
        item[v[0]] = _str_transition_obj(_clear_empty_l_r(v[1]))
    elif (len(v) > 2):
        # The value itself contained '=' characters: re-join the tail.
        item[v[0]] = _str_transition_obj(_clear_empty_l_r('='.join(v[1:])))
    return item
def _str_transition_obj(v):
    """Convert a raw TOML value string into a Python object.

    Handles booleans and ISO-8601 ``Z`` datetimes explicitly, then falls
    back to ``eval`` for numbers/strings/lists; text that fails to parse
    is returned unchanged.
    """
    if not isinstance(v, basestring):
        raise TypeError("It's not a string")
    if v.lower() == 'true':
        return True
    elif v.lower() == 'false':
        return False
    try:
        # Raw string: '\d' is an invalid escape in a plain literal.
        if _re(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', v):
            return str_to_datetime(v)
    except Exception as e:
        raise e
    try:
        # SECURITY: eval() executes arbitrary expressions.  This module must
        # only ever be fed trusted TOML (local config files), never
        # untrusted/remote input.
        _veal = eval(v.replace(',', ', '))
        if isinstance(_veal, basestring):
            return escape(_veal)
        return _veal
    except SyntaxError as e:
        # Not a Python literal (e.g. a bare word): fall through unchanged.
        pass
    return v
def _json_transition_str(s, _k='', index=0):
    """Recursively render dict *s* as TOML text.

    *_k* is the dotted parent key path; *index* is the tab-indent depth.
    """
    _s = ''
    for k, v in s.items():
        _k = _k.rstrip('.') + '.' if _k else ''
        if isinstance(v, dict):
            # Nested table: emit a [parent.child] header, then recurse.
            _s += '\n' + '\t' * index + '[{}]\n'.format(_k + k)
            _s += _json_transition_str(v, _k + k, index=index + 1)
        elif isinstance(v, list) and isinstance(v[0], dict):
            # Array of tables: one [[name]] header per element.
            for _v in v:
                _s += '\n' + '\t' * index + '[[{}]]\n'.format(_k + k)
                _s += _json_transition_str(_v, _k + k, index=index + 1)
        elif not isinstance(v, dict):
            # Plain key = value line.
            _s += '\t' * index + _key_equal_value(k, v)
        else:
            _s += '\n'
    return _s
def _key_equal_value(k, v):
    """Render one ``key = value`` TOML line (with trailing newline)."""
    if isinstance(v, datetime.datetime):
        v = datetime_to_str(v)
    elif isinstance(v, bool):
        # bool must be tested before the generic str() branch so TOML gets
        # lowercase true/false.
        v = str(v).lower()
    elif not isinstance(v, basestring):
        v = str(v)
    else:
        # Strings are double-quoted.
        v = '"' + str(v) + '"'
    return k + ' = ' + _utf_8(v) + '\n'
def _read(file, *args, **kwargs):
    """Read *file* and return its text (explicit UTF-8 only on Python 3)."""
    if PY3:
        handle = open(file, encoding='utf-8', *args, **kwargs)
    else:
        # Python 2's open() has no encoding parameter.
        handle = open(file, *args, **kwargs)
    with handle as fp:
        return fp.read()
def _write(file, text, model='w', *args, **kwargs):
    """Write *text* to *file* using mode *model* (UTF-8 on Python 3)."""
    if PY3:
        handle = open(file, model, encoding='utf-8', *args, **kwargs)
    else:
        # Python 2's open() has no encoding parameter.
        handle = open(file, model, *args, **kwargs)
    with handle as fp:
        fp.write(text)
def _re(reg, text):
reg = re.findall(re.compile(reg), text)
reg = reg[0].split('.') if reg else []
return reg
def escape(v):
    """Backslash-escape control characters and quotes in string *v*.

    Non-string values are returned unchanged.
    """
    if not isinstance(v, basestring):
        return v
    # NOTE(review): the '\/' step replaces backslash+slash with itself
    # (both literals evaluate to the same two characters), so it is a
    # no-op -- probably intended to escape '/' as in JSON; confirm intent.
    return v.replace(
        '\\', '\\\\').replace(
        '\b', '\\b').replace(
        '\t', '\\t').replace(
        '\f', '\\f').replace(
        '\r', '\\r').replace(
        '\"', '\\"').replace(
        '\/', '\\/').replace(
        '\n', '\\n')
def escape_u(v):
    """Unicode-escape string *v*; non-strings are returned unchanged."""
    if not isinstance(v, basestring):
        return v
    # v = escape(v)
    v = v.encode('unicode-escape').decode()
    if PY2:
        # Python 2 escapes latin-1 chars as \xNN; normalize to \u00NN.
        return v.replace('\\x', '\\u00')
    return v
def unescape_u(v):
    """Reverse of escape_u; non-strings are returned unchanged.

    NOTE(review): this calls ``unescape()``, which is not defined anywhere
    in this module -- any string input raises NameError.  Dead/broken code.
    """
    if not isinstance(v, basestring):
        return v
    v = unescape(v)
    return v.encode().decode('unicode-escape')
def _utf_8(v):
    """Decode *v* from UTF-8 on Python 2; identity on Python 3."""
    return v.decode('utf-8') if PY2 else v
def un_utf_8(v):
    """Encode *v* to UTF-8 bytes on Python 2; identity on Python 3."""
    return v.encode('utf-8') if PY2 else v
def str_to_datetime(dtstr, strftime='%Y-%m-%dT%H:%M:%SZ'):
    """Parse an ISO-8601 'Z' timestamp string into a datetime object."""
    if not isinstance(dtstr, basestring):
        raise TypeError("It's not a string.")
    parsed = datetime.datetime.strptime(dtstr, strftime)
    return parsed
def datetime_to_str(dttime, strftime='%Y-%m-%dT%H:%M:%SZ'):
    """Format a datetime object as an ISO-8601 'Z' timestamp string."""
    if not isinstance(dttime, datetime.datetime):
        raise TypeError("It's not a datetime.")
    formatted = dttime.strftime(strftime)
    return formatted
# Python version flags used to branch Py2/Py3 behaviour above.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY35 = sys.version_info[:2] == (3, 5)
PY36 = sys.version_info[:2] == (3, 6)
if PY3:
    # Py3 compatibility shims.  Note basestring/integer_types become
    # 1-tuples (trailing comma), which isinstance() accepts.
    basestring = str,
    integer_types = int,
    unicode = str
    unichr = chr
    _range = range
else:
    integer_types = (int, long)
    _range = xrange
def ordict():
    # Plain dict preserves insertion order on 3.6; the PY36 flag only
    # matches exactly 3.6, so 3.7+ still gets OrderedDict (harmless).
    return {} if PY36 else OrderedDict()
if __name__ == '__main__':
    pass
| 22.714715
| 77
| 0.504098
| 1,003
| 7,564
| 3.633101
| 0.175474
| 0.032656
| 0.037047
| 0.027991
| 0.283205
| 0.227497
| 0.19786
| 0.127058
| 0.087267
| 0.087267
| 0
| 0.015513
| 0.335272
| 7,564
| 333
| 78
| 22.714715
| 0.709228
| 0.020888
| 0
| 0.229437
| 0
| 0.004329
| 0.079422
| 0.008259
| 0.008658
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0.021645
| 0.012987
| 0.030303
| 0.30303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42c37f3f064078bde91e95903b77950bc9bd114f
| 414
|
py
|
Python
|
ABC190/D.py
|
shimomura314/AtcoderCodes
|
db1d62a7715f5f1b3c40eceff8d34f0f34839f41
|
[
"MIT"
] | null | null | null |
ABC190/D.py
|
shimomura314/AtcoderCodes
|
db1d62a7715f5f1b3c40eceff8d34f0f34839f41
|
[
"MIT"
] | null | null | null |
ABC190/D.py
|
shimomura314/AtcoderCodes
|
db1d62a7715f5f1b3c40eceff8d34f0f34839f41
|
[
"MIT"
] | null | null | null |
def divisor(n: int):
    """Return the positive divisors of n in ascending order.

    Note: for perfect squares the square root appears twice, matching the
    paired small/large append strategy.
    """
    small, large = [], []
    for d in range(1, int(n ** 0.5) + 1):
        if n % d == 0:
            small.append(d)
            large.append(n // d)
    return sorted(small + large)
# Appears to solve AtCoder ABC190 D: for each divisor k of 2n, a run of
# consecutive integers of length k summing to n starts at
# a = (2n/k + 1 - k)/2, which must be an integer (a2 even) to count.
n = int(input())
divisors = divisor(2*n)
answer = 0
for integer in divisors:
    pair = 2*n // integer
    a2 = pair + 1 - integer
    if a2 % 2 == 0:
        answer += 1
print(answer)
| 21.789474
| 43
| 0.562802
| 59
| 414
| 3.949153
| 0.389831
| 0.103004
| 0.103004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044983
| 0.301932
| 414
| 19
| 44
| 21.789474
| 0.761246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.117647
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42c55d5c799cf1af35cb63cb32b363a33a23a6ae
| 862
|
py
|
Python
|
TV/models/episode.py
|
viswas163/Parse-bot
|
881df2767cc5bdf88ff5dcc451a97c2ed96fc073
|
[
"MIT"
] | null | null | null |
TV/models/episode.py
|
viswas163/Parse-bot
|
881df2767cc5bdf88ff5dcc451a97c2ed96fc073
|
[
"MIT"
] | null | null | null |
TV/models/episode.py
|
viswas163/Parse-bot
|
881df2767cc5bdf88ff5dcc451a97c2ed96fc073
|
[
"MIT"
] | null | null | null |
from mongoengine import Document, IntField, StringField, FloatField, connect
from pymongo import UpdateOne
class Episode(Document):
    """MongoEngine document describing one TV episode."""
    # All fields are mandatory; validate() fails if any is missing.
    title = StringField(required=True)
    show = StringField(required=True)
    rating = FloatField(required=True)
    votes = IntField(required=True)
def bulk_upsert(episodes):
    """Validate *episodes* and bulk-upsert them, keyed on (title, show).

    Invalid documents are skipped; valid ones are written with a single
    unordered bulk_write against the Episode collection.
    """
    # BUG FIX: ValidationError was referenced below but never imported, so
    # a failing validate() raised NameError instead of being skipped.
    from mongoengine import ValidationError

    bulk_operations = []
    for entity in episodes:
        try:
            entity.validate()
            # Renamed from `filter` to avoid shadowing the builtin.
            doc_filter = {
                'title': entity.title,
                'show': entity.show
            }
            bulk_operations.append(
                UpdateOne(doc_filter, {'$set': entity.to_mongo().to_dict()}, upsert=True)
            )
        except ValidationError:
            # Best-effort import: silently drop invalid episodes.
            pass
    if bulk_operations:
        with connect("tvdb") as c:
            Episode._get_collection().bulk_write(bulk_operations, ordered=False)
| 29.724138
| 93
| 0.611369
| 85
| 862
| 6.082353
| 0.552941
| 0.092843
| 0.088975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.293503
| 862
| 29
| 93
| 29.724138
| 0.848933
| 0
| 0
| 0
| 0
| 0
| 0.019699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0.041667
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42caa75d97d78a5da176444b0c283b314888e8e5
| 4,161
|
py
|
Python
|
BasicReport.py
|
nikneural/Report
|
414b08c157ef14345372fd5b84f134eb7c911fe4
|
[
"MIT"
] | null | null | null |
BasicReport.py
|
nikneural/Report
|
414b08c157ef14345372fd5b84f134eb7c911fe4
|
[
"MIT"
] | null | null | null |
BasicReport.py
|
nikneural/Report
|
414b08c157ef14345372fd5b84f134eb7c911fe4
|
[
"MIT"
] | null | null | null |
import subprocess
import docx.table
import pandas as pd
from docx import Document
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from docx.oxml import OxmlElement
from docx.oxml import ns
from docx.oxml.ns import qn
from docx.shared import Inches, Pt
from docx.table import _Cell
from docx2pdf import convert
class BasicReport:
    """Helpers for building docx reports: table styling, PDF export, page numbers."""

    def __init__(self):
        pass

    @staticmethod
    def column_text_change(data: pd.DataFrame,
                           table: docx.table.Table,
                           size: int,
                           bold: bool = False):
        """Set font size/weight of the header row (row 0) of *table*."""
        for i in range(len(data.columns)):
            run = table.cell(0, i).paragraphs[0].runs[0]
            run.font.size = Pt(size)
            run.font.bold = bold

    @staticmethod
    def cell_text_change(data: pd.DataFrame,
                         table: docx.table.Table,
                         size: int,
                         bold: bool = False):
        """Set font size/weight of every data cell (rows 1..n) of *table*."""
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                run = table.cell(i + 1, j).paragraphs[0].runs[0]
                run.font.size = Pt(size)
                run.font.bold = bold

    @staticmethod
    def delete_columns(table, columns):
        """Remove the given column indices from a docx table (mutates *columns*)."""
        # Sort columns descending so earlier deletions don't shift indices.
        columns.sort(reverse=True)
        grid = table._tbl.find("w:tblGrid", table._tbl.nsmap)
        for ci in columns:
            for cell in table.column_cells(ci):
                cell._tc.getparent().remove(cell._tc)
            # Delete column reference.
            col_elem = grid[ci]
            grid.remove(col_elem)

    @staticmethod
    def generate_pdf_windows(doc_path: str, out_path: str):
        """Generate pdf file for windows system"""
        convert(doc_path, out_path)

    @staticmethod
    def generate_pdf_Linux(doc_path, out_path):
        """Generate a PDF via LibreOffice (Linux); returns *doc_path*."""
        subprocess.call(['soffice',
                         # '--headless',
                         '--convert-to',
                         'pdf',
                         '--outdir',
                         out_path,
                         doc_path])
        return doc_path

    @staticmethod
    def set_row_height(row, height):
        """Force a fixed height on a table row via a w:trHeight element."""
        # NOTE(review): python-docx rows normally expose `_tr`; confirm
        # `row.tr` is valid for the row objects passed here.
        trPr = row.tr.get_or_add_trPr()
        trHeight = OxmlElement('w:trHeight')
        trHeight.set(qn('w:val'), str(height))
        trPr.append(trHeight)

    @staticmethod
    def set_vertical_cell_direction(cell: _Cell, direction: str):
        """Rotate cell text: tbRl = top-to-bottom, btLr = bottom-to-top."""
        # direction: tbRl -- top to bottom, btLr -- bottom to top
        assert direction in ("tbRl", "btLr")
        tc = cell._tc
        tcPr = tc.get_or_add_tcPr()
        textDirection = OxmlElement('w:textDirection')
        textDirection.set(qn('w:val'), direction)  # btLr tbRl
        tcPr.append(textDirection)

    @staticmethod
    def create_element(name):
        """Create a bare OOXML element by qualified name."""
        return OxmlElement(name)

    @staticmethod
    def create_attribute(element, name, value):
        """Set a namespaced attribute on an OOXML element."""
        element.set(ns.qn(name), value)

    def create_document(self, header):
        """Create a Document with a bold header and a footer carrying page numbers."""
        document = Document()
        section = document.sections[-1]
        # BUG FIX: was `section.left_martin` -- a typo that silently set an
        # unused attribute instead of the real page margin.
        section.left_margin = Inches(0.1)
        paragraph_format = document.styles['Normal'].paragraph_format
        paragraph_format.space_before = 0
        paragraph_format.space_after = 0
        document.add_paragraph().add_run(header).bold = True
        document.add_paragraph(" ")
        section.footer.paragraphs[0].text = header
        section.footer.add_paragraph()
        self.add_page_number(section.footer.paragraphs[1].add_run())
        section.footer.paragraphs[1].alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
        return document

    def add_page_number(self, run):
        """Append a PAGE field (w:fldChar begin / instrText / end) to *run*."""
        fldChar1 = self.create_element('w:fldChar')
        self.create_attribute(fldChar1, 'w:fldCharType', 'begin')
        instrText = self.create_element('w:instrText')
        self.create_attribute(instrText, 'xml:space', 'preserve')
        instrText.text = "PAGE"
        fldChar2 = self.create_element('w:fldChar')
        self.create_attribute(fldChar2, 'w:fldCharType', 'end')
        run._r.append(fldChar1)
        run._r.append(instrText)
        run._r.append(fldChar2)
| 32.76378
| 77
| 0.59553
| 485
| 4,161
| 4.954639
| 0.286598
| 0.05618
| 0.014981
| 0.022472
| 0.177278
| 0.177278
| 0.151477
| 0.151477
| 0.114856
| 0.114856
| 0
| 0.007901
| 0.300409
| 4,161
| 126
| 78
| 33.02381
| 0.817588
| 0.048786
| 0
| 0.191919
| 0
| 0
| 0.043875
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 1
| 0.121212
| false
| 0.010101
| 0.111111
| 0.010101
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42cd0e4c33a465776d2f55cc4beb83f4edfc71a6
| 4,568
|
py
|
Python
|
main.py
|
meaug/indoor_air_quality_dht22_sgp30
|
a746a9955903de1f7ce5e5d84493f860c1fd0b16
|
[
"MIT"
] | null | null | null |
main.py
|
meaug/indoor_air_quality_dht22_sgp30
|
a746a9955903de1f7ce5e5d84493f860c1fd0b16
|
[
"MIT"
] | null | null | null |
main.py
|
meaug/indoor_air_quality_dht22_sgp30
|
a746a9955903de1f7ce5e5d84493f860c1fd0b16
|
[
"MIT"
] | null | null | null |
from network import WLAN
import urequests as requests # from ubidots tutorial https://help.ubidots.com/en/articles/961994-connect-any-pycom-board-to-ubidots-using-wi-fi-over-http
from machine import I2C
import adafruit_sgp30 # from https://github.com/alexmrqt/micropython-sgp30
from machine import Pin
from dht import DHT # from https://github.com/JurassicPork/DHT_PyCom
import machine
import time
# Ubidots REST API token (placeholder -- fill in before deploying).
TOKEN = "INSERT UBIDOTS TOKEN HERE"
# Wi-Fi setup: Pycom WLAN in station mode, internal antenna.
wlan = WLAN(mode=WLAN.STA)
wlan.antenna(WLAN.INT_ANT)
# Wi-Fi credentials
wlan.connect("INSERT WIFI SSI", auth=(WLAN.WPA2, "INSERT WIFI PASSWORD"), timeout=5000)
while not wlan.isconnected ():
    machine.idle()
print("Connected to Wifi\n")
# Initialize I2C bus
i2c = I2C(0, I2C.MASTER)
i2c.init(I2C.MASTER, baudrate=100000)
# Create library object on our I2C port
sgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)
print("SGP30 serial #", [hex(i) for i in sgp30.serial])
# Initialize SGP-30 internal drift compensation algorithm.
sgp30.iaq_init()
# Wait 15 seconds for the SGP30 to properly initialize
print("Waiting 15 seconds for SGP30 initialization.")
time.sleep(15)
# Retrieve previously stored baselines, if any (helps the compensation algorithm).
has_baseline = False
try:
    f_co2 = open('co2eq_baseline.txt', 'r')
    f_tvoc = open('tvoc_baseline.txt', 'r')
    co2_baseline = int(f_co2.read())
    tvoc_baseline = int(f_tvoc.read())
    # Use them to calibrate the sensor
    sgp30.set_iaq_baseline(co2_baseline, tvoc_baseline)
    f_co2.close()
    f_tvoc.close()
    has_baseline = True
except:
    # No baseline files yet (e.g. first boot): sensor starts uncalibrated.
    print('No SGP30 baselines found')
# Store the time at which last baseline has been saved
baseline_time = time.time()
# Initialize dht22
th = DHT(Pin('P23', mode=Pin.OPEN_DRAIN), 1) # 1 because dht22, change to 0 if using a DHT11
print("Waiting 2 seconds for DHT22 initialization.")
time.sleep(2)
# Builds the json to send the post request to ubidots
def build_json(variable1, value1, variable2, value2, variable3, value3, variable4, value4):
    """Build the Ubidots POST payload: {variable: {"value": value}, ...}.

    Dict construction from already-evaluated arguments cannot raise, so the
    original bare try/except-return-None was dead code and is removed.
    """
    return {variable1: {"value": value1},
            variable2: {"value": value2},
            variable3: {"value": value3},
            variable4: {"value": value4}}
# Sends the post request to ubidots using the REST API
def post_var(device, value1, value2, value3, value4):
    """POST temperature/humidity/CO2/TVOC readings for *device* to Ubidots.

    Returns the decoded JSON response, or None on any failure.
    NOTE(review): the bare except silently swallows network/auth errors --
    deliberate best-effort on a headless device, but hard to debug.
    """
    try:
        url = "https://industrial.api.ubidots.com/"
        url = url + "api/v1.6/devices/" + device
        headers = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}
        data = build_json("temperature", value1, "humidity", value2, "CO2", value3, "TVOC", value4)
        if data is not None:
            print(data)
            req = requests.post(url=url, headers=headers, json=data)
            return req.json()
        else:
            pass
    except:
        pass
while True:
    # Read temperature/humidity from the DHT22; retry until a valid frame.
    result = th.read()
    while not result.is_valid():
        time.sleep(.5)
        result = th.read()
    print('Temp.:', result.temperature)
    print('RH:', result.humidity)
    # Feed humidity/temperature to the SGP30 so its IAQ output is compensated.
    sgp30.set_iaq_rel_humidity(result.humidity, result.temperature)
    # Read eCO2 (ppm) and TVOC (ppb) from the SGP30.
    co2_eq, tvoc = sgp30.iaq_measure()
    print('co2eq = ' + str(co2_eq) + ' ppm \t tvoc = ' + str(tvoc) + ' ppb')
    # Push the readings to Ubidots...
    temperature = result.temperature
    humidity = result.humidity
    post_var("pycom", temperature, humidity, co2_eq, tvoc)
    # ...and to Pycom's pybytes service (signals 1-4).
    pybytes.send_signal(1,result.temperature)
    pybytes.send_signal(2,result.humidity)
    pybytes.send_signal(3,co2_eq)
    pybytes.send_signal(4,tvoc)
    # Persist SGP30 baselines: if one was loaded, resave hourly; otherwise
    # wait 12 hours (43200 s) for the first calibration to settle.
    if (has_baseline and (time.time() - baseline_time >= 3600)) \
            or ((not has_baseline) and (time.time() - baseline_time >= 43200)):
        print('Saving baseline')
        baseline_time = time.time()
        try:
            f_co2 = open('co2eq_baseline.txt', 'w')
            f_tvoc = open('tvoc_baseline.txt', 'w')
            bl_co2, bl_tvoc = sgp30.get_iaq_baseline()
            f_co2.write(str(bl_co2))
            f_tvoc.write(str(bl_tvoc))
            f_co2.close()
            f_tvoc.close()
            has_baseline = True
        except:
            # Filesystem write failed: keep running, retry next save window.
            print('Impossible to write SGP30 baselines!')
    # Measures every 5 minutes (300 seconds)
    time.sleep(300)
| 31.722222
| 153
| 0.668345
| 630
| 4,568
| 4.749206
| 0.353968
| 0.008021
| 0.022727
| 0.012032
| 0.102273
| 0.086898
| 0.070856
| 0.03008
| 0.03008
| 0.03008
| 0
| 0.046512
| 0.218695
| 4,568
| 143
| 154
| 31.944056
| 0.791818
| 0.240587
| 0
| 0.212766
| 0
| 0
| 0.14846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0.031915
| 0.085106
| 0
| 0.138298
| 0.117021
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42cd2ea8378c0d8edecc4b1ef21bb286fd030c27
| 5,278
|
py
|
Python
|
drivers/get_imu_data.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | null | null | null |
drivers/get_imu_data.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | null | null | null |
drivers/get_imu_data.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | 1
|
2021-09-04T10:27:30.000Z
|
2021-09-04T10:27:30.000Z
|
# coding:UTF-8
import queue
import serial
import time
import threading
# Shared receive buffers: 8 raw payload bytes per frame type.
ACCData = [0.0] * 8
GYROData = [0.0] * 8
AngleData = [0.0] * 8
FrameState = 0  # which frame type follows the 0x55 header (1=acc, 2=gyro, 3=angle)
Bytenum = 0  # position of the next byte within the current frame
CheckSum = 0  # running additive checksum over the frame bytes
# Latest decoded triples: acceleration, angular rate, attitude angles.
a = [0.0] * 3
w = [0.0] * 3
Angle = [0.0] * 3
count=0  # frames decoded so far (for throughput logging)
start_time = time.time()
interval=0.01
def DueData(inputdata,q):
    """Frame parser: split raw serial bytes into acc/gyro/angle frames.

    Frames start with 0x55, then a type byte (0x51 acc, 0x52 gyro,
    0x53 angle), 8 payload bytes, and an additive checksum.  When a full
    angle frame arrives, the combined tuple (a + w + Angle) is put on *q*.
    """
    global FrameState  # parser state must persist across calls
    global Bytenum
    global CheckSum
    global a
    global w
    global Angle
    global count
    global start_time
    for data in inputdata:  # walk the incoming bytes one at a time
        # data = ord(data)
        if FrameState == 0:  # waiting for a frame header
            if data == 0x55 and Bytenum == 0:  # 0x55 starts every frame
                CheckSum = data
                Bytenum = 1
                continue
            elif data == 0x51 and Bytenum == 1:  # acceleration frame follows
                CheckSum += data
                FrameState = 1
                Bytenum = 2
            elif data == 0x52 and Bytenum == 1:  # gyro frame follows
                CheckSum += data
                FrameState = 2
                Bytenum = 2
            elif data == 0x53 and Bytenum == 1:  # angle frame follows
                CheckSum += data
                FrameState = 3
                Bytenum = 2
        elif FrameState == 1:  # acc: collect the 8 payload bytes
            if Bytenum < 10:
                ACCData[Bytenum - 2] = data  # payload index starts at 0
                CheckSum += data
                Bytenum += 1
            else:
                if data == (CheckSum & 0xff):  # checksum byte matches
                    a = get_acc(ACCData)
                CheckSum = 0  # reset state for the next frame
                Bytenum = 0
                FrameState = 0
        elif FrameState == 2:  # gyro
            if Bytenum < 10:
                GYROData[Bytenum - 2] = data
                CheckSum += data
                Bytenum += 1
            else:
                if data == (CheckSum & 0xff):
                    w = get_gyro(GYROData)
                CheckSum = 0
                Bytenum = 0
                FrameState = 0
        elif FrameState == 3:  # angle
            if Bytenum < 10:
                AngleData[Bytenum - 2] = data
                CheckSum += data
                Bytenum += 1
            else:
                if data == (CheckSum & 0xff):
                    Angle = get_angle(AngleData)
                    # 9-element tuple: acc, gyro and angle triples combined.
                    d = a + w + Angle
                    # print("a(g):%10.3f %10.3f %10.3f w(deg/s):%10.3f %10.3f %10.3f Angle(deg):%10.3f %10.3f %10.3f" % d)
                    q.put(d)
                    count+=1
                    if count%1000==0:
                        # Average per-frame wall time over the run so far.
                        print('count 1 cost time',(time.time()-start_time)/count)
                    if count>100000000:
                        count=0
                    # return d
                CheckSum = 0
                Bytenum = 0
                FrameState = 0
def get_acc(datahex):
    """Decode six low/high byte pairs into (ax, ay, az), full scale +/-16 g."""
    k_acc = 16.0
    axes = []
    for lo, hi in ((datahex[0], datahex[1]),
                   (datahex[2], datahex[3]),
                   (datahex[4], datahex[5])):
        # 16-bit little-endian value scaled to +/- full range.
        value = (hi << 8 | lo) / 32768.0 * k_acc
        if value >= k_acc:
            value -= 2 * k_acc  # fold into the signed range
        axes.append(value)
    return axes[0], axes[1], axes[2]
def get_gyro(datahex):
    """Decode six low/high byte pairs into (wx, wy, wz), full scale +/-2000 deg/s."""
    k_gyro = 2000.0
    axes = []
    for lo, hi in ((datahex[0], datahex[1]),
                   (datahex[2], datahex[3]),
                   (datahex[4], datahex[5])):
        # 16-bit little-endian value scaled to +/- full range.
        value = (hi << 8 | lo) / 32768.0 * k_gyro
        if value >= k_gyro:
            value -= 2 * k_gyro  # fold into the signed range
        axes.append(value)
    return axes[0], axes[1], axes[2]
def get_angle(datahex):
    """Decode six low/high byte pairs into (roll, pitch, yaw), full scale +/-180 deg."""
    k_angle = 180.0
    axes = []
    for lo, hi in ((datahex[0], datahex[1]),
                   (datahex[2], datahex[3]),
                   (datahex[4], datahex[5])):
        # 16-bit little-endian value scaled to +/- full range.
        value = (hi << 8 | lo) / 32768.0 * k_angle
        if value >= k_angle:
            value -= 2 * k_angle  # fold into the signed range
        axes.append(value)
    return axes[0], axes[1], axes[2]
class GetImuData:
    """Open a serial port to the IMU and stream decoded frames into a queue."""

    def __init__(self, port, baud, timeout=0.5):
        # Decoded (acc, gyro, angle) tuples produced by DueData.
        self.q = queue.Queue()
        try:
            self.serial_obj = serial.Serial(port, baud, timeout=timeout)
        except Exception as e:
            # Cannot continue without the sensor.
            print(e)
            exit(-1)
        print('串口打开',self.serial_obj.is_open)  # "serial port opened"

    def get_data(self):
        """Blocking read loop: pull 33-byte chunks and feed the frame parser."""
        while True:
            datahex = self.serial_obj.read(33)
            DueData(datahex,self.q)
def imu_integration(q):
    """
    IMU integration (dead reckoning) -- placeholder, not implemented.

    :param q: queue of currently detected acceleration/angular-rate tuples
              (original docstring named this ``d``; the parameter is ``q``)
    :return: None
    """
if __name__ == '__main__':
    obj = GetImuData(port='com4',baud=115200)
    # Reader thread: pulls bytes off the serial port and fills obj.q.
    t1 = threading.Thread(target=obj.get_data)
    # BUG FIX: imu_integration is a module-level function, not a method, so
    # `obj.imu_integration` raised AttributeError. Pass the queue explicitly.
    t2 = threading.Thread(target=imu_integration, args=(obj.q,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
| 26.656566
| 122
| 0.48939
| 669
| 5,278
| 3.718984
| 0.22571
| 0.016077
| 0.025322
| 0.019293
| 0.18127
| 0.116158
| 0.060289
| 0.060289
| 0.060289
| 0.043408
| 0
| 0.079708
| 0.403372
| 5,278
| 198
| 123
| 26.656566
| 0.710384
| 0.082228
| 0
| 0.220126
| 0
| 0
| 0.006901
| 0
| 0
| 0
| 0.005855
| 0
| 0
| 1
| 0.044025
| false
| 0
| 0.025157
| 0
| 0.09434
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42cdb0ad159342fdea9a675f50b583e29f8c7d2a
| 3,082
|
py
|
Python
|
test/test_utils.py
|
dilettacal/nmt_seq2seq_evo
|
1de7647fb50445d17aa0eab8f300fdcbe6b8145e
|
[
"MIT"
] | null | null | null |
test/test_utils.py
|
dilettacal/nmt_seq2seq_evo
|
1de7647fb50445d17aa0eab8f300fdcbe6b8145e
|
[
"MIT"
] | null | null | null |
test/test_utils.py
|
dilettacal/nmt_seq2seq_evo
|
1de7647fb50445d17aa0eab8f300fdcbe6b8145e
|
[
"MIT"
] | null | null | null |
import os
import unittest
from torchtext.data import Field, Iterator
from project.utils.utils_metrics import AverageMeter
from project.utils.utils_logging import Logger
from project.utils.datasets import Seq2SeqDataset
# Fixture directory used by every test below: ./test/test_data
data_dir = os.path.join(".", "test", "test_data")
class TestIOUtils(unittest.TestCase):
    """Tests for Seq2SeqDataset loading, the Logger utility and AverageMeter."""

    def test_load_data(self):
        # Build source/target vocab fields and load the bundled sample corpus.
        src_vocab = Field(pad_token="<p>", unk_token="<u>", lower=True)
        trg_vocab = Field(init_token="<s>", eos_token="</s>",pad_token="<p>", unk_token="<u>", lower=True )
        exts = (".de", ".en")
        samples = Seq2SeqDataset.splits(root="", path=data_dir, exts=exts,
                                        train="samples", fields=(src_vocab, trg_vocab), validation="",test="")
        self.assertIsInstance(samples, tuple)
        # Only the train split was requested; unpack it.
        samples = samples[0]
        self.assertIsInstance(samples, Seq2SeqDataset)
        self.assertIsNotNone(samples.examples)
        self.assertAlmostEqual(len(samples.examples), 15)
        self.assertEqual(list(samples.fields.keys()), ["src", "trg"])
        src_vocab.build_vocab(samples)
        trg_vocab.build_vocab(samples)
        self.assertIsNotNone(src_vocab.vocab.stoi)
        self.assertIsNotNone(trg_vocab.vocab.stoi)

    def test_logger(self):
        # Start from a clean log file, then check line-by-line content.
        path = os.path.join(data_dir, "log.log")
        if os.path.exists(path):
            os.remove(path)
        logger = Logger(path=data_dir)
        self.assertIsNotNone(logger)
        logger.log("test_logging", stdout=False)
        logger.log("test_second_logging", stdout=False)
        with open(path, mode="r") as f:
            content = f.read().strip().split("\n")
            self.assertEqual(content[0], "test_logging")
            self.assertEqual(content[1], "test_second_logging")

    def test_save_model(self):
        # save_model should pickle the object as model.pkl in the log dir.
        path = os.path.join(data_dir, "log.log")
        if os.path.exists(path):
            os.remove(path)
        logger = Logger(path=data_dir)
        self.assertIsNotNone(logger)
        model = dict({"model": [1,2,3,4,2]})
        logger.save_model(model)
        files = os.listdir(data_dir)
        self.assertIn("model.pkl", files)
        # Clean up the artifact so reruns start fresh.
        os.remove(os.path.join(data_dir, "model.pkl"))

    def test_plot_metrics(self):
        # plot() should write <name>.png in the log dir.
        path = os.path.join(data_dir, "log.log")
        if os.path.exists(path):
            os.remove(path)
        logger = Logger(path=data_dir)
        self.assertIsNotNone(logger)
        metric = [1,2,5,1,6,1]
        logger.plot(metric, "", "", "metric")
        files = os.listdir(data_dir)
        self.assertIn("metric.png", files)
        os.remove(os.path.join(data_dir, "metric.png"))

    def test_metric(self):
        # AverageMeter accumulates count/val/avg/sum; reset() zeroes them.
        metric = AverageMeter()
        for i in range(10):
            metric.update(i)
        self.assertEqual(metric.count, 10)
        self.assertEqual(metric.val, 9)
        self.assertEqual(metric.avg, 4.5)
        self.assertEqual(metric.sum, 45)
        metric.reset()
        self.assertEqual(metric.count, 0)
        self.assertEqual(metric.val, 0)
        self.assertEqual(metric.avg, 0)
        self.assertEqual(metric.sum, 0)
| 36.690476
| 110
| 0.621999
| 390
| 3,082
| 4.797436
| 0.276923
| 0.044896
| 0.089792
| 0.037413
| 0.270978
| 0.270978
| 0.270978
| 0.235703
| 0.174773
| 0.174773
| 0
| 0.013605
| 0.236859
| 3,082
| 84
| 111
| 36.690476
| 0.781888
| 0
| 0
| 0.239437
| 0
| 0
| 0.060655
| 0
| 0
| 0
| 0
| 0
| 0.309859
| 1
| 0.070423
| false
| 0
| 0.084507
| 0
| 0.169014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42d1f1c104a654530b6968dd6b6bff5cdf01c509
| 2,156
|
py
|
Python
|
networks/cifar_net.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | 5
|
2021-09-07T13:33:45.000Z
|
2022-02-12T18:56:45.000Z
|
networks/cifar_net.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | null | null | null |
networks/cifar_net.py
|
DQle38/Fair-Feature-Distillation-for-Visual-Recognition
|
f0f98728f36528218bf19dce9a26d6ee1ba96e58
|
[
"MIT"
] | 4
|
2021-09-25T06:56:38.000Z
|
2022-03-24T18:06:08.000Z
|
import torch
import torch.nn as nn
import numpy as np
class Net(nn.Module):
    """Small VGG-style CNN for 32x32 inputs (per the size comments, e.g. CIFAR).

    Three blocks of (conv, conv, 2x2 max-pool) followed by one hidden
    fully-connected layer.  ``forward`` can optionally also return an
    intermediate activation (used for feature distillation).
    """

    def __init__(self, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        # Track the spatial size layer by layer, starting from a 32x32 input.
        s = compute_conv_output_size(32, 3, padding=1)  # 32
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        s = compute_conv_output_size(s, 3, padding=1)  # 32
        s = s // 2  # 16 (after 2x2 max-pool)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        s = compute_conv_output_size(s, 3, padding=1)  # 16
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        s = compute_conv_output_size(s, 3, padding=1)  # 16
        s = s // 2  # 8
        self.conv5 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        s = compute_conv_output_size(s, 3, padding=1)  # 8
        self.conv6 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        s = compute_conv_output_size(s, 3, padding=1)  # 8
        s = s // 2  # 4
        self.fc1 = nn.Linear(s * s * 128, 256)  # 4*4*128 = 2048 inputs
        self.drop1 = nn.Dropout(0.25)
        # NOTE(review): drop2 is unused in forward (its call is commented out).
        self.drop2 = nn.Dropout(0.5)
        self.MaxPool = torch.nn.MaxPool2d(2)
        self.last = torch.nn.Linear(256, num_classes)
        self.relu = torch.nn.ReLU()

    def forward(self, x, get_inter=False, before_fc=False):
        """Run the network.

        :param x: input batch; assumed (N, 3, 32, 32) -- TODO confirm.
        :param get_inter: also return an intermediate activation.
        :param before_fc: with get_inter, return the last conv activation
            (act6) instead of the hidden FC activation (act7).
        :return: logits ``y``, or ``(activation, y)`` when get_inter is True.
        """
        act1 = self.relu(self.conv1(x))
        act2 = self.relu(self.conv2(act1))
        h = self.drop1(self.MaxPool(act2))
        act3 = self.relu(self.conv3(h))
        act4 = self.relu(self.conv4(act3))
        h = self.drop1(self.MaxPool(act4))
        act5 = self.relu(self.conv5(h))
        act6 = self.relu(self.conv6(act5))
        h = self.drop1(self.MaxPool(act6))
        h = h.view(x.shape[0], -1)  # flatten per sample
        act7 = self.relu(self.fc1(h))
        # h = self.drop2(act7)
        y = self.last(act7)
        if get_inter:
            if before_fc:
                return act6, y
            else:
                return act7, y
        else:
            return y
def compute_conv_output_size(l_in, kernel_size, stride=1, padding=0, dilation=1):
    """Return the spatial output size of a convolution.

    Implements the standard formula
    floor((l_in + 2*padding - dilation*(kernel_size-1) - 1) / stride + 1).
    """
    effective_kernel = dilation * (kernel_size - 1) + 1
    raw = (l_in + 2 * padding - effective_kernel) / float(stride) + 1
    return int(np.floor(raw))
| 35.933333
| 101
| 0.574212
| 333
| 2,156
| 3.582583
| 0.243243
| 0.080469
| 0.090528
| 0.123219
| 0.317687
| 0.264878
| 0.264878
| 0.264878
| 0.264878
| 0.264878
| 0
| 0.092978
| 0.286642
| 2,156
| 59
| 102
| 36.542373
| 0.682705
| 0.022263
| 0
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.061224
| 0.020408
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42d4aca626e7056c3cd312d444ec2606808efc07
| 1,207
|
py
|
Python
|
solutions/python3/problem654.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
solutions/python3/problem654.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
solutions/python3/problem654.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
654. Maximum Binary Tree
Given an integer array with no duplicates. A maximum tree building on this array is defined as follow:
The root is the maximum number in the array.
The left subtree is the maximum tree constructed from left part subarray divided by the maximum number.
The right subtree is the maximum tree constructed from right part subarray divided by the maximum number.
Construct the maximum tree by the given array and output the root node of this tree.
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.left = left
        self.right = right
        self.val = val
class Solution:
    def constructMaximumBinaryTree(self, nums) -> TreeNode:
        """Recursively build the maximum binary tree for ``nums``.

        The root holds the maximum value; left/right subtrees are built from
        the sub-arrays to the left/right of that maximum.

        :param nums: sequence of unique integers (may be empty).
        :return: root TreeNode, or None for an empty sequence.
        """
        if not nums:
            return None
        # Idiomatic arg-max instead of a manual scan loop; like the original
        # strict-'>' scan, this picks the first index of the largest value.
        max_ind = max(range(len(nums)), key=nums.__getitem__)
        return TreeNode(
            val=nums[max_ind],
            left=self.constructMaximumBinaryTree(nums[:max_ind]),
            right=self.constructMaximumBinaryTree(nums[max_ind + 1:]),
        )
| 30.948718
| 105
| 0.666114
| 173
| 1,207
| 4.554913
| 0.358382
| 0.076142
| 0.045685
| 0.048223
| 0.30203
| 0.30203
| 0.190355
| 0
| 0
| 0
| 0
| 0.009019
| 0.26512
| 1,207
| 38
| 106
| 31.763158
| 0.879369
| 0.43662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42d54535865b205f51d1935bf40792c7ce95c829
| 5,189
|
py
|
Python
|
pparser.py
|
deadsurgeon42/StarryPy3k
|
9291e5a7ca97004675a4868165ce5690c111c492
|
[
"WTFPL"
] | 44
|
2015-11-18T07:45:11.000Z
|
2022-03-30T06:32:18.000Z
|
pparser.py
|
deadsurgeon42/StarryPy3k
|
9291e5a7ca97004675a4868165ce5690c111c492
|
[
"WTFPL"
] | 110
|
2016-08-01T06:45:13.000Z
|
2021-11-30T18:45:36.000Z
|
pparser.py
|
deadsurgeon42/StarryPy3k
|
9291e5a7ca97004675a4868165ce5690c111c492
|
[
"WTFPL"
] | 32
|
2015-01-31T09:54:38.000Z
|
2022-03-31T06:12:21.000Z
|
import asyncio
import traceback
from configuration_manager import ConfigurationManager
from data_parser import *
# Packet-type ID -> parser class imported from data_parser.
# Entries mapped to None are packet types this proxy does not parse;
# PacketParser._parse_packet gives them an empty {} "parsed" payload.
parse_map = {
    0: ProtocolRequest,
    1: ProtocolResponse,
    2: ServerDisconnect,
    3: ConnectSuccess,
    4: ConnectFailure,
    5: HandshakeChallenge,
    6: ChatReceived,
    7: None,
    8: None,
    9: PlayerWarpResult,
    10: None,
    11: None,
    12: None,
    13: ClientConnect,
    14: ClientDisconnectRequest,
    15: None,
    16: PlayerWarp,
    17: FlyShip,
    18: ChatSent,
    19: None,
    20: ClientContextUpdate,
    21: WorldStart,
    22: WorldStop,
    23: None,
    24: None,
    25: None,
    26: None,
    27: None,
    28: None,
    29: None,
    30: None,
    31: GiveItem,
    32: None,
    33: None,
    34: None,
    35: None,
    36: None,
    37: None,
    38: None,
    39: ModifyTileList,
    40: None,
    41: None,
    42: None,
    43: SpawnEntity,
    44: None,
    45: None,
    46: None,
    47: None,
    48: None,
    49: None,
    50: EntityCreate,
    51: None,
    52: None,
    53: EntityInteract,
    54: EntityInteractResult,
    55: None,
    56: DamageRequest,
    57: DamageNotification,
    58: EntityMessage,
    59: EntityMessageResponse,
    60: DictVariant,
    61: StepUpdate,
    62: None,
    63: None,
    64: None,
    65: None,
    66: None,
    67: None,
    68: None
}
class PacketParser:
    """
    Object for handling the parsing and caching of packets.
    """

    def __init__(self, config: ConfigurationManager):
        self._cache = {}
        self.config = config
        self.loop = asyncio.get_event_loop()
        # Background task that periodically evicts unused cache entries.
        self._reaper = self.loop.create_task(self._reap())

    @asyncio.coroutine
    def parse(self, packet):
        """
        Given a packet preped packet from the stream, parse it down to its
        parts. First check if the packet is one we've seen before; if it is,
        pull its parsed form from the cache, and run with that. Otherwise,
        pass it to the appropriate parser for parsing.

        :param packet: Packet with header information parsed.
        :return: Fully parsed packet.
        """
        try:
            # Only packets above the configured size threshold are cached.
            if packet["size"] >= self.config.config["min_cache_size"]:
                packet["hash"] = hash(packet["original_data"])
                if packet["hash"] in self._cache:
                    # Cache hit: bump the usage counter and reuse the result.
                    self._cache[packet["hash"]].count += 1
                    packet["parsed"] = self._cache[packet["hash"]].packet[
                        "parsed"]
                else:
                    packet = yield from self._parse_and_cache_packet(packet)
            else:
                packet = yield from self._parse_packet(packet)
        except Exception:
            # Best-effort: report the failure and return the packet as-is so
            # one malformed packet doesn't kill the stream.
            print("Error during parsing.")
            # BUG FIX: traceback.print_exc() already prints; the old
            # print(traceback.print_exc()) emitted a stray "None" line.
            traceback.print_exc()
        # BUG FIX: the old ``finally: return packet`` silently swallowed
        # *every* exception, including BaseExceptions such as task
        # cancellation.  A plain return preserves the success/handled-error
        # behavior while letting BaseExceptions propagate.
        return packet

    @asyncio.coroutine
    def _reap(self):
        """
        Prune packets from the cache that are not being used, and that are
        older than the "packet_reap_time".

        :return: None.
        """
        while True:
            yield from asyncio.sleep(self.config.config["packet_reap_time"])
            # Iterate over a copy so entries can be deleted while scanning.
            for h, cached_packet in self._cache.copy().items():
                cached_packet.count -= 1
                if cached_packet.count <= 0:
                    del self._cache[h]

    @asyncio.coroutine
    def _parse_and_cache_packet(self, packet):
        """
        Take a new packet and pass it to the parser. Once we get it back,
        make a copy of it to the cache.

        :param packet: Packet with header information parsed.
        :return: Fully parsed packet.
        """
        packet = yield from self._parse_packet(packet)
        self._cache[packet["hash"]] = CachedPacket(packet=packet)
        return packet

    @asyncio.coroutine
    def _parse_packet(self, packet):
        """
        Parse the packet by giving it to the appropriate parser.

        :param packet: Packet with header information parsed.
        :return: Fully parsed packet.
        """
        res = parse_map[packet["type"]]
        if res is None:
            # Unhandled packet type: expose an empty parse result.
            packet["parsed"] = {}
        else:
            # Parsing runs inline; a run_in_executor variant was removed
            # earlier due to issues with testers.
            packet["parsed"] = res.parse(packet["data"])
        return packet
class CachedPacket:
    """A cache entry: the fully parsed packet plus a usage counter.

    ``count`` starts at one and is managed by the cache owner (incremented
    on hits, decremented by the reaper).
    """

    def __init__(self, packet):
        self.packet = packet
        self.count = 1
def build_packet(packet_id, data, compressed=False):
    """
    Convenience method for building a packet.

    :param packet_id: ID value of packet.
    :param data: Contents of packet.
    :param compressed: Whether or not to compress the packet.
    :return: Built packet object.
    """
    payload = {
        "id": packet_id,
        "data": data,
        "compressed": compressed,
    }
    return BasePacket.build(payload)
| 27.167539
| 79
| 0.578724
| 605
| 5,189
| 4.86281
| 0.401653
| 0.040789
| 0.025833
| 0.024473
| 0.141061
| 0.103671
| 0.092794
| 0.068321
| 0.068321
| 0.068321
| 0
| 0.037607
| 0.323569
| 5,189
| 190
| 80
| 27.310526
| 0.80057
| 0.262478
| 0
| 0.09375
| 0
| 0
| 0.038117
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054688
| false
| 0
| 0.03125
| 0
| 0.132813
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42d72c0c58e56c65e8f873c5b25c452eaaf9e7cc
| 3,032
|
py
|
Python
|
deploy/testData.py
|
yaoguxiao/insightface
|
731f9ec7503cda3a5f3433525aa57709a78b2118
|
[
"MIT"
] | null | null | null |
deploy/testData.py
|
yaoguxiao/insightface
|
731f9ec7503cda3a5f3433525aa57709a78b2118
|
[
"MIT"
] | null | null | null |
deploy/testData.py
|
yaoguxiao/insightface
|
731f9ec7503cda3a5f3433525aa57709a78b2118
|
[
"MIT"
] | null | null | null |
import sys
import os
import mxnet as mx
import argparse
sys.path.append(os.path.join(os.getcwd(), "../src/common"))
sys.path.append(os.path.join(os.getcwd(), "../src/eval"))
import verification
def argParser(argv=None):
    """Parse command-line options for the verification test script.

    :param argv: optional explicit argument list (defaults to sys.argv[1:]);
        added (backward-compatibly) so the parser can be driven
        programmatically and in tests.
    :return: parsed argparse.Namespace with model/data/target/output/batch_size.
    """
    parser = argparse.ArgumentParser(description='test network')
    parser.add_argument('--model', default='../../insightface/models/model-res4-8-16-4-dim512/model,0', help='path of model')
    parser.add_argument('--data-dir', default='../../insightface/datasets/faces_ms1m_112x112/', help='path of test data')
    parser.add_argument('--target', default='lfw', help='name of test data')
    parser.add_argument('--output', default='fc1', help='output name')
    # BUG FIX: without type=int a value supplied on the command line arrived
    # as a string, breaking model.bind()'s data shapes and verification.test.
    parser.add_argument('--batch-size', default=50, type=int, help='batch size')
    args = parser.parse_args(argv)
    return args
def reaTestData():
    """Load the verification .bin datasets named in the global ``args.target``.

    :return: dict mapping dataset name -> data loaded by
        ``verification.load_bin`` (images sized 112x112).
    NOTE(review): relies on the module-level ``args`` created in __main__.
    """
    verList = {}
    for name in args.target.split(','):
        print("============", name)
        path = os.path.join(args.data_dir, name + ".bin")
        print(path)
        # NOTE(review): ``break`` stops at the first missing file, silently
        # skipping all later targets -- ``continue`` may have been intended.
        if not os.path.exists(path): break
        verList[name] = verification.load_bin(path, [112, 112])
        print('ver', name)
    return verList
def verTest(model, nbatch):
    """Run the verification benchmark on every loaded dataset.

    :param model: bound mxnet module producing embeddings.
    :param nbatch: batch counter used only in the printed log lines.
    :return: list of flip-accuracy values (one per dataset), or None when the
        early "no data" guard fires.
    """
    results = []
    verList = reaTestData()
    print("===============, line:", sys._getframe().f_lineno)
    # NOTE(review): reaTestData() returns a dict (possibly empty), never
    # None, so this guard is dead as written -- an emptiness check
    # (``if not verList``) may have been intended.
    if verList is None:
        print("read test data err")
        return
    print("===============, line:", sys._getframe().f_lineno)
    for i in verList:  # iterates dataset names (dict keys)
        print("===============, line:", sys._getframe().f_lineno)
        acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(verList[i], model, args.batch_size, 10, None, None)
        print('[%s][%d]XNorm: %f' % (i, nbatch, xnorm))
        # print('[%s][%d]Accuracy: %1.5f+-%1.5f' % (i, nbatch, acc1, std1))
        print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (i, nbatch, acc2, std2))
        results.append(acc2)
    return results
# class faceMode:
# def __init__(self, args):
# self.arts = args
# modelid = args.model.split(',')
# print(modelid[0], modelid[1])
# sym, argParams, auxParams = mx.model.load_checkpoint(modelid[0], int(modelid[1]))#type:mx.symbol.symbol.Symbol
# sym = sym.get_internals()[args.output + '_output']
# self.model = mx.mod.Module(symbol=sym, label_names=None)
# self.model.bind(('data', (1, 3, 112,112)))
# self.model.set_params(argParams, auxParams)
# print(type(sym))
if __name__ == "__main__":
    args = argParser()
    # faceMode(args)
    # Checkpoint is given as "<prefix>,<epoch>".
    modelid = args.model.split(',')
    print(modelid[0], modelid[1])
    sym, argParams, auxParams = mx.model.load_checkpoint(modelid[0], int(modelid[1]))  # type:mx.symbol.symbol.Symbol
    # Truncate the network at the requested embedding output layer.
    sym = sym.get_internals()[args.output + '_output']
    model = mx.mod.Module(symbol=sym, context=mx.gpu(0), label_names=None)
    # model.bind(data_shapes=('data', (args.batch_size, 3, 112, 112)))
    model.bind(data_shapes=[('data', (args.batch_size, 3, 112,112))])
    model.set_params(argParams, auxParams)
    verTest(model, args.batch_size)
| 41.534247
| 125
| 0.632256
| 407
| 3,032
| 4.594595
| 0.29484
| 0.041176
| 0.054545
| 0.032086
| 0.405882
| 0.374332
| 0.261497
| 0.261497
| 0.261497
| 0.225134
| 0
| 0.029786
| 0.169525
| 3,032
| 73
| 126
| 41.534247
| 0.712867
| 0.240765
| 0
| 0.058824
| 0
| 0
| 0.189934
| 0.054705
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.098039
| 0
| 0.235294
| 0.196078
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42dc77f7900d79cb250ea17552132e0f738917bd
| 4,482
|
py
|
Python
|
test/test_plugin_spontit.py
|
NiNiyas/apprise
|
8d96e95acd7cb89f082685ae161bd0e268203f0c
|
[
"MIT"
] | 1
|
2022-01-19T01:40:04.000Z
|
2022-01-19T01:40:04.000Z
|
test/test_plugin_spontit.py
|
NiNiyas/apprise
|
8d96e95acd7cb89f082685ae161bd0e268203f0c
|
[
"MIT"
] | null | null | null |
test/test_plugin_spontit.py
|
NiNiyas/apprise
|
8d96e95acd7cb89f082685ae161bd0e268203f0c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
from apprise import plugins
from helpers import AppriseURLTester
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
# Our Testing URLs
# Each entry is (url, meta): AppriseURLTester instantiates the plugin from
# the URL and checks it against the expectations in the meta dict
# ('instance', 'response', 'requests_response_code', etc.).
apprise_url_tests = (
    ('spontit://', {
        # invalid url
        'instance': TypeError,
    }),
    # Another bad url
    ('spontit://:@/', {
        'instance': TypeError,
    }),
    # No user specified
    ('spontit://%s' % ('a' * 100), {
        'instance': TypeError,
    }),
    # Invalid API Key specified
    ('spontit://user@%%20_', {
        'instance': TypeError,
    }),
    # Provide a valid user and API Key
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # Our expected url(privacy=True) startswith() response:
        'privacy_url': 'spontit://{}@b...b/'.format('u' * 11),
    }),
    # Provide a valid user and API Key, but provide an invalid channel
    ('spontit://%s@%s/#!!' % ('u' * 11, 'b' * 100), {
        # An instance is still created, but the channel won't be notified
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user, API Key and a valid channel
    ('spontit://%s@%s/#abcd' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user, API Key, and a subtitle
    ('spontit://%s@%s/?subtitle=Test' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user, API Key, and a lengthy subtitle
    ('spontit://%s@%s/?subtitle=%s' % ('u' * 11, 'b' * 100, 'c' * 300), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user and API Key, but provide a valid channel (that is
    # not ours).
    # Spontit uses a slash (/) to delimite the user from the channel id when
    # specifying channel entries. For Apprise we need to encode this
    # so we convert the slash (/) into %2F
    ('spontit://{}@{}/#1245%2Fabcd'.format('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide multipe channels
    ('spontit://{}@{}/#1245%2Fabcd/defg'.format('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide multipe channels through the use of the to= variable
    ('spontit://{}@{}/?to=#1245/abcd'.format('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # force a failure
        'response': False,
        'requests_response_code': requests.codes.internal_server_error,
    }),
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # throw a bizzare code forcing us to fail to look it up
        'response': False,
        'requests_response_code': 999,
    }),
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # Throws a series of connection and transfer exceptions when this flag
        # is set and tests that we gracfully handle them
        'test_requests_exceptions': True,
    }),
)
def test_plugin_spontit_urls():
    """Exercise every NotifySpontit() Apprise URL case defined above."""
    tester = AppriseURLTester(tests=apprise_url_tests)
    tester.run_all()
| 37.663866
| 79
| 0.629407
| 573
| 4,482
| 4.891798
| 0.366492
| 0.012843
| 0.015697
| 0.027471
| 0.281127
| 0.236889
| 0.236889
| 0.211916
| 0.197289
| 0.171602
| 0
| 0.025865
| 0.232262
| 4,482
| 118
| 80
| 37.983051
| 0.788724
| 0.499554
| 0
| 0.6
| 0
| 0
| 0.258494
| 0.109275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.066667
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42dc9cb1aa466dc4d81d1303416d9c0741104c68
| 2,751
|
py
|
Python
|
img_striper.py
|
tacensi/image_striper
|
d361c5c4b7e9b8588b50d8f992b90d14fd64d4f0
|
[
"MIT"
] | null | null | null |
img_striper.py
|
tacensi/image_striper
|
d361c5c4b7e9b8588b50d8f992b90d14fd64d4f0
|
[
"MIT"
] | null | null | null |
img_striper.py
|
tacensi/image_striper
|
d361c5c4b7e9b8588b50d8f992b90d14fd64d4f0
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import argparse
import textwrap
import math
from PIL import Image
parser = argparse.ArgumentParser(
prog='img_striper.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Image striper
This is a simple program to make stripes out of images
and join them together again. It was inspired by this great
video: https://www.instagram.com/p/BhZU4XMgdYA/
This script follows the WTFPL, so go ahead and do whatever
the fuck you want with it.
'''),
epilog=textwrap.dedent('''\
This is just a simple exercise.
Please don't hate me for my noobiness.
'''))
parser.add_argument('--i',
'-input',
help='File to be shifted',
type=argparse.FileType('rb', 0),
required=True
)
parser.add_argument('--o',
'-output',
help='Image to be saved',
type=argparse.FileType('wb', 0),
required=True
)
args = parser.parse_args()
# open image and create new one
# Pass 1: cut the source into 15px-wide vertical strips; even-numbered
# strips are pasted into the top half and odd-numbered strips into the
# bottom half of an intermediate image (half as wide, twice as tall).
original_doggo = Image.open(args.i)
original_w, original_h = original_doggo.size
inter_w = int(math.floor(original_w / 2))
inter_h = original_h * 2
inter_doggo = Image.new('RGB', [inter_w, inter_h], 'white')
# calculate the number of strips
no_strips = int(math.floor(original_w / 15))
for n in range(0, no_strips):
    # calculate xs from the cropped strip
    x1 = n * 15
    x2 = x1 + 15
    # create crop box
    crop_box = (x1, 0, x2, original_h)
    # cropped section
    section = original_doggo.crop(crop_box)
    y1 = 0
    # calculate xs for the placement of the paste
    if n % 2:
        y1 = original_h  # odd strips land in the bottom half
    y2 = y1 + original_h
    x3 = 15 * int(math.floor(n / 2))
    x4 = x3 + 15
    paste_box = (x3, y1, x4, y2)
    inter_doggo.paste(section, paste_box)
# Pass 2: repeat the interleave on horizontal strips of the intermediate
# image, producing the final image (back to roughly the original aspect).
original_w, original_h = inter_doggo.size  # NOTE: rebinds to the intermediate size
new_h = int(math.floor(inter_h / 2))
new_w = inter_w * 2
new_doggo = Image.new('RGB', [new_w, new_h], 'white')
# calculate the number of strips
no_strips = int(math.floor(inter_h / 15))
for n in range(0, no_strips):
    # calculate xs from the cropped strip
    y1 = n * 15
    y2 = y1 + 15
    # create crop box
    crop_box = (0, y1, inter_w, y2)
    # cropped section
    section = inter_doggo.crop(crop_box)
    x1 = 0
    # calculate xs for the placement of the paste
    if n % 2:
        x1 = inter_w  # odd strips land in the right half
    x2 = x1 + inter_w
    y3 = 15 * int(math.floor(n / 2))
    y4 = y3 + 15
    paste_box = (x1, y3, x2, y4)
    new_doggo.paste(section, paste_box)
new_doggo.save(args.o)
# print(original_w, original_h)
# parser.print_help()
| 25.472222
| 67
| 0.607779
| 400
| 2,751
| 4.045
| 0.34
| 0.038937
| 0.044499
| 0.033375
| 0.290482
| 0.226205
| 0.179234
| 0.179234
| 0.179234
| 0.179234
| 0
| 0.035696
| 0.287168
| 2,751
| 107
| 68
| 25.71028
| 0.789393
| 0.14104
| 0
| 0.085714
| 0
| 0
| 0.212857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.057143
| 0
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42dcb97d77131e74ecfe71c62c27b3b22cca853a
| 7,590
|
py
|
Python
|
ds/web/views.py
|
brainmorsel/python-dhcp-sprout
|
c8da1b19558e404fdfef24304e1996c696fc13b1
|
[
"MIT"
] | null | null | null |
ds/web/views.py
|
brainmorsel/python-dhcp-sprout
|
c8da1b19558e404fdfef24304e1996c696fc13b1
|
[
"MIT"
] | 1
|
2019-05-03T07:54:57.000Z
|
2019-05-03T07:54:57.000Z
|
ds/web/views.py
|
brainmorsel/python-dhcp-sprout
|
c8da1b19558e404fdfef24304e1996c696fc13b1
|
[
"MIT"
] | null | null | null |
import datetime
from aiohttp import web
from aiohttp_jinja2 import template
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import psycopg2
from ds import db
from . import forms
@template('index.jinja2')
async def index(request):
    """Render the landing page; the template needs no context."""
    return {}
@template('profile_list.jinja2')
async def profile_list(request):
    """List DHCP profiles with their used/total IP counts.

    ips_total = broadcast - network - 2, i.e. the number of assignable host
    addresses in the profile's network.
    NOTE(review): the inner join against the per-profile owner counts hides
    profiles that have no owner rows at all -- confirm that is intended.
    """
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.profile, 'ips_used',
                (sa.func.broadcast(db.profile.c.network_addr) - db.profile.c.network_addr - 2).label('ips_total')
            ]).
            select_from(
                db.profile.
                join(sa.select([
                    db.owner.c.profile_id, sa.func.count(db.owner.c.id).label('ips_used')
                ]).group_by(db.owner.c.profile_id).alias('cnts'))
            ).
            order_by(db.profile.c.name)
        )).fetchall()
    return {'items': items}
def _cast_str_to_inet_arr(ip_list_str):
    """Cast a user-supplied IP list string to a Postgres INET[] expression.

    NOTE(review): ``map`` yields a lazy iterator here; verify that sa.cast
    accepts it -- wrapping it in ``list(...)`` may have been intended.
    """
    return sa.cast(map(str, forms.str_to_ip_list(ip_list_str)), pg.ARRAY(pg.INET))
@template('profile_edit.jinja2')
async def profile_edit(request):
    """Create (no id in URL) or update (id present) a DHCP profile.

    On a valid POST the row is inserted/updated, the DHCP daemon is poked
    via pg_notify to reload the profile, and the browser is redirected to
    the profile list; otherwise the form is rendered.
    """
    tbl = db.profile
    item_id = request.match_info.get('id')  # None => creating a new profile
    await request.post()
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            item = await (await conn.execute(
                tbl.select().where(tbl.c.id == item_id)
            )).fetchone()
            form = forms.ProfileEditForm(await request.post(), item)
            if request.method == 'POST' and form.validate():
                params = db.fit_params_dict(form.data, tbl.c.keys())
                print(params['dns_ips'])
                # Comma-separated IP strings -> INET[] column expressions.
                params['dns_ips'] = _cast_str_to_inet_arr(params['dns_ips'])
                params['ntp_ips'] = _cast_str_to_inet_arr(params['ntp_ips'])
                if item_id is None:
                    await conn.execute(tbl.insert().values(params))
                else:
                    await conn.execute(
                        tbl.update().values(params).where(tbl.c.id == item_id)
                    )
                # Tell the DHCP server process to re-read this profile.
                await conn.execute(
                    sa.select([sa.func.pg_notify('dhcp_control', 'RELOAD_PROFILE {}'.format(item_id))])
                )
                return web.HTTPFound('/profile/')
    return {'form': form}
async def profile_delete(request):
    """Delete the profile row named by the URL and return to the list."""
    tbl = db.profile
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        await conn.execute(tbl.delete().where(tbl.c.id == item_id))
    return web.HTTPFound('/profile/')
@template('staging_list.jinja2')
async def staging_list(request):
    """List owners without an IP yet (newest first), joined with profile info."""
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            # ip_addr IS NULL marks a staged (not yet assigned) owner.
            where(db.owner.c.ip_addr == None).
            order_by(sa.desc(db.owner.c.create_date))
        )).fetchall()
    return {'items': items}
async def staging_assign_ip(request):
    """Assign the lowest free host IP of the owner's profile network.

    Candidate addresses come from generate_series over the network range
    (network+1 .. broadcast-1); addresses already assigned in the same
    profile are subtracted with EXCEPT, and the first remaining address is
    written to the owner row.  The DHCP daemon is then notified.
    """
    item_id = int(request.match_info.get('id'))
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            profile_id = await conn.scalar(
                sa.select([db.owner.c.profile_id]).where(db.owner.c.id == item_id)
            )
            # All host addresses of the profile's network, as INET values.
            gen = sa.select([
                (sa.cast('0.0.0.0', pg.INET) + sa.func.generate_series(
                    sa.cast(db.profile.c.network_addr, pg.INET) - '0.0.0.0' + 1,
                    sa.func.broadcast(db.profile.c.network_addr) - '0.0.0.0' - 1
                )).label('ip_addr')
            ]).\
                select_from(db.profile.join(db.owner)). \
                where(db.profile.c.id == profile_id)
            # Addresses already taken within this profile.
            sel = sa.select([db.owner.c.ip_addr]). \
                where(db.owner.c.profile_id == profile_id). \
                where(db.owner.c.ip_addr != None)
            # Free addresses, ordered: the LIMIT 1 subquery becomes the value.
            ip_addr = gen.except_(sel).order_by('ip_addr').limit(1)
            await conn.execute(
                db.owner.update().values(
                    ip_addr=ip_addr,
                    modify_date=sa.func.now()
                ).
                where(db.owner.c.id == item_id)
            )
            await conn.execute(
                sa.select([sa.func.pg_notify('dhcp_control', 'RELOAD_ITEM {}'.format(item_id))])
            )
    if 'edit' in request.rel_url.query:
        # Optionally jump straight into editing the freshly-assigned item.
        return web.HTTPFound('/assigned/{}/edit?redirect=/staging/'.format(item_id))
    return web.HTTPFound('/staging/')
async def staging_delete(request):
    """Delete a staged owner row and notify the DHCP daemon.

    The MAC address is read first because the notify payload needs it after
    the row is gone.
    """
    tbl = db.owner
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            mac_addr = await conn.scalar(
                sa.select([tbl.c.mac_addr]).
                where(tbl.c.id == item_id)
            )
            await conn.execute(tbl.delete().where(tbl.c.id == item_id))
            await conn.execute(
                sa.select([sa.func.pg_notify('dhcp_control', 'REMOVE_STAGING {}'.format(mac_addr))])
            )
    return web.HTTPFound('/staging/')
@template('assigned_list.jinja2')
async def assigned_list(request):
    """List owners that already have an IP, most recently leased first."""
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            # ip_addr IS NOT NULL marks an assigned owner.
            where(db.owner.c.ip_addr != None).
            order_by(sa.desc(db.owner.c.lease_date))
        )).fetchall()
    return {'items': items}
@template('assigned_edit.jinja2')
async def assigned_edit(request):
    """Edit an owner that already has an IP assigned.

    GET renders the form; a valid POST updates the row and redirects either
    to the URL given in ?redirect=... or back to the assigned list.
    """
    item_id = request.match_info.get('id')
    await request.post()
    async with request.app.db.acquire() as conn:
        item = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            where(db.owner.c.id == item_id)
        )).fetchone()
        form = forms.AssignedItemEditForm(await request.post(), item)
        if request.method == 'POST' and form.validate():
            params = db.fit_params_dict(form.data, db.owner.c.keys())
            await conn.execute(
                db.owner.update().values(params).where(db.owner.c.id == item_id)
            )
            if 'redirect' in request.rel_url.query:
                return web.HTTPFound(request.rel_url.query['redirect'])
            return web.HTTPFound('/assigned/')
    return {'item': item, 'form': form}
async def assigned_delete(request):
    """Delete an assigned owner row and notify the DHCP daemon.

    Mirrors staging_delete but sends REMOVE_ACTIVE for in-service entries.
    """
    tbl = db.owner
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            # MAC is needed for the notify payload before the row disappears.
            mac_addr = await conn.scalar(
                sa.select([tbl.c.mac_addr]).
                where(tbl.c.id == item_id)
            )
            await conn.execute(tbl.delete().where(tbl.c.id == item_id))
            await conn.execute(
                sa.select([sa.func.pg_notify('dhcp_control', 'REMOVE_ACTIVE {}'.format(mac_addr))])
            )
    return web.HTTPFound('/assigned/')
| 35.633803
| 113
| 0.548221
| 941
| 7,590
| 4.274176
| 0.134963
| 0.046992
| 0.06365
| 0.024615
| 0.673794
| 0.606663
| 0.564147
| 0.490055
| 0.453257
| 0.453257
| 0
| 0.004625
| 0.316337
| 7,590
| 212
| 114
| 35.801887
| 0.770476
| 0
| 0
| 0.516304
| 0
| 0
| 0.067852
| 0.004743
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005435
| false
| 0
| 0.043478
| 0.005435
| 0.130435
| 0.005435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42e0044ddc8db8684b032fa92b309e589628c115
| 6,123
|
py
|
Python
|
etsyapi/__init__.py
|
DempDemp/etsyapi
|
995250d2f76dcac7edf3b2404bfbce1df732765e
|
[
"BSD-3-Clause"
] | 1
|
2021-02-19T01:45:49.000Z
|
2021-02-19T01:45:49.000Z
|
etsyapi/__init__.py
|
DempDemp/etsyapi
|
995250d2f76dcac7edf3b2404bfbce1df732765e
|
[
"BSD-3-Clause"
] | null | null | null |
etsyapi/__init__.py
|
DempDemp/etsyapi
|
995250d2f76dcac7edf3b2404bfbce1df732765e
|
[
"BSD-3-Clause"
] | 2
|
2016-04-10T21:28:05.000Z
|
2019-09-20T19:51:37.000Z
|
import six
import json
import logging
import requests
from requests_oauthlib import OAuth1
if six.PY3:
from urllib.parse import parse_qs
from urllib.parse import urlencode
else:
from urlparse import parse_qs
from urllib import urlencode
log = logging.getLogger(__name__)
class EtsyError(Exception):
    """API failure that keeps the offending response object around."""

    def __init__(self, message, response):
        self.response = response
        super(EtsyError, self).__init__(message)
class Etsy(object):
    """
    Represents the etsy API (v2).

    Unauthenticated calls are signed with the consumer api_key; endpoints
    that require a user are signed with an OAuth1 hook built from the
    permanent oauth_token/oauth_token_secret pair.
    """
    url_base = "https://openapi.etsy.com/v2"

    def __init__(self, consumer_key, consumer_secret, oauth_token=None, oauth_token_secret=None, sandbox=False):
        """
        consumer_key, consumer_secret: application credentials from Etsy.
        oauth_token, oauth_token_secret: permanent user tokens obtained via
            get_auth_url() + get_auth_token(); when both are present a full
            OAuth hook (self.full_oauth) is prepared for authed requests.
        sandbox: when True, talk to the Etsy sandbox host instead.
        """
        self.params = {'api_key': consumer_key}
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        if sandbox:
            self.url_base = "http://sandbox.openapi.etsy.com/v2"
        # generic authenticated oauth hook (app-level, no user context)
        self.simple_oauth = OAuth1(consumer_key, client_secret=consumer_secret)
        if oauth_token and oauth_token_secret:
            # full oauth hook for an authenticated user
            self.full_oauth = OAuth1(consumer_key, client_secret=consumer_secret,
                                     resource_owner_key=oauth_token,
                                     resource_owner_secret=oauth_token_secret)

    def show_listings(self, color=None, color_wiggle=5):
        """
        Show all listings on the site.
        color should be a RGB ('#00FF00') or a HSV ('360;100;100');
        color_wiggle is the accuracy tolerance passed to the API.
        """
        endpoint = '/listings/active'
        params = {}
        if color:
            params['color'] = color
            params['color_accuracy'] = color_wiggle
        response = self.execute(endpoint, params=params)
        return response

    def get_user_info(self, user):
        """
        Get basic info about a user; pass in a username or a user_id.
        Passing '__SELF__' queries the authenticated user and therefore
        requires the full OAuth hook.
        """
        endpoint = '/users/%s' % user
        auth = {}
        if user == '__SELF__':
            auth = {'oauth': self.full_oauth}
        response = self.execute(endpoint, **auth)
        return response

    def find_user(self, keywords):
        """
        Search for users matching the given keywords string.
        """
        endpoint = '/users'
        params = {'keywords': keywords}
        response = self.execute(endpoint, params=params)
        return response

    def get_auth_url(self, permissions=None):
        """
        Returns a url that a user is redirected to in order to authenticate
        with the etsy API. This is step one in the authentication process.
        oauth_token and oauth_token_secret need to be saved for step two.

        permissions: optional iterable of scope strings; joined with spaces.
            (Default changed from a mutable [] to None — behavior identical,
            since an empty list and None are treated the same below.)
        """
        endpoint = '/oauth/request_token'
        params = {}
        if permissions:
            params = {'scope': " ".join(permissions)}
        self.oauth = self.simple_oauth
        response = self.execute(endpoint, oauth=self.oauth, params=params)
        parsed = parse_qs(response)
        url = parsed['login_url'][0]
        token = parsed['oauth_token'][0]
        secret = parsed['oauth_token_secret'][0]
        return {'oauth_token': token, 'url': url, 'oauth_token_secret': secret}

    def get_auth_token(self, verifier, oauth_token, oauth_token_secret):
        """
        Step two in the authentication process. oauth_token and oauth_token_secret
        are the same that came from the get_auth_url function call. Returned is
        the permanent oauth_token and oauth_token_secret that will be used in
        every subsequent api request that requires authentication.
        """
        endpoint = '/oauth/access_token'
        oauth = OAuth1(self.consumer_key, client_secret=self.consumer_secret,
                       resource_owner_key=oauth_token,
                       resource_owner_secret=oauth_token_secret,
                       verifier=verifier)
        response = requests.post(url="%s%s" % (self.url_base, endpoint), auth=oauth)
        parsed = parse_qs(response.text)
        return {'oauth_token': parsed['oauth_token'][0], 'oauth_token_secret': parsed['oauth_token_secret'][0]}

    def execute(self, endpoint, method='get', oauth=None, params=None, files=None, **hooks):
        """
        Actually do the request, and raise EtsyError if an error comes back.

        When oauth is given the request is signed and api_key is NOT added;
        otherwise api_key is merged into params. Returns parsed JSON when
        the body is JSON, else the raw response text.
        """
        if oauth:
            # making an authenticated request, add the oauth hook to the request
            hooks['auth'] = oauth
            if params is None:
                params = {}
        else:
            if params is None:
                params = self.params
            else:
                params.update(self.params)
        querystring = urlencode(params)
        url = "%s%s" % (self.url_base, endpoint)
        if querystring:
            url = "%s?%s" % (url, querystring)
        response = getattr(requests, method)(url, files=files, **hooks)
        # Anything above 201 (Created) is treated as an API error.
        if response.status_code > 201:
            e = response.text
            code = response.status_code
            raise EtsyError('API returned %s response: %s' % (code, e), response)
        try:
            return json.loads(response.text)
        except (TypeError, ValueError):
            return response.text

    def execute_authed(self, endpoint, method='get', params=None, **hooks):
        """Shortcut for execute() signed with the full (user) OAuth hook."""
        return self.execute(endpoint, method, oauth=self.full_oauth, params=params, **hooks)

    def iterate_pages(self, f, *p, **d):
        '''
        Iterates through pages in a response.
        Use this method when the response is valid json and has pagination
        Example:
        pages = e.iterate_pages('execute_authed', '/shops/GreenTurtleTshirts/receipts',
        params={'was_paid': True, 'was_shipped': False})
        for page in pages:
            print(page)
        '''
        f = getattr(self, f)
        r = f(*p, **d)
        yield r
        # Follow the API's pagination cursor until next_page is null.
        while r['pagination']['next_page'] is not None:
            if not d:
                d = {}
            if 'params' not in d:
                d['params'] = {}
            d['params']['page'] = r['pagination']['next_page']
            r = f(*p, **d)
            yield r
| 36.230769
| 112
| 0.594806
| 721
| 6,123
| 4.882108
| 0.260749
| 0.068182
| 0.054545
| 0.020455
| 0.252841
| 0.182955
| 0.144318
| 0.130682
| 0.107386
| 0.107386
| 0
| 0.006828
| 0.306386
| 6,123
| 168
| 113
| 36.446429
| 0.821992
| 0.194349
| 0
| 0.160377
| 0
| 0
| 0.090812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0.084906
| 0.009434
| 0.283019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42e109eb76a25424069247c9b529582b0044ded2
| 2,996
|
py
|
Python
|
SatTrack/tle.py
|
ed-ortizm/satellite-tracking
|
9eb2b4a7f31b43035a425d8e2e51044f2e80712d
|
[
"MIT"
] | 2
|
2021-10-05T10:04:56.000Z
|
2021-10-13T18:31:35.000Z
|
SatTrack/tle.py
|
ed-ortizm/satellite-tracking
|
9eb2b4a7f31b43035a425d8e2e51044f2e80712d
|
[
"MIT"
] | 14
|
2021-09-01T12:30:59.000Z
|
2022-02-14T18:53:44.000Z
|
SatTrack/tle.py
|
ed-ortizm/satellite-tracking
|
9eb2b4a7f31b43035a425d8e2e51044f2e80712d
|
[
"MIT"
] | null | null | null |
import datetime
import os
import re
import sys
import urllib
from SatTrack.superclasses import FileDirectory
###############################################################################
# CONSTANTS
TLE_URL = f"https://celestrak.com/NORAD/elements/supplemental"
###############################################################################
class TLE(FileDirectory):
    """
    Handles tle files.

    PARAMETERS
    satellite_brand: Name of satellite type, e.g, oneweb
    directory: The location of the tle files
    """

    def __init__(self, satellite_brand: str, directory: str):
        self.satellite_brand = satellite_brand
        self.directory = directory

    ###########################################################################
    def download(self) -> str:
        """
        Downloads the tle file for the satellite brand set in the constructor
        from TLE_URL = "https://celestrak.com/NORAD/elements/supplemental"
        OUTPUTS
        string with name of the tle file in the format
        "tle_{satellite_brand}_{time_stamp}.txt".
        time_stamp -> "%Y-%m-%d %H:%M:%S"
        example: "tle_oneweb_2021-10-09 16:18:16.txt"
        """
        # Bug fix: "import urllib" at module level does not load the
        # urllib.request submodule; import it explicitly before use.
        import urllib.request
        tle_query = f"{TLE_URL}/{self.satellite_brand}.txt"
        time_stamp = self._get_time_stamp()
        tle_file_name = f"tle_{self.satellite_brand}_{time_stamp}.txt"
        super().check_directory(directory=self.directory, exit=False)
        urllib.request.urlretrieve(
            tle_query, f"{self.directory}/{tle_file_name}"
        )
        return tle_file_name

    ###########################################################################
    def get_satellites_from_tle(self, file_location: str) -> list:
        """
        Retrieves the names of satellites present in tle file.
        The tle file must be stored locally.
        PARAMETERS
        file_location: path of the tle file
        RETURNS
        list with all the satellites available in tle file
        example: [oneweb-000, ...]
        """
        super().file_exists(file_location, exit=True)
        # oneweb -> ONEWEB
        satellite = self.satellite_brand.upper()
        # Raw string: "\)" is a literal ")" in the regex; in a non-raw
        # literal it is an invalid escape sequence (DeprecationWarning).
        regular_expression = rf"{satellite}-[0-9]*.*\)|{satellite}.[0-9]*"
        pattern = re.compile(regular_expression)
        with open(f"{file_location}", "r") as tle:
            content = tle.read()
            satellites = pattern.findall(content)
        return satellites

    ###########################################################################
    def _get_time_stamp(self) -> str:
        """
        Returns a UTC time stamp for the tle file name: "2021-10-09 16:18:16"
        """
        now = datetime.datetime.now(tz=datetime.timezone.utc)
        time_stamp = f"{now:%Y-%m-%d %H:%M:%S}"
        return time_stamp
    ###########################################################################
| 32.215054
| 79
| 0.496996
| 302
| 2,996
| 4.754967
| 0.354305
| 0.048747
| 0.062674
| 0.016713
| 0.132312
| 0.0961
| 0.068245
| 0.068245
| 0.068245
| 0
| 0
| 0.015351
| 0.238985
| 2,996
| 92
| 80
| 32.565217
| 0.614474
| 0.261015
| 0
| 0
| 0
| 0
| 0.163599
| 0.103613
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.181818
| 0
| 0.424242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42e13e620ce8965d49cd0e6e2ae37165c0735674
| 21,970
|
py
|
Python
|
vinfo/dataset.py
|
john-hewitt/conditional-probing
|
bebc90aa0c910395e2370910409076a945279fe0
|
[
"Apache-2.0"
] | 13
|
2021-09-21T11:07:33.000Z
|
2022-03-25T08:46:46.000Z
|
vinfo/dataset.py
|
john-hewitt/conditional-probing
|
bebc90aa0c910395e2370910409076a945279fe0
|
[
"Apache-2.0"
] | 2
|
2021-09-25T15:45:19.000Z
|
2021-12-10T15:57:35.000Z
|
vinfo/dataset.py
|
john-hewitt/conditional-probing
|
bebc90aa0c910395e2370910409076a945279fe0
|
[
"Apache-2.0"
] | 2
|
2021-09-27T01:21:49.000Z
|
2021-09-28T06:08:19.000Z
|
import os
import h5py
import torch
import torch.nn as nn
from torch.utils.data import Dataset, IterableDataset, DataLoader
import Levenshtein as levenshtein
from tqdm import tqdm
from yaml import YAMLObject
from transformers import AutoTokenizer, AutoModel
from allennlp.modules.elmo import batch_to_ids
from utils import TRAIN_STR, DEV_STR, TEST_STR, InitYAMLObject
BATCH_SIZE = 50
"""
Classes for loading, caching, and yielding text datasets
"""
#class Dataset(Dataset, InitYAMLObject):
# """
# Base class for objects that serve batches of
# tensors. For decoration/explanation only
# """
# yaml_tag = '!Dataset'
class IterableDatasetWrapper(Dataset):#(IterableDataset):
    """
    Thin Dataset facade over an in-memory sequence.

    Originally intended to hide len() from DataLoader for a generator
    backend; in practice it simply delegates indexing, length, and
    iteration to the wrapped object.
    """

    def __init__(self, generator):
        self.generator = generator

    def __getitem__(self, idx):
        return self.generator[idx]

    def __len__(self):
        return len(self.generator)

    def __iter__(self):
        return iter(self.generator)
class ListDataset(Dataset, InitYAMLObject):
    """
    Container class for collecting multiple annotation or
    representation datasets and a single target task dataset
    , and serving all of them
    """
    yaml_tag = '!ListDataset'

    def __init__(self, args, data_loader, output_dataset, input_datasets):
        """
        Arguments:
          args: configuration mapping; args['device'] is read in collate_fn
          data_loader: object with yield_dataset(split) producing sentences
          output_dataset: dataset producing the target-task tensor per sentence
          input_datasets: list of datasets producing input tensors per sentence
        """
        self.args = args
        self.input_datasets = input_datasets
        self.output_dataset = output_dataset
        self.data_loader = data_loader
        # Per-split caches, filled lazily on first dataloader request.
        self.train_data = None
        self.dev_data = None
        self.test_data = None

    def get_train_dataloader(self, shuffle=True):
        """Returns a PyTorch DataLoader object with the training data
        """
        if self.train_data is None:
            self.train_data = list(self.load_data(TRAIN_STR))
        #generator = IterableDatasetWrapper(self.load_data(TRAIN_STR))
        generator = IterableDatasetWrapper(self.train_data)
        return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)

    def get_dev_dataloader(self, shuffle=False):
        """Returns a PyTorch DataLoader object with the dev data
        """
        if self.dev_data is None:
            self.dev_data = list(self.load_data(DEV_STR))
        #generator = IterableDatasetWrapper(self.load_data(DEV_STR))
        generator = IterableDatasetWrapper(self.dev_data)
        return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)

    def get_test_dataloader(self, shuffle=False):
        """Returns a PyTorch DataLoader object with the test data
        """
        if self.test_data is None:
            self.test_data = list(self.load_data(TEST_STR))
        #generator = IterableDatasetWrapper(self.load_data(TEST_STR))
        generator = IterableDatasetWrapper(self.test_data)
        return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)

    def load_data(self, split_string):
        """Loads data from disk into RAM tensors for passing to a network on GPU
        Iterates through the training set once, passing each sentence to each
        input Dataset and the output Dataset
        """
        for sentence in tqdm(self.data_loader.yield_dataset(split_string),desc='[loading]'):
            input_tensors = []
            for dataset in self.input_datasets:
                input_tensors.append(dataset.tensor_of_sentence(sentence, split_string))
            output_tensor = self.output_dataset.tensor_of_sentence(sentence, split_string)
            yield (input_tensors, output_tensor, sentence)

    def collate_fn(self, observation_list):
        """
        Combines observations (input_tensors, output_tensor, sentence) tuples
        input_tensors is of the form ((annotation, alignment), ..., (annotation, alignment))
        output_tensor is of the form (annotation, alignment),
        to batches of observations ((batches_input_1, batches_input_2), batches_output, sentences)
        """
        sentences = (x[2] for x in observation_list)
        max_corpus_token_len = max((len(x) for x in sentences))
        input_annotation_tensors = []
        input_alignment_tensors = []
        input_tensor_count = len(observation_list[0][0])
        for input_tensor_index in range(input_tensor_count):
            # Pad every annotation/alignment in this input slot to the
            # batch-wide maximum lengths.
            max_annotation_token_len = max([x[0][input_tensor_index][0].shape[0] for x in observation_list])
            intermediate_annotation_list = []
            intermediate_alignment_list = []
            for input_annotation, input_alignment in ((x[0][input_tensor_index][0],
                                                       x[0][input_tensor_index][1]) for x in observation_list):
                if len(input_annotation.shape) == 1: # word-level ids
                    new_annotation_tensor = torch.zeros(max_annotation_token_len, dtype=torch.long)
                    new_annotation_tensor[:len(input_annotation)] = input_annotation
                elif len(input_annotation.shape) == 2: # character-level ids
                    new_annotation_tensor = torch.zeros(max_annotation_token_len, input_annotation.shape[1]).long()
                    new_annotation_tensor[:len(input_annotation),:] = input_annotation
                # NOTE(review): an annotation of rank > 2 would leave
                # new_annotation_tensor unbound — assumed not to occur.
                intermediate_annotation_list.append(new_annotation_tensor)
                new_alignment_tensor = torch.zeros(max_annotation_token_len, max_corpus_token_len)
                new_alignment_tensor[:input_alignment.shape[0], :input_alignment.shape[1]] = input_alignment
                intermediate_alignment_list.append(new_alignment_tensor)
            input_annotation_tensors.append(torch.stack(intermediate_annotation_list).to(self.args['device']))
            input_alignment_tensors.append(torch.stack(intermediate_alignment_list).to(self.args['device']))
        # Pad and batch the output annotations the same way.
        intermediate_annotation_list = []
        intermediate_alignment_list = []
        max_output_annotation_len = max([x[1][0].shape[0] for x in observation_list])
        for output_annotation, output_alignment in (x[1] for x in observation_list):
            new_annotation_tensor = torch.zeros(max_output_annotation_len, dtype=torch.long)
            new_annotation_tensor[:len(output_annotation)] = output_annotation
            intermediate_annotation_list.append(new_annotation_tensor)
        output_annotation_tensor = torch.stack(intermediate_annotation_list).to(self.args['device'])
        sentences = [x[2] for x in observation_list]
        return ((input_annotation_tensors, input_alignment_tensors), output_annotation_tensor, sentences)
class ELMoData(InitYAMLObject):
    """
    Serves ELMo character-id tensors for sentences, with batching
    mediated by allennlp.
    """
    yaml_tag = '!ELMoData'

    def __init__(self, args):
        self.args = args

    def tensor_of_sentence(self, sentence, split_string):
        """
        Returns (character-id tensor, identity word alignment) for one
        sentence of (index, word, ...) tuples.
        """
        token_strings = [fields[1] for fields in sentence]
        identity_alignment = torch.eye(len(token_strings))
        character_ids = batch_to_ids([token_strings])[0, :, :]
        return character_ids, identity_alignment
class HuggingfaceData(InitYAMLObject):
    """
    Loading and serving minibatches of tokens to input
    to a Huggingface-loaded model.
    """
    yaml_tag = '!HuggingfaceData'

    def __init__(self, args, model_string, cache=None):
        # model_string names the Huggingface tokenizer/model to load.
        print('Constructing HuggingfaceData of {}'.format(model_string))
        self.tokenizer = AutoTokenizer.from_pretrained(model_string) #, add_prefix_space=True)
        self.args = args
        self.cache = cache
        self.task_name = 'hfacetokens.{}'.format(model_string)
        self.cache_is_setup = False

    def levenshtein_matrix(self, string1, string2):
        """
        Builds a 0/1 character-to-character alignment matrix of shape
        (len(string1), len(string2)) from Levenshtein edit opcodes.
        """
        opcodes = levenshtein.opcodes(string1, string2)
        mtx = torch.zeros(len(string1), len(string2))
        cumulative = 0  # NOTE(review): unused; kept as-is
        for opcode in opcodes:
            opcode_type, str1b, str1e, str2b, str2e = opcode
            # equal/replace spans align characters pairwise
            if opcode_type in {'equal', 'replace'}:
                diff = str1e - str1b
                for i in range(diff):
                    mtx[str1b+i,str2b+i] = 1
            # delete: characters only in string1 all map to one string2 index
            if opcode_type == 'delete':
                diff = str1e - str1b
                for i in range(diff):
                    mtx[str1b+i, str2b] = 1
            # insert: characters only in string2 all map to one string1 index
            if opcode_type == 'insert':
                diff = str2e - str2b
                for i in range(diff):
                    mtx[str1b, str2b+i] = 1
        return mtx

    def token_to_character_alignment(self, tokens):
        """
        Maps each token to a normalized indicator vector over the character
        positions it occupies in the concatenation of all tokens; space
        characters get zero weight.
        """
        ptb_sentence_length = sum((len(tok) for tok in tokens))
        ptb_string_token_alignment = []
        cumulative = 0
        for token in tokens:
            new_alignment = torch.zeros(ptb_sentence_length)
            for i, char in enumerate(token):
                if char == ' ':
                    continue
                new_alignment[i+cumulative] = 1
            # Normalize so each token's character weights sum to 1.
            new_alignment = new_alignment / sum(new_alignment)
            cumulative += len(token)
            ptb_string_token_alignment.append(new_alignment)
        return torch.stack(ptb_string_token_alignment)

    def de_ptb_tokenize(self, tokens):
        """
        Rewrites PTB-escaped tokens (-LRB-, `` etc.) to surface forms with
        heuristic spacing, returning (new token strings, character alignment
        from the rewritten string back to the original token characters).
        """
        tokens_with_spaces = []
        new_tokens_with_spaces = []
        ptb_sentence_length = sum((len(tok) for tok in tokens))
        token_alignments = []
        cumulative = 0
        for i, _ in enumerate(tokens):
            token = tokens[i]
            next_token = tokens[i+1] if i < len(tokens)-1 else '<EOS>'
            # Handle LaTeX-style quotes
            if token.strip() in {"``", "''"}:
                new_token = '"'
            elif token.strip() == '-LRB-':
                new_token = '('
            elif token.strip() == '-RRB-':
                new_token = ')'
            elif token.strip() == '-LSB-':
                new_token = '['
            elif token.strip() == '-RSB-':
                new_token = ']'
            elif token.strip() == '-LCB-':
                new_token = '{'
            elif token.strip() == '-RCB-':
                new_token = '}'
            else:
                new_token = token
            # No space after opening brackets/quotes, before clitics or
            # closing punctuation, or at the end of the sentence.
            use_space = (token.strip() not in {'(', '[', '{', '"', "'", '``', "''"} and
                         next_token.strip() not in {"'ll", "'re", "'ve", "n't",
                                                    "'s", "'LL", "'RE", "'VE",
                                                    "N'T", "'S", '"', "'", '``', "''", ')', '}', ']',
                                                    '.', ';', ':', '!', '?'}
                         and i != len(tokens) - 1)
            new_token = new_token.strip() + (' ' if use_space else '')
            new_tokens_with_spaces.append(new_token)
            tokens_with_spaces.append(token)
            new_alignment = torch.zeros(ptb_sentence_length)
            for index, char in enumerate(token):
                new_alignment[index+cumulative] = 1
            #new_alignment = new_alignment / sum(new_alignment)
            # One alignment row per character of the rewritten token.
            for new_char in new_token:
                token_alignments.append(new_alignment)
            cumulative += len(token)
        return new_tokens_with_spaces, torch.stack(token_alignments)

    def hface_ontonotes_alignment(self, sentence):
        """
        Composes token->character and character->character alignments to map
        Huggingface wordpieces onto the sentence's PTB tokens.
        Returns (column-normalized alignment on CPU, hface tokens, raw string).
        """
        tokens = [x[1] for x in sentence]
        tokens = [ x + (' ' if i !=len(tokens)-1 else '') for (i, x) in enumerate(tokens)]
        raw_tokens, ptb_to_deptb_alignment = self.de_ptb_tokenize(tokens)
        raw_string = ''.join(raw_tokens)
        ptb_token_to_ptb_string_alignment = self.token_to_character_alignment(tokens)
        #tokenizer = transformers.AutoTokenizer.from_pretrained('roberta-base')
        hface_tokens = self.tokenizer.tokenize(raw_string)
        hface_tokens_with_spaces = [x+ (' ' if i != len(hface_tokens)-1 else '')for (i, x) in enumerate(hface_tokens)]
        hface_token_to_hface_string_alignment = self.token_to_character_alignment(hface_tokens_with_spaces)
        hface_string = ' '.join(hface_tokens)
        hface_character_to_deptb_character_alignment = self.levenshtein_matrix(hface_string, raw_string)
        unnormalized_alignment = torch.matmul(torch.matmul(hface_token_to_hface_string_alignment.to(self.args['device']), hface_character_to_deptb_character_alignment.to(self.args['device'])),
                                              torch.matmul(ptb_token_to_ptb_string_alignment.to(self.args['device']), ptb_to_deptb_alignment.to(self.args['device']).t()).t())
        # Column-normalize so each PTB token's wordpiece weights sum to 1.
        return (unnormalized_alignment / torch.sum(unnormalized_alignment, dim=0)).cpu(), hface_tokens, raw_string

    def _setup_cache(self):
        """
        Constructs readers for caches that exist
        and writers for caches that do not.
        """
        if self.cache is None:
            return
        if self.cache_is_setup:
            return
        # Check cache readable/writeable
        train_cache_path, train_cache_readable, train_cache_writeable = \
            self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)
        dev_cache_path, dev_cache_readable, dev_cache_writeable = \
            self.cache.get_cache_path_and_check(DEV_STR, self.task_name)
        test_cache_path, test_cache_readable, test_cache_writeable = \
            self.cache.get_cache_path_and_check(TEST_STR, self.task_name)
        # If any of the train/dev/test are neither readable nor writeable, do not use cache.
        if ((not train_cache_readable and not train_cache_writeable) or
                (not dev_cache_readable and not dev_cache_writeable) or
                (not test_cache_readable and not test_cache_writeable)):
            self.cache = None
            print("Not using the cache at all, since at least of one "
                  "of {train,dev,test} cache neither readable nor writable.")
            return
        # Load readers or writers
        self.train_cache_writer = None
        self.dev_cache_writer = None
        self.test_cache_writer = None
        if train_cache_readable:
            # Generators replaying cached tensors in the order they were written.
            f = h5py.File(train_cache_path, 'r')
            self.train_cache_tokens = (torch.tensor(f[str(i)+'tok'][()]) for i in range(len(f.keys())))
            self.train_cache_alignments = (torch.tensor(f[str(i)+'aln'][()]) for i in range(len(f.keys())))
        elif train_cache_writeable:
            #self.train_cache_writer = h5py.File(train_cache_path, 'w')
            self.train_cache_writer = self.cache.get_hdf5_cache_writer(train_cache_path)
            self.train_cache_tokens = None
            self.train_cache_alignments = None
        else:
            raise ValueError("Train cache neither readable nor writeable")
        if dev_cache_readable:
            f2 = h5py.File(dev_cache_path, 'r')
            self.dev_cache_tokens = (torch.tensor(f2[str(i)+'tok'][()]) for i in range(len(f2.keys())))
            self.dev_cache_alignments = (torch.tensor(f2[str(i)+'aln'][()]) for i in range(len(f2.keys())))
        elif dev_cache_writeable:
            #self.dev_cache_writer = h5py.File(dev_cache_path, 'w')
            self.dev_cache_writer = self.cache.get_hdf5_cache_writer(dev_cache_path)
            self.dev_cache_tokens = None
            self.dev_cache_alignments = None
        else:
            raise ValueError("Dev cache neither readable nor writeable")
        if test_cache_readable:
            f3 = h5py.File(test_cache_path, 'r')
            self.test_cache_tokens = (torch.tensor(f3[str(i)+'tok'][()]) for i in range(len(f3.keys())))
            self.test_cache_alignments = (torch.tensor(f3[str(i)+'aln'][()]) for i in range(len(f3.keys())))
        elif test_cache_writeable:
            #self.test_cache_writer = h5py.File(test_cache_path, 'w')
            self.test_cache_writer = self.cache.get_hdf5_cache_writer(test_cache_path)
            self.test_cache_tokens = None
            self.test_cache_alignments = None
        else:
            raise ValueError("Test cache neither readable nor writeable")
        self.cache_is_setup = True

    def tensor_of_sentence(self, sentence, split):
        """
        Returns (wordpiece_indices, alignment) for one sentence, reading
        from or writing to the split's cache when one is configured.
        """
        self._setup_cache()
        if self.cache is None:
            labels = self._tensor_of_sentence(sentence, split)
            return labels
        # Otherwise, either read from or write to cache
        if split == TRAIN_STR and self.train_cache_tokens is not None:
            return next(self.train_cache_tokens), next(self.train_cache_alignments)
        if split == DEV_STR and self.dev_cache_tokens is not None:
            return next(self.dev_cache_tokens), next(self.dev_cache_alignments)
        if split == TEST_STR and self.test_cache_tokens is not None:
            return next(self.test_cache_tokens), next(self.test_cache_alignments)
        cache_writer = (self.train_cache_writer if split == TRAIN_STR else (
            self.dev_cache_writer if split == DEV_STR else (
                self.test_cache_writer if split == TEST_STR else None)))
        if cache_writer is None:
            raise ValueError("Unknown split: {}".format(split))
        wordpiece_indices, alignments = self._tensor_of_sentence(sentence, split)
        # Cache datasets are keyed "<running index>tok" / "<running index>aln".
        tok_string_key = str(len(list(filter(lambda x: 'tok' in x, cache_writer.keys())))) + 'tok'
        tok_dset = cache_writer.create_dataset(tok_string_key, wordpiece_indices.shape)
        tok_dset[:] = wordpiece_indices
        aln_string_key = str(len(list(filter(lambda x: 'aln' in x, cache_writer.keys())))) + 'aln'
        aln_dset = cache_writer.create_dataset(aln_string_key, alignments.shape)
        aln_dset[:] = alignments
        return wordpiece_indices, alignments

    def _tensor_of_sentence(self, sentence, split):
        """
        Computes (wordpiece_indices, alignment) for a sentence, padding the
        alignment with zero rows for the tokenizer's special tokens.
        """
        alignment, wordpiece_strings, raw_string = self.hface_ontonotes_alignment(sentence)
        # add [SEP] and [CLS] empty alignments
        empty = torch.zeros(1, alignment.shape[1])
        alignment = torch.cat((empty, alignment, empty))
        #wordpiece_indices = torch.tensor(self.tokenizer(wordpiece_strings)
        wordpiece_indices = torch.tensor(self.tokenizer(raw_string).input_ids) #, is_split_into_words=True))
        return wordpiece_indices, alignment

    def _naive_tensor_of_sentence(self, sentence, split_string):
        """
        Converts from a tuple-formatted sentence (e.g, from conll-formatted data)
        to a Torch tensor of integers representing subword piece ids for input to
        a Huggingface-formatted neural model
        """
        # CLS token given by tokenizer
        wordpiece_indices = []
        wordpiece_alignment_vecs = [torch.zeros(len(sentence))]
        # language tokens
        for index, token in enumerate([x[1] for x in sentence]):
            new_wordpieces = self.tokenizer.tokenize(token)
            wordpiece_alignment = torch.zeros(len(sentence))
            wordpiece_alignment[index] = 1
            for wordpiece in new_wordpieces:
                wordpiece_alignment_vecs.append(torch.clone(wordpiece_alignment))
            wordpiece_indices.extend(new_wordpieces)
        # SEP token given by tokenizer
        wordpiece_indices = torch.tensor(self.tokenizer.encode(wordpiece_indices))
        wordpiece_alignment_vecs.append(torch.zeros(len(sentence)))
        wordpiece_alignment_vecs = torch.stack(wordpiece_alignment_vecs)
        return wordpiece_indices, wordpiece_alignment_vecs
class AnnotationData(InitYAMLObject):
    """
    Serves annotation label tensors produced by a task object.
    """
    yaml_tag = '!AnnotationDataset'

    def __init__(self, args, task):
        self.args = args
        self.task = task

    def tensor_of_sentence(self, sentence, split_string):
        """
        Maps a tuple-formatted sentence (e.g. conll-style rows) to a pair
        (label tensor from the task, identity word alignment).
        """
        labels = self.task.labels_of_sentence(sentence, split_string)
        return labels, torch.eye(len(sentence))
class Loader(InitYAMLObject):
    """
    Base class for objects that read datasets from disk
    and yield sentence buffers for tokenization and labeling.

    Strictly for description: concrete readers in this module
    (e.g. OntonotesReader, SST2Reader) subclass it and implement
    yield_dataset(split_string).
    """
    yaml_tag = '!Loader'
class OntonotesReader(Loader):
    """
    Minutae for reading the Ontonotes dataset,
    as formatted as described in the readme
    """
    yaml_tag = '!OntonotesReader'

    def __init__(self, args, train_path, dev_path, test_path, cache):
        """
        Arguments:
          args: global configuration (kept for loader-interface parity)
          train_path, dev_path, test_path: per-split TSV file locations
          cache: cache object (kept for loader-interface parity)
        """
        print('Constructing OntoNotesReader')
        self.train_path = train_path
        self.dev_path = dev_path
        self.test_path = test_path
        self.cache = cache

    @staticmethod
    def sentence_lists_of_stream(ontonotes_stream):
        """
        Yield sentences from raw ontonotes stream
        Arguments:
          ontonotes_stream: iterable of ontonotes file lines
        Yields:
          a buffer for each sentence in the stream; elements
          in the buffer are lists defined by TSV fields of the
          ontonotes stream
        """
        buf = []
        for line in ontonotes_stream:
            if line.startswith('#'):
                continue
            if not line.strip():
                # Fix: a leading blank line or consecutive blank separators
                # used to yield empty sentence buffers; only yield non-empty.
                if buf:
                    yield buf
                buf = []
            else:
                buf.append([x.strip() for x in line.split('\t')])
        # Flush the final sentence when the file lacks a trailing blank line.
        if buf:
            yield buf

    def yield_dataset(self, split_string):
        """
        Yield a list of attribute lines, given by ontonotes_fields,
        for each sentence in the training set of ontonotes
        """
        path = (self.train_path if split_string == TRAIN_STR else
                (self.dev_path if split_string == DEV_STR else
                 (self.test_path if split_string == TEST_STR else
                  None)))
        if path is None:
            raise ValueError("Unknown split string: {}".format(split_string))
        with open(path) as fin:
            for sentence in OntonotesReader.sentence_lists_of_stream(fin):
                yield sentence
class SST2Reader(Loader):
    """
    Minutae for reading the Stanford Sentiment (SST-2)
    dataset, as downloaded from the GLUE website.
    """
    yaml_tag = '!SST2Reader'

    def __init__(self, args, train_path, dev_path, test_path, cache):
        """
        Arguments:
          args: global configuration (kept for loader-interface parity)
          train_path, dev_path, test_path: per-split TSV file locations
          cache: cache object (kept for loader-interface parity)
        """
        print('Constructing SST2Reader')
        self.train_path = train_path
        self.dev_path = dev_path
        self.test_path = test_path
        self.cache = cache

    @staticmethod
    def sentence_lists_of_stream(sst2_stream):
        """
        Yield sentences from raw sst2 stream
        Arguments:
          sst2_stream: iterable of sst2_stream lines
        Yields:
          a buffer for each sentence in the stream;
          elements in the buffer are lists defined by TSV
          fields of the ontonotes stream
        """
        # Fix: use next() with a default — a bare next() on an empty stream
        # raises StopIteration inside a generator, which PEP 479 turns into
        # a RuntimeError; an empty stream should simply yield nothing.
        _ = next(sst2_stream, None)  # Get rid of the column labels
        for line in sst2_stream:
            word_string, label_string = [x.strip() for x in line.split('\t')]
            word_tokens = word_string.split(' ')
            indices = [str(i) for i, _ in enumerate(word_tokens)]
            label_tokens = [label_string for _ in word_tokens]
            yield list(zip(indices, word_tokens, label_tokens))

    def yield_dataset(self, split_string):
        """
        Yield a list of attribute lines, given by ontonotes_fields,
        for each sentence in the training set of ontonotes
        """
        path = (self.train_path if split_string == TRAIN_STR else
                (self.dev_path if split_string == DEV_STR else
                 (self.test_path if split_string == TEST_STR else
                  None)))
        if path is None:
            raise ValueError("Unknown split string: {}".format(split_string))
        with open(path) as fin:
            for sentence in SST2Reader.sentence_lists_of_stream(fin):
                yield sentence
| 40.238095
| 188
| 0.697679
| 2,940
| 21,970
| 4.962925
| 0.12483
| 0.014187
| 0.005757
| 0.006785
| 0.478514
| 0.395175
| 0.306833
| 0.275992
| 0.191968
| 0.161469
| 0
| 0.005634
| 0.200182
| 21,970
| 545
| 189
| 40.311927
| 0.824721
| 0.180337
| 0
| 0.213483
| 0
| 0
| 0.039842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081461
| false
| 0
| 0.030899
| 0.008427
| 0.219101
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42e6a0854dc4ea36c5a33692e83aa3d38c0f49cc
| 2,505
|
py
|
Python
|
function/python/brightics/function/statistics/test/correlation_test.py
|
parkjh80/studio
|
6d8d8384272e5e1b2838b12e5557272a19408e89
|
[
"Apache-2.0"
] | 202
|
2018-10-23T04:37:35.000Z
|
2022-01-27T05:51:10.000Z
|
function/python/brightics/function/statistics/test/correlation_test.py
|
data-weirdo/studio
|
48852c4f097f773ce3d408b59f79fda2e2d60470
|
[
"Apache-2.0"
] | 444
|
2018-11-07T08:41:14.000Z
|
2022-03-16T06:48:57.000Z
|
function/python/brightics/function/statistics/test/correlation_test.py
|
data-weirdo/studio
|
48852c4f097f773ce3d408b59f79fda2e2d60470
|
[
"Apache-2.0"
] | 99
|
2018-11-08T04:12:13.000Z
|
2022-03-30T05:36:27.000Z
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from brightics.function.statistics import correlation
from brightics.common.datasets import load_iris
import HtmlTestRunner
import os
class CorrelationTest(unittest.TestCase):
    """Unit tests for brightics' correlation() over the iris dataset."""

    def setUp(self):
        print("*** Correlation UnitTest Start ***")
        self.testdata = load_iris()

    def tearDown(self):
        print("*** Correlation UnitTest End ***")

    def test_first(self):
        # Pearson correlation between sepal_length and sepal_width,
        # checking pair labels, coefficient, and p-value.
        cr = correlation(self.testdata, vars=['sepal_length', 'sepal_width'], method='pearson', display_plt=True, height=2.5, corr_prec=2)
        DF1 = cr['result']['corr_table'].values
        # print(DF1)
        np.testing.assert_equal(DF1[0][0], 'sepal_width')
        np.testing.assert_equal(DF1[0][1], 'sepal_length')
        np.testing.assert_almost_equal(DF1[0][2], -0.10936924995064935, 10)
        np.testing.assert_almost_equal(DF1[0][3], 0.1827652152713665, 10)

    def test_second(self):
        # Spearman correlation over three variables; rows are the three
        # pairwise combinations, columns [2]/[3] are coefficient/p-value.
        cr = correlation(self.testdata, vars=['sepal_width', 'petal_length', 'petal_width'], method='spearman', display_plt=False, height=2.5, corr_prec=2)
        DF2 = cr['result']['corr_table'].values
        # print(DF2)
        np.testing.assert_almost_equal(DF2[0][2], -0.3034206463815157, 10)
        np.testing.assert_almost_equal(DF2[0][3], 0.0001603809454660342, 10)
        np.testing.assert_almost_equal(DF2[1][2], -0.2775110724763029, 10)
        np.testing.assert_almost_equal(DF2[1][3], 0.0005856929405699988, 10)
        np.testing.assert_almost_equal(DF2[2][2], 0.9360033509355782, 10)
        np.testing.assert_almost_equal(DF2[2][3], 5.383649646072797e-69, 10)
if __name__ == '__main__':
    # Run the suite and write a combined HTML report into the
    # repository-level "reports" directory (seven levels up from this file).
    filepath = os.path.dirname(os.path.abspath(__file__))
    # Fixed misspelled local name ("reportFoler" -> "report_folder").
    report_folder = filepath + "/../../../../../../../reports"
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(
        combine_reports=True, output=report_folder))
| 41.065574
| 155
| 0.683433
| 332
| 2,505
| 5.012048
| 0.427711
| 0.054087
| 0.090144
| 0.100962
| 0.280048
| 0.280048
| 0.195913
| 0.076923
| 0
| 0
| 0
| 0.102615
| 0.190818
| 2,505
| 60
| 156
| 41.75
| 0.718303
| 0.229142
| 0
| 0
| 0
| 0
| 0.122995
| 0.015508
| 0
| 0
| 0
| 0
| 0.3125
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.34375
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42e77bb6f8a615aa18b12b83385ee014877a332f
| 340
|
py
|
Python
|
fdp/__init__.py
|
cffbots/fairdatapoint
|
6142b31408b5746d1a7e9f59e61735b7ad8bfde9
|
[
"Apache-2.0"
] | 9
|
2020-03-27T12:58:51.000Z
|
2021-01-21T16:22:46.000Z
|
fdp/__init__.py
|
MaastrichtU-IDS/fairdatapoint
|
f9f38903a629acbdb74a6a20014ac424cc3d3206
|
[
"Apache-2.0"
] | 26
|
2016-05-26T22:22:34.000Z
|
2020-02-13T07:12:37.000Z
|
fdp/__init__.py
|
MaastrichtU-IDS/fairdatapoint
|
f9f38903a629acbdb74a6a20014ac424cc3d3206
|
[
"Apache-2.0"
] | 4
|
2020-06-09T18:37:33.000Z
|
2020-12-16T08:05:01.000Z
|
# -*- coding: utf-8 -*-
"""Package initialization: exposes ``__version__`` and package metadata."""
import logging
from .__version__ import __version__
# Attach a NullHandler so applications that use this package without
# configuring logging do not see "No handler found" warnings (standard
# practice for libraries).
logging.getLogger(__name__).addHandler(logging.NullHandler())
__author__ = "Rajaram Kaliyaperumal, Arnold Kuzniar, Cunliang Geng, Carlos Martinez-Ortiz"
__email__ = 'c.martinez@esciencecenter.nl'
__status__ = 'beta'
__license__ = 'Apache License, Version 2.0'
| 26.153846
| 90
| 0.770588
| 38
| 340
| 6.157895
| 0.815789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009934
| 0.111765
| 340
| 12
| 91
| 28.333333
| 0.764901
| 0.061765
| 0
| 0
| 0
| 0
| 0.422713
| 0.088328
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42e8e15830841aa965ec225fd7e1715fe1c14fdd
| 60,795
|
py
|
Python
|
fluids/flow_meter.py
|
rddaz2013/fluids
|
acde6a6edc2110c152c59341574739b24a2f1bad
|
[
"MIT"
] | null | null | null |
fluids/flow_meter.py
|
rddaz2013/fluids
|
acde6a6edc2110c152c59341574739b24a2f1bad
|
[
"MIT"
] | null | null | null |
fluids/flow_meter.py
|
rddaz2013/fluids
|
acde6a6edc2110c152c59341574739b24a2f1bad
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2018 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import cos, sin, tan, atan, pi, radians, exp, acos, log10
import numpy as np
from fluids.friction import friction_factor
from fluids.core import Froude_densimetric
from scipy.optimize import newton, brenth
from scipy.constants import g, inch
# Public names exported by this module; extended further below once the
# meter-type string constants have been defined.
__all__ = ['C_Reader_Harris_Gallagher',
           'differential_pressure_meter_solver',
           'differential_pressure_meter_dP',
           'orifice_discharge', 'orifice_expansibility',
           'Reader_Harris_Gallagher_discharge',
           'discharge_coefficient_to_K', 'K_to_discharge_coefficient',
           'dP_orifice', 'velocity_of_approach_factor',
           'flow_coefficient', 'nozzle_expansibility',
           'C_long_radius_nozzle', 'C_ISA_1932_nozzle', 'C_venturi_nozzle',
           'orifice_expansibility_1989', 'dP_venturi_tube',
           'diameter_ratio_cone_meter', 'diameter_ratio_wedge_meter',
           'cone_meter_expansibility_Stewart', 'dP_cone_meter',
           'C_wedge_meter_Miller',
           'C_Reader_Harris_Gallagher_wet_venturi_tube',
           'dP_Reader_Harris_Gallagher_wet_venturi_tube'
           ]

# String identifiers for orifice hole geometries.
CONCENTRIC_ORIFICE = 'concentric'
ECCENTRIC_ORIFICE = 'eccentric'
SEGMENTAL_ORIFICE = 'segmental'
# NOTE(review): 'conditioing' looks like a typo for 'conditioning', but the
# string is this constant's public value — confirm no caller compares
# against the exact string before changing it.
CONDITIONING_4_HOLE_ORIFICE = 'Rosemount 4 hole self conditioing'
ORIFICE_HOLE_TYPES = [CONCENTRIC_ORIFICE, ECCENTRIC_ORIFICE, SEGMENTAL_ORIFICE,
                      CONDITIONING_4_HOLE_ORIFICE]

# String identifiers for the pressure-tap orientations accepted by the
# discharge-coefficient correlations below.
ORIFICE_CORNER_TAPS = 'corner'
ORIFICE_FLANGE_TAPS = 'flange'
ORIFICE_D_AND_D_2_TAPS = 'D and D/2'

# String identifiers for the supported differential-pressure meter types.
ISO_5167_ORIFICE = 'ISO 5167 orifice'
LONG_RADIUS_NOZZLE = 'long radius nozzle'
ISA_1932_NOZZLE = 'ISA 1932 nozzle'
# NOTE(review): 'venuri' looks like a typo for 'venturi'; value left as-is
# because external code may compare against this exact string.
VENTURI_NOZZLE = 'venuri nozzle'
AS_CAST_VENTURI_TUBE = 'as cast convergent venturi tube'
MACHINED_CONVERGENT_VENTURI_TUBE = 'machined convergent venturi tube'
ROUGH_WELDED_CONVERGENT_VENTURI_TUBE = 'rough welded convergent venturi tube'
CONE_METER = 'cone meter'
WEDGE_METER = 'wedge meter'

# Export the meter-type constants as well.
__all__.extend(['ISO_5167_ORIFICE', 'LONG_RADIUS_NOZZLE', 'ISA_1932_NOZZLE',
                'VENTURI_NOZZLE', 'AS_CAST_VENTURI_TUBE',
                'MACHINED_CONVERGENT_VENTURI_TUBE',
                'ROUGH_WELDED_CONVERGENT_VENTURI_TUBE', 'CONE_METER',
                'WEDGE_METER'])
def orifice_discharge(D, Do, P1, P2, rho, C, expansibility=1.0):
    r'''Compute the mass flow rate through an orifice plate from its
    geometry, the measured tap pressures, and the fluid density.

    .. math::
        m = \left(\frac{\pi D_o^2}{4}\right) C \frac{\sqrt{2\Delta P \rho_1}}
        {\sqrt{1 - \beta^4}}\cdot \epsilon

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    C : float
        Coefficient of discharge of the orifice, [-]
    expansibility : float, optional
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Returns
    -------
    m : float
        Mass flow rate of fluid, [kg/s]

    Notes
    -----
    This is formula 1-12 in [1]_ and also appears in [2]_.

    Examples
    --------
    >>> orifice_discharge(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, rho=1.1646,
    ... C=0.5988, expansibility=0.9975)
    0.01120390943807026

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    pressure_drop = P1 - P2
    beta_ratio = Do/D
    orifice_area = 0.25*pi*Do*Do
    # 1/sqrt(1 - beta^4) is the velocity-of-approach factor.
    approach_factor = (1.0 - beta_ratio**4)**-0.5
    return (orifice_area*C*expansibility*approach_factor
            *(2.0*pressure_drop*rho)**0.5)
def orifice_expansibility(D, Do, P1, P2, k):
    r'''Compute the expansibility factor of an orifice plate from its
    geometry, the measured tap pressures, and the fluid's isentropic
    exponent.

    .. math::
        \epsilon = 1 - (0.351 + 0.256\beta^4 + 0.93\beta^8)
        \left[1-\left(\frac{P_2}{P_1}\right)^{1/\kappa}\right]

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    Determined for P2/P1 >= 0.80 with air, steam, and natural gas; there is
    no objection to using it for other fluids.

    Examples
    --------
    >>> orifice_expansibility(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9974739057343425

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta = Do/D
    beta4 = beta**4
    # Geometry-dependent coefficient of the compressibility correction.
    geometry_term = 0.351 + 0.256*beta4 + 0.93*beta4*beta4
    pressure_term = 1.0 - (P2/P1)**(1.0/k)
    return 1.0 - geometry_term*pressure_term
def orifice_expansibility_1989(D, Do, P1, P2, k):
    r'''Compute the expansibility factor of an orifice plate using the older
    1989 correlation.

    .. math::
        \epsilon = 1- (0.41 + 0.35\beta^4)\Delta P/\kappa/P_1

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    Determined for P2/P1 >= 0.75 with air, steam, and natural gas; there is
    no objection to using it for other fluids. The standard also presents a
    transformation in terms of the downstream pressure:

    .. math::
        \epsilon_2 = \sqrt{1 + \frac{\Delta P}{P_2}} - (0.41 + 0.35\beta^4)
        \frac{\Delta P}{\kappa P_2 \sqrt{1 + \frac{\Delta P}{P_2}}}

    [2]_ recommends this formulation for wedge meters as well.

    Examples
    --------
    >>> orifice_expansibility_1989(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9970510687411718

    References
    ----------
    .. [1] American Society of Mechanical Engineers. MFC-3M-1989 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2005.
    .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    '''
    beta4 = (Do/D)**4
    pressure_drop = P1 - P2
    correction = (0.41 + 0.35*beta4)*pressure_drop/(k*P1)
    return 1.0 - correction
def C_Reader_Harris_Gallagher(D, Do, rho, mu, m, taps='corner'):
    r'''Calculates the coefficient of discharge of the orifice based on the
    geometry of the plate, measured pressures of the orifice, mass flow rate
    through the orifice, and the density and viscosity of the fluid.

    .. math::
        C = 0.5961 + 0.0261\beta^2 - 0.216\beta^8 + 0.000521\left(\frac{
        10^6\beta}{Re_D}\right)^{0.7}\\
        + (0.0188 + 0.0063A)\beta^{3.5} \left(\frac{10^6}{Re_D}\right)^{0.3} \\
        +(0.043 + 0.080\exp(-10L_1) -0.123\exp(-7L_1))(1-0.11A)\frac{\beta^4}
        {1-\beta^4} \\
        - 0.031(M_2' - 0.8M_2'^{1.1})\beta^{1.3}

    .. math::
        M_2' = \frac{2L_2'}{1-\beta}

        A = \left(\frac{19000\beta}{Re_{D}}\right)^{0.8}

        Re_D = \frac{\rho v D}{\mu}

    If D < 71.12 mm (2.8 in.) (the coefficient below matches the code and
    ISO 5167-2; a previous version of this docstring showed 0.11):

    .. math::
        C += 0.011(0.75-\beta)\left(2.8-\frac{D}{0.0254}\right)

    If the orifice has corner taps:

    .. math::
        L_1 = L_2' = 0

    If the orifice has D and D/2 taps:

    .. math::
        L_1 = 1

        L_2' = 0.47

    If the orifice has Flange taps:

    .. math::
        L_1 = L_2' = \frac{0.0254}{D}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the orifice, [kg/s]
    taps : str
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2',
        [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the orifice, [-]

    Raises
    ------
    ValueError
        If `taps` is not one of the supported tap locations.

    Notes
    -----
    The following limits apply to the orifice plate standard [1]_:

    The measured pressure difference for the orifice plate should be under
    250 kPa. There are roughness limits as well; the roughness should be
    under 6 micrometers, although there are many more conditions given
    in [1]_.

    For orifice plates with D and D/2 or corner pressure taps:

    * Orifice bore diameter must be larger than 12.5 mm (0.5 inches)
    * Pipe diameter between 50 mm and 1 m (2 to 40 inches)
    * Beta between 0.1 and 0.75 inclusive
    * Reynolds number larger than 5000 (for :math:`0.10 \le \beta \le 0.56`)
      or for :math:`\beta \ge 0.56, Re_D \ge 16000\beta^2`

    For orifice plates with flange pressure taps:

    * Orifice bore diameter must be larger than 12.5 mm (0.5 inches)
    * Pipe diameter between 50 mm and 1 m (2 to 40 inches)
    * Beta between 0.1 and 0.75 inclusive
    * Reynolds number larger than 5000 and also larger than
      :math:`170000\beta^2 D`.

    This is also presented in Crane's TP410 (2009) publication, whereas the
    1999 and 1982 editions showed only a graph for discharge coefficients.

    Examples
    --------
    >>> C_Reader_Harris_Gallagher(D=0.07391, Do=0.0222, rho=1.165, mu=1.85E-5,
    ... m=0.12, taps='flange')
    0.5990326277163659

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    .. [3] Reader-Harris, M. J., "The Equation for the Expansibility Factor for
       Orifice Plates," Proceedings of FLOMEKO 1998, Lund, Sweden, 1998:
       209-214.
    .. [4] Reader-Harris, Michael. Orifice Plates and Venturi Tubes. Springer,
       2015.
    '''
    A_pipe = pi/4.*D*D
    v = m/(A_pipe*rho)
    Re_D = rho*v*D/mu
    beta = Do/D
    # Tap-position constants L1 (upstream) and L2' (downstream) per [1]_/[2]_.
    if taps == 'corner':
        L1, L2_prime = 0.0, 0.0
    elif taps == 'D' or taps == 'D/2':
        L1 = 1.0
        L2_prime = 0.47
    elif taps == 'flange':
        L1 = L2_prime = 0.0254/D
    else:
        # ValueError (a subclass of Exception, so existing handlers still
        # match) is the idiomatic error for an invalid argument value.
        raise ValueError('Unsupported tap location')
    beta2 = beta*beta
    beta4 = beta2*beta2
    beta8 = beta4*beta4
    A = (19000.0*beta/Re_D)**0.8
    M2_prime = 2*L2_prime/(1.0 - beta)
    delta_C_upstream = ((0.043 + 0.080*exp(-1E1*L1) - 0.123*exp(-7.0*L1))
                        *(1.0 - 0.11*A)*beta4/(1.0 - beta4))
    # The max part is not in the ISO standard
    delta_C_downstream = (-0.031*(M2_prime - 0.8*M2_prime**1.1)*beta**1.3
                          *(1.0 + 8*max(log10(3700./Re_D), 0.0)))
    # C_inf is discharge coefficient with corner taps for infinite Re
    # Cs, slope term, provides increase in discharge coefficient for lower
    # Reynolds numbers.
    # max term is not in the ISO standard
    C_inf_C_s = (0.5961 + 0.0261*beta2 - 0.216*beta8
                 + 0.000521*(1E6*beta/Re_D)**0.7
                 + (0.0188 + 0.0063*A)*beta**3.5*(
                 max((1E6/Re_D)**0.3, 22.7 - 4700.0*(Re_D/1E6))))
    C = (C_inf_C_s + delta_C_upstream + delta_C_downstream)
    if D < 0.07112:
        # Limit is 2.8 inches, .1 inches smaller than the internal diameter of
        # a sched. 80 pipe.
        # Suggested to be required not because of any effect of small
        # diameters themselves, but because of edge radius differences.
        # max term is given in [4]_ Reader-Harris, Michael book
        delta_C_diameter = 0.011*(0.75 - beta)*max((2.8 - D/0.0254), 0.0)
        C += delta_C_diameter
    return C
def Reader_Harris_Gallagher_discharge(D, Do, P1, P2, rho, mu, k, taps='corner'):
    r'''Solve iteratively for the mass flow rate of fluid through an orifice,
    given the plate geometry, the measured tap pressures, and the density,
    viscosity, and isentropic exponent of the fluid.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    k : float
        Isentropic exponent of fluid, [-]
    taps : str
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2',
        [-]

    Returns
    -------
    m : float
        Mass flow rate of fluid through the orifice, [kg/s]

    Examples
    --------
    >>> Reader_Harris_Gallagher_discharge(D=0.07366, Do=0.05, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33, taps='D')
    7.702338035732167

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    # Expansibility depends only on geometry and pressures, not on the flow
    # guess, so it is evaluated once outside the residual.
    epsilon = orifice_expansibility(D=D, Do=Do, P1=P1, P2=P2, k=k)

    def _residual(m_guess):
        # Discharge coefficient depends on the Reynolds number, hence on the
        # current mass-flow guess.
        C = C_Reader_Harris_Gallagher(D=D, Do=Do, rho=rho, mu=mu, m=m_guess,
                                      taps=taps)
        predicted = orifice_discharge(D=D, Do=Do, P1=P1, P2=P2, rho=rho,
                                      C=C, expansibility=epsilon)
        return m_guess - predicted

    # 2.81 kg/s is the same starting guess the original implementation used.
    return newton(_residual, 2.81)
def discharge_coefficient_to_K(D, Do, C):
    r'''Convert a discharge coefficient into a standard loss coefficient,
    for use in computing the actual pressure drop of an orifice or other
    device.

    .. math::
        K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match the specified pressure drop formula in [1]_; it can almost be
    matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    Examples
    --------
    >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
    5.2314291729754

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta2 = (Do/D)*(Do/D)
    beta4 = beta2*beta2
    root = (1.0 - beta4*(1.0 - C*C))**0.5
    excess = root/(C*beta2) - 1.0
    return excess*excess
def K_to_discharge_coefficient(D, Do, K):
    r'''Convert a standard loss coefficient into a discharge coefficient.

    .. math::
        C = \sqrt{\frac{1}{2 \sqrt{K} \beta^{4} + K \beta^{4}}
        - \frac{\beta^{4}}{2 \sqrt{K} \beta^{4} + K \beta^{4}} }

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match the specified pressure drop formula in [1]_; it can almost be
    matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    This expression was derived with SymPy, and checked numerically. There
    were three other, incorrect roots.

    Examples
    --------
    >>> K_to_discharge_coefficient(D=0.07366, Do=0.05, K=5.2314291729754)
    0.6151200000000001

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta = Do/D
    beta4 = beta*beta*beta*beta
    # Shared denominator (2*sqrt(K) + K)*beta^4; numerator simplifies to
    # (1 - beta^4).
    denominator = 2.0*K**0.5*beta4 + K*beta4
    return ((1.0 - beta4)/denominator)**0.5
def dP_orifice(D, Do, P1, P2, C):
    r'''Compute the non-recoverable pressure drop of an orifice plate from
    the measured pressure drop, the plate geometry, and the discharge
    coefficient.

    .. math::
        \Delta\bar w = \frac{\sqrt{1-\beta^4(1-C^2)}-C\beta^2}
        {\sqrt{1-\beta^4(1-C^2)}+C\beta^2} (P_1 - P_2)

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the orifice plate, [Pa]

    Notes
    -----
    This formula can be well approximated by:

    .. math::
        \Delta\bar w = \left(1 - \beta^{1.9}\right)(P_1 - P_2)

    The recoverable pressure drop should be recovered by 6 pipe diameters
    downstream of the orifice plate.

    Examples
    --------
    >>> dP_orifice(D=0.07366, Do=0.05, P1=200000.0, P2=183000.0, C=0.61512)
    9069.474705745388

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta2 = (Do/D)*(Do/D)
    beta4 = beta2*beta2
    root = (1.0 - beta4*(1.0 - C*C))**0.5
    # Fraction of the measured pressure drop that is not recovered.
    loss_fraction = (root - C*beta2)/(root + C*beta2)
    return loss_fraction*(P1 - P2)
def velocity_of_approach_factor(D, Do):
    r'''Compute the orifice-plate design factor known as the `velocity of
    approach`.

    .. math::
        \text{Velocity of approach} = \frac{1}{\sqrt{1 - \beta^4}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]

    Returns
    -------
    velocity_of_approach : float
        Velocity of approach factor of the orifice, [-]

    Examples
    --------
    >>> velocity_of_approach_factor(D=0.0739, Do=0.0222)
    1.0040970074165514

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    '''
    beta4 = (Do/D)**4
    return 1.0/(1.0 - beta4)**0.5
def flow_coefficient(D, Do, C):
    r'''Compute the differential-pressure flow meter design factor known as
    the `flow coefficient`. Not to be confused with the flow coefficient
    often used when discussing valves.

    .. math::
        \text{Flow coefficient} = \frac{C}{\sqrt{1 - \beta^4}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of flow meter characteristic dimension at flow conditions, [m]
    C : float
        Coefficient of discharge of the flow meter, [-]

    Returns
    -------
    flow_coefficient : float
        Differential pressure flow meter flow coefficient, [-]

    Notes
    -----
    Used not just for orifices but for other differential pressure flow
    meters as well [2]_. Sometimes given the symbol K; equal to the product
    of the discharge coefficient and the velocity of approach factor [2]_.

    Examples
    --------
    >>> flow_coefficient(D=0.0739, Do=0.0222, C=0.6)
    0.6024582044499308

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    '''
    beta = Do/D
    return C/(1.0 - beta**4)**0.5
def nozzle_expansibility(D, Do, P1, P2, k):
    r'''Compute the expansibility factor for a nozzle or venturi nozzle from
    its geometry, the measured tap pressures, and the fluid's isentropic
    exponent.

    .. math::
        \epsilon = \left\{\left(\frac{\kappa \tau^{2/\kappa}}{\kappa-1}\right)
        \left(\frac{1 - \beta^4}{1 - \beta^4 \tau^{2/\kappa}}\right)
        \left[\frac{1 - \tau^{(\kappa-1)/\kappa}}{1 - \tau}
        \right] \right\}^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice of the venturi or nozzle, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    Determined for the range P2/P1 >= 0.75.

    Examples
    --------
    >>> nozzle_expansibility(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9945702344566746

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    beta4 = (Do/D)**4
    tau = P2/P1
    tau_2k = tau**(2.0/k)
    kappa_term = k*tau_2k/(k - 1.0)
    area_term = (1.0 - beta4)/(1.0 - beta4*tau_2k)
    pressure_term = (1.0 - tau**((k - 1.0)/k))/(1.0 - tau)
    return (kappa_term*area_term*pressure_term)**0.5
def C_long_radius_nozzle(D, Do, rho, mu, m):
    r'''Compute the coefficient of discharge of a long radius nozzle from its
    geometry, the mass flow rate through it, and the density and viscosity
    of the fluid.

    .. math::
        C = 0.9965 - 0.00653\beta^{0.5} \left(\frac{10^6}{Re_D}\right)^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of long radius nozzle orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the nozzle, [kg/s]

    Returns
    -------
    C : float
        Coefficient of discharge of the long radius nozzle orifice, [-]

    Examples
    --------
    >>> C_long_radius_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
    0.9805503704679863

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    cross_section = 0.25*pi*D*D
    velocity = m/(cross_section*rho)
    Re_D = rho*velocity*D/mu
    beta_root = (Do/D)**0.5
    return 0.9965 - 0.00653*beta_root*(1E6/Re_D)**0.5
def C_ISA_1932_nozzle(D, Do, rho, mu, m):
    r'''Compute the coefficient of discharge of an ISA 1932 style nozzle from
    its geometry, the mass flow rate through it, and the density and
    viscosity of the fluid.

    .. math::
        C = 0.9900 - 0.2262\beta^{4.1} - (0.00175\beta^2 - 0.0033\beta^{4.15})
        \left(\frac{10^6}{Re_D}\right)^{1.15}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of nozzle orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the nozzle, [kg/s]

    Returns
    -------
    C : float
        Coefficient of discharge of the nozzle orifice, [-]

    Examples
    --------
    >>> C_ISA_1932_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
    0.9635849973250495

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    cross_section = 0.25*pi*D*D
    velocity = m/(cross_section*rho)
    Re_D = rho*velocity*D/mu
    beta = Do/D
    Re_term = (1E6/Re_D)**1.15
    return (0.9900 - 0.2262*beta**4.1
            - (0.00175*beta*beta - 0.0033*beta**4.15)*Re_term)
def C_venturi_nozzle(D, Do):
    r'''Calculates the coefficient of discharge of a Venturi style nozzle
    used for measuring flow rate of fluid, based on the geometry of the
    nozzle.

    .. math::
        C = 0.9858 - 0.196\beta^{4.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of nozzle orifice at flow conditions, [m]

    Returns
    -------
    C : float
        Coefficient of discharge of the nozzle orifice, [-]

    Notes
    -----
    The coefficient 0.196 matches the equation documented above and given in
    [1]_ and [2]_; a previous implementation used 0.198, disagreeing with
    its own documented formula.

    Examples
    --------
    >>> round(C_venturi_nozzle(D=0.07391, Do=0.0422), 4)
    0.9701

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    beta = Do/D
    # ISO 5167-3 venturi-nozzle discharge coefficient correlation.
    return 0.9858 - 0.196*beta**4.5
# Relative pressure loss as a function of beta reatio for venturi nozzles
# Venturi nozzles should be between 65 mm and 500 mm; there are high and low
# loss ratios , with the high losses corresponding to small diameters,
# low high losses corresponding to large diameters
# Interpolation can be performed.
venturi_tube_betas = np.array(
[0.299160, 0.299470, 0.312390, 0.319010, 0.326580, 0.337290,
0.342020, 0.347060, 0.359030, 0.365960, 0.372580, 0.384870,
0.385810, 0.401250, 0.405350, 0.415740, 0.424250, 0.434010,
0.447880, 0.452590, 0.471810, 0.473090, 0.493540, 0.499240,
0.516530, 0.523800, 0.537630, 0.548060, 0.556840, 0.573890,
0.582350, 0.597820, 0.601560, 0.622650, 0.626490, 0.649480,
0.650990, 0.668700, 0.675870, 0.688550, 0.693180, 0.706180,
0.713330, 0.723510, 0.749540, 0.749650])
venturi_tube_dP_high = np.array(
[0.164534, 0.164504, 0.163591, 0.163508, 0.163439,
0.162652, 0.162224, 0.161866, 0.161238, 0.160786,
0.160295, 0.159280, 0.159193, 0.157776, 0.157467,
0.156517, 0.155323, 0.153835, 0.151862, 0.151154,
0.147840, 0.147613, 0.144052, 0.143050, 0.140107,
0.138981, 0.136794, 0.134737, 0.132847, 0.129303,
0.127637, 0.124758, 0.124006, 0.119269, 0.118449,
0.113605, 0.113269, 0.108995, 0.107109, 0.103688,
0.102529, 0.099567, 0.097791, 0.095055, 0.087681,
0.087648])
venturi_tube_dP_low = np.array(
[0.089232, 0.089218, 0.088671, 0.088435, 0.088206,
0.087853, 0.087655, 0.087404, 0.086693, 0.086241,
0.085813, 0.085142, 0.085102, 0.084446, 0.084202,
0.083301, 0.082470, 0.081650, 0.080582, 0.080213,
0.078509, 0.078378, 0.075989, 0.075226, 0.072700,
0.071598, 0.069562, 0.068128, 0.066986, 0.064658,
0.063298, 0.060872, 0.060378, 0.057879, 0.057403,
0.054091, 0.053879, 0.051726, 0.050931, 0.049362,
0.048675, 0.046522, 0.045381, 0.043840, 0.039913,
0.039896])
#ratios_average = 0.5*(ratios_high + ratios_low)
D_bound_venturi_tube = np.array([0.065, 0.5])
def dP_venturi_tube(D, Do, P1, P2):
    r'''Calculates the non-recoverable pressure drop of a venturi tube
    differential pressure meter from the measured pressure drop and the
    meter geometry.

    .. math::
        \epsilon = \frac{\Delta\bar w }{\Delta P}

    The loss fraction :math:`\epsilon` is interpolated from tabulated curves
    as a function of beta ratio, then blended between the 65 mm (high loss)
    and 500 mm (low loss) pipe-diameter curves (roughness impact).

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of venturi tube at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of venturi tube at the
        cross-section of the pressure tap, [Pa]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the venturi tube, [Pa]

    Notes
    -----
    The recoverable pressure drop should be recovered by 6 pipe diameters
    downstream of the venturi tube. A Reynolds-number correction described in
    [1]_ and [2]_ (multiplier decreasing from 1.75 to 1 over 1E5-6E5) is not
    included in this implementation.

    Examples
    --------
    >>> dP_venturi_tube(D=0.07366, Do=0.05, P1=200000.0, P2=183000.0)
    1788.5717754177406

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-4:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 4: Venturi Tubes.
    '''
    # Effect of Re is not currently included
    beta = Do/D
    # Loss fractions at the two tabulated pipe sizes for this beta
    frac_small_pipe = np.interp(beta, venturi_tube_betas, venturi_tube_dP_high)
    frac_large_pipe = np.interp(beta, venturi_tube_betas, venturi_tube_dP_low)
    # Blend linearly in D between the 65 mm and 500 mm curves
    loss_fraction = np.interp(D, D_bound_venturi_tube,
                              [frac_small_pipe, frac_large_pipe])
    return loss_fraction*(P1 - P2)
def diameter_ratio_cone_meter(D, Dc):
    r'''Calculates the diameter ratio `beta` characterizing a cone flow meter.

    .. math::
        \beta = \sqrt{1 - \frac{d_c^2}{D^2}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]

    Returns
    -------
    beta : float
        Cone meter diameter ratio, [-]

    Examples
    --------
    >>> diameter_ratio_cone_meter(D=0.2575, Dc=0.184)
    0.6995709873957624

    References
    ----------
    .. [1] Hollingshead, Colter. "Discharge Coefficient Performance of Venturi,
       Standard Concentric Orifice Plate, V-Cone, and Wedge Flow Meters at
       Small Reynolds Numbers." May 1, 2011.
       https://digitalcommons.usu.edu/etd/869.
    '''
    ratio = Dc/D
    return (1.0 - ratio*ratio)**0.5
def cone_meter_expansibility_Stewart(D, Dc, P1, P2, k):
    r'''Calculates the expansibility factor for a cone flow meter from its
    geometry, the measured pressures, and the isentropic exponent of the
    fluid. Developed in [1]_, also shown in [2]_.

    .. math::
        \epsilon = 1 - (0.649 + 0.696\beta^4) \frac{\Delta P}{\kappa P_1}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]
    P1 : float
        Static pressure of fluid upstream of cone meter at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid at the end of the center of the cone pressure
        tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    This formula was determined for the range of P2/P1 >= 0.75; the only gas
    used to determine the formula is air.

    Examples
    --------
    >>> cone_meter_expansibility_Stewart(D=1, Dc=0.9, P1=1E6, P2=8.5E5, k=1.2)
    0.9157343

    References
    ----------
    .. [1] Stewart, D. G., M. Reader-Harris, and NEL Dr RJW Peters. "Derivation
       of an Expansibility Factor for the V-Cone Meter." In Flow Measurement
       International Conference, Peebles, Scotland, UK, 2001.
    .. [2] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 5: Cone meters.
    '''
    # beta^2 of a cone meter is 1 - (Dc/D)^2; beta^4 is its square
    D_ratio = Dc/D
    beta2 = 1.0 - D_ratio*D_ratio
    return 1.0 - (0.649 + 0.696*beta2*beta2)*(P1 - P2)/(k*P1)
def dP_cone_meter(D, Dc, P1, P2):
    r'''Calculates the non-recoverable pressure drop of a cone meter from the
    measured pressures before and at the cone end, and the meter geometry,
    according to [1]_.

    .. math::
        \Delta \bar \omega = (1.09 - 0.813\beta)\Delta P

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]
    P1 : float
        Static pressure of fluid upstream of cone meter at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid at the end of the center of the cone pressure
        tap, [Pa]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the cone meter, [Pa]

    Notes
    -----
    The recoverable pressure drop should be recovered by 6 pipe diameters
    downstream of the cone meter.

    Examples
    --------
    >>> dP_cone_meter(1, .7, 1E6, 9.5E5)
    25470.093437973323

    References
    ----------
    .. [1] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 5: Cone meters.
    '''
    # beta = sqrt(1 - (Dc/D)^2), the cone-meter diameter ratio
    D_ratio = Dc/D
    beta = (1.0 - D_ratio*D_ratio)**0.5
    return (1.09 - 0.813*beta)*(P1 - P2)
def diameter_ratio_wedge_meter(D, H):
    r'''Calculates the diameter ratio `beta` characterizing a wedge flow
    meter as given in [1]_ and [2]_.

    .. math::
        \beta = \left(\frac{1}{\pi}\left\{\arccos\left[1 - \frac{2H}{D}
        \right] - 2 \left[1 - \frac{2H}{D}
        \right]\left(\frac{H}{D} - \left[\frac{H}{D}\right]^2
        \right)^{0.5}\right\}\right)^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]

    Returns
    -------
    beta : float
        Wedge meter diameter ratio, [-]

    Examples
    --------
    >>> diameter_ratio_wedge_meter(D=0.2027, H=0.0608)
    0.5022531424646643

    References
    ----------
    .. [1] Hollingshead, Colter. "Discharge Coefficient Performance of Venturi,
       Standard Concentric Orifice Plate, V-Cone, and Wedge Flow Meters at
       Small Reynolds Numbers." May 1, 2011.
       https://digitalcommons.usu.edu/etd/869.
    .. [2] IntraWedge WEDGE FLOW METER Type: IWM. January 2011.
       http://www.intra-automation.com/download.php?file=pdf/products/technical_information/en/ti_iwm_en.pdf
    '''
    H_D = H/D
    # 1 - 2H/D appears twice in the open-segment area formula
    chord_term = 1.0 - 2.0*H_D
    open_area_frac = (acos(chord_term)
                      - 2.0*chord_term*(H_D - H_D*H_D)**0.5)/pi
    return open_area_frac**0.5
def C_wedge_meter_Miller(D, H):
    r'''Calculates the coefficient of discharge of a wedge flow meter used
    for measuring the flow rate of fluid, based on the geometry of the
    differential pressure flow meter.

    For half-inch lines:

    .. math::
        C = 0.7883 + 0.107(1 - \beta^2)

    For 1 to 1.5 inch lines:

    .. math::
        C = 0.6143 + 0.718(1 - \beta^2)

    For 1.5 to 24 inch lines:

    .. math::
        C = 0.5433 + 0.2453(1 - \beta^2)

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]

    Returns
    -------
    C : float
        Coefficient of discharge of the wedge flow meter, [-]

    Notes
    -----
    There is an ISO standard being developed to cover wedge meters as of 2018.
    Wedge meters can have varying angles; 60 and 90 degree wedge meters have
    been reported. Tap locations 1 or 2 diameters (upstream and downstream),
    and 2D upstream/1D downstream have been used. Some wedges are sharp;
    some are smooth. [2]_ gives some experimental values.

    Examples
    --------
    >>> C_wedge_meter_Miller(D=0.1524, H=0.3*0.1524)
    0.7267069372687651

    References
    ----------
    .. [1] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    .. [2] Seshadri, V., S. N. Singh, and S. Bhargava. "Effect of Wedge Shape
       and Pressure Tap Locations on the Characteristics of a Wedge Flowmeter."
       IJEMS Vol.01(5), October 1994.
    '''
    # beta^2 is the open-segment area fraction of the pipe cross-section
    H_D = H/D
    chord_term = 1.0 - 2.0*H_D
    beta2 = (acos(chord_term) - 2.0*chord_term*(H_D - H_D*H_D)**0.5)/pi
    blockage = 1.0 - beta2
    if D <= 0.7*inch:
        # suggested limit 0.5 inch for this equation
        return 0.7883 + 0.107*blockage
    if D <= 1.4*inch:
        # Suggested limit is under 1.5 inches
        return 0.6143 + 0.718*blockage
    return 0.5433 + 0.2453*blockage
def C_Reader_Harris_Gallagher_wet_venturi_tube(mg, ml, rhog, rhol, D, Do, H=1):
    r'''Calculates the coefficient of discharge of the wet gas venturi tube
    based on the geometry of the tube, mass flow rates of liquid and vapor
    through the tube, the density of the liquid and gas phases, and an
    adjustable coefficient `H`.

    .. math::
        C = 1 - 0.0463\exp(-0.05Fr_{gas, th}) \cdot \min\left(1,
            \sqrt{\frac{X}{0.016}}\right)

        Fr_{gas, th} = \frac{Fr_{\text{gas, densiometric }}}{\beta^{2.5}}

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

        {Fr_{\text{gas, densiometric}}} = \frac{v_{gas}}{\sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}
        = \frac{4m_g}{\rho_{1,g} \pi D^2 \sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}

    Parameters
    ----------
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam) [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the wet gas venturi tube flow meter
        (includes flow rate of gas ONLY), [-]

    Notes
    -----
    This model has more error than single phase differential pressure meters.
    The model was first published in [1]_, and became ISO 11583 later.

    The limits of this correlation according to [2]_ are as follows:

    .. math::
        0.4 \le \beta \le 0.75

        0 < X \le 0.3

        Fr_{gas, th} > 3

        \frac{\rho_g}{\rho_l} > 0.02

        D \ge 50 \text{ mm}

    Examples
    --------
    >>> C_Reader_Harris_Gallagher_wet_venturi_tube(mg=5.31926, ml=5.31926/2,
    ... rhog=50.0, rhol=800., D=.1, Do=.06, H=1)
    0.9754210845876333

    References
    ----------
    .. [1] Reader-harris, Michael, and Tuv Nel. An Improved Model for
       Venturi-Tube Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    # Superficial gas velocity in the upstream pipe
    V = 4*mg/(rhog*pi*D**2)
    # Densiometric gas Froude number (light phase moving in heavy phase)
    Frg = Froude_densimetric(V, L=D, rho1=rhol, rho2=rhog, heavy=False)
    beta = Do/D
    # Throat Froude number
    Fr_gas_th = Frg*beta**-2.5
    # X per the model's definition above
    X = ml/mg*(rhog/rhol)**0.5
    # Dead code removed: the original also computed the over-reading terms
    # `n`, `C_Ch`, and `phi` (named OF), none of which contribute to C.
    return 1.0 - 0.0463*exp(-0.05*Fr_gas_th)*min(1.0, (X/0.016)**0.5)
def dP_Reader_Harris_Gallagher_wet_venturi_tube(D, Do, P1, P2, ml, mg, rhol,
                                                rhog, H=1):
    r'''Calculates the non-recoverable pressure drop of a wet gas venturi
    nozzle based on the pressure drop and the geometry of the venturi nozzle,
    the mass flow rates of liquid and gas through it, the densities of the
    vapor and liquid phase, and an adjustable coefficient `H`.

    .. math::
        Y = \frac{\Delta \bar \omega}{\Delta P} - 0.0896 - 0.48\beta^9

        Y_{max} = 0.61\exp\left[-11\frac{\rho_{1,g}}{\rho_l}
        - 0.045 \frac{Fr_{gas}}{H}\right]

        \frac{Y}{Y_{max}} = 1 - \exp\left[-35 X^{0.75} \exp
        \left( \frac{-0.28Fr_{gas}}{H}\right)\right]

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

        {Fr_{\text{gas, densiometric}}} = \frac{v_{gas}}{\sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}
        = \frac{4m_g}{\rho_{1,g} \pi D^2 \sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of venturi tube at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of venturi tube at the cross-
        section of the pressure tap, [Pa]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam) [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the wet gas venturi tube, [Pa]

    Notes
    -----
    The model was first published in [1]_, and became ISO 11583 later.
    (The Returns section previously mis-documented this function as returning
    a discharge coefficient; it returns the permanent pressure loss.)

    Examples
    --------
    >>> dP_Reader_Harris_Gallagher_wet_venturi_tube(D=.1, Do=.06, H=1,
    ... P1=6E6, P2=6E6-5E4, ml=5.31926/2, mg=5.31926, rhog=50.0, rhol=800.,)
    16957.43843129572

    References
    ----------
    .. [1] Reader-harris, Michael, and Tuv Nel. An Improved Model for
       Venturi-Tube Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    dP = P1 - P2
    beta = Do/D
    X = ml/mg*(rhog/rhol)**0.5

    # Densiometric gas Froude number from the superficial gas velocity
    V = 4*mg/(rhog*pi*D**2)
    Frg = Froude_densimetric(V, L=D, rho1=rhol, rho2=rhog, heavy=False)

    Y_ratio = 1.0 - exp(-35.0*X**0.75*exp(-0.28*Frg/H))
    Y_max = 0.61*exp(-11.0*rhog/rhol - 0.045*Frg/H)
    Y = Y_max*Y_ratio
    # Solve the first equation above for the permanent loss dw
    rhs = -0.0896 - 0.48*beta**9
    dw = dP*(Y - rhs)
    return dw
# Venturi tube discharge coefficients C as a function of Re; each *_Cs list
# pairs element-wise with the *_Res list above it, one pair of lists per
# convergent-section construction type.
as_cast_convergent_venturi_Res = [4E5, 6E4, 1E5, 1.5E5]
as_cast_convergent_venturi_Cs = [0.957, 0.966, 0.976, 0.982]
machined_convergent_venturi_Res = [5E4, 1E5, 2E5, 3E5,
                                   7.5E5, # 5E5 to 1E6
                                   1.5E6, # 1E6 to 2E6
                                   5E6]   # 2E6 to 1E8
machined_convergent_venturi_Cs = [0.970, 0.977, 0.992, 0.998, 0.995, 1.000, 1.010]
rough_welded_convergent_venturi_Res = [4E4, 6E4, 1E5]
rough_welded_convergent_venturi_Cs = [0.96, 0.97, 0.98]
as_cast_convergent_entrance_machined_venturi_Res = [1E4, 6E4, 1E5, 1.5E5,
                                                    3.5E5, # 2E5 to 5E5
                                                    3.2E6] # 5E5 to 3.2E6
as_cast_convergent_entrance_machined_venturi_Cs = [0.963, 0.978, 0.98, 0.987, 0.992, 0.995]
# Constant discharge coefficients used by _differential_pressure_C_epsilon
# for meter types without a Re-dependent correlation here.
CONE_METER_C = 0.82
ROUGH_WELDED_CONVERGENT_VENTURI_TUBE_C = 0.985
MACHINED_CONVERGENT_VENTURI_TUBE_C = 0.995
AS_CAST_VENTURI_TUBE_C = 0.984
def _differential_pressure_C_epsilon(D, D2, m, P1, P2, rho, mu, k, meter_type,
                                    taps=None):
    '''Helper function only: dispatch on `meter_type` to compute the
    expansibility factor and discharge coefficient of a meter.

    Returns
    -------
    epsilon : float
        Expansibility factor of the meter, [-]
    C : float
        Discharge coefficient of the meter, [-]

    Raises
    ------
    ValueError
        If `meter_type` is not one of the supported meter type constants.
    '''
    if meter_type == ISO_5167_ORIFICE:
        C = C_Reader_Harris_Gallagher(D=D, Do=D2, rho=rho, mu=mu, m=m, taps=taps)
        epsilon = orifice_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
    elif meter_type == LONG_RADIUS_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_long_radius_nozzle(D=D, Do=D2, rho=rho, mu=mu, m=m)
    elif meter_type == ISA_1932_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_ISA_1932_nozzle(D=D, Do=D2, rho=rho, mu=mu, m=m)
    elif meter_type == VENTURI_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_venturi_nozzle(D=D, Do=D2)
    elif meter_type == AS_CAST_VENTURI_TUBE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = AS_CAST_VENTURI_TUBE_C
    elif meter_type == MACHINED_CONVERGENT_VENTURI_TUBE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = MACHINED_CONVERGENT_VENTURI_TUBE_C
    elif meter_type == ROUGH_WELDED_CONVERGENT_VENTURI_TUBE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = ROUGH_WELDED_CONVERGENT_VENTURI_TUBE_C
    elif meter_type == CONE_METER:
        epsilon = cone_meter_expansibility_Stewart(D=D, Dc=D2, P1=P1, P2=P2, k=k)
        C = CONE_METER_C
    elif meter_type == WEDGE_METER:
        epsilon = orifice_expansibility_1989(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_wedge_meter_Miller(D=D, H=D2)
    else:
        # Previously an unrecognized meter_type fell through to an
        # UnboundLocalError at the return; fail with a clear message instead.
        raise ValueError('Unsupported meter type: %s' % (meter_type,))
    return epsilon, C
def differential_pressure_meter_solver(D, rho, mu, k, D2=None, P1=None, P2=None,
                                       m=None, meter_type=ISO_5167_ORIFICE,
                                       taps=None):
    r'''Calculates either the mass flow rate, the upstream pressure, the second
    pressure value, or the orifice diameter for a differential
    pressure flow meter based on the geometry of the meter, measured pressures
    of the meter, and the density, viscosity, and isentropic exponent of the
    fluid. This solves an equation iteratively to obtain the correct flow rate.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    k : float
        Isentropic exponent of fluid, [-]
    D2 : float, optional
        Diameter of orifice, or venturi meter orifice, or flow tube orifice,
        or cone meter end diameter, or wedge meter fluid flow height, [m]
    P1 : float, optional
        Static pressure of fluid upstream of differential pressure meter at the
        cross-section of the pressure tap, [Pa]
    P2 : float, optional
        Static pressure of fluid downstream of differential pressure meter or
        at the prescribed location (varies by type of meter) [Pa]
    m : float, optional
        Mass flow rate of fluid through the flow meter, [kg/s]
    meter_type : str, optional
        One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
        'venturi nozzle', 'as cast convergent venturi tube',
        'machined convergent venturi tube',
        'rough welded convergent venturi tube', 'cone meter',
        'wedge meter'), [-]
    taps : str, optional
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2';
        applies for orifice meters only, [-]

    Returns
    -------
    ans : float
        One of `m`, the mass flow rate of the fluid; `P1`, the pressure
        upstream of the flow meter; `P2`, the second pressure
        tap's value; and `D2`, the diameter of the measuring device; units
        of respectively, [kg/s], [Pa], [Pa], or [m]

    Notes
    -----
    See the appropriate functions for the documentation for the formulas and
    references used in each method.

    The solvers make some assumptions about the range of values answers may be
    in.

    Note that the solver for the upstream pressure uses the provided values of
    density, viscosity and isentropic exponent; whereas these values all
    depend on pressure (albeit to a small extent). An outer loop should be
    added with pressure-dependent values calculated in it for maximum accuracy.

    It would be possible to solve for the upstream pipe diameter, but there is
    no use for that functionality.

    Examples
    --------
    >>> differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
    ... meter_type='ISO 5167 orifice', taps='D')
    7.702338035732168

    >>> differential_pressure_meter_solver(D=0.07366, m=7.702338, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
    ... meter_type='ISO 5167 orifice', taps='D')
    0.04999999990831885
    '''
    # One shared residual replaces the four identical closures of the
    # original; whichever variable is unknown is passed as the trial value.
    def _residual(m_v, D2_v, P1_v, P2_v):
        # NOTE(review): _differential_pressure_C_epsilon returns
        # (epsilon, C), but it is unpacked here as (C, epsilon) -- the local
        # names are swapped. This matches the original code; it is harmless
        # only if orifice_discharge uses `C` and `expansibility` purely as a
        # product -- confirm against orifice_discharge before renaming.
        C, epsilon = _differential_pressure_C_epsilon(D, D2_v, m_v, P1_v,
                                                      P2_v, rho, mu, k,
                                                      meter_type, taps=taps)
        m_calc = orifice_discharge(D=D, Do=D2_v, P1=P1_v, P2=P2_v, rho=rho,
                                   C=C, expansibility=epsilon)
        return m_v - m_calc

    if m is None:
        # 2.81 kg/s initial guess, as in the original implementation
        return newton(lambda m_v: _residual(m_v, D2, P1, P2), 2.81)
    elif D2 is None:
        return brenth(lambda D2_v: _residual(m, D2_v, P1, P2),
                      D*(1-1E-9), D*5E-3)
    elif P2 is None:
        return brenth(lambda P2_v: _residual(m, D2, P1, P2_v),
                      P1*(1-1E-9), P1*0.7)
    elif P1 is None:
        return brenth(lambda P1_v: _residual(m, D2, P1_v, P2),
                      P2*(1+1E-9), P2*1.4)
    else:
        raise Exception('Solver is capable of solving for one of P2, D2, or m only.')
def differential_pressure_meter_dP(D, D2, P1, P2, C=None,
                                   meter_type=ISO_5167_ORIFICE):
    r'''Calculates the non-recoverable pressure drop of a differential
    pressure flow meter based on the geometry of the meter, measured pressures
    of the meter, and for most models the meter discharge coefficient.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    D2 : float
        Diameter of orifice, or venturi meter orifice, or flow tube orifice,
        or cone meter end diameter, or wedge meter fluid flow height, [m]
    P1 : float
        Static pressure of fluid upstream of differential pressure meter at the
        cross-section of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of differential pressure meter or
        at the prescribed location (varies by type of meter) [Pa]
    C : float, optional
        Discharge coefficient of the meter; required for the orifice and
        nozzle meter types only, [-]
    meter_type : str, optional
        One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
        'as cast convergent venturi tube',
        'machined convergent venturi tube',
        'rough welded convergent venturi tube', 'cone meter'), [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the differential pressure flow
        meter, [Pa]

    Raises
    ------
    NotImplementedError
        For wedge meters and venturi nozzles, which have no standard
        pressure drop formula available.
    ValueError
        If `meter_type` is not a recognized meter type constant.

    Notes
    -----
    See the appropriate functions for the documentation for the formulas and
    references used in each method.

    Examples
    --------
    >>> differential_pressure_meter_dP(D=0.07366, D2=0.05, P1=200000.0,
    ... P2=183000.0, meter_type='as cast convergent venturi tube')
    1788.5717754177406
    '''
    if meter_type in (ISO_5167_ORIFICE, LONG_RADIUS_NOZZLE, ISA_1932_NOZZLE):
        # All three use the ISO 5167 orifice permanent loss formula
        return dP_orifice(D=D, Do=D2, P1=P1, P2=P2, C=C)
    elif meter_type in (AS_CAST_VENTURI_TUBE, MACHINED_CONVERGENT_VENTURI_TUBE,
                        ROUGH_WELDED_CONVERGENT_VENTURI_TUBE):
        return dP_venturi_tube(D=D, Do=D2, P1=P1, P2=P2)
    elif meter_type == CONE_METER:
        return dP_cone_meter(D=D, Dc=D2, P1=P1, P2=P2)
    elif meter_type in (VENTURI_NOZZLE, WEDGE_METER):
        # Was `raise Exception(NotImplemented)` -- NotImplementedError is the
        # idiomatic (and still Exception-derived) signal here.
        raise NotImplementedError('No pressure drop formula is available for '
                                  'meter type %s' % (meter_type,))
    else:
        raise ValueError('Unsupported meter type: %s' % (meter_type,))
| 35.407688
| 108
| 0.607385
| 8,839
| 60,795
| 4.097409
| 0.115058
| 0.016153
| 0.014413
| 0.017616
| 0.660104
| 0.61946
| 0.588425
| 0.569429
| 0.547036
| 0.533313
| 0
| 0.092302
| 0.278625
| 60,795
| 1,716
| 109
| 35.428322
| 0.733514
| 0.653952
| 0
| 0.314516
| 0
| 0
| 0.069344
| 0.029807
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.018817
| 0
| 0.188172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42eb0db02ed2cdde4c36688526176ef0796f32f2
| 1,370
|
py
|
Python
|
git_plan/cli/commands/delete.py
|
synek/git-plan
|
4cf5429348a71fb5ea8110272fb89d20bfa38c38
|
[
"MIT"
] | 163
|
2021-03-06T12:01:06.000Z
|
2022-03-01T22:52:36.000Z
|
git_plan/cli/commands/delete.py
|
synek/git-plan
|
4cf5429348a71fb5ea8110272fb89d20bfa38c38
|
[
"MIT"
] | 61
|
2021-03-06T07:00:39.000Z
|
2021-04-13T10:25:58.000Z
|
git_plan/cli/commands/delete.py
|
synek/git-plan
|
4cf5429348a71fb5ea8110272fb89d20bfa38c38
|
[
"MIT"
] | 9
|
2021-03-07T17:52:57.000Z
|
2021-10-18T21:35:23.000Z
|
"""Delete command
Author: Rory Byrne <rory@rory.bio>
"""
from typing import Any
from git_plan.cli.commands.command import Command
from git_plan.service.plan import PlanService
from git_plan.util.decorators import requires_initialized, requires_git_repository
@requires_initialized
@requires_git_repository
class Delete(Command):
    """Delete an existing commit plan, after asking the user to confirm."""

    subcommand = 'delete'

    def __init__(self, plan_service: PlanService, **kwargs):
        """Store the injected plan service.

        Args:
            plan_service: Service used to list and delete commit plans.
        """
        super().__init__(**kwargs)
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would validate the dependency more robustly.
        assert plan_service, "Plan service not injected"
        self._plan_service = plan_service

    def command(self, **kwargs):
        """Prompt the user to choose a commit plan and delete it.

        (The previous docstring said "Create a new commit" -- a copy-paste
        error; this command deletes one.)
        """
        commits = self._plan_service.get_commits(self._repository)
        if not commits:
            self._ui.bold('No commits found.')
            return

        chosen_commit = self._ui.choose_commit(commits, 'Which plan do you want to delete?')
        self._ui.bold(f'{chosen_commit.message.headline}\n')

        confirm_msg = 'Are you sure you want to delete this commit?'
        if not self._ui.confirm(confirm_msg):
            self._ui.bold("Stopped.")
            return

        self._plan_service.delete_commit(chosen_commit)
        self._ui.bold('Deleted.')

    def register_subparser(self, subparsers: Any):
        """Register the 'delete' subcommand on the CLI argument parser."""
        subparsers.add_parser(Delete.subcommand, help='Delete a planned commit.')
| 31.136364
| 92
| 0.687591
| 173
| 1,370
| 5.196532
| 0.404624
| 0.097887
| 0.066741
| 0.066741
| 0.088988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210219
| 1,370
| 43
| 93
| 31.860465
| 0.830869
| 0.070073
| 0
| 0.074074
| 0
| 0
| 0.158313
| 0.027049
| 0
| 0
| 0
| 0
| 0.037037
| 1
| 0.111111
| false
| 0
| 0.148148
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42ef38196b7af8975b40694b6eb1954f2a48845e
| 1,926
|
py
|
Python
|
vision_module.py
|
seongdong2/GRADUATION
|
c38b13a2dd82a58bdba7673916408daa0d9b471e
|
[
"Unlicense"
] | 2
|
2021-09-19T13:52:05.000Z
|
2021-10-04T01:09:21.000Z
|
vision_module.py
|
seongdong2/graduation
|
c38b13a2dd82a58bdba7673916408daa0d9b471e
|
[
"Unlicense"
] | 1
|
2021-10-14T06:19:44.000Z
|
2021-10-14T06:19:44.000Z
|
vision_module.py
|
seongdong2/graduation
|
c38b13a2dd82a58bdba7673916408daa0d9b471e
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import cv2
# Class labels in the order the detector's class ids index them; index 0 is
# background. Only "person" detections are used by detect() below.
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
# MobileNet-SSD Caffe model, loaded once at import time; the prototxt and
# caffemodel files must be present in the working directory.
net = cv2.dnn.readNetFromCaffe(
    "MobileNetSSD_deploy.prototxt.txt", "MobileNetSSD_deploy.caffemodel")
# Mean-intensity threshold: a person crop darker than this counts as "black"
BLACK_CRITERIA = 60
def detect(frame):
    """Run the MobileNet-SSD detector on `frame` and return the darkest
    detected person.

    Returns (True, (x, y, w, h)) for the highest-confidence person box whose
    mean crop intensity is below BLACK_CRITERIA, else (False, None).
    """
    (frame_h, frame_w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()
    result_black = []
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.3:
            idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] == "person":
                # Bug fix: the original assigned the box size to `w`/`h`,
                # clobbering the frame dimensions and corrupting the scaling
                # of every detection after the first. Distinct names are
                # used for the box size now.
                box = detections[0, 0, i, 3:7] * np.array(
                    [frame_w, frame_h, frame_w, frame_h])
                startX, startY, endX, endY = box.astype("int")
                x, y, bw, bh = startX, startY, endX - startX, endY - startY
                # Trim a border of 1/6 of the smaller box side before
                # measuring darkness, to reduce background influence.
                cut_size = int(min(bw, bh) / 6)
                black_value = np.mean(frame[y + cut_size:y + bh - cut_size,
                                            x + cut_size:x + bw - cut_size])
                if black_value < BLACK_CRITERIA:
                    result_black.append((confidence, (x, y, bw, bh)))
    # (Dead `result_all` list of all person boxes removed; it was never used.)
    if result_black:
        # Return the box with the highest confidence among dark detections
        result_black.sort(key=lambda det: det[0])
        return True, result_black[-1][1]
    else:
        return False, None
def find_template(template, full_img):
    """Locate `template` inside `full_img` with TM_CCOEFF template matching.

    Returns the best-matching sub-image of `full_img` and its rectangle as
    (x, y, w, h).
    """
    t_h, t_w, _ = template.shape
    scores = cv2.matchTemplate(full_img.copy(), template, cv2.TM_CCOEFF)
    # minMaxLoc returns (min_val, max_val, min_loc, max_loc)
    _, _, _, best_loc = cv2.minMaxLoc(scores)
    x, y = best_loc
    return full_img[y:y + t_h, x:x + t_w], (x, y, t_w, t_h)
| 30.571429
| 106
| 0.555556
| 261
| 1,926
| 3.961686
| 0.413793
| 0.01354
| 0.011605
| 0.015474
| 0.073501
| 0.038685
| 0
| 0
| 0
| 0
| 0
| 0.038771
| 0.290239
| 1,926
| 63
| 107
| 30.571429
| 0.71763
| 0
| 0
| 0
| 0
| 0
| 0.101713
| 0.032174
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.159091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42efd3e55b344db382180d65f36b45d066baab96
| 618
|
py
|
Python
|
riccipy/metrics/lewis_papapetrou.py
|
cjayross/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | 4
|
2019-08-17T04:28:06.000Z
|
2021-01-02T15:19:18.000Z
|
riccipy/metrics/lewis_papapetrou.py
|
grdbii/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | 3
|
2019-08-02T04:07:43.000Z
|
2020-06-18T07:49:38.000Z
|
riccipy/metrics/lewis_papapetrou.py
|
grdbii/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | null | null | null |
"""
Name: Lewis Papapetrou
References: Ernst, Phys. Rev., v167, p1175, (1968)
Coordinates: Cartesian
"""
from sympy import Function, Rational, exp, symbols, zeros
coords = symbols("t x y z", real=True)
variables = ()
functions = symbols("k r s w", cls=Function)
t, x, y, z = coords
k, r, s, w = functions
metric = zeros(4)
metric[0, 0] = -exp(2 * s(x, y))
metric[3, 3] = (exp(-s(x, y)) * r(x, y) - w(x, y) * exp(s(x, y))) * (
exp(-s(x, y)) * r(x, y) + w(x, y) * exp(s(x, y))
)
metric[0, 3] = metric[3, 0] = -w(x, y) * exp(2 * s(x, y))
metric[1, 2] = metric[2, 1] = Rational(1, 2) * exp(2 * k(x, y) - 2 * s(x, y))
| 30.9
| 77
| 0.553398
| 121
| 618
| 2.826446
| 0.330579
| 0.087719
| 0.061404
| 0.070175
| 0.181287
| 0.181287
| 0.105263
| 0.105263
| 0.105263
| 0.105263
| 0
| 0.060852
| 0.202265
| 618
| 19
| 78
| 32.526316
| 0.63286
| 0.15534
| 0
| 0
| 0
| 0
| 0.027237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42f8e8791025cfd39e8878d6744a088d9902c8a3
| 1,206
|
py
|
Python
|
test/variable_type.py
|
bourne7/demo-python
|
0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f
|
[
"MIT"
] | null | null | null |
test/variable_type.py
|
bourne7/demo-python
|
0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f
|
[
"MIT"
] | null | null | null |
test/variable_type.py
|
bourne7/demo-python
|
0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f
|
[
"MIT"
] | null | null | null |
def do_loop():
    """Announce that this function was called."""
    print('Being Invoked.')
# A single * collects positional arguments into a tuple
def fun1(*args):  # fun1(1, 2, 3) makes args the tuple (1, 2, 3)
    """Print each positional argument on its own line."""
    for value in args:
        print(value)
# A double ** collects keyword arguments into a dict
def fun2(**args):  # fun2(a=1, b=2, c=3) makes args the dict {'a':1,'b':2,'c':3}
    """Print each keyword argument as 'key : value'."""
    # Bug fix: iterating a dict yields only its keys, so the original
    # `for k, v in args:` tried to unpack each key string and failed.
    # .items() yields the (key, value) pairs the unpacking requires.
    for k, v in args.items():
        print(k, ":", v)
# The six standard data types of Python 3
def show_type():
    """Print each of Python 3's six standard data types with a sample value.

    Note: isinstance(1, int) also matches parent classes; type() does not.
    """
    samples = [
        # Immutable types
        ('Number 数字', 123),
        ('String 字符串', 'Hello'),
        ('Tuple 元组', ('Hi', 786, 2.23, 'john', 70.2)),
        # Mutable types
        ('Sets 集合', {1, 2, 3, 4, 5}),
        ('List 列表', [1, 2, 3, 4, 5, 6]),
        ('Dictionary 字典', {'a': 'apple', 'b': 'banana', 'z': 1000}),
    ]
    for label, value in samples:
        print(label, type(value))
def test_mutable():
    """Demonstrate that in-place list operations preserve object identity."""
    base = [1, 2, 3]
    alias = base
    print(id(base), id(alias))
    # These three all mutate in place (internal methods), so the id of the
    # object does not change.
    alias.append(4)
    alias += [4]
    alias.extend([4])
    # By contrast, `alias = alias + [4]` would create a new object and
    # change the id.
    print(id(base), id(alias))
    print(base)
    print(alias)
if __name__ == '__main__':
    # Run the demonstrations only when executed as a script, not on import.
    print('Start test as main.')
    show_type()
    test_mutable()
| 19.451613
| 74
| 0.543118
| 195
| 1,206
| 3.230769
| 0.405128
| 0.066667
| 0.02381
| 0.012698
| 0.07619
| 0.019048
| 0
| 0
| 0
| 0
| 0
| 0.07545
| 0.263682
| 1,206
| 61
| 75
| 19.770492
| 0.634009
| 0.20398
| 0
| 0.057143
| 0
| 0
| 0.127637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42f979541235624972aa7beb6b4040036e613c33
| 951
|
py
|
Python
|
scrapystsytem/spiders/doubanmoviespider.py
|
mezhou887/ScrapySystem2017
|
888ac42bba36b541845244596db1644e332bf291
|
[
"Apache-2.0"
] | null | null | null |
scrapystsytem/spiders/doubanmoviespider.py
|
mezhou887/ScrapySystem2017
|
888ac42bba36b541845244596db1644e332bf291
|
[
"Apache-2.0"
] | null | null | null |
scrapystsytem/spiders/doubanmoviespider.py
|
mezhou887/ScrapySystem2017
|
888ac42bba36b541845244596db1644e332bf291
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from scrapystsytem.misc.commonspider import CommonSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor as sle
logger = logging.getLogger(__name__)
class DoubanMovieSpider(CommonSpider):
    """Crawl Douban's movie chart and extract rating fields from every
    /subject/<id>/ page reached from it."""
    name = "doubanmovie"
    allowed_domains = ["douban.com"]
    start_urls = [
        "https://movie.douban.com/chart"
    ]
    rules = [
        # BUGFIX: ("…") without a trailing comma is a plain string, not a
        # tuple — add the comma so `allow` is the intended 1-tuple of
        # regex patterns (LinkExtractor accepts both, so behavior is kept,
        # but the intent is now explicit).
        Rule(sle(allow=("/subject/[0-9]+/$",)), callback='parse_subject', follow=True),
    ]
    # CSS selectors handed to self.parse_with_rules for each subject page.
    content_css_rules = {
        'rating_per': '.rating_per::text',
        'rating_num': '.rating_num::text',
        'title': 'h1 span:nth-child(1)::text',
        'rating_people': '.rating_people span::text',
    }

    def parse_subject(self, response):
        """Extract the configured rating fields from one movie page."""
        item = self.parse_with_rules(response, self.content_css_rules, dict)
        # Lazy %-style logging args instead of eager string concatenation;
        # the rendered message is byte-identical to the original.
        logger.info('function: parse_subject, url: %s , item: %s', response.url, str(item))
        return item
| 31.7
| 89
| 0.648791
| 110
| 951
| 5.418182
| 0.581818
| 0.060403
| 0.050336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006614
| 0.205047
| 951
| 30
| 90
| 31.7
| 0.781746
| 0.022082
| 0
| 0
| 0
| 0
| 0.261572
| 0.024758
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42fe26b4d9e2cf96a145d2ebd3a33d07d37ab54e
| 2,476
|
py
|
Python
|
09/09b.py
|
thejoeejoee/aoc-2021
|
1ae7650aea42b5fbb60e891687cf7bc84c81bd66
|
[
"MIT"
] | 1
|
2021-12-01T17:43:38.000Z
|
2021-12-01T17:43:38.000Z
|
09/09b.py
|
thejoeejoee/aoc-2021
|
1ae7650aea42b5fbb60e891687cf7bc84c81bd66
|
[
"MIT"
] | null | null | null |
09/09b.py
|
thejoeejoee/aoc-2021
|
1ae7650aea42b5fbb60e891687cf7bc84c81bd66
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
import operator
from _operator import attrgetter, itemgetter
from collections import defaultdict, Counter
from functools import reduce, partial
from itertools import chain
from aocd import get_data
EMPTY = type('EMPTY', (int,), dict(__repr__=(f := lambda s: 'EMPTY'), __str__=f))(10)
def windowed(seq, n):
    """Yield every contiguous length-``n`` slice of ``seq``, in order.

    Yields nothing when ``len(seq) < n``.
    """
    last_start = len(seq) - n
    for start in range(last_start + 1):
        yield seq[start:start + n]
def compose(*fs):
    """Right-to-left function composition.

    ``compose(f, g)(x)`` equals ``f(g(x))``; with no arguments it is
    the identity function.
    """
    def composed(x):
        for fn in reversed(fs):
            x = fn(x)
        return x
    return composed
# Puzzle input: one row of height digits per line (fetched via aocd).
heights = get_data().strip().splitlines()
# Grid dimensions INCLUDING the one-cell EMPTY border added in main().
HEIGHT = len(heights) + 2
WIDTH = len(heights[0]) + 2
def get_neighbors(data, pos):
    """Yield ``(position, value)`` for the 4-connected neighbors of
    ``pos`` that fall inside the padded HEIGHT x WIDTH grid.

    ``data`` is the flattened row-major grid.
    """
    row, col = pos
    candidates = (
        (row, col + 1),
        (row, col - 1),
        (row + 1, col),
        (row - 1, col),
    )
    for r, c in candidates:
        if 0 <= r < HEIGHT and 0 <= c < WIDTH:
            yield (r, c), data[r * WIDTH + c]
def find_low_points(levels):
    """Yield (row, col) coordinates of every local minimum of the
    flattened, EMPTY-padded height map ``levels``.

    A cell is a low point when it is strictly lower than all four of its
    neighbors: left/right come from the sliding 3-window, top/bottom are
    indexed one padded-grid row away.
    """
    for triplet_i, triplet in filter(
        # Keep only windows whose CENTER element is not the EMPTY sentinel.
        # compose applies right-to-left: take the (index, triple) pair,
        # select the triple, select its middle element, compare != EMPTY.
        compose(partial(operator.ne, EMPTY), itemgetter(1), itemgetter(1)),
        # start=1 so triplet_i is the flat index of the window's CENTER:
        # the window starting at i covers indexes i-1, i, i+1.
        enumerate(windowed(levels, 3), start=1)
    ):
        row = triplet_i // WIDTH
        col = triplet_i % WIDTH
        left, center, right = triplet
        # Vertical neighbors are exactly one padded-grid row away.
        top = levels[(row - 1) * WIDTH + col]
        bottom = levels[(row + 1) * WIDTH + col]
        # operator.lt(center, x) == center < x for every neighbor.
        if all(map(partial(operator.lt, center), (left, right, top, bottom))):
            yield row, col
def main():
    """Flood-fill a basin from each low point and return the product of
    the three largest basin sizes (AoC 2021 day 9 part 2)."""
    # Flatten the height map into one tuple with a one-cell EMPTY border
    # on every side, so neighbor lookups never need bounds special-cases.
    data = tuple(chain(
        (EMPTY for _ in range(WIDTH)),
        *(((EMPTY,) + tuple(int(c) for c in line) + (EMPTY,)) for line in heights),
        (EMPTY for _ in range(WIDTH)),
    ))
    basins = Counter()
    for low_point in find_low_points(data):
        known = set()
        to_explore = {low_point}
        # Flood-fill outward from the low point; a basin stops at height-9
        # cells and at the EMPTY border.
        while to_explore:
            exploring = to_explore.pop()
            known.add(exploring)
            r, c = exploring
            current = data[r * WIDTH + c]
            for neighbor, level in get_neighbors(data, exploring):
                # BUGFIX: test the POSITION against the visited set. The
                # original `if level in known` compared a height int with a
                # set of coordinate tuples, so it never matched and visited
                # cells were re-queued needlessly (same result, wasted work).
                if neighbor in known:
                    continue
                if level > current and level not in (EMPTY, 9):
                    to_explore.add(neighbor)
        basins[low_point] = len(known)
    return reduce(
        operator.mul,
        map(itemgetter(1), basins.most_common(3))
    )
# Print the puzzle answer only when executed as a script.
if __name__ == '__main__':
    print(main())
| 26.340426
| 91
| 0.560582
| 330
| 2,476
| 4.090909
| 0.363636
| 0.017778
| 0.023704
| 0.014815
| 0.056296
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013618
| 0.317851
| 2,476
| 93
| 92
| 26.623656
| 0.785672
| 0.061389
| 0
| 0.061538
| 0
| 0
| 0.007759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.092308
| 0.015385
| 0.2
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42ff0390633d326bb027aa10d5b16efa20802940
| 1,343
|
py
|
Python
|
tests/test_window.py
|
yogeshkumarpilli/detectron2
|
f4f276dc8239b2c5a1bbbf6ed234acd25c75a522
|
[
"Apache-2.0"
] | null | null | null |
tests/test_window.py
|
yogeshkumarpilli/detectron2
|
f4f276dc8239b2c5a1bbbf6ed234acd25c75a522
|
[
"Apache-2.0"
] | null | null | null |
tests/test_window.py
|
yogeshkumarpilli/detectron2
|
f4f276dc8239b2c5a1bbbf6ed234acd25c75a522
|
[
"Apache-2.0"
] | 3
|
2021-12-17T04:28:02.000Z
|
2022-02-22T18:18:03.000Z
|
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2 import model_zoo
import cv2
import numpy as np
import requests
# --- Fetch a sample image over HTTP and decode it with OpenCV ---
resp = requests.get("https://thumbor.forbes.com/thumbor/fit-in/1200x0/filters%3Aformat%28jpg%29/https%3A%2F%2Fspecials-images.forbesimg.com%2Fimageserve%2F5f15af31465263000625ce08%2F0x0.jpg")
raw = np.asarray(bytearray(resp.content), dtype="uint8")
img = cv2.imdecode(raw, cv2.IMREAD_COLOR)

# --- Build the detectron2 config from the model zoo ---
config_file = 'COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml'
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(config_file))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.75  # detection score threshold
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
cfg.MODEL.DEVICE = "cuda"  # cpu or cuda

# --- Run inference ---
predictor = DefaultPredictor(cfg)
output = predictor(img)
print(output)

# --- Visualize the predictions (BGR <-> RGB flip via [:, :, ::-1]) ---
viz = Visualizer(img[:, :, ::-1],
                 metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                 scale=0.8,
                 instance_mode=ColorMode.IMAGE
                 )
drawn = viz.draw_instance_predictions(output["instances"].to("cpu"))
cv2.imshow('images', drawn.get_image()[:, :, ::-1])
cv2.waitKey(0)
| 37.305556
| 191
| 0.737156
| 181
| 1,343
| 5.320442
| 0.546961
| 0.07269
| 0.022845
| 0.037383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053136
| 0.145197
| 1,343
| 36
| 192
| 37.305556
| 0.785714
| 0.050633
| 0
| 0
| 0
| 0.035714
| 0.193522
| 0.035628
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
42ff644535c1107deafd0fab424dd9161db0897b
| 9,920
|
py
|
Python
|
hydra/cli.py
|
albertoa/hydra
|
8161e75829e4e76cb91ce516bbf03c258a87ce9e
|
[
"Apache-2.0"
] | 28
|
2020-11-05T16:04:51.000Z
|
2021-02-16T22:58:10.000Z
|
hydra/cli.py
|
albertoa/hydra
|
8161e75829e4e76cb91ce516bbf03c258a87ce9e
|
[
"Apache-2.0"
] | 43
|
2020-11-06T19:21:39.000Z
|
2021-02-25T19:04:42.000Z
|
hydra/cli.py
|
albertoa/hydra
|
8161e75829e4e76cb91ce516bbf03c258a87ce9e
|
[
"Apache-2.0"
] | 4
|
2020-11-06T08:54:57.000Z
|
2021-01-18T03:26:00.000Z
|
import os
import yaml
import json
import click
import hydra.utils.constants as const
from hydra.utils.git import check_repo
from hydra.utils.utils import dict_to_string, inflate_options
from hydra.cloud.local_platform import LocalPlatform
from hydra.cloud.fast_local_platform import FastLocalPlatform
from hydra.cloud.google_cloud_platform import GoogleCloudPlatform
from hydra.cloud.aws_platform import AWSPlatform
from hydra.version import __version__
# Root command group of the hydra CLI; subcommands (e.g. `run`) attach via
# @cli.command(). Kept docstring-free on purpose: click would surface a
# docstring as --help text.
@click.group()
@click.version_option(__version__)
def cli():
    pass
# `hydra run`: resolve options (CLI flag > hydra.yaml > constants default),
# then dispatch each experiment to the selected platform backend.
@cli.command()
# Generic options
@click.option('-y', '--yaml_path', default='hydra.yaml', type=str)
@click.option('-p', '--project_name', default=None, type=str)
@click.option('-m', '--model_path', default=None, type=str)
@click.option('--cloud', default=None, type=click.Choice(['fast_local','local', 'aws', 'gcp', 'azure'], case_sensitive=False))
@click.option('--github_token', envvar='GITHUB_TOKEN')  # Takes either an option or environment var
# Cloud specific options
@click.option('--cpu_count', default=None, type=click.IntRange(0, 96), help='Number of CPU cores required')
@click.option('--memory_size', default=None, type=click.IntRange(0, 624), help='GB of RAM required')
@click.option('--gpu_count', default=None, type=click.IntRange(0, 8), help="Number of accelerator GPUs")
@click.option('--gpu_type', default=None, type=str, help="Accelerator GPU type")
@click.option('--region', default=None, type=str, help="Region of cloud server location")
# AWS specific options
@click.option('--metadata_db_hostname', default=None, type=str, help="Hostname of the RDS instance storing job metadata")
@click.option('--metadata_db_username_secret', default=None, type=str, help="Secret name in AWS of the username of the RDS instance storing job metadata")
@click.option('--metadata_db_password_secret', default=None, type=str, help="Secret name in AWS of the password of the RDS instance storing job metadata")
@click.option('--metadata_db_name', default=None, type=str, help="Database name of the RDS instance storing job metadata")
# Docker Options
@click.option('-t', '--image_tag', default=None, type=str, help="Docker image tag name")
@click.option('-u', '--image_url', default=None, type=str, help="Url to the docker image on cloud")
# Env variable of model file
@click.option('-o', '--options', default=None, type=str, help='Environmental variables for the script')
def run(
        yaml_path,
        project_name,
        model_path,
        cloud,
        github_token,
        cpu_count,
        memory_size,
        gpu_count,
        gpu_type,
        region,
        metadata_db_hostname,
        metadata_db_username_secret,
        metadata_db_password_secret,
        metadata_db_name,
        image_tag,
        image_url,
        options):
    # If YAML config file available to supplement the command line arguments.
    if os.path.isfile(yaml_path):
        with open(yaml_path) as f:
            print("[Hydra Info]: Loading run info from {}...".format(yaml_path))
            data = yaml.load(f, Loader=yaml.FullLoader)
        # NOTE(review): a missing 'run' key yields '' here, and ''.get(...)
        # below would raise AttributeError — confirm hydra.yaml always has it.
        run_data = data.get('run', '')
        project_name = run_data.get('project_name')
        if project_name is None:
            raise ValueError("project_name option is required")
        # CLI flags win over YAML values; YAML wins over the constants.
        model_path = run_data.get('model_path', const.MODEL_PATH_DEFAULT) if model_path is None else model_path
        cloud = run_data.get('cloud', const.CLOUD_DEFAULT).lower() if cloud is None else cloud
        image_tag = run_data.get('image_tag', const.IMAGE_TAG_DEFAULT) if image_tag is None else image_tag
        image_url = run_data.get('image_url', const.IMAGE_URL_DEFAULT) if image_url is None else image_url
        if image_tag == '' and image_url != '':
            raise Exception("image_tag is required when passing a custom image_url")
        if cloud == 'gcp' or cloud == 'aws':
            # Hardware sizing only applies to the managed cloud backends.
            region = run_data.get('region', const.REGION_DEFAULT) if region is None else region
            cpu_count = run_data.get('cpu_count', const.CPU_COUNT_DEFAULT) if cpu_count is None else cpu_count
            memory_size = run_data.get('memory_size', const.MEMORY_SIZE_DEFAULT) if memory_size is None else memory_size
            gpu_count = run_data.get('gpu_count', const.GPU_COUNT_DEFAULT) if gpu_count is None else gpu_count
            gpu_type = run_data.get('gpu_type', const.GPU_TYPE_DEFAULT) if gpu_type is None else gpu_type
            if cloud == 'aws':
                # AWS additionally needs the RDS metadata-store coordinates.
                metadata_db_hostname = run_data.get('metadata_db_hostname', const.METADATA_DB_HOSTNAME) if metadata_db_hostname is None else metadata_db_hostname
                metadata_db_username_secret = run_data.get('metadata_db_username_secret', const.METADATA_DB_USERNAME_SECRET) if metadata_db_username_secret is None else metadata_db_username_secret
                metadata_db_password_secret = run_data.get('metadata_db_password_secret', const.METADATA_DB_PASSWORD_SECRET) if metadata_db_password_secret is None else metadata_db_password_secret
                metadata_db_name = run_data.get('metadata_db_name', const.METADATA_DB_NAME) if metadata_db_name is None else metadata_db_name
        elif cloud == 'local' or cloud == 'fast_local':
            pass
        else:
            raise RuntimeError("Reached parts of Hydra that are either not implemented or recognized.")
        options_list = run_data.get('options', const.OPTIONS_DEFAULT) if options is None else options
        # YAML may carry options as a JSON string; the CLI always does.
        if type(options_list) is str:
            options_list = json.loads(options_list)
    # Read the options for run from the CLI only.
    else:
        model_path = const.MODEL_PATH_DEFAULT if model_path is None else model_path
        cloud = const.CLOUD_DEFAULT if cloud is None else cloud
        region = const.REGION_DEFAULT if region is None else region
        cpu_count = const.CPU_COUNT_DEFAULT if cpu_count is None else cpu_count
        memory_size = const.MEMORY_SIZE_DEFAULT if memory_size is None else memory_size
        gpu_count = const.GPU_COUNT_DEFAULT if gpu_count is None else gpu_count
        gpu_type = const.GPU_TYPE_DEFAULT if gpu_type is None else gpu_type
        image_tag = const.IMAGE_TAG_DEFAULT if image_tag is None else image_tag
        image_url = const.IMAGE_URL_DEFAULT if image_url is None else image_url
        options = str(const.OPTIONS_DEFAULT) if options is None else options
        options_list = json.loads(options)
        if cloud == 'aws':
            metadata_db_hostname = const.METADATA_DB_HOSTNAME if metadata_db_hostname is None else metadata_db_hostname
            metadata_db_username_secret = const.METADATA_DB_USERNAME_SECRET if metadata_db_username_secret is None else metadata_db_username_secret
            metadata_db_password_secret = const.METADATA_DB_PASSWORD_SECRET if metadata_db_password_secret is None else metadata_db_password_secret
            metadata_db_name = const.METADATA_DB_NAME if metadata_db_name is None else metadata_db_name
    # Normalize a single options mapping into a list of mappings.
    if isinstance(options_list, dict):
        options_list = [options_list]
    options_list_inflated = inflate_options(options_list)
    if cloud == 'aws':
        git_url, commit_sha = '', ''
    else:
        git_url, commit_sha = check_repo(github_token)
    # Core environment handed to every experiment alongside its options.
    hydra_core_configs = {
        'HYDRA_PLATFORM': cloud,
        'HYDRA_GIT_URL': git_url or '',
        'HYDRA_COMMIT_SHA': commit_sha or '',
        'HYDRA_OAUTH_TOKEN': github_token,
        'HYDRA_MODEL_PATH': model_path
    }
    print("\n[Hydra Info]: Executing experiments with the following options: \n {}\n".format(options_list_inflated))
    # One platform dispatch per inflated option set.
    for i, options in enumerate(options_list_inflated):
        options_str = dict_to_string(options)
        hydra_core_configs_str = dict_to_string(hydra_core_configs)
        print("\n[Hydra Info]: Runnning experiment #{} with the following options: \n {}\n".format(i, options))
        # fast_local runs immediately and skips the platform dispatch below.
        if cloud == 'fast_local':
            platform = FastLocalPlatform(model_path,
                                         f"{options_str} {hydra_core_configs_str}")
            platform.run()
            continue
        if cloud == 'local':
            platform = LocalPlatform(
                model_path=model_path,
                options=options_str,
                git_url=git_url,
                commit_sha=commit_sha,
                github_token=github_token,
                image_url=image_url,
                image_tag=image_tag)
        elif cloud == 'gcp':
            platform = GoogleCloudPlatform(
                model_path=model_path,
                github_token=github_token,
                cpu=cpu_count,
                memory=memory_size,
                gpu_count=gpu_count,
                gpu_type=gpu_type,
                region=region,
                git_url=git_url,
                commit_sha=commit_sha,
                image_url=image_url,
                image_tag=image_tag,
                options=options_str)
        elif cloud == 'aws':
            platform = AWSPlatform(
                model_path=model_path,
                project_name=project_name,
                github_token=github_token,
                cpu=cpu_count,
                memory=memory_size,
                gpu_count=gpu_count,
                region=region,
                git_url=git_url,
                commit_sha=commit_sha,
                hydra_version=__version__,
                metadata_db_hostname=metadata_db_hostname,
                metadata_db_username_secret=metadata_db_username_secret,
                metadata_db_password_secret=metadata_db_password_secret,
                metadata_db_name=metadata_db_name,
                image_url=image_url,
                image_tag=image_tag,
                options=options
            )
        else:
            raise RuntimeError("Reached parts of Hydra that are not yet implemented.")
        platform.run()
    return 0
| 44.684685
| 200
| 0.674698
| 1,328
| 9,920
| 4.745482
| 0.13253
| 0.082513
| 0.04443
| 0.049508
| 0.544906
| 0.501269
| 0.471279
| 0.445255
| 0.43034
| 0.391622
| 0
| 0.001327
| 0.240323
| 9,920
| 221
| 201
| 44.886878
| 0.834926
| 0.025
| 0
| 0.209302
| 0
| 0
| 0.15668
| 0.016351
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011628
| false
| 0.046512
| 0.069767
| 0
| 0.087209
| 0.017442
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e0596f60ea2aacca4a2e542940c06bbc4f394b7
| 25,458
|
py
|
Python
|
utils/dataset_utils.py
|
Daipuwei/YOLO-tf2
|
1b2e7133c99507573f419c8a367a8dba4abeae5b
|
[
"MIT"
] | null | null | null |
utils/dataset_utils.py
|
Daipuwei/YOLO-tf2
|
1b2e7133c99507573f419c8a367a8dba4abeae5b
|
[
"MIT"
] | null | null | null |
utils/dataset_utils.py
|
Daipuwei/YOLO-tf2
|
1b2e7133c99507573f419c8a367a8dba4abeae5b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2021/9/18 下午11:23
# @Author : DaiPuWei
# @Email : 771830171@qq.com
# @File : dataset_utils.py
# @Software: PyCharm
"""
这是YOLO模型数据集
"""
import cv2
import numpy as np
from PIL import Image
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from utils.model_utils import get_classes
from utils.model_utils import get_anchors
def resize_keep_aspect_ratio(image_src, dst_size, value=None):
    '''
    Pad the source image with a constant border until it is square, then
    resize it to the requested size (OpenCV backend).
    Args:
        image_src: source image as an H x W x C array
        dst_size: target size forwarded to cv2.resize
                  (NOTE(review): cv2.resize expects (width, height) while
                  the unpacking below reads (h, w) — confirm callers only
                  pass square sizes)
        value: per-channel border fill value; defaults to mid-gray
               (128, 128, 128)
    Returns:
        the padded, resized image
    '''
    # BUGFIX: avoid a mutable default argument; fall back to mid-gray here.
    if value is None:
        value = [128, 128, 128]
    # Source and target sizes.
    src_h, src_w, _ = np.shape(image_src)
    dst_h, dst_w = dst_size
    # Decide which side needs padding and split it across both edges.
    if src_h < src_w:  # pad along h
        delta = src_w - src_h
        top = int(delta // 2)
        down = delta - top
        left = 0
        right = 0
    else:  # pad along w
        delta = src_h - src_w
        top = 0
        down = 0
        left = int(delta // 2)
        right = delta - left
    borderType = cv2.BORDER_CONSTANT
    image_dst = cv2.copyMakeBorder(image_src, top, down, left, right, borderType, None, value)
    image_dst = cv2.resize(image_dst, dst_size)
    return image_dst
def letterbox_image(image, size):
    '''
    Fit a PIL image inside ``size`` while keeping its aspect ratio,
    centering it on a mid-gray canvas (PIL backend).
    Args:
        image: source PIL image
        size: target (width, height)
    Returns:
        the letterboxed RGB image
    '''
    src_w, src_h = image.size
    dst_w, dst_h = size
    # Largest scale that still fits both dimensions.
    ratio = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)
    scaled = image.resize((new_w, new_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    # Center the scaled image; the leftover border stays gray.
    canvas.paste(scaled, ((dst_w - new_w) // 2, (dst_h - new_h) // 2))
    return canvas
def rand(a=0, b=1):
    """Return a uniform random float in ``[a, b)``."""
    span = b - a
    return a + span * np.random.rand()
class Dataset(object):
def __init__(self, dataset_path, classes_path, anchors_path, batch_size, target_size,
             max_boxes_num=20, use_mosaic=False, random=True, model_name='yolov3'):
    '''
    Object-detection dataset initializer.
    Args:
        dataset_path: path to the COCO-format dataset txt file
        classes_path: path to the class-names txt file
        anchors_path: path to the anchor-boxes txt file
        batch_size: mini-batch size
        target_size: target image size (h, w)
        max_boxes_num: maximum number of boxes per image, default 20
        use_mosaic: whether to use mosaic augmentation, default False
        random: whether to apply random augmentation, default True
        model_name: model name, default 'yolov3'
    '''
    self.dataset_path = dataset_path
    self.classes_path = classes_path
    self.anchors_path = anchors_path
    self.target_size = target_size
    self.max_boxes_num = max_boxes_num
    self.use_mosaic = use_mosaic
    self.random = random
    self.model_name = model_name
    # Read every annotation line, then shuffle them once up front.
    with open(self.dataset_path, 'r') as source:
        lines = [row for row in source.readlines()]
    self.annotation_lines = np.random.permutation(np.array(lines))
    self.size = len(self.annotation_lines)
    self.batch_size = batch_size
    # A partial final batch still counts as one iteration.
    self.iter_num = self.size // self.batch_size
    if self.size % self.batch_size != 0:
        self.iter_num += 1
    # Load anchors and class names from their txt files.
    self.anchors = get_anchors(self.anchors_path)
    self.classes_names = get_classes(self.classes_path)
    self.num_anchors = len(self.anchors)
    self.num_classes = len(self.classes_names)
    # Augmentation hyper-parameters.
    self.jitter = 0.3
    self.hue = .1
    self.sat = 1.5
    self.val = 1.5
def get_batch_data_with_mosaic(self, batch_annotation_lines):
    '''
    Build a batch of mosaic-augmented images and box labels.
    Args:
        batch_annotation_lines: yolo-format annotation lines for the batch
    Returns:
        (images, boxes) as numpy arrays
    '''
    mosaic_images = []
    mosaic_boxes = []
    total = len(batch_annotation_lines)
    # Mosaic stitches four source images into one; a short final group is
    # padded inside get_random_data_with_mosaic itself.
    for begin in range(0, total, 4):
        group = batch_annotation_lines[begin:begin + 4]
        image_data, box_data = self.get_random_data_with_mosaic(group)
        mosaic_images.append(image_data)
        mosaic_boxes.append(box_data)
    return np.array(mosaic_images), np.array(mosaic_boxes)
def get_random_data_with_mosaic(self, batch_lines):
    """
    Load up to 4 images and their labels, and stitch them into one
    mosaic-augmented image.
    :param batch_lines: up to 4 yolo-format annotation lines
    :return: (new_image, bbox) — the stitched image in [0, 1] and a
             (max_boxes_num, 5) box array
    """
    h, w = self.target_size
    min_offset_x = 0.3
    min_offset_y = 0.3
    scale_low = 1 - min(min_offset_x, min_offset_y)
    scale_high = scale_low + 0.2
    image_datas = []
    box_datas = []
    index = 0
    # Paste origin per quadrant: top-left, bottom-left, bottom-right, top-right.
    place_x = [0, 0, int(w * min_offset_x), int(w * min_offset_x)]
    place_y = [0, int(h * min_offset_y), int(h * min_offset_y), 0]
    # The batch may hold fewer than 4 images; top it up with random repeats.
    size = len(batch_lines)
    if size < 4:
        dif = 4 - len(batch_lines)
        _batch_line = [line for line in batch_lines]
        for i in np.arange(dif):
            random_index = np.random.randint(0,size)
            _batch_line.append(batch_lines[random_index])
        batch_lines = np.array(_batch_line)
    # Walk the four images, loading image data and ground-truth boxes.
    for line in batch_lines:
        # Split the annotation line: image path, then x1,y1,x2,y2,cls tuples.
        line_content = line.split()
        # Open the image.
        image = Image.open(line_content[0])
        image = image.convert("RGB")
        # Source image size.
        iw, ih = image.size
        # Parse the box coordinates.
        box = np.array([np.array(list(map(int, box.split(',')))) for box in line_content[1:]])
        # Randomly flip the image (and mirror the boxes with it).
        flip = rand() < .5
        if flip and len(box) > 0:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            box[:, [0, 2]] = iw - box[:, [2, 0]]
        # Rescale the image to the target aspect ratio at a random scale.
        new_ar = w / h
        scale = rand(scale_low, scale_high)
        if new_ar < 1:
            nh = int(scale * h)
            nw = int(nh * new_ar)
        else:
            nw = int(scale * w)
            nh = int(nw / new_ar)
        image = image.resize((nw, nh), Image.BICUBIC)
        # Random HSV color jitter (hue in degrees for OpenCV float HSV).
        hue = rand(-self.hue, self.hue)
        sat = rand(1, self.sat) if rand() < .5 else 1 / rand(1, self.sat)
        val = rand(1, self.val) if rand() < .5 else 1 / rand(1, self.val)
        x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
        x[..., 0] += hue * 360
        x[..., 0][x[..., 0] > 1] -= 1
        x[..., 0][x[..., 0] < 0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x[:, :, 0] > 360, 0] = 360
        x[:, :, 1:][x[:, :, 1:] > 1] = 1
        x[x < 0] = 0
        image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)  # numpy array, 0 to 1
        image = Image.fromarray((image * 255).astype(np.uint8))
        # Paste the image at its quadrant's origin on a gray canvas.
        dx = place_x[index]
        dy = place_y[index]
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_image.paste(image, (dx, dy))
        image_data = np.array(new_image) / 255
        index = index + 1
        box_data = []
        # Remap the boxes into the canvas and drop degenerate ones.
        if len(box) > 0:
            np.random.shuffle(box)
            box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
            box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
            box[:, 0:2][box[:, 0:2] < 0] = 0
            box[:, 2][box[:, 2] > w] = w
            box[:, 3][box[:, 3] > h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            # Keep only boxes wider and taller than one pixel.
            box = box[np.logical_and(box_w > 1, box_h > 1)]
            box_data = np.zeros((len(box), 5))
            box_data[:len(box)] = box
        image_datas.append(image_data)
        box_datas.append(box_data)
    # Cut the four canvases at a random seam and stitch the quadrants.
    cutx = np.random.randint(int(w * min_offset_x), int(w * (1 - min_offset_x)))
    cuty = np.random.randint(int(h * min_offset_y), int(h * (1 - min_offset_y)))
    new_image = np.zeros([h, w, 3])
    new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
    new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
    new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
    new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
    # Merge the per-quadrant boxes, clipping them at the seams.
    merge_bbox = self.merge_bboxes(box_datas,cutx,cuty)
    #print(np.shape(merge_bbox))
    bbox = np.zeros((self.max_boxes_num, 5))
    if len(merge_bbox) != 0:
        if len(merge_bbox) > self.max_boxes_num:
            merge_bbox = merge_bbox[:self.max_boxes_num]
        bbox[:len(merge_bbox)] = merge_bbox
    return new_image,bbox
def merge_bboxes(self,bbox_data,cutx,cuty):
'''
这是mosaic数据增强中对4张图片的边界框标签进行合并的函数
Args:
bbox_data: 边界框标签数组
cutx: x坐标轴分界值
cuty: y坐标轴分界值
Returns:
'''
merge_bbox = []
for i,bboxes in enumerate(bbox_data):
if bboxes is not None:
for box in bboxes:
tmp_box = []
x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
if i == 0:
if y1 > cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if y2 - y1 < 5: # 相差过小则放弃
continue
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if x2 - x1 < 5: # 相差过小则放弃
continue
if i == 1:
if y2 < cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if y2 - y1 < 5: # 相差过小则放弃
continue
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if x2 - x1 < 5: # 相差过小则放弃
continue
if i == 2:
if y2 < cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if y2 - y1 < 5: # 相差过小则放弃
continue
if x2 >= cutx and x1 <= cutx:
x1 = cutx
if x2 - x1 < 5: # 相差过小则放弃
continue
if i == 3:
if y1 > cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if y2 - y1 < 5: # 相差过小则放弃
continue
if x2 >= cutx and x1 <= cutx:
x1 = cutx
if x2 - x1 < 5: # 相差过小则放弃
continue
tmp_box.append(x1)
tmp_box.append(y1)
tmp_box.append(x2)
tmp_box.append(y2)
tmp_box.append(box[-1])
merge_bbox.append(tmp_box)
del bbox_data
return np.array(merge_bbox)
def get_batch_data(self, batch_annotation_lines):
    '''
    Build a batch of images and box labels without mosaic augmentation.
    Args:
        batch_annotation_lines: yolo-format annotation lines for the batch
    Returns:
        (images, boxes) as numpy arrays
    '''
    images = []
    boxes = []
    for annotation_line in batch_annotation_lines:
        image, box_data = self.get_random_data(annotation_line)
        images.append(image)
        boxes.append(box_data)
    return np.array(images), np.array(boxes)
def get_random_data(self, line):
    '''
    Load one image and its box labels, applying on-the-fly augmentation;
    when self.random is False only letterbox resizing is applied.
    Args:
        line: one yolo-format annotation line ("path x1,y1,x2,y2,cls ...")
    Returns:
        (image_data, box_data): the image as a float array in [0, 1] and
        a (max_boxes_num, 5) box array
    '''
    lines =line.split()
    image = Image.open(lines[0])
    iw, ih = image.size
    h, w = self.target_size
    # Parse every x1,y1,x2,y2,cls tuple after the image path.
    box = np.array([np.array(list(map(int, box.split(',')))) for box in lines[1:]])
    if not self.random:
        # resize image (deterministic letterbox, no augmentation)
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image = image.resize((nw, nh), Image.BICUBIC)
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_image.paste(image, (dx, dy))
        image_data = np.array(new_image, np.float32) / 255
        # correct boxes: remap into the letterboxed canvas and clamp
        box_data = np.zeros((self.max_boxes_num, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
            box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
            box[:, 0:2][box[:, 0:2] < 0] = 0
            box[:, 2][box[:, 2] > w] = w
            box[:, 3][box[:, 3] > h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
            if len(box) > self.max_boxes_num: box = box[:self.max_boxes_num]
            box_data[:len(box)] = box
        return image_data, box_data
    # resize image: random aspect-ratio jitter and random scale
    new_ar = w / h * rand(1 - self.jitter, 1 + self.jitter) / rand(1 - self.jitter, 1 + self.jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)
    # place image at a random offset on a gray canvas
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image
    # flip image or not
    flip = rand() < .5
    if flip:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)
    # distort image: random HSV jitter, hue wraps around in [0, 1]
    hue = rand(-self.hue, self.hue)
    sat = rand(1, self.sat) if rand() < .5 else 1 / rand(1, self.sat)
    val = rand(1, self.val) if rand() < .5 else 1 / rand(1, self.val)
    x = rgb_to_hsv(np.array(image) / 255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1
    # correct boxes: remap, mirror when flipped, clamp, drop degenerates
    box_data = np.zeros((self.max_boxes_num, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
        if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > self.max_boxes_num:
            box = box[:self.max_boxes_num]
        box_data[:len(box)] = box
    return image_data, box_data
# ---------------------------------------------------#
# 读入xml文件,并输出y_true
# ---------------------------------------------------#
def preprocess_true_boxes(self,true_boxes):
'''
这是根据真实标签转换成不同yolo预测输出的函数
Args:
true_boxes: 真实目标框标签
Returns:
'''
assert (true_boxes[..., 4] < self.num_classes).all(), 'class id must be less than num_classes'
# -----------------------------------------------------------#
# 获得框的坐标和图片的大小
# -----------------------------------------------------------#
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(self.target_size, dtype='int32')
# 根据不同yolo模型初始化不同anchor掩膜、网格尺寸和输出层数
if self.model_name == 'yolov3': # yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov3-spp': # yolov3-spp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4': # yolov4
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-csp': # yolov4-csp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-p5': # yolov4-p5
anchor_mask = [[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-p6': # yolov4-p6
anchor_mask = [[12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 4
grid_shapes = [input_shape // {0: 64, 1: 32, 2: 16, 3: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-p7': # yolov4-p7
anchor_mask = [[16, 17, 18, 19], [12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 5
grid_shapes = [input_shape // {0:128, 1: 64, 2: 32, 3: 16, 4: 8}[l] for l in range(num_layers)]
elif self.model_name == 'poly-yolo': # poly-yolo(v3)
anchor_mask = [[0,1,2,3,4,5,6,7,8]]
num_layers = 1
grid_shapes = [input_shape // {0: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov3-tiny': # yolov3-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
grid_shapes = [input_shape // {0: 32, 1: 16}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-tiny': # yolov4-tiny
anchor_mask = [ [3, 4, 5], [0, 1, 2]]
num_layers = 2
grid_shapes = [input_shape // {0: 32, 1: 16}[l] for l in range(num_layers)]
print(grid_shapes)
else: # 默认为yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
# -----------------------------------------------------------#
# 通过计算获得真实框的中心和宽高
# 中心点(m,n,2) 宽高(m,n,2)
# -----------------------------------------------------------#
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
# -----------------------------------------------------------#
# 将真实框归一化到小数形式
# -----------------------------------------------------------#
true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
# m为图片数量,grid_shapes为网格的shape
m = true_boxes.shape[0]
#grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
# -----------------------------------------------------------#
# y_true的格式为(m,13,13,3,85)(m,26,26,3,85)(m,52,52,3,85)
# -----------------------------------------------------------#
y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + self.num_classes),
dtype='float32') for l in range(num_layers)]
# -----------------------------------------------------------#
# [9,2] -> [1,9,2]
# -----------------------------------------------------------#
anchors = np.expand_dims(self.anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
# -----------------------------------------------------------#
# 长宽要大于0才有效
# -----------------------------------------------------------#
valid_mask = boxes_wh[..., 0] > 0
for b in range(m):
# 对每一张图进行处理
wh = boxes_wh[b, valid_mask[b]]
if len(wh) == 0: continue
# -----------------------------------------------------------#
# [n,2] -> [n,1,2]
# -----------------------------------------------------------#
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
# -----------------------------------------------------------#
# 计算所有真实框和先验框的交并比
# intersect_area [n,9]
# box_area [n,1]
# anchor_area [1,9]
# iou [n,9]
# -----------------------------------------------------------#
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# -----------------------------------------------------------#
# 维度是[n,] 感谢 消尽不死鸟 的提醒
# -----------------------------------------------------------#
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
# -----------------------------------------------------------#
# 找到每个真实框所属的特征层
# -----------------------------------------------------------#
for l in range(num_layers):
if n in anchor_mask[l]:
# -----------------------------------------------------------#
# floor用于向下取整,找到真实框所属的特征层对应的x、y轴坐标
# -----------------------------------------------------------#
i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
# -----------------------------------------------------------#
# k指的的当前这个特征点的第k个先验框
# -----------------------------------------------------------#
k = anchor_mask[l].index(n)
# -----------------------------------------------------------#
# c指的是当前这个真实框的种类
# -----------------------------------------------------------#
c = true_boxes[b, t, 4].astype('int32')
# -----------------------------------------------------------#
# y_true的shape为(m,13,13,3,85)(m,26,26,3,85)(m,52,52,3,85)
# 最后的85可以拆分成4+1+80,4代表的是框的中心与宽高、
# 1代表的是置信度、80代表的是种类
# -----------------------------------------------------------#
y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5 + c] = 1
return y_true
def generator(self):
    """Training-data generator.

    Endlessly yields ``([batch_images, *batch_y_true], batch_loss)`` where
    ``batch_y_true`` are the per-scale YOLO target tensors produced by
    ``preprocess_true_boxes`` and ``batch_loss`` is a dummy all-zeros array
    (the real loss is computed inside the model graph).
    """
    while True:
        # Reshuffle the annotation lines at the start of every epoch.
        self.annotation_lines = np.random.permutation(self.annotation_lines)
        for start in np.arange(0, self.size, self.batch_size):
            # Clamp the final (possibly partial) batch to the dataset size.
            end = int(np.min([start + self.batch_size, self.size]))
            batch_annotation_lines = self.annotation_lines[start:end]
            if self.use_mosaic:
                batch_images, batch_boxes = self.get_batch_data_with_mosaic(batch_annotation_lines)
            else:
                batch_images, batch_boxes = self.get_batch_data(batch_annotation_lines)
            # Convert raw box arrays into YOLO-format training targets.
            batch_y_true = self.preprocess_true_boxes(batch_boxes)
            batch_loss = np.zeros(len(batch_images))
            yield [batch_images, *batch_y_true], batch_loss
| 39.902821
| 112
| 0.439351
| 2,953
| 25,458
| 3.60955
| 0.122249
| 0.021109
| 0.006567
| 0.014448
| 0.453138
| 0.406136
| 0.366545
| 0.331363
| 0.302749
| 0.289145
| 0
| 0.049403
| 0.371867
| 25,458
| 638
| 113
| 39.902821
| 0.61716
| 0.168081
| 0
| 0.391198
| 0
| 0
| 0.009059
| 0
| 0
| 0
| 0
| 0
| 0.002445
| 1
| 0.026895
| false
| 0
| 0.01467
| 0.002445
| 0.06846
| 0.002445
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e080db2602e0c90c09249fc8d6eeaeabeabd005
| 750
|
py
|
Python
|
caesar_cipher.py
|
DomirScire/Basic_Ciphers
|
7425b306f8d0ce9ceb5ba3a59e73a52892bee5ca
|
[
"MIT"
] | 1
|
2021-03-31T23:29:00.000Z
|
2021-03-31T23:29:00.000Z
|
caesar_cipher.py
|
DomirScire/Ciphers_Py
|
127c82b14c9bd5595f924bc267b6bf238f654c22
|
[
"MIT"
] | null | null | null |
caesar_cipher.py
|
DomirScire/Ciphers_Py
|
127c82b14c9bd5595f924bc267b6bf238f654c22
|
[
"MIT"
] | null | null | null |
import string
def caesar_cipher(text, shift, decrypt=False):
    """Apply a Caesar cipher to *text*, shifting each letter by *shift*.

    Only ASCII letters are accepted (mixed case is fine). With
    ``decrypt=True`` the shift is applied in reverse, undoing a previous
    encryption with the same shift.

    Raises ValueError for non-ASCII or non-alphabetic input.
    """
    if not text.isascii() or not text.isalpha():
        raise ValueError("Text must be ASCII and contain no numbers.")
    # A negative offset walks the alphabet backwards, i.e. decryption.
    offset = -shift if decrypt else shift
    pieces = []
    for ch in text:
        alphabet = string.ascii_lowercase if ch.islower() else string.ascii_uppercase
        pieces.append(alphabet[(alphabet.index(ch) + offset) % 26])
    return "".join(pieces)
# Demo: encrypt a message with shift 10, then decrypt the ciphertext back.
if __name__ == "__main__":
    print(caesar_cipher("meetMeAtOurHideOutAtTwo", 10))
    print(caesar_cipher("woodWoKdYebRsnoYedKdDgy", 10, decrypt=True))
| 27.777778
| 70
| 0.630667
| 84
| 750
| 5.47619
| 0.5
| 0.078261
| 0.065217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016275
| 0.262667
| 750
| 26
| 71
| 28.846154
| 0.815552
| 0
| 0
| 0
| 0
| 0
| 0.128
| 0.061333
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e0977041deef6fa7bf74e2fadd3b0a89bcf73e3
| 6,953
|
py
|
Python
|
hume/hume/app.py
|
megacorpincorporated/HOME
|
0eb8009b028fabf64abb03acc0a081b2b8207eb0
|
[
"MIT"
] | 1
|
2018-02-18T15:51:57.000Z
|
2018-02-18T15:51:57.000Z
|
hume/hume/app.py
|
megacorpincorporated/HOME
|
0eb8009b028fabf64abb03acc0a081b2b8207eb0
|
[
"MIT"
] | null | null | null |
hume/hume/app.py
|
megacorpincorporated/HOME
|
0eb8009b028fabf64abb03acc0a081b2b8207eb0
|
[
"MIT"
] | null | null | null |
import json
import logging
from app.abc import StartError
from app.device import DeviceApp, DeviceMessage
from app.device.models import Device
from app.hint import HintApp
from app.hint.defs import HintMessage
from util.storage import DataStore
LOGGER = logging.getLogger(__name__)
class Hume:
    """Top-level HUME application object.

    Owns the two sub-applications (device-facing ``DeviceApp`` and
    HINT-facing ``HintApp``), shares one ``DataStore`` between them, and
    routes messages between the two via registered callbacks.
    """

    def __init__(self, cli_args):
        # One storage instance is shared by both sub-apps so device records
        # written by one side are visible to the other.
        self.storage = DataStore()

        self.device_app = DeviceApp(cli_args, self.storage)
        self.hint_app = HintApp(cli_args, self.storage)

    def start(self):
        """Starts the HUME."""
        LOGGER.info("hume start")

        self.device_app.pre_start()
        self.hint_app.pre_start()

        # Register callbacks prior to starting Apps in case of any
        # confirmation-type messages happen on connection establishment, or in
        # case of queued up messages from HINT.
        self.device_app.register_callback(self._on_device_message)
        self.hint_app.register_callback(self._on_hint_message)

        try:
            self.device_app.start()
            self.hint_app.start()
        except StartError:
            self.stop()  # may or may not raise another exception

            # raise runtime error to ensure stop
            raise RuntimeError("failed to start an app")

        self.device_app.post_start()
        self.hint_app.post_start()

    def stop(self):
        """Stops the HUME."""
        LOGGER.info("hume stop")

        # Important to maintain same stop order as the start order!
        self.device_app.pre_stop()
        self.hint_app.pre_stop()

        self.device_app.stop()
        self.hint_app.stop()

        self.device_app.post_stop()
        self.hint_app.post_stop()

    """
    Private
    """

    def _on_device_message(self,
                           device: Device,
                           msg_type: int,
                           msg: bytearray):
        """
        Registered to be called by the Device app when a new message is
        received from a connected device.
        """
        LOGGER.debug("HUME handling device message")

        if msg_type == DeviceMessage.CAPABILITY.value:
            # Capability payload is JSON-encoded bytes.
            decoded_msg = json.loads(msg)
            LOGGER.info(f"device {device.uuid[:4]} sent capability response")

            capabilities = decoded_msg
            capabilities["identifier"] = device.uuid

            if self.hint_app.create_device(capabilities):
                LOGGER.info("device created in HINT successfully")

                # This is done since BLE devices cannot provide UUID before
                # capability response is gotten and are thus saved with their
                # address as their primary key prior to attach success.
                device = self.storage.get(Device, device.uuid)
                device.uuid = capabilities["uuid"]
                device.attached = True
                self.storage.set(device)
            else:
                LOGGER.error("failed to create device in HINT")

                # Detach device to clean up after unsuccessful attach.
                self.device_app.detach(device)
                self.hint_app.attach_failure(device.uuid)

        elif msg_type == DeviceMessage.ACTION_STATEFUL.value:
            # NOTE(review): assumes a two-character payload where byte 0 is
            # the group id and byte 1 the state id — confirm with the device
            # protocol definition.
            decoded_msg = msg.decode()
            self.hint_app.action_response(device,
                                          HintMessage.ACTION_STATEFUL,
                                          {
                                              "group_id": int(decoded_msg[0]),
                                              "state_id": int(decoded_msg[1])
                                          })

        else:
            LOGGER.warning(f"got message from device {device.uuid[:4]} of an "
                           f"unknown type: {msg_type}, msg: {msg}")

    def _on_hint_message(self, msg_type, msg):
        """
        Registered to be called by the Hint app when a new message is received
        from HINT.
        """
        LOGGER.debug("HUME handling HINT message")

        if msg_type == HintMessage.DISCOVER_DEVICES.value:
            LOGGER.info("HINT requested device discovery")
            self.device_app.discover(self._discovered_devices)

        elif msg_type == HintMessage.ATTACH.value:
            identifier = msg["identifier"]
            LOGGER.info(f"HINT requested device {identifier[:4]} to "
                        f"be attached")

            device = self.storage.get(Device, identifier)
            if device is not None:
                if not self.device_app.request_capabilities(device):
                    LOGGER.error(f"failed to attach device {identifier[:4]}")
                    self.hint_app.attach_failure(identifier)

        elif msg_type == HintMessage.DETACH.value:
            device_uuid = msg["device_uuid"]
            LOGGER.info(f"HINT requested detaching device {device_uuid[:4]}")

            device = self.storage.get(Device, device_uuid)
            if device is not None:
                self.device_app.detach(device)
            else:
                LOGGER.error(f"can't detach device {device_uuid[:4]}, "
                             f"does not exist")

        elif msg_type == HintMessage.UNPAIR.value:
            LOGGER.info("HINT requested unpairing, factory resetting HUME")
            self.device_app.reset()
            self.storage.delete_all()

        elif msg_type == HintMessage.ACTION_STATEFUL.value:
            device_uuid = msg.pop("device_uuid")
            LOGGER.info(f"HINT requested stateful action for device "
                        f"{device_uuid[:4]}")
            # Remaining msg keys (after popping routing fields) are forwarded
            # verbatim as keyword arguments to the device app.
            msg.pop("type")

            device = self.storage.get(Device, device_uuid)
            if device is not None:
                self.device_app.stateful_action(device, **msg)
            else:
                LOGGER.error("could not execute stateful action since device "
                             "does not exist")

        elif msg_type == HintMessage.ACTION_STATES.value:
            device_uuid = msg["device_uuid"]
            LOGGER.info(f"HINT requested all stateful action states for "
                        f"device {device_uuid[:4]}")

            device = self.storage.get(Device, device_uuid)
            if device is not None:
                self.device_app.action_states(device)
            else:
                LOGGER.error("could not fetch stateful action states since "
                             "the device did not exist")

        else:
            LOGGER.warning(f"got message from hint of an unknown type: "
                           f"{msg_type}, msg: {msg}")

    def _discovered_devices(self, devices: list[Device]):
        """
        Callback provided to the device app when discovering devices.
        """
        for device in devices:
            # Store discovered devices to remember the transport type reported
            # by the individual connection types.
            self.storage.set(device)

        self.hint_app.discovered_devices(devices)
| 38.414365
| 78
| 0.576154
| 790
| 6,953
| 4.93038
| 0.23038
| 0.04878
| 0.050064
| 0.021823
| 0.278049
| 0.167394
| 0.154557
| 0.102696
| 0.086264
| 0.086264
| 0
| 0.002182
| 0.341004
| 6,953
| 180
| 79
| 38.627778
| 0.847883
| 0.129009
| 0
| 0.155738
| 0
| 0
| 0.163348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.065574
| 0
| 0.122951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e0c62be30176a8297c1bf84eb84e82bffd0d9ee
| 3,281
|
py
|
Python
|
scripts/generate_demo_requests.py
|
onedata/onezone-gui-plugin-ecrin
|
2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b
|
[
"MIT"
] | null | null | null |
scripts/generate_demo_requests.py
|
onedata/onezone-gui-plugin-ecrin
|
2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b
|
[
"MIT"
] | null | null | null |
scripts/generate_demo_requests.py
|
onedata/onezone-gui-plugin-ecrin
|
2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b
|
[
"MIT"
] | null | null | null |
#
# Author: Michał Borzęcki
#
# This script creates empty files with study and data object metadata in
# specified space and Oneprovider. It uses JSON files located in directories
# `studies_dir` (= studies) and `data_object_dir` (= data_objects). Positional
# arguments:
# 1. Oneprovider location (IP address or domain).
# 2. Space name (it must be supported by passed Oneprovider).
# 3. Access token (can be obtained via Onezone).
# 4. Number of files metadata to upload ("100" means 100 studies and 100 data
# objects)
# 5. Name of a directory (in space), where files with metadata should be
# uploaded. Warning: if that directory already exists, it will be removed.
# Example of usage:
# python3 generate_demo_requests.py 172.17.0.16 s1 MDAzMvY...ZlOGCg 1000 ecrin1
#
# Example studies and data objects can be found at
# https://github.com/beatmix92/ct.gov_updated
#
import os
import sys
import subprocess
import json

from natsort import natsorted


def _cdmi_request(method, url, content_type, token, data=None):
    """Start a curl CDMI request in the background and return its Popen.

    method       -- HTTP verb ('DELETE'/'PUT').
    url          -- full CDMI URL to hit.
    content_type -- CDMI content type header value.
    token        -- X-Auth-Token value.
    data         -- optional request body string.

    Output is discarded; callers decide whether to .wait() immediately or
    collect handles and wait later for parallelism.
    """
    command = [
        'curl',
        '-k',
        '-H', 'X-Auth-Token: ' + token,
        '-H', 'X-CDMI-Specification-Version: 1.1.1',
        '-H', 'Content-Type: ' + content_type,
        '-X', method,
    ]
    if data is not None:
        command += ['-d', data]
    command.append(url)
    return subprocess.Popen(command, stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)


# Positional arguments: Oneprovider host, space name, access token,
# number of files per category, target directory name.
provider = sys.argv[1]
space = sys.argv[2]
token = sys.argv[3]
files = int(sys.argv[4])
directory = sys.argv[5]

studies_dir = 'studies'
data_object_dir = 'data_objects'

dir_url = 'https://' + provider + '/cdmi/' + space + '/' + directory + '/'

# Remove the target directory if it already exists, then recreate it empty.
# These two must complete before any uploads start, so wait synchronously.
_cdmi_request('DELETE', dir_url, 'application/cdmi-container', token).wait()
_cdmi_request('PUT', dir_url, 'application/cdmi-container', token).wait()

processes = []
for source in [studies_dir, data_object_dir]:
    for (dirpath, _, filenames) in os.walk(source):
        # natsorted gives a stable, human-style ordering before truncation.
        for filename in natsorted(filenames)[:files]:
            path = dirpath + '/' + filename
            with open(path, 'r') as json_file:
                metadata_json = json.loads(json_file.read())
            # Pad the cross-reference list with 19 extra sequential ids:
            # studies link forward (+i), data objects link backward (-i).
            if metadata_json['object_type'] == 'study':
                linked_data_objects = metadata_json['linked_data_objects']
                start_id = linked_data_objects[0]['id']
                for i in range(1, 20):
                    linked_data_objects.append({'id': start_id + i})
            else:
                related_studies = metadata_json['related_studies']
                start_id = related_studies[0]['id']
                for i in range(1, 20):
                    related_studies.append({'id': start_id - i})
            # Uploads run in parallel; handles are collected and awaited below.
            processes.append(_cdmi_request(
                'PUT', dir_url + filename, 'application/cdmi-object', token,
                data='{"metadata": {"onedata_json": '
                     + json.dumps(metadata_json) + '}}'))

for proc in processes:
    proc.wait()
| 33.824742
| 102
| 0.643401
| 431
| 3,281
| 4.786543
| 0.357309
| 0.042656
| 0.032962
| 0.014542
| 0.329132
| 0.245274
| 0.245274
| 0.245274
| 0.228793
| 0.197286
| 0
| 0.020698
| 0.204816
| 3,281
| 96
| 103
| 34.177083
| 0.770027
| 0.255105
| 0
| 0.338235
| 0
| 0
| 0.198102
| 0.066859
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.073529
| 0
| 0.073529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e0cbccdccc4307ec0cd8efe2c3cb65f9c612951
| 1,925
|
py
|
Python
|
backend/routes/user.py
|
mradzikowski/flask-trackerproductivity
|
029103b80e21b6c64801816fe8dc27585317cb02
|
[
"MIT"
] | null | null | null |
backend/routes/user.py
|
mradzikowski/flask-trackerproductivity
|
029103b80e21b6c64801816fe8dc27585317cb02
|
[
"MIT"
] | null | null | null |
backend/routes/user.py
|
mradzikowski/flask-trackerproductivity
|
029103b80e21b6c64801816fe8dc27585317cb02
|
[
"MIT"
] | null | null | null |
from flask import jsonify, request
import backend.services.user as user_services
from . import bp
@bp.route('/user', methods=['POST', 'GET'])
def create_user():
    """Create a user from the request JSON (POST) or list all users (GET)."""
    if request.method == "POST":
        body, status = user_services.create_user(request.json)
    elif request.method == "GET":
        body, status = user_services.get_all_users()
    else:
        # Method not allowed for anything else.
        body, status = None, 405
    return jsonify(body), status
@bp.route('/user/<pk>', methods=['GET', 'DELETE'])
def get_user(pk):
    """Fetch (GET) or delete (DELETE) a single user by primary key."""
    handlers = {
        "GET": user_services.get_user,
        "DELETE": user_services.delete_user,
    }
    handler = handlers.get(request.method)
    if handler is None:
        body, status = None, 405
    else:
        body, status = handler(pk)
    return jsonify(body), status
@bp.route('/user/<pk>/tasks', methods=['GET'])
def get_all_tasks_for_user(pk):
    """List tasks for a user.

    Optional query parameter ``active`` ("true"/"false", case-insensitive)
    restricts the result to active or inactive tasks; omitting it returns
    all tasks. An unrecognized value yields a 400 response.
    """
    if request.method == "GET":
        active = request.args.get('active')
        if active is None:
            # BUG FIX: the original fell through after this branch and
            # called active.upper() on None, raising AttributeError.
            body, status = user_services.get_all_tasks_for_user(pk)
        elif active.upper() == "TRUE":
            body, status = user_services.get_all_active_tasks_for_user(pk, True)
        elif active.upper() == "FALSE":
            body, status = user_services.get_all_active_tasks_for_user(pk, False)
        else:
            return {"success": False, "message": "Invalid argument key."}, 400
    else:
        body, status = None, 405
    return jsonify(body), status
@bp.route('/user/<pk>/tasks/productivity', methods=['GET'])
def get_productivity_for_user(pk):
    """Compute and return the productivity summary for a user's tasks."""
    if request.method != "GET":
        body, status = None, 405
    else:
        body, status = user_services.get_all_tasks_and_calculate_productivity(pk)
    return jsonify(body), status
@bp.route('/user/get/all', methods=['GET'])
def get_all_users():
    """Return the list of all users."""
    # BUG FIX: original checked request.methdod (typo), which raises
    # AttributeError on every request instead of dispatching on the method.
    if request.method == "GET":
        body, status = user_services.get_all_users()
    else:
        body, status = None, 405
    return jsonify(body), status
| 27.112676
| 81
| 0.628052
| 250
| 1,925
| 4.664
| 0.196
| 0.154374
| 0.096055
| 0.150943
| 0.531732
| 0.504288
| 0.480274
| 0.403087
| 0.403087
| 0.389365
| 0
| 0.01222
| 0.234805
| 1,925
| 70
| 82
| 27.5
| 0.779362
| 0
| 0
| 0.403846
| 0
| 0
| 0.089964
| 0.015081
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.057692
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e0db8ed1374b74b17dc4c64dad644332a33ce07
| 7,205
|
py
|
Python
|
src/modu/editable/datatypes/date.py
|
philchristensen/modu
|
795f3bc413956b98522ac514dafe35cbab0d57a3
|
[
"MIT"
] | null | null | null |
src/modu/editable/datatypes/date.py
|
philchristensen/modu
|
795f3bc413956b98522ac514dafe35cbab0d57a3
|
[
"MIT"
] | null | null | null |
src/modu/editable/datatypes/date.py
|
philchristensen/modu
|
795f3bc413956b98522ac514dafe35cbab0d57a3
|
[
"MIT"
] | null | null | null |
# modu
# Copyright (c) 2006-2010 Phil Christensen
# http://modu.bubblehouse.org
#
#
# See LICENSE for details
"""
Datatypes for managing stringlike data.
"""
import time, datetime
from zope.interface import implements
from modu.editable import IDatatype, define
from modu.util import form, tags, date
from modu.persist import sql
from modu import persist, assets
# Durations in seconds, used for date arithmetic. Note these are rough:
# MONTH assumes 31 days and YEAR ignores leap years.
DAY = 86400
MONTH = DAY * 31
YEAR = DAY * 365
class CurrentDateField(define.definition):
    """
    Display a checkbox that allows updating a date field with the current date.
    """
    def get_element(self, req, style, storable):
        """
        @see: L{modu.editable.define.definition.get_element()}
        """
        value = getattr(storable, self.get_column_name(), None)
        if(value):
            output = date.strftime(value, self.get('format_string', '%B %d, %Y at %I:%M%p'))
        else:
            output = ''
        # 'search' style: an empty form node (this field is not searchable).
        if(style == 'search'):
            frm = form.FormNode(self.name)
            return frm
        elif(style == 'listing'):
            frm = form.FormNode(self.name)
            if(self.get('date_in_listing', True)):
                if(output == ''):
                    output = '(none)'
                frm(type='label', value=output)
            else:
                # Show only whether a date has been set, not its value.
                frm(type='checkbox', disabled=True, checked=bool(output))
            return frm
        elif(style == 'detail' and self.get('read_only', False)):
            if(output == ''):
                output = '(none)'
            frm = form.FormNode(self.name)
            frm(type='label', value=output)
            return frm
        # Editable detail view: checkbox that sets the current date on save.
        checked = False
        if(storable.get_id() == 0 and self.get('default_checked', False)):
            checked = True
        frm = form.FormNode(self.name)(
            type = 'checkbox',
            # this is only True if default_checked is true and it's a new item
            checked = checked,
            suffix = ' ' + tags.small()[output],
        )
        if(bool(output)):
            if(self.get('one_time', True)):
                # Once a date has been set, disable further updates.
                frm(attributes=dict(disabled='disabled'))
            else:
                frm(
                    text = ' ' + tags.small(_class='minor-help')['check to set current date']
                )
        return frm

    def update_storable(self, req, form, storable):
        """Set the column to 'now' when the checkbox was ticked.

        Saves either a Unix timestamp or a datetime depending on the
        'save_format' option (default 'timestamp'). Always returns True.
        """
        if(form[self.name].attr('checked', False)):
            value = datetime.datetime.now()
            save_format = self.get('save_format', 'timestamp')
            if(save_format == 'timestamp'):
                setattr(storable, self.get_column_name(), date.convert_to_timestamp(value))
            else:
                setattr(storable, self.get_column_name(), value)
        return True
class DateField(define.definition):
    """
    Allow editing of date data via a multiple select interface or javascript popup calendar.
    """
    implements(IDatatype)

    def get_element(self, req, style, storable):
        """
        @see: L{modu.editable.define.definition.get_element()}
        """
        value = getattr(storable, self.get_column_name(), None)
        # NOTE: 'long' means this is Python 2-era code; numeric values are
        # treated as Unix timestamps and converted to datetimes.
        if(isinstance(value, (int, long, float))):
            value = datetime.datetime.utcfromtimestamp(value)
        if(style == 'search'):
            # Search style renders a from/to date-range pair.
            frm = form.FormNode(self.name)
            frm['from'] = self.get_form_element(req, '_detail', storable)(
                prefix='<div>from date:',
                suffix=tags.br() + '</div>',
            )
            frm['to'] = self.get_form_element(req, '_detail', storable)(
                prefix='<div>to date:',
                suffix='</div>',
            )
            return frm
        elif(style == 'listing' or (style == 'detail' and self.get('read_only', False))):
            if(value):
                output = date.strftime(value, self.get('format_string', '%B %d, %Y at %I:%M%p'))
            else:
                output = ''
            frm = form.FormNode(self.name)
            frm(type='label', value=output)
            return frm
        # Editable detail view: year range for the select widgets defaults to
        # two years back through five years ahead of the stored/current year.
        current_year = datetime.datetime.now().year
        if(value is not None):
            current_year = getattr(value, 'year', current_year)
        start_year = self.get('start_year', current_year - 2)
        end_year = self.get('end_year', current_year + 5)
        # NOTE(review): months/days appear unused here — possibly leftover.
        months, days = date.get_date_arrays()
        frm = form.FormNode(self.name)
        frm(type='fieldset', style='brief')
        # 'null' checkbox toggles the date selects on/off via the JS below.
        frm['null'](type='checkbox', text="no value", weight=-1, suffix=tags.br(),
            attributes=dict(onChange='enableDateField(this);'))
        assets.activate_jquery(req)
        req.content.report('header', tags.script(type='text/javascript')["""
function enableDateField(checkboxField){
var formItem = $(checkboxField).parent().parent();
if($(checkboxField).attr('checked')){
formItem.children(':enabled').attr('disabled', true);
}
else{
formItem.children(':disabled').attr('disabled', false);
}
}
"""])
        attribs = {}
        if(value is None):
            frm['null'](checked=True)
            #attribs['disabled'] = None
            if(self.get('default_now', False)):
                value = datetime.datetime.now()
                frm['null'](checked=False)
        frm['date'](
            type = self.get('style', 'datetime'),
            value = value,
            attributes = attribs,
            # Sync the enabled/disabled state with the checkbox on page load.
            suffix = tags.script(type="text/javascript")["""
enableDateField($('#form-item-%s input'));
""" % self.name],
        )
        frm.validate = self.validate
        return frm

    def validate(self, req, frm):
        """Reject an empty date when the 'required' option is set."""
        if(not frm[self.name]['date'].attr('value', '') and self.get('required', False)):
            frm.set_error(self.name, 'You must enter a value for this field.')
            return False
        return True

    def update_storable(self, req, form, storable):
        """
        @see: L{modu.editable.define.definition.update_storable()}
        """
        save_format = self.get('save_format', 'timestamp')
        if(self.get('read_only')):
            # Read-only fields may still auto-populate 'now' on first save.
            if(self.get('default_now', False) and not storable.get_id()):
                if(save_format == 'timestamp'):
                    setattr(storable, self.get_column_name(), int(time.time()))
                else:
                    setattr(storable, self.get_column_name(), datetime.datetime.now())
            return True
        data = form[self.name]['date']
        if(data.attr('null', 0)):
            # 'no value' checkbox was ticked — clear the column.
            setattr(storable, self.get_column_name(), None)
            return True
        date_data = req.data[form.name][self.name].get('date', None)
        # if it's not a dict, it must be None, or broken
        if(isinstance(date_data, dict)):
            value = date.get_dateselect_value(date_data, self.get('style', 'datetime'))
        else:
            value = None
        if(save_format == 'timestamp'):
            setattr(storable, self.get_column_name(), date.convert_to_timestamp(value))
        else:
            setattr(storable, self.get_column_name(), value)
        return True

    def get_search_value(self, value, req, frm):
        """Build a SQL condition from the from/to search-range inputs.

        Returns a BETWEEN clause when both ends are given, LT/GT for a
        single end, or None when neither end was filled in.
        """
        form_data = frm[self.name]
        to_value = 0
        from_value = 0
        if not(value['to'].get('null')):
            start_year = form_data['to']['date'].start_year
            end_year = form_data['to']['date'].end_year
            date_data = value['to'].get('date', None)
            if(date_data):
                to_value = date.get_dateselect_value(date_data, self.get('style', 'datetime'), start_year, end_year)
                to_value = time.mktime(to_value.timetuple())
        if not(value['from'].get('null')):
            start_year = form_data['from']['date'].start_year
            end_year = form_data['from']['date'].end_year
            date_data = value['from'].get('date', None)
            if(date_data):
                from_value = date.get_dateselect_value(date_data, self.get('style', 'datetime'), start_year, end_year)
                from_value = time.mktime(from_value.timetuple())
        if(to_value and from_value):
            # Stored datetimes must be converted to timestamps in SQL first.
            if(self.get('save_format', 'timestamp') == 'datetime'):
                return sql.RAW('UNIX_TIMESTAMP(%%s) BETWEEN %s AND %s' % (from_value, to_value))
            else:
                return sql.RAW('%%s BETWEEN %s AND %s' % (from_value, to_value))
        elif(to_value):
            return sql.LT(to_value)
        elif(from_value):
            return sql.GT(from_value)
        else:
            return None
| 28.82
| 106
| 0.658015
| 988
| 7,205
| 4.673077
| 0.194332
| 0.047
| 0.02924
| 0.040936
| 0.455491
| 0.390513
| 0.34178
| 0.292831
| 0.261209
| 0.214425
| 0
| 0.0042
| 0.173907
| 7,205
| 249
| 107
| 28.935743
| 0.771505
| 0.084663
| 0
| 0.346369
| 0
| 0
| 0.17821
| 0.038308
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03352
| false
| 0
| 0.03352
| 0
| 0.178771
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e11fb05adb494991b86d4b22a22f936a7c8a876
| 1,908
|
py
|
Python
|
cactusbot/commands/magic/alias.py
|
CactusBot/CactusBot
|
6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5
|
[
"MIT"
] | 23
|
2016-02-16T05:09:11.000Z
|
2016-09-20T14:22:51.000Z
|
cactusbot/commands/magic/alias.py
|
Alkali-Metal/CactusBot
|
6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5
|
[
"MIT"
] | 190
|
2016-09-30T05:31:59.000Z
|
2018-12-22T08:46:49.000Z
|
cactusbot/commands/magic/alias.py
|
Alkali-Metal/CactusBot
|
6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5
|
[
"MIT"
] | 16
|
2016-10-09T16:51:48.000Z
|
2017-10-25T05:29:10.000Z
|
"""Alias command."""
from . import Command
from ...packets import MessagePacket
class Alias(Command):
    """Alias command: manage alternate names for existing chat commands."""

    COMMAND = "alias"

    @Command.command(role="moderator")
    async def add(self, alias: "?command", command: "?command", *_: False,
                  raw: "packet"):
        """Add a new command alias."""
        # Skip the first four tokens ("!alias add <alias> <command>");
        # anything left over becomes the alias's default arguments.
        _, _, _, _, *args = raw.split()
        if args:
            packet_args = MessagePacket.join(
                *args, separator=' ').json["message"]
        else:
            packet_args = None
        response = await self.api.add_alias(command, alias, packet_args)
        # 201 = created, 200 = updated existing, 404 = target command missing.
        if response.status == 201:
            return "Alias !{} for !{} created.".format(alias, command)
        elif response.status == 200:
            return "Alias !{} for command !{} updated.".format(alias, command)
        elif response.status == 404:
            return "Command !{} does not exist.".format(command)

    @Command.command(role="moderator")
    async def remove(self, alias: "?command"):
        """Remove a command alias."""
        response = await self.api.remove_alias(alias)
        if response.status == 200:
            return "Alias !{} removed.".format(alias)
        elif response.status == 404:
            return "Alias !{} doesn't exist!".format(alias)

    @Command.command("list", role="moderator")
    async def list_aliases(self):
        """List all aliases."""
        response = await self.api.get_command()
        if response.status == 200:
            commands = (await response.json())["data"]
            # Format each alias as "alias (target)" in sorted order.
            return "Aliases: {}.".format(', '.join(sorted(
                "{} ({})".format(
                    command["attributes"]["name"],
                    command["attributes"]["commandName"])
                for command in commands
                if command.get("type") == "aliases"
            )))
        return "No aliases added!"
| 32.338983
| 78
| 0.545597
| 191
| 1,908
| 5.387435
| 0.319372
| 0.116618
| 0.073858
| 0.061224
| 0.2138
| 0.137998
| 0
| 0
| 0
| 0
| 0
| 0.013544
| 0.303459
| 1,908
| 58
| 79
| 32.896552
| 0.760722
| 0.015199
| 0
| 0.15
| 0
| 0
| 0.162934
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e13a8102a55ae649fda3dcfedbae946ebff32c0
| 2,828
|
py
|
Python
|
explorer/util.py
|
brianhouse/rlab
|
4d878abd2299fd340a645ebd8b92a68c2b48f41e
|
[
"MIT"
] | null | null | null |
explorer/util.py
|
brianhouse/rlab
|
4d878abd2299fd340a645ebd8b92a68c2b48f41e
|
[
"MIT"
] | null | null | null |
explorer/util.py
|
brianhouse/rlab
|
4d878abd2299fd340a645ebd8b92a68c2b48f41e
|
[
"MIT"
] | null | null | null |
import numpy as np
def combine(signal_x, signal_y):
    """Pair two equal-length signals into one array of (x, y) samples."""
    paired = np.stack([signal_x, signal_y], axis=-1)
    return paired
def normalize(signal, minimum=None, maximum=None):
    """Normalize a signal to the range 0, 1.

    Uses the minimum and maximum observed in the data unless explicitly
    passed. Values outside [minimum, maximum] are clipped. A constant
    signal (zero range) is mapped to all zeros rather than producing
    NaNs from a 0/0 division (bug fix).
    """
    signal = np.array(signal).astype('float')
    if minimum is None:
        minimum = np.min(signal)
    if maximum is None:
        maximum = np.max(signal)
    signal -= minimum
    span = maximum - minimum
    if span == 0:
        # Degenerate range: every sample equals the minimum.
        return np.zeros_like(signal)
    signal /= span
    signal = np.clip(signal, 0.0, 1.0)
    return signal
def resample(ts, values, num_samples):
    """Resample a (times, values) series onto num_samples evenly spaced
    points using linear interpolation."""
    # Interpolation requires strictly increasing sample times.
    assert np.all(np.diff(ts) > 0)
    sample_points = np.linspace(0.0, 1.0, num_samples)
    return np.interp(sample_points, normalize(ts), values)
def smooth(signal, size=10, window='blackman'):
    """Apply a moving-average low-pass filter to a signal via convolution.

    size -- width of the averaging window in samples; sizes under 3 return
            the signal unchanged.
    window -- NOTE(review): this parameter is currently ignored — a flat
              (boxcar) window of ones is always used regardless of the
              'blackman' default. Confirm whether a tapered window was
              intended before relying on it.
    """
    signal = np.array(signal)
    if size < 3:
        return signal
    # Mirror the signal at both ends before convolving so the boundaries
    # are not attenuated; mode='same' keeps the padded length.
    s = np.r_[2 * signal[0] - signal[size:1:-1], signal, 2 * signal[-1] - signal[-1:-size:-1]]
    w = np.ones(size, 'd')
    y = np.convolve(w / w.sum(), s, mode='same')
    # Trim the padding back off.
    return y[size - 1:-size + 1]
def detect_peaks(signal, lookahead=10, delta=0):
    """Detect the local maxima and minima in a signal.

    Parameters:
        signal: array-like values to scan.
        lookahead: samples to look ahead from a potential peak to see if a
            bigger one is coming.
        delta: minimum difference between a peak and surrounding points to
            be considered a peak (no hills) and makes things faster.
            Note: careful if you have flat regions, may affect lookahead.

    Returns:
        (peaks, valleys): two lists of [index, value] pairs.
    """
    signal = np.array(signal)
    peaks = []
    valleys = []
    # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
    min_value, max_value = np.inf, -np.inf
    for index, value in enumerate(signal[:-lookahead]):
        if value > max_value:
            max_value = value
            max_pos = index
        if value < min_value:
            min_value = value
            min_pos = index
        # Falling away from the tracked maximum: confirm a peak if nothing
        # higher appears within the lookahead window.
        if value < max_value - delta and max_value != np.inf:
            if signal[index:index + lookahead].max() < max_value:
                peaks.append([max_pos, max_value])
                max_value = np.inf
                min_value = np.inf
                if index + lookahead >= signal.size:
                    break
                continue
        # Rising away from the tracked minimum: confirm a valley if nothing
        # lower appears within the lookahead window.
        if value > min_value + delta and min_value != -np.inf:
            if signal[index:index + lookahead].min() > min_value:
                valleys.append([min_pos, min_value])
                min_value = -np.inf
                max_value = -np.inf
                if index + lookahead >= signal.size:
                    break
    return peaks, valleys
| 40.985507
| 132
| 0.597242
| 391
| 2,828
| 4.232737
| 0.340153
| 0.043505
| 0.042296
| 0.03142
| 0.101511
| 0.09426
| 0.09426
| 0.09426
| 0.049547
| 0
| 0
| 0.014141
| 0.299859
| 2,828
| 69
| 133
| 40.985507
| 0.821717
| 0.220297
| 0
| 0.142857
| 0
| 0
| 0.008353
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0.089286
| false
| 0
| 0.017857
| 0.017857
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e14c71363bc33135f20b63aec47306b9531737a
| 2,839
|
py
|
Python
|
dooly/converters/kobart_utils.py
|
jinmang2/DOOLY
|
961c7b43b06dffa98dc8a39e72e417502e89470c
|
[
"Apache-2.0"
] | 17
|
2022-03-06T05:06:14.000Z
|
2022-03-31T00:25:06.000Z
|
dooly/converters/kobart_utils.py
|
jinmang2/DOOLY
|
961c7b43b06dffa98dc8a39e72e417502e89470c
|
[
"Apache-2.0"
] | 6
|
2022-03-27T18:18:40.000Z
|
2022-03-31T17:35:34.000Z
|
dooly/converters/kobart_utils.py
|
jinmang2/DOOLY
|
961c7b43b06dffa98dc8a39e72e417502e89470c
|
[
"Apache-2.0"
] | 1
|
2022-03-31T13:07:41.000Z
|
2022-03-31T13:07:41.000Z
|
import os
import sys
import hashlib
import importlib
def is_available_boto3(module_name: str = "boto3"):
    """Return the module spec if *module_name* is importable, else None.

    Generalized with a backward-compatible parameter (the no-argument call
    still checks boto3).  The explicit ``import importlib.util`` matters:
    a bare ``import importlib`` does not guarantee the ``util`` submodule
    is loaded.
    """
    import importlib.util

    return importlib.util.find_spec(module_name)
# Fail fast at import time: everything below requires boto3 for S3 access.
if is_available_boto3():
    import boto3
    from botocore import UNSIGNED
    from botocore.client import Config
else:
    raise ModuleNotFoundError("Please install boto3 with: `pip install boto3`.")
class AwsS3Downloader(object):
    """Thin wrapper around boto3 for downloading objects from S3.

    The client is configured with unsigned requests, so publicly readable
    buckets work without credentials.
    """

    def __init__(
        self,
        aws_access_key_id=None,
        aws_secret_access_key=None,
    ):
        self.resource = boto3.Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        ).resource("s3")
        self.client = boto3.client(
            "s3",
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            config=Config(signature_version=UNSIGNED),
        )

    def __split_url(self, url: str):
        """Split ``s3://bucket/key...`` (or ``bucket/key...``) into (bucket, key)."""
        if url.startswith("s3://"):
            url = url.replace("s3://", "")
        bucket, key = url.split("/", maxsplit=1)
        return bucket, key

    def download(self, url: str, local_dir: str):
        """Download *url* into *local_dir*, rendering a simple progress bar.

        Returns the local file path.  On failure the exception is printed
        and the (possibly partial) path is still returned -- callers should
        verify the file, e.g. via checksum.
        """
        bucket, key = self.__split_url(url)
        filename = os.path.basename(key)
        file_path = os.path.join(local_dir, filename)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        meta_data = self.client.head_object(Bucket=bucket, Key=key)
        total_length = int(meta_data.get("ContentLength", 0))
        downloaded = 0

        def progress(chunk):
            nonlocal downloaded
            downloaded += chunk
            # Guard zero-length objects: the original unconditional division
            # raised ZeroDivisionError for them.
            if total_length <= 0:
                return
            done = int(50 * downloaded / total_length)
            sys.stdout.write(
                "\r{}[{}{}]".format(file_path, "█" * done, "." * (50 - done))
            )
            sys.stdout.flush()

        try:
            with open(file_path, "wb") as f:
                self.client.download_fileobj(bucket, key, f, Callback=progress)
            sys.stdout.write("\n")
            sys.stdout.flush()
        except Exception as e:
            # Typo fixed ("occured" -> "occurred").
            print(f"Exception occurred: {e}.\ndownloading file is failed. {url}")
        return file_path
def download(url, chksum=None, cachedir=".cache"):
    """Fetch *url* into *cachedir*, reusing a cached copy when its checksum matches.

    Parameters:
        url: S3 URL of the file to fetch.
        chksum: first 10 hex digits of the expected md5, or None to skip
            verification.
        cachedir: cache directory, relative to the current working directory.

    Returns:
        Tuple (file_path, is_cached).

    Raises:
        AssertionError: when a checksum was given and the downloaded file
            does not match ("corrupted file!").
    """

    def _md5_prefix(path):
        # Context manager closes the handle deterministically; the original
        # `open(...).read()` left it to the garbage collector.
        with open(path, "rb") as f:
            return hashlib.md5(f.read()).hexdigest()[:10]

    cachedir_full = os.path.join(os.getcwd(), cachedir)
    os.makedirs(cachedir_full, exist_ok=True)
    filename = os.path.basename(url)
    file_path = os.path.join(cachedir_full, filename)
    if os.path.isfile(file_path):
        if _md5_prefix(file_path) == chksum:
            print(f"using cached model. {file_path}")
            return file_path, True
    s3 = AwsS3Downloader()
    file_path = s3.download(url, cachedir_full)
    if chksum:
        assert chksum == _md5_prefix(file_path), "corrupted file!"
    return file_path, False
| 31.898876
| 80
| 0.610426
| 352
| 2,839
| 4.71875
| 0.34375
| 0.062613
| 0.036123
| 0.042143
| 0.145695
| 0.124022
| 0.124022
| 0.124022
| 0.124022
| 0.077062
| 0
| 0.015429
| 0.269461
| 2,839
| 88
| 81
| 32.261364
| 0.784957
| 0.010215
| 0
| 0.082192
| 0
| 0
| 0.074786
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 1
| 0.082192
| false
| 0
| 0.109589
| 0.013699
| 0.273973
| 0.027397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e15e9506e9a75c167124e23e066dc0069217190
| 1,565
|
py
|
Python
|
tests/uv/util/test_env.py
|
hartikainen/uv-metrics
|
7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-06-17T17:33:05.000Z
|
2022-03-30T17:32:05.000Z
|
tests/uv/util/test_env.py
|
hartikainen/uv-metrics
|
7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21
|
[
"ECL-2.0",
"Apache-2.0"
] | 28
|
2020-06-16T18:32:08.000Z
|
2020-11-12T17:51:20.000Z
|
tests/uv/util/test_env.py
|
hartikainen/uv-metrics
|
7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-08-07T20:05:49.000Z
|
2021-10-21T01:43:00.000Z
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uv.util.env as ue
def test_extract_params(monkeypatch):
    """Exercise ue.extract_params against synthetic and real environments."""

    def mem_env(prefix):
        # Synthetic environment: two keys joined to the prefix with an
        # underscore, and one where the prefix abuts the key directly.
        return {
            f"{prefix}_MY_KEY": "face",
            f"{prefix}_ANOTHER_KEY": "sandwich",
            f"{prefix}THIRD_KEY": "ham"
        }

    # All three keys are expected back lower-cased with the prefix (and any
    # leading underscore) stripped -- presumably extract_params tolerates the
    # missing separator in THIRD_KEY; confirm against its implementation.
    expected = {"my_key": "face", "another_key": "sandwich", "third_key": "ham"}
    # with various prefixes, a custom-supplied environment will return the
    # correctly parsed env variables.
    assert expected == ue.extract_params(prefix="ENVVAR", env=mem_env("ENVVAR"))
    assert expected == ue.extract_params(prefix="funky", env=mem_env("funky"))
    k = f"{ue._ENV_VAR_PREFIX}_RANDOM_KEY"
    v = "better_not_be_set"
    # make sure we don't have some random value set
    if os.environ.get(k):
        monkeypatch.delenv(k)
    # the environment should be empty.
    assert ue.extract_params() == {}
    # set our expected kv pair...
    monkeypatch.setenv(k, v)
    # and get it back from the env.
    assert ue.extract_params() == {"random_key": v}
| 29.528302
| 78
| 0.705431
| 235
| 1,565
| 4.587234
| 0.548936
| 0.055659
| 0.055659
| 0.029685
| 0.064935
| 0.064935
| 0
| 0
| 0
| 0
| 0
| 0.006245
| 0.18147
| 1,565
| 52
| 79
| 30.096154
| 0.835285
| 0.513099
| 0
| 0
| 0
| 0
| 0.253711
| 0.041835
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.105263
| false
| 0
| 0.105263
| 0.052632
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e1651dd40e1ae6c43644b4a77456f4eb701c53a
| 1,054
|
py
|
Python
|
models/fleet.py
|
gnydick/qairon
|
e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257
|
[
"MIT"
] | null | null | null |
models/fleet.py
|
gnydick/qairon
|
e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257
|
[
"MIT"
] | null | null | null |
models/fleet.py
|
gnydick/qairon
|
e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257
|
[
"MIT"
] | null | null | null |
from sqlalchemy import *
from sqlalchemy.orm import relationship
from db import db
class Fleet(db.Model):
    """SQLAlchemy model for a fleet belonging to a deployment target.

    The string primary key is synthesized by the before_insert/before_update
    listeners in this module as
    "<deployment_target_id>:<fleet_type_id>:<name>".
    """
    __tablename__ = "fleet"
    # Synthesized composite-style id (see __update_id__ in this module).
    id = Column(String, primary_key=True)
    deployment_target_id = Column(String, ForeignKey('deployment_target.id'))
    fleet_type_id = Column(String, ForeignKey('fleet_type.id'))
    name = Column(String(256))
    # Free-form text; schema places no structure on it here.
    defaults = Column(Text)
    # NOTE(review): presumably the provider-side identifier -- confirm.
    native_id = Column(String)
    deployment_target = relationship("DeploymentTarget", back_populates="fleets")
    subnets = relationship("Subnet", secondary='subnets_fleets', back_populates="fleets")
    type = relationship("FleetType", back_populates="fleets")
    capacities = relationship("Capacity", back_populates="fleet")

    def __repr__(self):
        return self.id
# Recompute the synthetic primary key before every insert and update so it
# stays in sync with its component columns.
@db.event.listens_for(Fleet, 'before_update')
@db.event.listens_for(Fleet, 'before_insert')
def my_before_insert_listener(mapper, connection, fleet):
    __update_id__(fleet)


def __update_id__(fleet):
    # id is the colon-joined composite of the three identifying columns.
    fleet.id = ':'.join([fleet.deployment_target_id, fleet.fleet_type_id, fleet.name])
| 30.114286
| 89
| 0.736243
| 129
| 1,054
| 5.682171
| 0.387597
| 0.081855
| 0.076398
| 0.065484
| 0.076398
| 0.076398
| 0
| 0
| 0
| 0
| 0
| 0.003319
| 0.142315
| 1,054
| 34
| 90
| 31
| 0.807522
| 0
| 0
| 0
| 0
| 0
| 0.133776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.130435
| 0.043478
| 0.826087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e1773f3e2177f91fdf46e022af55af83edbbcb5
| 1,568
|
py
|
Python
|
logs/followup_email.py
|
vreyespue/Movie_Bot
|
192c74be62afcfda77a0984ff4da3014226c3432
|
[
"Apache-2.0"
] | 26
|
2019-02-04T04:55:09.000Z
|
2021-09-22T14:58:46.000Z
|
logs/followup_email.py
|
vreyespue/Movie_Bot
|
192c74be62afcfda77a0984ff4da3014226c3432
|
[
"Apache-2.0"
] | 2
|
2019-05-07T16:33:09.000Z
|
2021-02-13T18:25:35.000Z
|
logs/followup_email.py
|
vreyespue/Movie_Bot
|
192c74be62afcfda77a0984ff4da3014226c3432
|
[
"Apache-2.0"
] | 27
|
2018-12-10T12:13:50.000Z
|
2020-10-11T17:43:22.000Z
|
###################################################################
######## Follow up email #############
###################################################################
"""
followup_email.py
This is special use case code written to assist bot developers. It consolidates topics that are not familiar to the bot
and sends it in a nicely formatted email to the developers team.
"""
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import smtplib
import os,string,sys
sys.path.append(os.path.normpath(os.getcwd()))
from config import location

SERVER = " "  # TODO: configure the SMTP relay host before use
FROM = ["xxxx@gmail.com"]
TO = ["xxxx@gmail.com"]  # must be a list
SUBJECT = "Follow up questions email"
TEXT = """Hello,
Here are the various questions users asked me today which I have no idea about. Could you help me learn these topics?
Regards,
Kelly
"""

# Assemble the multipart message: plain-text body plus the follow-up file
# as a base64-encoded attachment.
msg = MIMEMultipart()
msg['From'] = ", ".join(FROM)
msg['To'] = ", ".join(TO)
msg['Subject'] = SUBJECT
body = TEXT
msg.attach(MIMEText(body, 'plain'))

filename = 'followup_file.TXT'
# Close the attachment handle deterministically (the original leaked it),
# and reuse `filename` instead of repeating the literal.
with open(location + filename, "rb") as attachment:
    part = MIMEBase('application', 'octet-stream')
    part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(part)

message = msg.as_string()
server = smtplib.SMTP(SERVER)
server.sendmail(FROM, TO, message)
server.quit()
| 26.133333
| 122
| 0.646684
| 201
| 1,568
| 5.00995
| 0.557214
| 0.044687
| 0.051639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001497
| 0.147959
| 1,568
| 60
| 123
| 26.133333
| 0.752246
| 0.169643
| 0
| 0
| 0
| 0.028571
| 0.284192
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.228571
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e1b6e602b092d059fb5b4b96bb130aa002770f4
| 1,213
|
py
|
Python
|
wiwo/sender.py
|
CoreSecurity/wiwo
|
44bd44b8ebea7e33105a7f4dac6480493cbb9623
|
[
"Apache-1.1"
] | 76
|
2015-08-01T23:24:43.000Z
|
2018-07-02T11:13:16.000Z
|
wiwo/sender.py
|
6e726d/wiwo
|
44bd44b8ebea7e33105a7f4dac6480493cbb9623
|
[
"Apache-1.1"
] | 1
|
2016-01-28T22:11:17.000Z
|
2016-02-03T22:14:46.000Z
|
wiwo/sender.py
|
6e726d/wiwo
|
44bd44b8ebea7e33105a7f4dac6480493cbb9623
|
[
"Apache-1.1"
] | 27
|
2015-08-11T07:24:42.000Z
|
2018-10-05T11:09:54.000Z
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# Copyright 2003-2015 CORE Security Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andres Blanco (6e726d)
# Andres Gazzoli
#
import ethernet
import pcapy
class Sender(object):
    @staticmethod
    def send(frame_obj, iface_name):
        """
        Method that inject/send a frame.
        """
        raw = frame_obj.get_packet()
        # Zero-pad up to the Ethernet minimum frame size before injection.
        short_by = ethernet.ETHERNET_MIN_SIZE - len(raw)
        if short_by > 0:
            raw += "\x00" * short_by
        handle = pcapy.open_live(iface_name, ethernet.ETHERNET_MTU, 0, 100)
        handle.sendpacket(raw)
        return raw
| 28.880952
| 74
| 0.678483
| 164
| 1,213
| 4.95122
| 0.664634
| 0.073892
| 0.03202
| 0.039409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029979
| 0.230008
| 1,213
| 41
| 75
| 29.585366
| 0.8394
| 0.587799
| 0
| 0
| 0
| 0
| 0.008772
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e201007363380e4d643bfc71a7961525d34bdc2
| 4,073
|
py
|
Python
|
email_scrapper/readers/gmail_reader.py
|
datmellow/email-scrapper
|
614e99a4b33f3a0d3d85d5eb9c359818991673a6
|
[
"MIT"
] | 2
|
2018-01-07T23:12:28.000Z
|
2018-01-10T00:58:17.000Z
|
email_scrapper/readers/gmail_reader.py
|
LucasCoderT/email-scrapper
|
614e99a4b33f3a0d3d85d5eb9c359818991673a6
|
[
"MIT"
] | null | null | null |
email_scrapper/readers/gmail_reader.py
|
LucasCoderT/email-scrapper
|
614e99a4b33f3a0d3d85d5eb9c359818991673a6
|
[
"MIT"
] | 1
|
2019-12-09T17:01:08.000Z
|
2019-12-09T17:01:08.000Z
|
import base64
import datetime
import email
import logging
import os
import typing
from email.message import Message
from googleapiclient import errors
from email_scrapper.models import Stores
from email_scrapper.readers.base_reader import BaseReader
logger = logging.getLogger(__name__)
class GmailReader(BaseReader):
    """Reads store emails through the Gmail API."""

    SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']

    def __init__(self, service, user_id: str = "me", user_email: str = None, email_mapping: dict = None,
                 date_from: datetime.datetime = None):
        """
        Parameters
        ----------
        service:
            The Gmail API service
        email_mapping: dict
            Mapping of class:Stores: to str representing the email to search from
        """
        super(GmailReader, self).__init__(date_from=date_from, user_email=user_email, email_mapping=email_mapping)
        self.service = service
        self.user_id = user_id

    @classmethod
    def authenticate_with_browser(cls, credentials_json: dict = None, date_from: datetime.datetime = None):
        """
        Login to gmail through the browser.
        Requires a credentials.json file or a credentials_json dict passed
        Returns
        -------
        GmailReader
        """
        try:
            from google_auth_oauthlib.flow import InstalledAppFlow
            from googleapiclient.discovery import build
            import pickle
            creds = None
            if os.path.exists('token.pickle'):
                with open('token.pickle', 'rb') as token:
                    creds = pickle.load(token)
            if not creds or not creds.valid:
                if credentials_json:
                    flow = InstalledAppFlow.from_client_config(credentials_json, GmailReader.SCOPES)
                else:
                    flow = InstalledAppFlow.from_client_secrets_file(
                        'credentials.json', GmailReader.SCOPES)
                creds = flow.run_local_server(port=0)
                # Save the credentials for the next run
                with open('token.pickle', 'wb') as token:
                    pickle.dump(creds, token)
            service = build('gmail', 'v1', credentials=creds)
            response = service.users().getProfile(userId="me").execute()
            return cls(service, user_id="me", user_email=response.get("emailAddress"), date_from=date_from)
        except ImportError as err:
            # ModuleNotFoundError subclasses ImportError, so one clause covers
            # both.  Raise ImportError (not BaseException, which callers can
            # only catch by also catching KeyboardInterrupt/SystemExit) and
            # chain the original failure for debuggability.
            raise ImportError("Google Auth library not found") from err

    def _get_search_date_range(self):
        # Gmail's "after:" search operator takes YYYY-MM-DD dates.
        return self.search_date_range.strftime("%Y-%m-%d")

    def _get_email_details(self, message) -> Message:
        """Fetch a single message by id and parse it into an email Message."""
        response = self.service.users().messages().get(userId=self.user_id, id=message['id'], format="raw").execute()
        msg_str = base64.urlsafe_b64decode(response['raw'].encode('ASCII'))
        mime_msg = email.message_from_bytes(msg_str)
        return mime_msg

    def _get_search_query(self, store: Stores, subject: str = None):
        # NOTE(review): `subject` is accepted but not folded into the query;
        # confirm whether subject filtering should be added here.
        return f"from:{self._get_store_email(store)} after:{self._get_search_date_range()}"

    def read_store_emails(self, store: Stores, subject: str = None) -> typing.Generator[str, None, None]:
        """Yield parsed messages for *store*, following Gmail result pages."""
        query = self._get_search_query(store, subject)
        try:
            response = self.service.users().messages().list(userId=self.user_id,
                                                            q=query).execute()
            if 'messages' in response:
                for message in response['messages']:
                    yield self._get_email_details(message)
            while 'nextPageToken' in response:
                page_token = response['nextPageToken']
                response = self.service.users().messages().list(userId=self.user_id, q=query,
                                                                pageToken=page_token).execute()
                for message in response['messages']:
                    yield self._get_email_details(message)
        except errors.HttpError as error:
            print('An error occurred: %s' % error)
| 41.141414
| 117
| 0.615762
| 456
| 4,073
| 5.302632
| 0.335526
| 0.01737
| 0.016543
| 0.029777
| 0.163772
| 0.150538
| 0.126551
| 0.096774
| 0.096774
| 0.096774
| 0
| 0.002758
| 0.287749
| 4,073
| 98
| 118
| 41.561224
| 0.830748
| 0.076848
| 0
| 0.089552
| 0
| 0
| 0.088211
| 0.019786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0
| 0.208955
| 0.029851
| 0.38806
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e2255b8f77a18ad6776515831039d97cfa15e3a
| 748
|
py
|
Python
|
Advanced_algorithm/oj_test/test04.py
|
mndream/MyOJ
|
ee92fb657475d998e6c201f222cb20bcbc2bfd64
|
[
"Apache-2.0"
] | 1
|
2018-12-27T08:06:38.000Z
|
2018-12-27T08:06:38.000Z
|
Advanced_algorithm/oj_test/test04.py
|
mndream/MyPythonOJ
|
ee92fb657475d998e6c201f222cb20bcbc2bfd64
|
[
"Apache-2.0"
] | null | null | null |
Advanced_algorithm/oj_test/test04.py
|
mndream/MyPythonOJ
|
ee92fb657475d998e6c201f222cb20bcbc2bfd64
|
[
"Apache-2.0"
] | null | null | null |
'''
A+B for Input-Output Practice (IV)
Description
Your task is to Calculate the sum of some integers.
Input
Input contains multiple test cases. Each test case contains a integer N,
and then N integers follow in the same line.
A test case starting with 0 terminates the input and this test case is not to be processed.
Output
For each group of input integers you should output their sum in one line,
and with one line of output for each line in input.
Sample Input
4 1 2 3 4
5 1 2 3 4 5
0
Sample Output
10
15
'''
while(True):
    input_list = list(map(int, input().split()))
    # split() without arguments splits on any whitespace run (spaces,
    # newlines, tabs); split(" ") was rejected by the judge ("RE").
    n = input_list[0]
    if n == 0:
        break
    # Sum the N values with the builtin `sum` over a slice; the original
    # manual loop shadowed the builtin with a local named `sum`.
    print(sum(input_list[1:n + 1]))
| 24.933333
| 91
| 0.669786
| 142
| 748
| 3.507042
| 0.528169
| 0.048193
| 0.012048
| 0.016064
| 0.02008
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.241979
| 748
| 30
| 92
| 24.933333
| 0.84127
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e22c62fbf96771a37ae5b157b23776e81cda2c5
| 2,421
|
py
|
Python
|
pre-processing/obtain_audio_spectrogram.py
|
GeWu-Lab/OGM-GE_CVPR2022
|
08b3f2498dd3e89f57fe9a12b5bf0c162eba1fbf
|
[
"MIT"
] | 4
|
2022-03-06T17:57:24.000Z
|
2022-03-24T04:26:32.000Z
|
pre-processing/obtain_audio_spectrogram.py
|
GeWu-Lab/OGM-GE_CVPR2022
|
08b3f2498dd3e89f57fe9a12b5bf0c162eba1fbf
|
[
"MIT"
] | null | null | null |
pre-processing/obtain_audio_spectrogram.py
|
GeWu-Lab/OGM-GE_CVPR2022
|
08b3f2498dd3e89f57fe9a12b5bf0c162eba1fbf
|
[
"MIT"
] | 1
|
2022-03-31T08:12:15.000Z
|
2022-03-31T08:12:15.000Z
|
import multiprocessing
import os
import os.path
import pickle
import librosa
import numpy as np
from scipy import signal
def audio_extract(path, audio_name, audio_path, sr=16000):
    """Compute a normalized log-spectrogram for one audio file and pickle it.

    Parameters:
        path: directory the .pkl result is written into.
        audio_name: base name (without extension) for the output file.
        audio_path: audio file to load.
        sr: NOTE(review) -- accepted but never used; librosa.load below is
            called without a sample-rate argument, so librosa's default
            applies.  Confirm whether sr was meant to be passed through.
    """
    save_path = path
    samples, samplerate = librosa.load(audio_path)
    # Loop the clip 10x and cut to a fixed 160000-sample window.
    resamples = np.tile(samples, 10)[:160000]
    # Clamp amplitudes into [-1, 1].
    resamples[resamples > 1.] = 1.
    resamples[resamples < -1.] = -1.
    frequencies, times, spectrogram = signal.spectrogram(resamples, samplerate, nperseg=512, noverlap=353)
    spectrogram = np.log(spectrogram + 1e-7)  # epsilon avoids log(0)
    # Standardize to zero mean / unit variance (epsilon avoids div-by-zero).
    mean = np.mean(spectrogram)
    std = np.std(spectrogram)
    spectrogram = np.divide(spectrogram - mean, std + 1e-9)
    # Fixed shape follows from the 160000-sample window with nperseg=512,
    # noverlap=353.
    assert spectrogram.shape == (257, 1004)
    save_name = os.path.join(save_path, audio_name + '.pkl')
    print(save_name)
    with open(save_name, 'wb') as fid:
        pickle.dump(spectrogram, fid)
class Consumer(multiprocessing.Process):
    """Worker process that drains [path, name, audio_path] jobs from a queue."""

    def __init__(self, task_queue):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue

    def run(self):
        worker = self.name
        while True:
            job = self.task_queue.get()
            # A None job is the poison pill telling this worker to stop.
            if job is None:
                print('{}: Exiting'.format(worker))
                self.task_queue.task_done()
                break
            audio_extract(job[0], job[1], job[2])
            self.task_queue.task_done()
if __name__ == '__main__':
    # Establish communication queues
    tasks = multiprocessing.JoinableQueue()
    # Start consumers: one worker process per CPU core.
    num_consumers = multiprocessing.cpu_count()
    print('Creating {} consumers'.format(num_consumers))
    consumers = [
        Consumer(tasks)
        for i in range(num_consumers)
    ]
    for w in consumers:
        w.start()
    # path='data/'
    # Output directory for the pickled spectrograms.
    save_dir = '/home/xiaokang_peng/data/AVE_av/audio_spec'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    path_origin = '/home/xiaokang_peng/data/AVE_av/audio'
    audios = os.listdir(path_origin)
    # Queue one extraction job per audio file; [:-4] strips the extension.
    for audio in audios:
        audio_name = audio
        audio_path = os.path.join(path_origin, audio)
        tasks.put([save_dir, audio_name[:-4], audio_path])
    # Add a poison pill for each consumer
    for i in range(num_consumers):
        tasks.put(None)
    # Wait for all of the tasks to finish
    tasks.join()
    print("ok")
| 28.482353
| 106
| 0.646014
| 312
| 2,421
| 4.807692
| 0.391026
| 0.036
| 0.043333
| 0.034
| 0.098667
| 0.070667
| 0.04
| 0
| 0
| 0
| 0
| 0.020868
| 0.247831
| 2,421
| 84
| 107
| 28.821429
| 0.802856
| 0.072284
| 0
| 0.033333
| 0
| 0
| 0.056747
| 0.035299
| 0
| 0
| 0
| 0
| 0.016667
| 1
| 0.05
| false
| 0
| 0.116667
| 0
| 0.183333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e237945177ee47426cc1fcc873291dbba403f32
| 3,317
|
py
|
Python
|
src/protean/core/event_handler.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
src/protean/core/event_handler.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
src/protean/core/event_handler.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
import inspect
import logging
from protean.container import Element, OptionsMixin
from protean.core.event import BaseEvent
from protean.exceptions import IncorrectUsageError
from protean.utils import DomainObjects, derive_element_class, fully_qualified_name
from protean.utils.mixins import HandlerMixin
logger = logging.getLogger(__name__)
class BaseEventHandler(Element, HandlerMixin, OptionsMixin):
    """Base Event Handler to be inherited by all event handlers"""

    element_type = DomainObjects.EVENT_HANDLER

    class Meta:
        abstract = True

    @classmethod
    def _default_options(cls):
        """Return default meta options; ``stream_name`` falls back to the
        associated aggregate's stream name when one is configured."""
        # getattr with a default replaces the original hasattr/getattr
        # dance -- one attribute lookup instead of two, same result.
        aggregate_cls = getattr(cls.meta_, "aggregate_cls", None)
        return [
            ("aggregate_cls", None),
            ("stream_name", aggregate_cls.meta_.stream_name if aggregate_cls else None),
            ("source_stream", None),
        ]

    def __new__(cls, *args, **kwargs):
        # The abstract base must never be instantiated directly.
        if cls is BaseEventHandler:
            raise TypeError("BaseEventHandler cannot be instantiated")
        return super().__new__(cls)
def event_handler_factory(element_cls, **opts):
    """Elevate a class into an Event Handler element and build its handler map.

    Raises:
        IncorrectUsageError: when the handler is associated with neither an
            aggregate nor a stream.
    """
    element_cls = derive_element_class(element_cls, BaseEventHandler, **opts)

    if not (element_cls.meta_.aggregate_cls or element_cls.meta_.stream_name):
        raise IncorrectUsageError(
            {
                "_entity": [
                    f"Event Handler `{element_cls.__name__}` needs to be associated with an aggregate or a stream"
                ]
            }
        )

    # Iterate through methods marked as `@handle` and construct a handler map
    #
    # Also, if `_target_cls` is an event, associate it with the event handler's
    # aggregate or stream
    methods = inspect.getmembers(element_cls, predicate=inspect.isroutine)
    for method_name, method in methods:
        # Skip dunder methods; only decorated methods carry `_target_cls`.
        if not (
            method_name.startswith("__") and method_name.endswith("__")
        ) and hasattr(method, "_target_cls"):
            # `_handlers` is a dictionary mapping the event to the handler method.
            if method._target_cls == "$any":
                # This replaces any existing `$any` handler, by design. An Event Handler
                # can have only one `$any` handler method.
                element_cls._handlers["$any"] = {method}
            else:
                element_cls._handlers[fully_qualified_name(method._target_cls)].add(
                    method
                )

            # Associate Event with the handler's stream
            if inspect.isclass(method._target_cls) and issubclass(
                method._target_cls, BaseEvent
            ):
                # Order of preference:
                # 1. Stream name defined in event
                # 2. Stream name defined for the event handler
                # 3. Stream name derived from aggregate
                stream_name = element_cls.meta_.stream_name or (
                    element_cls.meta_.aggregate_cls.meta_.stream_name
                    if element_cls.meta_.aggregate_cls
                    else None
                )
                method._target_cls.meta_.stream_name = (
                    method._target_cls.meta_.stream_name or stream_name
                )

    return element_cls
| 36.855556
| 114
| 0.619234
| 363
| 3,317
| 5.38292
| 0.319559
| 0.06653
| 0.053736
| 0.052201
| 0.136643
| 0.085977
| 0
| 0
| 0
| 0
| 0
| 0.001306
| 0.307507
| 3,317
| 89
| 115
| 37.269663
| 0.849369
| 0.177872
| 0
| 0.032787
| 0
| 0
| 0.082288
| 0.008856
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.114754
| 0
| 0.262295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e246664f07a32e8eef7dfd24b7f3cda19fa9734
| 7,508
|
py
|
Python
|
read_prepare_data.py
|
jlu-ilr-hydro/IPCC-Repots-Focus-Overview
|
bf631975eb6c3ea2cf2f8fe9382e3361ad700a6e
|
[
"Apache-2.0"
] | null | null | null |
read_prepare_data.py
|
jlu-ilr-hydro/IPCC-Repots-Focus-Overview
|
bf631975eb6c3ea2cf2f8fe9382e3361ad700a6e
|
[
"Apache-2.0"
] | null | null | null |
read_prepare_data.py
|
jlu-ilr-hydro/IPCC-Repots-Focus-Overview
|
bf631975eb6c3ea2cf2f8fe9382e3361ad700a6e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 10:12:26 2021
@author: Florian Jehn
"""
import os
import pandas as pd
import numpy as np
def _read_counts_dir(subdir):
    """Concatenate every per-report count file in Results/<subdir> into one df.

    Each CSV becomes one row (indexed by its file name without extension)
    in the transposed result.
    """
    base = "Results" + os.sep + subdir
    files = os.listdir(os.getcwd() + os.sep + base)
    all_df = pd.DataFrame()
    for file in files:
        file_df = pd.read_csv(base + os.sep + file, sep=";", index_col=0)
        file_df.columns = [file[:-4]]  # strip ".csv"
        all_df = pd.concat([all_df, file_df], axis=1)
    return all_df.transpose()


def read_ipcc_counts_temp():
    """reads all counts of temperatures for all reports and makes on df"""
    # Shared helper removes the copy-paste duplication with the rfc reader.
    return _read_counts_dir("temperatures")


def read_ipcc_counts_rfc():
    """reads all counts of reasons of concern for all reports and makes on df"""
    return _read_counts_dir("reasons_for_concern")
def read_false_positive():
    """reads in all the counted false/true positive rates for the temperatres in the
    IPCC and calculates a true positive rate for each entry"""
    files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "false_positive_check_files")
    all_df = pd.DataFrame()
    for file in files:
        # only read those files that contains the counting results
        if "results" not in file:
            continue
        file_df = pd.read_csv("Results" + os.sep + "false_positive_check_files" + os.sep + file, sep=",", index_col=0)
        # calculate the true positive rate
        file_df["True Positive Rate [%]"] = (file_df["n true positive"]/(file_df["n true positive"]+file_df["n false positive"]))*100
        # Arrange the df for seaborn: move the temperature out of the index
        # into a regular column so it can serve as a plotting variable.
        file_df["Temperature [°C]"] = file_df.index
        file_df.reset_index(inplace=True, drop=True)
        all_df = pd.concat([all_df, file_df])
    return all_df
def scale_counts(ipcc_counts):
    """Convert absolute counts to percentages of each row's total (in place)."""
    # Row totals are captured once, before any column is overwritten.
    row_totals = ipcc_counts.sum(axis=1)
    for column in ipcc_counts:
        ipcc_counts[column] = ipcc_counts[column] / row_totals * 100
    return ipcc_counts
def read_meta():
    """Load the per-report metadata table (tab-separated)."""
    meta_path = "Reports" + os.sep + "meta_data_reports.tsv"
    meta = pd.read_csv(meta_path, sep="\t")
    # Keep Year as a string label rather than a number.
    meta["Year"] = meta["Year"].astype("str")
    return meta
def group_temps(ipcc_counts):
    """Aggregate the half-degree temperature columns into three coarse bands."""
    bands = {
        "0.5°C - 2°C": [" 0.5°C", " 1°C", " 1.5°C", " 2°C"],
        "2.5°C - 4°C": [" 2.5°C", " 3°C", " 3.5°C", " 4°C"],
        "≥ 4.5°C": [" 4.5°C", " 5°C", " 5.5°C", " 6°C", " 6.5°C", " 7°C",
                    " 7.5°C", " 8°C", " 8.5°C", " 9°C", " 9.5°C", " 10°C"],
    }
    for label, columns in bands.items():
        # Plain series addition (not DataFrame.sum) keeps NaN propagation
        # identical to the original chained `+` expression.
        total = ipcc_counts[columns[0]]
        for column in columns[1:]:
            total = total + ipcc_counts[column]
        ipcc_counts[label] = total
    # The three new band columns sit after the 20 raw temperature columns.
    return ipcc_counts.iloc[:, 20:]
def merge_counts_meta(ipcc_counts, meta):
    """Join report metadata onto the counts via meta's "count_names" column."""
    merged = pd.merge(meta, ipcc_counts, right_index=True, left_on="count_names")
    return merged
def lookup_names():
    """Return a lookup dict mapping report PDF base names to their count-file names."""
    lookup_dict = {
        "IPCC_AR6_WGI_Full_Report":"counts_IPCC_AR6_WGI_Full_Report_parsed",
        "SROCC_FullReport_FINAL":"counts_SROCC_FullReport_FINAL_parsed",
        "210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES":"counts_210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES_parsed",
        "SR15_Full_Report_Low_Res":"counts_SR15_Full_Report_Low_Res_parsed",
        "SYR_AR5_FINAL_full":"counts_SYR_AR5_FINAL_full_wcover_parsed",
        "ipcc_wg3_ar5_full":"counts_ipcc_wg3_ar5_full_parsed",
        "WGIIAR5-PartA_FINAL":"counts_WGIIAR5-PartA_FINAL_parsed",
        "WGIIAR5-PartB_FINAL":"counts_WGIIAR5-PartB_FINAL_parsed",
        "WG1AR5_all_final":"counts_WG1AR5_all_final_parsed",
        "SREX_Full_Report-1":"counts_SREX_Full_Report-1_parsed",
        "SRREN_Full_Report-1":"counts_SRREN_Full_Report-1_parsed",
        "ar4_syr_full_report":"counts_ar4_syr_full_report_parsed",
        "ar4_wg2_full_report":"counts_ar4_wg2_full_report_parsed",
        "ar4_wg1_full_report-1":"counts_ar4_wg1_full_report-1_parsed",
        "ar4_wg3_full_report-1":"counts_ar4_wg3_full_report-1_parsed",
        "sroc_full-1":"counts_sroc_full-1_parsed",
        "srccs_wholereport-1":"counts_srccs_wholereport-1_parsed",
        "SYR_TAR_full_report":"counts_SYR_TAR_full_report_parsed",
        "WGII_TAR_full_report-2":"counts_WGII_TAR_full_report-2_parsed",
        "WGI_TAR_full_report":"counts_WGI_TAR_full_report_parsed",
        "WGIII_TAR_full_report":"counts_WGIII_TAR_full_report_parsed",
        "srl-en-1":"counts_srl-en-1_parsed",
        # NOTE(review): "parsedd" (double d) below looks like a typo, but it
        # may match the actual file name on disk -- verify before changing.
        "srtt-en-1":"counts_srtt-en-1_parsedd",
        "emissions_scenarios-1":"counts_emissions_scenarios-1_parsed",
        "av-en-1":"counts_av-en-1_parsed",
        "The-Regional-Impact":"counts_The-Regional-Impact_parsed",
        "2nd-assessment-en-1":"counts_2nd-assessment-en-1_parsed",
        "ipcc_sar_wg_III_full_report":"counts_ipcc_sar_wg_III_full_report_parsed",
        "ipcc_sar_wg_II_full_report":"counts_ipcc_sar_wg_II_full_report_parsed",
        "ipcc_sar_wg_I_full_report":"counts_ipcc_sar_wg_I_full_report_parsed",
        "climate_change_1994-2":"counts_climate_change_1994-2_parsed",
        # "ipcc-technical-guidelines-1994n-1":"", # could not read in, but also contains no temp mentions
        "ipcc_wg_I_1992_suppl_report_full_report":"counts_ipcc_wg_I_1992_suppl_report_full_report_parsed",
        "ipcc_wg_II_1992_suppl_report_full_report":"counts_ipcc_wg_II_1992_suppl_report_full_report_parsed",
        "ipcc_90_92_assessments_far_full_report":"counts_ipcc_90_92_assessments_far_full_report_parsed",
        "ipcc_far_wg_III_full_report":"counts_ipcc_far_wg_III_full_report_parsed",
        "ipcc_far_wg_II_full_report":"counts_ipcc_far_wg_II_full_report_parsed",
        "ipcc_far_wg_I_full_report":"counts_ipcc_far_wg_I_full_report_parsed",
    }
    return lookup_dict
def create_temp_keys():
    """Creates a list of strings for all temperatures the paper looked at"""
    # Temperatures run from 0.5 °C to 10.0 °C in 0.5 °C increments; whole
    # degrees are rendered without a decimal part (" 1°C"), halves with one
    # (" 0.5°C"). Each label carries a leading space for substring searching.
    labels = []
    for half_steps in range(1, 21):
        degrees = half_steps / 2
        if half_steps % 2 == 0:
            labels.append(" " + str(int(degrees)) + "°C")
        else:
            labels.append(" " + str(degrees) + "°C")
    return labels
def combine_all_raw_strings():
    """combines all raw strings into one big file to search through"""
    strings_dir = os.getcwd() + os.sep + "Raw IPCC Strings"
    # Every per-report CSV in the folder contributes its first column.
    report_files = [name for name in os.listdir(strings_dir) if name.endswith(".csv")]
    combined = " "
    for name in report_files:
        print("Starting with " + name)
        frame = pd.read_csv(strings_dir + os.sep + name, sep="\t", usecols=[0])
        pieces = frame[frame.columns[0]].tolist()
        combined += " ".join(str(piece) for piece in pieces)
    # this file is not included in the repository, as it is too large for Github
    with open(strings_dir + os.sep + "all_ipcc_strings.csv", 'w', encoding='utf-8') as f:
        f.write(combined)
# Script entry point: build the combined corpus file when run directly.
if __name__ == "__main__":
    combine_all_raw_strings()
| 48.128205
| 300
| 0.683404
| 1,187
| 7,508
| 4.002527
| 0.20219
| 0.088402
| 0.027784
| 0.055567
| 0.419491
| 0.320354
| 0.211534
| 0.175752
| 0.112608
| 0.099137
| 0
| 0.03261
| 0.183138
| 7,508
| 155
| 301
| 48.43871
| 0.737323
| 0.13439
| 0
| 0.110092
| 0
| 0
| 0.41668
| 0.297962
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091743
| false
| 0
| 0.027523
| 0
| 0.201835
| 0.009174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e2666a6e406e4ebd7fe6e6904bdb4696b8d2f47
| 404
|
py
|
Python
|
has33.py
|
CombatPompano81/Python-Snippets-Galore
|
c2fb9c6ebef0477895749db9f2aa0f87132a72d6
|
[
"Apache-2.0"
] | null | null | null |
has33.py
|
CombatPompano81/Python-Snippets-Galore
|
c2fb9c6ebef0477895749db9f2aa0f87132a72d6
|
[
"Apache-2.0"
] | null | null | null |
has33.py
|
CombatPompano81/Python-Snippets-Galore
|
c2fb9c6ebef0477895749db9f2aa0f87132a72d6
|
[
"Apache-2.0"
] | null | null | null |
# main function
def has33(nums):
    """Print and return whether *nums* contains two adjacent 3s.

    Prints 'True' or 'False' (same console output as before). BUGFIX: the
    original did ``return print(...)``, which always returns None; the
    boolean result is now returned so callers can actually use it.
    Handles empty and single-element lists (no adjacent pair -> False).
    """
    # zip(nums, nums[1:]) yields each adjacent pair exactly once.
    found = any(a == 3 and b == 3 for a, b in zip(nums, nums[1:]))
    print('True' if found else 'False')
    return found
# Exercise has33 on the sample inputs (expected output: True, False, True, False).
for sample in ([1, 3, 3], [3, 1, 3], [3, 3, 3], [1, 3, 1, 3]):
    has33(sample)
| 22.444444
| 75
| 0.569307
| 76
| 404
| 3.026316
| 0.460526
| 0.043478
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108014
| 0.289604
| 404
| 17
| 76
| 23.764706
| 0.69338
| 0.368812
| 0
| 0
| 0
| 0
| 0.035857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.333333
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e2726ca9cbe233a3e8bac00017eecef8153cd91
| 17,692
|
py
|
Python
|
survos2/frontend/plugins/objects.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 4
|
2017-10-10T14:47:16.000Z
|
2022-01-14T05:57:50.000Z
|
survos2/frontend/plugins/objects.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 1
|
2022-01-11T21:11:12.000Z
|
2022-01-12T08:22:34.000Z
|
survos2/frontend/plugins/objects.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 2
|
2018-03-06T06:31:29.000Z
|
2019-03-04T03:33:18.000Z
|
from survos2.config import Config
import numpy as np
from numpy.lib.function_base import flip
from qtpy import QtWidgets
from qtpy.QtWidgets import QPushButton, QRadioButton
from survos2.frontend.components.base import *
from survos2.frontend.components.entity import (
SmallVolWidget,
TableWidget,
setup_entity_table,
setup_bb_table,
)
from survos2.frontend.components.icon_buttons import IconButton
from survos2.frontend.control import Launcher
from survos2.frontend.plugins.base import *
from survos2.frontend.plugins.plugins_components import MultiSourceComboBox
from survos2.frontend.utils import FileWidget
from survos2.improc.utils import DatasetManager
from survos2.model import DataModel
from survos2.server.state import cfg
from survos2.frontend.plugins.features import FeatureComboBox
from survos2.frontend.plugins.annotations import LevelComboBox
from survos2.entity.patches import PatchWorkflow, organize_entities, make_patches
class ObjectComboBox(LazyComboBox):
    """Lazily-filled combo box listing the point/box objects that already
    exist on the server for the current workspace."""

    def __init__(self, full=False, header=(None, "None"), parent=None):
        # `full` is forwarded to the server query when the box is filled.
        self.full = full
        super().__init__(header=header, parent=parent)

    def fill(self):
        query = dict(workspace=True, full=self.full)
        found = Launcher.g.run("objects", "existing", **query)
        logger.debug(f"Result of objects existing: {found}")
        if not found:
            return
        for fid in found:
            entry = found[fid]
            # Only point- and box-kind objects are selectable here.
            if entry["kind"] in ("points", "boxes"):
                self.addItem(fid, entry["name"])
@register_plugin
class ObjectsPlugin(Plugin):
    """Plugin tab for managing "objects" (points / boxes / patches).

    Shows a combo box acting as an "Add objects" menu plus one ObjectsCard
    per object existing on the server.
    """

    __icon__ = "fa.picture-o"
    __pname__ = "objects"
    __views__ = ["slice_viewer"]
    __tab__ = "objects"

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.vbox = VBox(self, spacing=10)
        # Combo used as an "Add objects" menu; index 0 is the placeholder entry.
        self.objects_combo = ComboBox()
        self.vbox.addWidget(self.objects_combo)
        # Maps objects id -> ObjectsCard widget currently shown.
        self.existing_objects = {}
        self.objects_layout = VBox(margin=0, spacing=5)
        self.objects_combo.currentIndexChanged.connect(self.add_objects)
        self.vbox.addLayout(self.objects_layout)
        self._populate_objects()

    def _populate_objects(self):
        """Fill the combo with the object types reported as available by the
        server, grouped under disabled category-header rows."""
        self.objects_params = {}
        self.objects_combo.clear()
        self.objects_combo.addItem("Add objects")
        params = dict(
            workspace=DataModel.g.current_session + "@" + DataModel.g.current_workspace
        )
        result = Launcher.g.run("objects", "available", **params)
        print(result)
        logger.debug(f"objects available: {result}")
        if result:
            all_categories = sorted(set(p["category"] for p in result))
            for i, category in enumerate(all_categories):
                self.objects_combo.addItem(category)
                # Category header rows are made non-selectable.
                # NOTE(review): len(self.objects_params) grows inside this loop,
                # so the disabled-row index depends on insertion order - confirm
                # it targets the category row just added.
                self.objects_combo.model().item(
                    i + len(self.objects_params) + 1
                ).setEnabled(False)
                for f in [p for p in result if p["category"] == category]:
                    self.objects_params[f["name"]] = f["params"]
                    self.objects_combo.addItem(f["name"])

    def add_objects(self, idx):
        """Slot for currentIndexChanged: ask the server to create a new blank
        object of the selected kind, then add a card for it."""
        logger.debug(f"Add objects with idx {idx}")
        if idx == 0 or idx == -1:
            # Placeholder entry or cleared selection - nothing to create.
            return
        # self.objects_combo.setCurrentIndex(0)
        print(idx)
        order = idx - 2
        # NOTE(review): order == 1 is seeded from blank_boxes.csv, everything
        # else from blank_entities.csv - presumably order maps to the object
        # kind chosen in the combo; confirm against the server-side handler.
        if order == 1:
            params = dict(
                order=order,
                workspace=DataModel.g.current_session
                + "@"
                + DataModel.g.current_workspace,
                fullname="survos2/entity/blank_boxes.csv",
            )
        else:
            params = dict(
                order=order,
                workspace=DataModel.g.current_session
                + "@"
                + DataModel.g.current_workspace,
                fullname="survos2/entity/blank_entities.csv",
            )
        result = Launcher.g.run("objects", "create", **params)
        if result:
            objectsid = result["id"]
            objectsname = result["name"]
            objectsfullname = result["fullname"]
            objectstype = result["kind"]
            self._add_objects_widget(
                objectsid, objectsname, objectsfullname, objectstype, True
            )

    def _add_objects_widget(
        self, objectsid, objectsname, objectsfullname, objectstype, expand=False
    ):
        """Create an ObjectsCard for one object, register it in
        self.existing_objects and return the widget."""
        logger.debug(
            f"Add objects {objectsid} {objectsname} {objectsfullname} {objectstype}"
        )
        widget = ObjectsCard(objectsid, objectsname, objectsfullname, objectstype)
        widget.showContent(expand)
        self.objects_layout.addWidget(widget)
        # Persist the source filename into the dataset's metadata.
        src = DataModel.g.dataset_uri(objectsid, group="objects")
        with DatasetManager(src, out=None, dtype="uint32", fillvalue=0) as DM:
            src_dataset = DM.sources[0]
            src_dataset.set_metadata("fullname", objectsfullname)
        self.existing_objects[objectsid] = widget
        return widget

    def clear(self):
        """Detach every card and forget all tracked objects."""
        for objects in list(self.existing_objects.keys()):
            self.existing_objects.pop(objects).setParent(None)
        self.existing_objects = {}

    def setup(self):
        """Synchronise the displayed cards with the objects existing on the
        server: drop stale cards, add cards for new objects."""
        self._populate_objects()
        params = dict(
            workspace=DataModel.g.current_session + "@" + DataModel.g.current_workspace
        )
        result = Launcher.g.run("objects", "existing", **params)
        logger.debug(f"objects result {result}")
        if result:
            # Remove objects that no longer exist in the server
            print(self.existing_objects.keys())
            for objects in list(self.existing_objects.keys()):
                if objects not in result:
                    self.existing_objects.pop(objects).setParent(None)
            # Populate with new entity if any
            for entity in sorted(result):
                if entity in self.existing_objects:
                    continue
                # (variable name typo "enitity" kept - renaming is out of scope here)
                enitity_params = result[entity]
                objectsid = enitity_params.pop("id", entity)
                objectsname = enitity_params.pop("name", entity)
                objectsfullname = enitity_params.pop("fullname", entity)
                objectstype = enitity_params.pop("kind", entity)
                print(f"type: {objectstype}")
                if objectstype != "unknown":
                    widget = self._add_objects_widget(
                        objectsid, objectsname, objectsfullname, objectstype
                    )
                    widget.update_params(params)
                    self.existing_objects[objectsid] = widget
                else:
                    logger.debug(
                        "+ Skipping loading entity: {}, {}, {}".format(
                            objectsid, objectsname, objectstype
                        )
                    )
class ObjectsCard(Card):
    """Card widget for one object (points / boxes / patches): lets the user
    pick its CSV file, adjust load geometry (scale/offset/crop), view the
    entities in the viewer and load them into a table."""

    def __init__(
        self, objectsid, objectsname, objectsfullname, objectstype, parent=None
    ):
        super().__init__(
            title=objectsname,
            collapsible=True,
            removable=True,
            editable=True,
            parent=parent,
        )
        self.objectsid = objectsid
        self.objectsname = objectsname
        self.object_scale = 1.0
        self.objectsfullname = objectsfullname
        self.objectstype = objectstype
        self.widgets = {}
        # File picker for the CSV containing the entities.
        self.filewidget = FileWidget(extensions="*.csv", save=False)
        self.filewidget.path.setText(self.objectsfullname)
        self.add_row(self.filewidget)
        self.filewidget.path_updated.connect(self.load_data)
        # NOTE(review): compute_btn is created but never wired or added to a row.
        self.compute_btn = PushButton("Compute")
        self.view_btn = PushButton("View", accent=True)
        self.get_btn = PushButton("Get", accent=True)
        # Geometry controls applied to the CSV coordinates when loading.
        self._add_param("scale", title="Scale: ", type="Float", default=1)
        self._add_param("offset", title="Offset: ", type="FloatOrVector", default=0)
        self._add_param(
            "crop_start", title="Crop Start: ", type="FloatOrVector", default=0
        )
        self._add_param(
            "crop_end", title="Crop End: ", type="FloatOrVector", default=9000
        )
        self.flipxy_checkbox = CheckBox(checked=True)
        self.add_row(HWidgets(None, self.flipxy_checkbox, Spacing(35)))
        self.add_row(HWidgets(None, self.view_btn, self.get_btn, Spacing(35)))
        self.view_btn.clicked.connect(self.view_objects)
        self.get_btn.clicked.connect(self.get_objects)
        # NOTE(review): the four cfg values read from the widgets here are
        # immediately overwritten by the hard-coded defaults just below, so
        # the widget reads are dead stores - confirm which set is intended.
        cfg.object_scale = self.widgets["scale"].value()
        cfg.object_offset = self.widgets["offset"].value()
        cfg.object_crop_start = self.widgets["crop_start"].value()
        cfg.object_crop_end = self.widgets["crop_end"].value()
        cfg.object_scale = 1.0
        cfg.object_offset = (0,0,0)
        cfg.object_crop_start = (0,0,0)
        cfg.object_crop_end = (1e9,1e9,1e9)
        if self.objectstype == "patches":
            # Patch objects additionally need an annotation level, a feature
            # source and controls for building entity masks / patch volumes.
            self._add_annotations_source()
            self.entity_mask_bvol_size = LineEdit3D(default=64, parse=int)
            self._add_feature_source()
            self.make_entity_mask_btn = PushButton("Make entity mask", accent=True)
            self.make_entity_mask_btn.clicked.connect(self.make_entity_mask)
            self.make_patches_btn = PushButton("Make patches", accent=True)
            self.make_patches_btn.clicked.connect(self.make_patches)
            self.add_row(HWidgets(None, self.entity_mask_bvol_size, self.make_entity_mask_btn, Spacing(35)))
            self.add_row(HWidgets(None, self.make_patches_btn, Spacing(35)))
        # Table showing the loaded entities; shared globally through cfg.
        self.table_control = TableWidget()
        self.add_row(self.table_control.w, max_height=500)
        cfg.entity_table = self.table_control

    def _add_param(self, name, title=None, type="String", default=None):
        """Create a line-edit widget for a named parameter, keyed in
        self.widgets. Unknown `type` values add nothing."""
        if type == "Int":
            p = LineEdit(default=default, parse=int)
        elif type == "Float":
            p = LineEdit(default=default, parse=float)
        elif type == "FloatOrVector":
            p = LineEdit3D(default=default, parse=float)
        elif type == "IntOrVector":
            p = LineEdit3D(default=default, parse=int)
        else:
            p = None
        if title is None:
            title = name
        if p:
            self.widgets[name] = p
            self.add_row(HWidgets(None, title, p, Spacing(35)))

    def load_data(self, path):
        """Slot for the file picker: remember the newly chosen CSV path."""
        self.objectsfullname = path
        print(f"Setting objectsfullname: {self.objectsfullname}")

    def card_deleted(self):
        """Remove this object on the server and detach the card on success."""
        params = dict(objects_id=self.objectsid, workspace=True)
        result = Launcher.g.run("objects", "remove", **params)
        if result["done"]:
            self.setParent(None)
            self.table_control = None

    def _add_annotations_source(self):
        """Add a combo selecting the annotation level (patches mode only)."""
        self.annotations_source = LevelComboBox(full=True)
        self.annotations_source.fill()
        self.annotations_source.setMaximumWidth(250)
        widget = HWidgets(
            "Annotation:", self.annotations_source, Spacing(35), stretch=1
        )
        self.add_row(widget)

    def card_title_edited(self, newtitle):
        """Rename the object on the server; return whether it succeeded."""
        logger.debug(f"Edited entity title {newtitle}")
        params = dict(objects_id=self.objectsid, new_name=newtitle, workspace=True)
        result = Launcher.g.run("objects", "rename", **params)
        return result["done"]

    def view_objects(self):
        """Ask the viewer (via the client event pipe) to display this object."""
        logger.debug(f"Transferring objects {self.objectsid} to viewer")
        cfg.ppw.clientEvent.emit(
            {
                "source": "objects",
                "data": "view_objects",
                "objects_id": self.objectsid,
                "flipxy": self.flipxy_checkbox.value(),
            }
        )

    def update_params(self, params):
        """Refresh card state from server-side parameters (currently only the
        CSV path)."""
        if "fullname" in params:
            self.objectsfullname = params["fullname"]

    def _add_feature_source(self):
        """Add a combo selecting the feature used as the image source."""
        self.feature_source = FeatureComboBox()
        self.feature_source.fill()
        self.feature_source.setMaximumWidth(250)
        widget = HWidgets("Feature:", self.feature_source, Spacing(35), stretch=1)
        self.add_row(widget)

    def get_objects(self):
        """Push the geometry settings to the server and load the entities CSV
        into the card's table (dispatching on the object kind)."""
        cfg.object_scale = self.widgets["scale"].value()
        cfg.object_offset = self.widgets["offset"].value()
        cfg.object_crop_start = self.widgets["crop_start"].value()
        cfg.object_crop_end = self.widgets["crop_end"].value()
        dst = DataModel.g.dataset_uri(self.objectsid, group="objects")
        print(f"objectsfullname: {self.objectsfullname}")
        params = dict(
            dst=dst,
            fullname=self.objectsfullname,
            scale=cfg.object_scale,
            offset=cfg.object_offset,
            crop_start=cfg.object_crop_start,
            crop_end=cfg.object_crop_end,
        )
        logger.debug(f"Getting objects with params {params}")
        result = Launcher.g.run("objects", "update_metadata", workspace=True, **params)
        # NOTE(review): if objectstype is none of the three kinds below,
        # `tabledata` is unbound and the code after the chain raises NameError.
        if self.objectstype == "points":
            tabledata, self.entities_df = setup_entity_table(
                self.objectsfullname,
                scale=cfg.object_scale,
                offset=cfg.object_offset,
                crop_start=cfg.object_crop_start,
                crop_end=cfg.object_crop_end,
                flipxy=self.flipxy_checkbox.value()
            )
        elif self.objectstype == "boxes":
            tabledata, self.entities_df = setup_bb_table(
                self.objectsfullname,
                scale=cfg.object_scale,
                offset=cfg.object_offset,
                crop_start=cfg.object_crop_start,
                crop_end=cfg.object_crop_end,
                flipxy=self.flipxy_checkbox.value()
            )
        elif self.objectstype == "patches":
            tabledata, self.entities_df = setup_entity_table(
                self.objectsfullname,
                scale=cfg.object_scale,
                offset=cfg.object_offset,
                crop_start=cfg.object_crop_start,
                crop_end=cfg.object_crop_end,
                flipxy=self.flipxy_checkbox.value()
            )
        cfg.tabledata = tabledata
        self.table_control.set_data(tabledata)
        print(f"Loaded tabledata {tabledata}")
        # NOTE(review): set_data is called twice with the same data - likely redundant.
        self.table_control.set_data(tabledata)
        self.collapse()
        self.expand()

    def make_entity_mask(self):
        """Rasterise the loaded entities into a mask volume and store it as a
        new raw feature in the workspace."""
        src = DataModel.g.dataset_uri(self.feature_source.value(), group="features")
        with DatasetManager(src, out=None, dtype="float32", fillvalue=0) as DM:
            src_array = DM.sources[0][:]
        entity_arr = np.array(self.entities_df)
        bvol_dim = self.entity_mask_bvol_size.value()
        # Shift entity coordinates by the bounding-volume dimensions
        # (presumably to centre each mask - TODO confirm half vs full extent).
        entity_arr[:, 0] -= bvol_dim[0]
        entity_arr[:, 1] -= bvol_dim[1]
        entity_arr[:, 2] -= bvol_dim[2]
        from survos2.entity.entities import make_entity_mask
        gold_mask = make_entity_mask(
            src_array, entity_arr, flipxy=True, bvol_dim=bvol_dim
        )[0]
        # create new raw feature
        params = dict(feature_type="raw", workspace=True)
        result = Launcher.g.run("features", "create", **params)
        if result:
            fid = result["id"]
            ftype = result["kind"]
            fname = result["name"]
            logger.debug(f"Created new object in workspace {fid}, {ftype}, {fname}")
            dst = DataModel.g.dataset_uri(fid, group="features")
            with DatasetManager(dst, out=dst, dtype="float32", fillvalue=0) as DM:
                DM.out[:] = gold_mask
            cfg.ppw.clientEvent.emit(
                {"source": "objects_plugin", "data": "refresh", "value": None}
            )

    def make_patches(self):
        """Build training patch volumes around the entities using the selected
        feature as image source and the selected annotation level as proposal."""
        src = DataModel.g.dataset_uri(self.feature_source.value(), group="features")
        with DatasetManager(src, out=None, dtype="float32", fillvalue=0) as DM:
            src_array = DM.sources[0][:]
        objects_scale = 1.0
        entity_meta = {
            "0": {
                "name": "class1",
                "size": np.array((15, 15, 15)) * objects_scale,
                "core_radius": np.array((7, 7, 7)) * objects_scale,
            },
        }
        entity_arr = np.array(self.entities_df)
        combined_clustered_pts, classwise_entities = organize_entities(
            src_array, entity_arr, entity_meta, plot_all=False
        )
        wparams = {}
        wparams["entities_offset"] = (0, 0, 0)
        wparams["entity_meta"] = entity_meta
        wparams["workflow_name"] = "Make_Patches"
        wparams["proj"] = DataModel.g.current_workspace
        wf = PatchWorkflow(
            [src_array], combined_clustered_pts, classwise_entities, src_array, wparams, combined_clustered_pts
        )
        src = DataModel.g.dataset_uri(self.annotations_source.value().rsplit("/", 1)[-1], group="annotations")
        with DatasetManager(src, out=None, dtype="uint16", fillvalue=0) as DM:
            src_dataset = DM.sources[0]
            # Mask off all but the low 4 bits.
            # NOTE(review): presumably the label id lives in the low nibble of
            # the uint16 annotation value - confirm against the annotation model.
            anno_level = src_dataset[:] & 15
        logger.debug(f"Obtained annotation level with labels {np.unique(anno_level)}")
        logger.debug(f"Making patches in path {src_dataset._path}")
        train_v_density = make_patches(wf, entity_arr, src_dataset._path,
            proposal_vol=(anno_level > 0)* 1.0,
            padding=self.entity_mask_bvol_size.value(), num_augs=0, max_vols=-1)
        self.patches = train_v_density
        cfg.ppw.clientEvent.emit(
            {"source": "panel_gui", "data": "view_patches", "patches_fullname": train_v_density}
        )
| 37.562633
| 111
| 0.603154
| 1,938
| 17,692
| 5.312694
| 0.155831
| 0.024476
| 0.017677
| 0.013986
| 0.395105
| 0.299242
| 0.246989
| 0.225622
| 0.18075
| 0.174145
| 0
| 0.010387
| 0.287135
| 17,692
| 470
| 112
| 37.642553
| 0.805978
| 0.009552
| 0
| 0.239583
| 0
| 0
| 0.091179
| 0.007422
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052083
| false
| 0
| 0.049479
| 0
| 0.127604
| 0.018229
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e28319339ecb10a654afec47c04531f1e4fc2e5
| 5,459
|
py
|
Python
|
tests/benchmark/preprocess_img/preproc.py
|
mpascucci/AST-image-processing
|
54111e874237f0c146760d514eea96131177878a
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2020-11-24T15:55:35.000Z
|
2021-12-31T11:52:56.000Z
|
tests/benchmark/preprocess_img/preproc.py
|
mpascucci/AST-image-processing
|
54111e874237f0c146760d514eea96131177878a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-11-24T15:46:15.000Z
|
2020-11-24T15:46:15.000Z
|
tests/benchmark/preprocess_img/preproc.py
|
mpascucci/AST-image-processing
|
54111e874237f0c146760d514eea96131177878a
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-02-04T10:08:43.000Z
|
2022-02-21T02:00:47.000Z
|
from tqdm import tqdm
import os
import glob
import pickle
import numpy as np
from imageio import imread, imwrite
import astimp
from multiprocessing import Pool, cpu_count
from functools import partial
class ErrorInPreproc(Exception):
    """Raised when the preprocessing step recorded an error for an image."""
class Dataset():
    """Collection of files selected from a base folder by glob patterns."""

    def __init__(self, base_path, glob_patterns=('*.jpg', '*.JPG', '*.png', "*.PNG")):
        """base_path : path to the folder where the files are stored
        glob_patterns : a list of patterns for selecting files (e.g. ['*.jpg'])"""
        assert os.path.exists(
            base_path), "input folder '{}' not found".format(base_path)
        self.base_path = base_path
        # Accumulate matches for every requested pattern.
        matches = []
        for pat in glob_patterns:
            matches.extend(glob.glob(os.path.join(base_path, pat)))
        self.paths = matches
        # Name = basename up to the first dot (so "a.jpg.pickle" -> "a").
        self.names = [os.path.basename(p).split('.')[0] for p in self.paths]
class PreprocResults():
    """Access to preprocessed pickled AST images.

    Reads ``<pickles_folder>/*.pickle`` plus an optional ``error_log.txt``
    whose lines have the form ``<name>,<message>``.
    """

    def __init__(self, pickles_folder):
        """pickles_folder : directory holding the pickles and the error log.

        Raises FileNotFoundError if the folder does not exist.
        """
        if not os.path.exists(pickles_folder):
            raise FileNotFoundError("{} does not exit".format(pickles_folder))
        self.pf = pickles_folder
        self.ds = Dataset(self.pf, glob_patterns=("*.pickle",))
        self.names = self.ds.names
        errorlog_path = os.path.join(pickles_folder, "error_log.txt")
        if os.path.exists(errorlog_path):
            with open(errorlog_path, 'r') as f:
                lines = f.readlines()
            # Map "name" (text before the first comma) -> message after it.
            self.errors = {line.split(',')[0]: line.split(',')[1]
                           for line in lines}
        else:
            # BUGFIX: was an empty *list*, inconsistent with the dict branch
            # above (lookups like self.errors[name] would fail on it).
            self.errors = {}

    def get_by_name(self, name):
        """Load a pickle by name.
        Pickles have the same name as the images
        example:
        234_SLR_ESBL.jpg <-> 234_SLR_ESBL.jpg.pickle

        Raises ErrorInPreproc if a non-informational error was logged for
        this name, FileNotFoundError if no pickle exists.
        """
        # BUGFIX: the original compared a *list* (result of split) with the
        # string 'INFO', which is always unequal and made the INFO exemption
        # dead code; compare the first token instead.
        # NOTE(review): info lines are written with "INFO : <fname>" on the
        # key side, so they may never match `name` here anyway - confirm the
        # intended log format.
        if name in self.errors and self.errors[name].strip().split(" ")[0] != 'INFO':
            raise ErrorInPreproc(self.errors[name].strip())
        path = os.path.join(self.pf, name+'.pickle')
        if not os.path.exists(path):
            raise FileNotFoundError("Pickle {} not found.".format(path))
        with open(path, 'rb') as f:
            p = pickle.load(f)
        return p

    def __getitem__(self, name):
        return self.get_by_name(name)

    def get_all(self):
        """Load all pickles in input folder"""
        output = []
        for path in tqdm(self.ds.paths, desc="Loading pickles"):
            with open(path, 'rb') as f:
                p = pickle.load(f)
            output.append(p)
        return output
def preprocess_one_image(path):
    """Run the AST analysis on one image file and bundle the results in a dict."""
    image = np.array(imread(path))  # load image
    ast = astimp.AST(image)
    crop = ast.crop
    circles = ast.circles
    pellets = ast.pellets
    labels = ast.labels_text
    # NOTE: the preprocessing object is only available when at least one
    # pellet/circle was detected; store None otherwise.
    preproc = ast.preproc if len(circles) != 0 else None
    return {
        "ast": ast,
        "preproc": preproc,
        "circles": circles,
        "pellets": pellets,
        "labels": labels,
        "crop": crop,
        "fname": os.path.basename(path),
        "inhibitions": ast.inhibitions,
    }
def pickle_one_preproc(idx, output_path, image_paths, error_list, skip_existing=False, mute=True):
    """Preprocess image_paths[idx] and pickle the result into output_path.

    Errors (and "no pellets" notices) are recorded into error_list[idx].
    Set mute=False to log progress messages via tqdm.write. Always returns None.
    """
    if mute:
        log_function = lambda x: x
    else:
        log_function = tqdm.write
    path = image_paths[idx]
    # BUGFIX: compute fname before the try-block; the except handler below
    # references it, which could previously raise NameError if the failure
    # happened before fname was assigned.
    fname = os.path.basename(path)  # file name from path
    try:
        # create output path
        ofpath = os.path.join(
            output_path, f"{fname}.pickle")  # output file path
        if skip_existing:
            # skip if output file exists already
            if os.path.exists(ofpath):
                return None
        # WARNING for an unknown reason the pickle call must be inside this function
        pobj = preprocess_one_image(path)
        with open(ofpath, 'wb') as f:
            pickle.dump(pobj, f)
        if len(pobj['circles']) == 0:
            # if no pellet found
            error_list[idx] = "INFO : {}, No pellets found".format(fname)
            log_function("No pellet found in {}".format(fname))
    except Exception as e:
        ex_text = ', '.join(map(lambda x: str(x), e.args))
        error_list[idx] = "{}, {}".format(fname, ex_text)
        # BUGFIX: report the number of slots actually holding a message, not
        # the total image count (error_list is pre-allocated with "" entries).
        failed = sum(1 for err in error_list if err)
        log_function("Failed images: {} - {}".format(failed, ex_text))
    return None
def preprocess(img_paths, output_path, skip_existing=False, parallel=True):
    """preprocess images and pickle the preproc object.
    img_paths : a list of paths of the image files."""
    # Returns the list of error strings collected during preprocessing.
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    # One (initially empty) error slot per image.
    errors = [""]*len(img_paths)
    if parallel:
        jobs = cpu_count()
        print("Running in parallel on {} processes".format(jobs))
        f = partial(pickle_one_preproc,
                    image_paths=img_paths,
                    output_path=output_path,
                    error_list=errors,
                    skip_existing=skip_existing
                    )
        # NOTE(review): Pool workers receive pickled *copies* of `errors`;
        # in-place writes made by pickle_one_preproc in child processes do
        # not propagate back here, so in parallel mode the returned list is
        # likely always empty - confirm and consider collecting results
        # through the pool's return values instead.
        with Pool(jobs) as p:
            list(tqdm(p.imap(f,range(len(img_paths))), total=len(img_paths)))
        # Drop the untouched "" slots.
        # NOTE(review): the sequential branch below does not apply this
        # filter, so its return value still contains "" entries - confirm
        # which behavior callers expect.
        errors = [e for e in errors if e != ""]
    else:
        for idx in tqdm(range(len(img_paths)), desc="Preprocessing"):
            pickle_one_preproc(idx, output_path, img_paths, errors, skip_existing, mute=False)
    return errors
| 31.923977
| 98
| 0.596263
| 703
| 5,459
| 4.499289
| 0.257468
| 0.02466
| 0.022763
| 0.017072
| 0.067341
| 0.036674
| 0.018337
| 0.018337
| 0.018337
| 0.018337
| 0
| 0.00283
| 0.287965
| 5,459
| 170
| 99
| 32.111765
| 0.810908
| 0.14325
| 0
| 0.078261
| 0
| 0
| 0.073446
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 1
| 0.069565
| false
| 0.008696
| 0.078261
| 0.008696
| 0.234783
| 0.008696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e28b70b57732d2994e0b212e99122e11d61d96f
| 1,024
|
py
|
Python
|
src/main.py
|
Evelkos/PAM-and-CLARA
|
26fbb8d2d4a7924ce1d0d504c4b23bac38238c69
|
[
"MIT"
] | null | null | null |
src/main.py
|
Evelkos/PAM-and-CLARA
|
26fbb8d2d4a7924ce1d0d504c4b23bac38238c69
|
[
"MIT"
] | null | null | null |
src/main.py
|
Evelkos/PAM-and-CLARA
|
26fbb8d2d4a7924ce1d0d504c4b23bac38238c69
|
[
"MIT"
] | null | null | null |
from clustering_algorithms import CLARA, PAM, get_initial_points
from data_loaders import load_data
from timer import Timer
from visualizers import plot_data
# Dataset selection: exactly one FILENAME is active; the commented lines are
# alternative benchmark datasets that can be swapped in.
# FILENAME = "datasets/artificial/sizes3.arff"
FILENAME = "datasets/artificial/zelnik4.arff"
# FILENAME = "datasets/artificial/xclara.arff"
# FILENAME = "datasets/real-world/glass.arff"
def run_clara(data, points):
    """Cluster *points* with CLARA (k = number of classes) and return the
    labelled result DataFrame."""
    algorithm = CLARA(points, len(data["classes"]), labels=data["classes"])
    algorithm.run()
    return algorithm.get_result_df()
def run_pam(data, points):
    """Cluster *points* with PAM (k = number of classes) and return the
    labelled result DataFrame."""
    algorithm = PAM(points, len(data["classes"]), labels=data["classes"])
    algorithm.run()
    return algorithm.get_result_df()
# Script entry point: load the selected dataset, cluster it with PAM and
# plot the resulting cluster assignment.
if __name__ == "__main__":
    data = load_data(FILENAME)
    # plot_data(data["df"], data["classes"], data["class_column"])
    points = get_initial_points(data["df"], data["coordinates_columns"])
    # result = run_clara(data, points)
    result = run_pam(data, points)
    plot_data(
        result, data["classes"], "cluster", attributes_names=data["coordinates_columns"]
    )
| 30.117647
| 88
| 0.709961
| 133
| 1,024
| 5.233083
| 0.323308
| 0.094828
| 0.112069
| 0.086207
| 0.106322
| 0.106322
| 0.106322
| 0
| 0
| 0
| 0
| 0.002296
| 0.149414
| 1,024
| 33
| 89
| 31.030303
| 0.796785
| 0.22168
| 0
| 0
| 0
| 0
| 0.154235
| 0.040455
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e2a9766e0a79f77304a55be682d4bc167bde209
| 4,459
|
py
|
Python
|
src/utils.py
|
zimonitrome/AbstractionNet
|
a037b696ccac015936d60026cb1ac4ebafc68371
|
[
"MIT"
] | null | null | null |
src/utils.py
|
zimonitrome/AbstractionNet
|
a037b696ccac015936d60026cb1ac4ebafc68371
|
[
"MIT"
] | null | null | null |
src/utils.py
|
zimonitrome/AbstractionNet
|
a037b696ccac015936d60026cb1ac4ebafc68371
|
[
"MIT"
] | null | null | null |
import torch
from einops import rearrange
import svgwrite
###########################################
# Normalization / Standardization functions
###########################################
def normalize_functional(tensor: torch.Tensor, mean: list, std: list):
    """
    Standardizes tensor in the channel dimension (dim -3) using mean and std.
    [... C H W] -> [... C H W]
    """
    # Reshape the per-channel statistics so they broadcast over H and W.
    device = tensor.device
    mu = torch.tensor(mean).view(-1, 1, 1).to(device)
    sigma = torch.tensor(std).view(-1, 1, 1).to(device)
    return tensor.sub(mu).div(sigma)
def unnormalize_functional(tensor: torch.Tensor, mean: list, std: list):
    """
    Un-standardizes tensor in the channel dimension (dim -3) using mean and std.
    Also clips the tensor to be in the range [0, 1].
    [... C H W] -> [... C H W]
    """
    # Reshape the per-channel statistics so they broadcast over H and W.
    device = tensor.device
    mu = torch.tensor(mean).view(-1, 1, 1).to(device)
    sigma = torch.tensor(std).view(-1, 1, 1).to(device)
    return tensor.mul(sigma).add(mu).clamp(0, 1)
def unnormalize_to(x, x_min, x_max):
    """
    Affine map of x from [0, 1] to [x_min, x_max]:
    0 -> x_min and 1 -> x_max.
    """
    span = x_max - x_min
    return x * span + x_min
############################
# Image convertion functions
############################
def rgba_to_rgb(rgba: torch.Tensor):
    """
    Converts a 4-channel RGBA tensor into a 3-channel RGB tensor.
    Multiplies the first 3 (color) channels with the last (alpha) channel.
    [... 4 H W] -> [... 3 H W]
    """
    # DOCFIX: the original docstring said "3 channels into 4", which inverted
    # the actual direction of the conversion.
    return rgba[..., :-1, :, :] * rgba[..., -1:, :, :]
def rgb_to_rgba(rgb: torch.Tensor, fill: float = 1.0):
    """
    Converts a 3-channel RGB tensor into a 4-channel RGBA tensor.
    The alpha layer is filled with 1 by default, but another fill value
    can be specified.
    [... 3 H W] -> [... 4 H W]
    """
    # DOCFIX: the original docstring said "4 channels into 3", which inverted
    # the actual direction of the conversion.
    alpha_channel = torch.full_like(rgb[..., :1, :, :], fill_value=fill)
    return torch.concat([rgb, alpha_channel], dim=-3)
###########################################
# Alpha compositing/decompositing functions
###########################################
def alpha_composite(base, added, eps=1e-8):
    """
    Composite two tensors: layer `added` on top of `base`, where the last
    channel is assumed to be an alpha channel. `eps` guards the division
    when the combined alpha is zero.
    [... C H W], [... C H W] -> [... C H W]
    """
    # Split each input into its color part and its alpha part.
    base_color, base_alpha = base[..., :-1, :, :], base[..., -1:, :, :]
    added_color, added_alpha = added[..., :-1, :, :], added[..., -1:, :, :]
    # https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
    out_alpha = (1 - added_alpha) * base_alpha + added_alpha
    weighted = (1 - added_alpha) * base_alpha * base_color + added_alpha * added_color
    out_color = weighted / (out_alpha + eps)
    # Re-combine new color and alpha
    return torch.concat([out_color, out_alpha], dim=-3)
def alpha_composite_multiple(images_tensor):
    """
    Composite a stack of N images (first to last) into a single image.
    Assumes the last channel is an alpha channel.
    [... N C H W] -> [... C H W]
    """
    # Bring the N axis to the front so we can iterate over layers
    # (equivalent to einops rearrange "... N C H W -> N ... C H W").
    layers = images_tensor.movedim(-4, 0)
    # Start from the bottom layer and stack the rest on top.
    result = layers[0]
    for layer in layers[1:]:
        # TODO: Possibly need to add .copy() to prevent assignment error in autograd.
        result = alpha_composite(result, layer)
    return result
def get_visible_mask(shapes):
    """
    Inputs a set of rendered images where C > 1 and the last channel is an
    alpha channel. Assuming the images are composited first to last
    (N=0, 1, 2...), returns a per-image mask of the alpha each image
    contributes to the final composition.
    [... N C H W] -> [... N 1 H W]

    DOCFIX: the original docstring claimed the output shape is [... N H W],
    but the trailing unsqueeze adds a singleton channel dimension.
    """
    # Iterate topmost layer first (flip the N axis after moving it to front;
    # movedim is equivalent to the previous einops rearranges).
    layers = shapes.movedim(-4, 0).flip(0)
    # Alpha already claimed by the layers composited above the current one.
    covered = torch.zeros_like(layers[0, ..., 0, :, :])
    masks = torch.zeros_like(layers[..., 0, :, :])  # one mask per layer
    for i, layer in enumerate(layers):
        # Invert a-over-b compositing: the visible part of this layer's alpha
        # is whatever is not already covered from above.
        layer_alpha = layer[..., -1, :, :]
        masks[i] = layer_alpha - covered * layer_alpha
        covered = (1 - layer_alpha) * covered + layer_alpha
    # Restore original layer order and axis layout, add singleton channel.
    return masks.flip(0).movedim(0, -3).unsqueeze(-3)
| 36.54918
| 113
| 0.589146
| 637
| 4,459
| 3.99529
| 0.241758
| 0.016503
| 0.016503
| 0.00943
| 0.267584
| 0.262868
| 0.179961
| 0.179961
| 0.140668
| 0.121022
| 0
| 0.019009
| 0.22135
| 4,459
| 122
| 114
| 36.54918
| 0.713998
| 0.389549
| 0
| 0.095238
| 0
| 0
| 0.032188
| 0
| 0
| 0
| 0
| 0.008197
| 0
| 1
| 0.190476
| false
| 0
| 0.071429
| 0
| 0.452381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e2c7487821c1b466bfeb152a868353bd01ba3f7
| 3,742
|
py
|
Python
|
CellMQ.py
|
edjuaro/cell-migration-quantification
|
b6479cc8525a1ac8bdaf0abfc66dec57de0be21e
|
[
"MIT"
] | null | null | null |
CellMQ.py
|
edjuaro/cell-migration-quantification
|
b6479cc8525a1ac8bdaf0abfc66dec57de0be21e
|
[
"MIT"
] | null | null | null |
CellMQ.py
|
edjuaro/cell-migration-quantification
|
b6479cc8525a1ac8bdaf0abfc66dec57de0be21e
|
[
"MIT"
] | null | null | null |
# Foreground extraction for a cell-migration image: threshold, flood-fill the
# background, combine, then erode/dilate to produce a mask.
import cv2
import numpy as np
from skimage import draw
from skimage import io
# Read image
im_in = cv2.imread("analyses/MDA231_stopper_1_c3.tif", cv2.IMREAD_GRAYSCALE);
# Threshold (THRESH_BINARY_INV with threshold 20):
# values above 20 become 0, values of 20 or below become 255.
# (The previous comment mentioned 220, which did not match the code.)
th, im_th = cv2.threshold(im_in, 20, 255, cv2.THRESH_BINARY_INV);
# Copy the thresholded image.
im_floodfill = im_th.copy()
# Mask used for flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0,0), 255);
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = im_th | im_floodfill_inv
io.imsave(fname='temp_output.png', arr=im_out)
# im_out_inv = cv2.bitwise_not(im_out)
# dilate the mask:
k_size = 2
# NOTE(review): k_half is a float (1.0) and is passed to draw.circle below - confirm intended.
k_half = k_size/2
kernel = np.ones((k_size,k_size),np.uint8)
# NOTE(review): skimage.draw.circle was removed in skimage 0.19 (use draw.disk);
# also the kernel is already all ones, so this assignment is a no-op - confirm intent.
coords = draw.circle(k_half, k_half, k_half, shape=im_th.shape)
kernel[coords] = 1
erosion = cv2.erode(im_out,kernel,iterations = 1)
dilation = cv2.dilate(cv2.bitwise_not(erosion),kernel,iterations = 1)
dilation = cv2.bitwise_not(dilation)
# io.imshow(dilation)
io.imsave(fname='mask.png', arr=dilation)
# Display images.
# io.imsave(fname='mask.png', arr=im_out)
# # mostly from http://nickc1.github.io/python,/matlab/2016/05/17/Standard-Deviation-(Filters)-in-Matlab-and-Python.html
# import cv2
# from skimage import draw
# from skimage import io
# filename = 'analyses/MDA231_stopper_1_c3.tif'
# plate = io.imread(filename,as_grey=True)
# image = plate
# #io.imshow(image)
# # io.imsave(fname='temp_output.png', arr=image)
# import numpy as np
# # img = cv2.imread('....') # Read in the image
# sobelx = cv2.Sobel(image,cv2.CV_64F,1,0) # Find x and y gradients
# sobely = cv2.Sobel(image,cv2.CV_64F,0,1)
# # Find magnitude and angle
# I2 = np.sqrt(sobelx**2.0 + sobely**2.0)
# # angle = np.arctan2(sobely, sobelx) * (180 / np.pi)
# # io.imshow(I2)
# # io.imsave(fname='temp_output.png', arr=I2)
# from scipy.ndimage.filters import uniform_filter
# import numpy as np
# def window_stdev(X, window_size):
# c1 = uniform_filter(X, window_size, mode='reflect')
# c2 = uniform_filter(X*X, window_size, mode='reflect')
# return np.sqrt(c2 - c1*c1)
# # x = np.arange(16).reshape(4,4).astype('float')
# kernel_size = 3
# I1 = window_stdev(I2,kernel_size)*np.sqrt(kernel_size**2/(kernel_size**2 - 1))
# # io.imshow(I1)
# # io.imsave(fname='temp_output.png', arr=I1)
# from scipy.signal import medfilt2d
# I1 = medfilt2d(I1, kernel_size=3)
# # io.imshow(I1)
# # io.imsave(fname='temp_output.png', arr=I1)
# import numpy as np
# from skimage.morphology import reconstruction
# from skimage.exposure import rescale_intensity
# # image = rescale_intensity(I1, in_range=(50, 200))
# image = I1
# seed = np.copy(image)
# seed[1:-1, 1:-1] = image.max()
# mask = image
# filled = reconstruction(seed, mask, method='erosion')
# io.imsave(fname='temp_output.png', arr=filled)
# # kernel = np.zeros((80,80),np.uint8)
# # coords = draw.circle(40, 40, 40, shape=image.shape)
# # kernel[coords] = 1
# # erosion = cv2.erode(I1,kernel,iterations = 1)
# # # io.imshow(erosion)
# # # # kernel = np.ones((40,40),np.uint8)
# # # # erosion = cv2.erode(I1,kernel,iterations = 1)
# # # # io.imshow(erosion)
# # # io.imsave(fname='temp_output.png', arr=erosion)
# # from skimage.morphology import reconstruction
# # fill = reconstruction(I1, erosion, method='erosion')
# # # io.imshow(fill)
# # # io.imsave(fname='temp_output.png', arr=fill)
# # dilation = cv2.dilate(fill,kernel,iterations = 1)
# # # io.imshow(dilation)
# # io.imsave(fname='temp_output.png', arr=dilation)
| 27.925373
| 120
| 0.69829
| 601
| 3,742
| 4.231281
| 0.264559
| 0.034605
| 0.056233
| 0.060165
| 0.395596
| 0.289422
| 0.204876
| 0.127802
| 0.070783
| 0.070783
| 0
| 0.046671
| 0.141101
| 3,742
| 134
| 121
| 27.925373
| 0.744555
| 0.70791
| 0
| 0
| 0
| 0
| 0.056352
| 0.032787
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e2d9335521cea1ce24ba509b262882641d75542
| 1,344
|
py
|
Python
|
test/unit/messages/bloxroute/test_txs_message.py
|
dolphinridercrypto/bxcommon
|
8f70557c1dbff785a5dd3fcdf91176066e085c3a
|
[
"MIT"
] | 12
|
2019-11-06T17:39:10.000Z
|
2022-03-01T11:26:19.000Z
|
test/unit/messages/bloxroute/test_txs_message.py
|
dolphinridercrypto/bxcommon
|
8f70557c1dbff785a5dd3fcdf91176066e085c3a
|
[
"MIT"
] | 8
|
2019-11-06T21:31:11.000Z
|
2021-06-02T00:46:50.000Z
|
test/unit/messages/bloxroute/test_txs_message.py
|
dolphinridercrypto/bxcommon
|
8f70557c1dbff785a5dd3fcdf91176066e085c3a
|
[
"MIT"
] | 5
|
2019-11-14T18:08:11.000Z
|
2022-02-08T09:36:22.000Z
|
from bxcommon.test_utils.abstract_test_case import AbstractTestCase
from bxcommon.messages.bloxroute.txs_message import TxsMessage
from bxcommon.models.transaction_info import TransactionInfo
from bxcommon.test_utils import helpers
from bxcommon.utils.object_hash import Sha256Hash
class TxsMessageTests(AbstractTestCase):
    """Round-trip serialization tests for TxsMessage."""

    def test_txs_with_short_ids_message(self):
        """Serialize a TxsMessage, reparse it, and compare every field."""
        txs_info = [
            TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(200), 111),
            TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(300), 222),
            TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(400), 333)
        ]
        msg = TxsMessage(txs=txs_info)
        msg_bytes = msg.rawbytes()
        self.assertTrue(msg_bytes)
        parsed_msg = TxsMessage(buf=msg_bytes)
        self.assertTrue(parsed_msg)
        parsed_txs_info = parsed_msg.get_txs()
        self.assertEqual(len(parsed_txs_info), len(txs_info))
        # zip replaces the index-based range(len(...)) loop; lengths were
        # asserted equal just above, so zip cannot silently truncate.
        for parsed, expected in zip(parsed_txs_info, txs_info):
            self.assertEqual(parsed.short_id, expected.short_id)
            self.assertEqual(parsed.contents, expected.contents)
            self.assertEqual(parsed.hash, expected.hash)
| 38.4
| 110
| 0.738095
| 164
| 1,344
| 5.780488
| 0.317073
| 0.088608
| 0.151899
| 0.126582
| 0.369198
| 0.341772
| 0.237342
| 0.237342
| 0.237342
| 0
| 0
| 0.032316
| 0.171131
| 1,344
| 34
| 111
| 39.529412
| 0.818671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 1
| 0.043478
| false
| 0
| 0.217391
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e2fe086028f0377c018ceee95df734b7ae1f811
| 986
|
py
|
Python
|
BLAST/make_fasta.py
|
cdiaza/bootcamp
|
2fda661a44930f70ac8ef15218cc99d099fc4019
|
[
"MIT"
] | 1
|
2021-01-16T20:39:41.000Z
|
2021-01-16T20:39:41.000Z
|
BLAST/make_fasta.py
|
cdiaza/bootcamp
|
2fda661a44930f70ac8ef15218cc99d099fc4019
|
[
"MIT"
] | null | null | null |
BLAST/make_fasta.py
|
cdiaza/bootcamp
|
2fda661a44930f70ac8ef15218cc99d099fc4019
|
[
"MIT"
] | 1
|
2021-01-16T20:31:17.000Z
|
2021-01-16T20:31:17.000Z
|
import random
def format_fasta(title, sequence):
    """
    Format a sequence as a FASTA record.

    Input:
        title - String - Title of the sequence (written after "> ")
        sequence - String - Actual sequence
    Output:
        String - "> {title}", the sequence wrapped at 70 characters per
        line, and a trailing blank line.

    Fix: the original line count ``1 + len(sequence) // fasta_width``
    produced a spurious empty line whenever the sequence length was an
    exact multiple of 70.
    """
    fasta_width = 70  # Number of characters in one line
    # Slice directly by stride; `or [""]` keeps the original behavior of
    # emitting one (empty) sequence line for an empty input.
    lines = [sequence[i:i + fasta_width]
             for i in range(0, len(sequence), fasta_width)] or [""]
    body = "\n".join(lines)
    return f"> {title}\n{body}\n\n"
bases = "actg"  # Bases for our random sequences (DNA nucleotides, despite the original "protein" comment)
# Write random sequences to a FASTA file.
with open("random_sequences.fa", "w") as f:
    for length in range(1, 25):  # Sequences of different lengths
        for run in range(10):  # Several independent runs per length
            title = f"length_{length} run_{run}"
            sequence = "".join(random.choices(bases, k=length))
            f.write(format_fasta(title, sequence))
| 29.878788
| 81
| 0.631846
| 134
| 986
| 4.567164
| 0.447761
| 0.065359
| 0.052288
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01238
| 0.262677
| 986
| 32
| 82
| 30.8125
| 0.829436
| 0.337728
| 0
| 0
| 0
| 0
| 0.119281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e330bec332cbcb5e47190df3547281fe5168a28
| 903
|
py
|
Python
|
tests/test_echo_server_contextvar.py
|
rednafi/think-async
|
3642afc0d8661b10affd953ce3b239f3e6b3009b
|
[
"MIT"
] | 87
|
2021-04-14T09:51:30.000Z
|
2022-03-24T10:38:41.000Z
|
tests/test_echo_server_contextvar.py
|
rednafi/think-async
|
3642afc0d8661b10affd953ce3b239f3e6b3009b
|
[
"MIT"
] | 3
|
2021-06-27T18:06:11.000Z
|
2022-03-24T19:56:38.000Z
|
tests/test_echo_server_contextvar.py
|
rednafi/think-async
|
3642afc0d8661b10affd953ce3b239f3e6b3009b
|
[
"MIT"
] | 4
|
2021-05-12T01:36:14.000Z
|
2022-01-28T04:06:12.000Z
|
from unittest.mock import Mock, patch
import pytest
import patterns.echo_server_contextvar as main
@patch.object(main, "client_addr_var", Mock())
def test_render_goodbye(capsys):
    """Check that render_goodbye produces the goodbye line without errors."""
    # The ContextVar is replaced by a Mock so render_goodbye can format an
    # address without a real connection context.
    # Call 'render_goodbye'
    goodbye_string = main.render_goodbye()
    print(goodbye_string)
    # Assert: nothing on stderr, and the goodbye prefix appears on stdout.
    out, err = capsys.readouterr()
    assert err == ""
    assert "Good bye, client @" in out
@pytest.mark.asyncio
@patch("patterns.echo_server_contextvar.asyncio.start_server", autospec=True)
@patch("patterns.echo_server_contextvar.asyncio.sleep", autospec=True)
async def test_server(mock_asyncio_sleep, mock_asyncio_start_server):
    """Run main.server with asyncio.start_server/sleep mocked out and verify wiring."""
    # Patch decorators inject mocks bottom-up: sleep first, start_server second.
    stop_after = 5
    # Call 'server()'.
    await main.server(stop_after=stop_after)
    # Assert: server() apparently sleeps once per unit of stop_after —
    # TODO confirm against the implementation.
    assert mock_asyncio_sleep.call_count == stop_after
    args = main.handle_request, "127.0.0.1", 8081
    mock_asyncio_start_server.assert_called_once_with(*args)
| 25.8
| 77
| 0.743079
| 123
| 903
| 5.170732
| 0.430894
| 0.069182
| 0.084906
| 0.132075
| 0.125786
| 0.125786
| 0
| 0
| 0
| 0
| 0
| 0.014342
| 0.150609
| 903
| 34
| 78
| 26.558824
| 0.814863
| 0.059801
| 0
| 0
| 0
| 0
| 0.164692
| 0.114929
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e3355f7d36e6d39cee7c23d5acd90666f7629a8
| 693
|
py
|
Python
|
test.py
|
riquedev/SSLProxies24Feed
|
93ab23a6794ae7f40002eb464a9c443afe44db86
|
[
"MIT"
] | null | null | null |
test.py
|
riquedev/SSLProxies24Feed
|
93ab23a6794ae7f40002eb464a9c443afe44db86
|
[
"MIT"
] | 1
|
2017-09-15T13:27:09.000Z
|
2017-09-15T14:43:28.000Z
|
test.py
|
riquedev/SSLProxies24Feed
|
93ab23a6794ae7f40002eb464a9c443afe44db86
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: rique_dev (rique_dev@hotmail.com)
from SSLProxies24.Feed import Feed
from SSLProxies24.Check import CheckProxy
import time
import gc
# Fetch the proxy listing from the feed
prx = Feed().PROXY_LIST
# Instantiate the checker class
chk = CheckProxy()
# Start validating the list (presumably asynchronous — the sleep below
# waits for results to accumulate; TODO confirm against CheckProxy)
chk.validatelist(prx)
# Enable garbage collection
gc.enable()
time.sleep(30)
# Print the counters (printed labels are runtime strings, kept in Portuguese)
print('Sucesso: '+str(chk.getsucesscount()))
print('Falhas: '+str(chk.getfailcount()))
print('Total de Proxys: '+str(chk.getproxycount()))
print('Restam: '+str(chk.getproxycount()-(chk.getsucesscount()+chk.getfailcount())))
# Print the proxy list
print(chk.getproxylist())
del prx
del chk
print('Classes eliminadas.')
exit(0)
| 19.25
| 84
| 0.730159
| 93
| 693
| 5.408602
| 0.602151
| 0.047714
| 0.075547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.111111
| 693
| 36
| 85
| 19.25
| 0.803571
| 0.249639
| 0
| 0
| 0
| 0
| 0.119141
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e33da3d320ddccf5c2863568bc4b5fb0505e125
| 577
|
py
|
Python
|
euler.py
|
user3719431/tna_lab1
|
183c34d927c39f502fea7d6a81f2945104d7b75b
|
[
"MIT"
] | null | null | null |
euler.py
|
user3719431/tna_lab1
|
183c34d927c39f502fea7d6a81f2945104d7b75b
|
[
"MIT"
] | null | null | null |
euler.py
|
user3719431/tna_lab1
|
183c34d927c39f502fea7d6a81f2945104d7b75b
|
[
"MIT"
] | null | null | null |
import math as m
def yakobi(a, n, k):
    """
    Return k times the Jacobi symbol (a/n), for odd n > 0.

    The original recursion never returned its recursive results (so most
    calls yielded None), contained ``(n - 1)(a - 1)`` — a call on an int,
    a TypeError — and used float division where integer arithmetic is
    required. This is a corrected iterative implementation with the same
    signature; the accumulator ``k`` multiplies the final symbol.
    """
    result = k
    # The symbol depends only on a mod n (also handles negative a).
    a %= n
    while a != 0:
        # Factor out twos: (2/n) = -1 exactly when n ≡ 3 or 5 (mod 8).
        while a % 2 == 0:
            a //= 2
            if n % 8 in (3, 5):
                result = -result
        # Quadratic reciprocity: flip sign when both are ≡ 3 (mod 4).
        a, n = n, a
        if a % 4 == 3 and n % 4 == 3:
            result = -result
        a %= n
    # The symbol is 0 unless the gcd reduction ended with n == 1.
    return result if n == 1 else 0
def euler_test(p, x):
    """
    Euler's criterion check: True when x^((p-1)/2) is congruent to the
    Jacobi symbol (x/p) modulo p. Holds for every odd prime p, so a
    False result proves p composite.

    Fixes: the original used float division ``(p - 1) / 2`` as an
    exponent and computed the full-size power before reducing mod p;
    3-argument pow does modular exponentiation with integer arithmetic.
    """
    jacobi = yakobi(x, p, k=1)
    # jacobi % p maps -1 to p-1 so both residue branches of the original
    # comparison collapse into one equality.
    return pow(x, (p - 1) // 2, p) == jacobi % p
| 24.041667
| 60
| 0.363951
| 99
| 577
| 2.111111
| 0.262626
| 0.038278
| 0.076555
| 0.07177
| 0.492823
| 0.425837
| 0.239234
| 0.239234
| 0.239234
| 0
| 0
| 0.067278
| 0.433276
| 577
| 24
| 61
| 24.041667
| 0.571865
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e34180a8de5ed1a630ffd86a9a830130bbd1076
| 3,787
|
py
|
Python
|
src/b2d/hud_b2d.py
|
VgTajdd/neuroevolver
|
248c96b25ad936e15cfffc7a4223926db83ad540
|
[
"MIT"
] | null | null | null |
src/b2d/hud_b2d.py
|
VgTajdd/neuroevolver
|
248c96b25ad936e15cfffc7a4223926db83ad540
|
[
"MIT"
] | null | null | null |
src/b2d/hud_b2d.py
|
VgTajdd/neuroevolver
|
248c96b25ad936e15cfffc7a4223926db83ad540
|
[
"MIT"
] | null | null | null |
## ========================================================================= ##
## Copyright (c) 2019 Agustin Durand Diaz. ##
## This code is licensed under the MIT license. ##
## hud_b2d.py ##
## ========================================================================= ##
from core.hud_base import HudBase
from enums import ScreenType, SimulationType
from core.utils import getPathWithoutExtension, existsFile, getImageSize
import settings
class HudB2D(HudBase):
    """Base HUD for the Box2D screens: FPS counter, title label, Back button."""
    def __init__(self, width, height):
        HudBase.__init__(self, width, height)
    def init(self):
        self.showFPS()
        # Arguments appear to be (position), (size), text in screen pixels —
        # TODO confirm against HudBase.
        self.addLabel((80, 30), (150, 30), 'Box2D')
        self.addButton((725, 40), (100, 50), 'Back', self.gotoMetamap)
    def gotoMetamap(self):
        # Return to the meta-map screen via the owning screen manager.
        self.m_manager.gotoScreen(ScreenType.META_MAP)
class HudB2DNEATDIP(HudB2D):
    """HUD for the NEAT double-inverted-pendulum (DIP) simulation screen."""
    def __init__(self, width, height, params):
        # params drives the HUD mode: training progress vs. genome display.
        self.params = params
        HudB2D.__init__(self, width, height)
    def init(self):
        self.showFPS()
        self.addLabel((75, 15), (150, 30), 'NEAT DIP')
        if 'isTraining' in self.params and self.params['isTraining']:
            # Training mode: show "currentStep/totalSteps" progress.
            self.addLabel((75, 45), (150, 30),
                str(self.params['currentStep']) + "/" + str(settings.NEAT_DIP_EVOLVING_STEPS))
        else:
            # Playback mode: show the genome's network image, if rendered.
            imgPath = self.params['genomePath']
            imgPath = getPathWithoutExtension(imgPath) + '.png'
            if existsFile(imgPath):
                size = getImageSize(imgPath)
                w, h = size
                # Clamp the displayed image to 450x450 pixels.
                if size[0] > 450:
                    w = 450
                if size[1] > 450:
                    h = 450
                self.addImage(((w/2) + 30, (h/2) + 30), (w, h), imgPath)
        self.addButton((770, 15), (60, 30), 'Back', self.gotoMetamap, alpha = 200)
        self.addButton((670, 15), (60, 30), 'Reset', self.resetDIP, alpha = 200)
    def resetDIP(self):
        # Restart the DIP simulation screen from scratch.
        self.m_manager.gotoScreen(ScreenType.SIMULATION, {'simulationType': SimulationType.NEAT_B2D_DIP})
class HudB2DNEATTIP(HudB2D):
    """HUD for the NEAT triple-inverted-pendulum (TIP) simulation screen."""
    def __init__(self, width, height, params):
        # params drives the HUD mode: training progress vs. network display.
        self.params = params
        HudB2D.__init__(self, width, height)
    def init(self):
        self.showFPS()
        self.addLabel((75, 15), (150, 30), 'NEAT TIP')
        if 'isTraining' in self.params and self.params['isTraining']:
            # Training mode: show "currentStep/totalSteps" progress.
            self.addLabel((75, 45), (150, 30),
                str(self.params['currentStep']) + "/" + str(settings.NEAT_TIP_EVOLVING_STEPS))
        else:
            # Playback mode: show the fixed network image (no size clamp here,
            # unlike the DIP HUD).
            imgPath = 'net_neat_tip.png'
            if existsFile(imgPath):
                size = getImageSize(imgPath)
                self.addImage(((size[0]/2) + 30, (size[1]/2) + 30), size, imgPath)
        self.addButton((770, 15), (60, 30), 'Back', self.gotoMetamap, alpha = 200)
class HudB2DNEATWalker(HudB2D):
    """HUD for the NEAT walker simulation screen."""
    def __init__(self, width, height, params):
        # params drives the HUD mode: training progress vs. network display.
        self.params = params
        HudB2D.__init__(self, width, height)
    def init(self):
        self.showFPS()
        self.addLabel((75, 15), (150, 30), 'NEAT Walker')
        if 'isTraining' in self.params and self.params['isTraining']:
            # Training mode: show "currentStep/totalSteps" progress.
            self.addLabel((75, 45), (150, 30),
                str(self.params['currentStep']) + "/" + str(settings.NEAT_WALKER_EVOLVING_STEPS))
        else:
            # Playback mode: show the fixed network image.
            imgPath = 'net_neat_walker.png'
            if existsFile(imgPath):
                size = getImageSize(imgPath)
                self.addImage(((size[0]/2) + 30, (size[1]/2) + 30), size, imgPath)
        self.addButton((770, 15), (60, 30), 'Back', self.gotoMetamap, alpha = 200)
| 44.034884
| 108
| 0.520201
| 393
| 3,787
| 4.880407
| 0.24173
| 0.067779
| 0.045881
| 0.079249
| 0.658498
| 0.647028
| 0.577164
| 0.553702
| 0.553702
| 0.553702
| 0
| 0.066743
| 0.311592
| 3,787
| 86
| 109
| 44.034884
| 0.668968
| 0.098495
| 0
| 0.528571
| 0
| 0
| 0.064087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.057143
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e358277ee18f33ce73fddfacb850dc985cb0977
| 1,958
|
py
|
Python
|
grblc/search/gcn/parser/combine.py
|
youngsm/adsgrb
|
a89b56b371888deb67788a9f5a91300b281784a6
|
[
"MIT"
] | null | null | null |
grblc/search/gcn/parser/combine.py
|
youngsm/adsgrb
|
a89b56b371888deb67788a9f5a91300b281784a6
|
[
"MIT"
] | null | null | null |
grblc/search/gcn/parser/combine.py
|
youngsm/adsgrb
|
a89b56b371888deb67788a9f5a91300b281784a6
|
[
"MIT"
] | null | null | null |
def get_final_txt(grb, tables, sentences, output_path):
    """
    Combine the data from [grb]_final_sentences.txt and [grb]_final_tables.txt
    into [grb]_final.txt: when a table and a sentence come from the same GCN,
    write them in the same GCN section; remaining tables are appended after.

    Fixes vs. original: ``tables.pop(idx)`` while iterating the same list
    skipped entries (two tables from the same GCN could lose one), and the
    output file was never closed; matching is now done by filtering and the
    file is managed by a ``with`` block. The caller's lists are not mutated.
    """
    # Work on a copy so the caller's table list is untouched.
    remaining = list(tables)
    with open(f"{output_path}{grb}/{grb}_final.txt", 'w') as file:
        for sentence in sentences:
            num = sentence['number']
            # Section header for this GCN.
            result = "=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=\n\n"
            result += f"GCN Number: {sentence['number']}\n\n"
            result += f"SENTENCE DATA:\n\n{sentence['sentences']}\n\n"
            # All tables from the same GCN (none are skipped now).
            matching = [t for t in remaining if t['number'] == num]
            remaining = [t for t in remaining if t['number'] != num]
            if matching:
                result += "TABLE DATA:\n\n"
                for table in matching:
                    result += '\n'.join(table['table']) + '\n\n'
            file.write(result)
        # Tables whose GCN had no sentence entry.
        for table in remaining:
            result = "=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=\n\n"
            result += f"GCN Number: {table['number']}\n"
            result += "TABLE DATA:\n\n" + '\n'.join(table['table']) + '\n\n'
            file.write(result)
| 36.943396
| 88
| 0.550051
| 262
| 1,958
| 4.026718
| 0.278626
| 0.018957
| 0.056872
| 0.03981
| 0.267299
| 0.214218
| 0.045498
| 0
| 0
| 0
| 0
| 0.002135
| 0.282431
| 1,958
| 52
| 89
| 37.653846
| 0.748754
| 0.353422
| 0
| 0.173913
| 0
| 0
| 0.283049
| 0.188159
| 0.043478
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e35f3a7bd64997a4e302cd1d8e7454d8298b774
| 972
|
py
|
Python
|
hardware/headband.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
hardware/headband.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
hardware/headband.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
from solid import *
from solid.utils import *
import util
from util import *
from math import pi
def headband(r1=64.0, r2=85.0, t=3.0, w=12.0):
    # Build an OpenSCAD model of a headband using the `solid` DSL.
    # r1/r2 look like inner/outer arc radii, t material thickness, w the
    # extruded width — units presumably mm; TODO confirm against util.save.
    # One comb tooth: a 1x1 square fused with a half-circle, positioned on
    # the inner arc.
    combe = right(r1-t/2)(linear_extrude(1)(square([1,1], center=True) + left(0.5)(circle(d=1))))
    combe_spacing = 3.0 # mm
    combe_count = pi*r1/combe_spacing
    # Teeth copied by rotation, spread evenly over the front 180 degrees.
    combes = union()(*[ rotate([0,0, i*180.0/combe_count])(combe) for i in range(-int(combe_count/2), int(combe_count/2))])
    def arcshell(r, ends):
        # Thin annular arc of radial thickness t/3 between start/end degrees.
        start, end = ends
        return (arc(rad=r+t/6, start_degrees = start, end_degrees=end) -
                arc(rad=r-t/6, start_degrees = start, end_degrees=end))
    # Extrude the offset-rounded union of three arc shells plus the teeth.
    return (linear_extrude(w)(
        offset(r=t/3)(
            arcshell(r1, (-90, 90)) +
            forward(r2 - r1)(arcshell(r2, (-130, -90))) +
            back(r2 - r1)(arcshell(r2, (90, 130))))) +
        combes)
def export_scad():
    # Render the default headband and save it via the project's util helper.
    util.save('headband', headband())
if __name__ == '__main__':
    export_scad()
| 31.354839
| 123
| 0.588477
| 152
| 972
| 3.618421
| 0.421053
| 0.072727
| 0.047273
| 0.050909
| 0.141818
| 0.141818
| 0.141818
| 0.141818
| 0.141818
| 0.141818
| 0
| 0.07537
| 0.235597
| 972
| 30
| 124
| 32.4
| 0.664872
| 0.002058
| 0
| 0
| 0
| 0
| 0.016529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.208333
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e364089d40bdc8f90fe2c5aa5081ef11b937f59
| 3,482
|
py
|
Python
|
climlab/dynamics/meridional_advection_diffusion.py
|
nfeldl/climlab
|
2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7
|
[
"BSD-3-Clause",
"MIT"
] | 160
|
2015-02-25T15:56:37.000Z
|
2022-03-14T23:51:23.000Z
|
climlab/dynamics/meridional_advection_diffusion.py
|
nfeldl/climlab
|
2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7
|
[
"BSD-3-Clause",
"MIT"
] | 137
|
2015-12-18T17:39:31.000Z
|
2022-02-04T20:50:53.000Z
|
climlab/dynamics/meridional_advection_diffusion.py
|
nfeldl/climlab
|
2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7
|
[
"BSD-3-Clause",
"MIT"
] | 54
|
2015-04-28T05:57:39.000Z
|
2022-02-17T08:15:11.000Z
|
r"""General solver of the 1D meridional advection-diffusion equation on the sphere:
.. math::
\frac{\partial}{\partial t} \psi(\phi,t) &= -\frac{1}{a \cos\phi} \frac{\partial}{\partial \phi} \left[ \cos\phi ~ F(\phi,t) \right] \\
F &= U(\phi) \psi(\phi) -\frac{K(\phi)}{a} ~ \frac{\partial \psi}{\partial \phi}
for a state variable :math:`\psi(\phi,t)`, arbitrary diffusivity :math:`K(\phi)`
in units of :math:`x^2 ~ t^{-1}`, and advecting velocity :math:`U(\phi)`.
:math:`\phi` is latitude and :math:`a` is the Earth's radius (in meters).
:math:`K` and :math:`U` can be scalars,
or optionally vector *specified at grid cell boundaries*
(so their lengths must be exactly 1 greater than the length of :math:`\phi`).
:math:`K` and :math:`U` can be modified by the user at any time
(e.g., after each timestep, if they depend on other state variables).
A fully implicit timestep is used for computational efficiency. Thus the computed
tendency :math:`\frac{\partial \psi}{\partial t}` will depend on the timestep.
In addition to the tendency over the implicit timestep,
the solver also calculates several diagnostics from the updated state:
- ``diffusive_flux`` given by :math:`-\frac{K(\phi)}{a} ~ \frac{\partial \psi}{\partial \phi}` in units of :math:`[\psi]~[x]`/s
- ``advective_flux`` given by :math:`U(\phi) \psi(\phi)` (same units)
- ``total_flux``, the sum of advective, diffusive and prescribed fluxes
- ``flux_convergence`` (or instantaneous scalar tendency) given by the right hand side of the first equation above, in units of :math:`[\psi]`/s
Non-uniform grid spacing is supported.
The state variable :math:`\psi` may be multi-dimensional, but the diffusion
will operate along the latitude dimension only.
"""
from __future__ import division
import numpy as np
from .advection_diffusion import AdvectionDiffusion, Diffusion
from climlab import constants as const
class MeridionalAdvectionDiffusion(AdvectionDiffusion):
    """A parent class for meridional advection-diffusion processes.
    """
    def __init__(self,
            K=0.,
            U=0.,
            use_banded_solver=False,
            prescribed_flux=0.,
            **kwargs):
        # Delegate to the generic solver, fixing the operator axis to 'lat'.
        super(MeridionalAdvectionDiffusion, self).__init__(K=K, U=U,
            diffusion_axis='lat', use_banded_solver=use_banded_solver, **kwargs)
        # Conversion of delta from degrees (grid units) to physical length units
        phi_stag = np.deg2rad(self.lat_bounds)
        phi = np.deg2rad(self.lat)
        # Physical meridional coordinate: arc length a*phi at cell centers
        # and at cell boundaries (a = Earth radius from climlab constants).
        self._Xcenter[...,:] = phi*const.a
        self._Xbounds[...,:] = phi_stag*const.a
        # cos(latitude) weights supply the spherical metric factor used in
        # the flux-divergence computation.
        self._weight_bounds[...,:] = np.cos(phi_stag)
        self._weight_center[...,:] = np.cos(phi)
        # Now properly compute the weighted advection-diffusion matrix.
        # Assigning K and U presumably triggers the matrix rebuild in the
        # parent class — TODO confirm against AdvectionDiffusion.
        self.prescribed_flux = prescribed_flux
        self.K = K
        self.U = U
class MeridionalDiffusion(MeridionalAdvectionDiffusion):
    """A parent class for meridional diffusion-only processes,
    with advection set to zero.
    Otherwise identical to the parent class.
    """
    def __init__(self,
            K=0.,
            use_banded_solver=False,
            prescribed_flux=0.,
            **kwargs):
        # Just initialize the AdvectionDiffusion class with U=0
        # (no advecting velocity; pure diffusion along latitude).
        super(MeridionalDiffusion, self).__init__(
            U=0.,
            K=K,
            prescribed_flux=prescribed_flux,
            use_banded_solver=use_banded_solver, **kwargs)
| 42.463415
| 143
| 0.661401
| 473
| 3,482
| 4.754757
| 0.365751
| 0.024011
| 0.040018
| 0.028012
| 0.169409
| 0.114718
| 0.114718
| 0.066696
| 0.066696
| 0
| 0
| 0.005117
| 0.214245
| 3,482
| 81
| 144
| 42.987654
| 0.816886
| 0.605112
| 0
| 0.352941
| 0
| 0
| 0.002234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e397c403213c314186ad9c8dc4d66123671cfea
| 620
|
py
|
Python
|
Day14/main.py
|
dloibl/AOC2021
|
80672a7ee8ebc1a7970c155e4e15e0ed2351e085
|
[
"MIT"
] | null | null | null |
Day14/main.py
|
dloibl/AOC2021
|
80672a7ee8ebc1a7970c155e4e15e0ed2351e085
|
[
"MIT"
] | null | null | null |
Day14/main.py
|
dloibl/AOC2021
|
80672a7ee8ebc1a7970c155e4e15e0ed2351e085
|
[
"MIT"
] | null | null | null |
# Advent of Code 2021, Day 14 (part 1): polymer pair-insertion, 10 steps.
# Fixes vs. original: the input file handle is now closed (with-block),
# the local `next` no longer shadows the builtin, and the final count
# covers every element present in the polymer (the original only counted
# elements appearing as rule *outputs*, missing any element that no rule
# produces).
with open("input.txt", "r") as fh:
    data = fh.readlines()

# First line is the polymer template; lines from index 2 on are the rules.
polymer = data[0]

pair_insertion = {}
for line in data[2:]:
    token, replacement = line.strip().split(" -> ")
    pair_insertion[token] = replacement

result = list(polymer.strip())
for step in range(10):
    expanded = []
    # For each adjacent pair, emit the left element then the inserted one.
    for left, right in zip(result, result[1:]):
        expanded.append(left)
        expanded.append(pair_insertion[left + right])
    # The last element has no right neighbor; append it unchanged.
    if result:
        expanded.append(result[-1])
    result = expanded

count = [result.count(c) for c in set(result)]
print("The answer of part 1 is", max(count) - min(count))
| 23.846154
| 63
| 0.596774
| 90
| 620
| 4.066667
| 0.477778
| 0.142077
| 0.065574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016985
| 0.240323
| 620
| 25
| 64
| 24.8
| 0.760085
| 0
| 0
| 0.111111
| 0
| 0
| 0.059677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e399f9876b8a0c8affd85f404dc546dcab1961f
| 1,199
|
py
|
Python
|
raster/migrations/0006_auto_20141016_0522.py
|
bpneumann/django-raster
|
74daf9d396f2332a2cd83723b7330e6b10d73b1c
|
[
"BSD-3-Clause"
] | null | null | null |
raster/migrations/0006_auto_20141016_0522.py
|
bpneumann/django-raster
|
74daf9d396f2332a2cd83723b7330e6b10d73b1c
|
[
"BSD-3-Clause"
] | null | null | null |
raster/migrations/0006_auto_20141016_0522.py
|
bpneumann/django-raster
|
74daf9d396f2332a2cd83723b7330e6b10d73b1c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds indexed tile-coordinate columns
    # (tilex, tiley, tilez) to RasterTile and makes 'level' nullable/indexed.
    # Kept byte-stable apart from comments — migrations should not be edited.

    dependencies = [
        ('raster', '0005_auto_20141014_0955'),
    ]

    operations = [
        migrations.AddField(
            model_name='rastertile',
            name='tilex',
            field=models.IntegerField(null=True, db_index=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='rastertile',
            name='tiley',
            field=models.IntegerField(null=True, db_index=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='rastertile',
            name='tilez',
            # Choices 1..18 presumably correspond to web-map zoom levels —
            # TODO confirm against the raster app's tiler.
            field=models.IntegerField(db_index=True, null=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18)]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rastertile',
            name='level',
            field=models.IntegerField(null=True, db_index=True),
        ),
    ]
| 31.552632
| 236
| 0.539616
| 131
| 1,199
| 4.793893
| 0.419847
| 0.057325
| 0.121019
| 0.146497
| 0.457006
| 0.457006
| 0.39172
| 0.39172
| 0.324841
| 0.324841
| 0
| 0.084827
| 0.301918
| 1,199
| 37
| 237
| 32.405405
| 0.665472
| 0.017515
| 0
| 0.548387
| 0
| 0
| 0.07568
| 0.019558
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e3b1af1bee45ddc7a412b33a2fead806c9ec302
| 1,765
|
py
|
Python
|
djangorecipebook/templating.py
|
tkhyn/djangorecipebook
|
2cbb3d46631630e2c7a3c511b504de2088aac115
|
[
"MIT"
] | null | null | null |
djangorecipebook/templating.py
|
tkhyn/djangorecipebook
|
2cbb3d46631630e2c7a3c511b504de2088aac115
|
[
"MIT"
] | null | null | null |
djangorecipebook/templating.py
|
tkhyn/djangorecipebook
|
2cbb3d46631630e2c7a3c511b504de2088aac115
|
[
"MIT"
] | null | null | null |
"""
Carry out template-based replacements in project files
"""
import os
import sys
from string import Template
def replace_name(path, mapping):
    """
    Substitute template placeholders in the final path component and,
    when a substitution happens, rename the file/directory on disk.

    Returns the (possibly new) path.
    """
    head, tail = os.path.split(path)
    # Fast path: nothing to substitute in the basename.
    if '${' not in tail:
        return path
    renamed = os.path.join(head, Template(tail).substitute(mapping))
    os.rename(path, renamed)
    return renamed
def replace_ctnt(f, mapping):
    """
    Substitute template placeholders in the content of file ``f``, in place.

    Silently returns when ``f`` is not a regular file. On any error the
    failing filename is reported on stderr and the exception re-raised.

    Fix: the original's ``finally: t_file.close()`` raised NameError when
    ``open()`` itself failed (``t_file`` never bound), masking the real
    error; a ``with`` block closes the file safely in every case.
    """
    if not os.path.isfile(f):
        return
    try:
        # Read, substitute, then rewrite the file from the start and
        # truncate any leftover tail from the longer original content.
        with open(f, 'r+') as t_file:
            t = Template(t_file.read())
            t_file.seek(0)
            t_file.write(t.substitute(mapping))
            t_file.truncate()
    except Exception:
        sys.stderr.write("""
ERROR: while running template engine on file %s
""" % f)
        raise
def process(path, mapping):
    """
    Performs all templating operations on the given path
    """
    # Rename first so the content pass opens the final (renamed) path,
    # then substitute placeholders inside the file.
    replace_ctnt(replace_name(path, mapping), mapping)
def process_tree(directory, mapping):
    """
    Performs all templating operations on the directory and its children.

    Fixes two defects in the original walk: it mutated ``dirnames`` while
    iterating it with remove/append (skipping every other entry and
    re-visiting the appended ones), and it appended full paths where
    ``os.walk`` expects bare directory names relative to ``dirpath``.
    """
    directory = replace_name(directory, mapping)
    for dirpath, dirnames, filenames in os.walk(directory):
        for f in filenames:
            process(os.path.join(dirpath, f), mapping)
        # Rename directories via index assignment so os.walk (top-down)
        # descends into the renamed directories.
        for i, d in enumerate(dirnames):
            new_path = replace_name(os.path.join(dirpath, d), mapping)
            dirnames[i] = os.path.basename(new_path)
| 25.214286
| 77
| 0.607932
| 224
| 1,765
| 4.691964
| 0.370536
| 0.028544
| 0.076118
| 0.041865
| 0.211227
| 0.159848
| 0.159848
| 0
| 0
| 0
| 0
| 0.002408
| 0.294051
| 1,765
| 69
| 78
| 25.57971
| 0.841091
| 0.204533
| 0
| 0
| 0
| 0
| 0.043444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e3c23f713b7a54ba361ed5b6913012fed253e5e
| 1,747
|
py
|
Python
|
toHash.py
|
ElTarget/-
|
fcf774386514a7f070be25d643be7bbf1a92af1e
|
[
"MIT"
] | 1
|
2022-02-22T02:39:52.000Z
|
2022-02-22T02:39:52.000Z
|
toHash.py
|
ElTarget/-
|
fcf774386514a7f070be25d643be7bbf1a92af1e
|
[
"MIT"
] | 1
|
2022-03-08T04:46:17.000Z
|
2022-03-08T04:46:17.000Z
|
toHash.py
|
ElTarget/get_malware_bazaar
|
fcf774386514a7f070be25d643be7bbf1a92af1e
|
[
"MIT"
] | null | null | null |
import hashlib
import os
# Compute the MD5 hex digest of a string.
def str2md5(content=None):
    """Return the MD5 hex digest of *content*; '' for empty or None input."""
    if not content:
        return ''
    return hashlib.md5(content.encode()).hexdigest()
# Generate the SHA-256 digest of a string
def str2sha256(content=None):
    """Return the hex SHA-256 digest of *content*, or '' for empty/None input."""
    if content:
        return hashlib.sha256(content.encode()).hexdigest()
    return ''
# Generate the MD5 digest of a file
def file2md5(filename):
    """
    Return the hex MD5 digest of the file at *filename*.

    Returns '' when the file does not exist or cannot be read (errors are
    printed, matching the module's best-effort style).
    """
    hash_value = ''
    if os.path.exists(filename):
        try:
            md5obj = hashlib.md5()
            # read in fixed-size chunks so large files are not loaded
            # into memory at once
            with open(filename, 'rb') as f:
                for chunk in iter(lambda: f.read(65536), b''):
                    md5obj.update(chunk)
            hash_value = md5obj.hexdigest()
        except Exception as e:
            print(e)
    return hash_value
def file2sha256(filename):
    """
    Return the hex SHA-256 digest of the file at *filename*.

    Returns '' when the file does not exist or cannot be read (errors are
    printed, matching the module's best-effort style).
    """
    hash_value = ''
    if os.path.exists(filename):
        try:
            sha256obj = hashlib.sha256()
            # chunked read keeps memory usage constant for large files
            with open(filename, "rb") as f:
                for chunk in iter(lambda: f.read(65536), b''):
                    sha256obj.update(chunk)
            hash_value = sha256obj.hexdigest()
        except Exception as e:
            print(e)
    return hash_value
def file2sha1(filename):
    """
    Return the hex SHA-1 digest of the file at *filename*.

    Returns '' when the file does not exist or cannot be read (errors are
    printed, matching the module's best-effort style).
    """
    hash_value = ''
    if os.path.exists(filename):
        try:
            sha1obj = hashlib.sha1()
            # chunked read keeps memory usage constant for large files
            with open(filename, 'rb') as f:
                for chunk in iter(lambda: f.read(65536), b''):
                    sha1obj.update(chunk)
            hash_value = sha1obj.hexdigest()
        except Exception as e:
            print(e)
    return hash_value
def file2sha3(filename):
    """
    Return the hex SHA3-384 digest of the file at *filename*.

    Note: despite the generic name, this uses the SHA3-384 variant
    (``hashlib.sha3_384``), matching the original behaviour.
    Returns '' when the file does not exist or cannot be read.
    """
    hash_value = ''
    if os.path.exists(filename):
        try:
            sha3obj = hashlib.sha3_384()
            # chunked read keeps memory usage constant for large files
            with open(filename, 'rb') as f:
                for chunk in iter(lambda: f.read(65536), b''):
                    sha3obj.update(chunk)
            hash_value = sha3obj.hexdigest()
        except Exception as e:
            print(e)
    return hash_value
| 23.293333
| 46
| 0.567258
| 192
| 1,747
| 5.09375
| 0.25
| 0.110429
| 0.06953
| 0.07771
| 0.604294
| 0.522495
| 0.377301
| 0.377301
| 0.377301
| 0.205521
| 0
| 0.054514
| 0.327991
| 1,747
| 74
| 47
| 23.608108
| 0.778535
| 0.019462
| 0
| 0.534483
| 0
| 0
| 0.004684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.034483
| 0
| 0.275862
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e3ec2b42c30f989802844d030b6a4725567d1ae
| 442
|
py
|
Python
|
config.py
|
benperove/oneliner.sh
|
0c6eb25f2dd32cdd5cc275ef5849b5e12c76e9db
|
[
"Apache-2.0"
] | 4
|
2019-02-15T01:35:17.000Z
|
2020-07-08T17:47:33.000Z
|
config.py
|
benperove/oneliner.sh
|
0c6eb25f2dd32cdd5cc275ef5849b5e12c76e9db
|
[
"Apache-2.0"
] | 1
|
2019-05-24T21:00:37.000Z
|
2019-05-24T21:00:37.000Z
|
config.py
|
benperove/oneliner.sh
|
0c6eb25f2dd32cdd5cc275ef5849b5e12c76e9db
|
[
"Apache-2.0"
] | 1
|
2020-04-10T08:03:16.000Z
|
2020-04-10T08:03:16.000Z
|
import os

# --- GitHub OAuth login settings ---
SITE = 'https://api.github.com'
CALLBACK = 'https://oneliner.sh/oauth2'
AUTHORIZE_URL = 'https://github.com/login/oauth/authorize'
TOKEN_URL = 'https://github.com/login/oauth/access_token'
SCOPE = 'user'

# --- Redis connection settings ---
# NOTE(review): raises KeyError at import time if REDIS_HOST is not set in
# the environment — presumably intentional fail-fast; confirm.
REDIS_HOST = os.environ['REDIS_HOST']
#REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0

# --- Application settings ---
# directory where oneliner entries are stored
DATA_DIR = 'oneliners'
DEBUG = True
# subdirectory for newly submitted entries
SUBMISSION_PATH = 'incoming'
| 26
| 61
| 0.68552
| 59
| 442
| 4.966102
| 0.627119
| 0.09215
| 0.095563
| 0.116041
| 0.1843
| 0.1843
| 0
| 0
| 0
| 0
| 0
| 0.016484
| 0.176471
| 442
| 16
| 62
| 27.625
| 0.788462
| 0.115385
| 0
| 0
| 0
| 0
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e4153ef83e21bf087ec6ed89dceeb002c6fc185
| 319
|
py
|
Python
|
examples/pybullet/examples/signedDistanceField.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 27
|
2018-05-21T14:28:10.000Z
|
2021-12-31T03:12:35.000Z
|
examples/pybullet/examples/signedDistanceField.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 1
|
2018-11-19T19:07:47.000Z
|
2018-11-19T19:07:47.000Z
|
examples/pybullet/examples/signedDistanceField.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 13
|
2019-11-08T12:48:44.000Z
|
2022-01-04T04:13:33.000Z
|
import pybullet as p
import pybullet  # NOTE(review): redundant — the module is already imported as `p`
import time

# Demo: drop spheres and a duck into a concave box to exercise the
# signed-distance-field collision handling.
p.connect(p.GUI)
p.loadURDF("toys/concave_box.urdf")
p.setGravity(0,0,-10)
# line up 10 small spheres slightly above the box
for i in range (10):
	p.loadURDF("sphere_1cm.urdf",[i*0.02,0,0.5])
p.loadURDF("duck_vhacd.urdf")
# step the simulation at 240 Hz, in real time, forever
timeStep = 1./240.
p.setTimeStep(timeStep)
while (1):
	p.stepSimulation()
	time.sleep(timeStep)
| 21.266667
| 45
| 0.727273
| 57
| 319
| 4.017544
| 0.561404
| 0.117904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.097179
| 319
| 15
| 46
| 21.266667
| 0.732639
| 0
| 0
| 0
| 0
| 0
| 0.159375
| 0.065625
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e415d21c97c8bf5b7c0199061ba4f235f80c0f3
| 2,472
|
py
|
Python
|
Old/TitleTable.py
|
StephanM87/Sofie-Herrmann-Praktikum
|
3fa7e715061e35aade8eb93756c30ebf10971059
|
[
"MIT"
] | null | null | null |
Old/TitleTable.py
|
StephanM87/Sofie-Herrmann-Praktikum
|
3fa7e715061e35aade8eb93756c30ebf10971059
|
[
"MIT"
] | 2
|
2021-10-04T08:22:40.000Z
|
2021-10-05T13:30:02.000Z
|
Old/TitleTable.py
|
StephanM87/Sofie-Herrmann-Praktikum
|
3fa7e715061e35aade8eb93756c30ebf10971059
|
[
"MIT"
] | null | null | null |
from pylatex import Document, Tabular, Section, NoEscape, Command, MultiRow
from Old.BioCatHubDatenmodell import DataModel
# Placeholder/demo values rendered into the PDF tables below.
# User section
first_name = "some firstname"
last_name = "some lastname"
e_mail = "some@adress.com"
institution = "some institution"
# Vessel section
vessel_type = "some vessel"
volume = int(42)
vol_unit = "mol/l"
# extra condition rows: each entry is a single-key dict {label: value}
add_attributes = [{"Sektor": "Kruzifix"}, {"Bereich": "Eisheiligen"}]
# Condition section
temp = int(42)
temp_unit = "°C"
ph_value = int(7)
buffer = "some buffer"  # NOTE(review): shadows the builtin `buffer` name
class PdfLibrary (Document):
    """Renders a BioCatHub data model to a PDF report via pylatex.

    NOTE(review): Document.__init__ is never called here; the instance is
    only used to hold the model and create_pdf builds its own Document.
    """
    def __init__(self, data_model):
        # dict-like model; create_pdf reads data_model["title"]
        self.biocathub_model = data_model

    def create_pdf(self):
        """Build the User/Vessel/Condition tables and write Gesamt_Test.pdf."""
        geometry_options = {
            "margin": "2cm",
            "includeheadfoot": True
        }
        doc = Document(page_numbers=True, geometry_options=geometry_options)
        doc.preamble.append(Command("title", self.biocathub_model["title"]))
        doc.append(NoEscape(r"\maketitle"))
        with doc.create(Section("User:")):
            with doc.create(Tabular("|c|c|")) as table:
                table.add_hline()
                table.add_row(["First Name", first_name])
                table.add_hline()
                table.add_row(["Last Name", last_name])
                table.add_hline()
                table.add_row(["E-Mail", e_mail])
                table.add_hline()
                table.add_row(["Institution", institution])
                table.add_hline()
        with doc.create(Section("Vessel:")):
            with doc.create(Tabular("|c|c|")) as table2:
                for i in DataModel["vessel"]:
                    # Each entry is a single-key dict {label: value}.
                    # Bug fix: the previous code used the whole key *list*
                    # as a dict key (TypeError); take the first key, as the
                    # add_attributes loop below already does.
                    key = list(i.keys())[0]
                    table2.add_row([key, i[key]])
                    table2.add_hline()
        with doc.create(Section("Condition:")):
            with doc.create(Tabular("|c|c|")) as table3:
                table3.add_hline()
                table3.add_row(["Temperature", temp])
                table3.add_hline()
                table3.add_row(["Unit", temp_unit])
                table3.add_hline()
                table3.add_row(["pH", ph_value])
                table3.add_hline()
                table3.add_row(["Buffer", buffer])
                table3.add_hline()
                for i in add_attributes:
                    key = list(i.keys())[0]
                    table3.add_row([key, i[key]])
                    table3.add_hline()
        # clean_tex=False keeps the intermediate .tex file for inspection
        doc.generate_pdf("Gesamt_Test",
                         compiler="pdflatex", clean_tex=False)
# Build the report from the shared DataModel and write Gesamt_Test.pdf
doc = PdfLibrary(DataModel)
doc.create_pdf()
| 34.333333
| 76
| 0.552589
| 277
| 2,472
| 4.743682
| 0.33935
| 0.073059
| 0.059361
| 0.054795
| 0.275495
| 0.255708
| 0.097412
| 0
| 0
| 0
| 0
| 0.012995
| 0.315129
| 2,472
| 71
| 77
| 34.816901
| 0.762552
| 0
| 0
| 0.180328
| 0
| 0
| 0.114887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.032787
| 0
| 0.081967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e41787cb64edb79c7312a9c056163a1f57400e3
| 535
|
py
|
Python
|
Lab2/la2_4.py
|
ThomCruz/ImageAnalysisLab
|
6a524696ecf4aab96336931d22ead8e8c9ec9e30
|
[
"MIT"
] | null | null | null |
Lab2/la2_4.py
|
ThomCruz/ImageAnalysisLab
|
6a524696ecf4aab96336931d22ead8e8c9ec9e30
|
[
"MIT"
] | null | null | null |
Lab2/la2_4.py
|
ThomCruz/ImageAnalysisLab
|
6a524696ecf4aab96336931d22ead8e8c9ec9e30
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt

# flag 0 loads the image already in grayscale (single channel)
pic = cv2.imread('image2.png',0)
#pic = imageio.imread('img/parrot.jpg')

# Luminance conversion with ITU-R BT.601 weights.
# NOTE(review): this expects an RGB image (rgb[..., :3]), but `pic` was
# loaded with flag 0 and is presumably 2-D grayscale — the dot product
# would fail or be meaningless here; confirm the intended input.
gray = lambda rgb : np.dot(rgb[... , :3] , [0.299 , 0.587, 0.114])
gray = gray(pic)
'''
log transform
-> s = c*log(1+r)
So, we calculate constant c to estimate s
-> c = (L-1)/log(1+|I_max|)
'''
max_ = np.max(gray)

def log_transform():
    # scale so the brightest input maps to 255 (L-1)
    return (255/np.log(1+max_)) * np.log(1+gray)

plt.figure(figsize = (5,5))
plt.imshow(log_transform(), cmap = plt.get_cmap(name = 'gray'))
plt.axis('off');
| 20.576923
| 67
| 0.637383
| 96
| 535
| 3.489583
| 0.541667
| 0.047761
| 0.035821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059603
| 0.153271
| 535
| 25
| 68
| 21.4
| 0.679912
| 0.071028
| 0
| 0
| 0
| 0
| 0.044156
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0.083333
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e45ae2f0c35533b4360de6c8858cfc005287327
| 4,100
|
py
|
Python
|
metafilter/model/__init__.py
|
exhuma/metafilter
|
dfbc01877a3020f7fe58b9fda3e14ed073684f25
|
[
"BSD-3-Clause"
] | null | null | null |
metafilter/model/__init__.py
|
exhuma/metafilter
|
dfbc01877a3020f7fe58b9fda3e14ed073684f25
|
[
"BSD-3-Clause"
] | null | null | null |
metafilter/model/__init__.py
|
exhuma/metafilter
|
dfbc01877a3020f7fe58b9fda3e14ed073684f25
|
[
"BSD-3-Clause"
] | null | null | null |
from ConfigParser import SafeConfigParser
from cStringIO import StringIO
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.orm import sessionmaker
from os.path import sep
from hashlib import md5
from datetime import datetime, timedelta
import re
import logging
import functools
NON_LTREE = re.compile(r'[^a-zA-Z0-9/]')
LOG = logging.getLogger(__name__)
CONFIG = None
metadata = MetaData()
Session = sessionmaker()
def loadconfig(filename):
    """
    Load the application config from *filename* and wire up the database.

    Merges built-in defaults (empty [cli_logging] section) with the file,
    requires a [database] dsn entry, binds the ORM via set_dsn, and returns
    the parser.  Raises ValueError when no DSN is configured.
    """
    # built-in defaults, fed to the parser before reading the real file
    defaults=StringIO("""\
[cli_logging]
error_log=
""")
    config = SafeConfigParser()
    config.readfp(defaults)
    config.read(filename)
    # NOTE(review): the third positional argument of SafeConfigParser.get is
    # `raw`, not a default value — passing None here relies on a missing
    # option raising rather than returning None; confirm intended behaviour.
    dsn = config.get('database', 'dsn', None)
    if not dsn:
        raise ValueError('No DSN found in the config file! This is required!')
    set_dsn(dsn)
    return config
class memoized(object):
    """Decorator that caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned, and
    not re-evaluated.

    Entries expire after 5 minutes.  SQLAlchemy Query results are eagerly
    materialised with .all() before caching so the cached value is stable.
    """
    def __init__(self, func):
        self.func = func
        # maps args tuple -> (value, datetime stored)
        self.cache = {}
    def __call__(self, *args):
        # anything stored before this instant is considered stale
        obsoletion = datetime.now() - timedelta(seconds=60*5)
        if args in self.cache and self.cache[args][1] < obsoletion:
            # value too old. Remove it from the cache
            LOG.debug("Removing obsolete value for args %r from cache." % (args,))
            del(self.cache[args])
        try:
            # EAFP: a missing/just-evicted entry raises KeyError below
            output = self.cache[args][0]
            LOG.debug("Cache hit for args %r." % (args,))
            return output
        except KeyError:
            LOG.debug("Initialising cache for args %r." % (args,))
            value = self.func(*args)
            if isinstance(value, sqlalchemy.orm.query.Query):
                # materialise the query so the cached result is reusable
                result = value.all()
                self.cache[args] = (result, datetime.now())
                return result
            else:
                self.cache[args] = (value, datetime.now())
                return value
        except TypeError:
            # uncachable -- for instance, passing a list as an argument.
            # Better to not cache than to blow up entirely.
            LOG.warning("Uncachable function call for args %r" % (args,))
            return self.func(*args)
    def __repr__(self):
        """Return the function's docstring."""
        return self.func.__doc__
    def __get__(self, obj, objtype):
        """Support instance methods."""
        return functools.partial(self.__call__, obj)
def uri_depth(uri):
    """Return the number of path components in *uri* (0 for an empty uri).

    A single trailing separator is ignored.
    """
    if not uri:
        return 0
    trimmed = uri[:-1] if uri.endswith(sep) else uri
    return len(trimmed.split(sep))
def file_md5(path):
    """
    Return the MD5 hex digest of the file at *path*.

    Reads in 1 KiB chunks so arbitrarily large files are hashed with
    constant memory.  Propagates IOError/OSError if the file cannot be
    opened or read (same as the original).
    """
    digest = md5()
    # context manager guarantees the handle is closed even if read() raises
    # (the old code leaked the handle on error); also avoid shadowing the
    # builtin `hash`.
    with open(path, "rb") as fptr:
        for chunk in iter(lambda: fptr.read(1024), b''):
            digest.update(chunk)
    return digest.hexdigest()
def uri_to_ltree(uri):
    """Convert a filesystem uri into an ltree label path rooted at ROOT.

    Empty or "/" uris map to "ROOT".  A trailing separator is stripped;
    absolute uris are prefixed with ROOT.  Characters that are not valid
    in ltree labels become "_", and path separators become ".".
    """
    if not uri or uri == "/":
        return "ROOT"
    if uri.endswith(sep):
        uri = uri[:-1]
    ltree = ("ROOT%s%s" % (sep, uri[1:])) if uri.startswith(sep) else uri
    # the ltree module uses "." as path separator: first neutralise unsafe
    # characters with "_", then turn separators into "."
    return NON_LTREE.sub("_", ltree).replace(sep, ".")
def set_dsn(dsn):
    """Create an engine for *dsn* and bind the module-level MetaData and
    Session factory to it, so subsequent ORM use hits this database."""
    engine = create_engine(dsn)
    metadata.bind = engine
    Session.bind = engine
# Re-export the model classes (imported late to avoid circular imports
# with the helpers defined above — presumably; confirm).
from metafilter.model.nodes import Node
from metafilter.model.queries import Query
from metafilter.model.tags import Tag

#
# Parse the config file
#
from os.path import join, exists, expanduser
from os import getcwd

# candidate config locations, checked in this order
paths = [
        join(getcwd(), 'config.ini'),
        join(expanduser("~"), '.metafilter', 'config.ini'),
        join('/', 'etc', 'metafilter', 'config.ini'),
        ]

# NOTE(review): there is no `break`, so every existing file is loaded and
# the *last* one found wins (i.e. /etc takes precedence over cwd) — confirm
# this precedence is intended.
for path in paths:
    if not exists(path):
        continue
    LOG.debug('Reading config from %s' % path)
    CONFIG = loadconfig(path)

if not CONFIG:
    LOG.error('Unable to open config file (search order: %s)' % (', '.join(paths)))
| 26.973684
| 83
| 0.621463
| 528
| 4,100
| 4.748106
| 0.354167
| 0.02513
| 0.025927
| 0.01436
| 0.033506
| 0.019146
| 0.019146
| 0.019146
| 0
| 0
| 0
| 0.008286
| 0.264146
| 4,100
| 151
| 84
| 27.152318
| 0.822672
| 0.135122
| 0
| 0.073395
| 0
| 0
| 0.115745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082569
| false
| 0
| 0.155963
| 0
| 0.357798
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e46d398600e4b5a657c138522f24f0eef1938e9
| 3,067
|
py
|
Python
|
manager/base.py
|
monocleface/viewer
|
8ab47a9e846bd2716fe0208c34f33565513fc3f6
|
[
"Apache-2.0"
] | 6
|
2020-02-28T21:18:16.000Z
|
2020-03-13T16:45:57.000Z
|
manager/base.py
|
monocleface/viewer
|
8ab47a9e846bd2716fe0208c34f33565513fc3f6
|
[
"Apache-2.0"
] | 6
|
2020-02-28T12:42:52.000Z
|
2020-03-16T03:49:09.000Z
|
manager/base.py
|
monocleface/viewer
|
8ab47a9e846bd2716fe0208c34f33565513fc3f6
|
[
"Apache-2.0"
] | 6
|
2020-03-05T13:04:25.000Z
|
2020-03-13T16:46:03.000Z
|
from pathlib import Path
from typing import Union
import yaml
class Config(object):
"""Basic Config Class"""
def __init__(self, cfg_yaml_path:str, root:str=".", data_path:str="./data"):
r"""
Configuration of Settings
Args:
root: root path of project, default="."
data_path: data path that contains data directories
cfg_yaml_path: argument file path(`str`)
It will create directory automatically by `cfg_yaml_path`,
```
checkpoints
└── data_type
└── eval_type
├── exp_arg1
│ ├── exp1_summary
│ ├── model_type + attr_type1 <-weights
│ ├── model_type + attr_type2
│ └── model_type + attr_type3
├── exp_arg2
└── exp_arg3
```
`cfg_yaml_path` file shuould like below.
```yaml
# confiugre.yaml
type:
data_type: mnist
eval_type: roar
model_type: resnet18
attr_type: ["vanillagrad", "gradcam"]
...
```
"""
self.prj_path = Path(root)
self.data_path = Path(data_path)
with open(cfg_yaml_path, mode="r") as f:
conf = yaml.load(f, Loader=yaml.FullLoader)
# vars(self).update(conf)
self.__dict__.update(conf)
self.check_type_args()
def check_type_args(self):
r"""
Check arguments and create experiment path
"""
type_args = self.conf["type_args"]
check_types = ["data_type", "eval_type", "model_type", "attr_type"]
for c_type in check_types:
if not (c_type in type_args):
raise KeyError(f"Configure file dosen't have {c_type}, check your argument file")
self.exp_path = self.prj_path / "checkpoints" / type_args["data_type"] / type_args["eval_type"]
self.check_dir_exist(self.exp_path)
def check_dir_exist(self, path:Union[str, Path], file:bool=False):
r"""
Check directory file is exists, if not exists will create one
Args:
path: `str` or `pathlib.Path` type
file: if True, will create a file, not a directory path
"""
if not isinstance(path, Path):
path = Path(path)
if file:
if not path.exists():
path.touch()
print(f"Given path doesn't exists, created {path}")
else:
if not path.exists():
path.mkdir(parents=True)
print(f"Given path doesn't exists, created {path}")
@property
def conf(self):
return self.__dict__
class Checkpoints(object):
"""Model Checkpoint Manager"""
def __init__(self, cfg):
r"""
Save details about model weights and summaries
"""
def save_model(self):
r"""
Save model weights
"""
def save_summary(self):
r"""
Save training stats
"""
| 29.209524
| 103
| 0.538637
| 369
| 3,067
| 4.355014
| 0.330623
| 0.034848
| 0.034225
| 0.017424
| 0.092097
| 0.047293
| 0.047293
| 0.047293
| 0.047293
| 0
| 0
| 0.004557
| 0.356048
| 3,067
| 104
| 104
| 29.490385
| 0.793418
| 0.345615
| 0
| 0.093023
| 0
| 0
| 0.141081
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162791
| false
| 0
| 0.069767
| 0.023256
| 0.302326
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e486d2de9698c2208f5c29100b107e8de344209
| 307
|
py
|
Python
|
007 - Intro List Comprehension.py/016 - Maior.py
|
rodrigoviannini/meus_Primeiros_Codigos
|
828dec1c4ce06889efd491145e631c30a45e858f
|
[
"MIT"
] | 2
|
2021-07-22T23:26:54.000Z
|
2021-07-22T23:27:27.000Z
|
007 - Intro List Comprehension.py/016 - Maior.py
|
rodrigoviannini/meus_Primeiros_Codigos
|
828dec1c4ce06889efd491145e631c30a45e858f
|
[
"MIT"
] | null | null | null |
007 - Intro List Comprehension.py/016 - Maior.py
|
rodrigoviannini/meus_Primeiros_Codigos
|
828dec1c4ce06889efd491145e631c30a45e858f
|
[
"MIT"
] | null | null | null |
"""
List Comprehension Aninhada
OBJ: Encontrar o maior ou os maiores números de uma lista e imprimir outra lista
"""
listaGenerica = [1, 2, 3, 4, 1, 2, 3, 4, 10, 10, 10, 5, 3, -4]
listaMaior = [x for x in listaGenerica if not False in [True if x >= y else False for y in listaGenerica]]
print(listaMaior)
| 30.7
| 106
| 0.693811
| 55
| 307
| 3.872727
| 0.636364
| 0.028169
| 0.028169
| 0.037559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069106
| 0.198697
| 307
| 10
| 107
| 30.7
| 0.796748
| 0.351792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e487df26dabde97ea3f1c6bd9a631bd068d4b7f
| 357
|
py
|
Python
|
thehardway/practice3.py
|
sunquan9301/pythonLearn
|
f10760a4e32c3ac267e39d835c08f45800d081b6
|
[
"Apache-2.0"
] | null | null | null |
thehardway/practice3.py
|
sunquan9301/pythonLearn
|
f10760a4e32c3ac267e39d835c08f45800d081b6
|
[
"Apache-2.0"
] | null | null | null |
thehardway/practice3.py
|
sunquan9301/pythonLearn
|
f10760a4e32c3ac267e39d835c08f45800d081b6
|
[
"Apache-2.0"
] | null | null | null |
def main():
# age = input("How old are you?")
# print("I am %s year old" % age)
file = open("demo1")
lines = file.readlines()
print("lines",lines)
for i in range(len(lines)):
print(lines[i])
file.close()
c,d = addOne(1,2)
print(c,d)
def addOne(a,b):
return a+1, b+1
if __name__ == '__main__':
main()
| 17
| 37
| 0.535014
| 56
| 357
| 3.267857
| 0.589286
| 0.10929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019531
| 0.282913
| 357
| 20
| 38
| 17.85
| 0.695313
| 0.176471
| 0
| 0
| 0
| 0
| 0.061856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0
| 0.076923
| 0.230769
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e4b454f9d9a661e964992d4f53efcc35fd88de8
| 651
|
py
|
Python
|
ipt/td1/3.3-nbracines.py
|
lucas8/MPSI
|
edefa2155071910d95633acf87b9f3a9d34f67d3
|
[
"MIT"
] | null | null | null |
ipt/td1/3.3-nbracines.py
|
lucas8/MPSI
|
edefa2155071910d95633acf87b9f3a9d34f67d3
|
[
"MIT"
] | null | null | null |
ipt/td1/3.3-nbracines.py
|
lucas8/MPSI
|
edefa2155071910d95633acf87b9f3a9d34f67d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
def nbracines(a, b, c):
if a == 0:
print("Le coefficient dominant est nul, ce n'est pas un trinome !")
return
d = b*b - 4*a*c
k = 2
if abs(d) < 1e-10:
k = 1
d = 0
elif d < 0:
k = 0
print("Le polynome " + str(a) + "X^2 + " + str(b) + "X + " + str(c) + " admet " + str(k) + " racines distinctes (det = " + str(d) + ")")
a = float(input("Entrez le coefficient dominant du trinome : "))
b = float(input("Entrez le coefficient d'ordre 1 du trinome : "))
c = float(input("Entrez la constante du trinome : "))
nbracines(a, b, c)
nbracines(0, 3, 1)
nbracines(1, 0.2, 0.01)
| 28.304348
| 140
| 0.537634
| 108
| 651
| 3.240741
| 0.435185
| 0.111429
| 0.137143
| 0.068571
| 0.165714
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047312
| 0.285714
| 651
| 22
| 141
| 29.590909
| 0.705376
| 0.026114
| 0
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.111111
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e52fb33dd28eee7b106bc48ba5c34f08261ca0b
| 2,309
|
py
|
Python
|
src/pynorare/__main__.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
src/pynorare/__main__.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | 5
|
2020-07-20T11:05:07.000Z
|
2022-03-11T15:51:52.000Z
|
src/pynorare/__main__.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
"""
Main command line interface to the pynorare package.
"""
import sys
import pathlib
import contextlib
from cldfcatalog import Config, Catalog
from clldutils.clilib import register_subcommands, get_parser_and_subparsers, ParserError, PathType
from clldutils.loglib import Logging
from pyconcepticon import Concepticon
from pynorare import NoRaRe
import pynorare.commands
def main(args=None, catch_all=False, parsed_args=None):
    """CLI entry point for the `norare` command.

    Builds the argument parser, registers sub-commands, resolves the
    concepticon/NoRaRe repositories and dispatches to the selected
    sub-command's `main`.  Returns a process exit code (0 on success).

    Args:
        args: argv-style list to parse (defaults to sys.argv).
        catch_all: swallow unexpected exceptions and return 1 instead of
            re-raising.
        parsed_args: pre-parsed namespace; skips parsing when given.
    """
    try:  # pragma: no cover
        # locate the configured local clone of concepticon-data
        repos = Config.from_file().get_clone('concepticon')
    except KeyError:  # pragma: no cover
        # no catalog configured — fall back to the current directory
        repos = pathlib.Path('.')
    parser, subparsers = get_parser_and_subparsers('norare')
    parser.add_argument(
        '--repos',
        help="clone of concepticon/concepticon-data",
        default=repos,
        type=PathType(type='dir'))
    parser.add_argument(
        '--repos-version',
        help="version of repository data. Requires a git clone!",
        default=None)
    parser.add_argument(
        '--norarepo',
        default=pathlib.Path('.'),
        type=PathType(type='dir'))
    register_subcommands(subparsers, pynorare.commands)

    args = parsed_args or parser.parse_args(args=args)

    # no sub-command selected: show help and fail
    if not hasattr(args, "main"):  # pragma: no cover
        parser.print_help()
        return 1

    with contextlib.ExitStack() as stack:
        stack.enter_context(Logging(args.log, level=args.log_level))
        if args.repos_version:  # pragma: no cover
            # If a specific version of the data is to be used, we make
            # use of a Catalog as context manager:
            stack.enter_context(Catalog(args.repos, tag=args.repos_version))
        # replace the raw paths with API objects for the sub-commands
        args.repos = Concepticon(args.repos)
        args.api = NoRaRe(args.norarepo, concepticon=args.repos)
        args.log.info('norare at {0}'.format(args.repos.repos))
        try:
            return args.main(args) or 0
        except KeyboardInterrupt:  # pragma: no cover
            return 0
        except ParserError as e:  # pragma: no cover
            # bad sub-command usage: print the error, then re-run with -h
            print(e)
            return main([args._command, '-h'])
        except Exception as e:  # pragma: no cover
            if catch_all:  # pragma: no cover
                print(e)
                return 1
            raise


if __name__ == '__main__':  # pragma: no cover
    sys.exit(main() or 0)
| 32.985714
| 99
| 0.644435
| 285
| 2,309
| 5.105263
| 0.350877
| 0.049485
| 0.080412
| 0.030241
| 0.047423
| 0.034364
| 0
| 0
| 0
| 0
| 0
| 0.003499
| 0.257254
| 2,309
| 69
| 100
| 33.463768
| 0.844898
| 0.129926
| 0
| 0.203704
| 0
| 0
| 0.085384
| 0.014063
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0.166667
| 0
| 0.277778
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e53df58b8e50b1065505ed5b573aa01243270d1
| 12,263
|
py
|
Python
|
yolov3_deepsort.py
|
h-enes-simsek/deep_sort_pytorch
|
0a9ede55e53355c19455197cc8daa60336c652bb
|
[
"MIT"
] | 1
|
2021-02-28T15:22:43.000Z
|
2021-02-28T15:22:43.000Z
|
yolov3_deepsort.py
|
h-enes-simsek/deep_sort_pytorch
|
0a9ede55e53355c19455197cc8daa60336c652bb
|
[
"MIT"
] | null | null | null |
yolov3_deepsort.py
|
h-enes-simsek/deep_sort_pytorch
|
0a9ede55e53355c19455197cc8daa60336c652bb
|
[
"MIT"
] | null | null | null |
import os
import cv2
import time
import argparse
import torch
import warnings
import numpy as np
from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results
from numpy import loadtxt #gt.txt yi almak için
class VideoTracker(object):
    """Runs YOLOv3 detection + DeepSORT tracking over a video file or webcam.

    Used as a context manager: ``__enter__`` opens the video source, the
    output writer and (optionally) the MOT ground-truth file; ``run``
    performs the per-frame detect/track/draw/log loop.
    """

    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!", UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            # the actual file is opened later, in __enter__
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names

    def __enter__(self):
        if self.args.cam != -1:
            ret, frame = self.vdo.read()
            assert ret, "Error: Camera error"
            # NOTE(review): frame.shape[0] is the row count (height) — the
            # width/height assignments look swapped here; confirm.
            self.im_width = frame.shape[0]
            self.im_height = frame.shape[1]

        else:
            assert os.path.isfile(self.video_path), "Path error"
            self.vdo.open(self.video_path)
            self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
            assert self.vdo.isOpened()

        if self.args.save_path:
            os.makedirs(self.args.save_path, exist_ok=True)

            # path of saved video and results
            self.save_video_path = os.path.join(self.args.save_path, "results.avi")
            self.save_results_path = os.path.join(self.args.save_path, "results.txt")

            # create video writer
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))

            # logging
            self.logger.info("Save results to {}".format(self.args.save_path))

        # if ground-truth boxes should be read from gt.txt (--gt flag)
        if self.args.gt:
            # gt.txt is expected next to the video, under ../gt/
            gtFolder = self.video_path + "/../gt/gt.txt"
            gt = loadtxt(gtFolder, delimiter=",")

            def sortwithFrame(elem):
                # column 0 is the frame index
                return elem[0]
            # sort list with key
            gt_sorted = sorted(gt,key=sortwithFrame)

            #-----------------------------
            # drop entries whose object_type (column 7) != 1
            def filterType(param):
                if (param[7]==1):
                    return True
                else:
                    return False
            gt_filtered = list(filter(filterType, gt_sorted))

            #-------------------------------
            # drop entries flagged as ignored (column 6 != 1)
            def filterIgnore(param):
                if (param[6]==1):
                    return True
                else:
                    return False
            gt_filtered2 = list(filter(filterIgnore, gt_filtered))

            self.gt = np.array(gt_filtered2)

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # print any exception info; returning None lets it propagate
        if exc_type:
            print(exc_type, exc_value, exc_traceback)

    # The conversion inside deep_sort did not work correctly for this case,
    # so an adjusted copy is kept here as a method.
    # input: frame image, bbox matrix in xywh format (shape=#ofDetections,4)
    # output: the xyxy-format equivalent of the xywh matrix
    def my_xywh_to_xyxy(self,ori_img, bbox_xywh):
        x,y,w,h = bbox_xywh[:,0],bbox_xywh[:,1],bbox_xywh[:,2],bbox_xywh[:,3]
        # reshape each column to (N, 1) so they concatenate side by side
        x = x.reshape((x.size,1))
        y = y.reshape((y.size,1))
        w = w.reshape((w.size,1))
        h = h.reshape((h.size,1))
        # take the frame dimensions to clamp boxes inside the image
        height, width = ori_img.shape[:2]
        x1 = np.maximum(np.int_(x-w/2),0)
        x2 = np.minimum(np.int_(x+w/2),width-1)
        y1 = np.maximum(np.int_(y-h/2),0)
        y2 = np.minimum(np.int_(y+h/2),height-1)
        arr = np.concatenate((x1,y1,x2,y2),axis=1)
        return arr

    # Convert top-left(xy)wh boxes to center-xy wh boxes (clamped to frame).
    def my_tlwh_to_xywh(self,ori_img, bbox_tlwh):
        x,y,w,h = bbox_tlwh[:,0],bbox_tlwh[:,1],bbox_tlwh[:,2],bbox_tlwh[:,3]
        x = x.reshape((x.size,1))
        y = y.reshape((y.size,1))
        w = w.reshape((w.size,1))
        h = h.reshape((h.size,1))
        # take the frame dimensions to clamp boxes inside the image
        height, width = ori_img.shape[:2]
        x1 = np.minimum(np.int_(x+w/2),width-1)
        y1 = np.minimum(np.int_(y+h/2),height-1)
        arr = np.concatenate((x1,y1,w,h),axis=1)
        return arr

    # topleft(xy)wh >> xyxy converter.
    # gt.txt stores boxes as tl-xy+wh, while yolo produces xywh with xy as
    # the box center.
    def my_tlwh_to_xyxy(self,ori_img, bbox_tlwh):
        x,y,w,h = bbox_tlwh[:,0],bbox_tlwh[:,1],bbox_tlwh[:,2],bbox_tlwh[:,3]
        x = x.reshape((x.size,1))
        y = y.reshape((y.size,1))
        w = w.reshape((w.size,1))
        h = h.reshape((h.size,1))
        # take the frame dimensions to clamp boxes inside the image
        height, width = ori_img.shape[:2]
        x1 = np.maximum(np.int_(x),0)
        x2 = np.minimum(np.int_(x+w),width-1)
        y1 = np.maximum(np.int_(y),0)
        y2 = np.minimum(np.int_(y+h),height-1)
        arr = np.concatenate((x1,y1,x2,y2),axis=1)
        return arr

    def run(self):
        """Per-frame loop: detect, (optionally inject gt), track, draw, save."""
        results = []
        idx_frame = 0
        while self.vdo.grab():
            idx_frame += 1
            # skip frames according to --frame_interval
            if idx_frame % self.args.frame_interval:
                continue

            start = time.time()
            _, ori_im = self.vdo.retrieve()
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
            #print(im.shape)  # video_height, video_width, 3

            # do detection
            bbox_xywh, cls_conf, cls_ids = self.detector(im) #bbox_xywh, confidence, labels

            # section that reads boxes from gt.txt and feeds them instead of yolo
            if (self.args.gt): # if --gt was passed on the command line
                if(idx_frame == 1 or idx_frame == 2 or idx_frame == 3): # gt data replaces yolo detections for the first three frames
                    gt_curr_frame = self.gt[self.gt[:,0]==idx_frame].astype('float64') # rows of the filtered gt belonging to the current frame
                    gt_curr_frame = gt_curr_frame[:,2:6] # take the tlwh-format columns
                    #print(gt_curr_frame)
                    #print(self.my_tlwh_to_xywh(im, gt_curr_frame))
                    bbox_xywh = self.my_tlwh_to_xywh(im, gt_curr_frame) # gt boxes instead of the yolo boxes
                    cls_conf = np.ones((bbox_xywh.shape[0],), dtype=int) # replaces yolo confidences (all scores are 1)
                    cls_ids = np.zeros(bbox_xywh.shape[0]) # every box gets yolo class id 0, i.e. person
                    ori_im = draw_boxes(ori_im, self.my_tlwh_to_xyxy(im,gt_curr_frame)) # draw the gt boxes
                    print("yolo yerine gt kullanıldı, frame: ",idx_frame)

            # to deliberately feed wrong boxes and study their effect on the
            # tracker's start-up (kept for testing)
            """
            bbox_xywh = np.array([[100,200,400.1,600.1],[500,600.1,600.1,800.1]]) #test amaçlı bilerek yanlış vermek için
            cls_conf = np.ones((bbox_xywh.shape[0],), dtype=int) #test amaçlı bilerek yanlış vermek için
            cls_ids = np.zeros(bbox_xywh.shape[0]) #test amaçlı bilerek yanlış vermek için
            ori_im = draw_boxes(ori_im, bbox_xywh)
            """

            """
            labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck",
            "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
            "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
            "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
            "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
            "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana",
            "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
            "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
            "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
            "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
            """

            # select person class 0-people 22-zebra 20-elephant
            #mask = (cls_ids == 20) + (cls_ids == 22)
            mask = cls_ids == 0

            bbox_xywh = bbox_xywh[mask]
            # bbox dilation just in case bbox too small, delete this line if using a better pedestrian detector
            bbox_xywh[:, 3:] *= 1.2
            cls_conf = cls_conf[mask]

            # do tracking
            outputs = self.deepsort.update(bbox_xywh, cls_conf, im) #im.shape = video_height, video_width, 3
            #print(bbox_xywh) # number_of_detection, 4
            #print(cls_conf) # number_of_detection,

            # draw boxes for visualization
            if len(outputs) > 0:
                bbox_tlwh = []
                bbox_xyxy = outputs[:, :4]
                identities = outputs[:, -1]

                # (custom) draws the raw detections on screen
                #ori_im = draw_boxes(ori_im, self.my_xywh_to_xyxy(im,bbox_xywh))

                # original code that draws the confirmed track matches
                ori_im = draw_boxes(ori_im, bbox_xyxy, identities)

                for bb_xyxy in bbox_xyxy:
                    bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))

                results.append((idx_frame - 1, bbox_tlwh, identities))

            end = time.time()

            if self.args.display:
                cv2.imshow("test", ori_im)
                cv2.waitKey(1)

            if self.args.save_path:
                self.writer.write(ori_im)

            # save results
            write_results(self.save_results_path, results, 'mot')

            # logging
            self.logger.info("time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
                             .format(end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)))
def parse_args():
    """Parse command-line arguments for the DeepSORT video tracker.

    Returns:
        argparse.Namespace with the video path, detector/tracker config
        paths, display options, and runtime flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("VIDEO_PATH", type=str)
    parser.add_argument("--config_detection", type=str, default="./configs/yolov3.yaml")
    parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
    # parser.add_argument("--ignore_display", dest="display", action="store_false", default=True)
    parser.add_argument("--display", action="store_true")
    # Use detections taken from the ground-truth annotations when set.
    parser.add_argument("--gt", action="store_true")
    parser.add_argument("--frame_interval", type=int, default=1)
    parser.add_argument("--display_width", type=int, default=800)
    parser.add_argument("--display_height", type=int, default=600)
    parser.add_argument("--save_path", type=str, default="./output/")
    parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
    # FIX: default was the string "-1"; use the int -1 directly instead of
    # relying on argparse re-parsing string defaults through ``type``.
    parser.add_argument("--camera", action="store", dest="cam", type=int, default=-1)
    return parser.parse_args()
if __name__ == "__main__":
    # Entry point: parse CLI flags, merge detector + tracker configs into a
    # single config object, then run the tracker over the given video.
    args = parse_args()
    cfg = get_config()
    cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_deepsort)
    # VideoTracker is used as a context manager; presumably __exit__ releases
    # the capture/writer resources -- confirm in the class definition.
    with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
        vdo_trk.run()
| 42.432526
| 149
| 0.572698
| 1,567
| 12,263
| 4.30568
| 0.273133
| 0.0249
| 0.030236
| 0.014229
| 0.249444
| 0.24233
| 0.21165
| 0.17845
| 0.129835
| 0.110271
| 0
| 0.020654
| 0.297236
| 12,263
| 288
| 150
| 42.579861
| 0.762242
| 0.157466
| 0
| 0.181818
| 0
| 0
| 0.058271
| 0.005092
| 0
| 0
| 0
| 0
| 0.017045
| 1
| 0.0625
| false
| 0
| 0.079545
| 0.005682
| 0.204545
| 0.017045
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
280906641aae735ca1d3dbc649fdb86d59c81472
| 1,172
|
py
|
Python
|
aerosandbox/numpy/array.py
|
askprash/AeroSandbox
|
9e82966a25ced9ce96ca29bae45a4420278f0f1d
|
[
"MIT"
] | null | null | null |
aerosandbox/numpy/array.py
|
askprash/AeroSandbox
|
9e82966a25ced9ce96ca29bae45a4420278f0f1d
|
[
"MIT"
] | null | null | null |
aerosandbox/numpy/array.py
|
askprash/AeroSandbox
|
9e82966a25ced9ce96ca29bae45a4420278f0f1d
|
[
"MIT"
] | 1
|
2021-09-11T03:28:45.000Z
|
2021-09-11T03:28:45.000Z
|
import numpy as onp
import casadi as cas
def array(object, dtype=None):
    """Build an array from ``object``.

    A plain NumPy conversion is attempted first.  If that conversion fails,
    or would yield an object-dtype array (which happens when the entries are
    CasADi symbolic types), the rows are stacked with CasADi instead.
    """
    try:
        candidate = onp.array(object, dtype=dtype)
    except Exception:
        candidate = None
    else:
        if candidate.dtype != "O":
            return candidate

    # NumPy could not produce a numeric array; assume CasADi contents.
    def stack_row(row):
        try:
            return cas.horzcat(*row)
        except Exception:
            # Row is not iterable (e.g. a CasADi MX scalar); use it as-is.
            return row

    return cas.vertcat(*[stack_row(row) for row in object])
def length(array) -> int:
    """Return the length of a 1D-array-like object.

    Objects supporting ``len()`` use it directly.  Objects without it
    (e.g. CasADi types) fall back to the leading dimension of their
    ``shape``; bare scalars (float, int) report a length of 1.
    """
    try:
        return len(array)
    except TypeError:
        # No len(): either a scalar or something exposing a .shape tuple.
        shape = getattr(array, "shape", None)
        if shape is not None and len(shape) >= 1:
            return shape[0]
        return 1
| 25.478261
| 89
| 0.529863
| 139
| 1,172
| 4.453237
| 0.446043
| 0.035541
| 0.051696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00561
| 0.391638
| 1,172
| 45
| 90
| 26.044444
| 0.862553
| 0.266212
| 0
| 0.129032
| 0
| 0
| 0.001209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.064516
| 0
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
280b7ce2e2cb3f65d56ba5e4705455b1cbb3bb0e
| 3,283
|
py
|
Python
|
capspayment/api_payin.py
|
agorapay/python-sdk
|
c5b7fd6894f95e6862446248b26c16253c8fd4f4
|
[
"MIT"
] | null | null | null |
capspayment/api_payin.py
|
agorapay/python-sdk
|
c5b7fd6894f95e6862446248b26c16253c8fd4f4
|
[
"MIT"
] | null | null | null |
capspayment/api_payin.py
|
agorapay/python-sdk
|
c5b7fd6894f95e6862446248b26c16253c8fd4f4
|
[
"MIT"
] | null | null | null |
"""
Payin API
"""
from dataclasses import dataclass
from typing import Union
from api_payin_model import (
PayinAdjustPaymentRequest,
PayinCancelRequest,
PayinCancelResponse,
PayinCaptureRequest,
PayinCaptureResponse,
PayinMandateRequest,
PayinMandateResponse,
PayinOrderDetailsRequest,
PayinOrderDetailsResponse,
PayinPaymentDetailsRequest,
PayinPaymentDetailsResponse,
PayinPaymentIframeRequest,
PayinPaymentIframeResponse,
PayinPaymentMethodsRequest,
PayinPaymentMethodsResponse,
PayinPaymentRequest,
PayinPaymentResponse,
PayinRefundRequest,
PayinRefundResponse,
PayinTicketRequest,
PayinTicketResponse,
)
from base import BaseRequest
from model import Response
@dataclass
class ApiPayin(BaseRequest):
    """Payin API requests.

    Thin wrappers around ``BaseRequest.request`` -- one method per Payin
    endpoint, each returning either the endpoint's typed response or a
    generic ``Response`` on error.
    """

    def payment(self, payload: PayinPaymentRequest) -> Union[PayinPaymentResponse, Response]:
        """Submit a payment."""
        return self.request("POST", "/payin/payment", payload)

    def payment_details(self, payload: PayinPaymentDetailsRequest) -> Union[PayinPaymentDetailsResponse, Response]:
        """Submit additional payment details."""
        return self.request("POST", "/payin/paymentDetails", payload)

    def payment_methods(self, payload: PayinPaymentMethodsRequest) -> Union[PayinPaymentMethodsResponse, Response]:
        """Submit an order / get the available payment methods."""
        return self.request("POST", "/payin/paymentMethods", payload)

    def capture(self, payload: PayinCaptureRequest) -> Union[PayinCaptureResponse, Response]:
        """Capture a transaction/order."""
        return self.request("POST", "/payin/capture", payload)

    def cancel(self, payload: PayinCancelRequest) -> Union[PayinCancelResponse, Response]:
        """Cancel a transaction/order."""
        return self.request("POST", "/payin/cancel", payload)

    def order_details(self, payload: PayinOrderDetailsRequest) -> Union[PayinOrderDetailsResponse, Response]:
        """Get all the order details."""
        return self.request("GET", "/payin/orderDetails", payload)

    def adjust_payment(self, payload: PayinAdjustPaymentRequest) -> Response:
        """Adjust the amount of the payment / change its breakdown."""
        return self.request("POST", "/payin/adjustPayment", payload)

    def payment_iframe(self, payload: PayinPaymentIframeRequest) -> Union[PayinPaymentIframeResponse, Response]:
        """Submit an order / get an authent code."""
        return self.request("POST", "/payin/paymentIframe", payload)

    def refund(self, payload: PayinRefundRequest) -> Union[PayinRefundResponse, Response]:
        """Refund a transaction/order."""
        return self.request("POST", "/payin/refund", payload)

    def mandate(self, payload: PayinMandateRequest) -> Union[PayinMandateResponse, Response]:
        """Get the signed mandate file."""
        return self.request("GET", "/payin/mandate", payload)

    def ticket(self, payload: PayinTicketRequest) -> Union[PayinTicketResponse, Response]:
        """Get the card payment ticket."""
        return self.request("GET", "/payin/ticket", payload)
| 32.186275
| 82
| 0.687786
| 276
| 3,283
| 8.155797
| 0.271739
| 0.053754
| 0.083074
| 0.074634
| 0.175922
| 0.086628
| 0.057308
| 0.057308
| 0
| 0
| 0
| 0
| 0.208955
| 3,283
| 101
| 83
| 32.504951
| 0.866769
| 0.114834
| 0
| 0
| 0
| 0
| 0.078576
| 0.014799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15493
| false
| 0
| 0.070423
| 0
| 0.394366
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
280c4e3ff6e2c8be5af4beb5882bf9b9cd5ee1c7
| 3,626
|
py
|
Python
|
script/gen_canonical_combining_class.py
|
CyberZHG/UChar
|
e59ee5e3ad166288380407df6d5e6c0fe20681cf
|
[
"MIT"
] | 1
|
2020-07-15T16:16:20.000Z
|
2020-07-15T16:16:20.000Z
|
script/gen_canonical_combining_class.py
|
CyberZHG/UChar
|
e59ee5e3ad166288380407df6d5e6c0fe20681cf
|
[
"MIT"
] | null | null | null |
script/gen_canonical_combining_class.py
|
CyberZHG/UChar
|
e59ee5e3ad166288380407df6d5e6c0fe20681cf
|
[
"MIT"
] | 1
|
2020-06-01T01:15:29.000Z
|
2020-06-01T01:15:29.000Z
|
#!/usr/bin/env python
""" Copyright 2020 Zhao HG
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Parse UnicodeData.txt (';'-separated): field 0 is the code point in hex,
# field 3 the canonical combining class.  Record the first code point of
# every run of identical class values (run-length encoding of the table).
with open('UnicodeData.txt', 'r') as reader:
    last, indices, canonicals, classes = '', [], [], {}
    for line in reader:
        parts = line.strip().split(';')
        if parts[3] != last:
            last = parts[3]
            indices.append(parts[0])
            canonicals.append(parts[3])
            # One representative code point per class value (later runs
            # overwrite earlier ones); used only by the generated test.
            classes[parts[3]] = parts[0]
# Append the table size constant and extern declarations to the header.
with open('include/unicode_data.h', 'a') as writer:
    writer.write('/** The total number of indices used to store the canonical combing class. */\n')
    writer.write('const int32_t CANONICAL_COMBINING_NUM = {};\n'.format(len(indices)))
    writer.write('/** The indices of the first character that have a different type. */\n')
    writer.write('extern const int32_t CANONICAL_COMBINING_INDEX[];\n')
    writer.write('/** The canonical combining class data. */\n')
    writer.write('extern const int32_t CANONICAL_COMBINING_CLASS[];\n\n')
# Emit the C++ source containing the two parallel tables, 8 values per line.
with open('src/canonical_combining_class.cpp', 'w') as writer:
    with open('copyright.txt', 'r') as reader:
        writer.write(reader.read())
    writer.write('#include "unicode_data.h"\n\n')
    writer.write('namespace unicode {\n\n')
    writer.write('\nconst int32_t CANONICAL_COMBINING_INDEX[] = {')
    for i, index in enumerate(indices):
        # First element opens the line; every 8th starts a new line.
        if i == 0:
            writer.write('\n ')
        elif i % 8 == 0:
            writer.write(',\n ')
        else:
            writer.write(', ')
        writer.write('0x' + index)
    writer.write('\n};\n')
    writer.write('\nconst int32_t CANONICAL_COMBINING_CLASS[] = {')
    for i, canonical in enumerate(canonicals):
        if i == 0:
            writer.write('\n ')
        elif i % 8 == 0:
            writer.write(',\n ')
        else:
            writer.write(', ')
        writer.write(canonical)
    writer.write('\n};\n\n')
    writer.write('} // namespace unicode\n')
# Emit a generated unit test asserting one lookup per distinct class value.
with open('tests/test_canonical_combining_class_gen.cpp', 'w') as writer:
    with open('copyright.txt', 'r') as reader:
        writer.write(reader.read())
    writer.write('#include "test.h"\n')
    writer.write('#include "unicode_char.h"\n\n')
    writer.write('namespace test {\n\n')
    writer.write('class CanonicalCombiningClassGenTest : public UnitTest {};\n\n')
    writer.write('__TEST_U(CanonicalCombiningClassGenTest, test_classes) {\n')
    for canonical, code in classes.items():
        writer.write(' __ASSERT_EQ({}, unicode::getCanonicalCombiningClass({}));\n'.format(
            canonical, '0x' + code
        ))
    writer.write('}\n\n')
    writer.write('} // namespace test\n')
| 40.741573
| 99
| 0.660232
| 490
| 3,626
| 4.822449
| 0.344898
| 0.144308
| 0.066018
| 0.044012
| 0.274228
| 0.253491
| 0.243335
| 0.195514
| 0.195514
| 0.11934
| 0
| 0.00978
| 0.210425
| 3,626
| 88
| 100
| 41.204545
| 0.815578
| 0.294264
| 0
| 0.285714
| 0
| 0
| 0.386907
| 0.137593
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
280cef3837d316af797287a2c5c707f3a00a10c1
| 3,676
|
py
|
Python
|
server.py
|
Timothylock/twillio-buzzer-connector
|
9ac7e4763a5eee7d04daa054841e17332c0bac13
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
Timothylock/twillio-buzzer-connector
|
9ac7e4763a5eee7d04daa054841e17332c0bac13
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
Timothylock/twillio-buzzer-connector
|
9ac7e4763a5eee7d04daa054841e17332c0bac13
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, request
from twilio.twiml.voice_response import VoiceResponse, Gather
import datetime
import os
import json
import http.client
app = Flask(__name__)
# Moment until which the buzzer is unlocked; starting at "now" means locked.
allowUntil = datetime.datetime.now()
# Fetch env vars
whitelisted_numbers = os.environ['WHITELISTED_NUMBERS'].split(",") # Numbers allowed to dial into the system
forward_number = os.environ['FORWARD_NUMBER'] # Number an unrecognized caller is forwarded to
forward_number_from = os.environ['FORWARD_NUMBER_FROM'] # Caller ID used when forwarding (see forward())
buzzcode = os.environ['BUZZCODE'] # Digits to dial to let them in
minutes = int(os.environ['MINUTES']) # Number of minutes to unlock the system
slack_path = os.environ['SLACK_PATH'] # Slack webhook path for notifications
say_message = os.environ['SAY_MESSAGE'] # The message to be said to the dialer
# Buzzer
##########################################################################
@app.route("/buzzer/webhook", methods=['GET', 'POST'])
def voice():
    """Respond to incoming phone calls.

    Whitelisted callers hit the unlock logic directly; unknown callers must
    press a digit (robocall filter) before being forwarded to a human.
    """
    resp = VoiceResponse()
    incoming_number = request.values['From']
    # If an unknown number, filter out robo callers and forward to cell
    if incoming_number not in whitelisted_numbers:
        gather = Gather(num_digits=1, action='/buzzer/forward')
        gather.say('Press 1 to continue')
        resp.append(gather)
        return str(resp)
    # Tell the user a nice message that they are not permitted to enter
    if not allowed_to_buzz():
        resp.say("The system cannot let you in. Did you dial the right buzzcode?")
        send_message("A visitor was just rejected as the buzzer system was not unlocked")
        return str(resp)
    # Otherwise, unlock the door: playing the DTMF buzz code opens it.
    resp.say(say_message, language='zh-CN')
    resp.play(digits=buzzcode)
    send_message("A visitor was just let in")
    return str(resp)
@app.route("/buzzer/forward", methods=['GET', 'POST'])
def forward():
    """Forward a non-whitelisted caller who pressed a digit in the webhook."""
    resp = VoiceResponse()
    incoming_number = request.values['From']
    send_message("About to forward a call from " + str(incoming_number))
    resp.say("Please note your call may be recorded for the benefit of both parties")
    # Dial out using the configured caller ID so the callee sees a known number.
    resp.dial(forward_number, caller_id=forward_number_from)
    return str(resp)
@app.route("/buzzer/state", methods=['POST'])
def change_state():
    """Tells the buzzer to unlock the door for the next 30 minutes"""
    global allowUntil
    c = request.json
    if "active" not in c:
        return "missing \"active\" field", 400
    # NOTE(review): "active" values other than the strings "true"/"false"
    # are silently ignored and still return 200 -- confirm intended.
    if c["active"] == "true":
        # Open the unlock window for the configured number of minutes.
        allowUntil = datetime.datetime.now() + datetime.timedelta(minutes=minutes)
    if c["active"] == "false":
        # Close the unlock window immediately.
        allowUntil = datetime.datetime.now()
    return "OK", 200
@app.route("/buzzer/state", methods=['GET'])
def status():
    """Fetches whether the system will buzz people in"""
    # str(bool).lower() yields "true"/"false", matching the POST payload format.
    return json.dumps({"is_active": str(allowed_to_buzz()).lower()}), 200
def allowed_to_buzz():
    """Return True while the unlock window (``allowUntil``) is still open."""
    global allowUntil
    now = datetime.datetime.now()
    return now < allowUntil
def send_message(message):
    """Post *message* to the configured Slack webhook (best effort).

    Failures are logged and swallowed deliberately so that a Slack outage
    never breaks the call/buzzer flow.
    """
    try:
        conn = http.client.HTTPSConnection("hooks.slack.com")
        # FIX: build the JSON with json.dumps so quotes/backslashes in the
        # message are escaped; the old string concatenation produced invalid
        # JSON for such messages. Output is byte-identical for plain text.
        payload = json.dumps({"text": message})
        headers = {
            'content-type': "application/json",
        }
        conn.request("POST", slack_path, payload, headers)
        conn.getresponse()
    except Exception:
        # FIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors.
        print("error sending message")
if __name__ == "__main__":
    # Bind to all interfaces on port 8080.
    app.run(host='0.0.0.0', port=8080)
| 33.418182
| 121
| 0.639554
| 469
| 3,676
| 4.908316
| 0.360341
| 0.027368
| 0.045178
| 0.050391
| 0.148566
| 0.132059
| 0.108601
| 0.037359
| 0.037359
| 0
| 0
| 0.0074
| 0.227965
| 3,676
| 109
| 122
| 33.724771
| 0.803735
| 0.178727
| 0
| 0.171429
| 0
| 0
| 0.212152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.085714
| 0
| 0.285714
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2810be0978f433319136f58db93ce028bbbb9a9c
| 8,151
|
py
|
Python
|
cosmos/ingestion/ingest/process/hierarchy_extractor/bert_hierarchy_extractor/train/bert_extractor_trainer.py
|
ilmcconnell/Cosmos
|
84245034727c30e20ffddee9e02c7e96f3aa115e
|
[
"Apache-2.0"
] | 30
|
2019-03-14T08:24:34.000Z
|
2022-03-09T06:05:44.000Z
|
cosmos/ingestion/ingest/process/hierarchy_extractor/bert_hierarchy_extractor/train/bert_extractor_trainer.py
|
ilmcconnell/Cosmos
|
84245034727c30e20ffddee9e02c7e96f3aa115e
|
[
"Apache-2.0"
] | 78
|
2019-02-07T22:14:48.000Z
|
2022-03-09T05:59:18.000Z
|
cosmos/ingestion/ingest/process/hierarchy_extractor/bert_hierarchy_extractor/train/bert_extractor_trainer.py
|
ilmcconnell/Cosmos
|
84245034727c30e20ffddee9e02c7e96f3aa115e
|
[
"Apache-2.0"
] | 11
|
2019-03-02T01:20:06.000Z
|
2022-03-25T07:25:46.000Z
|
from bert_hierarchy_extractor.datasets.train_dataset import TrainHierarchyExtractionDataset
from bert_hierarchy_extractor.datasets.utils import cudafy
from bert_hierarchy_extractor.logging.utils import log_metrics
import numpy as np
from torch.utils.data import DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
import torch
import time
from tqdm import tqdm
from comet_ml import Experiment
def placeholder_num_correct(x, y, print_result=False):
    """Count correct class predictions, ignoring positions labelled -1.

    Args:
        x: logits with the class dimension on axis 1 (batch, classes, ...).
        y: integer labels matching x's non-class dims; -1 marks padding.
        print_result: when True, also print the first sample's labels and
            predictions for quick visual inspection during validation.

    Returns:
        Tuple ``(num_correct, num_scored)`` over the unmasked positions.
    """
    predictions = torch.argmax(x, dim=1).view(-1)
    labels = y.view(-1)
    keep = labels != -1
    labels = labels[keep]
    predictions = predictions[keep]

    if print_result:
        # Dump the first sample so logs show one concrete example.
        print('*************')
        first_mask = (y[0] != -1)
        print(y[0][first_mask])
        print('-------------')
        first_pred = torch.argmax(x[0], dim=0)
        print(first_pred[first_mask])
        print('**************')

    num_correct = (predictions == labels).sum().detach().cpu().numpy()
    return num_correct, predictions.shape[0]
class BertExtractorTrainer:
    """Training/validation driver for the BERT hierarchy-extraction model.

    Builds class-balanced loss weights from the training label histogram,
    wraps the dataset in data loaders, and runs gradient-accumulated
    training with periodic validation and best-model checkpointing.
    """
    def __init__(
        self,
        experiment: Experiment,
        model,
        data_path: str,
        base_model: str,
        bsz: int,
        num_workers: int,
        lr: float,
        weight_decay: float,
        warmup_updates: int,
        max_updates: int,
        accumulation_steps: int,
        validate_interval: int,
        save_metric: str,
        save_min: bool,
        device: str,
        seed=1,
        num_correct=placeholder_num_correct,
    ):
        """
        :param model: Initialized model
        :param data_path: Path to dataset
        :param base_model: Path to base model
        :param bsz: Batch size
        :param num_workers: Num workers available
        :param lr: Learning rate
        :param weight_decay: weight decay (NOTE(review): currently unused;
            AdamW below is constructed with a hard-coded 0.01)
        :param warmup_updates: number of samples to warmup learning rate
        :param max_updates: max number of samples
        :param accumulation_steps: Number of batches to accumulate loss over before running an update
        :param validate_interval: num updates before validating
        :param save_metric: metric to use to save best model
        :param save_min: Whether we're looking to minimize or maximize the save metric
            (NOTE(review): not stored or used; minimization is assumed)
        :param seed: Random seed for iteration
        """
        torch.manual_seed(seed)
        self.experiment = experiment
        self.device = device
        print(device)
        self.model = model.to(device)
        self.max_accumulation = accumulation_steps
        print("Loading training dataset")
        self.train_dataset = TrainHierarchyExtractionDataset(data_path)
        # Build class-balanced loss weights from the label histogram using
        # the "effective number of samples" formulation with beta=0.9999.
        num_classes = len(self.train_dataset.label_map)-1
        class_counts = np.zeros(num_classes)
        for i in range(len(self.train_dataset)):
            _, l = self.train_dataset[i]
            for cl in l:
                class_counts[cl] += 1
        effective_num = 1.0 - np.power(0.9999, class_counts)
        weights = (1.0 - 0.9999) / np.array(effective_num)
        weights = weights / np.sum(weights * num_classes)
        self.weights = torch.FloatTensor(weights).to(device)
        print(self.weights)
        #print("Loading validation dataset")
        #self.val_dataset = TrainHierarchyExtractionDataset(data_path, base_model, "val")
        # NOTE(review): the "validation" loader below samples from the
        # TRAINING dataset -- confirm whether a held-out split was intended.
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=bsz,
            num_workers=num_workers,
            pin_memory=True,
            shuffle=True,
            collate_fn=TrainHierarchyExtractionDataset.collate,
        )
        self.val_dataloader = DataLoader(
            self.train_dataset,
            batch_size=bsz,
            num_workers=num_workers,
            pin_memory=True,
            shuffle=True,
            collate_fn=TrainHierarchyExtractionDataset.collate,
        )
        self.bsz = bsz
        self.optimizer = AdamW(model.parameters(), lr=lr, weight_decay=0.01)
        self.scheduler = get_linear_schedule_with_warmup(
            self.optimizer,
            num_warmup_steps=warmup_updates,
            num_training_steps=max_updates,
        )
        self.max_updates = max_updates
        self.validate_interval = validate_interval
        self.num_correct = num_correct
        self.save_metric = save_metric
        # Lower-is-better: start at +inf so the first validation always saves.
        self.current_best_metric = float('inf')
    def validate(self, validate_cap=None, best_save_metric=None):
        """Run up to ``validate_cap`` validation batches and return metrics.

        When ``best_save_metric`` is given and that metric improved
        (lower is better), checkpoint the model to ``best``.
        Note: puts the model in eval() mode and does not restore train().
        """
        self.model.eval()
        val_cap = validate_cap if validate_cap is not None else len(self.val_dataloader)
        with tqdm(total=val_cap) as pbar:
            total_loss = 0
            total_correct = 0
            total_instances = 0
            for ind, batch in enumerate(self.val_dataloader):
                if ind > val_cap:
                    break
                xs, labels = cudafy(batch)
                loss, logits = self.model(xs, labels=labels, weights=self.weights)
                # Print example predictions for the first few batches only.
                nc, t = self.num_correct(logits, labels, print_result=True if ind < 5 else False)
                total_correct += nc
                total_instances += t
                total_loss += loss.detach().cpu().numpy()
                pbar.update(1)
        loss_per_sample = total_loss / val_cap / self.bsz
        accuracy = total_correct / total_instances
        metrics = {}
        metrics["val_loss"] = loss_per_sample
        metrics["val_accuracy"] = accuracy
        metrics["val_per_sample_loss"] = total_loss
        if best_save_metric is not None:
            if metrics[best_save_metric] <= self.current_best_metric:
                self.model.save_pretrained('best')
                self.current_best_metric = metrics[best_save_metric]
        return metrics
    def train(self):
        """Run gradient-accumulated training until ``max_updates`` updates,
        validating every ``validate_interval`` updates and logging metrics.
        """
        start_time = time.time()
        # Verify forward pass using validation loop
        metrics = self.validate(validate_cap=5)
        self.model.train()
        with tqdm(total=self.max_updates, desc='Number of updates') as pbar:
            total_updates = 1
            val_updates = 1
            while total_updates < self.max_updates:
                accumulation_steps = 0
                accumulation_loss = None
                for batch in self.train_dataloader:
                    xs, labels = cudafy(batch)
                    loss, _ = self.model(xs, labels=labels, weights=self.weights)
                    if accumulation_loss is None:
                        # First batch of a fresh accumulation window.
                        accumulation_steps += 1
                        accumulation_loss = loss
                    elif accumulation_steps > self.max_accumulation:
                        # Window full: apply one optimizer update with
                        # gradient clipping, then start a new window.
                        self.optimizer.zero_grad()
                        accumulation_loss.backward()
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                        self.optimizer.step()
                        self.scheduler.step()
                        pbar.update(1)
                        total_updates += 1
                        accumulation_steps = 0
                        accumulation_loss = loss
                        l = loss.detach().cpu().numpy()
                        metrics = {}
                        metrics["train_update_loss"] = l
                        metrics["train_per_sample_loss"] = l / self.bsz
                        # TODO: Accuracy, f1, etc metrics
                        log_metrics(self.experiment, metrics, total_updates)
                        if total_updates % self.validate_interval == 0:
                            # NOTE(review): validate() switches the model to
                            # eval() and nothing switches it back to train()
                            # here -- confirm this is intended.
                            metrics = self.validate(validate_cap=100, best_save_metric=self.save_metric)
                            val_updates += 1
                            log_metrics(self.experiment, metrics, val_updates)
                    else:
                        # Keep accumulating gradients for this window.
                        accumulation_steps += 1
                        accumulation_loss += loss
        # Final full validation and checkpoint of the last weights.
        metrics = self.validate(validate_cap=1000)
        print(f"Final validation metrics: {metrics}")
        torch.save(self.model.state_dict(), 'last.pt')
        val_updates += 1
        log_metrics(self.experiment, metrics, val_updates)
        end_time = time.time()
        total_time = end_time - start_time
        print(f"Total train time: {total_time}")
| 39.567961
| 104
| 0.585818
| 909
| 8,151
| 5.036304
| 0.220022
| 0.024028
| 0.02097
| 0.017038
| 0.211228
| 0.133246
| 0.103102
| 0.103102
| 0.08519
| 0.08519
| 0
| 0.011881
| 0.328794
| 8,151
| 205
| 105
| 39.760976
| 0.824895
| 0.10514
| 0
| 0.164706
| 0
| 0
| 0.033114
| 0.002934
| 0
| 0
| 0
| 0.004878
| 0
| 1
| 0.023529
| false
| 0
| 0.058824
| 0
| 0.1
| 0.076471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28137bb29b2acdc147558b677e97f5e615bea160
| 2,900
|
py
|
Python
|
adduser.py
|
Vignesh424/Face-Recognition-Attendance-Python
|
5d9c33b64bd41918edc55290a320f73bc4afa4e5
|
[
"Apache-2.0"
] | null | null | null |
adduser.py
|
Vignesh424/Face-Recognition-Attendance-Python
|
5d9c33b64bd41918edc55290a320f73bc4afa4e5
|
[
"Apache-2.0"
] | null | null | null |
adduser.py
|
Vignesh424/Face-Recognition-Attendance-Python
|
5d9c33b64bd41918edc55290a320f73bc4afa4e5
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import os
import sqlite3
import dlib
import re,time
from playsound import playsound
import pyttsx3
# Open the default webcam and capture face samples for a new user,
# registering the user's details in the SQLite student database first.
cam = cv2.VideoCapture(0)
cam.set(3, 640) # set video width
cam.set(4, 480) # set video height
face_detector = cv2.CascadeClassifier('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Face recognition/HaarCascade/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()
# init function to get an engine instance for the speech synthesis
engine1 = pyttsx3.init()
engine2 = pyttsx3.init()
# For each person, enter one numeric face id
detector = dlib.get_frontal_face_detector()
# Basic email-format pattern. NOTE(review): should be a raw string r'...'
# to avoid invalid-escape-sequence warnings in newer Python versions.
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
Id =int(input("Enter ID:"))
fullname = input("Enter FullName : ")
email=input("Enter Email:")
match = re.match(regex,email)
if match == None:
    print('Invalid Email')
    # Abort the whole script on a malformed email address.
    raise ValueError('Invalid Email')
rollno = int(input("Enter Roll Number : "))
print("\n [INFO] Initializing face capture. Look the camera and wait ...")
# say method on the engine that passing input text to be spoken
playsound('sound.mp3')
engine1.say('User Added Successfully')
# run and wait method, it processes the voice commands.
# NOTE(review): the text was queued on engine1 but engine2 is run here --
# the "User Added Successfully" prompt is likely never spoken; confirm.
engine2.runAndWait()
connects = sqlite3.connect("C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Face recognition/sqlite3/Studentdb.db")# connecting to the database
c = connects.cursor()
c.execute('CREATE TABLE IF NOT EXISTS Student (ID INT NOT NULL UNIQUE PRIMARY KEY, FULLNAME TEXT NOT NULL, EMAIL NOT NULL, ROLLNO INT UNIQUE NOT NULL , STATUS TEXT DATE TIMESTAMP)')
# Parameterized insert: safe against SQL injection from the typed inputs.
c.execute("INSERT INTO Student(ID, FULLNAME, EMAIL,ROLLNO) VALUES(?,?,?,?)",(Id,fullname,email,rollno))
print('Record entered successfully')
connects.commit()# committing into the database
c.close()
connects.close()# closing the connection
# Initialize individual sampling face count
count = 0
while(True):
    ret, img = cam.read()
    img = cv2.flip(img,1) # flip video image vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
        count += 1
        # Save the captured image into the datasets folder
        cv2.imwrite("dataset/User." + str(Id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
        cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video
    if k == 27:
        break
    elif count >= 30: # Take 30 face sample and stop video
        playsound('sound.mp3')
        engine2.say('DataSets Captured Successfully')
        # run and wait method, it processes the voice commands.
        engine2.runAndWait()
        break
# Doing a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
| 43.283582
| 182
| 0.686207
| 412
| 2,900
| 4.803398
| 0.456311
| 0.024255
| 0.036382
| 0.01718
| 0.175846
| 0.175846
| 0.141486
| 0.141486
| 0.141486
| 0.141486
| 0
| 0.024979
| 0.185517
| 2,900
| 66
| 183
| 43.939394
| 0.81287
| 0.206897
| 0
| 0.137931
| 0
| 0.051724
| 0.368967
| 0.087506
| 0
| 0
| 0.001804
| 0
| 0
| 1
| 0
| false
| 0
| 0.12069
| 0
| 0.12069
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2814df1e327e7a389483fc7f28c047ef76e86e37
| 8,753
|
py
|
Python
|
conet/datasets/duke_oct_flat_sp.py
|
steermomo/conet
|
21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b
|
[
"MIT"
] | null | null | null |
conet/datasets/duke_oct_flat_sp.py
|
steermomo/conet
|
21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b
|
[
"MIT"
] | null | null | null |
conet/datasets/duke_oct_flat_sp.py
|
steermomo/conet
|
21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b
|
[
"MIT"
] | 1
|
2020-05-18T10:05:24.000Z
|
2020-05-18T10:05:24.000Z
|
import multiprocessing as mp
# mp.set_start_method('spawn')
import math
import os
import pickle
import random
from glob import glob
from os import path
import albumentations as alb
import cv2
import numpy as np
import skimage
import torch
import imageio
from albumentations.pytorch import ToTensorV2
from skimage.color import gray2rgb
from torch.utils.data import Dataset
from conet.config import get_cfg
# https://github.com/albumentations-team/albumentations/pull/511
# Fix grid distortion bug. #511
# GridDistortion bug fix (see the PR linked above).
# Geometric/size augmentations for training, applied jointly to the
# image(+soft-label channels) and the mask.
train_size_aug = alb.Compose([
    # alb.RandomSizedCrop(min_max_height=(300, 500)),
    alb.PadIfNeeded(min_height=100, min_width=600, border_mode=cv2.BORDER_REFLECT101),
    alb.Rotate(limit=6),
    alb.RandomScale(scale_limit=0.05,),
    alb.ElasticTransform(),
    # alb.GridDistortion(p=1, num_steps=20, distort_limit=0.5),
    # alb.GridDistortion(num_steps=10, p=1),
    # alb.OneOf([
    # alb.OpticalDistortion(),
    # ]),
    # alb.MaskDropout(image_fill_value=0, mask_fill_value=-1,p=0.3),
    alb.HorizontalFlip(),
    # alb.VerticalFlip(),
    # alb.RandomBrightness(limit=0.01),
    # Pad then crop to the fixed 224x512 network input size.
    alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101),
    alb.RandomCrop(224, 512),
    # alb.Normalize(),
    # alb.pytorch.ToTensor(),
    # ToTensorV2()
])
# Photometric augmentations + normalization, applied to the RGB image only.
train_content_aug = alb.Compose([
    # alb.MedianBlur(3),
    # alb.GaussianBlur(3),
    alb.RGBShift(r_shift_limit=5, g_shift_limit=5, b_shift_limit=5),
    alb.RandomBrightnessContrast(brightness_limit=0.05),
    alb.Normalize(),
    # ToTensorV2()
])
# Deterministic pad + center crop for validation geometry.
val_aug = alb.Compose([
    # alb.PadIfNeeded(512, border_mode=cv2.BORDER_REFLECT101),
    # alb.Normalize(),
    # alb.Resize(512, 512),
    alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101),
    alb.CenterCrop(224, 512),
    # ToTensorV2(),
])
# Validation-time normalization only.
val_c_aug = alb.Compose([
    alb.Normalize(),
    # ToTensorV2()
])
# Dead code below: earlier flattened-dataset pipelines kept for reference.
# train_aug_f = alb.Compose([
# # alb.RandomSizedCrop(min_max_height=(300, 500)),
# alb.RandomScale(),
# # alb.HorizontalFlip(),
# alb.VerticalFlip(),
# alb.RandomBrightness(limit=0.01),
# alb.Rotate(limit=30),
# # 224 548
# alb.PadIfNeeded(min_height=224, min_width=548, border_mode=cv2.BORDER_REFLECT101),
# alb.RandomCrop(224, 512),
# alb.Normalize(),
# # alb.pytorch.ToTensor(),
# ToTensorV2()
# ])
# val_aug_f = alb.Compose([
# alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101),
# alb.Normalize(),
# # alb.Resize(512, 512),
# alb.CenterCrop(224, 512),
# ToTensorV2(),
# ])
class DukeOctFlatSPDataset(Dataset):
    """Duke OCT DME dataset of flattened B-scans with superpixel soft labels.

    Each basename ``<bname>`` in the data directory provides three files:
    ``<bname>.jpg`` (grayscale image), ``<bname>_label.npy`` (hard label map,
    -1 marks ignored pixels) and ``<bname>_softlabel.npy`` (per-class soft
    labels, channel-first).  All samples are preloaded into memory in
    ``__init__`` and only copied + augmented in ``__getitem__``.
    """
    def __init__(self, split='train', n_seg=0):
        # n_seg selects the superpixel-granularity subdirectory of the data root.
        cfg = get_cfg()
        self.cfg = cfg
        self.data_dir = path.join(cfg.dme_flatten_sp, str(n_seg))
        print(f'Load data from {self.data_dir}')
        # with open(path.join(cfg.data_dir, 'split.dp'), 'rb') as infile:
        #     self.d_split = pickle.load(infile)
        self.split = split
        data_files = glob(path.join(self.data_dir, '*.jpg'))
        # img_bname = ['_'.join(path.basename(x).split('_')[:-1]) for x in img_files]
        data_bnames = [path.basename(x).split('.')[0] for x in data_files]
        # self.data_bnames = data_bnames
        # Basenames look like '<tag>_<subject>_...'; the subject id drives the
        # split: subjects < 6 go to train, the rest to validation.
        subject_ids = [int(x.split('_')[1]) for x in data_bnames]
        if split == 'train':
            self.bnames = [data_bnames[i] for i in range(len(data_files)) if subject_ids[i] < 6]
        else:
            self.bnames = [data_bnames[i] for i in range(len(data_files)) if subject_ids[i] >= 6]
        # b_aug: geometric augmentation applied jointly to image+softlabel+mask;
        # c_aug: content augmentation (e.g. normalisation) for the image only.
        # train_size_aug / val_aug etc. are module-level albumentations pipelines.
        if split == 'train':
            self.b_aug = train_size_aug
            self.c_aug = train_content_aug
        elif split == 'val':
            self.b_aug = val_aug
            self.c_aug = val_c_aug
        else:
            raise NotImplementedError
        # Preload every sample into memory; __getitem__ works on copies so the
        # cached arrays are never mutated by augmentation.
        self.cache = []
        for idx in range(len(self)):
            bname = self.bnames[idx]
            img_fp = path.join(self.data_dir, f'{bname}.jpg')
            label_fp = path.join(self.data_dir, f'{bname}_label.npy')
            softlabel_fp = path.join(self.data_dir, f'{bname}_softlabel.npy')
            img = imageio.imread(img_fp)
            label = np.load(label_fp)
            softlabel = np.load(softlabel_fp)
            self.cache.append((img_fp, img, label, softlabel))
    def __len__(self):
        """Number of samples in the selected split."""
        # return len(self.d_basefp)
        return len(self.bnames)
    def __getitem__(self, idx):
        """Return one augmented sample as a dict of tensors plus the file name."""
        # (previous lazy-loading implementation kept for reference)
        # carr = np.load(path.join(self.data_dir, self.d_basefp[idx]))
        # carr = np.load(self.bnames[idx])
        # if idx in self.cache.keys():
        #     img_fp, img, label, soft_label = self.cache[idx]
        # else:
        #     bname = self.bnames[idx]
        #     img_fp = path.join(self.data_dir, f'{bname}.jpg')
        #     label_fp = path.join(self.data_dir, f'{bname}_label.npy')
        #     softlabel_fp = path.join(self.data_dir, f'{bname}_softlabel.npy')
        #     img = imageio.imread(img_fp)
        #     label = np.load(label_fp)
        #     softlabel = np.load(softlabel_fp)
        #     self.cache[idx] = (img_fp, img, label, softlabel)
        img_fp, img, label, softlabel = self.cache[idx]
        # Work on copies so augmentation never mutates the cached arrays.
        img_fp, img, label, softlabel = img_fp, img.copy(), label.copy(), softlabel.copy()
        # img = gray2rgb(img)
        # if self.split == 'train':
        #     auged = train_aug_f(image=img, mask=label)
        # else:
        #     auged = val_aug_f(image=img, mask=label)
        # auged['fname'] = img_fp
        # auged['softlabel'] = torch.tensor(0.)
        # return auged
        # img = np.transpose(img, (1, 2, 0))
        # Stack image and soft labels channel-wise so the geometric
        # augmentation transforms them identically.
        softlabel = np.transpose(softlabel, (1, 2, 0))
        img = np.expand_dims(img, axis=-1)
        img_a = np.concatenate([img, softlabel], axis=-1)
        # img = gray2rgb(img)
        # grid_distortion may not support negative values, so temporarily remap
        # the -1 ignore label to 255 around the augmentation call.
        label[label == -1] = 255
        auged = self.b_aug(image=img_a, mask=label)
        img = auged['image']
        label = auged['mask']
        label[label == 255] = -1
        # Split the stacked channels back apart: channel 0 is the image,
        # the rest are the soft labels.
        softlabel = img[:, :, 1:]
        image = img[:, :, 0]
        # print(image.shape, image.max(), image.min())
        image = np.clip(image, 0, 255).astype('uint8')
        # image = skimage.img_as_ubyte(image)
        image = gray2rgb(image)
        image = self.c_aug(image=image)['image']  # normalisation etc.
        # image = alb.Normalize()(image)['image']
        # To channel-first layout for PyTorch.
        image = np.transpose(image, (2, 0, 1))
        softlabel = np.transpose(softlabel, (2, 0, 1))
        # Pixels labelled -1 are excluded from the loss.
        loss_mask = (label !=-1).astype("float")
        image = torch.from_numpy(image)
        softlabel = torch.from_numpy(softlabel).float()
        label = torch.from_numpy(label)
        loss_mask = torch.from_numpy(loss_mask)
        # img = auged['image']
        # print(img.shape)
        return {
            'image': image,
            'softlabel': softlabel,
            'mask': label,
            'fname': img_fp,
            'loss_mask': loss_mask
        }
if __name__ == "__main__":
    from skimage import segmentation, color, filters, exposure
    import skimage
    import os
    from os import path
    import imageio
    from matplotlib import pyplot as plt
    from torch.utils.data import DataLoader
    import random
    # Fix the RNG state so augmentation output is reproducible across runs.
    np.random.seed(42)
    random.seed(42)
    save_dir = '/data1/hangli/oct/debug'
    os.makedirs(save_dir, exist_ok=True)
    cmap = plt.cm.get_cmap('jet')
    n_seg = 1200
    training_dataset = DukeOctFlatSPDataset(split='train', n_seg=n_seg)
    data_loader = DataLoader(training_dataset, batch_size=16, shuffle=False, num_workers=8, pin_memory=False)
    # Dump (image | colourised mask) side-by-side JPEGs for visual inspection.
    for epoch in range(40):
        for batch_idx, batch in enumerate(data_loader):
            images = batch['image']
            masks = batch['mask']
            for sample_idx in range(len(images)):
                arr = images[sample_idx].permute(1, 2, 0).cpu().numpy()
                # Min-max normalise before converting back to uint8.
                arr = (arr - arr.min()) / (arr.max() - arr.min())
                arr = skimage.img_as_ubyte(arr)
                mask_rgb = color.label2rgb(masks[sample_idx].cpu().numpy())
                mask_rgb = skimage.img_as_ubyte(mask_rgb)
                print(arr.shape, mask_rgb.shape)
                side_by_side = np.hstack((arr, mask_rgb))
                out_path = path.join(save_dir, f'{epoch}_{batch_idx}_{sample_idx}.jpg')
                print(f'=> {out_path}')
                imageio.imwrite(out_path, side_by_side)
| 30.498258
| 109
| 0.595224
| 1,134
| 8,753
| 4.409171
| 0.207231
| 0.012
| 0.022
| 0.0256
| 0.3316
| 0.2814
| 0.2614
| 0.2546
| 0.2546
| 0.2546
| 0
| 0.033967
| 0.266766
| 8,753
| 286
| 110
| 30.604895
| 0.745092
| 0.31395
| 0
| 0.161765
| 0
| 0
| 0.03956
| 0.007439
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022059
| false
| 0
| 0.183824
| 0.007353
| 0.227941
| 0.022059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
281720b5fdc07905c3eb03b6c213540b162d5693
| 1,109
|
py
|
Python
|
tests/config/test_project.py
|
gaborbernat/toxn
|
1ecb1121b3e3dc30b892b0254cb5566048b5d2e7
|
[
"MIT"
] | 4
|
2018-04-15T15:12:32.000Z
|
2019-06-03T12:41:06.000Z
|
tests/config/test_project.py
|
gaborbernat/tox3
|
1ecb1121b3e3dc30b892b0254cb5566048b5d2e7
|
[
"MIT"
] | 3
|
2018-03-15T11:06:30.000Z
|
2018-04-15T15:17:29.000Z
|
tests/config/test_project.py
|
gaborbernat/tox3
|
1ecb1121b3e3dc30b892b0254cb5566048b5d2e7
|
[
"MIT"
] | 1
|
2019-09-25T19:53:09.000Z
|
2019-09-25T19:53:09.000Z
|
from io import StringIO
from pathlib import Path
import pytest
from toxn.config import from_toml
@pytest.mark.asyncio
async def test_load_from_io():
    """from_toml parses [build-system] and [tool.toxn] from an in-memory stream."""
    content = StringIO("""
[build-system]
requires = ['setuptools >= 38.2.4']
build-backend = 'setuptools:build_meta'
[tool.toxn]
default_tasks = ['py36']
""")
    build, project, filename = await from_toml(content)
    # Backend/requires come from [build-system]; the project dict from [tool.toxn].
    assert build.backend == 'setuptools:build_meta'
    assert build.requires == ['setuptools >= 38.2.4']
    assert project == {'default_tasks': ['py36']}
    # A stream has no backing file, so no config path is reported.
    assert filename is None
@pytest.mark.asyncio
async def test_load_from_path(tmpdir):
    """from_toml reads the same tables from a file path and echoes the path back."""
    filename: Path = Path(tmpdir) / 'test.toml'
    with open(filename, 'wt') as f:
        f.write("""
[build-system]
requires = ['setuptools >= 38.2.4']
build-backend = 'setuptools:build_meta'
[tool.toxn]
default_tasks = ['py36']
""")
    build, project, config_path = await from_toml(filename)
    assert build.backend == 'setuptools:build_meta'
    assert build.requires == ['setuptools >= 38.2.4']
    assert project == {'default_tasks': ['py36']}
    # The resolved config path must match the file we wrote.
    assert filename == config_path
| 25.790698
| 59
| 0.6844
| 144
| 1,109
| 5.138889
| 0.298611
| 0.097297
| 0.108108
| 0.113514
| 0.675676
| 0.675676
| 0.675676
| 0.675676
| 0.575676
| 0.575676
| 0
| 0.02603
| 0.16862
| 1,109
| 42
| 60
| 26.404762
| 0.776573
| 0
| 0
| 0.588235
| 0
| 0
| 0.35257
| 0.079351
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2820ef5bc2fdcf7913515a4a45ac8b19c189a6ce
| 1,340
|
py
|
Python
|
longest path in matrix.py
|
buhuhaha/python
|
4ff72ac711f0948ae5bcb0886d68e8df77fe515b
|
[
"MIT"
] | null | null | null |
longest path in matrix.py
|
buhuhaha/python
|
4ff72ac711f0948ae5bcb0886d68e8df77fe515b
|
[
"MIT"
] | null | null | null |
longest path in matrix.py
|
buhuhaha/python
|
4ff72ac711f0948ae5bcb0886d68e8df77fe515b
|
[
"MIT"
] | null | null | null |
# Row/column offsets of the 8 neighbours of a cell, taken pairwise
# (row[k], col[k]): the three cells above, the two beside, the three below.
row = [-1, -1, -1, 0, 0, 1, 1, 1]
col = [-1, 0, 1, -1, 1, -1, 0, 1]
def isValid(x, y, mat):
    """Return True when (x, y) lies inside the bounds of `mat`."""
    # Check the row first so len(mat[0]) is only evaluated when a row exists
    # (preserves the original short-circuit behaviour on an empty matrix).
    if not 0 <= x < len(mat):
        return False
    return 0 <= y < len(mat[0])
def findMaxLength(mat, x, y, previous):
    """Length of the longest consecutive-character path entering cell (x, y).

    The cell extends the path only when it holds the character immediately
    following `previous` in the alphabet; otherwise the path ends (length 0).
    """
    if not isValid(x, y, mat) or mat[x][y] != chr(ord(previous) + 1):
        return 0
    # The cell itself contributes 1; recurse into all 8 neighbours.
    best = 1
    for dx, dy in zip(row, col):
        best = max(best, 1 + findMaxLength(mat, x + dx, y + dy, mat[x][y]))
    return best
def findMaximumLength(mat, ch):
    """Return the length of the longest consecutive-character path in `mat`
    that starts at a cell containing `ch`.

    Every occurrence of `ch` is tried as a start; the path is extended
    through the 8 neighbours via findMaxLength.  Returns 0 for an empty
    matrix or when `ch` does not occur.
    """
    # Guard clause: `not mat` already covers the redundant `not len(mat)` check.
    if not mat:
        return 0
    max_len = 0
    for x in range(len(mat)):
        for y in range(len(mat[0])):
            if mat[x][y] != ch:
                continue
            # Pair the neighbour offsets instead of indexing by k.
            for dx, dy in zip(row, col):
                length = findMaxLength(mat, x + dx, y + dy, ch)
                max_len = max(max_len, 1 + length)
    return max_len
if __name__ == '__main__':
    # Demo board: the longest path from 'C' is C-D-E-F-G-H.
    mat = [
        ['D', 'E', 'H', 'X', 'B'],
        ['A', 'O', 'G', 'P', 'E'],
        ['D', 'D', 'C', 'F', 'D'],
        ['E', 'B', 'E', 'A', 'S'],
        ['C', 'D', 'Y', 'E', 'N'],
    ]
    ch = 'C'
    longest = findMaximumLength(mat, ch)
    print("The length of the longest path with consecutive characters starting from "
          "character", ch, "is", longest)
| 20.30303
| 85
| 0.435075
| 200
| 1,340
| 2.835
| 0.27
| 0.084656
| 0.021164
| 0.014109
| 0.326279
| 0.292769
| 0.292769
| 0.292769
| 0.292769
| 0.292769
| 0
| 0.032143
| 0.373134
| 1,340
| 66
| 86
| 20.30303
| 0.642857
| 0
| 0
| 0.285714
| 0
| 0
| 0.08806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0
| 0.028571
| 0.228571
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28226ec9ea67dad00950fa1852a66dbf14540c2c
| 4,653
|
py
|
Python
|
AnimalProfile/session/batchAnimals.py
|
AtMostafa/AnimalProfile
|
866f55659b80291f840ecacd090afada5f4de674
|
[
"MIT"
] | null | null | null |
AnimalProfile/session/batchAnimals.py
|
AtMostafa/AnimalProfile
|
866f55659b80291f840ecacd090afada5f4de674
|
[
"MIT"
] | null | null | null |
AnimalProfile/session/batchAnimals.py
|
AtMostafa/AnimalProfile
|
866f55659b80291f840ecacd090afada5f4de674
|
[
"MIT"
] | null | null | null |
__all__ = ('get_session_list',
'get_animal_list',
'get_event',
'get_tag_pattern',
'get_pattern_animalList',
'get_current_animals')
import datetime
import logging
from .. import Root
from .. import File
from .. import Profile
from ..Profile import EventProfile
from .singleAnimal import *
def get_session_list(root: Root,
                     animalList: list = None,
                     profile: Profile = None):
    """Collect the sessions matching *profile* across the given animals.

    When *animalList* is None/''/[] every animal under *root* is searched;
    when *profile* is None an unconstrained profile is used.
    """
    if profile is None:
        profile = Profile(root=root)
    if animalList is None or animalList == '' or animalList == []:
        animalList = root.get_all_animals()
    # Merge each animal's matching sessions into one combined profile.
    combined = Profile(root=root)
    for animal in animalList:
        combined += File(root, animal).get_profile_session_list(profile)
    return combined
def get_animal_list(root: Root, profile: Profile = None):
    """Return the sorted animals having at least one session matching *profile*."""
    if profile is None:
        profile = Profile(root=root)
    sessions = get_session_list(root, animalList=None, profile=profile).Sessions
    # The animal name is the session-name prefix plus three characters.
    cut = len(profile._prefix) + 3
    unique_animals = {session[:cut] for session in sessions}
    return sorted(unique_animals)
def get_event(root: Root,
              profile1: Profile,
              profile2: Profile,
              badAnimals: list = None):
    """Find the animals matching profile1 and profile2 IN SUCCESSION.

    An "event" marks a condition change: the last session matching
    *profile1* is immediately followed, in the animal's full session
    history, by the first session matching *profile2*.  Animals listed in
    *badAnimals* are excluded.  Returns an EventProfile holding the
    session pairs of every animal where the event happened.
    """
    if badAnimals is None:
        badAnimals = []
    animalList1 = get_animal_list(root, profile1)
    animalList2 = get_animal_list(root, profile2)
    # Only animals matching both profiles can exhibit the transition.
    animalList0 = set(animalList1).intersection(set(animalList2))
    animalList0 = [animal for animal in animalList0 if animal not in badAnimals]  # remove bad animals from animalList0
    animalList0.sort()
    eventProfile = EventProfile(profile1, profile2)
    for animal in animalList0:
        sessionProfile1 = get_session_list(root, animalList=[animal], profile=profile1)
        sessionProfile2 = get_session_list(root, animalList=[animal], profile=profile2)
        sessionTotal = get_session_list(root, animalList=[animal], profile=root.get_profile())
        try:
            index = sessionTotal.Sessions.index(sessionProfile1.Sessions[-1])
            if sessionProfile2.Sessions[0] == sessionTotal.Sessions[index + 1]:
                # Two profiles succeed, meaning the Event happended.
                eventProfile.append(sessionProfile1.Sessions, sessionProfile2.Sessions)
        except (ValueError, IndexError):
            # Expected misses only: empty session lists, session not found in
            # the history, or no successor session.  (Previously a bare
            # `except Exception` silently swallowed every error here.)
            pass
    return eventProfile
def get_tag_pattern(root: Root,
                    animalList: list = None,
                    tagPattern: str = '*'):
    """Apply 'get_pattern_session_list' over *animalList* (all animals when empty)."""
    if animalList is None or animalList == []:
        animalList = root.get_all_animals()
    # Accumulate every animal's pattern matches into a single profile dict.
    merged = root.get_profile()
    for animal in animalList:
        merged += File(root, animal).get_pattern_session_list(tagPattern=tagPattern)
    return merged
def get_pattern_animalList(root: Root, tagPattern: str):
    """Return the sorted animals with at least one session matching *tagPattern*."""
    sessions = get_tag_pattern(root, animalList=None, tagPattern=tagPattern).Sessions
    # The animal name is the root prefix plus three characters of the session name.
    cut = len(root.prefix) + 3
    return sorted({session[:cut] for session in sessions})
def get_current_animals(root: Root, days_passed: int = 4):
    """Return the animals with a new session within the last *days_passed* days."""
    now = datetime.datetime.now()
    all_animals = root.get_all_animals()
    if all_animals == []:
        logging.warning('No animal found!')
        return []
    recent = []
    for animal in all_animals:
        tag = File(root, animal)
        sessions = tag.get_all_sessions()
        if not sessions:
            # No sessions recorded at all for this animal.
            continue
        last_session_date = tag.get_session_date(sessions[-1])
        if (now - last_session_date).days <= days_passed:
            recent.append(animal)
    return recent
| 33.47482
| 119
| 0.663873
| 514
| 4,653
| 5.877432
| 0.223735
| 0.032771
| 0.027805
| 0.029791
| 0.309169
| 0.282688
| 0.266799
| 0.200265
| 0.127772
| 0.127772
| 0
| 0.009445
| 0.249087
| 4,653
| 138
| 120
| 33.717391
| 0.85518
| 0.148077
| 0
| 0.225806
| 0
| 0
| 0.029275
| 0.005699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0.032258
| 0.075269
| 0
| 0.215054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
282403dbaa1f17f6e0d6f80a9faabdc5990009bd
| 10,747
|
py
|
Python
|
IsaacAgent.py
|
dholmdahl/connect4-1
|
cdcd92ee30f45e89a9f01ebc87a8b6d797cc4a81
|
[
"MIT"
] | null | null | null |
IsaacAgent.py
|
dholmdahl/connect4-1
|
cdcd92ee30f45e89a9f01ebc87a8b6d797cc4a81
|
[
"MIT"
] | null | null | null |
IsaacAgent.py
|
dholmdahl/connect4-1
|
cdcd92ee30f45e89a9f01ebc87a8b6d797cc4a81
|
[
"MIT"
] | null | null | null |
from random import choice
from copy import deepcopy
from game_data import GameData
from agents import Agent
import numpy as np
import random
import pickle
import pandas as pd
class IsaacAgent(Agent):
    """Connect-4 agent: depth-limited minimax over a flat 42-square board.

    The GameData grid is converted into a flat list of 42 squares holding
    'R', 'B' or ' ' (7 columns x 6 rows, index 0 top-left, row-major).
    'R' is the maximising player and 'B' the minimising one.  Positions
    beyond the depth limit are scored by a hand-tuned positional heuristic.
    """

    def __init__(self, max_time=2, max_depth=300):
        """Create the agent.

        max_time is stored but not currently enforced; max_depth bounds the
        number of expanded minimax nodes before evaluate() takes over.
        """
        self.max_time = max_time
        self.max_depth = max_depth
        # Per-square value lists: positive entries score for 'R', negative
        # ones for 'B' (see evaluate()); based on the odd/even row strategy.
        self.heuristic = [
            [0], [0], [0], [0], [0], [0], [0],
            [0], [0], [1, -1], [2, -2], [1, -1], [0], [0],
            [0], [0], [1, -2], [2, -2], [1, -2], [0], [0],
            [0], [0], [3, -2], [3, -2], [3, -2], [0], [0],
            [0], [0], [2, -3], [2, -3], [2, -3], [0], [0],
            [0], [1, -1], [3, -3], [4, -4], [3, -3], [1, -1], [0]
        ]
        self.game_data = None
        # Close the model file promptly instead of leaking the handle
        # (previously an anonymous open() was passed straight to pickle.load).
        with open("./c4model.sav", 'rb') as model_file:
            self.model = pickle.load(model_file)

    def get_name(self) -> str:
        """Return the display name of this agent."""
        return "IsaacAgent"

    def get_move(self, game_data) -> int:
        """Pick a column (0-6) to play for the current position in game_data."""
        self.game_data = game_data
        # Flatten the numpy grid into the 42-square list representation:
        # reverse each row, concatenate, then reverse the whole flat list.
        rows_reversed_connect4_board = []
        for row in list(game_data.game_board):
            rows_reversed_connect4_board.append(row[::-1])
        connect4_board = list(np.concatenate(rows_reversed_connect4_board).flat)[::-1]
        # Map numeric cells to the character representation used internally.
        for sn, sv in enumerate(connect4_board):
            if sv == 0:
                connect4_board[sn] = ' '
            elif sv == 1:
                connect4_board[sn] = 'R'
            else:
                connect4_board[sn] = 'B'
        turn = self.player(connect4_board)
        actions = self.actions(connect4_board)
        # Random fallback in case no action improves on +/-infinity.
        best_action = random.choice(actions)
        if turn == 'R':
            # Max player: pick the action with the best worst-case value.
            local_best_min_v = -float('inf')
            for action in actions:
                self.current_depth = 0
                min_v = self.min_value(self.result(connect4_board, action))
                if min_v > local_best_min_v:
                    local_best_min_v = min_v
                    best_action = action
        else:
            # Min player: pick the action with the smallest best-case value.
            local_best_max_v = float('inf')
            for action in actions:
                self.current_depth = 0
                max_v = self.max_value(self.result(connect4_board, action))
                if max_v < local_best_max_v:
                    local_best_max_v = max_v
                    best_action = action
        return best_action

    def print_board(self, board):
        """Print the flat board as a 7x6 ASCII grid (debug helper)."""
        for l in range(0, 42, 7):
            row = ''.join([board[l + i] + '|' for i in range(7)])
            print(row[:13])
            print('-+-+-+-+-+-+-')

    def player(self, board):
        """Return the side to move: 'R' moves first, so 'B' when R has more pieces."""
        return 'B' if board.count('R') > board.count('B') else 'R'

    def is_tie(self, board):
        """Return True when no empty squares remain."""
        return len([sq for sq in board if sq == ' ']) == 0

    def utility(self, board):
        """Terminal value: 0 for a tie, else -1000/+1000 for a B/R win.

        player(board) is the side to move, i.e. the side that did NOT make
        the winning move, so R-to-move means B just won (-1000).
        """
        return 0 if self.is_tie(board) else -1000 if self.player(board) == "R" else 1000

    def terminal(self, board):
        """Return True when the position has four in a row or is a tie."""
        # use modulo 7 to detect new row
        row = 0
        for sq in range(42):
            if sq % 7 == 0:
                row += 1
            distance_to_new_row = 7 * row - (sq + 1)
            distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0]
            if board[sq] == ' ':
                continue
            # 4 horizontally
            if distance_to_new_row >= 3 and board[sq] == board[sq + 1] and board[sq] == board[sq + 2] and board[sq] == board[sq + 3]:
                return True
            # 4 vertically
            elif distance_to_column_end > 2 and board[sq] == board[sq + 7] and board[sq] == board[sq + 14] and board[sq] == board[sq + 21]:
                return True
            # 4 diagonally (down-right, then down-left)
            elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 24 < len(board) and board[sq] == board[sq + 8] and board[sq] == board[sq + 16] and board[sq] == board[sq + 24]:
                return True
            elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 18 < len(board) and board[sq] == board[sq - 6] and board[sq] == board[sq - 12] and board[sq] == board[sq - 18]:
                return True
        return self.is_tie(board)

    def actions(self, board):
        """Return the playable columns (0-6): those whose top square is empty."""
        return [sn for sn in range(7) if board[sn] == ' ']

    def result(self, board, action):
        """Return the board after the side to move drops a piece in `action`."""
        result = board[:]
        # Scan the column bottom-up for the first empty square.
        for r in range(6):
            current_sq = board[action + 35 - r * 7]
            if current_sq == ' ':
                result[action + 35 - r * 7] = self.player(board)
                break
        return result

    def count_two_in_row(self, board, player):
        """Count 2-in-a-row runs for `player` (not used by the current evaluate)."""
        two_in_row = 0
        row = 0
        for sq in range(42):
            if sq % 7 == 0:
                row += 1
            distance_to_new_row = 7 * row - (sq + 1)
            distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0]
            if board[sq] != player or board[sq].isdigit() or board[sq] == ' ':
                continue
            # 2 horizontally
            if distance_to_new_row >= 3 and board[sq] == board[sq + 1]:
                two_in_row += 1
            # 2 vertically
            elif distance_to_column_end > 2 and board[sq] == board[sq + 7]:
                two_in_row += 1
            # 2 diagonally
            elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 8 < len(board) and board[sq] == board[sq + 8]:
                two_in_row += 1
            elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 6 < len(board) and board[sq] == board[sq - 6]:
                two_in_row += 1
        return two_in_row

    def count_three_in_row(self, board, player):
        """Count 3-in-a-row runs for `player` (not used by the current evaluate)."""
        three_in_row = 0
        row = 0
        for sq in range(42):
            if sq % 7 == 0:
                row += 1
            distance_to_new_row = 7 * row - (sq + 1)
            distance_to_column_end = [i for i in range(6) if (sq + 1) + i * 7 > 35][0]
            if board[sq] != player or board[sq].isdigit() or board[sq] == ' ':
                continue
            # 3 horizontally
            if distance_to_new_row >= 3 and board[sq] == board[sq + 1] and board[sq] == board[sq + 2]:
                three_in_row += 1
            # 3 vertically
            elif distance_to_column_end > 2 and board[sq] == board[sq + 7] and board[sq] == board[sq + 14]:
                three_in_row += 1
            # 3 diagonally
            elif distance_to_new_row >= 3 and distance_to_column_end >= 2 and sq + 16 < len(board) and board[sq] == board[sq + 8] and board[sq] == board[sq + 16]:
                three_in_row += 1
            elif distance_to_new_row >= 3 and distance_to_column_end <= 2 and 0 <= sq - 12 < len(board) and board[sq] == board[sq - 6] and board[sq] == board[sq - 12]:
                three_in_row += 1
        return three_in_row

    def evaluate(self, board):
        """Positional heuristic for non-terminal leaves.

        Sums self.heuristic (set in __init__) per square: negative entries
        count when 'B' occupies the square, positive ones when 'R' does, so
        higher is better for 'R'.  (The previous docstring listed a stale
        value table that did not match self.heuristic.)
        Based on the Odd-Even strategy:
        https://www.youtube.com/watch?v=YqqcNjQMX18
        """
        total_score = 0
        for vn, values in enumerate(self.heuristic):
            for value in values:
                if value < 0 and board[vn] == 'B':
                    total_score += value
                elif value > 0 and board[vn] == 'R':
                    total_score += value
        return total_score

    def min_value(self, board):
        """Minimax value with 'B' (the minimiser) to move."""
        if self.terminal(board):
            return self.utility(board)
        if self.current_depth > self.max_depth:
            # Depth budget exhausted: fall back to the heuristic.
            return self.evaluate(board)
        # NOTE(review): current_depth counts expanded nodes across the whole
        # subtree, not recursion depth — behaviour preserved as-is.
        self.current_depth += 1
        v = float('inf')
        for action in self.actions(board):
            max_v = self.max_value(self.result(board, action))
            v = min(v, max_v)
        return v

    def max_value(self, board):
        """Minimax value with 'R' (the maximiser) to move."""
        if self.terminal(board):
            return self.utility(board)
        if self.current_depth > self.max_depth:
            return self.evaluate(board)
        self.current_depth += 1
        v = -float('inf')
        for action in self.actions(board):
            min_v = self.min_value(self.result(board, action))
            v = max(v, min_v)
        return v
| 33.902208
| 199
| 0.499209
| 1,472
| 10,747
| 3.460598
| 0.110734
| 0.027091
| 0.031213
| 0.032195
| 0.557126
| 0.522968
| 0.476835
| 0.461327
| 0.439537
| 0.437966
| 0
| 0.052933
| 0.365404
| 10,747
| 317
| 200
| 33.902208
| 0.693988
| 0.215967
| 0
| 0.357576
| 0
| 0
| 0.008225
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.048485
| 0.030303
| 0.272727
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2826bae5797a9d9d95a636c0a99581f2619ca237
| 5,872
|
py
|
Python
|
algorand-oracle-smart-contracts/src/algorand_oracle.py
|
damees/algorand-oracle
|
f7f078f9d153341d1ba546ff66e8afbf2685f114
|
[
"MIT"
] | null | null | null |
algorand-oracle-smart-contracts/src/algorand_oracle.py
|
damees/algorand-oracle
|
f7f078f9d153341d1ba546ff66e8afbf2685f114
|
[
"MIT"
] | null | null | null |
algorand-oracle-smart-contracts/src/algorand_oracle.py
|
damees/algorand-oracle
|
f7f078f9d153341d1ba546ff66e8afbf2685f114
|
[
"MIT"
] | null | null | null |
from pyteal import *
# Local-state keys for this application.
ADMIN_KEY = Bytes("admin")  # 1 when the account is a contract admin
WHITELISTED_KEY = Bytes("whitelisted")  # 1 when the account may call business methods
REQUESTS_BALANCE_KEY = Bytes("requests_balance")  # prepaid oracle-request credits
# Purchase bounds in microAlgos (1000 and 10 ALGO respectively).
MAX_BUY_AMOUNT = Int(1000000000)
MIN_BUY_AMOUNT = Int(10000000)
# Address that receives the payment leg when requests are bought.
REQUESTS_SELLER = Addr("N5ICVTFKS7RJJHGWWM5QXG2L3BV3GEF6N37D2ZF73O4PCBZCXP4HV3K7CY")
# Transaction note identifying market-exchange-rate requests for the off-chain oracle.
MARKET_EXCHANGE_NOTE = Bytes("algo-oracle-app-4")
def approval_program():
    """Build the PyTeal approval program for the oracle application.

    All state is kept in accounts' local state (ADMIN_KEY, WHITELISTED_KEY,
    REQUESTS_BALANCE_KEY).  Routing happens in the Cond at the bottom:
    lifecycle transactions first, then business methods selected by the
    first application argument.
    """
    # App creation: no args allowed; the creator (account 0) becomes an admin.
    on_creation = Seq(
        [
            Assert(Txn.application_args.length() == Int(0)),
            App.localPut(Int(0), ADMIN_KEY, Int(1)),
            Return(Int(1))
        ]
    )
    # Admin flag of the sender (account index 0 is always the sender).
    is_contract_admin = App.localGet(Int(0), ADMIN_KEY)
    # set/remove an admin for this contract
    admin_status = Btoi(Txn.application_args[2])
    set_admin = Seq(
        [
            Assert(
                And(
                    is_contract_admin,
                    Txn.application_args.length() == Int(3),
                    Txn.accounts.length() == Int(1),
                )
            ),
            # Account index 1 is the target passed in Txn.accounts.
            App.localPut(Int(1), ADMIN_KEY, admin_status),
            Return(Int(1)),
        ]
    )
    # Opt-in handler: initialise the sender as not whitelisted.
    register = Seq(
        [
            App.localPut(Int(0), WHITELISTED_KEY, Int(0)), Return(Int(1))
        ]
    )
    # Depending on what you do, you should always consider implementing a whitelisting to
    # control who access your app. This will allow you to process offchain validation before
    # allowing an account to call you app.
    # You may also consider case by case whitelisting to allow access to specific business methods.
    whitelist = Seq(
        [
            Assert(
                And(
                    is_contract_admin,
                    Txn.application_args.length() == Int(2),
                    Txn.accounts.length() == Int(1)
                )
            ),
            App.localPut(Int(1), WHITELISTED_KEY, Int(1)),
            Return(Int(1))
        ]
    )
    # This should be added to the checklist of business methods.
    is_whitelisted = App.localGet(Int(0), WHITELISTED_KEY)
    # An admin can increase the request balance of a user.
    requests_amount = Btoi(Txn.application_args[1])
    allocate_requests = Seq(
        [
            Assert(
                And(
                    is_contract_admin,  # Sent by admin
                    Txn.application_args.length() == Int(3),  # receiver and amount are provided
                    Txn.accounts.length() == Int(1),
                    App.localGet(Int(1), WHITELISTED_KEY),  # receiver is whitelisted
                )
            ),
            App.localPut(
                Int(1),
                REQUESTS_BALANCE_KEY,
                App.localGet(Int(1), REQUESTS_BALANCE_KEY) + requests_amount
            ),
            Return(Int(1))
        ]
    )
    # a client can buy requests
    buy_requests = Seq(
        [
            Assert(
                And(
                    is_whitelisted,
                    Global.group_size() == Int(2),  # buying requests must be done using an atomic transfer
                    Gtxn[0].type_enum() == TxnType.Payment,  # the first transaction must be a payment...
                    Gtxn[0].receiver() == REQUESTS_SELLER,  # ...to our address
                    Gtxn[0].amount() >= MIN_BUY_AMOUNT,  # we don't sell for less than 10...
                    Gtxn[0].amount() <= MAX_BUY_AMOUNT,  # ...or more than 1000 ALGO
                    Txn.group_index() == Int(1),  # call to the contract is the second transaction
                    Txn.application_args.length() == Int(2),
                    Txn.accounts.length() == Int(1)  # the address which will use the requests must be provided
                )
            ),
            App.localPut(
                Int(1),
                REQUESTS_BALANCE_KEY,
                # One request credited per 100000 microAlgos (0.1 ALGO) paid.
                App.localGet(Int(1), REQUESTS_BALANCE_KEY) + (Gtxn[0].amount() / Int(100000)),
            ),
            Return(Int(1))
        ]
    )
    # Spend one prepaid request credit to ask for a market exchange rate.
    market_exchange_rate_request = Seq(
        [
            Assert(
                And(
                    is_whitelisted,
                    Txn.note() == MARKET_EXCHANGE_NOTE,
                    Txn.application_args.length() == Int(4),
                    Txn.accounts.length() == Int(0),
                    App.localGet(Int(0), REQUESTS_BALANCE_KEY) >= Int(1)
                )
            ),
            App.localPut(
                Int(0),
                REQUESTS_BALANCE_KEY,
                App.localGet(Int(0), REQUESTS_BALANCE_KEY) - Int(1),
            ),
            Return(Int(1))
        ]
    )
    # Implement other oracle methods...
    # Router: lifecycle transactions first, then dispatch on the method name.
    program = Cond(
        [Txn.application_id() == Int(0), on_creation],
        [Txn.on_completion() == OnComplete.DeleteApplication, Return(is_contract_admin)],
        [Txn.on_completion() == OnComplete.UpdateApplication, Return(is_contract_admin)],
        [Txn.on_completion() == OnComplete.CloseOut, Return(Int(1))],
        [Txn.on_completion() == OnComplete.OptIn, register],
        [Txn.application_args[0] == Bytes("set_admin"), set_admin],
        [Txn.application_args[0] == Bytes("whitelist"), whitelist],
        [Txn.application_args[0] == Bytes("allocate_requests"), allocate_requests],
        [Txn.application_args[0] == Bytes("buy_requests"), buy_requests],
        [Txn.application_args[0] == Bytes("get_market_exchange_rate"), market_exchange_rate_request]
    )
    return program
def clear_state_program():
    """Clear-state program: always approve."""
    return Seq(
        [
            Return(Int(1))
        ]
    )
if __name__ == "__main__":
    # Compile both programs as TEAL v5 application code and write them out.
    targets = (
        ("algorand_oracle_approval.teal", approval_program),
        ("algorand_oracle_clear_state.teal", clear_state_program),
    )
    for teal_path, build_program in targets:
        with open(teal_path, "w") as teal_file:
            compiled = compileTeal(build_program(), mode=Mode.Application, version=5)
            teal_file.write(compiled)
| 35.161677
| 111
| 0.547854
| 630
| 5,872
| 4.91746
| 0.263492
| 0.032279
| 0.075533
| 0.046482
| 0.353777
| 0.286959
| 0.212395
| 0.20142
| 0.171724
| 0.100387
| 0
| 0.026601
| 0.340599
| 5,872
| 166
| 112
| 35.373494
| 0.773502
| 0.146628
| 0
| 0.355072
| 0
| 0
| 0.04989
| 0.028652
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.014493
| false
| 0
| 0.007246
| 0
| 0.036232
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28271eebbca12a80c721021d335930842259d168
| 20,198
|
py
|
Python
|
custom_components/shelly/__init__.py
|
astrandb/ShellyForHASS
|
f404d3007a26945f310a801c6c7d196d7fa1fe23
|
[
"MIT"
] | null | null | null |
custom_components/shelly/__init__.py
|
astrandb/ShellyForHASS
|
f404d3007a26945f310a801c6c7d196d7fa1fe23
|
[
"MIT"
] | null | null | null |
custom_components/shelly/__init__.py
|
astrandb/ShellyForHASS
|
f404d3007a26945f310a801c6c7d196d7fa1fe23
|
[
"MIT"
] | null | null | null |
"""
Support for Shelly smart home devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/shelly/
"""
# pylint: disable=broad-except, bare-except, invalid-name, import-error
from datetime import timedelta
import logging
import time
import asyncio
import voluptuous as vol
from homeassistant.const import (
CONF_DEVICES, CONF_DISCOVERY, CONF_ID, CONF_NAME, CONF_PASSWORD,
CONF_SCAN_INTERVAL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP)
from homeassistant import config_entries
from homeassistant.helpers import discovery
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.script import Script
from homeassistant.util import slugify
from .const import *
from .configuration_schema import CONFIG_SCHEMA
REQUIREMENTS = ['pyShelly==0.1.16']
_LOGGER = logging.getLogger(__name__)
__version__ = "0.1.6.b6"
VERSION = __version__
BLOCKS = {}
DEVICES = {}
BLOCK_SENSORS = []
DEVICE_SENSORS = []
#def _get_block_key(block):
# key = block.id
# if not key in BLOCKS:
# BLOCKS[key] = block
# return key
def get_block_from_hass(hass, discovery_info):
    """Look up a registered Shelly block from the discovery payload.

    Returns None when the payload carries no block id (implicitly, like
    the original).
    """
    if SHELLY_BLOCK_ID not in discovery_info:
        return None
    key = discovery_info[SHELLY_BLOCK_ID]
    return hass.data[SHELLY_BLOCKS][key]
def _dev_key(dev):
key = dev.id + "-" + dev.device_type
if dev.device_sub_type is not None:
key += "-" + dev.device_sub_type
return key
#def _get_device_key(dev):
# key = _dev_key(dev)
# if not key in DEVICES:
# DEVICES[key] = dev
# return key
def get_device_from_hass(hass, discovery_info):
    """Fetch the device registered under the discovery payload's device id."""
    return hass.data[SHELLY_DEVICES][discovery_info[SHELLY_DEVICE_ID]]
async def async_setup(hass, config):
    """Import a yaml configuration into the config-entries flow."""
    if DOMAIN not in config:
        # Nothing configured under our domain; config entries handle the rest.
        return True
    hass.data[DOMAIN] = config
    import_flow = hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
    )
    hass.async_create_task(import_flow)
    return True
async def async_setup_entry(hass, config_entry):
    """Set up Shelly from a config entry.

    Reads the yaml conf stashed by async_setup, applies deprecation shims,
    publishes the shared DEVICES/BLOCKS registries into hass.data and
    starts the ShellyInstance that owns the pyShelly backend.
    """
    _LOGGER.info("Starting shelly, %s", __version__)
    config = hass.data[DOMAIN]
    conf = config.get(DOMAIN, {})
    #todo!
    hass.data[SHELLY_CONFIG] = conf
    hass.data[SHELLY_DEVICES] = DEVICES
    hass.data[SHELLY_BLOCKS] = BLOCKS
    # Deprecation shim: map legacy wifi_sensor onto SENSOR_RSSI in sensors.
    if conf.get(CONF_WIFI_SENSOR) is not None:
        _LOGGER.warning("wifi_sensor is deprecated, use rssi in sensors instead.")
        if conf.get(CONF_WIFI_SENSOR) and SENSOR_RSSI not in conf[CONF_SENSORS]:
            conf[CONF_SENSORS].append(SENSOR_RSSI)
    # Deprecation shim: map legacy uptime_sensor onto SENSOR_UPTIME in sensors.
    if conf.get(CONF_UPTIME_SENSOR) is not None:
        _LOGGER.warning("uptime_sensor is deprecated, use uptime in sensors instead.")
        if conf.get(CONF_UPTIME_SENSOR) and SENSOR_UPTIME not in conf[CONF_SENSORS]:
            conf[CONF_SENSORS].append(SENSOR_UPTIME)
    hass.data["SHELLY_INSTANCE"] = ShellyInstance(hass, config_entry, conf)
    #def update_status_information():
    #    pys.update_status_information()
    #for _, block in pys.blocks.items():
    #    block.update_status_information()
    #async def update_domain_callback(_now):
    #    """Update the Shelly status information"""
    #    await hass.async_add_executor_job(update_status_information)
    #if conf.get(CONF_ADDITIONAL_INFO):
    #    hass.helpers.event.async_track_time_interval(
    #        update_domain_callback, update_interval)
    return True
class ShellyInstance():
    """Config instance of Shelly.

    Owns the pyShelly backend, reacts to its device/block callbacks and
    forwards discovered devices to the Home Assistant platforms.
    """
    def __init__(self, hass, config_entry, conf):
        self.hass = hass
        self.config_entry = config_entry
        # Platform name -> asyncio.Event, set once that platform's
        # forward-entry-setup has completed (see _async_add_device).
        self.platforms = {}
        # pyShelly instance; created asynchronously in start_up().
        self.pys = None
        self.conf = conf
        self.discover = conf.get(CONF_DISCOVERY)
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._stop)
        hass.loop.create_task(
            self.start_up()
        )

    async def start_up(self):
        """Create, configure and start the pyShelly backend."""
        conf = self.conf
        if conf.get(CONF_LOCAL_PY_SHELLY):
            # Use the copy of pyShelly bundled with the component
            # instead of the installed pip package.
            _LOGGER.info("Loading local pyShelly")
            #pylint: disable=no-name-in-module
            from .pyShelly import pyShelly
        else:
            from pyShelly import pyShelly
        additional_info = conf.get(CONF_ADDITIONAL_INFO)
        update_interval = conf.get(CONF_SCAN_INTERVAL)
        self.pys = pys = pyShelly()
        _LOGGER.info("pyShelly, %s", pys.version())
        # Register callbacks before start() so no early events are missed.
        pys.cb_block_added.append(self._block_added)
        pys.cb_device_added.append(self._device_added)
        pys.cb_device_removed.append(self._device_removed)
        pys.username = conf.get(CONF_USERNAME)
        pys.password = conf.get(CONF_PASSWORD)
        pys.cloud_auth_key = conf.get(CONF_CLOUD_AUTH_KEY)
        pys.cloud_server = conf.get(CONF_CLOUD_SEREVR)
        pys.tmpl_name = conf.get(CONF_TMPL_NAME, pys.tmpl_name)
        if additional_info:
            # Only poll the status endpoint when extra info was requested.
            pys.update_status_interval = update_interval
        pys.only_device_id = conf.get(CONF_ONLY_DEVICE_ID)
        pys.igmp_fix_enabled = conf.get(CONF_IGMPFIX)
        pys.mdns_enabled = conf.get(CONF_MDNS)
        pys.host_ip = conf.get(CONF_HOST_IP, '')
        pys.start()
        pys.discover()
        # Devices that cannot be auto-discovered (e.g. on another subnet)
        # may be configured explicitly by IP address.
        discover_by_ip = conf.get(CONF_DISCOVER_BY_IP)
        for ip_addr in discover_by_ip:
            pys.add_device_by_ip(ip_addr, 'IP-addr')
        if conf.get(CONF_VERSION):
            # Optional diagnostic sensor exposing component versions.
            attr = {'version': VERSION, 'pyShellyVersion': pys.version()}
            self._add_device("sensor", attr)
        # NOTE(review): the block below looks like leftover development
        # code -- it unconditionally registers a fake temperature sensor,
        # and `attr` is built but never used (fake_block is passed to
        # _add_device instead). Confirm whether this should be removed or
        # guarded behind a debug option.
        fake_block = {
            'id' : "694908",
            'fake_block': True,
            'info_values': {'temperature':5},
            'cb_updated' : [],
        }
        attr = {'sensor_type':'temperature', 'itm': fake_block}
        self._add_device("sensor", fake_block)

    async def _stop(self, _):
        """Stop Shelly."""
        _LOGGER.info("Shutting down Shelly")
        self.pys.close()

    def _get_specific_config_root(self, key, *ids):
        """Return a per-device setting, falling back to the global config."""
        item = self._get_specific_config(key, None, *ids)
        if item is None:
            item = self.conf.get(key)
        return item

    def _find_device_config(self, device_id):
        """Return the YAML device entry matching *device_id*, or None."""
        device_conf_list = self.conf.get(CONF_DEVICES)
        for item in device_conf_list:
            if item[CONF_ID].upper() == device_id:
                return item
        return None

    def _get_device_config(self, device_id, id_2=None):
        """Get config for device.

        Tries *device_id* first, then the fallback *id_2* (typically the
        parent block id); returns an empty dict when neither matches.
        """
        item = self._find_device_config(device_id)
        if item is None and id_2 is not None:
            item = self._find_device_config(id_2)
        if item is None:
            return {}
        return item

    def _get_specific_config(self, key, default, *ids):
        """Return the first per-device value of *key* among *ids*."""
        for device_id in ids:
            item = self._find_device_config(device_id)
            if item is not None and key in item:
                return item[key]
        return default

    def _get_sensor_config(self, *ids):
        """Return the sensor list for a device (per-device or global)."""
        sensors = self._get_specific_config(CONF_SENSORS, None, *ids)
        if sensors is None:
            sensors = self.conf.get(CONF_SENSORS)
        # 'all' expands to every known sensor type.
        if SENSOR_ALL in sensors:
            return [*SENSOR_TYPES.keys()]
        # NOTE(review): if sensors is still None here, the membership test
        # above would already have raised TypeError -- this guard probably
        # belongs before the SENSOR_ALL check; confirm intended order.
        if sensors is None:
            return {}
        return sensors

    def _add_device(self, platform, dev):
        # Called from pyShelly's worker thread; hand over to the event loop.
        self.hass.add_job(self._async_add_device(platform, dev))

    async def _async_add_device(self, platform, dev):
        """Ensure *platform* is set up, then dispatch the new device."""
        if platform not in self.platforms:
            self.platforms[platform] = asyncio.Event()
            await self.hass.config_entries.async_forward_entry_setup(
                self.config_entry, platform)
            self.platforms[platform].set()
        # Wait until the first caller has finished setting up the platform.
        await self.platforms[platform].wait()
        async_dispatcher_send(self.hass, "shelly_new_" + platform \
            , dev, self)

    def _block_updated(self, block):
        """pyShelly callback: a block's status information changed."""
        hass_data = block.hass_data
        if hass_data['discover']:
            if hass_data['allow_upgrade_switch']:
                # Create/remove the firmware-upgrade switch depending on
                # whether an update is currently available.
                has_update = block.info_values.get('has_firmware_update', False)
                update_switch = getattr(block, 'firmware_switch', None)
                if has_update:
                    if update_switch is None:
                        attr = {'firmware': True, 'block':block}
                        self._add_device("switch", attr)
                elif update_switch is not None:
                    update_switch.remove()
            #block_key = _get_block_key(block)
            # Announce one sensor entity per configured info value,
            # only the first time a given block/key pair is seen.
            for key, _value in block.info_values.items():
                ukey = block.id + '-' + key
                if not ukey in BLOCK_SENSORS:
                    BLOCK_SENSORS.append(ukey)
                    for sensor in hass_data['sensor_cfg']:
                        if SENSOR_TYPES[sensor].get('attr') == key:
                            attr = {'sensor_type':key,
                                    'itm': block}
                            self._add_device("sensor", attr)

    def _block_added(self, block):
        # Called from pyShelly's worker thread; hand over to the event loop.
        self.hass.add_job(self._async_block_added(block))

    async def _async_block_added(self, block):
        """Attach Home Assistant bookkeeping to a newly discovered block."""
        block.cb_updated.append(self._block_updated)
        # Explicitly configured devices are exposed even when global
        # discovery is disabled.
        discover_block = self.discover \
            or self._get_device_config(block.id) != {}
        block.hass_data = {
            'allow_upgrade_switch' :
                self._get_specific_config_root(CONF_UPGRADE_SWITCH, block.id),
            'sensor_cfg' : self._get_sensor_config(block.id),
            'discover': discover_block
        }
        #Config block
        if block.unavailable_after_sec is None:
            block.unavailable_after_sec \
                = self._get_specific_config_root(CONF_UNAVALABLE_AFTER_SEC,
                                                 block.id)
        #if conf.get(CONF_ADDITIONAL_INFO):
            #block.update_status_information()
        #     cfg_sensors = conf.get(CONF_SENSORS)
        #     for sensor in cfg_sensors:
        #         sensor_type = SENSOR_TYPES[sensor]
        #         if 'attr' in sensor_type:
        #             attr = {'sensor_type':sensor_type['attr'],
        #                     SHELLY_BLOCK_ID : block_key}
        #             discovery.load_platform(hass, 'sensor', DOMAIN, attr,
        #                                     config)

    def _device_added(self, dev, _code):
        # Called from pyShelly's worker thread; hand over to the event loop.
        self.hass.add_job(self._async_device_added(dev, _code))

    async def _async_device_added(self, dev, _code):
        """Route a newly discovered device to the matching HA platform."""
        device_config = self._get_device_config(dev.id, dev.block.id)
        # Skip devices that are neither discovered nor configured.
        if not self.discover and device_config == {}:
            return
        if dev.device_type == "ROLLER":
            self._add_device("cover", dev)
        if dev.device_type == "RELAY":
            # A relay can optionally be presented as a light entity.
            if device_config.get(CONF_LIGHT_SWITCH):
                self._add_device("light", dev)
            else:
                self._add_device("switch", dev)
        elif dev.device_type == 'POWERMETER':
            sensor_cfg = self._get_sensor_config(dev.id, dev.block.id)
            if SENSOR_POWER in sensor_cfg:
                self._add_device("sensor", dev)
        elif dev.device_type == 'SWITCH':
            sensor_cfg = self._get_sensor_config(dev.id, dev.block.id)
            if SENSOR_SWITCH in sensor_cfg:
                self._add_device("sensor", dev)
        elif dev.device_type in ["SENSOR"]: #, "INFOSENSOR"]:
            self._add_device("sensor", dev)
        elif dev.device_type in ["LIGHT", "DIMMER"]:
            self._add_device("light", dev)

    def _device_removed(self, dev, _code):
        """pyShelly callback: remove the HA entity for a vanished device."""
        dev.shelly_device.remove()
        # NOTE(review): leftover no-op cleanup -- the DEVICES bookkeeping
        # below is commented out, so the try/except currently does nothing.
        try:
            pass
            #key = _dev_key(dev)
            #del DEVICES[key]
        except KeyError:
            pass
class ShellyBlock(Entity):
    """Base class for Shelly block entities.

    Wraps a pyShelly block (or, in debug code, a plain dict "fake block")
    and exposes it as a Home Assistant entity.
    """
    def __init__(self, block, instance, prefix=""):
        conf = instance.conf
        id_prefix = conf.get(CONF_OBJECT_ID_PREFIX)
        self._unique_id = slugify(id_prefix + "_" + block.type + "_" +
                                  block.id + prefix)
        self.entity_id = "." + self._unique_id
        # A per-device entity_id override from the YAML device list wins.
        entity_id = instance._get_specific_config(CONF_ENTITY_ID, None, block.id)
        if entity_id is not None:
            self.entity_id = "." + slugify(id_prefix + "_" + entity_id + prefix)
            self._unique_id += "_" + slugify(entity_id)
        # Debug code passes a plain dict instead of a pyShelly block.
        self.fake_block = isinstance(block, dict)
        self._show_id_in_name = conf.get(CONF_SHOW_ID_IN_NAME)
        self._block = block
        self.hass = instance.hass
        self.instance = instance
        self._block.cb_updated.append(self._updated)
        block.shelly_device = self
        self._name = instance._get_specific_config(CONF_NAME, None, block.id)
        self._name_ext = None
        self._is_removed = False
        self.hass.add_job(self.setup_device(block))

    async def setup_device(self, block):
        """Register this block in the Home Assistant device registry."""
        dev_reg = await self.hass.helpers.device_registry.async_get_registry()
        dev_reg.async_get_or_create(
            # BUGFIX: async_get_or_create expects a config entry id; the
            # entity_id string was previously passed here by mistake.
            config_entry_id=self.instance.config_entry.entry_id,
            identifiers={(DOMAIN, block.id)},
            manufacturer="Shelly",
            name=block.friendly_name(),
            model=block.type_name(),
            sw_version="0.0.1",
        )

    @property
    def name(self):
        """Return the display name of this device."""
        # BUGFIX: these branches were separate `if` statements, so the
        # 'Fake' name was immediately overwritten and friendly_name() was
        # called on the dict-based fake block (AttributeError).
        if self.fake_block:
            name = 'Fake'
        elif self._name is None:
            name = self._block.friendly_name()
        else:
            name = self._name
        if self._name_ext:
            name += ' - ' + self._name_ext
        if self._show_id_in_name:
            name += " [" + self._block.id + "]"
        return name

    def _updated(self, _block):
        """Receive events when the switch state changed (by mobile,
        switch etc)"""
        if self.entity_id is not None and not self._is_removed:
            self.schedule_update_ha_state(True)

    @property
    def device_state_attributes(self):
        """Show state attributes in HASS"""
        if self.fake_block:
            return {}
        attrs = {'ip_address': self._block.ip_addr,
                 'shelly_type': self._block.type_name(),
                 'shelly_id': self._block.id,
                 'discovery': self._block.discovery_src
                 }
        room = self._block.room_name()
        if room:
            attrs['room'] = room
        if self._block.info_values is not None:
            for key, value in self._block.info_values.items():
                attrs[key] = value
        return attrs

    @property
    def device_info(self):
        """Link the entity to its device-registry entry."""
        return {
            'identifiers': {
                (DOMAIN, self._block.id)
            }
        }

    def remove(self):
        """Mark the entity removed and schedule its removal from HA."""
        self._is_removed = True
        self.hass.add_job(self.async_remove)
class ShellyDevice(Entity):
    """Base class for Shelly device entities.

    Wraps a pyShelly device (relay, light, sensor, ...) and exposes it
    as a Home Assistant entity.
    """
    def __init__(self, dev, instance):
        conf = instance.conf
        id_prefix = conf.get(CONF_OBJECT_ID_PREFIX)
        self._unique_id = id_prefix + "_" + dev.type + "_" + dev.id
        self.entity_id = "." + slugify(self._unique_id)
        # A per-device entity_id override from the YAML device list wins;
        # the device id is tried first, then its parent block id.
        entity_id = instance._get_specific_config(CONF_ENTITY_ID,
                                                  None, dev.id, dev.block.id)
        if entity_id is not None:
            self.entity_id = "." + slugify(id_prefix + "_" + entity_id)
            self._unique_id += "_" + slugify(entity_id)
        self._show_id_in_name = conf.get(CONF_SHOW_ID_IN_NAME)
        self._dev = dev
        self.hass = instance.hass
        self.instance = instance
        self._dev.cb_updated.append(self._updated)
        dev.shelly_device = self
        self._name = instance._get_specific_config(CONF_NAME, None,
                                                   dev.id, dev.block.id)
        self._sensor_conf = instance._get_sensor_config(dev.id, dev.block.id)
        self._is_removed = False

    def _updated(self, _block):
        """Receive events when the switch state changed (by mobile,
        switch etc)"""
        if self.entity_id is not None and not self._is_removed:
            self.schedule_update_ha_state(True)
        # Record which info-value sensors have been seen so each is only
        # announced once per device.
        if self._dev.info_values is not None:
            for key, _value in self._dev.info_values.items():
                ukey = self._dev.id + '-' + key
                if ukey not in DEVICE_SENSORS:
                    DEVICE_SENSORS.append(ukey)
                    # Dynamic creation of info-value sensors is currently
                    # disabled (the discovery.load_platform call was
                    # commented out); the unused attr/conf locals that
                    # were built for it have been removed.

    @property
    def name(self):
        """Return the display name of this device."""
        if self._name is None:
            name = self._dev.friendly_name()
        else:
            name = self._name
        if self._show_id_in_name:
            name += " [" + self._dev.id + "]"
        return name

    @property
    def device_state_attributes(self):
        """Show state attributes in HASS"""
        attrs = {'ip_address': self._dev.ip_addr,
                 'shelly_type': self._dev.type_name(),
                 'shelly_id': self._dev.id,
                 'discovery': self._dev.discovery_src
                 }
        room = self._dev.room_name()
        if room:
            attrs['room'] = room
        # Merge info values from the parent block and the device itself,
        # plus any raw sensor readings.
        if self._dev.block.info_values is not None:
            for key, value in self._dev.block.info_values.items():
                attrs[key] = value
        if self._dev.info_values is not None:
            for key, value in self._dev.info_values.items():
                attrs[key] = value
        if self._dev.sensor_values is not None:
            for key, value in self._dev.sensor_values.items():
                attrs[key] = value
        return attrs

    @property
    def device_info(self):
        """Link the entity to its device-registry entry (keyed on the
        parent block id, which is unique within the domain)."""
        return {
            'identifiers': {
                (DOMAIN, self._dev.block.id)
            },
        }

    @property
    def unique_id(self):
        """Return the ID of this device."""
        return self._unique_id

    @property
    def available(self):
        """Return true if switch is available."""
        return self._dev.available()

    def remove(self):
        """Mark the entity removed and schedule its removal from HA."""
        self._is_removed = True
        self.hass.add_job(self.async_remove)

    @property
    def should_poll(self):
        """No polling needed."""
        return False
| 36.003565
| 87
| 0.576839
| 2,383
| 20,198
| 4.591272
| 0.11582
| 0.019834
| 0.030162
| 0.011882
| 0.387625
| 0.319441
| 0.272736
| 0.254913
| 0.230235
| 0.211772
| 0
| 0.001977
| 0.323695
| 20,198
| 560
| 88
| 36.067857
| 0.798975
| 0.130458
| 0
| 0.26615
| 0
| 0
| 0.045417
| 0
| 0
| 0
| 0
| 0.001786
| 0
| 1
| 0.074935
| false
| 0.010336
| 0.043928
| 0.005168
| 0.196382
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|