hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0424359ee56e9ca81be3c2d281dace766fe63d
| 734
|
py
|
Python
|
LC/165.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | 2
|
2018-02-24T17:20:02.000Z
|
2018-02-24T17:25:43.000Z
|
LC/165.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
LC/165.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
class Solution(object):
    def compareVersion(self, version1, version2):
        """Compare two dotted version strings numerically.

        :type version1: str
        :type version2: str
        :rtype: int  (1 if version1 > version2, -1 if smaller, 0 if equal)
        """
        parts1 = [int(p) for p in version1.split('.')]
        parts2 = [int(p) for p in version2.split('.')]
        # Pad the shorter revision list with zeros so trailing ".0" parts
        # do not affect the comparison (e.g. "1.0" == "1.0.0").
        width = max(len(parts1), len(parts2))
        parts1 += [0] * (width - len(parts1))
        parts2 += [0] * (width - len(parts2))
        for a, b in zip(parts1, parts2):
            if a > b:
                return 1
            if a < b:
                return -1
        return 0
| 26.214286
| 49
| 0.385559
|
4a042488cd7111975885ea70983ebfa68b2b01b9
| 2,182
|
py
|
Python
|
share/qt/extract_strings_qt.py
|
bitcoininvestproject/BitcoinInvestCore
|
ec42975bcd981d73e2f17983159fa39ff7660622
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
bitcoininvestproject/BitcoinInvestCore
|
ec42975bcd981d73e2f17983159fa39ff7660622
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
bitcoininvestproject/BitcoinInvestCore
|
ec42975bcd981d73e2f17983159fa39ff7660622
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/bitcoininveststrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid, msgstr) tuples.

    Both tuple members are lists of quoted string fragments: the first
    fragment comes from the 'msgid '/'msgstr ' line itself, and any bare
    '"..."' continuation lines that follow are appended to it.
    """
    messages = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw in text.split('\n'):
        raw = raw.rstrip('\r')
        if raw.startswith('msgid '):
            # A new entry begins; flush the previous one if we were in its
            # msgstr section (i.e. the previous entry is complete).
            if reading_str:
                messages.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [raw[6:]]
        elif raw.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [raw[7:]]
        elif raw.startswith('"'):
            # Continuation fragment for whichever section is being read.
            if reading_id:
                current_id.append(raw)
            if reading_str:
                current_str.append(raw)
    # Flush the trailing entry, if any.
    if reading_str:
        messages.append((current_id, current_str))
    return messages
# Top-level driver: run xgettext over the source files given on the command
# line and write the extracted messages out as a generated C++ file of
# QT_TRANSLATE_NOOP entries so Qt linguist can pick them up.
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    # The build system may set XGETTEXT to an empty string to signal that
    # the gettext tools are unavailable.
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoininvest_strings[] = {\n')
# Sort by msgid for a stable, diff-friendly generated file.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the PO header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoininvest-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 25.97619
| 105
| 0.622823
|
4a042499047c85646dda8324a18089a119d04d67
| 12,482
|
py
|
Python
|
enstop/distributed_plsa.py
|
TIMC-Workshops/enstop
|
708825a42be2aa5278382cd623e3ea468148fad9
|
[
"BSD-2-Clause"
] | null | null | null |
enstop/distributed_plsa.py
|
TIMC-Workshops/enstop
|
708825a42be2aa5278382cd623e3ea468148fad9
|
[
"BSD-2-Clause"
] | null | null | null |
enstop/distributed_plsa.py
|
TIMC-Workshops/enstop
|
708825a42be2aa5278382cd623e3ea468148fad9
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
import numba
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array, check_random_state
from scipy.sparse import issparse, csr_matrix, coo_matrix
from enstop.utils import normalize, coherence, mean_coherence, log_lift, mean_log_lift
from enstop.plsa import plsa_init
from enstop.block_parallel_plsa import (
plsa_e_step_on_a_block,
plsa_partial_m_step_on_a_block,
)
from dask import delayed
import dask.array as da
@delayed
@numba.njit(nogil=True, fastmath=True)
def plsa_em_step_block_kernel(
    row_block, col_block, val_block, p_w_given_z, p_z_given_d, e_step_thresh=1e-32,
):
    """Run the E step and partial M step of pLSA on one sparse block.

    row_block/col_block/val_block hold the nonzeros of one
    (document-block, word-block) tile in padded COO form; p_w_given_z and
    p_z_given_d are the slices of the current factor matrices restricted to
    this tile. Returns the un-normalized partial updates for both factors
    plus their normalization accumulators, which the caller sums over tiles.

    NOTE(review): wrapped with @delayed on top of @numba.njit, so a call
    builds a dask task that runs the jitted kernel lazily — TODO confirm
    this decorator stacking is intended.
    """
    result_p_w_given_z = np.zeros_like(p_w_given_z)
    result_p_z_given_d = np.zeros_like(p_z_given_d)
    result_norm_pwz = np.zeros(p_w_given_z.shape[0], dtype=np.float32)
    result_norm_pdz = np.zeros(p_z_given_d.shape[0], dtype=np.float32)
    # One topic-responsibility row per (padded) nonzero entry of the block.
    p_z_given_wd_block = np.zeros(
        (row_block.shape[0], p_w_given_z.shape[0]), dtype=np.float32
    )
    # E step: fill p_z_given_wd_block from the current factors.
    plsa_e_step_on_a_block(
        row_block,
        col_block,
        p_w_given_z,
        p_z_given_d,
        p_z_given_wd_block,
        e_step_thresh,
    )
    # Partial M step: accumulate this tile's contribution to the new factors.
    plsa_partial_m_step_on_a_block(
        row_block,
        col_block,
        val_block,
        result_p_w_given_z,
        result_p_z_given_d,
        p_z_given_wd_block,
        result_norm_pwz,
        result_norm_pdz,
    )
    return result_p_w_given_z, result_p_z_given_d, result_norm_pwz, result_norm_pdz
def plsa_em_step_dask(
    block_rows_ndarray,
    block_cols_ndarray,
    block_vals_ndarray,
    p_w_given_z,
    p_z_given_d,
    block_row_size,
    block_col_size,
    e_step_thresh=1e-32,
):
    """Run one complete pLSA EM step over the blocked data via dask.

    The sparse matrix has been pre-split into an (n_d_blocks, n_w_blocks)
    grid of padded COO blocks. One delayed kernel task is created per block;
    the partial results are then reduced across the grid and renormalized.

    Parameters
    ----------
    block_rows_ndarray, block_cols_ndarray, block_vals_ndarray:
        (n_d_blocks, n_w_blocks, max_nnz) padded COO row/col/value arrays.
    p_w_given_z: array of shape (k, n_words)
    p_z_given_d: array of shape (n_docs, k)
    block_row_size, block_col_size: int
        Nominal block dimensions (the last block in each axis may be smaller).
    e_step_thresh: float (optional, default=1e-32)
        Threshold passed through to the E-step kernel.

    Returns
    -------
    p_w_given_z, p_z_given_d:
        Updated factor matrices, computed eagerly (``.compute()`` is called
        before returning).
    """
    n_d_blocks = block_rows_ndarray.shape[0]
    n_w_blocks = block_rows_ndarray.shape[1]
    n = p_z_given_d.shape[0]
    m = p_w_given_z.shape[1]
    k = p_z_given_d.shape[1]

    # Partial results are bucketed by the axis they will be summed over:
    # result_p_w_given_z[j] collects the per-document-block contributions to
    # word block j (so there must be n_w_blocks buckets), and
    # result_p_z_given_d[i] the per-word-block contributions to document
    # block i (n_d_blocks buckets).
    # BUG FIX: these two lengths were previously swapped (n_d_blocks and
    # n_w_blocks), which raises IndexError whenever the grid is not square.
    result_p_w_given_z = [[] for _ in range(n_w_blocks)]
    result_p_z_given_d = [[] for _ in range(n_d_blocks)]
    result_norm_pwz = []
    result_norm_pdz = [[] for _ in range(n_d_blocks)]

    for i in range(n_d_blocks):
        row_start = block_row_size * i
        row_end = min(row_start + block_row_size, n)
        for j in range(n_w_blocks):
            col_start = block_col_size * j
            col_end = min(col_start + block_col_size, m)

            row_block = block_rows_ndarray[i, j]
            col_block = block_cols_ndarray[i, j]
            val_block = block_vals_ndarray[i, j]

            # Delayed E step + partial M step on this tile.
            kernel_results = plsa_em_step_block_kernel(
                row_block,
                col_block,
                val_block,
                p_w_given_z[:, col_start:col_end],
                p_z_given_d[row_start:row_end, :],
                e_step_thresh=e_step_thresh,
            )

            result_p_w_given_z[j].append(
                da.from_delayed(
                    kernel_results[0], (k, block_col_size), dtype=np.float32
                )
            )
            result_p_z_given_d[i].append(
                da.from_delayed(
                    kernel_results[1], (block_row_size, k), dtype=np.float32
                )
            )
            result_norm_pwz.append(
                da.from_delayed(kernel_results[2], (k,), dtype=np.float32)
            )
            result_norm_pdz[i].append(
                da.from_delayed(kernel_results[3], (block_row_size,), dtype=np.float32)
            )

    # Reduce the per-block partials, then renormalize so the factors are
    # proper conditional distributions again.
    p_w_given_z_blocks = [
        da.dstack(result_p_w_given_z[i]).sum(axis=-1) for i in range(n_w_blocks)
    ]
    p_z_given_d_blocks = [
        da.dstack(result_p_z_given_d[i]).sum(axis=-1) for i in range(n_d_blocks)
    ]
    norm_pdz_blocks = [
        da.dstack(result_norm_pdz[i]).sum(axis=-1) for i in range(n_d_blocks)
    ]
    p_w_given_z = (
        da.hstack(p_w_given_z_blocks) / da.dstack(result_norm_pwz).sum(axis=-1).T
    )
    p_z_given_d = da.vstack(p_z_given_d_blocks) / da.hstack(norm_pdz_blocks).T

    return p_w_given_z.compute(), p_z_given_d.compute()
@numba.njit(
    locals={
        "i": numba.types.uint16,
        "j": numba.types.uint16,
        "k": numba.types.intp,
        "w": numba.types.uint32,
        "d": numba.types.uint32,
        "z": numba.types.uint16,
        "nz_idx": numba.types.uint32,
        "x": numba.types.float32,
        "result": numba.types.float32,
        "p_w_given_d": numba.types.float32,
    },
    fastmath=True,
    nogil=True,
    parallel=True,
)
def log_likelihood_by_blocks(
    block_rows_ndarray,
    block_cols_ndarray,
    block_vals_ndarray,
    p_w_given_z,
    p_z_given_d,
    block_row_size,
    block_col_size,
):
    """Compute the pLSA log-likelihood sum(x_dw * log P(w|d)) over all blocks.

    Block-local row/col indices are offset by the block position to recover
    global document/word indices; padded entries are marked with -1 rows and
    terminate the inner scan. The outer loop over document blocks runs in
    parallel via numba.prange (result is a parallel reduction).
    """
    result = 0.0
    k = p_w_given_z.shape[0]
    for i in numba.prange(block_rows_ndarray.shape[0]):
        for j in range(block_rows_ndarray.shape[1]):
            for nz_idx in range(block_rows_ndarray.shape[2]):
                # Padding sentinel: no more nonzeros in this block.
                if block_rows_ndarray[i, j, nz_idx] < 0:
                    break
                # Recover global (document, word) indices from block-local ones.
                d = block_rows_ndarray[i, j, nz_idx] + i * block_row_size
                w = block_cols_ndarray[i, j, nz_idx] + j * block_col_size
                x = block_vals_ndarray[i, j, nz_idx]
                # P(w|d) = sum_z P(w|z) P(z|d)
                p_w_given_d = 0.0
                for z in range(k):
                    p_w_given_d += p_w_given_z[z, w] * p_z_given_d[d, z]
                result += x * np.log(p_w_given_d)
    return result
def plsa_fit_inner_dask(
    block_rows_ndarray,
    block_cols_ndarray,
    block_vals_ndarray,
    p_w_given_z,
    p_z_given_d,
    block_row_size,
    block_col_size,
    n_iter=100,
    n_iter_per_test=10,
    tolerance=0.001,
    e_step_thresh=1e-32,
):
    """Iterate dask-based EM steps until convergence or the iteration cap.

    Convergence is declared when the relative change of the training
    log-likelihood, checked every ``n_iter_per_test`` iterations, falls
    below ``tolerance``. Returns the final (p_z_given_d, p_w_given_z).
    """
    reference_ll = log_likelihood_by_blocks(
        block_rows_ndarray,
        block_cols_ndarray,
        block_vals_ndarray,
        p_w_given_z,
        p_z_given_d,
        block_row_size,
        block_col_size,
    )

    for step in range(n_iter):
        p_w_given_z, p_z_given_d = plsa_em_step_dask(
            block_rows_ndarray,
            block_cols_ndarray,
            block_vals_ndarray,
            p_w_given_z,
            p_z_given_d,
            block_row_size,
            block_col_size,
            e_step_thresh=e_step_thresh,
        )

        # Only pay for a log-likelihood evaluation every n_iter_per_test steps.
        if step % n_iter_per_test != 0:
            continue

        current_ll = log_likelihood_by_blocks(
            block_rows_ndarray,
            block_cols_ndarray,
            block_vals_ndarray,
            p_w_given_z,
            p_z_given_d,
            block_row_size,
            block_col_size,
        )
        if np.abs(current_ll - reference_ll) / np.abs(current_ll) < tolerance:
            break
        reference_ll = current_ll

    return p_z_given_d, p_w_given_z
def plsa_fit(
    X,
    k,
    n_row_blocks=8,
    n_col_blocks=8,
    init="random",
    n_iter=100,
    n_iter_per_test=10,
    tolerance=0.001,
    e_step_thresh=1e-32,
    random_state=None,
):
    """Fit a k-topic pLSA model to the sparse matrix X with blocked EM.

    X is initialized via ``plsa_init``, split into an
    (n_row_blocks, n_col_blocks) grid of COO blocks packed into padded
    ndarrays (padding marked by -1 indices / 0 values), and then fit with
    ``plsa_fit_inner_dask``. Returns (p_z_given_d, p_w_given_z).
    """
    rng = check_random_state(random_state)
    p_z_given_d, p_w_given_z = plsa_init(X, k, init=init, rng=rng)
    p_z_given_d = p_z_given_d.astype(np.float32, order="C")
    p_w_given_z = p_w_given_z.astype(np.float32, order="C")

    A = X.tocsr().astype(np.float32)
    n, m = A.shape
    block_row_size = np.uint16(np.ceil(A.shape[0] / n_row_blocks))
    block_col_size = np.uint16(np.ceil(A.shape[1] / n_col_blocks))

    # Slice A into a grid of COO blocks (the trailing blocks may be smaller).
    A_blocks = [
        [
            A[
                block_row_size * i : min(block_row_size * i + block_row_size, n),
                block_col_size * j : min(block_col_size * j + block_col_size, m),
            ].tocoo()
            for j in range(n_col_blocks)
        ]
        for i in range(n_row_blocks)
    ]
    # All blocks are padded out to the densest block's nnz.
    max_nnz_per_block = max(blk.nnz for row in A_blocks for blk in row)

    shape3 = (n_row_blocks, n_col_blocks, max_nnz_per_block)
    # -1 row/col entries mark padding past each block's real nonzeros.
    block_rows_ndarray = np.full(shape3, -1, dtype=np.int32)
    block_cols_ndarray = np.full(shape3, -1, dtype=np.int32)
    block_vals_ndarray = np.zeros(shape3, dtype=np.float32)
    for i in range(n_row_blocks):
        for j in range(n_col_blocks):
            blk = A_blocks[i][j]
            block_rows_ndarray[i, j, : blk.nnz] = blk.row
            block_cols_ndarray[i, j, : blk.nnz] = blk.col
            block_vals_ndarray[i, j, : blk.nnz] = blk.data

    p_z_given_d, p_w_given_z = plsa_fit_inner_dask(
        block_rows_ndarray,
        block_cols_ndarray,
        block_vals_ndarray,
        p_w_given_z,
        p_z_given_d,
        block_row_size,
        block_col_size,
        n_iter=n_iter,
        n_iter_per_test=n_iter_per_test,
        tolerance=tolerance,
        e_step_thresh=e_step_thresh,
    )

    return p_z_given_d, p_w_given_z
class DistributedPLSA(BaseEstimator, TransformerMixin):
    """Distributed pLSA estimator using block-parallel EM steps run via dask.

    Parameters
    ----------
    n_components: int (optional, default=10)
        Number of topics to fit.
    init: string (optional, default="random")
        Initialization strategy passed through to ``plsa_init``.
    n_row_blocks, n_col_blocks: int (optional, default=8)
        Number of blocks to split the document / word axes into.
    n_iter: int (optional, default=100)
        Maximum number of EM iterations.
    n_iter_per_test: int (optional, default=10)
        Convergence is checked every this many iterations.
    tolerance: float (optional, default=0.001)
        Relative log-likelihood change below which fitting stops.
    e_step_thresh: float (optional, default=1e-32)
        Threshold passed to the E-step kernel.
    transform_random_seed: int (optional, default=42)
        Seed reserved for transform-time fitting.
    random_state: int, RandomState instance or None (optional, default=None)
        Random state used for initialization.
    """

    def __init__(
        self,
        n_components=10,
        init="random",
        n_row_blocks=8,
        n_col_blocks=8,
        n_iter=100,
        n_iter_per_test=10,
        tolerance=0.001,
        e_step_thresh=1e-32,
        transform_random_seed=42,
        random_state=None,
    ):
        # sklearn convention: __init__ only records parameters, no validation.
        self.n_components = n_components
        self.init = init
        self.n_row_blocks = n_row_blocks
        self.n_col_blocks = n_col_blocks
        self.n_iter = n_iter
        self.n_iter_per_test = n_iter_per_test
        self.tolerance = tolerance
        self.e_step_thresh = e_step_thresh
        self.transform_random_seed = transform_random_seed
        self.random_state = random_state

    def fit(self, X, y=None, sample_weight=None):
        """Learn the pLSA model for the data X.

        Parameters
        ----------
        X: array or sparse matrix of shape (n_docs, n_words)
            The data matrix pLSA is attempting to fit to.

        y: Ignored

        sample_weight: array of shape (n_docs,)
            Input document weights. Not supported by the distributed
            implementation; passing a value raises NotImplementedError.

        Returns
        -------
        self
        """
        self.fit_transform(X, sample_weight=sample_weight)
        return self

    def fit_transform(self, X, y=None, sample_weight=None):
        """Learn the pLSA model for the data X and return the document vectors.

        This is more efficient than calling fit followed by transform.

        Parameters
        ----------
        X: array or sparse matrix of shape (n_docs, n_words)
            The data matrix pLSA is attempting to fit to.

        y: Ignored

        sample_weight: array of shape (n_docs,)
            Input document weights. Not supported by the distributed
            implementation; passing a value raises NotImplementedError.

        Returns
        -------
        embedding: array of shape (n_docs, n_topics)
            An embedding of the documents into a topic space.

        Raises
        ------
        NotImplementedError
            If sample_weight is provided.
        ValueError
            If X contains negative entries.
        """
        X = check_array(X, accept_sparse="csr")

        if not issparse(X):
            X = csr_matrix(X)

        if sample_weight is not None:
            # BUG FIX: the exception was previously constructed but never
            # raised, so unsupported sample weights were silently ignored.
            raise NotImplementedError("Sample weights not supported in distributed")

        if np.any(X.data < 0):
            raise ValueError(
                "PLSA is only valid for matrices with non-negative " "entries"
            )

        # Rows summing to zero carry no information and break the EM
        # normalization; fit on nonzero rows only and fill zeros back in.
        row_sums = np.array(X.sum(axis=1).T)[0]
        good_rows = row_sums != 0

        if not np.all(good_rows):
            zero_rows_found = True
            data_for_fitting = X[good_rows]
        else:
            zero_rows_found = False
            data_for_fitting = X

        U, V = plsa_fit(
            data_for_fitting,
            self.n_components,
            n_row_blocks=self.n_row_blocks,
            n_col_blocks=self.n_col_blocks,
            init=self.init,
            n_iter=self.n_iter,
            n_iter_per_test=self.n_iter_per_test,
            tolerance=self.tolerance,
            e_step_thresh=self.e_step_thresh,
            random_state=self.random_state,
        )

        if zero_rows_found:
            self.embedding_ = np.zeros((X.shape[0], self.n_components))
            self.embedding_[good_rows] = U
        else:
            self.embedding_ = U

        self.components_ = V
        self.training_data_ = X

        return self.embedding_
| 29.508274
| 87
| 0.613283
|
4a0424a4b7359d76c35213412dcc12863bcf7e94
| 905
|
py
|
Python
|
qiskit/quantum_info/operators/channel/__init__.py
|
Roshan-Thomas/qiskit-terra
|
77219b5c7b7146b1545c5e5190739b36f4064b2f
|
[
"Apache-2.0"
] | 1,599
|
2018-07-10T10:59:12.000Z
|
2022-03-31T23:56:25.000Z
|
qiskit/quantum_info/operators/channel/__init__.py
|
Roshan-Thomas/qiskit-terra
|
77219b5c7b7146b1545c5e5190739b36f4064b2f
|
[
"Apache-2.0"
] | 5,244
|
2018-07-10T06:20:13.000Z
|
2022-03-31T22:18:48.000Z
|
qiskit/quantum_info/operators/channel/__init__.py
|
Roshan-Thomas/qiskit-terra
|
77219b5c7b7146b1545c5e5190739b36f4064b2f
|
[
"Apache-2.0"
] | 1,409
|
2018-07-10T02:16:12.000Z
|
2022-03-31T09:01:32.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Quantum Channel Representations Package
For explanation of terminology and details of operations see Ref. [1]
References:
[1] C.J. Wood, J.D. Biamonte, D.G. Cory, Quant. Inf. Comp. 15, 0579-0811 (2015)
Open access: arXiv:1111.6950 [quant-ph]
"""
from .superop import SuperOp
from .choi import Choi
from .kraus import Kraus
from .stinespring import Stinespring
from .ptm import PTM
from .chi import Chi
| 31.206897
| 83
| 0.743646
|
4a04259a2fcb4692c4e07bd4dd516f4848ba597f
| 24,619
|
py
|
Python
|
tools/harness-thci/OpenThread_BR.py
|
marius-preda/openthread
|
442b7e82f67617cf754ed905d8e1dec9e1f9de80
|
[
"BSD-3-Clause"
] | 8
|
2021-03-20T02:39:22.000Z
|
2022-03-18T23:07:44.000Z
|
tools/harness-thci/OpenThread_BR.py
|
marius-preda/openthread
|
442b7e82f67617cf754ed905d8e1dec9e1f9de80
|
[
"BSD-3-Clause"
] | null | null | null |
tools/harness-thci/OpenThread_BR.py
|
marius-preda/openthread
|
442b7e82f67617cf754ed905d8e1dec9e1f9de80
|
[
"BSD-3-Clause"
] | 5
|
2021-06-18T03:29:42.000Z
|
2022-03-31T10:40:00.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
>> Thread Host Controller Interface
>> Device : OpenThread_BR THCI
>> Class : OpenThread_BR
"""
import logging
import re
import sys
import time
import ipaddress
import serial
from IThci import IThci
from THCI.OpenThread import OpenThreadTHCI, watched, API
# Shell prompts used to drive the interactive Raspberry Pi login sequence
# and to delimit command output on the serial console.
RPI_FULL_PROMPT = 'pi@raspberrypi:~$ '
RPI_USERNAME_PROMPT = 'raspberrypi login: '
RPI_PASSWORD_PROMPT = 'Password: '

"""regex: used to split lines"""
LINESEPX = re.compile(r'\r\n|\n')

# Matches Raspberry Pi kernel under-voltage warnings so they can be filtered
# out of command output and routed to the logging module instead.
LOGX = re.compile(r'.*Under-voltage detected!')
"""regex: used to filter logging"""

# Sanity-check the pattern at import time.
assert LOGX.match('[57522.618196] Under-voltage detected! (0x00050005)')

# Extracts the otbr-agent message body from a syslog line; sanity-checked
# against a representative line at import time.
OTBR_AGENT_SYSLOG_PATTERN = re.compile(r'raspberrypi otbr-agent\[\d+\]: (.*)')
assert OTBR_AGENT_SYSLOG_PATTERN.search(
    'Jun 23 05:21:22 raspberrypi otbr-agent[323]: =========[[THCI] direction=send | type=JOIN_FIN.req | len=039]==========]'
).group(1) == '=========[[THCI] direction=send | type=JOIN_FIN.req | len=039]==========]'

# Paramiko is noisy below WARNING; silence its INFO/DEBUG output.
logging.getLogger('paramiko').setLevel(logging.WARNING)
class SSHHandle(object):
    """SSH (paramiko) shell session to the Raspberry Pi border router.

    Presents the same bash/log/close interface as SerialHandle, but runs
    commands over an SSH connection and transparently reconnects when the
    session drops.
    """

    def __init__(self, ip, port, username, password):
        self.ip = ip
        self.port = int(port)
        self.username = username
        self.password = password
        self.__handle = None
        self.__connect()

    def __connect(self):
        import paramiko

        # Drop any stale session before opening a fresh one.
        self.close()

        self.__handle = paramiko.SSHClient()
        self.__handle.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.__handle.connect(self.ip, port=self.port, username=self.username, password=self.password)

    def close(self):
        if self.__handle is not None:
            self.__handle.close()
            self.__handle = None

    def bash(self, cmd, timeout):
        from paramiko import SSHException

        attempts = 3
        for attempt in range(attempts):
            try:
                stdin, stdout, stderr = self.__handle.exec_command(cmd, timeout=timeout)
                sys.stderr.write(stderr.read())
                return [r.encode('utf8').rstrip('\r\n') for r in stdout.readlines()]
            except Exception:
                # Reconnect and retry unless this was the final attempt.
                if attempt == attempts - 1:
                    raise
                print('SSH connection is lost, try reconnect after 1 second.')
                time.sleep(1)
                self.__connect()

    def log(self, fmt, *args):
        # Best-effort logging: formatting errors are swallowed on purpose.
        try:
            msg = fmt % args
            print('%s - %s - %s' % (self.port, time.strftime('%b %d %H:%M:%S'), msg))
        except Exception:
            pass
class SerialHandle:
    """Login shell to the Raspberry Pi border router over a serial console.

    Mirrors the interface of SSHHandle (bash/log/close) but drives a raw
    tty: __init__ performs the interactive login, and bash() runs commands
    by writing to the terminal and matching the echoed prompt in the output.
    """

    def __init__(self, port, baudrate):
        self.port = port
        # timeout=0 puts pyserial in non-blocking mode; pacing is handled
        # manually in __bashRead.
        self.__handle = serial.Serial(port, baudrate, timeout=0)
        # Line buffer: the last element is always the (possibly empty)
        # partial line still being received.
        self.__lines = ['']
        assert len(self.__lines) >= 1, self.__lines

        self.log("inputing username ...")
        self.__bashWriteLine('pi')
        deadline = time.time() + 20
        loginOk = False
        while time.time() < deadline:
            time.sleep(1)

            # Drain everything received so far; only the last line matters,
            # since it tells us which login prompt (if any) is displayed.
            lastLine = None
            while True:
                line = self.__bashReadLine(timeout=1)
                if not line:
                    break
                lastLine = line

            if lastLine == RPI_FULL_PROMPT:
                self.log("prompt found, login success!")
                loginOk = True
                break

            if lastLine == RPI_PASSWORD_PROMPT:
                self.log("inputing password ...")
                self.__bashWriteLine('raspberry')
            elif lastLine == RPI_USERNAME_PROMPT:
                self.log("inputing username ...")
                self.__bashWriteLine('pi')
            elif not lastLine:
                # No output at all yet; nudge the login prompt again.
                self.log("inputing username ...")
                self.__bashWriteLine('pi')

        if not loginOk:
            raise Exception('login fail')

        # Widen the terminal so long commands are not wrapped by the tty.
        self.bash('stty cols 256')

    def log(self, fmt, *args):
        # Best-effort logging: formatting errors are swallowed on purpose.
        try:
            msg = fmt % args
            print('%s - %s - %s' % (self.port, time.strftime('%b %d %H:%M:%S'), msg))
        except Exception:
            pass

    def close(self):
        self.__handle.close()

    def bash(self, cmd, timeout=10):
        """
        Execute the command in bash.

        Returns the command's output lines, with the command echo and the
        trailing prompt stripped. Raises on timeout after sending Ctrl+C.
        """
        self.__bashClearLines()
        self.__bashWriteLine(cmd)
        # Wait for the tty to echo the command back before collecting
        # output; endswith=True because the echo is preceded by the prompt.
        self.__bashExpect(cmd, timeout=timeout, endswith=True)

        response = []

        deadline = time.time() + timeout
        while time.time() < deadline:
            line = self.__bashReadLine()
            if line is None:
                time.sleep(0.01)
                continue

            if line == RPI_FULL_PROMPT:
                # return response lines without prompt
                return response

            response.append(line)

        # Timed out: interrupt whatever is still running.
        self.__bashWrite('\x03')
        raise Exception('%s: failed to find end of response' % self.port)

    def __bashExpect(self, expected, timeout=20, endswith=False):
        # Consume lines until one matches `expected` (exact match, or
        # suffix match when endswith=True); Ctrl+C and raise on timeout.
        self.log('Expecting [%r]' % (expected))

        deadline = time.time() + timeout
        while time.time() < deadline:
            line = self.__bashReadLine()
            if line is None:
                time.sleep(0.01)
                continue

            print('[%s] Got line [%r]' % (self.port, line))

            if endswith:
                matched = line.endswith(expected)
            else:
                matched = line == expected

            if matched:
                print('[%s] Expected [%r]' % (self.port, expected))
                return

        # failed to find the expected string
        # send Ctrl+C to terminal
        self.__bashWrite('\x03')
        raise Exception('failed to find expected string[%s]' % expected)

    def __bashRead(self, timeout=1):
        # Read raw bytes until the port goes idle or the deadline passes.
        deadline = time.time() + timeout
        data = ''
        while True:
            piece = self.__handle.read()
            data = data + piece.decode('utf8')
            if piece:
                continue
            if data or time.time() >= deadline:
                break

        if data:
            self.log('>>> %r', data)
        return data

    def __bashReadLine(self, timeout=1):
        # Return the next complete line, or None if no full line is
        # buffered and nothing more arrives within `timeout`.
        line = self.__bashGetNextLine()
        if line is not None:
            return line

        assert len(self.__lines) == 1, self.__lines
        tail = self.__lines.pop()

        try:
            tail += self.__bashRead(timeout=timeout)
            # Prompts are not newline-terminated; inject line breaks after
            # them so they surface as complete lines to the callers above.
            tail = tail.replace(RPI_FULL_PROMPT, RPI_FULL_PROMPT + '\r\n')
            tail = tail.replace(RPI_USERNAME_PROMPT, RPI_USERNAME_PROMPT + '\r\n')
            tail = tail.replace(RPI_PASSWORD_PROMPT, RPI_PASSWORD_PROMPT + '\r\n')
        finally:
            self.__lines += [l.rstrip('\r') for l in LINESEPX.split(tail)]
            assert len(self.__lines) >= 1, self.__lines

        return self.__bashGetNextLine()

    def __bashGetNextLine(self):
        # Pop buffered complete lines, diverting known kernel noise
        # (under-voltage warnings) to the logging module instead.
        assert len(self.__lines) >= 1, self.__lines
        while len(self.__lines) > 1:
            line = self.__lines.pop(0)
            assert len(self.__lines) >= 1, self.__lines
            if LOGX.match(line):
                logging.info('LOG: %s', line)
                continue
            else:
                return line
        assert len(self.__lines) >= 1, self.__lines
        return None

    def __bashWrite(self, data):
        self.__handle.write(data)
        self.log("<<< %r", data)

    def __bashClearLines(self):
        # Discard any pending buffered output before issuing a new command.
        assert len(self.__lines) >= 1, self.__lines
        while self.__bashReadLine(timeout=0) is not None:
            pass
        assert len(self.__lines) >= 1, self.__lines

    def __bashWriteLine(self, line):
        self.__bashWrite(line + '\n')
class OpenThread_BR(OpenThreadTHCI, IThci):
DEFAULT_COMMAND_TIMEOUT = 20
IsBorderRouter = True
def _connect(self):
self.log("logging in to Raspberry Pi ...")
self.__cli_output_lines = []
self.__syslog_skip_lines = None
self.__syslog_last_read_ts = 0
if self.connectType == 'ip':
self.__handle = SSHHandle(self.telnetIp, self.telnetPort, self.telnetUsername, self.telnetPassword)
else:
self.__handle = SerialHandle(self.port, 115200)
self.__afterConnect()
def _disconnect(self):
if self.__handle:
self.__handle.close()
self.__handle = None
def _deviceBeforeReset(self):
if self.isPowerDown:
self.log('Powering up the device')
self.powerUp()
if self.IsHost:
self.__stopRadvdService()
self.bash('sudo ip -6 addr del 910b::1 dev eth0 || true')
self.bash('sudo ip -6 addr del fd00:7d03:7d03:7d03::1 dev eth0 || true')
self.stopListeningToAddrAll()
def _deviceAfterReset(self):
self.__dumpSyslog()
self.__truncateSyslog()
if not self.IsHost:
self.bash('sudo service otbr-agent restart')
@API
def setupHost(self, setDua=False):
self.IsHost = True
if not setDua:
cmd = 'sudo ip -6 addr add 910b::1 dev eth0'
else:
cmd = 'sudo ip -6 addr add fd00:7d03:7d03:7d03::1 dev eth0'
self.bash(cmd)
self.__startRadvdService()
def _deviceEscapeEscapable(self, string):
"""Escape CLI escapable characters in the given string.
Args:
string (str): UTF-8 input string.
Returns:
[str]: The modified string with escaped characters.
"""
return '"' + string + '"'
@watched
def bash(self, cmd, timeout=DEFAULT_COMMAND_TIMEOUT):
return self.__handle.bash(cmd, timeout=timeout)
def bash_unwatched(self, cmd, timeout=DEFAULT_COMMAND_TIMEOUT):
return self.__handle.bash(cmd, timeout=timeout)
# Override send_udp
@API
def send_udp(self, interface, dst, port, payload):
if interface == 0: # Thread Interface
super(OpenThread_BR, self).send_udp(interface, dst, port, payload)
return
if interface == 1:
ifname = 'eth0'
else:
raise AssertionError('Invalid interface set to send UDP: {} '
'Available interface options: 0 - Thread; 1 - Ethernet'.format(interface))
cmd = 'sudo /home/pi/reference-device/send_udp.py %s %s %s %s' % (ifname, dst, port, payload)
print(cmd)
self.bash(cmd)
@API
def mldv2_query(self):
ifname = 'eth0'
dst = 'ff02::1'
cmd = 'sudo /home/pi/reference-device/send_mld_query.py %s %s' % (ifname, dst)
print(cmd)
self.bash(cmd)
@API
def ip_neighbors_flush(self):
print('%s call clear_cache' % self.port)
# clear neigh cache on linux
cmd1 = 'sudo ip -6 neigh flush nud all nud failed nud noarp dev eth0'
cmd2 = 'sudo ip -6 neigh list nud all dev eth0 ' \
'| cut -d " " -f1 ' \
'| sudo xargs -I{} ip -6 neigh delete {} dev eth0'
cmd = '%s ; %s' % (cmd1, cmd2)
self.bash(cmd)
@API
def ip_neighbors_add(self, addr, lladdr, nud='noarp'):
print('%s ip_neighbors_add' % self.port)
cmd1 = 'sudo ip -6 neigh delete %s dev eth0' % addr
cmd2 = 'sudo ip -6 neigh add %s dev eth0 lladdr %s nud %s' % (addr, lladdr, nud)
cmd = '%s ; %s' % (cmd1, cmd2)
self.bash(cmd)
@API
def get_eth_ll(self):
print('%s get_eth_ll' % self.port)
cmd = "ip -6 addr list dev eth0 | grep 'inet6 fe80' | awk '{print $2}'"
ret = self.bash(cmd)[0].split('/')[0]
return ret
@API
def ping(self, strDestination, ilength=0, hop_limit=5, timeout=5):
""" send ICMPv6 echo request with a given length to a unicast destination
address
Args:
strDestination: the unicast destination address of ICMPv6 echo request
ilength: the size of ICMPv6 echo request payload
hop_limit: the hop limit
timeout: time before ping() stops
"""
if hop_limit is None:
hop_limit = 5
if self.IsHost or self.IsBackboneRouter:
ifName = 'eth0'
else:
ifName = 'wpan0'
cmd = 'ping -6 -I %s %s -c 1 -s %d -W %d -t %d' % (
ifName,
strDestination,
int(ilength),
int(timeout),
int(hop_limit),
)
self.bash(cmd)
time.sleep(timeout)
def multicast_Ping(self, destination, length=20):
"""send ICMPv6 echo request with a given length to a multicast destination
address
Args:
destination: the multicast destination address of ICMPv6 echo request
length: the size of ICMPv6 echo request payload
"""
print('%s call multicast_Ping' % self.port)
print('destination: %s' % destination)
hop_limit = 5
if self.IsHost or self.IsBackboneRouter:
ifName = 'eth0'
else:
ifName = 'wpan0'
cmd = 'ping -6 -I %s %s -c 1 -s %d -t %d' % (ifName, destination, str(length), hop_limit)
self.bash(cmd)
@API
def getGUA(self, filterByPrefix=None, eth=False):
"""get expected global unicast IPv6 address of Thread device
note: existing filterByPrefix are string of in lowercase. e.g.
'2001' or '2001:0db8:0001:0000".
Args:
filterByPrefix: a given expected global IPv6 prefix to be matched
Returns:
a global IPv6 address
"""
# get global addrs set if multiple
if eth:
return self.__getEthGUA(filterByPrefix=filterByPrefix)
else:
return super(OpenThread_BR, self).getGUA(filterByPrefix=filterByPrefix)
def __getEthGUA(self, filterByPrefix=None):
globalAddrs = []
cmd = 'ip -6 addr list dev eth0 | grep inet6'
output = self.bash(cmd)
for line in output:
# example: inet6 2401:fa00:41:23:274a:1329:3ab9:d953/64 scope global dynamic noprefixroute
line = line.strip().split()
if len(line) < 4 or line[2] != 'scope':
continue
if line[3] != 'global':
continue
addr = line[1].split('/')[0]
addr = str(ipaddress.IPv6Address(addr.decode()).exploded)
globalAddrs.append(addr)
if not filterByPrefix:
return globalAddrs[0]
else:
if filterByPrefix[-2:] != '::':
filterByPrefix = '%s::' % filterByPrefix
prefix = ipaddress.IPv6Network((filterByPrefix + '/64').decode())
for fullIp in globalAddrs:
address = ipaddress.IPv6Address(fullIp.decode())
if address in prefix:
return fullIp
def _cliReadLine(self):
# read commissioning log if it's commissioning
if not self.__cli_output_lines:
self.__readSyslogToCli()
if self.__cli_output_lines:
return self.__cli_output_lines.pop(0)
return None
@watched
def _deviceGetEtherMac(self):
# Harness wants it in string. Because wireshark filter for eth
# cannot be applies in hex
return self.bash('ip addr list dev eth0 | grep ether')[0].strip().split()[1]
@watched
def _onCommissionStart(self):
assert self.__syslog_skip_lines is None
self.__syslog_skip_lines = int(self.bash('wc -l /var/log/syslog')[0].split()[0])
self.__syslog_last_read_ts = 0
@watched
def _onCommissionStop(self):
assert self.__syslog_skip_lines is not None
self.__syslog_skip_lines = None
def _deviceBeforeThreadStart(self):
self.bash('sudo sysctl net.ipv6.conf.eth0.accept_ra=2')
@watched
def __startRadvdService(self):
assert self.IsHost, "radvd service runs on Host only"
self.bash("""sudo sh -c "cat >/etc/radvd.conf <<EOF
interface eth0
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 30;
AdvDefaultPreference low;
prefix 910b::/64
{
AdvOnLink on;
AdvAutonomous on;
AdvRouterAddr on;
};
prefix fd00:7d03:7d03:7d03::/64
{
AdvOnLink on;
AdvAutonomous off;
AdvRouterAddr off;
};
};
EOF"
""")
self.bash('sudo service radvd restart')
self.bash('sudo service radvd status')
@watched
def __stopRadvdService(self):
assert self.IsHost, "radvd service runs on Host only"
self.bash('sudo service radvd stop')
def __readSyslogToCli(self):
if self.__syslog_skip_lines is None:
return 0
# read syslog once per second
if time.time() < self.__syslog_last_read_ts + 1:
return 0
self.__syslog_last_read_ts = time.time()
lines = self.bash_unwatched('tail +%d /var/log/syslog' % self.__syslog_skip_lines)
for line in lines:
m = OTBR_AGENT_SYSLOG_PATTERN.search(line)
if not m:
continue
self.__cli_output_lines.append(m.group(1))
self.__syslog_skip_lines += len(lines)
return len(lines)
def _cliWriteLine(self, line):
cmd = 'sudo ot-ctl -- %s' % line
output = self.bash(cmd)
# fake the line echo back
self.__cli_output_lines.append(line)
for line in output:
self.__cli_output_lines.append(line)
def __afterConnect(self):
    """Post-connect setup: clear stale syslog content, then reset services."""
    self.__truncateSyslog()
    self.__checkServiceStatus()
def __checkServiceStatus(self):
    """Ensure radvd is stopped and otbr-agent is freshly restarted."""
    self.bash('sudo service radvd stop')
    self.bash('sudo service otbr-agent restart')
def __truncateSyslog(self):
    """Empty /var/log/syslog so subsequent reads only see fresh output."""
    self.bash('sudo truncate -s 0 /var/log/syslog')
def __dumpSyslog(self):
    """Log every otbr-agent line currently in syslog (debugging aid)."""
    output = self.bash_unwatched('sudo grep "otbr-agent" /var/log/syslog')
    for line in output:
        self.log('%s', line)
@API
def mdns_query(self, dst='ff02::fb', service='_meshcop._udp.local', addrs_blacklist=None):
    """Discover a Thread border agent via mDNS.

    For the simple case (link-local multicast destination, no blacklist) a
    single DNS-SD PTR query is sent with dig and nothing is returned.
    Otherwise Zeroconf is used to browse for border agents and the first
    active one whose link-local address is not blacklisted is returned.

    Args:
        dst             : str : mDNS destination address.
        service         : str : DNS-SD service type to query.
        addrs_blacklist : list|None : link-local addresses to skip
                          (None means no blacklist).

    Returns:
        ('addr%eth0', port) tuple for the selected border agent, or None on
        the plain-query path.

    Raises:
        Exception: when no active, non-blacklisted border agent is found.
    """
    # Fixed mutable default argument: the previous `addrs_blacklist=[]` was
    # a single list shared across calls. `None` sentinel keeps caller-visible
    # behaviour identical.
    addrs_blacklist = [] if addrs_blacklist is None else addrs_blacklist
    print('mdns_query %s %s %s' % (dst, service, addrs_blacklist))

    # For BBR-TC-03 or DH test cases just send a query
    if dst == 'ff02::fb' and not addrs_blacklist:
        self.bash('dig -p 5353 @%s %s ptr' % (dst, service))
        return

    # For MATN-TC-17 and MATN-TC-18 use Zeroconf to get the BBR address and border agent port
    from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf, DNSAddress, DNSService, DNSText

    def on_service_state_change(zeroconf, service_type, name, state_change):
        # Resolving the service populates the Zeroconf cache parsed below.
        if state_change is ServiceStateChange.Added:
            zeroconf.get_service_info(service_type, name)

    class BorderAgent(object):
        # Plain record describing one discovered border agent.
        alias = None
        server_name = None
        link_local_addr = None
        port = None
        thread_status = None

        def __init__(self, alias):
            self.alias = alias

        def __repr__(self):
            return '%s # [%s]:%s TIS=%s' % (self.alias, self.link_local_addr, self.port, self.thread_status)

    def parse_cache(cache):
        """Extract BorderAgent records from a raw Zeroconf record cache."""
        border_agents = []

        # Find all border routers
        for ptr in cache['_meshcop._udp.local.']:
            border_agents.append(BorderAgent(ptr.alias))

        # Find server name, port and Thread Interface status for each border router
        for ba in border_agents:
            for record in cache[ba.alias.lower()]:
                if isinstance(record, DNSService):
                    ba.server_name = record.server
                    ba.port = record.port
                elif isinstance(record, DNSText):
                    text = bytearray(record.text)
                    # 'sb=' TXT entry carries the state bitmap; the Thread
                    # Interface Status is bits 3-4 of its 4th byte.
                    sb = text.split(b'sb=')[1][0:4]
                    ba.thread_status = (sb[3] & 0x18) >> 3

        # Find link local address for each border router
        for ba in border_agents:
            for record in cache[ba.server_name]:
                if isinstance(record, DNSAddress):
                    addr = ipaddress.ip_address(record.address)
                    if isinstance(addr, ipaddress.IPv6Address) and addr.is_link_local:
                        ba.link_local_addr = str(addr)
                        break

        return border_agents

    # Browse border agents
    zeroconf = Zeroconf()
    ServiceBrowser(zeroconf, "_meshcop._udp.local.", handlers=[on_service_state_change])
    time.sleep(2)
    cache = zeroconf.cache.cache
    zeroconf.close()

    # Find an active border agent not in the blacklist
    border_agents = parse_cache(cache)
    for ba in border_agents:
        # thread_status == 2 means the Thread interface is active.
        if ba.thread_status == 2 and ba.link_local_addr not in addrs_blacklist:
            return ('%s%%eth0' % ba.link_local_addr, ba.port)

    raise Exception('No active Border Agents found')
# Override powerDown
@API
def powerDown(self):
    """Power down the BBR: stop otbr-agent, then run the generic power-down."""
    self.log('Powering down BBR')
    self.bash('sudo service otbr-agent stop')
    super(OpenThread_BR, self).powerDown()
# Override powerUp
@API
def powerUp(self):
    """Power up the BBR: start otbr-agent, then run the generic power-up."""
    self.log('Powering up BBR')
    self.bash('sudo service otbr-agent start')
    super(OpenThread_BR, self).powerUp()
# Override forceSetSlaac
@API
def forceSetSlaac(self, slaacAddress):
    """Manually assign the given SLAAC address (as /64) to wpan0."""
    print('forceSetSlaac %s' % slaacAddress)
    self.bash('sudo ip -6 addr add %s/64 dev wpan0' % slaacAddress)
# Override registerMulticast
@API
def registerMulticast(self, sAddr='ff04::1234:777a:1', timeout=300):
    """subscribe to the given ipv6 address (sAddr) in interface and send MLR.req OTA

    Args:
        sAddr   : str : Multicast address to be subscribed and notified OTA.
        timeout : int : MLR timeout in seconds, forwarded to the external
                        commissioner when one is attached.

    Returns:
        True on success.
    """
    if self.externalCommissioner is not None:
        self.externalCommissioner.MLR([sAddr], timeout)
        return True
    # No external commissioner: join the group on wpan0 via a background
    # helper script; its output is discarded.
    cmd = 'sudo nohup ~/repo/openthread/tests/scripts/thread-cert/mcast6.py wpan0 %s' % sAddr
    cmd = cmd + ' > /dev/null 2>&1 &'
    self.bash(cmd)
    return True
# Override stopListeningToAddr
@API
def stopListeningToAddr(self, sAddr):
    """
    Unsubscribe from a given IPv6 address which was subscribed earlier with `registerMulticast`.

    Args:
        sAddr : str : Multicast address to be unsubscribed. Use an empty string to unsubscribe
                      all the active multicast addresses.
    """
    # Kill the background mcast6.py helper(s) matching this address; an empty
    # sAddr matches (and kills) all of them.
    cmd = 'sudo pkill -f mcast6.*%s' % sAddr
    self.bash(cmd)
def stopListeningToAddrAll(self):
    """Unsubscribe every address previously joined via `registerMulticast`."""
    return self.stopListeningToAddr('')
@API
def deregisterMulticast(self, sAddr):
    """
    Unsubscribe to a given IPv6 address.
    Only used by External Commissioner.

    Args:
        sAddr : str : Multicast address to be unsubscribed.

    Returns:
        True on success.
    """
    # An MLR with timeout 0 removes the multicast registration.
    self.externalCommissioner.MLR([sAddr], 0)
    return True
| 32.564815
| 124
| 0.586986
|
4a04267160a84ec20a926e71ad66700c3ab47b4f
| 4,345
|
py
|
Python
|
controlm_py/models/monitoring_privilege_category.py
|
dcompane/controlm_py
|
c521208be2f00303383bb32ca5eb2b7ff91999d3
|
[
"MIT"
] | 2
|
2020-03-20T18:24:23.000Z
|
2021-03-05T22:05:04.000Z
|
controlm_py/models/monitoring_privilege_category.py
|
dcompane/controlm_py
|
c521208be2f00303383bb32ca5eb2b7ff91999d3
|
[
"MIT"
] | null | null | null |
controlm_py/models/monitoring_privilege_category.py
|
dcompane/controlm_py
|
c521208be2f00303383bb32ca5eb2b7ff91999d3
|
[
"MIT"
] | 1
|
2021-05-27T15:54:37.000Z
|
2021-05-27T15:54:37.000Z
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MonitoringPrivilegeCategory(object):
    """Monitoring privilege category (Control-M access levels).

    NOTE: This class is auto generated by the swagger code generator program.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    swagger_types = {
        'alert': 'str',
        'viewpoint_archive': 'str'
    }

    attribute_map = {
        'alert': 'Alert',
        'viewpoint_archive': 'ViewpointArchive'
    }

    def __init__(self, alert=None, viewpoint_archive=None):  # noqa: E501
        """MonitoringPrivilegeCategory - a model defined in Swagger"""  # noqa: E501
        self._alert = None
        self._viewpoint_archive = None
        self.discriminator = None
        if alert is not None:
            self.alert = alert
        if viewpoint_archive is not None:
            self.viewpoint_archive = viewpoint_archive

    @property
    def alert(self):
        """Alerts access level (None, Browse, Update, Full).

        :return: The alert of this MonitoringPrivilegeCategory.
        :rtype: str
        """
        return self._alert

    @alert.setter
    def alert(self, alert):
        """Set the Alerts access level (None, Browse, Update, Full)."""
        self._alert = alert

    @property
    def viewpoint_archive(self):
        """Archived Viewpoints access level (None, Browse, Update, Full).

        :return: The viewpoint_archive of this MonitoringPrivilegeCategory.
        :rtype: str
        """
        return self._viewpoint_archive

    @viewpoint_archive.setter
    def viewpoint_archive(self, viewpoint_archive):
        """Set the Archived Viewpoints access level (None, Browse, Update, Full)."""
        self._viewpoint_archive = viewpoint_archive

    def to_dict(self):
        """Return the model properties as a dict.

        Nested models (anything exposing to_dict) are serialized recursively,
        including inside lists and dict values.
        """
        # dict.items() replaces the former six.iteritems shim: the iteration
        # behaviour is identical and it removes the six dependency.
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        # Generated-code pattern: merge dict contents when the model subclasses dict.
        if issubclass(MonitoringPrivilegeCategory, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, MonitoringPrivilegeCategory):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 30.815603
| 106
| 0.597238
|
4a04275a12d37dfa44bc43d7801f5844a8e11dba
| 989
|
py
|
Python
|
aries_cloudcontroller/model/publish_revocations.py
|
didx-xyz/aries-cloudcontroller-pyton
|
88faf35052820e28606f1a2231fc756c5c7ea268
|
[
"Apache-2.0"
] | 5
|
2021-04-07T17:48:41.000Z
|
2022-03-27T17:53:48.000Z
|
aries_cloudcontroller/model/publish_revocations.py
|
didx-xyz/aries-cloudcontroller-pyton
|
88faf35052820e28606f1a2231fc756c5c7ea268
|
[
"Apache-2.0"
] | 66
|
2021-04-12T13:45:05.000Z
|
2022-03-30T21:09:25.000Z
|
aries_cloudcontroller/model/publish_revocations.py
|
didx-xyz/aries-cloudcontroller-pyton
|
88faf35052820e28606f1a2231fc756c5c7ea268
|
[
"Apache-2.0"
] | 1
|
2021-06-02T06:58:06.000Z
|
2021-06-02T06:58:06.000Z
|
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional, Union, Literal # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator, Field, Extra # noqa: F401
class PublishRevocations(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.

    PublishRevocations - a model defined in OpenAPI

    rrid2crid: Credential revocation ids by revocation registry id [Optional].
    """

    # Mapping: revocation registry id -> list of credential revocation ids.
    rrid2crid: Optional[Dict[str, List[str]]] = None

    def __init__(
        self,
        *,
        rrid2crid: Optional[Dict[str, List[str]]] = None,
        **kwargs,
    ):
        # Keyword-only constructor; everything is forwarded to pydantic's
        # BaseModel so its validation applies.
        super().__init__(
            rrid2crid=rrid2crid,
            **kwargs,
        )

    class Config:
        # Allow population by field name as well as by alias.
        allow_population_by_field_name = True


# Resolve any forward references in the annotations (pydantic requirement).
PublishRevocations.update_forward_refs()
| 24.725
| 96
| 0.665319
|
4a042876bfb516c170b9e3fc17a008bfa872c37f
| 2,079
|
py
|
Python
|
elasticapm/instrumentation/packages/zlib.py
|
trancee/apm-agent-python
|
04f014661f18b871b8b1970978b639b92f627e5f
|
[
"BSD-3-Clause"
] | 1
|
2019-04-01T10:23:45.000Z
|
2019-04-01T10:23:45.000Z
|
elasticapm/instrumentation/packages/zlib.py
|
alirezarezvani/apm-agent-python
|
5a287a4968dcaf1d5ea84f15f9793df24cf39a3d
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/instrumentation/packages/zlib.py
|
alirezarezvani/apm-agent-python
|
5a287a4968dcaf1d5ea84f15f9793df24cf39a3d
|
[
"BSD-3-Clause"
] | 2
|
2020-02-04T22:03:00.000Z
|
2021-03-23T13:17:14.000Z
|
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
from elasticapm.traces import capture_span
class ZLibInstrumentation(AbstractInstrumentedModule):
    """Wrap zlib compress/decompress calls in an APM capture span."""

    name = "zlib"

    instrument_list = [("zlib", "compress"), ("zlib", "decompress")]

    def call(self, module, method, wrapped, instance, args, kwargs):
        """Execute the wrapped zlib function inside a 'compression.zlib' span."""
        span_name = ".".join((module, method))
        with capture_span(span_name, "compression.zlib"):
            return wrapped(*args, **kwargs)
| 48.348837
| 81
| 0.759019
|
4a0429bbff48c89628ccdbb6f74b5a63736e69f5
| 259
|
py
|
Python
|
62. Unique Paths.py
|
viewv/leetcode
|
b31e643846bb38978746342e3e3a94991178565a
|
[
"MIT"
] | 2
|
2018-02-26T09:12:19.000Z
|
2019-06-07T13:38:10.000Z
|
62. Unique Paths.py
|
viewv/leetcode
|
b31e643846bb38978746342e3e3a94991178565a
|
[
"MIT"
] | 1
|
2018-12-24T07:03:34.000Z
|
2018-12-24T07:03:34.000Z
|
62. Unique Paths.py
|
viewv/leetcode
|
b31e643846bb38978746342e3e3a94991178565a
|
[
"MIT"
] | 2
|
2018-12-24T07:01:03.000Z
|
2019-06-07T13:38:07.000Z
|
class Solution(object):
    def uniquePaths(self, m, n):
        """Count monotone lattice paths (right/down moves only) in an
        m-column by n-row grid, via a 1-D rolling DP row."""
        row = [1] * m
        for _ in range(1, n):
            # row[c] accumulates paths from above (old row[c]) and left.
            for c in range(1, m):
                row[c] += row[c - 1]
        return row[m - 1]
| 32.375
| 54
| 0.455598
|
4a042aaaafcbe3e1cd7b10bfa79745f3444fbc86
| 3,494
|
py
|
Python
|
test/test_config.py
|
by46/simplekit
|
33f3ce6de33accc185e1057f096af41859db5976
|
[
"MIT"
] | null | null | null |
test/test_config.py
|
by46/simplekit
|
33f3ce6de33accc185e1057f096af41859db5976
|
[
"MIT"
] | null | null | null |
test/test_config.py
|
by46/simplekit
|
33f3ce6de33accc185e1057f096af41859db5976
|
[
"MIT"
] | null | null | null |
import os
import os.path
import tempfile
import unittest
from simplekit.config import SQLiteConfig, Config
class SQLiteConfigTestCase(unittest.TestCase):
    """Tests for SQLiteConfig: persistence, attribute/item access, namespaces."""

    filename = 'config.db'

    def _make_db_path(self):
        """Create a private temp directory and return (dir, db-path) inside it.

        Uses mkdtemp, which creates the directory atomically; the previous
        mktemp()+os.makedirs() pair was racy and mktemp is deprecated.
        """
        tmp = tempfile.mkdtemp(prefix='cabinet', suffix='testing')
        return tmp, os.path.join(tmp, self.filename)

    def _cleanup(self, tmp, full_path):
        """Remove the database file and its temporary directory."""
        os.remove(full_path)
        os.removedirs(tmp)

    def test_config_normal(self):
        """Defaults supplied at creation are stored and survive reopening."""
        tmp, full_path = self._make_db_path()
        config = SQLiteConfig(full_path, default=dict(name='benjamin', sex='male', age=28))
        config.close()
        self.assertEqual('benjamin', config.name)
        self.assertEqual('male', config.sex)
        self.assertEqual(28, config.age)

        config = SQLiteConfig(full_path)
        self.assertEqual('benjamin', config.name)
        self.assertEqual('male', config.sex)
        self.assertEqual(28, config.age)
        config.close()
        self._cleanup(tmp, full_path)

    def test_config_save(self):
        """Attribute/item writes persist; deletions persist and read back as None."""
        tmp, full_path = self._make_db_path()
        config = SQLiteConfig(full_path)
        config.name = 'benjamin'
        config.age = 28
        config['sex'] = 'male'
        config.close()

        config = SQLiteConfig(full_path)
        self.assertEqual('benjamin', config.name)
        self.assertEqual('male', config.sex)
        self.assertEqual(28, config.age)
        del config.age
        del config['sex']
        config.close()

        config = SQLiteConfig(full_path)
        self.assertEqual('benjamin', config.name)
        self.assertEqual(None, config.sex)
        self.assertEqual(None, config.age)
        config.close()
        self._cleanup(tmp, full_path)

    def test_get_namespace(self):
        """get_namespace strips the prefix and returns matching keys only."""
        tmp, full_path = self._make_db_path()
        config = SQLiteConfig(full_path, default=dict(generic_name='benjamin', generic_sex='male', generic_age=28))
        namespace = config.get_namespace('generic_')
        self.assertDictEqual(dict(name='benjamin', sex='male', age=28), namespace)
        config.close()
        self._cleanup(tmp, full_path)
# Fixture values picked up by Config.from_object(__name__) in the test below.
BASIC_LOG_HOME = "."
BASIC_FILE_SIZE = 21


class ConfigTestCase(unittest.TestCase):
    """Tests for Config loading from objects, python files and JSON files."""

    def test_config(self):
        """The same values load identically from this module, saber.cfg and saber.json."""
        config = Config(os.path.dirname(__file__))
        config.from_object(__name__)
        self.assertEqual(".", config["BASIC_LOG_HOME"])
        self.assertEqual(21, config["BASIC_FILE_SIZE"])
        # Namespace extraction: lowercase/trim behaviour is driven by flags.
        self.assertDictEqual({'log_home': '.', 'file_size': 21}, config.get_namespace('BASIC_'))
        self.assertDictEqual({'BASIC_LOG_HOME': '.', 'BASIC_FILE_SIZE': 21},
                             config.get_namespace('BASIC_', lowercase=False, trim_namespace=False))
        self.assertDictEqual({'basic_log_home': '.', 'basic_file_size': 21},
                             config.get_namespace('BASIC_', trim_namespace=False))

        config = Config(os.path.dirname(__file__))
        config.from_pyfile("saber.cfg")
        self.assertEqual(".", config["BASIC_LOG_HOME"])
        self.assertEqual(21, config["BASIC_FILE_SIZE"])

        config = Config(os.path.dirname(__file__))
        config.from_json("saber.json")
        self.assertEqual(".", config["BASIC_LOG_HOME"])
        self.assertEqual(21, config["BASIC_FILE_SIZE"])
| 32.351852
| 116
| 0.61763
|
4a042b4c41f61cf7d79799b3093f53c77f421a15
| 4,551
|
py
|
Python
|
reward_test.py
|
zachary2wave/UAV
|
df9f46a54b842504f13a4bb257f249b094addc0b
|
[
"MIT"
] | null | null | null |
reward_test.py
|
zachary2wave/UAV
|
df9f46a54b842504f13a4bb257f249b094addc0b
|
[
"MIT"
] | null | null | null |
reward_test.py
|
zachary2wave/UAV
|
df9f46a54b842504f13a4bb257f249b094addc0b
|
[
"MIT"
] | null | null | null |
import numpy as np
import gym
import matplotlib.pyplot as plt
import scipy.io as sio
from mpl_toolkits.mplot3d.axes3d import Axes3D
import sys
import csv
# Environment id — presumably registered by the project's gym plugin; confirm.
ENV_NAME = 'uav-downlink-2d-v3'

# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
###########################3
def policy(env, policy, now):
    """Pick a steering action toward a service point (SP) under one of four policies.

    Args:
        env    : environment exposing SPplacex/SPplacey, G, SNR, placex/placey,
                 cline, v, delta, amax and Vmax.
        policy : str : one of 'maxG', 'minSNR', 'random', 'cline'.
        now    : int : index of the currently targeted SP.

    Returns:
        (action, num): normalised [aimx, aimy, 1] array and the chosen SP index.
    """
    dx = env.SPplacex
    dy = env.SPplacey
    # Indices of SPs that still have data left to serve (G != 0).
    selected = np.where(env.G != 0)[0]
    if policy == 'maxG':
        num = np.argmax(env.G)
        aimx, aimy = dx[num] - env.placex, dy[num] - env.placey
    elif policy == 'minSNR':
        num = now
        if env.G[num] == 0:
            # NOTE(review): adding a uniform +10000 does not change argmin;
            # probably meant to penalise already-served SPs — confirm.
            tnum = np.argmin(env.SNR[selected] + 10000)
            num = selected[tnum]
        aimx, aimy = dx[num] - env.placex, dy[num] - env.placey
    elif policy == 'random':
        num = now
        # NOTE(review): checks env.G[env.cline] rather than env.G[num];
        # verify whether retargeting should depend on `num` instead.
        if env.G[env.cline] == 0:
            num = np.random.choice(selected)
        aimx, aimy = dx[num] - env.placex, dy[num] - env.placey
    elif policy == 'cline':
        num = env.cline
        if env.G[env.cline] == 0:
            num = np.random.choice(selected)
        aimx, aimy = dx[num] - env.placex, dy[num] - env.placey
    # Normalise the aim vector to a unit direction.
    norm = np.sqrt(aimx ** 2 + aimy ** 2)
    aimx = aimx / norm
    aimy = aimy / norm
    # Zero any component that would exceed the speed limit after one step.
    if np.abs(env.v[0] + aimx * env.delta * env.amax) > env.Vmax:
        aimx = 0
    if np.abs(env.v[1] + aimy * env.delta * env.amax) > env.Vmax:
        aimy = 0
    return np.array([aimx, aimy, 1]), num
if __name__ == '__main__':
    # Earlier, more detailed logging layout, kept for reference.
    # records = []
    # recordv = []
    # recorda = []
    # recorddone = []
    # recordcline = []
    # recordrate = []
    # recordreward = []
    # recordG = []
    # recordSP = []
    # recordobservation = []
    records = []
    recorda = []
    recordr = []
    recordd = []
    try:
        # Run up to 500 episodes with the 'maxG' policy; Ctrl-C saves results.
        for loop in range(500):
            print(loop)
            S = env.reset()
            cline = env.cline
            fig = plt.figure(1)
            plt.ion()
            tarx = []
            tary = []
            recordtemp = []
            acrecord = []
            ac = 0
            while env.done == 0:
                action, cline = policy(env, 'maxG', cline)
                # action = [1,1,1]
                S_, reward, done, info = env.step(np.array(action))
                print('reward:', reward, 'Gleft=', env.G[cline], 'recordtemp', info['temp'])
                records.append(S)
                recorda.append(action)
                recordr.append(reward)
                recordtemp.append(info['temp'])
                ac +=reward
                acrecord.append(ac)
                recordd.append(done)
                # print(loop, 'place =', int(S[12]), int(S[13]), 'speed =', int(S[14]), int(S[15]),
                #       'action =', int(action[0]*30), int(action[1]*30), 'left=', int(np.sum(env.G)))
                # # record.append({"observation":S,"action":action,"reward":reward,"done":done})
                S = S_[:]
                # print('reward=', str(reward), 'left=', np.sum(env.G))
                # print(cline,env.cline)
                # 'huatu' ("plotting"): the disabled live visualisation below.
                '''huatu '''
                # plt.cla()
                # SPx = [str(int(x)) for x in env.SPplacex]
                # SPy = [str(int(x)) for x in env.SPplacey]
                # intG = [str(int(x)) for x in env.G]
                # tarx.append(S[15])
                # tary.append(S[16])
                # plt.scatter(tarx, tary, c='r')
                # SP = plt.scatter(env.SPplacex, env.SPplacey)
                # LIN = plt.plot([env.placex, env.SPplacex[env.cline]], [env.placey, env.SPplacey[env.cline]],'--')
                # plt.text(env.SPplacex[0], env.SPplacey[0], str(0)+ '-G=' + intG[0])
                # plt.text(env.SPplacex[1], env.SPplacey[1], str(1)+ '-G=' + intG[1])
                # plt.text(env.SPplacex[2], env.SPplacey[2], str(2)+ '-G=' + intG[2])
                # plt.text(env.SPplacex[3], env.SPplacey[3], str(3)+ '-G=' + intG[3])
                # plt.text(env.SPplacex[4], env.SPplacey[4], str(4)+ '-G=' + intG[4])
                # plt.xlim(-400, 400)
                # plt.ylim(-400, 400)
                # plt.pause(0.1)
            # Per-episode plot: cumulative reward plus a scaled temp trace.
            plt.figure(1)
            plt.plot(np.arange(0, len(acrecord)), acrecord)
            plt.plot(np.arange(0, len(recordtemp)), [x*100000 for x in recordtemp])
            plt.show()
            pass
    except KeyboardInterrupt:
        # Interrupting dumps everything collected so far to a .mat file.
        sio.savemat('warmdata-for-' + ENV_NAME + '.mat',
                    {"observation": records, "action": recorda, "reward": recordr, "done": recordd})
        print('the data has been saved')
| 37.303279
| 115
| 0.488464
|
4a042b5b47bf250a39c7c9176ee25af5cbfed53b
| 14,080
|
py
|
Python
|
code/alex/OTHER/charel_ca_VFri.py
|
charelF/ComplexSystems
|
3efc9b577ec777fcecbd5248bbbaf77b7d90fc65
|
[
"MIT"
] | null | null | null |
code/alex/OTHER/charel_ca_VFri.py
|
charelF/ComplexSystems
|
3efc9b577ec777fcecbd5248bbbaf77b7d90fc65
|
[
"MIT"
] | null | null | null |
code/alex/OTHER/charel_ca_VFri.py
|
charelF/ComplexSystems
|
3efc9b577ec777fcecbd5248bbbaf77b7d90fc65
|
[
"MIT"
] | null | null | null |
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import random
from mpl_toolkits.axes_grid1 import make_axes_locatable
import warnings
warnings.simplefilter("ignore")
# Fix both RNGs so simulation runs are reproducible.
np.random.seed(1)
random.seed(1)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def cluster_info(arr):
""" number of clusters (nonzero fields separated by 0s) in array
and size of cluster
"""
data = []
k2coord = {}
k = 0
if arr[0] != 0: # left boundary
data.append(0) # we will increment later in loop
k2coord[k] = []
else:
k=-1
# print("arr", arr)
# print("data", data)
for i in range(0,len(arr)-1):
if arr[i] == 0 and arr[i+1] != 0:
data.append(0)
k += 1
k2coord[k] = []
if arr[i] != 0:
data[-1] += 1
k2coord[k].append(i)
if arr[-1] != 0:
if data: # if array is not empty
data[-1] += 1 # right boundary
k2coord[k].append(len(arr)-1)
else:
data.append(1)
k2coord[k] = [len(arr)-1]
Ncl = len(data) # number of clusters
Nk = data # Nk[k] = size of cluster k
coord2k = {e:k for k,v in k2coord.items() for e in v}
return Ncl, Nk, k2coord, coord2k
def trunc(X, high, low):
    """Clamp X into [low, high] (low bound applied first, as before)."""
    floored = max(X, low)
    return min(high, floored)
def moving_average(x, w):
    """Simple moving average of x with window w ('valid' mode: len(x)-w+1 points)."""
    return np.convolve(x, np.ones(w), 'valid') / w
def visualiseFAST(G, P, N, S, X, D):
    """Quick look: agent-state raster (top) and log-scale price series (bottom)."""
    fig, (ax1,ax2) = plt.subplots(ncols=1, nrows=2, figsize=(12,4))
    ax1.imshow(G.T, cmap="bone", interpolation="None", aspect="auto")
    ax2.semilogy(S)
    plt.show()
def visualiseNICE(G, P, N, S, X, D):
    """Dashboard: agent states, price + MA, log returns, portfolios, net worth
    and influence, each with a side colorbar or marginal histogram."""
    fig, (ax1,ax2,ax3,ax4,ax5,ax6) = plt.subplots(
        ncols=1, nrows=6, figsize=(12,9), sharex=True, gridspec_kw =
        {'wspace':0, 'hspace':0.05, 'height_ratios':[1,2,1,1,1,1]}
    )
    im1 = ax1.imshow(G.T, cmap="bwr", interpolation="None", aspect="auto")
    im4 = ax4.imshow(P.T, cmap="hot", interpolation="None", aspect="auto")
    # Symmetric colour range around the initial balance for net worth.
    amnwc = np.max(np.abs(N-initial_account_balance)) # absolute max net worth change
    vmin, vmax = initial_account_balance-amnwc, initial_account_balance+amnwc
    im5 = ax5.imshow(N.T, cmap="bwr", interpolation="None", aspect="auto", vmin=vmin, vmax=vmax)
    size = "15%"
    cax1 = make_axes_locatable(ax1).append_axes('right', size=size, pad=0.05)
    fig.colorbar(im1, cax=cax1, orientation='vertical')
    cax4 = make_axes_locatable(ax4).append_axes('right', size=size, pad=0.05)
    fig.colorbar(im4, cax=cax4, orientation='vertical')
    cax5 = make_axes_locatable(ax5).append_axes('right', size=size, pad=0.05)
    fig.colorbar(im5, cax=cax5, orientation='vertical')
    # Marginal histograms for price and returns (axes hidden).
    cax2 = make_axes_locatable(ax2).append_axes('right', size=size, pad=0.05)
    cax2.hist(S, orientation="horizontal", bins=np.linspace(np.min(S), np.max(S), len(S)//2))
    # cax2.hist(np.log10(S), orientation="horizontal", bins=np.logspace(np.log10(np.min(S)), np.log10(np.max(S)), len(S)//2))
    # cax2.set_xscale("log")
    # cax2.set_yscale("log")
    cax2.get_xaxis().set_visible(False)
    cax2.get_yaxis().set_visible(False)
    cax3 = make_axes_locatable(ax3).append_axes('right', size=size, pad=0.05)
    cax3.hist(X, orientation="horizontal", bins=np.linspace(np.min(X), np.max(X), len(X)//5))
    cax3.get_xaxis().set_visible(False)
    cax3.get_yaxis().set_visible(False)
    cax6 = make_axes_locatable(ax6).append_axes('right', size=size, pad=0.05)
    cax6.get_xaxis().set_visible(False)
    cax6.get_yaxis().set_visible(False)
    # for ax in (ax2,ax3):
    #     cax = make_axes_locatable(ax).append_axes('right', size=size, pad=0.05)
    #     # cax.axis('off')
    # ax2.set_yscale("log")
    ax2.plot(S, label="S")
    Ws = [25]
    for W in Ws:
        ax2.plot(np.arange(W-1, len(S)), moving_average(S, W), label=f"MA{W}")
    ax2.grid(alpha=0.4)
    # ax2.legend(ncol=len(Ws)+1)
    ax3.bar(np.arange(len(X)), X)
    ax3.grid(alpha=0.4)
    # Individual influence traces only when few agents; mean always drawn.
    if D.shape[1] < 25:
        ax6.plot(D, color="black", alpha=0.3)
    ax6.plot(np.mean(D,axis=1), color="black", alpha=1)
    ax6.grid(alpha=0.4)
    ax6.set_xlabel("time")
    # ax2.set_ylabel("standardised log returns")
    ax2.set_ylabel("close price")
    ax1.set_ylabel("agents")
    ax3.set_ylabel("log return")
    ax4.set_ylabel("portfolio")
    ax5.set_ylabel("net worth")
    ax6.set_ylabel("influence (I)")
    # fig.colorbar(im, cax=ax4)
    plt.tight_layout()
    plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Model probabilities (diffuse / enter / herd / initial activity).
# NOTE(review): `pd` shadows the `pandas as pd` import above; the analysis
# cell below re-imports pandas as `pds` to work around it — consider renaming.
pd = 0.05
pe = 0.01
ph = 0.0485
pa = 0.3
# N0 time steps, N1 agents; A/a/h are influence weights.
N0 = 2000
N1 = 10
A = 2
a = 1
h = 1
initial_account_balance = 1000
min_account_balance = 500
initial_stock_price = 100
drift = 0
max_look_back = 3
# G[t, i]: agent i's stance at time t (-1 sell, 0 inactive, +1 buy).
G = np.zeros(shape=(N0,N1))
G[0] = np.random.choice(a=[-1,0,1], p=[pa/2, 1-pa, pa/2], size=N1, replace=True)
# G[0] = ((np.arange(0,N1)*6//N1)%3)-1
# G[0] = ((np.arange(0,N1)*1//N1)%3)-1
P = np.zeros_like(G) # portfolio: number of stocks
N = np.zeros_like(G) # Net worth
B = np.zeros_like(G) # acc balance
B[0] = initial_account_balance # everyone start with 1000 money
N[0] = B[0] # noone has stock initially
D = np.zeros_like(G)
# X: log returns; S: price series.
X = np.zeros(N0)
S = np.zeros(N0)
S[0] = initial_stock_price
# each of the N1 agents has different treshold
treshold = np.random.random(size=N1)*10
# investor_type = np.random.choice(
#     a=[0,1,2], size=N1, replace=True,
#     p = [
#         1, # original CA
#         .0, # momentum strategy
#         .0, # market inverter
#     ]
# )
investor_type = np.random.choice(
    a=[0,1,2], size=N1, replace=True,
    p = [
        .7, # original CA
        .2, # momentum strategy
        .1, # market inverter
    ]
)
# Main simulation: one market step per iteration.
for t in range(N0-1):
    Ncl, Nk, k2coord, coord2k = cluster_info(G[t])
    # Market imbalance: each cluster contributes size * sum of its stances.
    Xt = 0
    for k, size in enumerate(Nk):
        tmp = 0
        for i in k2coord[k]:
            tmp += G[t,i]
        Xt += size * tmp
    X[t+1] = Xt/(10*N0)
    S[t+1] = S[t]*math.exp(X[t]) + drift
    xi = np.random.uniform(-1, 1, size=Ncl) # unique xi for each cluster k
    for i in range(N1):
        # Bookkeeping: execute last step's stance at the current price.
        P[t+1,i] = P[t,i] + G[t,i]
        # their next balance is their current balance minus
        # their purchase (or sell) of stock at current price
        B[t+1,i] = B[t,i] - (G[t,i] * S[t])
        N[t+1,i] = B[t,i] + (P[t,i]*S[t])
        if G[t,i] != 0:
            # =================================================================
            # original -------------------------------------------------------------------------------
            # k = coord2k[i]
            # total = 0
            # zeta = random.uniform(-1,1) # sampled for each unique (k,i)
            # for j in k2coord[k]: # for each coordinate in cluster k
            #     eta = random.uniform(-1,1) # different for each cell
            #     sigma = G[t,j]
            #     cluster_influence = A*xi[k]
            #     member_influence = 0#a*eta
            #     total += ((cluster_influence + member_influence) * sigma)
            # self_influence = h*zeta
            # I = (1 / len(k2coord[k])) * total + self_influence
            # p = 1 / (1 + math.exp(-2 * I))
            # same code but cleaner (only difference: no member influence) ----------------------------
            # k = coord2k[i]
            # zeta = random.uniform(-1,1) # sampled for each unique (k,i)
            # cluster_influence = A * xi[k] * np.mean(G[t,k2coord[k]])
            # self_influence = h * zeta
            # I = cluster_influence + self_influence
            # p = 1 / (1 + math.exp(-2 * I))
            # minimal version -------------------------------------------------------------------------
            # k = coord2k[i]
            # cluster_influence = A * trunc(np.mean(G[t,k2coord[k]]),1,-1)
            # self_influence = h * trunc(G[t,i],1,-1)
            # I = cluster_influence + self_influence
            # p = 1 / (1 + math.exp(-2 * I))
            # 3 agent model -------------------------------------------------------------------------
            if investor_type[i] == 0:
                # agent # 1: CA-style cluster + self influence
                k = coord2k[i]
                zeta = random.uniform(-1,1) # sampled for each unique (k,i)
                cluster_influence = A * trunc(np.mean(G[t,k2coord[k]]),3,-3) * xi[k]
                self_influence = h * trunc(G[t,i],3,-3) * zeta
                I = cluster_influence + self_influence
                p = 1 / (1 + math.exp(-2 * I))
            if investor_type[i] == 1:
                # momentum strategy: bias follows own performance * recent stance
                performance = (N[t,i] - initial_account_balance) / initial_account_balance
                lookback = min(t,max_look_back)
                strategy = np.mean(G[t-lookback:t+1,i])
                bias = performance * strategy * 10
                trimmed_bias = trunc(bias, 3, -3)
                # trimmed_bias = max(-10, min(10, bias))
                # normalised_bias = 2 / (1 + math.exp(-2 * trimmed_bias)) - 1
                # self_influence = normalised_bias * h
                self_influence = trimmed_bias * h
                I = self_influence
                p = 1 / (1 + math.exp(-2 * I))
            if investor_type[i] == 2:
                # market inverter: leans against the cumulative price change
                change = (S[t] - initial_stock_price) / initial_stock_price
                trigger = treshold[i] - abs(change) # when they decide to inverse others
                # stock goes up --> change = pos --> they inverse others --> their I = negative
                I = trunc(-change*5, 10, -10)
                p = 1 / (1 + math.exp(-2 * I))
            # =================================================================
            D[t,i] = I
            # Influence I sets the buy probability via a logistic squashing.
            if random.random() < p:
                G[t+1,i] = 1#trunc(round(I),2,1)
            else:
                G[t+1,i] = -1#trunc(-abs(round(I)),-1,-2)
        # if random.random() < p:
        #     G[t+1,i] = 1
        # else:
        #     G[t+1,i] = -1
        # trader influences non-active neighbour to join
        if G[t,i] != 0:
            stance = G[t,i]
            if random.random() < ph:
                if G[t,(i-1)%N1] == 0 and G[t,(i+1)%N1] == 0:
                    ni = np.random.choice([-1,1])
                    G[t+1,(i+ni)%N1] = np.random.choice([-1,1])
                elif G[t,(i-1)%N1] == 0:
                    G[t+1,(i-1)%N1] = np.random.choice([-1,1])
                elif G[t,(i+1)%N1] == 0:
                    G[t+1,(i+1)%N1] = np.random.choice([-1,1])
                else:
                    continue
        # active trader diffuses if it has inactive neighbour
        # only happens at edge of cluster
        if G[t,i] != 0:
            if random.random() < pd:
                if (G[t,(i-1)%N1] == 0) or (G[t,(i+1)%N1] == 0):
                    G[t+1,i] = 0
                else:
                    continue
        # nontrader enters market
        if G[t,i] == 0:
            if random.random() < pe:
                G[t+1,i] = np.random.choice([-1,1])
    # margin call
    still_ok = N[t] > min_account_balance
    G[t+1] = G[t+1] * still_ok

# Liquidate all positions at the final price so N[-1] is pure cash.
final_trade = P[-1] * S[-1]
B[-1] += final_trade
N[-1] = B[-1]

visualiseNICE(G,P,N,S,X,D)
# visualiseFAST(G,P,N,S,X,D)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import pandas as pds
df = pds.read_csv("../../data/all_world_indices_clean.csv")
df_spx = df[["Date", "SPX Index"]]
df_spx["Date"] = pds.to_datetime(df_spx["Date"], format='%d/%m/%Y')
df_spx = df_spx.sort_values(by="Date")
df_spx.reset_index(inplace=True)
series_array = np.array(df_spx["SPX Index"])
log_ret_dat = np.diff(np.log(series_array))
log_ret_dat_stan = (log_ret_dat - np.mean(log_ret_dat)) / np.std(log_ret_dat)
r = (X - np.mean(X)) / np.std(X)
print(np.std(r))
print(np.std(log_ret_dat_stan))
fig = plt.figure(figsize=(8, 8))
plt.hist(r, alpha=0.4, bins=30, label="CA", density=True)
plt.hist(log_ret_dat_stan, bins=30, alpha=0.4, label="S&P500", density=True)
plt.yscale("log")
plt.title("Log Return Distribution - Standardised")
plt.legend()
plt.grid(alpha=0.2)
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
fig = plt.figure(figsize=(8, 8))
plt.hist(X, alpha=0.2, bins=50, label="CA", density=True)
plt.hist(log_ret_dat, bins=50, alpha=0.2, label="S&P500", density=True)
plt.title("Log Return Distribution - Unstandardised")
plt.yscale("log")
plt.legend()
plt.grid(alpha=0.2)
plt.show()
## back calc'd log returns for CA
# fig = plt.figure(figsize=(8, 8))
# plt.hist(, alpha=0.2, bins=50, label="CA", density=True)
# plt.hist(log_ret_dat_stan, bins=50, alpha=0.2, label="S&P500", density=True)
# plt.title("Log Return Distribution")
# plt.legend()
# plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
import statsmodels.api as sm
import scipy.stats as stats
# Kernel density estimates of the two standardised return distributions.
x_eval = np.linspace(-3, 3, 50)
kde1 = stats.gaussian_kde(r)
plt.plot(x_eval, kde1(x_eval), color="C4", label="CA Returns")
kde2 = stats.gaussian_kde(log_ret_dat_stan)
plt.plot(x_eval, kde2(x_eval), color="C9", label="S&P Returns")
plt.grid(alpha=0.2)
plt.legend()
plt.xlabel("r")
plt.ylabel("Prob Density")
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Autocorrelation of the raw return series. Labels fixed: acf_x_price is
# computed from the CA returns (r), acf_sp_price from the standardised
# S&P 500 log returns — they were previously swapped.
acf_x_price = sm.tsa.stattools.acf(r)
acf_sp_price = sm.tsa.stattools.acf(log_ret_dat_stan)
x = np.arange(acf_x_price.shape[0])
mean_sp = np.mean(acf_sp_price)
fig = plt.figure(figsize=(15, 5))
plt.plot(x, acf_x_price, label="CA Returns")
plt.plot(x, acf_sp_price, label="S&P500 Returns")
plt.xlabel("Lag")
plt.ylabel("Autocorrelations")
plt.grid(alpha=0.2)
plt.legend()
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Autocorrelation of absolute returns (volatility clustering). Labels fixed:
# acf_x_vol comes from |r| (the CA series), acf_sp_vol from the |S&P 500|
# series — they were previously swapped.
acf_x_vol = sm.tsa.stattools.acf(np.abs(r))
acf_sp_vol = sm.tsa.stattools.acf(np.abs(log_ret_dat_stan))
x = np.arange(acf_x_vol.shape[0])
fig = plt.figure(figsize=(15, 5))
plt.plot(x, acf_x_vol, label="CA Volatility")
plt.plot(x, acf_sp_vol, label="S&P500 Volatility")
plt.xlabel("Lag")
plt.ylabel("Autocorrelations")
plt.grid(alpha=0.2)
plt.legend()
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
| 33.444181
| 125
| 0.530469
|
4a042b764b9010500cbaf643b23ce89e4b726a1c
| 21,644
|
py
|
Python
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/f5/bigiq_application_fastl4_tcp.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 1
|
2019-04-16T21:23:15.000Z
|
2019-04-16T21:23:15.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/f5/bigiq_application_fastl4_tcp.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 5
|
2020-02-26T20:10:50.000Z
|
2021-09-23T23:23:18.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/f5/bigiq_application_fastl4_tcp.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_application_fastl4_tcp
short_description: Manages BIG-IQ FastL4 TCP applications
description:
- Manages BIG-IQ applications used for load balancing a TCP-based application
with a FastL4 profile.
version_added: 2.6
options:
name:
description:
- Name of the new application.
required: True
description:
description:
- Description of the application.
servers:
description:
- A list of servers that the application is hosted on.
- If you are familiar with other BIG-IP setting, you might also refer to this
list as the list of pool members.
- When creating a new application, at least one server is required.
suboptions:
address:
description:
- The IP address of the server.
required: True
port:
description:
- The port of the server.
- When creating a new application and specifying a server, if this parameter
is not provided, the default of C(8000) will be used.
default: 8000
inbound_virtual:
description:
- Settings to configure the virtual which will receive the inbound connection.
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
required: True
netmask:
description:
- Specifies the netmask to associate with the given C(destination).
- This parameter is required when creating a new application.
required: True
port:
description:
- The port that the virtual listens for connections on.
- When creating a new application, if this parameter is not specified, the
default value of C(8080) will be used.
default: 8080
service_environment:
description:
- Specifies the name of service environment that the application will be
deployed to.
- When creating a new application, this parameter is required.
- The service environment type will be discovered by this module automatically.
Therefore, it is crucial that you maintain unique names for items in the
different service environment types.
- SSGs are not supported for this type of application.
add_analytics:
description:
- Collects statistics of the BIG-IP that the application is deployed to.
- This parameter is only relevant when specifying a C(service_environment) which
is a BIG-IP; not an SSG.
type: bool
default: no
state:
description:
- The state of the resource on the system.
- When C(present), guarantees that the resource exists with the provided attributes.
- When C(absent), removes the resource from the system.
default: present
choices:
- absent
- present
wait:
description:
- If the module should wait for the application to be created, deleted or updated.
type: bool
default: yes
extends_documentation_fragment: f5
notes:
- This module does not support updating of your application (whether deployed or not).
If you need to update the application, the recommended practice is to remove and
re-create.
- Requires BIG-IQ version 6.0 or greater.
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Load balance a TCP-based application with a FastL4 profile
bigiq_application_fastl4_tcp:
name: my-app
description: My description
service_environment: my-bigip-device
servers:
- address: 1.2.3.4
port: 8080
- address: 5.6.7.8
port: 8080
inbound_virtual:
name: foo
address: 2.2.2.2
netmask: 255.255.255.255
port: 443
provider:
password: secret
server: lb.mydomain.com
user: admin
state: present
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the application of the resource.
returned: changed
type: string
sample: My application
service_environment:
description: The environment which the service was deployed to.
returned: changed
type: string
sample: my-ssg1
inbound_virtual_destination:
description: The destination of the virtual that was created.
returned: changed
type: string
sample: 6.7.8.9
inbound_virtual_netmask:
description: The network mask of the provided inbound destination.
returned: changed
type: string
sample: 255.255.255.0
inbound_virtual_port:
description: The port the inbound virtual address listens on.
returned: changed
type: int
sample: 80
servers:
description: List of servers, and their ports, that make up the application.
type: complex
returned: changed
contains:
address:
description: The IP address of the server.
returned: changed
type: string
sample: 2.3.4.5
port:
description: The port that the server listens on.
returned: changed
type: int
sample: 8080
sample: hash/dictionary of values
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
    """Shared parameter handling for this module.

    Maps between the camelCase attribute names used by the BIG-IQ REST API
    and the snake_case names used by the Ansible module arguments.
    """

    # REST API attribute name -> module-side parameter name
    api_map = {
        'templateReference': 'template_reference',
        'subPath': 'sub_path',
        'configSetName': 'config_set_name',
        'defaultDeviceReference': 'default_device_reference',
        'addAnalytics': 'add_analytics'
    }

    # Attributes sent to the REST API when deploying the application
    api_attributes = [
        'resources', 'description', 'configSetName', 'subPath', 'templateReference',
        'defaultDeviceReference', 'addAnalytics'
    ]

    # Values reported back to the user in the module result
    returnables = [
        'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
        'default_device_reference', 'servers', 'inbound_virtual', 'add_analytics'
    ]

    # Values compared between 'want' and 'have' to detect changes
    updatables = [
        'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
        'default_device_reference', 'servers', 'add_analytics'
    ]
class ApiParameters(Parameters):
    """Parameters as returned by the BIG-IQ REST API; no extra processing needed."""
    pass
class ModuleParameters(Parameters):
    """Parameters supplied by the user, plus values resolved via the BIG-IQ API."""

    @property
    def http_profile(self):
        return "profile_http"

    @property
    def config_set_name(self):
        # The deployed config set is named after the application.
        return self.name

    @property
    def sub_path(self):
        return self.name

    @property
    def template_reference(self):
        """Resolve the selfLink of the default FastL4 TCP application template.

        :raises F5ModuleError: if the template cannot be found or the API
            reports an error.
        """
        # 'query' instead of 'filter' to avoid shadowing the builtin.
        query = "name+eq+'Default-f5-FastL4-TCP-lb-template'"
        uri = "https://{0}:{1}/mgmt/cm/global/templates/?$filter={2}&$top=1&$select=selfLink".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            query
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            # Message fixed: this module uses the FastL4 TCP template, not HTTP.
            raise F5ModuleError(
                "No default FastL4 TCP LB template was found."
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = dict(
            link=response['items'][0]['selfLink']
        )
        return result

    @property
    def default_device_reference(self):
        """Resolve the selfLink of the BIG-IP named by ``service_environment``.

        Accepts either an IP address or a hostname.

        :raises F5ModuleError: if the device cannot be found or the API
            reports an error.
        """
        if is_valid_ip(self.service_environment):
            # An IP address was specified
            query = "address+eq+'{0}'".format(self.service_environment)
        else:
            # Assume a hostname was specified
            query = "hostname+eq+'{0}'".format(self.service_environment)

        uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-adccore-allbigipDevices/devices/?$filter={2}&$top=1&$select=selfLink".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            query
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 200 and response['totalItems'] == 0:
            # Message fixed: previously read "was found", inverting the meaning.
            raise F5ModuleError(
                "The specified service_environment '{0}' was not found.".format(self.service_environment)
            )
        elif 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        result = dict(
            link=response['items'][0]['selfLink']
        )
        return result
class Changes(Parameters):
    """Base class for collecting and reporting changed parameter values."""

    def to_return(self):
        """Return a filtered dict of all returnable parameter values.

        Collection is best-effort: on any failure, whatever has been
        gathered so far (possibly nothing) is returned.
        """
        collected = {}
        try:
            for key in self.returnables:
                collected[key] = getattr(self, key)
            collected = self._filter_params(collected)
        except Exception:
            pass
        return collected
class UsableChanges(Changes):
    """Changes shaped for submission to the BIG-IQ apply-template API.

    NOTE(review): the hex-suffixed resource keys (e.g.
    'ltm:virtual:20e0ce0ae107') appear to be identifiers tied to the
    Default-f5-FastL4-TCP-lb-template -- confirm against the template
    definition before changing them.
    """

    @property
    def resources(self):
        # Aggregate every LTM resource required by the template into one dict.
        result = dict()
        result.update(self.tcp_monitor)
        result.update(self.virtual)
        result.update(self.pool)
        result.update(self.nodes)
        return result

    @property
    def virtual(self):
        # Virtual server built from the user's inbound_virtual settings.
        result = dict()
        result['ltm:virtual:20e0ce0ae107'] = [
            dict(
                parameters=dict(
                    name='virtual',
                    destinationAddress=self.inbound_virtual['address'],
                    mask=self.inbound_virtual['netmask'],
                    destinationPort=self.inbound_virtual.get('port', 8080)
                ),
                subcollectionResources=self.profiles
            )
        ]
        return result

    @property
    def profiles(self):
        # Profile subcollection attached to the virtual; no parameters needed.
        result = {
            'profiles:53f9b3028d90': [
                dict(
                    parameters=dict()
                )
            ]
        }
        return result

    @property
    def pool(self):
        result = dict()
        result['ltm:pool:9fa59a7bfc5c'] = [
            dict(
                parameters=dict(
                    name='pool_0'
                ),
                subcollectionResources=self.pool_members
            )
        ]
        return result

    @property
    def pool_members(self):
        # One member per server; each references its node by address.
        result = dict()
        result['members:3e91bd30bbfb'] = []
        for x in self.servers:
            member = dict(
                parameters=dict(
                    # port defaults to 8000 when the user omitted it
                    port=x.get('port', 8000),
                    nodeReference=dict(
                        link='#/resources/ltm:node:3e91bd30bbfb/{0}'.format(x['address']),
                        fullPath='# {0}'.format(x['address'])
                    )
                )
            )
            result['members:3e91bd30bbfb'].append(member)
        return result

    @property
    def tcp_monitor(self):
        result = dict()
        result['ltm:monitor:tcp:f864a2efffea'] = [
            dict(
                parameters=dict(
                    name='monitor-tcp'
                )
            )
        ]
        return result

    @property
    def nodes(self):
        # One LTM node per server address.
        result = dict()
        result['ltm:node:3e91bd30bbfb'] = []
        for x in self.servers:
            tmp = dict(
                parameters=dict(
                    name=x['address'],
                    address=x['address']
                )
            )
            result['ltm:node:3e91bd30bbfb'].append(tmp)
        return result

    @property
    def node_addresses(self):
        # Convenience list of all server addresses.
        result = [x['address'] for x in self.servers]
        return result
class ReportableChanges(Changes):
    """Changes shaped for reporting back to the user; no remapping needed."""
    pass
class Difference(object):
    """Computes which of the desired ('want') values differ from 'have'."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None when unchanged.

        Properties defined on this class take precedence over the generic
        want-vs-have comparison below.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic comparison: report the wanted value when it differs from
        # the current one, or when 'have' does not define the attribute.
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            return wanted
class ModuleManager(object):
    """Orchestrates create/delete of the application via BIG-IQ apply-template tasks."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        # Parameter properties need API access to resolve template/device links.
        self.want.client = self.client
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # Seed self.changes with every user-supplied returnable value.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Diff want vs have across updatables; returns True if anything changed.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Module entry point: apply the desired state and report changes."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface deprecation warnings accumulated during parameter handling.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        # Updating an existing application is not supported (see module notes);
        # an existing application is left untouched.
        if self.exists():
            return False
        else:
            return self.create()

    def exists(self):
        """Whether an application with the wanted name is already deployed."""
        uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList?$filter=name+eq+'{2}'".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.name
        )
        resp = self.client.api.get(uri)

        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if resp.status == 200 and 'result' in response and 'totalItems' in response['result'] and response['result']['totalItems'] == 0:
            return False
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self_link = self.remove_from_device()
        if self.want.wait:
            self.wait_for_apply_template_task(self_link)
            if self.exists():
                raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Validate required creation parameters and deploy the application."""
        if self.want.service_environment is None:
            raise F5ModuleError(
                "A 'service_environment' must be specified when creating a new application."
            )
        if self.want.servers is None:
            raise F5ModuleError(
                "At least one 'servers' item is needed when creating a new application."
            )
        if self.want.inbound_virtual is None:
            raise F5ModuleError(
                "An 'inbound_virtual' must be specified when creating a new application."
            )
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self_link = self.create_on_device()
        if self.want.wait:
            self.wait_for_apply_template_task(self_link)
            if not self.exists():
                raise F5ModuleError(
                    "Failed to deploy application."
                )
        return True

    def create_on_device(self):
        """POST an apply-template task in CREATE mode; returns the task selfLink."""
        params = self.changes.api_params()
        params['mode'] = 'CREATE'
        uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )

        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        return response['selfLink']

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """POST an apply-template task in DELETE mode; returns the task selfLink."""
        params = dict(
            configSetName=self.want.name,
            mode='DELETE'
        )
        uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp._content)
        return response['selfLink']

    def wait_for_apply_template_task(self, self_link):
        """Poll the given task every 5s until it reports FINISHED/DONE.

        NOTE(review): loops forever if the task never finishes and never
        reports an errorMessage -- confirm whether a timeout is needed.
        """
        host = 'https://{0}:{1}'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        # Task selfLinks are reported relative to localhost; rewrite to host.
        uri = self_link.replace('https://localhost', host)

        while True:
            resp = self.client.api.get(uri)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if response['status'] == 'FINISHED' and response.get('currentStep', None) == 'DONE':
                return True
            elif 'errorMessage' in response:
                raise F5ModuleError(response['errorMessage'])
            time.sleep(5)
class ArgumentSpec(object):
    """Defines the module's argument specification for AnsibleModule."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            description=dict(),
            servers=dict(
                type='list',
                options=dict(
                    address=dict(required=True),
                    # type='int' makes Ansible coerce user-supplied string
                    # values; previously only the default was an int.
                    port=dict(type='int', default=8000)
                )
            ),
            inbound_virtual=dict(
                type='dict',
                options=dict(
                    address=dict(required=True),
                    netmask=dict(required=True),
                    port=dict(type='int', default=8080)
                )
            ),
            service_environment=dict(),
            # Real booleans instead of 'yes'/'no' strings; Ansible coerces
            # both identically, but proper bools are the documented style.
            add_analytics=dict(type='bool', default=False),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            wait=dict(type='bool', default=True)
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: parse arguments, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )

    # Initialize to None so the except handler below cannot raise a NameError
    # when F5RestClient construction itself is what fails.
    client = None
    try:
        client = F5RestClient(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        exit_json(module, results, client)
    except F5ModuleError as ex:
        fail_json(module, ex, client)


if __name__ == '__main__':
    main()
| 31.505095
| 146
| 0.593975
|
4a042c2ca047f0969990d728e6be0094445b4950
| 293
|
py
|
Python
|
test/fasta/file.py
|
rroutsong/rosalind
|
e1bd2765261afeab0fd7c71808ebd71bf43f24b8
|
[
"Unlicense"
] | null | null | null |
test/fasta/file.py
|
rroutsong/rosalind
|
e1bd2765261afeab0fd7c71808ebd71bf43f24b8
|
[
"Unlicense"
] | null | null | null |
test/fasta/file.py
|
rroutsong/rosalind
|
e1bd2765261afeab0fd7c71808ebd71bf43f24b8
|
[
"Unlicense"
] | null | null | null |
# parse fasta, determine highest gc_content and return results
import dna

fidna = dna.fasta_parse('FASTA.fas')

gc_highest = ['', 0]
for name, seq in fidna:
    # Compute GC content once per record (was computed twice per iteration).
    gc = dna.gc_content(seq)
    if gc > gc_highest[1]:
        gc_highest = [name, gc]

# Parenthesized print of a single expression works in both Python 2 and 3.
print(gc_highest[0] + '\n' + str(gc_highest[1]))
| 24.416667
| 62
| 0.668942
|
4a042c48bb1f8c524cc6538913de33ac47dc1f51
| 17,035
|
py
|
Python
|
src/nanoemoji/color_glyph.py
|
rsheeter/nanoemoji
|
d65a6271541304db60e65accd6e81f7f5a20a381
|
[
"Apache-2.0"
] | 3
|
2020-04-05T08:39:30.000Z
|
2020-04-05T12:51:18.000Z
|
src/nanoemoji/color_glyph.py
|
rsheeter/nanoemoji
|
d65a6271541304db60e65accd6e81f7f5a20a381
|
[
"Apache-2.0"
] | 2
|
2020-04-16T18:50:33.000Z
|
2020-04-17T18:19:37.000Z
|
src/nanoemoji/color_glyph.py
|
rsheeter/nanoemoji
|
d65a6271541304db60e65accd6e81f7f5a20a381
|
[
"Apache-2.0"
] | 1
|
2020-04-16T18:46:50.000Z
|
2020-04-16T18:46:50.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from absl import logging
import dataclasses
from itertools import chain, groupby, combinations
from lxml import etree # type: ignore
from nanoemoji.colors import Color
from nanoemoji.config import FontConfig
from nanoemoji.paint import (
Extend,
ColorStop,
CompositeMode,
Paint,
PaintColrLayers,
PaintComposite,
PaintGlyph,
PaintLinearGradient,
PaintRadialGradient,
PaintSolid,
)
from nanoemoji.png import PNG
from picosvg.geometric_types import Point, Rect
from picosvg.svg_meta import number_or_percentage
from picosvg.svg_reuse import normalize, affine_between
from picosvg.svg_transform import Affine2D
from picosvg.svg import SVG, SVGTraverseContext
from picosvg.svg_types import (
SVGPath,
SVGLinearGradient,
SVGRadialGradient,
intersection,
)
from typing import Generator, NamedTuple, Optional, Sequence, Tuple
import ufoLib2
from ufoLib2.objects.glyph import Glyph as UfoGlyph
import pathops
def scale_viewbox_to_font_metrics(
    view_box: Rect, ascender: int, descender: int, width: int
):
    """Map the viewBox into font metrics: fill the em height, center horizontally."""
    assert descender <= 0
    # Uniform scale that makes the viewBox height equal ascender - descender.
    factor = (ascender - descender) / view_box.h
    # Horizontal shift that centers the scaled viewBox within the advance width.
    x_shift = (width - factor * view_box.w) / 2
    # First move the viewBox origin to (0, 0), then scale and center.
    normalize_origin = Affine2D(1, 0, 0, 1, -view_box.x, -view_box.y)
    scale_and_center = Affine2D(factor, 0, 0, factor, x_shift, 0)
    return Affine2D.compose_ltr((normalize_origin, scale_and_center))
def map_viewbox_to_font_space(
    view_box: Rect, ascender: int, descender: int, width: int, user_transform: Affine2D
) -> Affine2D:
    """Transform from viewBox coordinates to font (y-up) coordinates."""
    metrics = scale_viewbox_to_font_metrics(view_box, ascender, descender, width)
    # Flip the y axis (SVG is y-down, fonts are y-up) and shift so content
    # lands between descender and ascender.
    flip_y = Affine2D(1, 0, 0, -1, 0, ascender)
    return Affine2D.compose_ltr([metrics, flip_y, user_transform])
# https://docs.microsoft.com/en-us/typography/opentype/spec/svg#coordinate-systems-and-glyph-metrics
def map_viewbox_to_otsvg_space(
    view_box: Rect, ascender: int, descender: int, width: int, user_transform: Affine2D
) -> Affine2D:
    """Transform from viewBox coordinates to OT-SVG document coordinates."""
    metrics = scale_viewbox_to_font_metrics(view_box, ascender, descender, width)
    # OT-SVG keeps y-down but places glyph content in the [+x, -y] quadrant.
    shift_up = Affine2D(1, 0, 0, 1, 0, -ascender)
    return Affine2D.compose_ltr([metrics, shift_up, user_transform])
def _get_gradient_transform(
    config: FontConfig,
    grad_el: etree.Element,
    shape_bbox: Rect,
    view_box: Rect,
    glyph_width: int,
) -> Affine2D:
    """Compose the full transform taking gradient coordinates into font space."""
    transform = map_viewbox_to_font_space(
        view_box, config.ascender, config.descender, glyph_width, config.transform
    )

    # objectBoundingBox units are relative to the shape's bbox;
    # userSpaceOnUse coordinates are already in viewBox space.
    if grad_el.attrib.get("gradientUnits", "objectBoundingBox") == "objectBoundingBox":
        to_bbox = Affine2D.rect_to_rect(Rect(0, 0, 1, 1), shape_bbox)
        transform = Affine2D.compose_ltr((to_bbox, transform))

    gradient_transform = grad_el.attrib.get("gradientTransform")
    if gradient_transform is not None:
        transform = Affine2D.compose_ltr(
            (Affine2D.fromstring(gradient_transform), transform)
        )

    return transform
def _parse_linear_gradient(
    config: FontConfig,
    grad_el: etree.Element,
    shape_bbox: Rect,
    view_box: Rect,
    glyph_width: int,
    shape_opacity: float = 1.0,
):
    """Build a PaintLinearGradient from an SVG <linearGradient> element."""
    gradient = SVGLinearGradient.from_element(grad_el, view_box)

    start = Point(gradient.x1, gradient.y1)
    end = Point(gradient.x2, gradient.y2)
    # The paint needs a third point: end rotated 90 degrees
    # counter-clockwise around start.
    rotated = start + (end - start).perpendicular()

    transform = _get_gradient_transform(
        config, grad_el, shape_bbox, view_box, glyph_width
    )
    return PaintLinearGradient(  # pytype: disable=wrong-arg-types
        p0=start,
        p1=end,
        p2=rotated,
        **_common_gradient_parts(grad_el, shape_opacity),
    ).apply_transform(transform)
def _parse_radial_gradient(
    config: FontConfig,
    grad_el: etree.Element,
    shape_bbox: Rect,
    view_box: Rect,
    glyph_width: int,
    shape_opacity: float = 1.0,
):
    """Build a PaintRadialGradient from an SVG <radialGradient> element."""
    gradient = SVGRadialGradient.from_element(grad_el, view_box)

    kwargs = dict(
        c0=Point(gradient.fx, gradient.fy),  # focal circle
        r0=gradient.fr,
        c1=Point(gradient.cx, gradient.cy),  # outer circle
        r1=gradient.r,
    )
    kwargs.update(_common_gradient_parts(grad_el, shape_opacity))

    transform = _get_gradient_transform(
        config, grad_el, shape_bbox, view_box, glyph_width
    )
    return PaintRadialGradient(  # pytype: disable=wrong-arg-types
        **kwargs
    ).apply_transform(transform)
# Dispatch from SVG gradient element local name to its parser.
_GRADIENT_INFO = {
    "linearGradient": _parse_linear_gradient,
    "radialGradient": _parse_radial_gradient,
}
def _color_stop(stop_el, shape_opacity=1.0) -> ColorStop:
    """Convert an SVG <stop> element into a ColorStop, folding in opacity."""
    attrs = stop_el.attrib
    offset = number_or_percentage(attrs.get("offset", "0"))
    color = Color.fromstring(attrs.get("stop-color", "black"))
    stop_opacity = number_or_percentage(attrs.get("stop-opacity", "1"))
    # Effective alpha is the product of color, stop, and owning-shape opacity.
    color = color._replace(alpha=color.alpha * stop_opacity * shape_opacity)
    return ColorStop(stopOffset=offset, color=color)
def _common_gradient_parts(el, shape_opacity=1.0):
    """Extract the extend mode and color stops shared by all gradient paints."""
    spread_method = el.attrib.get("spreadMethod", "pad").upper()
    if spread_method not in Extend.__members__:
        raise ValueError(f"Unknown spreadMethod {spread_method}")
    stops = tuple(_color_stop(stop_el, shape_opacity) for stop_el in el)
    return {"extend": Extend.__members__[spread_method], "stops": stops}
def _paint(
    debug_hint: str, config: FontConfig, picosvg: SVG, shape: SVGPath, glyph_width: int
) -> Paint:
    """Resolve a shape's fill into a Paint (solid color or gradient)."""
    if not shape.fill.startswith("url("):
        return PaintSolid(color=Color.fromstring(shape.fill, alpha=shape.opacity))

    el = picosvg.resolve_url(shape.fill, "*")
    try:
        return _GRADIENT_INFO[etree.QName(el).localname](
            config,
            el,
            shape.bounding_box(),
            picosvg.view_box(),
            glyph_width,
            shape.opacity,
        )
    except ValueError as e:
        raise ValueError(
            f"parse failed for {debug_hint}, {etree.tostring(el)[:128]}"
        ) from e
def _paint_glyph(
    debug_hint: str,
    config: FontConfig,
    picosvg: SVG,
    context: SVGTraverseContext,
    glyph_width: int,
) -> Paint:
    """Wrap a shape in a PaintGlyph whose child paint is its resolved fill."""
    shape = context.shape()
    if not shape.fill.startswith("url("):
        fill_paint = PaintSolid(
            color=Color.fromstring(shape.fill, alpha=shape.opacity)
        )
    else:
        fill_el = picosvg.resolve_url(shape.fill, "*")
        try:
            fill_paint = _GRADIENT_INFO[etree.QName(fill_el).localname](
                config,
                fill_el,
                shape.bounding_box(),
                picosvg.view_box(),
                glyph_width,
                shape.opacity,
            )
        except ValueError as e:
            raise ValueError(
                f"parse failed for {debug_hint}, {etree.tostring(fill_el)[:128]}"
            ) from e
    return PaintGlyph(glyph=shape.as_path().d, paint=fill_paint)
def _intersect(path1: SVGPath, path2: SVGPath) -> bool:
    """Whether two paths overlap; assumes they do if pathops errors out."""
    # Try computing intersection using pathops; if for whatever reason it fails
    # (probably some bug) then be on the safe side and assume we do have one...
    try:
        overlap = intersection((path1, path2))
    except pathops.PathOpsError:
        logging.error(
            "pathops failed to compute intersection:\n- %s\n- %s", path1.d, path2.d
        )
        return True
    return bool(overlap)
def _any_overlap_with_reversing_transform(
    debug_hint: str, paths: Sequence[SVGPath], transforms: Sequence[Affine2D]
) -> bool:
    """True when any overlapping pair of paths involves a reversing transform.

    A negative determinant flips winding direction, which causes fill
    problems when flipped and unflipped shapes overlap.
    """
    reverses = [t.determinant() < 0 for t in transforms]
    for i, j in combinations(range(len(paths)), 2):
        if not (reverses[i] or reverses[j]):
            continue
        if not _intersect(paths[i], paths[j]):
            continue
        logging.info(
            "%s contains reusable paths that overlap and have a reversing "
            "transform; decomposed to avoid winding issues:\n"
            "- %s\n transform: %s\n"
            "- %s\n transform: %s",
            debug_hint,
            paths[i].d,
            transforms[i],
            paths[j].d,
            transforms[j],
        )
        return True
    return False
def _painted_layers(
    debug_hint: str,
    config: FontConfig,
    picosvg: SVG,
    glyph_width: int,
) -> Tuple[Paint, ...]:
    """Convert a picosvg into the tuple of root-level Paints for this glyph.

    Walks the svg depth-first in reverse so shapes (leaves) are visited
    before the groups containing them; ``layers`` acts as a stack indexed
    by depth. A transparent group becomes a SRC_IN composite of its
    children against a transparent solid backdrop.
    """
    defs_seen = False
    layers = []
    # Reverse to get leaves first because that makes building Paint's easier
    # shapes *must* be leaves per picosvg
    for context in reversed(tuple(picosvg.depth_first())):
        if context.depth() == 0:
            continue  # svg root
        # picosvg will deliver us exactly one defs
        if context.path == "/svg[0]/defs[0]":
            assert not defs_seen
            defs_seen = True
            continue  # defs are pulled in by the consuming paints
        if context.is_shape():
            # Grow the stack so this shape can be appended at its depth.
            while len(layers) < context.depth():
                layers.append([])
            assert len(layers) == context.depth()
            layers[context.depth() - 1].append(
                _paint_glyph(debug_hint, config, picosvg, context, glyph_width)
            )
        if context.is_group():
            # flush child shapes into a new group
            opacity = float(context.element.get("opacity", 1.0))
            assert (
                0.0 < opacity < 1.0
            ), f"{debug_hint} {context.path} should be transparent"
            assert (
                len(layers) == context.depth() + 1
            ), "Should have a list of child nodes"
            child_nodes = layers.pop(context.depth())
            assert (
                len(child_nodes) > 1
            ), f"{debug_hint} {context.path} should have 2+ children"
            assert {"opacity"} == set(
                context.element.attrib.keys()
            ), f"{debug_hint} {context.path} only attribute should be opacity. Found {context.element.attrib.keys()}"
            # insert reversed to undo the reversed at the top of loop
            paint = PaintComposite(
                mode=CompositeMode.SRC_IN,
                source=PaintColrLayers(tuple(reversed(child_nodes))),
                backdrop=PaintSolid(Color(0, 0, 0, opacity)),
            )
            layers[context.depth() - 1].append(paint)

    assert defs_seen, f"{debug_hint} we never saw defs, what's up with that?!"
    if not layers:
        return ()
    assert len(layers) == 1, f"Unexpected layers: {[len(l) for l in layers]}"
    # undo the reversed at the top of loop
    layers = reversed(layers[0])
    return tuple(layers)
def _advance_width(view_box: Rect, config: FontConfig) -> int:
    """Advance width scaled to the viewBox aspect ratio, floored at the default."""
    # Scale advance width proportionally to viewbox aspect ratio.
    # Use the default advance width if it's larger than the proportional one.
    em_height = config.ascender - config.descender  # descender <= 0
    proportional = round(em_height * view_box.w / view_box.h)
    return max(config.width, proportional)
def _mutating_traverse(paint, mutator):
    """Depth-first rewrite of a Paint tree.

    Applies ``mutator`` to ``paint`` first, then recurses into Paint-typed
    dataclass fields and into PaintColrLayers children. Unchanged subtrees
    are shared; a new Paint is built via dataclasses.replace only when
    something actually changed.
    """
    paint = mutator(paint)
    assert paint is not None, "Return the input for no change, not None"
    try:
        fields = dataclasses.fields(paint)
    except TypeError as e:
        raise ValueError(f"{paint} is not a dataclass?") from e
    changes = {}
    for field in fields:
        try:
            is_paint = issubclass(field.type, Paint)
        except TypeError:  # typing.Tuple and friends helpfully fail issubclass
            is_paint = False
        if is_paint:
            current = getattr(paint, field.name)
            modified = _mutating_traverse(current, mutator)
            # Identity check: mutators return the input object for "no change".
            if current is not modified:
                changes[field.name] = modified

    # PaintColrLayers, uniquely, has a tuple of paint
    if isinstance(paint, PaintColrLayers):
        new_layers = list(paint.layers)
        for i, current in enumerate(paint.layers):
            modified = _mutating_traverse(current, mutator)
            if current is not modified:
                new_layers[i] = modified
        new_layers = tuple(new_layers)
        if new_layers != paint.layers:
            changes["layers"] = tuple(new_layers)

    if changes:
        paint = dataclasses.replace(paint, **changes)

    return paint
class ColorGlyph(NamedTuple):
    """A single color glyph: source artwork plus its font-side bookkeeping."""

    ufo: ufoLib2.Font
    svg_filename: str  # empty string means no svg or bitmap filenames
    bitmap_filename: str
    ufo_glyph_name: str  # only if config has keep_glyph_names will this match in font binary
    glyph_id: int
    codepoints: Tuple[int, ...]
    painted_layers: Optional[Tuple[Paint, ...]]  # None for untouched and bitmap formats
    svg: Optional[SVG]  # None for bitmap formats
    user_transform: Affine2D
    bitmap: Optional[PNG]  # None for vector formats
    @staticmethod
    def create(
        font_config: FontConfig,
        ufo: ufoLib2.Font,
        svg_filename: str,
        glyph_id: int,
        ufo_glyph_name: str,
        codepoints: Tuple[int, ...],
        svg: Optional[SVG],
        bitmap_filename: str = "",
        bitmap: Optional[PNG] = None,
    ) -> "ColorGlyph":
        """Build a ColorGlyph and its backing UFO glyph.

        Creates (or reuses) the UFO glyph, sets its advance width from the
        svg viewBox or bitmap size, assigns a direct cmap entry for
        single-codepoint glyphs, and extracts Paint layers for picosvg
        inputs.
        """
        logging.debug(
            " ColorGlyph for %s (%s)", svg_filename or bitmap_filename, codepoints
        )
        if ufo_glyph_name not in ufo:
            base_glyph = ufo.newGlyph(ufo_glyph_name)
        else:
            base_glyph = ufo[ufo_glyph_name]

        # non-square aspect ratio == proportional width; square == monospace
        view_box = None
        if svg:
            view_box = svg.view_box()
        elif bitmap:
            view_box = Rect(0, 0, *bitmap.size)
        if view_box is not None:
            base_glyph.width = _advance_width(view_box, font_config)
        else:
            base_glyph.width = font_config.width

        # Setup direct access to the glyph if possible
        if len(codepoints) == 1:
            base_glyph.unicode = next(iter(codepoints))

        # Grab the transform + (color, glyph) layers unless they aren't to be touched
        # or cannot possibly paint
        painted_layers = ()
        if not font_config.transform.is_degenerate():
            if font_config.has_picosvgs:
                painted_layers = tuple(
                    _painted_layers(svg_filename, font_config, svg, base_glyph.width)
                )
        return ColorGlyph(
            ufo,
            svg_filename,
            bitmap_filename,
            ufo_glyph_name,
            glyph_id,
            codepoints,
            painted_layers,
            svg,
            font_config.transform,
            bitmap,
        )
def _has_viewbox_for_transform(self) -> bool:
view_box = None
if self.svg:
view_box = self.svg.view_box()
if view_box is None:
logging.warning(
f"{self.ufo.info.familyName} has no viewBox; no transform will be applied"
)
return view_box is not None
def _transform(self, map_fn):
if not self._has_viewbox_for_transform():
return Affine2D.identity()
return map_fn(
self.svg.view_box(),
self.ufo.info.ascender,
self.ufo.info.descender,
self.ufo_glyph.width,
self.user_transform,
)
def transform_for_otsvg_space(self):
return self._transform(map_viewbox_to_otsvg_space)
def transform_for_font_space(self):
return self._transform(map_viewbox_to_font_space)
@property
def ufo_glyph(self) -> UfoGlyph:
return self.ufo[self.ufo_glyph_name]
def colors(self):
"""Set of Color used by this glyph."""
all_colors = set()
self.traverse(lambda paint: all_colors.update(paint.colors()))
return all_colors
def traverse(self, visitor):
def _traverse_callback(paint):
visitor(paint)
return paint
for p in self.painted_layers:
_mutating_traverse(p, _traverse_callback)
def mutating_traverse(self, mutator) -> "ColorGlyph":
return self._replace(
painted_layers=tuple(
_mutating_traverse(p, mutator) for p in self.painted_layers
)
)
| 33.013566
| 117
| 0.629704
|
4a042cb927b64ffa3f9ae39d7363f371014e42dc
| 153
|
py
|
Python
|
stock_links/apps.py
|
ericpesto/Archeon-Django-REST-API
|
e02b871b95c5247d83580acfe25f6ec299fdb9b1
|
[
"MIT"
] | 1
|
2021-06-07T17:31:23.000Z
|
2021-06-07T17:31:23.000Z
|
stock_links/apps.py
|
ericpesto/Archeon-Django-REST-API
|
e02b871b95c5247d83580acfe25f6ec299fdb9b1
|
[
"MIT"
] | null | null | null |
stock_links/apps.py
|
ericpesto/Archeon-Django-REST-API
|
e02b871b95c5247d83580acfe25f6ec299fdb9b1
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class StockLinksConfig(AppConfig):
    """Django application configuration for the ``stock_links`` app."""

    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+ setting).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'stock_links'
| 21.857143
| 56
| 0.771242
|
4a042cd9643e45820524476a7a86a40dc229857a
| 727
|
py
|
Python
|
backend/server/migrations/0001_initial.py
|
yacf/yacf
|
805bd0e077f39e4351d497c7cc2c7937b7def43f
|
[
"MIT"
] | null | null | null |
backend/server/migrations/0001_initial.py
|
yacf/yacf
|
805bd0e077f39e4351d497c7cc2c7937b7def43f
|
[
"MIT"
] | 22
|
2019-12-09T20:24:23.000Z
|
2022-02-26T18:43:46.000Z
|
backend/server/migrations/0001_initial.py
|
yacf/yacf
|
805bd0e077f39e4351d497c7cc2c7937b7def43f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.9 on 2021-01-02 20:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Theme`` model (name + four color fields)."""

    initial = True

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Theme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=25)),
                # Color fields; max_length=8 fits '#RRGGBBAA'-style hex strings —
                # TODO confirm the intended format against the serializers.
                ('primary', models.CharField(max_length=8)),
                ('secondary', models.CharField(max_length=8)),
                # NOTE(review): 'foucs' looks like a typo for 'focus', but the column
                # name is part of the applied schema — renaming requires a new migration.
                ('foucs', models.CharField(max_length=8)),
                ('accent', models.CharField(max_length=8)),
            ],
        ),
    ]
| 27.961538
| 114
| 0.56121
|
4a042d9959a120b1ea284c7ee1ec4fb58f2ad932
| 11,754
|
py
|
Python
|
utils/data_builder.py
|
Devwalkar/BOC-KD
|
a19bab41ec450a8d933371501ff1f1db934be5f3
|
[
"MIT"
] | 2
|
2020-09-15T11:46:21.000Z
|
2020-09-25T13:35:22.000Z
|
utils/data_builder.py
|
Devwalkar/BOC-KD
|
a19bab41ec450a8d933371501ff1f1db934be5f3
|
[
"MIT"
] | null | null | null |
utils/data_builder.py
|
Devwalkar/BOC-KD
|
a19bab41ec450a8d933371501ff1f1db934be5f3
|
[
"MIT"
] | 2
|
2020-11-09T13:40:45.000Z
|
2020-11-27T02:17:27.000Z
|
import torchvision.datasets as Data
import torchvision.transforms as transforms
import torch.utils.data as TD
import torch
import os
from .Caltech_loader import Caltech256
from .ImageNet_loader import ImageNetDataset
from .CUB2011_loader import Cub2011
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def Dataset_Loader(configer):
    """Build and return ``(train_loader, test_loader)`` for the configured dataset.

    Reads ``configer.dataset_cfg`` for the dataset id (name/root/download flag)
    and per-split batch settings, and ``configer.model['name']`` to pick
    input-size-appropriate transforms.

    NOTE(review): the transform builder below only handles CIFAR10, CIFAR100,
    CUB2011 and Imagenet, yet the dataset builder also has branches for MNIST,
    Fashion-MNIST, SVHN, STL10 and Caltech — those names raise ImportError in
    the transform step before their dataset branch is ever reached.
    """
    # This helper function loads the config dataset and created the torch Dataloader
    data_configer = configer.dataset_cfg
    Dataset_name = data_configer['id_cfg']['name']
    Data_root = data_configer['id_cfg']['root']
    Data_download = data_configer['id_cfg']['download']
    Model = configer.model["name"]

    ####### Image Transforms builder
    if Dataset_name == "CIFAR10":
        if "Efficientnet" in Model:
            # EfficientNet variants expect 224x224 inputs, so upscale CIFAR-10.
            img_transform = transforms.Compose([transforms.Resize((224,224)),
                                                transforms.Pad(30),
                                                #transforms.RandomAffine((-5,5)),
                                                transforms.RandomHorizontalFlip(p=0.5),
                                                transforms.RandomCrop((224,224)),
                                                transforms.ToTensor(),
                                                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                                ])
            test_transform = transforms.Compose([transforms.Resize((224,224)),
                                                 transforms.ToTensor(),
                                                 transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                                 ])
        else:
            # Native 32x32 pipeline: pad-then-random-crop plus horizontal flip.
            img_transform = transforms.Compose([transforms.Pad(4),
                                                #transforms.RandomAffine((-20,20)),
                                                transforms.RandomHorizontalFlip(p=0.5),
                                                transforms.RandomCrop((32,32)),
                                                transforms.ToTensor(),
                                                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                                ])
            test_transform = transforms.Compose([transforms.ToTensor(),
                                                 transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                                 ])
    elif Dataset_name == "CIFAR100":
        # Normalization is deliberately commented out here (raw [0,1] tensors).
        img_transform = transforms.Compose([transforms.Pad(4),
                                            #transforms.RandomAffine((-20,20)),
                                            transforms.RandomHorizontalFlip(p=0.5),
                                            transforms.RandomCrop((32,32)),
                                            transforms.ToTensor()
                                            #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                            ])
        test_transform = transforms.Compose([transforms.ToTensor()
                                             #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                             ])
    elif Dataset_name == "CUB2011":
        # Birds dataset resized to 64x64; no normalization (commented out).
        img_transform = transforms.Compose([transforms.Resize((64,64)),
                                            transforms.Pad(4),
                                            #transforms.RandomAffine((-20,20)),
                                            transforms.RandomHorizontalFlip(p=0.5),
                                            transforms.RandomCrop((64,64)),
                                            transforms.ToTensor()
                                            #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                            ])
        test_transform = transforms.Compose([transforms.Resize((64,64)),
                                             transforms.ToTensor()
                                             #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                             ])
    elif Dataset_name == "Imagenet":
        img_transform = transforms.Compose([transforms.Resize((224,224)),
                                            transforms.Pad(50),
                                            transforms.RandomAffine((-10,10)),
                                            transforms.RandomCrop((224,224)),
                                            transforms.RandomHorizontalFlip(p=0.5),
                                            transforms.ToTensor(),
                                            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                                            ])
        test_transform = transforms.Compose([transforms.Resize((224,224)),
                                             transforms.ToTensor(),
                                             transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                                             ])
    else:
        # NOTE(review): reached for every dataset name not listed above,
        # including ones the builder below claims to support (e.g. MNIST).
        raise ImportError("DL model architecture not supported for Img transforms")

    ####### Dataset train and test builder
    Train_configer = data_configer['train_cfg']
    Val_configer = data_configer['val_cfg']

    if Dataset_name == "MNIST":     # Shape: (1,28,28)
        # NOTE(review): download and no-download branches use different roots
        # (Data_root vs Data_root/MNIST) — verify this asymmetry is intentional.
        if Data_download:
            Trainloader = Data.MNIST(Data_root,download=True,train=True,transform = img_transform)
            Testloader = Data.MNIST(Data_root,download=True,train=False, transform = test_transform)
        else:
            Trainloader = Data.MNIST(os.path.join(Data_root,"MNIST"),download=False,train=True,transform = img_transform)
            Testloader = Data.MNIST(os.path.join(Data_root,"MNIST"),download=False,train=False, transform = test_transform)
    elif Dataset_name == "CIFAR10":     # Shape: (3,32,32)
        if Data_download:
            # Ensure the target directory exists before torchvision downloads into it.
            if not os.path.isdir(os.path.join(Data_root,"cifar10")):
                os.mkdir(os.path.join(Data_root,"cifar10"))
            Trainloader = Data.CIFAR10(os.path.join(Data_root,"cifar10"),download=True,train=True,transform = img_transform)
            Testloader = Data.CIFAR10(os.path.join(Data_root,"cifar10"),download=True,train=False, transform = test_transform)
        else:
            Trainloader = Data.CIFAR10(os.path.join(Data_root,"cifar10"),download=False,train=True,transform = img_transform)
            Testloader = Data.CIFAR10(os.path.join(Data_root,"cifar10"),download=False,train=False, transform = test_transform)
    elif Dataset_name == "CIFAR100":     # Shape: (3,32,32)
        if Data_download:
            Trainloader = Data.CIFAR100(Data_root,download=True,train=True,transform = img_transform)
            Testloader = Data.CIFAR100(Data_root,download=True,train=False, transform = test_transform)
        else:
            Trainloader = Data.CIFAR100(Data_root,download=False,train=True,transform = img_transform)
            Testloader = Data.CIFAR100(Data_root,download=False,train=False, transform = test_transform)
    elif Dataset_name == "Fashion-MNIST":
        # NOTE(review): same root asymmetry as MNIST above.
        if Data_download:
            Trainloader = Data.FashionMNIST(Data_root,download=True,train=True,transform = img_transform)
            Testloader = Data.FashionMNIST(Data_root,download=True,train=False, transform = test_transform)
        else:
            Trainloader = Data.FashionMNIST(os.path.join(Data_root,"Fashion-MNIST"),download=False,train=True,transform = img_transform)
            Testloader = Data.FashionMNIST(os.path.join(Data_root,"Fashion-MNIST"),download=False,train=False, transform = test_transform)
    elif Dataset_name == "SVHN":
        if Data_download:
            if not os.path.isdir(os.path.join(Data_root,"SVHN")):
                os.mkdir(os.path.join(Data_root,"SVHN"))
            Trainloader = Data.SVHN(os.path.join(Data_root,"SVHN"),download=True,split="train",transform = img_transform)
            Testloader = Data.SVHN(os.path.join(Data_root,"SVHN"),download=True,split="test", transform = test_transform)
        else:
            Trainloader = Data.SVHN(os.path.join(Data_root,"SVHN"),download=False,split="train",transform = img_transform)
            Testloader = Data.SVHN(os.path.join(Data_root,"SVHN"),download=False,split="test", transform = test_transform)
    elif Dataset_name == "STL10":
        if Data_download:
            Trainloader = Data.STL10(os.path.join(Data_root),download=True,split="train",transform = img_transform)
            Testloader = Data.STL10(os.path.join(Data_root),download=True,split="test", transform = test_transform)
        else:
            Trainloader = Data.STL10(os.path.join(Data_root),download=False,split="train",transform = img_transform)
            Testloader = Data.STL10(os.path.join(Data_root),download=False,split="test", transform = test_transform)
    elif Dataset_name == "Caltech":
        if Data_download:
            if not os.path.isdir(os.path.join(Data_root,"Caltech")):
                os.mkdir(os.path.join(Data_root,"Caltech"))
            Trainloader = Caltech256(os.path.join(Data_root,"Caltech"),download=True,train=True,transform=img_transform)
            Testloader = Caltech256(os.path.join(Data_root,"Caltech"),download=True,train=False, transform = test_transform)
        else:
            Trainloader = Caltech256(os.path.join(Data_root,"Caltech"),train=True,transform=img_transform)
            Testloader = Caltech256(os.path.join(Data_root,"Caltech"),train=False, transform = test_transform)
    elif Dataset_name == "Imagenet":
        # ImageNet is read from pre-built LMDB files; the download flag is ignored.
        Trainloader = ImageNetDataset(os.path.join(Data_root,"ImageNet/ILSVRC-train.lmdb"),transform=img_transform)
        Testloader = ImageNetDataset(os.path.join(Data_root,"ImageNet/ILSVRC-val.lmdb"),transform=test_transform)
    elif Dataset_name == "CUB2011":
        if Data_download:
            Trainloader = Cub2011(Data_root,train=True,transform = img_transform,download=True)
            Testloader = Cub2011(Data_root,train=False,transform = test_transform,download=True)
        else:
            Trainloader = Cub2011(Data_root,train=True,transform = img_transform,download=False)
            Testloader = Cub2011(Data_root,train=False,transform = test_transform,download=False)
    else:
        raise ImportError("Dataset not supported")

    # Creating train and test loaders if not ImageNet dataset
    train_loader = TD.DataLoader(dataset=Trainloader,
                                 batch_size= Train_configer['batch_size'],
                                 shuffle= Train_configer['shuffle'])
                                 #num_workers= Train_configer['num_workers'],
                                 #pin_memory=True)

    test_loader = TD.DataLoader(dataset=Testloader,
                                batch_size= Val_configer['batch_size'],
                                shuffle= Val_configer['shuffle'])
                                #num_workers= Val_configer['num_workers'],
                                #pin_memory=True)

    print ('---------- Training and Test data Loaded ')
    print("Dataset: {}".format(Dataset_name))

    return train_loader,test_loader
| 49.805085
| 139
| 0.536498
|
4a0430582bed0ae9b0e498430d1391c8f171889f
| 411
|
py
|
Python
|
congress/migrations/0010_alter_summarystat_timeframe.py
|
InsiderUnlocked/Backend
|
efd71d3eb874b9bfdbca89d0aa9e610338f9fa52
|
[
"blessing"
] | 3
|
2022-01-22T06:53:52.000Z
|
2022-02-13T10:16:29.000Z
|
congress/migrations/0010_alter_summarystat_timeframe.py
|
InsiderUnlocked/Backend
|
efd71d3eb874b9bfdbca89d0aa9e610338f9fa52
|
[
"blessing"
] | null | null | null |
congress/migrations/0010_alter_summarystat_timeframe.py
|
InsiderUnlocked/Backend
|
efd71d3eb874b9bfdbca89d0aa9e610338f9fa52
|
[
"blessing"
] | null | null | null |
# Generated by Django 3.2.6 on 2022-01-19 03:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``SummaryStat.timeframe`` unique (was part of a unique_together set)."""

    dependencies = [
        ('congress', '0009_alter_summarystat_unique_together'),
    ]

    operations = [
        migrations.AlterField(
            model_name='summarystat',
            name='timeframe',
            field=models.IntegerField(unique=True),
        ),
    ]
| 21.631579
| 63
| 0.622871
|
4a0431558737604a3f07a97907ac9cbe60ced0b0
| 19,225
|
py
|
Python
|
tests/store/tracking/test_rest_store.py
|
ericgosno91/mlflow
|
8d1a9e354b22919423e5295afd650e39191f701a
|
[
"Apache-2.0"
] | 1
|
2021-09-02T01:46:17.000Z
|
2021-09-02T01:46:17.000Z
|
tests/store/tracking/test_rest_store.py
|
ericgosno91/mlflow
|
8d1a9e354b22919423e5295afd650e39191f701a
|
[
"Apache-2.0"
] | null | null | null |
tests/store/tracking/test_rest_store.py
|
ericgosno91/mlflow
|
8d1a9e354b22919423e5295afd650e39191f701a
|
[
"Apache-2.0"
] | null | null | null |
import json
import unittest
from unittest import mock
import pytest
import mlflow
from mlflow.entities import (
Param,
Metric,
RunTag,
SourceType,
ViewType,
ExperimentTag,
Experiment,
LifecycleStage,
)
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.protos.service_pb2 import (
CreateRun,
DeleteExperiment,
DeleteRun,
LogBatch,
LogMetric,
LogParam,
RestoreExperiment,
RestoreRun,
RunTag as ProtoRunTag,
SearchRuns,
SetTag,
DeleteTag,
SetExperimentTag,
GetExperimentByName,
ListExperiments,
LogModel,
)
from mlflow.protos.databricks_pb2 import (
RESOURCE_DOES_NOT_EXIST,
ENDPOINT_NOT_FOUND,
REQUEST_LIMIT_EXCEEDED,
INTERNAL_ERROR,
ErrorCode,
)
from mlflow.store.tracking.rest_store import (
RestStore,
DatabricksRestStore,
)
from mlflow.utils.proto_json_utils import message_to_json
from mlflow.utils.rest_utils import MlflowHostCreds, _DEFAULT_HEADERS
class MyCoolException(Exception):
    """Sentinel exception used to verify custom error-handling stores propagate it."""
    pass
class CustomErrorHandlingRestStore(RestStore):
    """RestStore whose endpoint calls always fail with ``MyCoolException``."""

    def _call_endpoint(self, api, json_body):
        # Unconditionally raise so tests can observe the exception surfacing
        # through RestStore's public methods unchanged.
        raise MyCoolException()
def mock_http_request():
    """Return a patcher replacing ``http_request`` with an empty-JSON 200 response."""
    fake_response = mock.MagicMock(status_code=200, text="{}")
    target = "mlflow.utils.rest_utils.http_request"
    return mock.patch(target, return_value=fake_response)
class TestRestStore(object):
    """Unit tests for RestStore/DatabricksRestStore over a fully mocked HTTP layer."""

    @mock.patch("requests.Session.request")
    def test_successful_http_request(self, request):
        """A well-formed GET is issued and the JSON response is parsed into entities."""
        def mock_request(*args, **kwargs):
            # Filter out None arguments
            assert args == ("GET", "https://hello/api/2.0/mlflow/experiments/list")
            kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
            assert kwargs == {
                "params": {"view_type": "ACTIVE_ONLY"},
                "headers": _DEFAULT_HEADERS,
                "verify": True,
                "timeout": 10,
            }
            response = mock.MagicMock()
            response.status_code = 200
            response.text = '{"experiments": [{"name": "Exp!", "lifecycle_stage": "active"}]}'
            return response

        request.side_effect = mock_request

        store = RestStore(lambda: MlflowHostCreds("https://hello"))
        experiments = store.list_experiments()
        assert experiments[0].name == "Exp!"

    @mock.patch("requests.Session.request")
    def test_failed_http_request(self, request):
        """A 404 with an error payload surfaces as MlflowException."""
        response = mock.MagicMock()
        response.status_code = 404
        response.text = '{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "No experiment"}'
        request.return_value = response

        store = RestStore(lambda: MlflowHostCreds("https://hello"))
        with pytest.raises(MlflowException) as cm:
            store.list_experiments()
        assert "RESOURCE_DOES_NOT_EXIST: No experiment" in str(cm.value)

    @mock.patch("requests.Session.request")
    def test_failed_http_request_custom_handler(self, request):
        """A store subclass may translate endpoint failures into its own exception."""
        response = mock.MagicMock()
        response.status_code = 404
        response.text = '{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "No experiment"}'
        request.return_value = response

        store = CustomErrorHandlingRestStore(lambda: MlflowHostCreds("https://hello"))
        with pytest.raises(MyCoolException):
            store.list_experiments()

    @mock.patch("requests.Session.request")
    def test_response_with_unknown_fields(self, request):
        """Unknown JSON fields in the server response are ignored, not fatal."""
        experiment_json = {
            "experiment_id": "1",
            "name": "My experiment",
            "artifact_location": "foo",
            "lifecycle_stage": "deleted",
            "OMG_WHAT_IS_THIS_FIELD": "Hooly cow",
        }

        response = mock.MagicMock()
        response.status_code = 200
        experiments = {"experiments": [experiment_json]}
        response.text = json.dumps(experiments)
        request.return_value = response

        store = RestStore(lambda: MlflowHostCreds("https://hello"))
        experiments = store.list_experiments()
        assert len(experiments) == 1
        assert experiments[0].name == "My experiment"

    def _args(self, host_creds, endpoint, method, json_body):
        # Build the kwargs dict http_request is expected to be called with;
        # GET payloads travel as query params, everything else as a JSON body.
        res = {
            "host_creds": host_creds,
            "endpoint": "/api/2.0/mlflow/%s" % endpoint,
            "method": method,
        }
        if method == "GET":
            res["params"] = json.loads(json_body)
        else:
            res["json"] = json.loads(json_body)
        return res

    def _verify_requests(self, http_request, host_creds, endpoint, method, json_body):
        # Assert that at least one call to the mocked http_request matched exactly.
        http_request.assert_any_call(**(self._args(host_creds, endpoint, method, json_body)))

    def test_requestor(self):
        """Each store method issues the exact REST call its proto message describes."""
        creds = MlflowHostCreds("https://hello")
        store = RestStore(lambda: creds)

        user_name = "mock user"
        source_name = "rest test"

        source_name_patch = mock.patch(
            "mlflow.tracking.context.default_context._get_source_name", return_value=source_name
        )
        source_type_patch = mock.patch(
            "mlflow.tracking.context.default_context._get_source_type",
            return_value=SourceType.LOCAL,
        )
        with mock_http_request() as mock_http, mock.patch(
            "mlflow.tracking._tracking_service.utils._get_store", return_value=store
        ), mock.patch(
            "mlflow.tracking.context.default_context._get_user", return_value=user_name
        ), mock.patch(
            "time.time", return_value=13579
        ), source_name_patch, source_type_patch:
            with mlflow.start_run(experiment_id="43"):
                cr_body = message_to_json(
                    CreateRun(
                        experiment_id="43",
                        user_id=user_name,
                        start_time=13579000,
                        tags=[
                            ProtoRunTag(key="mlflow.source.name", value=source_name),
                            ProtoRunTag(key="mlflow.source.type", value="LOCAL"),
                            ProtoRunTag(key="mlflow.user", value=user_name),
                        ],
                    )
                )
                expected_kwargs = self._args(creds, "runs/create", "POST", cr_body)

                assert mock_http.call_count == 1
                actual_kwargs = mock_http.call_args[1]

                # Test the passed tag values separately from the rest of the request
                # Tag order is inconsistent on Python 2 and 3, but the order does not matter
                expected_tags = expected_kwargs["json"].pop("tags")
                actual_tags = actual_kwargs["json"].pop("tags")
                assert sorted(expected_tags, key=lambda t: t["key"]) == sorted(
                    actual_tags, key=lambda t: t["key"]
                )
                assert expected_kwargs == actual_kwargs

        with mock_http_request() as mock_http:
            store.log_param("some_uuid", Param("k1", "v1"))
            body = message_to_json(
                LogParam(run_uuid="some_uuid", run_id="some_uuid", key="k1", value="v1")
            )
            self._verify_requests(mock_http, creds, "runs/log-parameter", "POST", body)

        with mock_http_request() as mock_http:
            store.set_experiment_tag("some_id", ExperimentTag("t1", "abcd" * 1000))
            body = message_to_json(
                SetExperimentTag(experiment_id="some_id", key="t1", value="abcd" * 1000)
            )
            self._verify_requests(mock_http, creds, "experiments/set-experiment-tag", "POST", body)

        with mock_http_request() as mock_http:
            store.set_tag("some_uuid", RunTag("t1", "abcd" * 1000))
            body = message_to_json(
                SetTag(run_uuid="some_uuid", run_id="some_uuid", key="t1", value="abcd" * 1000)
            )
            self._verify_requests(mock_http, creds, "runs/set-tag", "POST", body)

        with mock_http_request() as mock_http:
            store.delete_tag("some_uuid", "t1")
            body = message_to_json(DeleteTag(run_id="some_uuid", key="t1"))
            self._verify_requests(mock_http, creds, "runs/delete-tag", "POST", body)

        with mock_http_request() as mock_http:
            store.log_metric("u2", Metric("m1", 0.87, 12345, 3))
            body = message_to_json(
                LogMetric(run_uuid="u2", run_id="u2", key="m1", value=0.87, timestamp=12345, step=3)
            )
            self._verify_requests(mock_http, creds, "runs/log-metric", "POST", body)

        with mock_http_request() as mock_http:
            metrics = [
                Metric("m1", 0.87, 12345, 0),
                Metric("m2", 0.49, 12345, -1),
                Metric("m3", 0.58, 12345, 2),
            ]
            params = [Param("p1", "p1val"), Param("p2", "p2val")]
            tags = [RunTag("t1", "t1val"), RunTag("t2", "t2val")]
            store.log_batch(run_id="u2", metrics=metrics, params=params, tags=tags)
            metric_protos = [metric.to_proto() for metric in metrics]
            param_protos = [param.to_proto() for param in params]
            tag_protos = [tag.to_proto() for tag in tags]
            body = message_to_json(
                LogBatch(run_id="u2", metrics=metric_protos, params=param_protos, tags=tag_protos)
            )
            self._verify_requests(mock_http, creds, "runs/log-batch", "POST", body)

        with mock_http_request() as mock_http:
            store.delete_run("u25")
            self._verify_requests(
                mock_http, creds, "runs/delete", "POST", message_to_json(DeleteRun(run_id="u25"))
            )

        with mock_http_request() as mock_http:
            store.restore_run("u76")
            self._verify_requests(
                mock_http, creds, "runs/restore", "POST", message_to_json(RestoreRun(run_id="u76"))
            )

        with mock_http_request() as mock_http:
            store.delete_experiment("0")
            self._verify_requests(
                mock_http,
                creds,
                "experiments/delete",
                "POST",
                message_to_json(DeleteExperiment(experiment_id="0")),
            )

        with mock_http_request() as mock_http:
            store.restore_experiment("0")
            self._verify_requests(
                mock_http,
                creds,
                "experiments/restore",
                "POST",
                message_to_json(RestoreExperiment(experiment_id="0")),
            )

        with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
            response = mock.MagicMock()
            response.status_code = 200
            response.text = '{"runs": ["1a", "2b", "3c"], "next_page_token": "67890fghij"}'
            mock_http.return_value = response
            result = store.search_runs(
                ["0", "1"],
                "params.p1 = 'a'",
                ViewType.ACTIVE_ONLY,
                max_results=10,
                order_by=["a"],
                page_token="12345abcde",
            )
            expected_message = SearchRuns(
                experiment_ids=["0", "1"],
                filter="params.p1 = 'a'",
                run_view_type=ViewType.to_proto(ViewType.ACTIVE_ONLY),
                max_results=10,
                order_by=["a"],
                page_token="12345abcde",
            )
            self._verify_requests(
                mock_http, creds, "runs/search", "POST", message_to_json(expected_message)
            )
            assert result.token == "67890fghij"

        with mock_http_request() as mock_http:
            run_id = "run_id"
            m = Model(artifact_path="model/path", run_id="run_id", flavors={"tf": "flavor body"})
            result = store.record_logged_model("run_id", m)
            expected_message = LogModel(run_id=run_id, model_json=m.to_json())
            self._verify_requests(
                mock_http, creds, "runs/log-model", "POST", message_to_json(expected_message)
            )

    @pytest.mark.parametrize("store_class", [RestStore, DatabricksRestStore])
    def test_get_experiment_by_name(self, store_class):
        """get_experiment_by_name: happy path, miss, old-server fallback, and 429 behavior."""
        creds = MlflowHostCreds("https://hello")
        store = store_class(lambda: creds)
        with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
            response = mock.MagicMock()
            response.status_code = 200
            experiment = Experiment(
                experiment_id="123",
                name="abc",
                artifact_location="/abc",
                lifecycle_stage=LifecycleStage.ACTIVE,
            )
            response.text = json.dumps(
                {"experiment": json.loads(message_to_json(experiment.to_proto()))}
            )
            mock_http.return_value = response
            result = store.get_experiment_by_name("abc")
            expected_message0 = GetExperimentByName(experiment_name="abc")
            self._verify_requests(
                mock_http,
                creds,
                "experiments/get-by-name",
                "GET",
                message_to_json(expected_message0),
            )
            assert result.experiment_id == experiment.experiment_id
            assert result.name == experiment.name
            assert result.artifact_location == experiment.artifact_location
            assert result.lifecycle_stage == experiment.lifecycle_stage

            # Test GetExperimentByName against nonexistent experiment
            mock_http.reset_mock()
            nonexistent_exp_response = mock.MagicMock()
            nonexistent_exp_response.status_code = 404
            nonexistent_exp_response.text = MlflowException(
                "Exp doesn't exist!", RESOURCE_DOES_NOT_EXIST
            ).serialize_as_json()
            mock_http.return_value = nonexistent_exp_response
            assert store.get_experiment_by_name("nonexistent-experiment") is None
            expected_message1 = GetExperimentByName(experiment_name="nonexistent-experiment")
            self._verify_requests(
                mock_http,
                creds,
                "experiments/get-by-name",
                "GET",
                message_to_json(expected_message1),
            )
            assert mock_http.call_count == 1

            # Test REST client behavior against a mocked old server, which has handler for
            # ListExperiments but not GetExperimentByName
            mock_http.reset_mock()
            list_exp_response = mock.MagicMock()
            list_exp_response.text = json.dumps(
                {"experiments": [json.loads(message_to_json(experiment.to_proto()))]}
            )
            list_exp_response.status_code = 200

            def response_fn(*args, **kwargs):
                # pylint: disable=unused-argument
                if kwargs.get("endpoint") == "/api/2.0/mlflow/experiments/get-by-name":
                    raise MlflowException(
                        "GetExperimentByName is not implemented", ENDPOINT_NOT_FOUND
                    )
                else:
                    return list_exp_response

            mock_http.side_effect = response_fn
            result = store.get_experiment_by_name("abc")
            expected_message2 = ListExperiments(view_type=ViewType.ALL)
            self._verify_requests(
                mock_http,
                creds,
                "experiments/get-by-name",
                "GET",
                message_to_json(expected_message0),
            )
            self._verify_requests(
                mock_http, creds, "experiments/list", "GET", message_to_json(expected_message2)
            )
            assert result.experiment_id == experiment.experiment_id
            assert result.name == experiment.name
            assert result.artifact_location == experiment.artifact_location
            assert result.lifecycle_stage == experiment.lifecycle_stage

            # Verify that REST client won't fall back to ListExperiments for 429 errors (hitting
            # rate limits)
            mock_http.reset_mock()

            def rate_limit_response_fn(*args, **kwargs):
                # pylint: disable=unused-argument
                raise MlflowException(
                    "Hit rate limit on GetExperimentByName", REQUEST_LIMIT_EXCEEDED
                )

            mock_http.side_effect = rate_limit_response_fn
            with pytest.raises(MlflowException) as exc_info:
                store.get_experiment_by_name("imspamming")
            assert exc_info.value.error_code == ErrorCode.Name(REQUEST_LIMIT_EXCEEDED)
            assert mock_http.call_count == 1

    def test_databricks_rest_store_get_experiment_by_name(self):
        """Databricks store must not fall back to ListExperiments on internal errors."""
        creds = MlflowHostCreds("https://hello")
        store = DatabricksRestStore(lambda: creds)
        with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
            # Verify that Databricks REST client won't fall back to ListExperiments for 500-level
            # errors that are not ENDPOINT_NOT_FOUND

            def rate_limit_response_fn(*args, **kwargs):
                # pylint: disable=unused-argument
                raise MlflowException("Some internal error!", INTERNAL_ERROR)

            mock_http.side_effect = rate_limit_response_fn
            with pytest.raises(MlflowException) as exc_info:
                store.get_experiment_by_name("abc")
            assert exc_info.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
            assert exc_info.value.message == "Some internal error!"
            expected_message0 = GetExperimentByName(experiment_name="abc")
            self._verify_requests(
                mock_http,
                creds,
                "experiments/get-by-name",
                "GET",
                message_to_json(expected_message0),
            )
            assert mock_http.call_count == 1

    def test_databricks_paginate_list_experiments(self):
        """Pagination yields one page per response and surfaces each next_page_token."""
        creds = MlflowHostCreds("https://hello")
        store = DatabricksRestStore(lambda: creds)

        list_exp_responses = []
        next_page_tokens = ["a", "b", None]
        for next_page_token in next_page_tokens:
            experiment = Experiment(
                experiment_id="123",
                name=str(next_page_token),
                artifact_location="/abc",
                lifecycle_stage=LifecycleStage.ACTIVE,
            )
            list_exp_response = mock.MagicMock()
            list_exp_response.text = json.dumps(
                {
                    "experiments": [json.loads(message_to_json(experiment.to_proto()))],
                    "next_page_token": next_page_token,
                }
            )
            list_exp_response.status_code = 200
            list_exp_responses.append(list_exp_response)
        with mock.patch("mlflow.utils.rest_utils.http_request", side_effect=list_exp_responses):
            for idx, experiments in enumerate(
                store._paginate_list_experiments(ViewType.ACTIVE_ONLY)
            ):
                assert experiments[0].name == str(next_page_tokens[idx])
                assert experiments.token == next_page_tokens[idx]
if __name__ == "__main__":
    # Allow running this test module directly (outside the pytest runner).
    unittest.main()
| 40.219665
| 100
| 0.591209
|
4a0431924c7638390a8a1bad061597199381d650
| 1,361
|
py
|
Python
|
aliyun-python-sdk-csb/aliyunsdkcsb/request/v20171118/CommitSuccessedServicesRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-csb/aliyunsdkcsb/request/v20171118/CommitSuccessedServicesRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-csb/aliyunsdkcsb/request/v20171118/CommitSuccessedServicesRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CommitSuccessedServicesRequest(RpcRequest):
    """RPC request for the CSB ``CommitSuccessedServices`` API (version 2017-11-18).

    Follows the SDK's generated accessor pattern: ``CsbName`` travels as a
    query parameter, ``Services`` as part of the POST body.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'CSB', '2017-11-18', 'CommitSuccessedServices')
        # This endpoint requires HTTPS and a POST body.
        self.set_protocol_type('https');
        self.set_method('POST')

    def get_CsbName(self):
        # Query-parameter accessor; returns None if not yet set.
        return self.get_query_params().get('CsbName')

    def set_CsbName(self,CsbName):
        self.add_query_param('CsbName',CsbName)

    def get_Services(self):
        # Body-parameter accessor; returns None if not yet set.
        return self.get_body_params().get('Services')

    def set_Services(self,Services):
        self.add_body_params('Services', Services)
| 35.815789
| 76
| 0.761205
|
4a04324212d68a0401c4a878ccf1c0f623b69acd
| 5,123
|
py
|
Python
|
org/apache/helix/HelixManager.py
|
davzhang/helix-python-binding
|
11a9ecf730bce07720e0b0bcf7f0ec1cd2b25878
|
[
"Apache-2.0"
] | 3
|
2015-04-08T22:51:04.000Z
|
2015-05-03T06:42:35.000Z
|
org/apache/helix/HelixManager.py
|
zzhang5/helix-python-binding
|
11a9ecf730bce07720e0b0bcf7f0ec1cd2b25878
|
[
"Apache-2.0"
] | null | null | null |
org/apache/helix/HelixManager.py
|
zzhang5/helix-python-binding
|
11a9ecf730bce07720e0b0bcf7f0ec1cd2b25878
|
[
"Apache-2.0"
] | 1
|
2020-03-31T21:43:01.000Z
|
2020-03-31T21:43:01.000Z
|
# package org.apache.helix
#from org.apache.helix import *
#from java.util import List
# do not include controller stuff: dzhang
#from org.apache.helix.controller.GenericHelixController import GenericHelixController
#from org.apache.helix.healthcheck.ParticipantHealthReportCollector import ParticipantHealthReportCollector
#from org.apache.helix.participant.HelixStateMachineEngine import HelixStateMachineEngine
#from org.apache.helix.participant.StateMachineEngine import StateMachineEngine
#from org.apache.helix.spectator.RoutingTableProvider import RoutingTableProvider
#from org.apache.helix.store.PropertyStore import PropertyStore
#from org.apache.helix.store.zk.ZkHelixPropertyStore import ZkHelixPropertyStore
class HelixManager:
    """Interface stub translated from Apache Helix's Java ``HelixManager``.

    Every method is an unimplemented placeholder (body ``pass``, so each
    call returns None); concrete managers are expected to override them.
    Each docstring records the return type, parameters and declared
    exceptions of the original Java method.
    """

    def connect(self):
        """Returns void. Throws: Exception."""
        pass

    def isConnected(self):
        """Returns boolean."""
        pass

    def disconnect(self):
        """Returns void."""
        pass

    def addIdealStateChangeListener(self, listener):
        """Returns void. listener: IdealStateChangeListener. Throws: Exception."""
        pass

    def addLiveInstanceChangeListener(self, listener):
        """Returns void. listener: LiveInstanceChangeListener. Throws: Exception."""
        pass

    def addConfigChangeListener(self, listener):
        """Returns void. listener: ConfigChangeListener. Throws: Exception."""
        pass

    def addMessageListener(self, listener, instanceName):
        """Returns void. listener: MessageListener, instanceName: String. Throws: Exception."""
        pass

    def addCurrentStateChangeListener(self, listener, instanceName, sessionId):
        """Returns void. listener: CurrentStateChangeListener, instanceName: String,
        sessionId: String. Throws: Exception."""
        pass

    def addHealthStateChangeListener(self, listener, instanceName):
        """Returns void. listener: HealthStateChangeListener, instanceName: String.
        Throws: Exception."""
        pass

    def addExternalViewChangeListener(self, listener):
        """Returns void. listener: ExternalViewChangeListener. Throws: Exception."""
        pass

    def addControllerListener(self, listener):
        """Returns void. listener: ControllerChangeListener."""
        pass

    def removeListener(self, listener):
        """Returns boolean. listener: Object."""
        pass

    def getDataAccessor(self):
        """Returns DataAccessor. @Deprecated."""
        pass

    def getHelixDataAccessor(self):
        """Returns HelixDataAccessor."""
        pass

    def getConfigAccessor(self):
        """Returns ConfigAccessor."""
        pass

    def getClusterName(self):
        """Returns String."""
        pass

    def getInstanceName(self):
        """Returns String."""
        pass

    def getSessionId(self):
        """Returns String."""
        pass

    def getLastNotificationTime(self):
        """Returns long."""
        pass

    def getClusterManagmentTool(self):
        """Returns HelixAdmin. (Name keeps the original's 'Managment' spelling.)"""
        pass

    def getPropertyStore(self):
        """Returns PropertyStore<ZNRecord>. @Deprecated."""
        pass

    def getHelixPropertyStore(self):
        """Returns ZkHelixPropertyStore<ZNRecord>."""
        pass

    def getMessagingService(self):
        """Returns ClusterMessagingService."""
        pass

    def getHealthReportCollector(self):
        """Returns ParticipantHealthReportCollector."""
        pass

    def getInstanceType(self):
        """Returns InstanceType."""
        pass

    def getVersion(self):
        """Returns String."""
        pass

    def getStateMachineEngine(self):
        """Returns StateMachineEngine."""
        pass

    def isLeader(self):
        """Returns boolean."""
        pass

    def startTimerTasks(self):
        """Returns void."""
        pass

    def stopTimerTasks(self):
        """Returns void."""
        pass

    def addPreConnectCallback(self, callback):
        """Returns void. callback: PreConnectCallback."""
        pass
| 15.292537
| 107
| 0.543432
|
4a0432986afd17bf20f55ee8c8c6e6ca3489b7b5
| 1,161
|
py
|
Python
|
app.py
|
potados99/mass-slide
|
58ab8aafcc239c7ce112fcbc33314038fc8e25db
|
[
"MIT"
] | 2
|
2019-04-18T07:36:54.000Z
|
2021-03-01T13:53:46.000Z
|
app.py
|
potados99/Mass-slide
|
58ab8aafcc239c7ce112fcbc33314038fc8e25db
|
[
"MIT"
] | null | null | null |
app.py
|
potados99/Mass-slide
|
58ab8aafcc239c7ce112fcbc33314038fc8e25db
|
[
"MIT"
] | 1
|
2021-12-28T20:44:28.000Z
|
2021-12-28T20:44:28.000Z
|
#-*- coding: utf-8 -*-
# Script: build a dated .pptx slide deck from a chapter template file
# given as the first command-line argument.
import pkg_resources
import sys
# Python 2 idiom: reload sys so setdefaultencoding() is visible again,
# then force UTF-8 as the default string encoding.
# NOTE(review): this only works on Python 2 — reload() and
# sys.setdefaultencoding() do not exist as shown on Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
from datetime import datetime
from modules import IOModule
from modules import processModule
from modules import pptxModule
# Project root. The portable pkg_resources lookup is commented out in
# favour of a hard-coded absolute path.
# NOTE(review): this path breaks on any machine but the author's.
#PR = pkg_resources.resource_filename('__main__','')
PR = '/Users/potados/Documents/GitHub/mass-slide/'
now = datetime.now()
# NOTE(review): PR already ends with '/', so CREATED contains a double
# slash; harmless on POSIX filesystems but untidy.
CREATED = PR + '/created'
# Output filename built from today's date.
# NOTE(review): month/day are not zero-padded, so names can collide
# (e.g. 2021-1-12 and 2021-11-2 both yield '2021112.pptx').
FILENAME = ( '%s%s%s.pptx' % ( now.year, now.month, now.day ) )
# First CLI argument: the template to process (IndexError if missing).
TEMPLATE = sys.argv[1]
# Pipeline stage 1: parse the raw template into a chapter dict,
# normalize it, and persist the intermediate result under 'recent'.
rawDict = IOModule.get_raw_chapter_dict(TEMPLATE)
processedChapterList = processModule.raw_to_processed(rawDict)
IOModule.write_processed(fileName='recent', chapterList=processedChapterList)
# Pipeline stage 2: reload the intermediate, finish processing against
# the template, and persist the final chapter list.
readProcessedChapterList = IOModule.get_processed_chapter_list(fileName='recent')
doneChapterList = processModule.processed_to_done(readProcessedChapterList, TEMPLATE)
IOModule.write_done(fileName='recent', chapterList=doneChapterList)
readDoneChapterList = IOModule.get_done_chapter_list(fileName='recent')
# Pipeline stage 3: render the chapters into a presentation and save it
# under CREATED/FILENAME.
myPrs = pptxModule.new_presentation()
pptxModule.write_presentation(Presentation=myPrs, List=readDoneChapterList)
pptxModule.save_presentation(prs=myPrs, path=CREATED + '/' + FILENAME)
| 27.642857
| 85
| 0.800172
|
4a04329926af1e77384ba96cef7476052fc8e768
| 2,394
|
py
|
Python
|
spiders/a402.py
|
senlyu163/crawler
|
ecf95f7b356c726922b5e5d90000fda3e16ae90d
|
[
"Apache-2.0"
] | null | null | null |
spiders/a402.py
|
senlyu163/crawler
|
ecf95f7b356c726922b5e5d90000fda3e16ae90d
|
[
"Apache-2.0"
] | null | null | null |
spiders/a402.py
|
senlyu163/crawler
|
ecf95f7b356c726922b5e5d90000fda3e16ae90d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
from scrapy_splash import SplashRequest
class A402Spider(CrawlSpider):
    """Crawl the menglian.gov.cn government info-disclosure listing and
    extract url/date/title/body from each detail page, rendering pages
    through Splash (scrapy-splash) so JS content is available."""
    name = '402'
    allowed_domains = ['menglian.gov.cn']
    start_urls = ['http://www.menglian.gov.cn/xxgklby_list.jsp?urltype=egovinfo.EgovTreeURl&wbtreeid=1100&type=egovinfodeptsubcattree&sccode=ml&subtype=1&dpcode=P004&gilevel=1']
    rules = (
        # Detail pages: parse them and keep following links found there.
        Rule(LinkExtractor(allow=r'info/.*\.htm'), callback='parse_item', follow=True),
        # Listing pagination links: follow only, nothing to parse.
        Rule(LinkExtractor(allow=r'\?ainfolist\d+t=\d+&ainfolist\d+p=\d+&ainfolist\d+c=\d+&urltype=egovinfo\.EgovTreeURl&wbtreeid=\d+&type=egovinfodeptsubcattree&sccode=ml&subtype=\d+&dpcode=P\d+&gilevel=\d+'), follow=True),
        # Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )
    def _build_request(self, rule, link):
        # Override of CrawlSpider._build_request: emit a SplashRequest
        # (waiting 1.5 s for rendering) instead of a plain Request, and
        # record the rule index + anchor text in meta like the base class.
        r = SplashRequest(url=link.url, callback=self._response_downloaded, args={"wait": 1.5})
        r.meta.update(rule=rule, link_text=link.text)
        return r
    def _requests_to_follow(self, response):
        # Override of CrawlSpider._requests_to_follow with the original
        # HtmlResponse isinstance guard commented out, so link extraction
        # also runs on Splash-rendered responses.
        # if not isinstance(response, HtmlResponse):
        #     return
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [lnk for lnk in rule.link_extractor.extract_links(response)
                     if lnk not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                # n is the rule index; _build_request stashes it in meta so
                # the downloaded response can be matched back to its rule.
                r = self._build_request(n, link)
                yield rule.process_request(r)
    def parse_item(self, response):
        """Build a ScrapySpiderItem (url, date, title, contents) from a
        detail page response."""
        item = ScrapySpiderItem()
        item['url'] = response.url
        # The date cell contains extra text; keep only the YYYY-MM-DD part.
        date = response.xpath('/html/body/div[5]/div/table/tbody/tr[1]/td/table/tbody/tr[1]/td[1]/table/tbody/tr[2]/td[4]').extract_first()
        date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
        item['date'] = date
        title = response.xpath('/html/body/div[5]/div/table/tbody/tr[1]/td/table/tbody/tr[1]/td[1]/table/tbody/tr[3]/td[2]/text()').extract_first()
        item['title'] = title
        # Body lives in the #vsb_content div; helper strips it down to
        # the Chinese text content.
        contents = response.xpath('//div[@id="vsb_content"]').extract()
        item['contents'] = extract_CN_from_content(contents)
        return item
| 45.169811
| 224
| 0.648287
|
4a0433a088ed7245b0d7888535f05914a1b676f0
| 1,340
|
py
|
Python
|
Support/Fuego/Pythia/pythia-0.4/packages/fuego/fuego/serialization/mechanisms/Entity.py
|
marient/PelePhysics
|
e6ad1839d77b194e09ab44ff850c9489652e5d81
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-04-24T13:32:23.000Z
|
2019-04-24T13:32:23.000Z
|
Support/Fuego/Pythia/pythia-0.4/packages/fuego/fuego/serialization/mechanisms/Entity.py
|
marient/PelePhysics
|
e6ad1839d77b194e09ab44ff850c9489652e5d81
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
Support/Fuego/Pythia/pythia-0.4/packages/fuego/fuego/serialization/mechanisms/Entity.py
|
marient/PelePhysics
|
e6ad1839d77b194e09ab44ff850c9489652e5d81
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class Entity(object):
    """Base class for parsed mechanism entities that records where in the
    source file the entity was declared (filename, line, column).

    ``id`` is the entity's identifier; ``locator`` is any object exposing
    ``filename``/``line``/``column`` attributes, or a falsy value for
    "position unknown".
    """

    # Maximum length of the filename portion rendered by __str__ before
    # the middle of the path is elided with "...".
    _FILENAME_LIMIT = 40

    def locator(self, locator):
        """Copy position information (filename/line/column) from *locator*."""
        self.filename = locator.filename
        self.line = locator.line
        self.column = locator.column
        return

    def __init__(self, id, locator):
        """Create an entity with identifier *id*; *locator* may be falsy,
        in which case the position defaults to an empty filename at 0:0."""
        self.id = id
        if locator:
            # Reuse the copy logic instead of duplicating the three assignments.
            self.locator(locator)
        else:
            self.filename = ""
            self.line = 0
            self.column = 0
        return

    def __str__(self):
        """Return "'filename':(line, column)", eliding the middle of very
        long filenames, or "<unknown>" when no filename is recorded."""
        filename = self.filename
        if not filename:
            return "<unknown>"
        if len(filename) > self._FILENAME_LIMIT:
            # Floor division: the original "/" is true division on
            # Python 3, producing float slice indices and a TypeError.
            half = self._FILENAME_LIMIT // 2
            filename = filename[:half - 3] + "..." + filename[-(half - 3):]
        return "'%s':(%d, %d)" % (filename, self.line, self.column)
# version
__id__ = "$Id$"
# End of file
| 23.928571
| 82
| 0.446269
|
4a0433b6ea4d1688243908ac5a535709f0b61588
| 269
|
py
|
Python
|
backend/api/post/permissions.py
|
spider1119/django-vue-template
|
b16247f09b11d0014cd67a7d361c076708ea7360
|
[
"MIT"
] | null | null | null |
backend/api/post/permissions.py
|
spider1119/django-vue-template
|
b16247f09b11d0014cd67a7d361c076708ea7360
|
[
"MIT"
] | null | null | null |
backend/api/post/permissions.py
|
spider1119/django-vue-template
|
b16247f09b11d0014cd67a7d361c076708ea7360
|
[
"MIT"
] | 1
|
2022-02-07T02:42:14.000Z
|
2022-02-07T02:42:14.000Z
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read, only the owner may write."""

    def has_object_permission(self, request, view, obj):
        # Read-only requests (the SAFE_METHODS: GET/HEAD/OPTIONS) are always
        # allowed; mutating requests require the requester to own the object.
        return (request.method in permissions.SAFE_METHODS
                or obj.owner == request.user)
| 33.625
| 55
| 0.739777
|
4a0434a31800010ead7633ee3145bff766977e84
| 1,059
|
py
|
Python
|
pymoo/model/individual.py
|
AIasd/pymoo
|
08705ca866367d9fab675c30ffe585c837df9654
|
[
"Apache-2.0"
] | 5
|
2022-01-06T01:10:47.000Z
|
2022-03-18T15:39:43.000Z
|
pymoo/model/individual.py
|
AIasd/pymoo
|
08705ca866367d9fab675c30ffe585c837df9654
|
[
"Apache-2.0"
] | 15
|
2022-01-03T19:36:36.000Z
|
2022-03-30T03:57:58.000Z
|
pymoo/model/individual.py
|
AIasd/pymoo
|
08705ca866367d9fab675c30ffe585c837df9654
|
[
"Apache-2.0"
] | 3
|
2021-11-22T08:01:47.000Z
|
2022-03-11T08:53:58.000Z
|
import copy
class Individual:
    """A single solution: design vector ``X``, objectives ``F``, constraint
    violation ``CV``, constraints ``G``, a ``feasible`` flag, plus a
    free-form ``data`` dict holding any extra keyword arguments."""

    def __init__(self, X=None, F=None, CV=None, G=None, feasible=None, **kwargs) -> None:
        self.X = X
        self.F = F
        self.CV = CV
        self.G = G
        self.feasible = feasible
        self.data = kwargs
        # Snapshot of the "core" attribute names set above; anything
        # outside this set is routed through the data dict instead.
        self.attr = set(self.__dict__.keys())

    def has(self, key):
        """True if *key* is a core attribute or stored in the data dict."""
        return key in self.attr or key in self.data

    def set(self, key, value):
        """Assign *value* to the core attribute *key* if it is one,
        otherwise store it in the data dict."""
        if key in self.attr:
            setattr(self, key, value)
        else:
            self.data[key] = value

    def copy(self):
        """Shallow copy whose data dict is independent of the original's."""
        clone = copy.copy(self)
        clone.data = dict(self.data)
        return clone

    def get(self, *keys):
        """Look each key up (data dict first, then core attributes, else
        None). Returns the bare value for a single key, a tuple otherwise."""
        def lookup(key):
            if key in self.data:
                return self.data[key]
            if key in self.attr:
                return getattr(self, key)
            return None

        values = [lookup(k) for k in keys]
        return values[0] if len(values) == 1 else tuple(values)
| 22.0625
| 89
| 0.491029
|
4a0435051a2ba13dfc34983c1a1d97a1c3507e42
| 49,824
|
py
|
Python
|
src/sage/interfaces/interface.py
|
qedhandle/sage
|
8453ffb849b047893b6c61dd09176a84c9133342
|
[
"BSL-1.0"
] | 1
|
2021-03-15T21:45:56.000Z
|
2021-03-15T21:45:56.000Z
|
src/sage/interfaces/interface.py
|
qedhandle/sage
|
8453ffb849b047893b6c61dd09176a84c9133342
|
[
"BSL-1.0"
] | null | null | null |
src/sage/interfaces/interface.py
|
qedhandle/sage
|
8453ffb849b047893b6c61dd09176a84c9133342
|
[
"BSL-1.0"
] | null | null | null |
r"""
Common Interface Functionality
See the examples in the other sections for how to use specific
interfaces. The interface classes all derive from the generic
interface that is described in this section.
AUTHORS:
- William Stein (2005): initial version
- William Stein (2006-03-01): got rid of infinite loop on startup if
client system missing
- Felix Lawrence (2009-08-21): edited ._sage_() to support lists and float exponents in foreign notation.
- Simon King (2010-09-25): Expect._local_tmpfile() depends on
Expect.pid() and is cached; Expect.quit() clears that cache,
which is important for forking.
- Jean-Pierre Flori (2010,2011): Split non Pexpect stuff into a parent class.
- Simon King (2015): Improve pickling for InterfaceElement
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
#*****************************************************************************
import operator
from sage.structure.sage_object import SageObject
from sage.structure.parent_base import ParentWithBase
from sage.structure.element import Element, parent
from sage.structure.richcmp import rich_to_bool
import sage.misc.sage_eval
from sage.misc.fast_methods import WithEqualityById
from sage.docs.instancedoc import instancedoc
class AsciiArtString(str):
    """A string whose repr is the raw text itself (no surrounding quotes),
    so interface help/ASCII output displays verbatim at the prompt."""

    def __repr__(self):
        # Render the text as-is instead of the quoted str repr.
        return "%s" % (self,)
class Interface(WithEqualityById, ParentWithBase):
    """
    Interface interface object.
    .. NOTE::
        Two interfaces compare equal if and only if they are identical
        objects (this is a critical constraint so that caching of
        representations of objects in interfaces works
        correctly). Otherwise they are never equal.
    """
    def __init__(self, name):
        """
        Initialize ``self``.
        EXAMPLES::
            sage: Maxima() == maxima
            False
            sage: maxima == maxima
            True
            sage: Maxima() != maxima
            True
            sage: maxima != maxima
            False
        """
        self.__name = name
        self.__coerce_name = '_' + name.lower() + '_'
        # Counter used by _next_var_name() to mint fresh identifiers.
        self.__seq = -1
        # Names of interpreter variables that clear() recycled for reuse.
        self._available_vars = []
        self._seed = None
        ParentWithBase.__init__(self, self)
    def _repr_(self):
        # Display name, e.g. "Maxima" for the interface named "maxima".
        return self.__name.capitalize()
    def name(self, new_name=None):
        # NOTE(review): new_name is accepted but ignored here — the
        # interface's own name never changes; the parameter mirrors
        # InterfaceElement.name()'s signature.
        return self.__name
    def get_seed(self):
        """
        Return the seed used to set the random number generator in
        this interface.
        The seed is initialized as ``None`` but should be set when the
        interface starts.
        EXAMPLES::
            sage: s = Singular()
            sage: s.set_seed(107)
            107
            sage: s.get_seed()
            107
        """
        return self._seed
    def rand_seed(self):
        """
        Return a random seed that can be put into ``set_seed`` function
        for any interpreter.
        This should be overridden if the particular interface needs
        something other than a small positive integer.
        EXAMPLES::
            sage: from sage.interfaces.interface import Interface
            sage: i = Interface("")
            sage: i.rand_seed() # random
            318491487L
            sage: s = Singular()
            sage: s.rand_seed() # random
            365260051L
        """
        import sage.doctest
        if sage.doctest.DOCTEST_MODE:
            # set the random seed through the current randstate
            from sage.misc.randstate import current_randstate
            seed = current_randstate().seed()
        else:
            from sage.misc.randstate import randstate
            seed = randstate().seed()
        # Mask to a small positive integer (at most 29 bits).
        return seed & 0x1FFFFFFF
    def set_seed(self, seed=None):
        """
        Set the random seed for the interpreter and return the new
        value of the seed.
        This is dependent on which interpreter so must be implemented
        in each separately. For examples see gap.py or singular.py.
        If seed is ``None`` then should generate a random seed.
        EXAMPLES::
            sage: s = Singular()
            sage: s.set_seed(1)
            1
            sage: [s.random(1,10) for i in range(5)]
            [8, 10, 4, 9, 1]
            sage: from sage.interfaces.interface import Interface
            sage: i = Interface("")
            sage: i.set_seed()
            Traceback (most recent call last):
            ...
            NotImplementedError: This interpreter did not implement a set_seed function
        """
        raise NotImplementedError("This interpreter did not implement a set_seed function")
    def interact(self):
        r"""
        This allows you to interactively interact with the child
        interpreter. Press Ctrl-D or type 'quit' or 'exit' to exit and
        return to Sage.
        .. note::
           This is completely different than the console() member
           function. The console function opens a new copy of the
           child interpreter, whereas the interact function gives you
           interactive access to the interpreter that is being used by
           Sage. Use sage(xxx) or interpretername(xxx) to pull objects
           in from sage to the interpreter.
        """
        from sage.repl.interpreter import interface_shell_embed
        shell = interface_shell_embed(self)
        try:
            # get_ipython() only exists when running inside IPython.
            ipython = get_ipython()
        except NameError:
            shell()
        else:
            shell(local_ns=dict(ipython.user_ns))
    def _pre_interact(self):
        # Hook run before interact(); default no-op.
        pass
    def _post_interact(self):
        # Hook run after interact(); default no-op.
        pass
    def cputime(self):
        """
        CPU time since this process started running.
        """
        raise NotImplementedError
    def read(self, filename):
        r"""
        EXAMPLES::
            sage: filename = tmp_filename()
            sage: f = open(filename, 'w')
            sage: _ = f.write('x = 2\n')
            sage: f.close()
            sage: octave.read(filename)  # optional - octave
            sage: octave.get('x') # optional - octave
            ' 2'
            sage: import os
            sage: os.unlink(filename)
        """
        # Evaluate the subclass-specific "load this file" command.
        self.eval(self._read_in_file_command(filename))
    def _read_in_file_command(self, filename):
        # Subclasses return the interpreter command that reads *filename*.
        raise NotImplementedError
    def eval(self, code, **kwds):
        """
        Evaluate code in an interface.
        This method needs to be implemented in sub-classes.
        Note that it is not always to be expected that
        it returns a non-empty string. In contrast,
        :meth:`get` is supposed to return the result of applying
        a print command to the object so that the output is easier
        to parse.
        Likewise, the method :meth:`_eval_line` for evaluation of a single
        line, often makes sense to be overridden.
        """
        raise NotImplementedError
    # By default a single line is evaluated exactly like eval();
    # subclasses typically override _eval_line separately.
    _eval_line = eval
    def execute(self, *args, **kwds):
        # Alias for eval().
        return self.eval(*args, **kwds)
    def __call__(self, x, name=None):
        r"""
        Create a new object in self from x.
        The object X returned can be used like any Sage object, and
        wraps an object in self. The standard arithmetic operators
        work. Moreover if foo is a function then
        X.foo(y,z,...)
        calls foo(X, y, z, ...) and returns the corresponding object.
        EXAMPLES::
            sage: gp(2)
            2
            sage: gp('2')
            2
            sage: a = gp(2); gp(a) is a
            True
        TESTS:
        Check conversion of Booleans (:trac:`28705`)::
            sage: giac(True)
            true
            sage: maxima(True)
            true
        """
        cls = self._object_class()
        #Handle the case when x is an object
        #in some interface.
        if isinstance(x, InterfaceElement):
            if x.parent() is self:
                return x
            #We convert x into an object in this
            #interface by first going through Sage.
            try:
                return self(x._sage_())
            except (NotImplementedError, TypeError):
                pass
        if isinstance(x, str):
            return cls(self, x, name=name)
        try:
            # Special methods do not and should not have an option to
            # set the name directly, as the identifier assigned by the
            # interface should stay consistent. An identifier with a
            # user-assigned name might change its value, so we return a
            # new element.
            result = self._coerce_from_special_method(x)
            return result if name is None else result.name(new_name=name)
        except TypeError:
            raise
        except AttributeError:
            pass
        try:
            result = self._coerce_impl(x, use_special=False)
            return result if name is None else result.name(new_name=name)
        except TypeError as msg:
            # Last resort: hand the string representation to the interpreter.
            try:
                return cls(self, str(x), name=name)
            except TypeError:
                raise TypeError(msg)
    def _coerce_from_special_method(self, x):
        """
        Tries to coerce to self by calling a special underscore method.
        If no such method is defined, raises an AttributeError instead of a
        TypeError.
        """
        s = '_%s_'%self.name()
        # maxima_lib and pari reuse the conversion methods of maxima and gp.
        if s == '_maxima_lib_':
            s = '_maxima_'
        if s == '_pari_':
            s = '_gp_'
        try:
            return (x.__getattribute__(s))(self)
        except AttributeError:
            return self(x._interface_init_())
    def _coerce_impl(self, x, use_special=True):
        # bool must be tested before int: bool is a subclass of int.
        if isinstance(x, bool):
            return self(self._true_symbol() if x else self._false_symbol())
        elif isinstance(x, int):
            import sage.rings.all
            return self(sage.rings.all.Integer(x))
        elif isinstance(x, float):
            import sage.rings.all
            return self(sage.rings.all.RDF(x))
        if use_special:
            try:
                return self._coerce_from_special_method(x)
            except AttributeError:
                pass
        if isinstance(x, (list, tuple)):
            A = []
            z = []
            cls = self._object_class()
            for v in x:
                if isinstance(v, cls):
                    A.append(v.name())
                    z.append(v)
                else:
                    w = self(v)
                    A.append(w.name())
                    z.append(w)
            X = ','.join(A)
            r = self.new('%s%s%s'%(self._left_list_delim(), X, self._right_list_delim()))
            r.__sage_list = z   # do this to avoid having the entries of the list be garbage collected
            return r
        raise TypeError("unable to coerce element into %s"%self.name())
    def new(self, code):
        # Convenience: create an interface element from source code.
        return self(code)
    ###################################################################
    # these should all be appropriately overloaded by the derived class
    ###################################################################
    def _left_list_delim(self):
        return "["
    def _right_list_delim(self):
        return "]"
    def _left_func_delim(self):
        return "("
    def _right_func_delim(self):
        return ")"
    def _assign_symbol(self):
        return "="
    def _equality_symbol(self):
        raise NotImplementedError
    # For efficiency purposes, you should definitely override these
    # in your derived class.
    def _true_symbol(self):
        # Cached: first call asks the interpreter for the value of '1 == 1'.
        try:
            return self.__true_symbol
        except AttributeError:
            self.__true_symbol = self.get('1 %s 1'%self._equality_symbol())
            return self.__true_symbol
    def _false_symbol(self):
        # Cached: first call asks the interpreter for the value of '1 == 2'.
        try:
            return self.__false_symbol
        except AttributeError:
            self.__false_symbol = self.get('1 %s 2'%self._equality_symbol())
            return self.__false_symbol
    def _lessthan_symbol(self):
        return '<'
    def _greaterthan_symbol(self):
        return '>'
    def _inequality_symbol(self):
        return '!='
    def _relation_symbols(self):
        """
        Returns a dictionary with operators as the keys and their
        string representation as the values.
        EXAMPLES::
            sage: import operator
            sage: symbols = mathematica._relation_symbols()
            sage: symbols[operator.eq]
            '=='
        """
        return dict([(operator.eq, self._equality_symbol()), (operator.ne, self._inequality_symbol()),
                     (operator.lt, self._lessthan_symbol()), (operator.le, "<="),
                     (operator.gt, self._greaterthan_symbol()), (operator.ge, ">=")])
    def _exponent_symbol(self):
        """
        Return the symbol used to denote *10^ in floats, e.g 'e' in 1.5e6
        EXAMPLES::
            sage: from sage.interfaces.expect import Expect
            sage: Expect('nonexistent_interface', 'fake')._exponent_symbol()
            'e'
        """
        return 'e'
    ############################################################
    # Functions for working with variables.
    # The first three must be overloaded by derived classes,
    # and the definition depends a lot on the class. But
    # the functionality one gets from this is very nice.
    ############################################################
    def set(self, var, value):
        """
        Set the variable var to the given value.
        """
        cmd = '%s%s%s;'%(var,self._assign_symbol(), value)
        self.eval(cmd)
    def get(self, var):
        """
        Get the value of the variable var.
        Note that this needs to be overridden in some interfaces,
        namely when getting the string representation of an object
        requires an explicit print command.
        """
        return self.eval(var)
    def get_using_file(self, var):
        r"""
        Return the string representation of the variable var in self,
        possibly using a file. Use this if var has a huge string
        representation, since it may be way faster.
        .. warning::
           In fact unless a special derived class implements this, it
           will *not* be any faster. This is the case for this class
           if you're reading it through introspection and seeing this.
        """
        return self.get(var)
    def clear(self, var):
        """
        Clear the variable named var.
        """
        # The variable is not deleted in the subprocess; its name is
        # simply recorded for reuse by _next_var_name().
        self._available_vars.append(var)
    def _next_var_name(self):
        # Prefer recycling a name freed by clear(); otherwise mint
        # a new sequential "sageN" identifier.
        if len(self._available_vars) != 0:
            v = self._available_vars[0]
            del self._available_vars[0]
            return v
        self.__seq += 1
        return "sage%s"%self.__seq
    def _create(self, value, name=None):
        # Assign *value* to a (possibly fresh) interpreter variable and
        # return that variable's name.
        name = self._next_var_name() if name is None else name
        self.set(name, value)
        return name
    def _object_class(self):
        """
        EXAMPLES::
            sage: from sage.interfaces.expect import Expect
            sage: Expect._object_class(maxima)
            <class 'sage.interfaces.expect.ExpectElement'>
        """
        return InterfaceElement
    def _function_class(self):
        """
        EXAMPLES::
            sage: from sage.interfaces.interface import Interface
            sage: Interface._function_class(maxima)
            <class 'sage.interfaces.interface.InterfaceFunction'>
        """
        return InterfaceFunction
    def _function_element_class(self):
        """
        EXAMPLES::
            sage: from sage.interfaces.interface import Interface
            sage: Interface._function_element_class(maxima)
            <class 'sage.interfaces.interface.InterfaceFunctionElement'>
        """
        return InterfaceFunctionElement
    def _convert_args_kwds(self, args=None, kwds=None):
        """
        Converts all of the args and kwds to be elements of this
        interface.
        EXAMPLES::
            sage: args = [5]
            sage: kwds = {'x': 6}
            sage: args, kwds = gap._convert_args_kwds(args, kwds)
            sage: args
            [5]
            sage: list(map(type, args))
            [<class 'sage.interfaces.gap.GapElement'>]
            sage: type(kwds['x'])
            <class 'sage.interfaces.gap.GapElement'>
        """
        args = [] if args is None else args
        kwds = {} if kwds is None else kwds
        if not isinstance(args, list):
            args = [args]
        for i, arg in enumerate(args):
            if not isinstance(arg, InterfaceElement) or arg.parent() is not self:
                args[i] = self(arg)
        for key, value in kwds.items():
            if not isinstance(value, InterfaceElement) or value.parent() is not self:
                kwds[key] = self(value)
        return args, kwds
    def _check_valid_function_name(self, function):
        """
        Checks to see if function is a valid function name in this
        interface. If it is not, an exception is raised. Otherwise, nothing
        is done.
        EXAMPLES::
            sage: gap._check_valid_function_name('SymmetricGroup')
            sage: gap._check_valid_function_name('')
            Traceback (most recent call last):
            ...
            ValueError: function name must be nonempty
            sage: gap._check_valid_function_name('__foo')
            Traceback (most recent call last):
            ...
            AttributeError
        """
        if function == '':
            raise ValueError("function name must be nonempty")
        if function[:2] == "__":
            raise AttributeError
    def function_call(self, function, args=None, kwds=None):
        """
        EXAMPLES::
            sage: maxima.quad_qags(x, x, 0, 1, epsrel=1e-4)
            [0.5,5.5511151231257...e-15,21,0]
            sage: maxima.function_call('quad_qags', [x, x, 0, 1], {'epsrel':'1e-4'})
            [0.5,5.5511151231257...e-15,21,0]
        """
        args, kwds = self._convert_args_kwds(args, kwds)
        self._check_valid_function_name(function)
        s = self._function_call_string(function,
                                       [s.name() for s in args],
                                       ['%s=%s'%(key,value.name()) for key, value in kwds.items()])
        return self.new(s)
    def _function_call_string(self, function, args, kwds):
        """
        Returns the string used to make function calls.
        EXAMPLES::
            sage: maxima._function_call_string('diff', ['f(x)', 'x'], [])
            'diff(f(x),x)'
        """
        return "%s(%s)"%(function, ",".join(list(args) + list(kwds)))
    def call(self, function_name, *args, **kwds):
        # Convenience wrapper around function_call().
        return self.function_call(function_name, args, kwds)
    def _contains(self, v1, v2):
        # Subclasses decide membership of v1 in v2 (by variable name).
        raise NotImplementedError
    def __getattr__(self, attrname):
        """
        TESTS::
            sage: ParentWithBase.__getattribute__(singular, '_coerce_map_from_')
            <bound method Singular._coerce_map_from_ of Singular>
        """
        try:
            return ParentWithBase.__getattribute__(self, attrname)
        except AttributeError:
            # Underscore names are never treated as interpreter
            # functions; re-raise for those.
            if attrname[:1] == "_":
                raise
            return self._function_class()(self, attrname)
    def console(self):
        raise NotImplementedError
    def help(self, s):
        return AsciiArtString('No help on %s available'%s)
@instancedoc
class InterfaceFunction(SageObject):
    """
    A named function of an interface, callable from Sage.
    """
    def __init__(self, parent, name):
        self._parent = parent
        self._name = name

    def _repr_(self):
        return "%s" % self._name

    def __call__(self, *args, **kwds):
        # Delegate to the interface's generic function-call machinery.
        parent = self._parent
        return parent.function_call(self._name, list(args), kwds)

    def _instancedoc_(self):
        """
        Return the interface's own help text for this function.

        EXAMPLES::

            sage: gp.gcd.__doc__
            'gcd(x,{y}): greatest common divisor of x and y.'
        """
        return self._parent.help(self._name)
@instancedoc
class InterfaceFunctionElement(SageObject):
    """
    A function bound to a particular interface element (a "method").
    """
    def __init__(self, obj, name):
        self._obj = obj
        self._name = name

    def _repr_(self):
        return "%s" % self._name

    def __call__(self, *args, **kwds):
        # Call the named function with the bound element prepended to args.
        parent = self._obj.parent()
        return parent.function_call(self._name, [self._obj] + list(args), kwds)

    def help(self):
        """Print the help text for this method."""
        print(self.__doc__)

    def _instancedoc_(self):
        """
        Return the interface's help text for this method.

        EXAMPLES::

            sage: gp(2).gcd.__doc__
            'gcd(x,{y}): greatest common divisor of x and y.'
        """
        return self._obj.parent().help(self._name)
def is_InterfaceElement(x):
    # True iff x wraps an object living in an external interface.
    return isinstance(x, InterfaceElement)
@instancedoc
class InterfaceElement(Element):
"""
Interface element.
"""
def __init__(self, parent, value, is_name=False, name=None):
Element.__init__(self, parent)
self._create = value
if parent is None:
return # means "invalid element"
# idea: Joe Wetherell -- try to find out if the output
# is too long and if so get it using file, otherwise
# don't.
if is_name:
self._name = value
else:
try:
self._name = parent._create(value, name=name)
except (TypeError, RuntimeError, ValueError) as x:
raise TypeError(x)
def _latex_(self):
# return "\\begin{verbatim}%s\\end{verbatim}"%self
string = str(self)
if not '|' in string:
delim = '|'
elif not '#' in string:
delim = '#'
elif not '@' in string:
delim = '@'
elif not '~' in string:
delim = '~'
return "\\verb%s%s%s"%(delim, string, delim)
def __iter__(self):
for i in range(1, len(self)+1):
yield self[i]
def __len__(self):
"""
Call self.sage() and return the length of that sage object.
This approach is inefficient - each interface should override
this method with one that calls the external program's length
function.
EXAMPLES::
sage: len(gp([1,2,3]))
3
AUTHORS:
- Felix Lawrence (2009-08-21)
"""
return len(self.sage())
def __reduce__(self):
"""
The default linearisation is to return self's parent,
which will then get the items returned by :meth:`_reduce`
as arguments to reconstruct the element.
EXAMPLES::
sage: G = gap.SymmetricGroup(6)
sage: loads(dumps(G)) == G # indirect doctest
True
sage: y = gap(34)
sage: loads(dumps(y))
34
sage: type(_)
<class 'sage.interfaces.gap.GapElement'>
sage: y = singular(34)
sage: loads(dumps(y))
34
sage: type(_)
<class 'sage.interfaces.singular.SingularElement'>
sage: G = gap.PolynomialRing(QQ, ['x'])
sage: loads(dumps(G))
PolynomialRing( Rationals, ["x"] )
sage: S = singular.ring(0, ('x'))
sage: loads(dumps(S))
polynomial ring, over a field, global ordering
// coefficients: QQ
// number of vars : 1
// block 1 : ordering lp
// : names x
// block 2 : ordering C
Here are further examples of pickling of interface elements::
sage: loads(dumps(gp('"abc"')))
abc
sage: loads(dumps(gp([1,2,3])))
[1, 2, 3]
sage: loads(dumps(pari('"abc"')))
"abc"
sage: loads(dumps(pari([1,2,3])))
[1, 2, 3]
sage: loads(dumps(r('"abc"')))
[1] "abc"
sage: loads(dumps(r([1,2,3])))
[1] 1 2 3
sage: loads(dumps(maxima([1,2,3])))
[1,2,3]
Unfortunately, strings in maxima can't be pickled yet::
sage: loads(dumps(maxima('"abc"')))
Traceback (most recent call last):
...
TypeError: unable to make sense of Maxima expression '"abc"' in Sage
"""
return self.parent(), (self._reduce(),)
def _reduce(self):
"""
Helper for pickling.
By default, if self is a string, then the representation of
that string is returned (not the string itself). Otherwise,
it is attempted to return the corresponding Sage object.
If this fails with a NotImplementedError, the string
representation of self is returned instead.
EXAMPLES::
sage: S = singular.ring(0, ('x'))
sage: S._reduce()
Univariate Polynomial Ring in x over Rational Field
sage: G = gap.PolynomialRing(QQ, ['x'])
sage: G._reduce()
'PolynomialRing( Rationals, ["x"] )'
sage: G.sage()
Traceback (most recent call last):
...
NotImplementedError: Unable to parse output: PolynomialRing( Rationals, ["x"] )
sage: singular('"abc"')._reduce()
"'abc'"
sage: singular('1')._reduce()
1
TESTS:
Special care has to be taken with strings. Since for example `r("abc")` will be
interpreted as the R-command abc (not a string in R), we have to reduce to
`"'abc'"` instead. That is dependant on the Elements `is_string` function to
be implemented correctly. This has gone wrong in the past and remained uncaught
by the doctests because the original identifier was reused. This test makes sure
that does not happen again:
sage: a = r("'abc'")
sage: b = dumps(a)
sage: r.set(a.name(), 0) # make identifier reuse doesn't accidentally lead to success
sage: loads(b)
[1] "abc"
"""
if self.is_string():
return repr(self.sage())
try:
return self.sage()
except NotImplementedError:
return repr(self)
def __call__(self, *args):
    """
    Call this element as a function of the interface, forwarding
    ``*args`` to the interface function of the same name.
    """
    self._check_valid()
    interface = self.parent()
    func = getattr(interface, self.name())
    return func(*args)
def __contains__(self, x):
    """
    Membership test, delegated to the interface's containment check.
    ``x`` is first converted into an element of the same interface
    when necessary.
    """
    P = self._check_valid()
    same_interface = (isinstance(x, InterfaceElement) and
                      x.parent() is self.parent())
    if not same_interface:
        x = P.new(x)
    return P._contains(x.name(), self.name())
def _instancedoc_(self):
    """
    Use the string form of this element as its documentation.

    EXAMPLES::

        sage: gp(2).__doc__
        '2'
    """
    return '%s' % self
def __hash__(self):
    """
    Return the hash of self: a default implementation that simply
    hashes the string representation of this element.
    """
    return hash(str(self))
def _richcmp_(self, other, op):
    """
    Comparison of interface elements.

    NOTE:

    GAP has a special role here. It may in some cases raise an error
    when comparing objects, which is unwanted in Python. We catch
    these errors. Moreover, GAP does not recognise certain objects as
    equal even if there definitions are identical.

    NOTE:

    This methods need to be overridden if the subprocess would
    not return a string representation of a boolean value unless
    an explicit print command is used.

    TESTS:

    Here are examples in which GAP succeeds with a comparison::

        sage: gap('SymmetricGroup(8)')==gap('SymmetricGroup(8)')
        True
        sage: gap('SymmetricGroup(8)')>gap('AlternatingGroup(8)')
        False
        sage: gap('SymmetricGroup(8)')<gap('AlternatingGroup(8)')
        True

    Here, GAP fails to compare, and so ``False`` is returned.
    In previous Sage versions, this example actually resulted
    in an error; compare :trac:`5962`.

    ::

        sage: gap('DihedralGroup(8)')==gap('DihedralGroup(8)')
        False
    """
    P = self._check_valid()
    # Ask the interface whether self == other; equality wins first.
    try:
        if P.eval("%s %s %s"%(self.name(), P._equality_symbol(),
                             other.name())) == P._true_symbol():
            return rich_to_bool(op, 0)
    except RuntimeError:
        # The backend may fail to compare (e.g. GAP); fall through.
        pass
    # Then try self < other.
    try:
        if P.eval("%s %s %s"%(self.name(), P._lessthan_symbol(), other.name())) == P._true_symbol():
            return rich_to_bool(op, -1)
    except RuntimeError:
        pass
    # Finally self > other; a broader except here tolerates backends
    # with no greater-than symbol at all.
    try:
        if P.eval("%s %s %s"%(self.name(), P._greaterthan_symbol(), other.name())) == P._true_symbol():
            return rich_to_bool(op, 1)
    except Exception:
        pass
    # No comparison could be decided by the interface.
    return NotImplemented
def is_string(self):
    """
    Tell whether this element is a string.

    The generic answer is negative; interfaces whose elements can be
    strings override this method.
    """
    return False
def _matrix_(self, R):
    # Hook for conversion to a Sage matrix over the ring R; the
    # generic interface element does not implement it.
    raise NotImplementedError
def _vector_(self, R):
    # Hook for conversion to a Sage vector over the ring R; the
    # generic interface element does not implement it.
    raise NotImplementedError
def _check_valid(self):
    """
    Check that this object is valid, i.e., the session in which this
    object is defined is still running, and return the parent
    interface.  This is relevant for interpreters that can't be
    interrupted via ctrl-C, hence get restarted.

    Raises a ``ValueError`` when the session is no longer available.
    """
    try:
        P = self.parent()
    except AttributeError:
        P = None
    if P is None:
        # The original formatted its message with P.name(), but on this
        # path P is None, so that call always raised AttributeError and
        # the intended message could never be produced; raise the
        # generic message directly instead.
        raise ValueError("The session in which this object was defined is no longer running.")
    return P
def __del__(self):
    # Destructor: free the interface-side variable backing this element
    # when the Python wrapper is garbage collected.
    try:
        self._check_valid()
    except ValueError:
        # The session is gone; nothing to clean up.
        return
    if hasattr(self, '_name'):
        P = self.parent()
        if not (P is None):
            # Best effort: ask the interface to clear the variable.
            P.clear(self._name)
def _sage_repr(self):
    """
    Return a sage-friendly string representation of the object.

    Some programs use different notation to Sage, e.g. Mathematica
    writes lists with {} instead of [].  This method calls repr(self)
    and then converts the foreign function delimiters, list delimiters
    and exponent symbol into Sage's notation.

    OUTPUT:

    A string representation of the object that is ready for
    sage_eval().

    AUTHORS:

    - Felix Lawrence (2009-08-21)
    """
    # TO DO: this could use file transfers when self.is_remote()
    text = repr(self).replace('\n', ' ').replace('\r', '')
    P = self.parent()
    # Pairs of (foreign token, Sage token); only substitute when the
    # interface actually uses a different token.
    translations = [
        (P._left_func_delim(), '('),
        (P._right_func_delim(), ')'),
        (P._left_list_delim(), '['),
        (P._right_list_delim(), ']'),
        (P._exponent_symbol(), 'e'),
    ]
    for foreign, native in translations:
        if foreign != native:
            text = text.replace(foreign, native)
    return text
def _sage_(self):
    """
    Attempt to return a Sage version of this object, by evaluating
    its Sage-friendly string representation with ``sage_eval``.

    EXAMPLES::

        sage: gp(1/2)._sage_()
        1/2

    AUTHORS:

    - William Stein

    - Felix Lawrence (2009-08-21)
    """
    text = self._sage_repr()
    try:
        return sage.misc.sage_eval.sage_eval(text)
    except Exception:
        raise NotImplementedError("Unable to parse output: %s" % text)
def sage(self, *args, **kwds):
    """
    Attempt to return a Sage version of this object.

    This method does nothing more than forwarding to :meth:`_sage_`
    together with any additional arguments.

    EXAMPLES::

        sage: gp(1/2).sage()
        1/2
    """
    return self._sage_(*args, **kwds)
def __repr__(self):
    """
    To obtain the string representation, it is first checked whether
    the element is still valid. Then, if ``self._cached_repr`` is
    a string then it is returned. Otherwise, ``self._repr_()``
    is called (and the result is cached, if ``self._cached_repr``
    evaluates to ``True``).

    If the string obtained so far contains ``self._name``, then it
    is replaced by ``self``'s custom name, if available.

    To implement a custom string representation, override the method
    ``_repr_``, but do not override this double underscore method.

    EXAMPLES:

    Here is one example showing that the string representation will
    be cached when requested::

        sage: from sage.interfaces.maxima_lib import maxima_lib
        sage: M = maxima_lib('sqrt(2) + 1/3')
        sage: M._cached_repr
        True
        sage: repr(M) is repr(M)      # indirect doctest
        True
        sage: M._cached_repr
        'sqrt(2)+1/3'
        sage: M
        sqrt(2)+1/3

    If the interface breaks then it is reflected in the string representation::

        sage: s = singular('2')
        sage: s
        2
        sage: singular.quit()
        sage: s
        (invalid Singular object -- The singular session in which this object was defined is no longer running.)
    """
    # An invalid session is reported rather than raising.
    try:
        self._check_valid()
    except ValueError as msg:
        return '(invalid {} object -- {})'.format(self.parent() or type(self), msg)
    # Use the cached string when present; a non-string truthy value
    # merely requests that the result be cached below.
    cr = getattr(self, '_cached_repr', None)
    if isinstance(cr, str):
        s = cr
    else:
        s = self._repr_()
    # Substitute the internal variable name with the custom name, if set.
    if self._name in s:
        try:
            s = s.replace(self._name, getattr(self, '__custom_name'))
        except AttributeError:
            pass
    if cr:
        self._cached_repr = s
    return s
def _repr_(self):
    """
    Default implementation of a helper method for string representation.

    It is supposed that immediately before calling this method,
    the validity of ``self``'s parent was confirmed. So, when you
    override this method, you can assume that the parent is valid.

    TESTS:

    In :trac:`22501`, several string representation methods have been
    removed in favour of using the default implementation::

        sage: gap(SymmetricGroup(8))  # indirect doctest
        SymmetricGroup( [ 1 .. 8 ] )
        sage: gap(2)
        2
        sage: maxima('sqrt(2) + 1/3')
        sqrt(2)+1/3
    """
    P = self.parent()
    # The original returned None when '_get_using_file' existed but was
    # falsy (the 'if' failed and no exception fired); read the flag
    # defensively and always fall back to a plain get.
    if getattr(self, '_get_using_file', False):
        return P.get_using_file(self._name).rstrip()
    return P.get(self._name).rstrip()
def __getattr__(self, attrname):
    """
    Dynamic attribute access: an unknown, non-underscore attribute
    becomes an interface function element bound to this object.
    """
    try:
        P = self._check_valid()
    except ValueError:
        raise AttributeError(attrname)
    if attrname.startswith("_"):
        raise AttributeError
    return P._function_element_class()(self, attrname)
def get_using_file(self):
    """
    Return this element's string representation using a file.  Use
    this if self has a huge string representation, since it is then
    much faster than going through the pipe.
    """
    try:
        P = self._check_valid()
    except ValueError as msg:
        return '(invalid {} object -- {})'.format(self.parent() or type(self), msg)
    return P.get_using_file(self._name)
def hasattr(self, attrname):
    """
    Return whether the given attribute is already defined by this
    object, and in particular is not dynamically generated (dynamic
    attributes come back as interface function elements).

    EXAMPLES::

        sage: m = maxima('2')
        sage: m.hasattr('integral')
        True
        sage: m.hasattr('gcd')
        False
    """
    found = getattr(self, attrname)
    dynamic = isinstance(found, (InterfaceFunctionElement, InterfaceElement))
    return not dynamic
def attribute(self, attrname):
    """
    If this wraps the object x in the system, return the interface
    object ``x.attrname``.  Useful for systems that have object
    oriented attribute access notation.

    EXAMPLES::

        sage: e = gp('ellinit([0,-1,1,-10,-20])')
        sage: e.attribute('j')
        -122023936/161051
    """
    P = self._check_valid()
    expression = '%s.%s' % (self.name(), attrname)
    return P(expression)
def __getitem__(self, n):
    """
    Index into this element using the interface's bracket syntax;
    a tuple index becomes a comma-separated list of indices.
    """
    P = self._check_valid()
    if isinstance(n, tuple):
        index = str(n)[1:-1]
    else:
        index = n
    return P.new('%s[%s]' % (self._name, index))
def __int__(self):
    """
    Convert to a Python int by parsing the string representation.

    EXAMPLES::

        sage: int(maxima('1'))
        1
    """
    text = repr(self)
    return int(text)
def bool(self):
    """
    Convert this element to a boolean (delegates to ``__bool__``).

    EXAMPLES::

        sage: singular(0).bool()
        False
        sage: singular(1).bool()
        True
    """
    return self.__bool__()
def __bool__(self):
    """
    Return whether this element is not ``False``.

    .. NOTE::

        This method needs to be overridden if the subprocess would
        not return a string representation of a boolean value unless
        an explicit print command is used.

    EXAMPLES::

        sage: bool(maxima(0))
        False
        sage: bool(maxima(1))
        True

    TESTS:

    By default this returns ``True`` for elements that are considered to be
    not ``False`` by the interface (:trac:`28705`)::

        sage: bool(giac('"a"'))
        True
    """
    P = self._check_valid()
    # Ask the interface whether self equals its 'false' constant;
    # anything else (including non-boolean objects) counts as True.
    cmd = '%s %s %s' % (self._name, P._equality_symbol(),
                        P._false_symbol())
    return P.eval(cmd) != P._true_symbol()

# Python 2 alias for the truth-value protocol.
__nonzero__ = __bool__
def __float__(self):
    """
    Convert to a Python float by parsing the string representation.

    EXAMPLES::

        sage: m = maxima('1/2')
        sage: float(m)
        0.5
    """
    text = repr(self)
    return float(text)
def _integer_(self, ZZ=None):
    """
    Convert to a Sage Integer by parsing the string representation
    (the ``ZZ`` argument is accepted for the coercion protocol and
    ignored).

    EXAMPLES::

        sage: maxima('1')._integer_()
        1
    """
    import sage.rings.all as rings
    return rings.Integer(repr(self))
def _rational_(self):
    """
    Convert to a Sage Rational by parsing the string representation.

    EXAMPLES::

        sage: maxima('1/2')._rational_()
        1/2
    """
    import sage.rings.all as rings
    return rings.Rational(repr(self))
def name(self, new_name=None):
    """
    Return the name of self.  If ``new_name`` is passed in, then a new
    object identical to self is returned whose name is ``new_name``.

    Note that this can overwrite existing variables in the system.

    EXAMPLES::

        sage: s5 = gap.SymmetricGroup(5).name('s5')
        sage: s5.name()
        's5'
    """
    if new_name is None:
        return self._name
    if not isinstance(new_name, str):
        raise TypeError("new_name must be a string")
    P = self.parent()
    P.set(new_name, self._name)
    return P._object_class()(P, new_name, is_name=True)
def gen(self, n):
    """
    Return the interface object obtained by the dotted access
    ``self.<n>`` for an integer index n.
    """
    P = self._check_valid()
    expression = '%s.%s' % (self._name, int(n))
    return P.new(expression)
def _operation(self, operation, other=None):
    r"""
    Return the result of applying the binary operation ``operation``
    to ``self`` and ``other``, or the unary operation to ``self`` if
    ``other`` is not given.

    This utility factors out the commonality of the arithmetic
    operations of interface elements.

    INPUT:

    - ``operation`` -- a string representing the operation, e.g. '*'
      or '1/'.

    - ``other`` -- the other operand; ``None`` means the operation is
      unary.

    OUTPUT: an interface element; a ``TypeError`` is raised when the
    interface fails to execute the command.
    """
    P = self._check_valid()
    if other is None:
        pieces = [operation, self._name]
    else:
        pieces = [self._name, operation, other._name]
    command = ' '.join(pieces)
    try:
        return P.new(command)
    except Exception as msg:
        raise TypeError(msg)
def _add_(self, right):
    """
    Sum of two interface elements, computed in the external system
    via :meth:`_operation`.

    EXAMPLES::

        sage: maxima.cos(x) + maxima.sin(x)
        sin(_SAGE_VAR_x)+cos(_SAGE_VAR_x)
    """
    return self._operation("+", right)
def _sub_(self, right):
    """
    Difference of two interface elements, computed in the external
    system via :meth:`_operation`.

    EXAMPLES::

        sage: maxima.cos(x) - maxima.sin(x)
        cos(_SAGE_VAR_x)-sin(_SAGE_VAR_x)
    """
    return self._operation('-', right)
def _neg_(self):
    """
    Negation, computed in the external system as the unary '-'.

    EXAMPLES::

        sage: -maxima('sin(x)')
        -sin(x)
    """
    return self._operation('-')
def _mul_(self, right):
    """
    Product of two interface elements, computed in the external
    system via :meth:`_operation`.

    EXAMPLES::

        sage: maxima.cos(x) * maxima.sin(x)
        cos(_SAGE_VAR_x)*sin(_SAGE_VAR_x)
    """
    return self._operation('*', right)
def _div_(self, right):
    """
    Quotient of two interface elements, computed in the external
    system via :meth:`_operation`.

    EXAMPLES::

        sage: maxima.cos(x) / maxima.sin(x)
        cos(_SAGE_VAR_x)/sin(_SAGE_VAR_x)
    """
    return self._operation("/", right)
def __invert__(self):
    """
    Multiplicative inverse, computed in the external system as
    '1/ self'.

    EXAMPLES::

        sage: ~maxima('sin(x)')
        1/sin(x)
    """
    return self._operation('1/')
def _mod_(self, right):
    """
    Remainder of two interface elements, computed in the external
    system with the '%' operator.

    EXAMPLES::

        sage: gp("x^3 + x") % gp("2*x + 1")
        -5/8
    """
    return self._operation("%", right)
def __pow__(self, n):
    """
    Exponentiation, computed in the external system; the exponent is
    first coerced into this element's interface when it does not
    already live there.

    EXAMPLES::

        sage: maxima('2')^(3/4)
        2^(3/4)
    """
    P = self._check_valid()
    exponent = n if parent(n) is P else P(n)
    return self._operation("^", exponent)
| 30.086957
| 316
| 0.532093
|
4a04353e29e96274ea79aab845de6c692d22e3e6
| 503
|
py
|
Python
|
light_meter.py
|
tchamberlin/odroid_prometheus
|
204ff88693436777a643e16da0ca9c2e1c8a4611
|
[
"MIT"
] | null | null | null |
light_meter.py
|
tchamberlin/odroid_prometheus
|
204ff88693436777a643e16da0ca9c2e1c8a4611
|
[
"MIT"
] | 1
|
2020-03-02T16:01:58.000Z
|
2020-03-02T16:01:58.000Z
|
light_meter.py
|
tchamberlin/odroid_prometheus
|
204ff88693436777a643e16da0ca9c2e1c8a4611
|
[
"MIT"
] | null | null | null |
"""Real-time monitor of light level"""
import math
import os
import sys
import time
import prometheus_client as prom
import SI1132
import BME280
I2C_DEVICE_FILE = "/dev/i2c-2"
def main():
    """Continuously poll the SI1132 sensor and print the visible-light lux.

    Any command-line argument enables 'verbose' mode, which clears the
    terminal before each reading.
    """
    verbose = len(sys.argv) > 1
    sensor = SI1132.SI1132(I2C_DEVICE_FILE)
    while True:
        lux = sensor.readVisible()
        if verbose:
            os.system("clear")
        print(f"lux: {lux:e}")
        time.sleep(0.05)
# Run the monitor loop only when executed as a script.
if __name__ == "__main__":
    main()
| 15.71875
| 48
| 0.642147
|
4a0438ddbfaa1446af2969b28b67b8189fff95f4
| 136,175
|
py
|
Python
|
madgraph/loop/loop_helas_objects.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 1
|
2019-12-14T15:25:38.000Z
|
2019-12-14T15:25:38.000Z
|
madgraph/loop/loop_helas_objects.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 26
|
2018-10-08T15:49:32.000Z
|
2020-05-15T13:33:36.000Z
|
madgraph/loop/loop_helas_objects.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 2
|
2019-03-25T17:28:48.000Z
|
2021-04-21T12:15:53.000Z
|
################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""Definitions of objects inheriting from the classes defined in
helas_objects.py and which have special attributes and function
devoted to the treatment of Loop processes"""
import array
import copy
import logging
import itertools
import math
import aloha
import aloha.create_aloha as create_aloha
from madgraph import MadGraph5Error
import madgraph.core.base_objects as base_objects
import madgraph.loop.loop_base_objects as loop_base_objects
import madgraph.core.diagram_generation as diagram_generation
import madgraph.loop.loop_diagram_generation as loop_diagram_generation
import madgraph.core.color_amp as color_amp
import madgraph.loop.loop_color_amp as loop_color_amp
import madgraph.core.color_algebra as color
import madgraph.core.helas_objects as helas_objects
import madgraph.various.misc as misc
#===============================================================================
#
#===============================================================================
logger = logging.getLogger('madgraph.helas_objects')
#===============================================================================
# LoopUVCTHelasAmplitude
#===============================================================================
class LoopHelasUVCTAmplitude(helas_objects.HelasAmplitude):
"""LoopHelasUVCTAmplitude object, behaving exactly as an amplitude except that
it also contains additional vertices with coupling constants corresponding
to the 'UVCTVertices' defined in the 'UVCTVertices ' of the
loop_base_objects.LoopUVCTDiagram of the LoopAmplitude. These are stored
in the additional attribute 'UVCT_interaction_ids' of this class.
"""
# Customized constructor
def __init__(self, *arguments):
"""Constructor for the LoopHelasAmplitude. For now, it works exactly
as for the HelasMatrixElement one."""
if arguments:
super(LoopHelasUVCTAmplitude, self).__init__(*arguments)
else:
super(LoopHelasUVCTAmplitude, self).__init__()
def default_setup(self):
"""Default values for all properties"""
super(LoopHelasUVCTAmplitude,self).default_setup()
# Store interactions ID of the UV counterterms related to this diagram
self['UVCT_couplings'] = []
self['UVCT_orders'] = {}
def filter(self, name, value):
"""Filter for valid LoopHelasAmplitude property values."""
if name=='UVCT_couplings':
if not isinstance(value, list):
raise self.PhysicsObjectError, \
"%s is not a valid list for UVCT_couplings" % str(value)
for id in value:
if not isinstance(id, str) and not isinstance(id, int):
raise self.PhysicsObjectError, \
"%s is not a valid string or integer for UVCT_couplings" % str(value)
if name == 'UVCT_orders':
if not isinstance(value, dict):
raise self.PhysicsObjectError, \
"%s is not a valid dictionary" % str(value)
if name == 'type':
if not isinstance(value, str):
raise self.PhysicsObjectError, \
"%s is not a valid string" % str(value)
else:
return super(LoopHelasUVCTAmplitude,self).filter(name, value)
def get_sorted_keys(self):
"""Return LoopHelasAmplitude property names as a nicely sorted list."""
return super(LoopHelasUVCTAmplitude,self).get_sorted_keys()+\
['UVCT_couplings','UVCT_orders','type']
return True
def get_call_key(self):
""" Exactly as a regular HelasAmplitude except that here we must add
an entry to mutliply the final result by the coupling constants of the
interaction in UVCT_couplings if there are any"""
original_call_key = super(LoopHelasUVCTAmplitude,self).get_call_key()
if self.get_UVCT_couplings()=='1.0d0':
return original_call_key
else:
return (original_call_key[0],original_call_key[1],'UVCT')
def get_used_UVCT_couplings(self):
""" Returns a list of the string UVCT_couplings defined for this
amplitudes. """
return [coupl for coupl in self['UVCT_couplings'] if \
isinstance(coupl,str)]
def get_UVCT_couplings(self):
""" Returns the string corresponding to the overall UVCT coupling which
factorize this amplitude """
if self['UVCT_couplings']==[]:
return '1.0d0'
answer=[]
integer_sum=0
for coupl in list(set(self['UVCT_couplings'])):
if isinstance(coupl,int):
integer_sum+=coupl
else:
answer.append(str(len([1 for c in self['UVCT_couplings'] if \
c==coupl]))+'.0d0*'+coupl)
if integer_sum!=0:
answer.append(str(integer_sum)+'.0d0')
if answer==[] and (integer_sum==0 or integer_sum==1):
return '1.0d0'
else:
return '+'.join(answer)
def get_base_diagram(self, wf_dict, vx_list = [], optimization = 1):
"""Return the loop_base_objects.LoopUVCTDiagram which corresponds to this
amplitude, using a recursive method for the wavefunctions."""
vertices = super(LoopHelasUVCTAmplitude,self).get_base_diagram(\
wf_dict, vx_list, optimization)['vertices']
return loop_base_objects.LoopUVCTDiagram({'vertices': vertices, \
'UVCT_couplings': self['UVCT_couplings'], \
'UVCT_orders': self['UVCT_orders'], \
'type': self['type']})
def get_helas_call_dict(self, index=1, OptimizedOutput=False,\
specifyHel=True, **opt):
""" return a dictionary to be used for formatting
HELAS call. """
out = helas_objects.HelasAmplitude.get_helas_call_dict(self,
index=index,OptimizedOutput=OptimizedOutput)
out['uvct'] = self.get_UVCT_couplings()
out.update(opt)
return out
#===============================================================================
# LoopHelasAmplitude
#===============================================================================
class LoopHelasAmplitude(helas_objects.HelasAmplitude):
"""LoopHelasAmplitude object, behaving exactly as an amplitude except that
it also contains loop wave-functions closed on themselves, building an
amplitude corresponding to the closed loop.
"""
# Customized constructor
def __init__(self, *arguments):
"""Constructor for the LoopHelasAmplitude. For now, it works exactly
as for the HelasMatrixElement one."""
if arguments:
super(LoopHelasAmplitude, self).__init__(*arguments)
else:
super(LoopHelasAmplitude, self).__init__()
def is_equivalent(self, other):
    """Comparison between different LoopHelasAmplitude in order to recognize
    which ones are equivalent at the level of the file output.
    I decided not to overload the operator __eq__ to be sure not to interfere
    with other functionalities of the code."""
    # Cheap structural checks first: the number of wavefunctions and
    # amplitudes, and the lengths of their coupling lists, must match
    # position by position.
    if(len(self.get('wavefunctions'))!=len(other.get('wavefunctions')) or
       len(self.get('amplitudes'))!=len(other.get('amplitudes')) or
       [len(wf.get('coupling')) for wf in self.get('wavefunctions')]!=
       [len(wf.get('coupling')) for wf in other.get('wavefunctions')] or
       [len(amp.get('coupling')) for amp in self.get('amplitudes')]!=
       [len(amp.get('coupling')) for amp in other.get('amplitudes')]):
        return False
    # Per-wavefunction attributes that must agree pairwise, in order.
    wfArgsToCheck = ['fermionflow','lorentz','state','onshell','spin',\
                     'is_part','self_antipart','color']
    for arg in wfArgsToCheck:
        if [wf.get(arg) for wf in self.get('wavefunctions')]!=\
           [wf.get(arg) for wf in other.get('wavefunctions')]:
            return False
    # The outgoing leg positions must also line up.
    if [wf.find_outgoing_number() for wf in self.get('wavefunctions')]!=\
       [wf.find_outgoing_number() for wf in other.get('wavefunctions')]:
        return False
    # Per-amplitude attributes checked in the same pairwise fashion.
    ampArgsToCheck = ['lorentz',]
    for arg in ampArgsToCheck:
        if [amp.get(arg) for amp in self.get('amplitudes')]!=\
           [amp.get(arg) for amp in other.get('amplitudes')]:
            return False
    # Finally just check that the loop and external mother wavefunctions
    # of the loop wavefunctions and loop amplitudes arrive at the same places
    # in both self and other. The characteristics of the mothers is irrelevant,
    # the only thing that matters is that the loop-type and external-type mothers
    # are in the same order.
    if [[m.get('is_loop') for m in lwf.get('mothers')] for lwf in self.get('wavefunctions')]!=\
       [[m.get('is_loop') for m in lwf.get('mothers')] for lwf in other.get('wavefunctions')]:
        return False
    if [[m.get('is_loop') for m in lwf.get('mothers')] for lwf in self.get('amplitudes')]!=\
       [[m.get('is_loop') for m in lwf.get('mothers')] for lwf in other.get('amplitudes')]:
        return False
    return True
def default_setup(self):
    """Default values for all properties"""
    super(LoopHelasAmplitude,self).default_setup()
    # Store the wavefunctions building this loop
    self['wavefunctions'] = helas_objects.HelasWavefunctionList()
    # In this first version, a LoopHelasAmplitude is always built out of
    # a single amplitude, it was realized later that one would never need
    # more than one. But until now we kept the structure as such.
    self['amplitudes'] = helas_objects.HelasAmplitudeList()
    # The pairing is used for the output to know at each loop interactions
    # how many non-loop mothers are necessary. This list is ordered as the
    # helas calls building the loop
    self['pairing'] = []
    # To keep the 'type' (L-cut particle ID) of the LoopDiagram this
    # Loop amplitude tracks.
    # In principle this info is recoverable from the loop wfs.
    self['type'] = -1
    # The loop_group_id gives the place of this LoopHelasAmplitude
    # in the 'loop_groups' attribute of the LoopHelasMatrixElement it belongs
    # to.
    self['loop_group_id']=-1
    # To store the symmetry factor of the loop; 0 means "not yet
    # computed" (see the lazy computation in get()).
    self['loopsymmetryfactor'] = 0
    # Loop diagrams can be identified to others which are numerically exactly
    # equivalent. This is the case for example for the closed massless quark
    # loops. In this case, only one copy of the diagram is kept and this
    # multiplier attribute is set the to number of identified diagrams.
    # At the Helas level, this multiplier is given to each LoopHelasAmplitude
    self['multiplier'] = 1
# Enhanced get function
def get(self, name):
    """Get the value of the property ``name``, lazily computing the loop
    symmetry factor on first access (it is stored as 0 until computed)."""
    wants_symmetry_factor = (name == 'loopsymmetryfactor')
    if wants_symmetry_factor and not self[name]:
        self.calculate_loopsymmetryfactor()
    return super(LoopHelasAmplitude, self).get(name)
def filter(self, name, value):
    """Filter for valid LoopHelasAmplitude property values."""
    if name=='wavefunctions':
        if not isinstance(value, helas_objects.HelasWavefunctionList):
            raise self.PhysicsObjectError, \
              "%s is not a valid list of HelasWaveFunctions" % str(value)
        # Every wavefunction of a loop amplitude must itself be a loop one.
        for wf in value:
            if not wf['is_loop']:
                raise self.PhysicsObjectError, \
                  "Wavefunctions from a LoopHelasAmplitude must be from a loop."
    elif name=='amplitudes':
        if not isinstance(value, helas_objects.HelasAmplitudeList):
            raise self.PhysicsObjectError, \
              "%s is not a valid list of HelasAmplitudes" % str(value)
    elif name in ['type','loop_group_id','multiplier','loopsymmetryfactor']:
        if not isinstance(value, int):
            raise self.PhysicsObjectError, \
              "%s is not a valid integer for the attribute '%s'" %(str(value),name)
    else:
        # Unknown names are validated by the parent class.
        return super(LoopHelasAmplitude,self).filter(name, value)
    return True
def get_sorted_keys(self):
    """Return LoopHelasAmplitude property names as a nicely sorted list."""
    keys = super(LoopHelasAmplitude, self).get_sorted_keys()
    return keys + ['wavefunctions', 'amplitudes', 'loop_group_id']
def get_lcut_size(self):
    """Return the wavefunction size (i.e. number of elements) based on
    the spin of the L-cut particle."""
    lcut_spin = self.get_final_loop_wavefunction().get('spin')
    return helas_objects.HelasWavefunction.spin_to_size(lcut_spin)
def get_starting_loop_wavefunction(self):
    """Return the starting external loop mother of this loop helas
    amplitude, i.e. the loop wavefunction of the l-cut leg one, found by
    walking the chain of loop mothers back from the final one."""
    current = self.get_final_loop_wavefunction()
    previous = current.get_loop_mother()
    while previous:
        current, previous = previous, previous.get_loop_mother()
    return current
def get_final_loop_wavefunction(self):
    """Return the non-external loop mother of the helas amplitude building
    this loop amplitude"""
    # The single amplitude closing the loop has exactly one loop mother
    # with mothers of its own (the external l-cut legs have none).
    final_lwf=[lwf for lwf in self.get('amplitudes')[0].get('mothers') if \
               lwf.get('mothers')]
    if len(final_lwf)!=1:
        raise MadGraph5Error, 'The helas amplitude building the helas loop'+\
          ' amplitude should be made of exactly one loop wavefunctions'+\
          ' with mothers.'
    return final_lwf[0]
def get_base_diagram(self, wf_dict, vx_list = [], optimization = 1):
    """Return the loop_base_objects.LoopDiagram which corresponds to this
    amplitude, using a recursive method for the wavefunctions.
    Remember that this diagram is not tagged and structures are not
    recognized.

    NOTE(review): ``vx_list`` has a mutable default; it appears to be
    passed straight through to the underlying HelasAmplitude — confirm
    it is never mutated in place.
    """
    vertices = self['amplitudes'][0].get_base_diagram(\
        wf_dict, vx_list, optimization)['vertices']
    out = loop_base_objects.LoopDiagram({'vertices': vertices,\
                                         'type':self['type']})
    # The generation of Helas diagram sometimes return that the two
    # loop external wavefunctions have the same external_id due to the
    # recycling of the first external wavefunctions.
    # i. e. ((5(5*),1(21)>1(5*),id:160),(1(5*),2(21)>1(5*),id:160),(1(5*),3(37)>1(6*),id:21),(1(6*),4(-37)>1(5*),id:22),(5(-5*),1(5*),id:-1))
    # This only problematic when creating diagram with get_base_amplitude and
    # using them for the identifyME tagging
    starting_loop_line = out.get_starting_loop_line()
    finishing_loop_line = out.get_finishing_loop_line()
    if starting_loop_line['number'] == finishing_loop_line['number']:
        # This is the problematic case.
        # Since both particles have the same id, the routine get_external_legs
        # is always missing a particle. So we need to add one to have the correct
        # number of external particles (including the l-cut particle)
        nb_external = len(out.get_external_legs()) +1
        if nb_external == starting_loop_line['number']:
            starting_loop_line.set('number', nb_external -1)
        else:
            starting_loop_line.set('number', nb_external)
    return out
def set_mothers_and_pairing(self):
""" Sets the mothers of this amplitude in the same order as they will
be used in the arguments of the helas calls building this loop"""
if len(self.get('amplitudes'))!=1:
self.PhysicsObjectError, \
"HelasLoopAmplitude is for now designed to contain only one \
HelasAmplitude"
self.set('mothers',helas_objects.HelasWavefunctionList())
for lwf in [wf for wf in self.get('wavefunctions') if wf.get('mothers')]:
mothersList=[wf for wf in lwf.get('mothers') if not wf['is_loop']]
self['mothers'].extend(mothersList)
self['pairing'].append(len(mothersList))
def get_vertex_leg_numbers(self,
veto_inter_id=base_objects.Vertex.ID_to_veto_for_multichanneling,
max_n_loop=0):
"""Get a list of the number of legs in vertices in this diagram"""
if max_n_loop == 0:
max_n_loop = base_objects.Vertex.max_n_loop_for_multichanneling
# There is no need to check for self.get('interaction_id')==-2 when
# applying the max_n_loop check because we already know that this
# vertex is a loop one since it is a LoopHelasAmplitude
vertex_leg_numbers = [len(self.get('mothers'))] if \
(self.get('interaction_id') not in veto_inter_id) or \
len(self.get('mothers'))>max_n_loop else []
for mother in self.get('mothers'):
vertex_leg_numbers.extend(mother.get_vertex_leg_numbers(
veto_inter_id=veto_inter_id, max_n_loop=max_n_loop))
return vertex_leg_numbers
def get_denominators(self):
""" Returns the denominator structure as a tuple (tupleA, tupleB) whose
elements are of this form ((external_part_ids),mass) where
external_part_ids are all the leg id building the momentum flowing in
the loop, i.e:
D_i=(q+Sum(p_j,j))^2 - m^2
"""
denoms=[]
last_loop_wf=self.get_final_loop_wavefunction()
last_loop_wf_mother=last_loop_wf.get_loop_mother()
while last_loop_wf_mother:
denoms.append((tuple(last_loop_wf.get_struct_external_leg_ids()),
last_loop_wf.get('mass')))
last_loop_wf=last_loop_wf_mother
last_loop_wf_mother=last_loop_wf.get_loop_mother()
denoms.reverse()
return tuple(denoms)
def get_masses(self):
""" Returns the list of the masses of the loop particles as they should
appear for cuttools (L-cut particles specified last) """
masses=[]
if not aloha.complex_mass:
for lwf in [wf for wf in self.get('wavefunctions') if wf.get('mothers')]:
masses.append(lwf.get('mass'))
else:
for lwf in [wf for wf in self.get('wavefunctions') if wf.get('mothers')]:
if (lwf.get('width') == 'ZERO' or lwf.get('mass') == 'ZERO'):
masses.append(lwf.get('mass'))
else:
masses.append('CMASS_%s' % lwf.get('mass'))
return masses
def get_couplings(self):
""" Returns the list of the couplings of the different helas objects
building this HelasLoopAmplitude. They are ordered as they will appear
in the helas calls."""
return (sum([wf.get('coupling') for wf in self.get('wavefunctions') \
if wf.get('coupling')!=['none']],[])\
+sum([amp.get('coupling') for amp in self.get('amplitudes') if \
amp.get('coupling')!=['none']],[]))
def get_helas_call_dict(self, OptimizedOutput=False,specifyHel=True,**opt):
""" return a dictionary to be used for formatting
HELAS call. """
output = {}
output['numLoopLines']='_%d'%(len(self.get('wavefunctions'))-2)
# Plus one below because fortran array start at 1.
output['loop_group_id']=self.get('loop_group_id')+1
output['ampNumber']=self.get('amplitudes')[0].get('number')
if len(self.get('mothers'))!=len(self.get('pairing')):
output['numMotherWfs']='_%d'%len(self.get('mothers'))
else:
output['numMotherWfs']=''
for i, pairing in enumerate(self.get('pairing')):
output["Pairing%d"%i]=pairing
output['numCouplings']='_%d'%len(self.get('coupling'))
output['numeratorNumber']=self.get('number')
output["LoopRank"]=self.get_analytic_info('wavefunction_rank')
if OptimizedOutput:
if self.get('loop_group_id')==-1:
output['loopNumber']=self.get('number')
else:
output['loopNumber']=self.get('loop_group_id')+1
else:
output['loopNumber']=self.get('amplitudes')[0].get('number')
for i , wf in enumerate(self.get('mothers')):
output["MotherID%d"%(i+1)]=wf.get('number')
for i , mass in enumerate(self.get_masses()):
output["LoopMass%d"%(i+1)]=mass
for i , coupling in enumerate(self.get('coupling')):
output["LoopCoupling%d"%(i+1)]=coupling
output["LoopSymmetryFactor"] = self.get('loopsymmetryfactor')
output["LoopMultiplier"] = self.get('multiplier')
output.update(opt)
return output
def get_call_key(self):
""" The helas call to a loop is simple and only depends on the number
of loop lines and mothers. This how it is reflected in the call key. """
return ("LOOP",len(self.get('wavefunctions'))-2,\
len(self.get('mothers')),len(self.get('coupling')))
def get_orders(self):
""" Compute the orders building this loop amplitude only (not from the
struct wavefunctions. Uses the cached result if available."""
if self.get('orders') != {}:
return self.get('orders')
else:
coupling_orders = {}
last_wf = self.get_final_loop_wavefunction()
while last_wf.get_loop_mother()!=None:
for order in last_wf.get('orders').keys():
try:
coupling_orders[order] += last_wf.get('orders')[order]
except Exception:
coupling_orders[order] = last_wf.get('orders')[order]
last_wf = last_wf.get_loop_mother()
return coupling_orders
def get_analytic_info(self, info, alohaModel=None):
""" Returns an analytic information of the loop numerator, for example
the 'wavefunction_rank' i.e. the maximum power to which the loop momentum
is elevated in the loop numerator. All analytic pieces of information
are for now identical to the one retrieved from the final_loop_wavefunction."""
return self.get_final_loop_wavefunction().\
get_analytic_info(info, alohaModel)
def compute_analytic_information(self,alohaModel):
""" Make sure that all analytic pieces of information about this
wavefunction are computed so that they can be recycled later, typically
without the need of specifying an alohaModel. For now, all analytic
information about the loop helas amplitude are identical to those of the
final loop wavefunction."""
self.get_final_loop_wavefunction().compute_analytic_information(\
alohaModel)
def calculate_fermionfactor(self):
""" The fermion factor is not implemented for this object but in the
subamplitude"""
self['fermion_factor']=0
for amp in self.get('amplitudes'):
amp.get('fermionfactor')
def calculate_loopsymmetryfactor(self):
""" Calculate the loop symmetry factor. For one-loop matrix elements,
it is always 2 for bubble with identical particles and tadpoles with self-conjugated particles
and 1 otherwise."""
# Assign a loop symmetry factor of 1 to all loops tadpoles with a self-conjugated loop particle
# and bubbles featuring two identical (but not necessarily self-conjugated) particles running in
# the loop, for which the correct symmetry factor of 2 is assigned instead.
self['loopsymmetryfactor']=1
physical_wfs = [wf for wf in self.get('wavefunctions') if wf.get('interaction_id')!=0]
if len(physical_wfs)==1:
if physical_wfs[0].get('self_antipart'):
self['loopsymmetryfactor']=2
elif len(physical_wfs)==2:
if physical_wfs[0].get('particle')==physical_wfs[1].get('antiparticle'):
self['loopsymmetryfactor']=2
#===============================================================================
# LoopHelasDiagram
#===============================================================================
class LoopHelasDiagram(helas_objects.HelasDiagram):
    """LoopHelasDiagram object, behaving exactly as a HelasDiagram except
    that it provides a few additional accessors to reconstruct and handle
    loop amplitudes.
    """

    def get_regular_amplitudes(self):
        """Quick access to ALL non-loop amplitudes, including those which are
        inside the LoopAmplitudes defined in this diagram."""
        regular_amps = helas_objects.HelasAmplitudeList()
        for loop_amp in self.get_loop_amplitudes():
            regular_amps.extend(loop_amp['amplitudes'])
        regular_amps.extend(self.get_ct_amplitudes())
        return regular_amps

    def get_ct_amplitudes(self):
        """Quick access to the regular amplitudes defined directly in this
        diagram (not in the LoopAmplitudes). Usually they correspond to the
        counter-terms."""
        return helas_objects.HelasAmplitudeList(
            [amp for amp in self['amplitudes']
             if not isinstance(amp, LoopHelasAmplitude)])

    def get_loop_amplitudes(self):
        """Quick access to the loop amplitudes only."""
        return helas_objects.HelasAmplitudeList(
            [amp for amp in self['amplitudes']
             if isinstance(amp, LoopHelasAmplitude)])

    def get_loop_UVCTamplitudes(self):
        """Quick access to the UV counter-term loop amplitudes only."""
        return helas_objects.HelasAmplitudeList(
            [amp for amp in self['amplitudes']
             if isinstance(amp, LoopHelasUVCTAmplitude)])
#===============================================================================
# LoopHelasMatrixElement
#===============================================================================
class LoopHelasMatrixElement(helas_objects.HelasMatrixElement):
"""LoopHelasMatrixElement: list of processes with identical Helas
calls, and the list of LoopHelasDiagrams associated with the processes.
It works as for the HelasMatrixElement except for the loop-related features
which are defined here. """
def default_setup(self):
"""Default values for all properties"""
super(LoopHelasMatrixElement,self).default_setup()
# Store separately the color basis for the loop and born diagrams
self['born_color_basis'] = loop_color_amp.LoopColorBasis()
self['loop_color_basis'] = loop_color_amp.LoopColorBasis()
# To store the grouping of HelasLoopAmplitudes which share the same
# denominators.
# List of (key,value) where keys are tuples corresponding to the
# denominator structures (see get_denominators() of LoopHelasAmplitudes)
# and values are lists of LoopHelasAmplitudes. It is not a dictionary
# because we want for each LoopHelasAmplitude to assign a 'loop_group_id'
# which indicates where it is placed in this list
self['loop_groups'] = []
def filter(self, name, value):
"""Filter for valid diagram property values."""
if name=='born_color_basis' or name=='loop_color_basis':
if not isinstance(value,color_amp.ColorBasis):
raise self.PhysicsObjectError, \
"%s is not a valid color basis" % str(value)
elif name=='loop_groups':
if not isinstance(value,list):
raise self.PhysicsObjectError, \
"%s is not a valid list"%str(value)
for (dkey, dvalue) in value:
if not isinstance(dvalue,helas_objects.HelasAmplitudeList):
raise self.PhysicsObjectError, \
"%s is not a valid HelasAmplitudeList."%str(dvalue)
if not isinstance(dkey,tuple):
raise self.PhysicsObjectError, \
"%s is not a valid tuple."%str(dkey)
else:
return super(LoopHelasMatrixElement,self).filter(name, value)
return True
def get(self,name):
"""Overload in order to return the loop_color_basis when simply asked
for color_basis. The setter is not updated to avoid side effects."""
if name=='color_basis':
return self['loop_color_basis']
elif name=='loop_groups':
if not self['loop_groups']:
self.identify_loop_groups()
return self['loop_groups']
else:
return super(LoopHelasMatrixElement,self).get(name)
def identify_loop_groups(self):
""" Identify what are the loops sharing the same denominators and put
them together in the 'loop_groups' attribute of this object. """
identified_denom_structures=[]
for lamp in [lamp for ldiag in self.get_loop_diagrams() for lamp in \
ldiag.get_loop_amplitudes()]:
denom_structure=lamp.get_denominators()
try:
denom_index=identified_denom_structures.index(denom_structure)
self['loop_groups'][denom_index][1].append(lamp)
except ValueError:
denom_index=len(self['loop_groups'])
self['loop_groups'].append((denom_structure,
helas_objects.HelasAmplitudeList([lamp,])))
identified_denom_structures.append(denom_structure)
lamp.set('loop_group_id',denom_index)
# Now make sure that the loop amplitudes lists in values of the
# dictionary are ordering in decreasing ranks, so that the first one
# (later to be the reference amplitude) has the highest rank
self['loop_groups']=[(group[0],helas_objects.HelasAmplitudeList(
sorted(group[1],key=lambda lamp: \
lamp.get_analytic_info('wavefunction_rank'),reverse=True)))
for group in self['loop_groups']]
# Also, order them so to put first the groups with the smallest
# reference amplitude number
self['loop_groups']=sorted(self['loop_groups'],key=lambda group: \
group[1][0].get('number'))
self.update_loop_group_ids()
def reuse_outdated_wavefunctions(self, helas_diagrams):
""" Make sure never to use this optimization in the loop context."""
# But just make sure that me_id is simply the number.
for diag in helas_diagrams:
for wf in diag['wavefunctions']:
wf.set('me_id',wf.get('number'))
return helas_diagrams
def update_loop_group_ids(self):
""" Make sure that the attribute 'loop_group_id' of all loop amplitudes
in the 'loop_groups' list is correct given the order of 'loop_groups'"""
for i, group in enumerate(self['loop_groups']):
for lamp in group[1]:
lamp.set('loop_group_id',i)
def process_color(self):
""" Perform the simple color processing from a single matrix element
(without optimization then). This is called from the initialization
and overloaded here in order to have the correct treatment """
# Generation of helas objects is assumed to be finished so we can relabel
# optimaly the 'number' attribute of these objects.
self.relabel_helas_objects()
self.get('loop_color_basis').build_loop(self.get('base_amplitude'))
if self.get('base_amplitude')['process']['has_born']:
self.get('born_color_basis').build_born(self.get('base_amplitude'))
self.set('color_matrix',\
color_amp.ColorMatrix(self.get('loop_color_basis'),\
self.get('born_color_basis')))
else:
self.set('color_matrix',\
color_amp.ColorMatrix(self.get('loop_color_basis')))
def get_sorted_keys(self):
"""Return particle property names as a nicely sorted list."""
return ['processes', 'identical_particle_factor',
'diagrams', 'born_color_basis','loop_color_basis',
'color_matrix','base_amplitude', 'has_mirror_process',
'loop_groups']
    # Customized constructor
    def __init__(self, amplitude=None, optimization=1,
                 decay_ids=[], gen_color=True, optimized_output=False):
        """Constructor for the LoopHelasMatrixElement. For now, it works exactly
        as for the HelasMatrixElement one."""
        # 'optimized_output' must be set before calling the parent constructor
        # since the diagram generation it triggers reads this attribute.
        # NOTE(review): mutable default 'decay_ids=[]' is safe only if it is
        # never mutated downstream -- confirm.
        self.optimized_output=optimized_output
        super(LoopHelasMatrixElement, self).__init__(amplitude, optimization,\
                                                     decay_ids, gen_color)
    # Comparison between different amplitudes, to allow check for
    # identical processes. Note that we are then not interested in
    # interaction id, but in all other properties.
    def __eq__(self, other):
        """Comparison between different loop matrix elements. It works exactly as for
        the HelasMatrixElement for now."""
        # Delegate entirely to the parent implementation.
        return super(LoopHelasMatrixElement,self).__eq__(other)
    def __ne__(self, other):
        """Overloading the nonequality operator, to make comparison easy"""
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
        return not self.__eq__(other)
def generate_helas_diagrams(self, amplitude, optimization=1,
decay_ids=[]):
"""Starting from a list of LoopDiagrams from the diagram
generation, generate the corresponding LoopHelasDiagrams, i.e.,
the wave functions and amplitudes (for the loops and their R2 and UV
counterterms). Choose between default optimization (= 1, maximum
recycling of wavefunctions) or no optimization (= 0, no recycling of
wavefunctions, useful for GPU calculations with very restricted memory).
Note that we need special treatment for decay chains, since
the end product then is a wavefunction, not an amplitude.
"""
assert isinstance(amplitude, loop_diagram_generation.LoopAmplitude), \
"Bad arguments for generate_helas_diagrams in LoopHelasMatrixElement"
assert isinstance(optimization, int), \
"Bad arguments for generate_helas_diagrams in LoopHelasMatrixElement"
structures = amplitude.get('structure_repository')
process = amplitude.get('process')
has_born = amplitude.get('has_born')
model = process.get('model')
# First make sure that the 'split_orders' are ordered according to their
# weight.
self.sort_split_orders(self.get('processes')[0].get('split_orders'))
# Before starting, and if split_orders are defined in the amplitude
# process, we must reorder the generated diagrams so as to put together
# all those which share the same coupling orders. Then, we sort these
# *group of diagrams* in decreasing WEIGHTED order, so that the
# leading contributions are placed first (I will therfore be possible
# to compute them only, saving the time of the rest of the computation)
amplitude.order_diagrams_according_to_split_orders(\
self.get('processes')[0].get('split_orders'))
# All the previously defined wavefunctions
wavefunctions = []
# List of dictionaries from struct ID to wave function,
# keeps track of the structures already scanned.
# The key is the struct ID and the value infos is the tuple
# (wfs, colorlists). 'wfs' is the list of wavefunctions,
# one for each color-lorentz structure of the FDStructure.
# Same for the 'colorlists', everything appearing
# in the same order in these lists
structID_to_infos = {}
# List of minimal information for comparison with previous
# wavefunctions
wf_mother_arrays = []
# Keep track of wavefunction number
wf_number = 0
# Generate wavefunctions for the external particles
external_wavefunctions = dict([(leg.get('number'),
helas_objects.HelasWavefunction(\
leg, 0, model, decay_ids)) \
for leg in process.get('legs')])
# To store the starting external loop wavefunctions needed
# (They are never output so they are not in the diagrams wavefunctions)
external_loop_wfs_dict={}
# For initial state bosons, need to flip part-antipart
# since all bosons should be treated as outgoing
for key in external_wavefunctions.keys():
wf = external_wavefunctions[key]
if wf.is_boson() and wf.get('state') == 'initial' and \
not wf.get('self_antipart'):
wf.set('is_part', not wf.get('is_part'))
# For initial state particles, need to flip PDG code (if has
# antipart)
for key in external_wavefunctions.keys():
wf = external_wavefunctions[key]
if wf.get('leg_state') == False and \
not wf.get('self_antipart'):
wf.flip_part_antipart()
# Initially, have one wavefunction for each external leg.
wf_number = len(process.get('legs'))
# Now go through the diagrams, looking for undefined wavefunctions
helas_diagrams = helas_objects.HelasDiagramList()
# Keep track of amplitude number and diagram number
amplitude_number = 0
diagram_number = 0
def process_born_diagram(diagram, wfNumber, amplitudeNumber, UVCTdiag=False):
""" Helper function to process a born diagrams exactly as it is done in
HelasMatrixElement for tree-level diagrams. This routine can also
process LoopUVCTDiagrams, and if so the argument UVCTdiag must be set
to true"""
# List of dictionaries from leg number to wave function,
# keeps track of the present position in the tree.
# Need one dictionary per coupling multiplicity (diagram)
number_to_wavefunctions = [{}]
# Need to keep track of the color structures for each amplitude
color_lists = [[]]
# Initialize wavefunctions for this diagram
diagram_wavefunctions = helas_objects.HelasWavefunctionList()
vertices = copy.copy(diagram.get('vertices'))
# Single out last vertex, since this will give amplitude
lastvx = vertices.pop()
# Go through all vertices except the last and create
# wavefunctions
for vertex in vertices:
# In case there are diagrams with multiple Lorentz/color
# structures, we need to keep track of the wavefunctions
# for each such structure separately, and generate
# one HelasDiagram for each structure.
# We use the array number_to_wavefunctions to keep
# track of this, with one dictionary per chain of
# wavefunctions
# Note that all wavefunctions relating to this diagram
# will be written out before the first amplitude is written.
new_number_to_wavefunctions = []
new_color_lists = []
for number_wf_dict, color_list in zip(number_to_wavefunctions,
color_lists):
legs = copy.copy(vertex.get('legs'))
last_leg = legs.pop()
# Generate list of mothers from legs
mothers = self.getmothers(legs, number_wf_dict,
external_wavefunctions,
wavefunctions,
diagram_wavefunctions)
inter = model.get('interaction_dict')[vertex.get('id')]
# Now generate new wavefunction for the last leg
# Need one amplitude for each color structure,
done_color = {} # store link to color
for coupl_key in sorted(inter.get('couplings').keys()):
color = coupl_key[0]
if color in done_color:
wf = done_color[color]
wf.get('coupling').append(inter.get('couplings')[coupl_key])
wf.get('lorentz').append(inter.get('lorentz')[coupl_key[1]])
continue
wf = helas_objects.HelasWavefunction(last_leg, \
vertex.get('id'), model)
wf.set('coupling', [inter.get('couplings')[coupl_key]])
if inter.get('color'):
wf.set('inter_color', inter.get('color')[coupl_key[0]])
done_color[color] = wf
wf.set('lorentz', [inter.get('lorentz')[coupl_key[1]]])
wf.set('color_key', color)
wf.set('mothers',mothers)
# Need to set incoming/outgoing and
# particle/antiparticle according to the fermion flow
# of mothers
wf.set_state_and_particle(model)
# Need to check for clashing fermion flow due to
# Majorana fermions, and modify if necessary
# Also need to keep track of the wavefunction number.
wf, wfNumber = wf.check_and_fix_fermion_flow(\
wavefunctions,
diagram_wavefunctions,
external_wavefunctions,
wfNumber)
# Create new copy of number_wf_dict
new_number_wf_dict = copy.copy(number_wf_dict)
# Store wavefunction
try:
wf = diagram_wavefunctions[\
diagram_wavefunctions.index(wf)]
except ValueError:
# Update wf number
wfNumber = wfNumber + 1
wf.set('number', wfNumber)
try:
# Use wf_mother_arrays to locate existing
# wavefunction
wf = wavefunctions[wf_mother_arrays.index(\
wf.to_array())]
# Since we reuse the old wavefunction, reset
# wfNumber
wfNumber = wfNumber - 1
except ValueError:
diagram_wavefunctions.append(wf)
new_number_wf_dict[last_leg.get('number')] = wf
# Store the new copy of number_wf_dict
new_number_to_wavefunctions.append(\
new_number_wf_dict)
# Add color index and store new copy of color_lists
new_color_list = copy.copy(color_list)
new_color_list.append(coupl_key[0])
new_color_lists.append(new_color_list)
number_to_wavefunctions = new_number_to_wavefunctions
color_lists = new_color_lists
# Generate all amplitudes corresponding to the different
# copies of this diagram
if not UVCTdiag:
helas_diagram = helas_objects.HelasDiagram()
else:
helas_diagram = LoopHelasDiagram()
for number_wf_dict, color_list in zip(number_to_wavefunctions,
color_lists):
# Now generate HelasAmplitudes from the last vertex.
if lastvx.get('id'):
inter = model.get_interaction(lastvx.get('id'))
keys = sorted(inter.get('couplings').keys())
pdg_codes = [p.get_pdg_code() for p in \
inter.get('particles')]
else:
# Special case for decay chain - amplitude is just a
# placeholder for replaced wavefunction
inter = None
keys = [(0, 0)]
pdg_codes = None
# Find mothers for the amplitude
legs = lastvx.get('legs')
mothers = self.getmothers(legs, number_wf_dict,
external_wavefunctions,
wavefunctions,
diagram_wavefunctions).\
sort_by_pdg_codes(pdg_codes, 0)[0]
# Need to check for clashing fermion flow due to
# Majorana fermions, and modify if necessary
wfNumber = mothers.check_and_fix_fermion_flow(wavefunctions,
diagram_wavefunctions,
external_wavefunctions,
None,
wfNumber,
False,
number_to_wavefunctions)
done_color = {}
for i, coupl_key in enumerate(keys):
color = coupl_key[0]
if inter and color in done_color.keys():
amp = done_color[color]
amp.get('coupling').append(inter.get('couplings')[coupl_key])
amp.get('lorentz').append(inter.get('lorentz')[coupl_key[1]])
continue
if not UVCTdiag:
amp = helas_objects.HelasAmplitude(lastvx, model)
else:
amp = LoopHelasUVCTAmplitude(lastvx, model)
amp.set('UVCT_orders',diagram.get('UVCT_orders'))
amp.set('UVCT_couplings',diagram.get('UVCT_couplings'))
amp.set('type',diagram.get('type'))
if inter:
amp.set('coupling', [inter.get('couplings')[coupl_key]])
amp.set('lorentz', [inter.get('lorentz')[coupl_key[1]]])
if inter.get('color'):
amp.set('inter_color', inter.get('color')[color])
amp.set('color_key', color)
done_color[color] = amp
amp.set('mothers', mothers)
amplitudeNumber = amplitudeNumber + 1
amp.set('number', amplitudeNumber)
# Add the list with color indices to the amplitude
new_color_list = copy.copy(color_list)
if inter:
new_color_list.append(color)
amp.set('color_indices', new_color_list)
# Add amplitude to amplitdes in helas_diagram
helas_diagram.get('amplitudes').append(amp)
# After generation of all wavefunctions and amplitudes,
# add wavefunctions to diagram
helas_diagram.set('wavefunctions', diagram_wavefunctions)
# Sort the wavefunctions according to number
diagram_wavefunctions.sort(lambda wf1, wf2: \
wf1.get('number') - wf2.get('number'))
if optimization:
wavefunctions.extend(diagram_wavefunctions)
wf_mother_arrays.extend([wf.to_array() for wf \
in diagram_wavefunctions])
else:
wfNumber = len(process.get('legs'))
if self.optimized_output:
# Add one for the starting external loop wavefunctions
# which is fixed
wfNumber = wfNumber+1
# Return the diagram obtained
return helas_diagram, wfNumber, amplitudeNumber
def process_struct(sID, diag_wfs, wfNumber):
""" Scan a structure, create the necessary wavefunctions, add them
to the diagram wavefunctions list, and return a list of bridge
wavefunctions (i.e. those attached to the loop) with a list, ordered
in the same way, of color lists. Each element of these lists
correspond to one choice of color-lorentz structure of this
tree-structure #sID. """
# List of dictionaries from leg number to wave function,
# keeps track of the present position in the tree structure.
# Need one dictionary per coupling multiplicity (diagram)
number_to_wavefunctions = [{}]
# Need to keep track of the color structures for each amplitude
color_lists = [[]]
# Bridge wavefunctions
bridge_wfs = helas_objects.HelasWavefunctionList()
vertices = copy.copy(structures[sID].get('vertices'))
# First treat the special case of a structure made solely of one
# external leg
if len(vertices)==0:
binding_leg=copy.copy(structures[sID]['binding_leg'])
binding_wf = self.getmothers(base_objects.LegList([binding_leg,]),
{},
external_wavefunctions,
wavefunctions,
diag_wfs)
# Simply return the wf of this external leg along with an
# empty color list
return [(binding_wf[0],[])] ,wfNumber
# Go through all vertices except the last and create
# wavefunctions
for i, vertex in enumerate(vertices):
# In case there are diagrams with multiple Lorentz/color
# structures, we need to keep track of the wavefunctions
# for each such structure separately, and generate
# one HelasDiagram for each structure.
# We use the array number_to_wavefunctions to keep
# track of this, with one dictionary per chain of
# wavefunctions
# Note that all wavefunctions relating to this diagram
# will be written out before the first amplitude is written.
new_number_to_wavefunctions = []
new_color_lists = []
for number_wf_dict, color_list in zip(number_to_wavefunctions,
color_lists):
legs = copy.copy(vertex.get('legs'))
last_leg = legs.pop()
# Generate list of mothers from legs
mothers = self.getmothers(legs, number_wf_dict,
external_wavefunctions,
wavefunctions,
diag_wfs)
inter = model.get('interaction_dict')[vertex.get('id')]
# Now generate new wavefunction for the last leg
# Need one amplitude for each color structure,
done_color = {} # store link to color
for coupl_key in sorted(inter.get('couplings').keys()):
color = coupl_key[0]
if color in done_color:
wf = done_color[color]
wf.get('coupling').append(inter.get('couplings')[coupl_key])
wf.get('lorentz').append(inter.get('lorentz')[coupl_key[1]])
continue
wf = helas_objects.HelasWavefunction(last_leg, vertex.get('id'), model)
wf.set('coupling', [inter.get('couplings')[coupl_key]])
if inter.get('color'):
wf.set('inter_color', inter.get('color')[coupl_key[0]])
done_color[color] = wf
wf.set('lorentz', [inter.get('lorentz')[coupl_key[1]]])
wf.set('color_key', color)
wf.set('mothers',mothers)
###print "in process_struct and adding wf with"
###print " mothers id:"
###for ii, mot in enumerate(mothers):
### print " mother ",ii,"=",mot['number_external'],"("+str(mot.get_pdg_code())+") number=",mot['number']
###print " and iself =",wf['number_external'],"("+str(wf.get_pdg_code())+") number=",wf['number']
# Need to set incoming/outgoing and
# particle/antiparticle according to the fermion flow
# of mothers
wf.set_state_and_particle(model)
# Need to check for clashing fermion flow due to
# Majorana fermions, and modify if necessary
# Also need to keep track of the wavefunction number.
wf, wfNumber = wf.check_and_fix_fermion_flow(\
wavefunctions,
diag_wfs,
external_wavefunctions,
wfNumber)
# Create new copy of number_wf_dict
new_number_wf_dict = copy.copy(number_wf_dict)
# Store wavefunction
try:
wf = diag_wfs[\
diag_wfs.index(wf)]
except ValueError:
# Update wf number
wfNumber = wfNumber + 1
wf.set('number', wfNumber)
try:
# Use wf_mother_arrays to locate existing
# wavefunction
wf = wavefunctions[wf_mother_arrays.index(\
wf.to_array())]
# Since we reuse the old wavefunction, reset
# wfNumber
wfNumber = wfNumber - 1
except ValueError:
diag_wfs.append(wf)
new_number_wf_dict[last_leg.get('number')] = wf
if i==(len(vertices)-1):
# Last vertex of the structure so we should define
# the bridge wavefunctions.
bridge_wfs.append(wf)
# Store the new copy of number_wf_dict
new_number_to_wavefunctions.append(\
new_number_wf_dict)
# Add color index and store new copy of color_lists
new_color_list = copy.copy(color_list)
new_color_list.append(coupl_key[0])
new_color_lists.append(new_color_list)
number_to_wavefunctions = new_number_to_wavefunctions
color_lists = new_color_lists
###print "bridg wfs returned="
###for wf in bridge_wfs:
### print " bridge =",wf['number_external'],"("+str(wf.get_pdg_code())+") number=",wf['number']
return zip(bridge_wfs, color_lists), wfNumber
def getloopmothers(loopWfsIn, structIDs, color_list, diag_wfs, wfNumber):
"""From the incoming loop leg(s) and the list of structures IDs
connected to the loop at this point, it generates the list of
mothers, a list of colorlist and a number_to_wavefunctions
dictionary list for which each element correspond to one
lorentz-color structure of the tree-structure attached to the loop.
It will launch the reconstruction procedure of the structures
which have not been encountered yet."""
# The mothers list and the color lists There is one element in these
# lists, in the same order, for each combination of the
# lorentz-color tree-structures of the FDStructures attached to
# this point.
mothers_list = [loopWfsIn,]
color_lists = [color_list,]
# Scanning of the FD tree-structures attached to the loop at this
# point.
for sID in structIDs:
try:
struct_infos = structID_to_infos[sID]
except KeyError:
# The structure has not been encountered yet, we must
# scan it
struct_infos, wfNumber = \
process_struct(sID, diag_wfs, wfNumber)
if optimization:
# Only if there is optimization the dictionary is
# because otherwise we must always rescan the
# structures to correctly add all the necessary
# wavefunctions to the diagram wavefunction list
structID_to_infos[sID]=copy.copy(struct_infos)
# The orig object are those already existing before treating
# this structure
new_mothers_list = []
new_color_lists = []
for mothers, orig_color_list in zip(mothers_list, color_lists):
for struct_wf, struct_color_list in struct_infos:
new_color_list = copy.copy(orig_color_list)+\
copy.copy(struct_color_list)
new_mothers = copy.copy(mothers)
new_mothers.append(struct_wf)
new_color_lists.append(new_color_list)
new_mothers_list.append(new_mothers)
mothers_list = new_mothers_list
color_lists = new_color_lists
###print "getloop mothers returned with sID", structIDs
###print "len mothers_list=",len(mothers_list)
###for wf in mothers_list[0]:
### print " mother =",wf['number_external'],"("+str(wf.get_pdg_code())+") number=",wf['number']
return (mothers_list, color_lists), wfNumber
        def process_loop_diagram(diagram, wavefunctionNumber, amplitudeNumber):
            """Helper function to process one loop diagram, which features
            several different aspects compared to the tree born diagrams.

            It builds a LoopHelasDiagram from 'diagram' by walking the loop
            'tag' vertex by vertex, closing the loop into LoopHelasAmplitudes
            and finally adding the counter-term amplitudes.
            Returns the tuple (helas_diagram, wavefunctionNumber,
            amplitudeNumber) with the two running counters updated."""
            # Initialize here the loop helas diagram we are about to create
            helas_diagram = LoopHelasDiagram()
            # List of dictionaries from leg number to wave function,
            # keeps track of the present position in the loop.
            # We only need to retain the last loop wavefunctions created
            # This is a list to store all the last loop wavefunctions created
            # due to the possibly many color-lorentz structure of the last
            # loop vertex.
            last_loop_wfs = helas_objects.HelasWavefunctionList()
            # Need to keep track of the color structures for each amplitude
            color_lists = [[]]
            # Initialize wavefunctions for this diagram
            diagram_wavefunctions = helas_objects.HelasWavefunctionList()
            # Copy the original tag of the loop which contains all the necessary
            # information with the interaction ID in the tag replaced by the
            # corresponding vertex
            tag = copy.deepcopy(diagram.get('tag'))
            loop_vertices = copy.deepcopy(diagram.get('vertices'))
            for i in range(len(tag)):
                tag[i][2]=loop_vertices[i]
            # Copy the ct vertices of the loop
            ct_vertices = copy.copy(diagram.get('CT_vertices'))
            # First create the starting external loop leg
            external_loop_wf=helas_objects.HelasWavefunction(\
                                          tag[0][0], 0, model, decay_ids)
            # When on the optimized output mode, the starting loop wavefunction
            # can be recycled if it has the same pdg because whatever its pdg
            # it has the same coefficients and loop momentum zero,
            # so it is in principle not necessary to add it to the
            # diagram_wavefunction. However, this is necessary for the function
            # check_and_fix_fermion_flow to correctly update the dependances of
            # previous diagrams to an external L-cut majorana wavefunction which
            # needs flipping.
            if not self.optimized_output:
                wavefunctionNumber=wavefunctionNumber+1
                external_loop_wf.set('number',wavefunctionNumber)
                diagram_wavefunctions.append(external_loop_wf)
            else:
                # Recycle the starting wavefunction if one with the same
                # pdg_code was already created for a previous loop diagram.
                try:
                    external_loop_wf=\
                       external_loop_wfs_dict[external_loop_wf.get('pdg_code')]
                except KeyError:
                    wavefunctionNumber=wavefunctionNumber+1
                    external_loop_wf.set('number',wavefunctionNumber)
                    external_loop_wfs_dict[external_loop_wf.get('pdg_code')]=\
                                                               external_loop_wf
                    diagram_wavefunctions.append(external_loop_wf)
            # Setup the starting point of the reading of the loop flow.
            last_loop_wfs.append(external_loop_wf)
            def process_tag_elem(tagElem, wfNumber, lastloopwfs, colorlists):
                """Treat one tag element of the loop diagram (not the last one
                which provides an amplitude)"""
                # We go through all the structures generated during the
                # exploration of the structures attached at this point
                # of the loop. Let's define the new color_lists and
                # last_loop_wfs we will use for next iteration
                new_color_lists = []
                new_last_loop_wfs = helas_objects.HelasWavefunctionList()
                # In case there are diagrams with multiple Lorentz/color
                # structures, we need to keep track of the wavefunctions
                # for each such structure separately, and generate
                # one HelasDiagram for each structure.
                # We use the array number_to_wavefunctions to keep
                # track of this, with one dictionary per chain of
                # wavefunctions
                # Note that all wavefunctions relating to this diagram
                # will be written out before the first amplitude is written.
                vertex=tagElem[2]
                structIDs=tagElem[1]
                for last_loop_wf, color_list in zip(lastloopwfs,
                                                    colorlists):
                    loopLegOut = copy.copy(vertex.get('legs')[-1])
                    # From the incoming loop leg and the struct IDs, it generates
                    # a list of mothers, colorlists and number_to_wavefunctions
                    # dictionary for which each element correspond to one
                    # lorentz-color structure of the tree-structure attached to
                    # the loop.
                    (motherslist, colorlists), wfNumber = \
                                            getloopmothers(\
                       helas_objects.HelasWavefunctionList([last_loop_wf,]),
                       structIDs,\
                       color_list, diagram_wavefunctions, wfNumber)
                    inter = model.get('interaction_dict')[vertex.get('id')]
                    # Now generate new wavefunctions for the last leg
                    for mothers, structcolorlist in zip(motherslist, colorlists):
                        # Need one amplitude for each color structure,
                        done_color = {} # store link to color
                        for coupl_key in sorted(inter.get('couplings').keys()):
                            color = coupl_key[0]
                            if color in done_color:
                                # Reuse the wavefunction of an already-treated
                                # color structure; just extend its couplings.
                                wf = done_color[color]
                                wf.get('coupling').append(inter.get('couplings')[coupl_key])
                                wf.get('lorentz').append(inter.get('lorentz')[coupl_key[1]])
                                continue
                            wf = helas_objects.HelasWavefunction(loopLegOut, \
                                                        vertex.get('id'), model)
                            wf.set('coupling', [inter.get('couplings')[coupl_key]])
                            if inter.get('color'):
                                wf.set('inter_color', inter.get('color')[coupl_key[0]])
                            done_color[color] = wf
                            wf.set('lorentz', [inter.get('lorentz')[coupl_key[1]]])
                            wf.set('color_key', color)
                            wf.set('mothers',mothers)
                            # Need to set incoming/outgoing and
                            # particle/antiparticle according to the fermion flow
                            # of mothers
                            wf.set_state_and_particle(model)
                            # Need to check for clashing fermion flow due to
                            # Majorana fermions, and modify if necessary
                            # Also need to keep track of the wavefunction number.
                            wf, wfNumber = wf.check_and_fix_fermion_flow(\
                                           wavefunctions,
                                           diagram_wavefunctions,
                                           external_wavefunctions,
                                           wfNumber)
                            # Store wavefunction
                            try:
                                wf = diagram_wavefunctions[\
                                       diagram_wavefunctions.index(wf)]
                            except ValueError:
                                # Update wf number
                                wfNumber = wfNumber + 1
                                wf.set('number', wfNumber)
                                # Depending on wether we are on the
                                # loop_optimized_output mode or now we want to
                                # reuse the loop wavefunctions as well.
                                try:
                                    if not self.optimized_output:
                                        raise ValueError
                                    # Use wf_mother_arrays to locate existing
                                    # wavefunction
                                    wf = wavefunctions[wf_mother_arrays.index(\
                                                                 wf.to_array())]
                                    # Since we reuse the old wavefunction, reset
                                    # wfNumber
                                    wfNumber = wfNumber - 1
                                    # To keep track of the number of loop
                                    # wfs reused
                                    self.lwf_reused += 1
                                except ValueError:
                                    diagram_wavefunctions.append(wf)
                            # Update the last_loop_wfs list with the loop wf
                            # we just created.
                            new_last_loop_wfs.append(wf)
                            # Add color index and store new copy of color_lists
                            new_color_list = copy.copy(structcolorlist)
                            new_color_list.append(coupl_key[0])
                            new_color_lists.append(new_color_list)
                # We update the lastloopwfs list and the color_lists for the
                # next iteration, i.e. the treatment of the next loop vertex
                # by returning them to the calling environnement.
                return wfNumber, new_last_loop_wfs, new_color_lists
            # Go through all vertices except the last and create
            # wavefunctions
            def create_amplitudes(lastvx, wfNumber, amplitudeNumber):
                """Treat the last tag element of the loop diagram (which
                provides an amplitude)"""
                # First create the other external loop leg closing the loop.
                # It will not be in the final output, and in this sense, it is
                # a dummy wavefunction, but it is structurally important.
                # Because it is only structurally important, we do not need to
                # add it to the list of the wavefunctions for this ME or this
                # HELAS loop amplitude, nor do we need to update its number.
                other_external_loop_wf=helas_objects.HelasWavefunction()
                # wfNumber=wfNumber+1
                for leg in [leg for leg in lastvx['legs'] if leg['loop_line']]:
                    if last_loop_wfs[0]['number_external']!=leg['number']:
                        other_external_loop_wf=\
                          helas_objects.HelasWavefunction(leg, 0, model, decay_ids)
                        # other_external_loop_wf.set('number',wfNumber)
                        break
                # diagram_wavefunctions.append(other_external_loop_wf)
                for last_loop_wf, color_list in zip(last_loop_wfs,color_lists):
                    # Now generate HelasAmplitudes from the last vertex.
                    if lastvx.get('id')!=-1:
                        raise self.PhysicsObjectError, \
                          "The amplitude vertex of a loop diagram must be a "+\
                          "two point vertex with id=-1"
                    # skip the boson and Dirac fermions
                    # adjust the fermion flow of external majorana loop wfs
                    if other_external_loop_wf.is_majorana():
                        fix_lcut_majorana_fermion_flow(last_loop_wf,\
                                                       other_external_loop_wf)
                    # fix the fermion flow
                    mothers=helas_objects.HelasWavefunctionList(\
                                     [last_loop_wf,other_external_loop_wf])
                    wfNumber = mothers.check_and_fix_fermion_flow(wavefunctions,
                                              diagram_wavefunctions,
                                              external_wavefunctions,
                                              None,
                                              wfNumber,
                                              False,
                                              []) # number_to_wavefunctions is useless in loop case
                    amp = helas_objects.HelasAmplitude(lastvx, model)
                    amp.set('interaction_id',-1)
                    amp.set('mothers',mothers)
                    #amp.set('mothers', helas_objects.HelasWavefunctionList(\
                    #                 [last_loop_wf,other_external_loop_wf]))
                    amp.set('pdg_codes',[last_loop_wf.get_pdg_code(),
                                         other_external_loop_wf.get_pdg_code()])
                    ###print "mothers added for amp="
                    ###for wf in mothers:
                    ###    print "    mother =",wf['number_external'],"("+str(wf.get_pdg_code())+") number=",wf['number']
                    # Add the list with color indices to the amplitude
                    amp.set('color_indices', copy.copy(color_list))
                    # Add this amplitude to the LoopHelasAmplitude of this
                    # diagram.
                    amplitudeNumber = amplitudeNumber + 1
                    amp.set('number', amplitudeNumber)
                    amp.set('type','loop')
                    loop_amp = LoopHelasAmplitude()
                    loop_amp.set('amplitudes',\
                      helas_objects.HelasAmplitudeList([amp,]))
                    # Set the loop wavefunctions building this amplitude
                    # by tracking them from the last loop wavefunction
                    # added and its loop wavefunction among its mothers
                    loop_amp_wfs=helas_objects.HelasWavefunctionList(\
                      [last_loop_wf,])
                    while loop_amp_wfs[-1].get('mothers'):
                        loop_amp_wfs.append([lwf for lwf in \
                          loop_amp_wfs[-1].get('mothers') if lwf['is_loop']][0])
                    # Sort the loop wavefunctions of this amplitude
                    # according to their correct order of creation for
                    # the HELAS calls (using their 'number' attribute
                    # would work as well, but I want something less naive)
                    # 1) Add the other L-cut particle at the end
                    loop_amp_wfs.append(other_external_loop_wf)
                    # 2) Reverse to have a consistent ordering of creation
                    # of helas wavefunctions.
                    loop_amp_wfs.reverse()
                    loop_amp.set('wavefunctions',loop_amp_wfs)
                    loop_amp.set('type',diagram.get('type'))
                    loop_amp.set('multiplier',diagram.get('multiplier'))
                    # 'number' is not important as it will be redefined later.
                    loop_amp.set('number',min([amp.get('number') for amp
                                               in loop_amp.get('amplitudes')]))
                    loop_amp.set('coupling',loop_amp.get_couplings())
                    loop_amp.set('orders',loop_amp.get_orders())
                    helas_diagram.get('amplitudes').append(loop_amp)
                    # here we check the two L-cut loop helas wavefunctions are
                    # in consistent flow
                    check_lcut_fermion_flow_consistency(\
                        loop_amp_wfs[0],loop_amp_wfs[1])
                return wfNumber, amplitudeNumber
            def check_lcut_fermion_flow_consistency(lcut_wf1, lcut_wf2):
                """Checks that the two L-cut loop helas wavefunctions have
                a consistent fermion flow."""
                if lcut_wf1.is_boson():
                    if lcut_wf1.get('state')!='final' or\
                       lcut_wf2.get('state')!='final':
                        raise MadGraph5Error,\
                              "Inconsistent flow in L-cut bosons."
                elif not lcut_wf1.is_majorana():
                    for lcut_wf in [lcut_wf1,lcut_wf2]:
                        if not ((lcut_wf.get('is_part') and \
                               lcut_wf.get('state')=='outgoing') or\
                               (not lcut_wf.get('is_part') and\
                               lcut_wf.get('state')=='incoming')):
                            raise MadGraph5Error,\
                              "Inconsistent flow in L-cut Dirac fermions."
                elif lcut_wf1.is_majorana():
                    # A Majorana pair must be read in opposite directions.
                    if (lcut_wf1.get('state'), lcut_wf2.get('state')) not in \
                                  [('incoming','outgoing'),('outgoing','incoming')]:
                        raise MadGraph5Error,\
                              "Inconsistent flow in L-cut Majorana fermions."
            def fix_lcut_majorana_fermion_flow(last_loop_wf,\
                                                      other_external_loop_wf):
                """Fix the fermion flow of the last external Majorana loop
                wavefunction through the fermion flow of the first external
                Majorana loop wavefunction."""
                # skip the boson and Dirac fermions
                # if not other_external_loop_wf.is_majorana():return
                loop_amp_wfs=helas_objects.HelasWavefunctionList(\
                    [last_loop_wf,])
                while loop_amp_wfs[-1].get('mothers'):
                    loop_amp_wfs.append([lwf for lwf in \
                        loop_amp_wfs[-1].get('mothers') if lwf['is_loop']][0])
                loop_amp_wfs.append(other_external_loop_wf)
                loop_amp_wfs.reverse()
                # loop_amp_wfs[0] is the last external loop wavefunction
                # while loop_amp_wfs[1] is the first external loop wavefunction
                rep={'incoming':'outgoing','outgoing':'incoming'}
                # Check if we need to flip the state of the external L-cut majorana
                other_external_loop_wf['state']=rep[loop_amp_wfs[1]['state']]
                return
            def process_counterterms(ct_vertices, wfNumber, amplitudeNumber):
                """Process the counterterms vertices defined in this loop
                diagram."""
                structIDs=[]
                for tagElem in tag:
                    structIDs += tagElem[1]
                # Here we call getloopmothers without any incoming loop
                # wavefunctions such that the function will return exactly
                # the mother of the counter-term amplitude we wish to create
                # We start with an empty color list as well in this case
                (motherslist, colorlists), wfNumber = getloopmothers(\
                                helas_objects.HelasWavefunctionList(), structIDs, \
                                [], diagram_wavefunctions, wfNumber)
                for mothers, structcolorlist in zip(motherslist, colorlists):
                    for ct_vertex in ct_vertices:
                        # Now generate HelasAmplitudes from this ct_vertex.
                        inter = model.get_interaction(ct_vertex.get('id'))
                        keys = sorted(inter.get('couplings').keys())
                        pdg_codes = [p.get_pdg_code() for p in \
                                     inter.get('particles')]
                        mothers = mothers.sort_by_pdg_codes(pdg_codes, 0)[0]
                        # Need to check for clashing fermion flow due to
                        # Majorana fermions, and modify if necessary
                        wfNumber = mothers.check_and_fix_fermion_flow(wavefunctions,
                                                      diagram_wavefunctions,
                                                      external_wavefunctions,
                                                      None,
                                                      wfNumber,
                                                      False,
                                                      [])
                        done_color = {}
                        for i, coupl_key in enumerate(keys):
                            color = coupl_key[0]
                            if color in done_color.keys():
                                # Same color structure already seen: extend the
                                # couplings of the existing amplitude instead.
                                amp = done_color[color]
                                amp.get('coupling').append(inter.get('couplings')[coupl_key])
                                amp.get('lorentz').append(inter.get('lorentz')[coupl_key[1]])
                                continue
                            amp = helas_objects.HelasAmplitude(ct_vertex, model)
                            amp.set('coupling', [inter.get('couplings')[coupl_key]])
                            amp.set('lorentz', [inter.get('lorentz')[coupl_key[1]]])
                            if inter.get('color'):
                                amp.set('inter_color', inter.get('color')[color])
                            amp.set('color_key', color)
                            done_color[color] = amp
                            amp.set('mothers', mothers)
                            amplitudeNumber = amplitudeNumber + 1
                            amp.set('number', amplitudeNumber)
                            # Add the list with color indices to the amplitude
                            amp_color_list = copy.copy(structcolorlist)
                            amp_color_list.append(color)
                            amp.set('color_indices', amp_color_list)
                            amp.set('type',inter.get('type'))
                            # Add amplitude to amplitdes in helas_diagram
                            helas_diagram.get('amplitudes').append(amp)
                return wfNumber, amplitudeNumber
            # Iteratively read the loop flow, one tag element (loop vertex
            # with its attached tree structures) at a time.
            for tagElem in tag:
                wavefunctionNumber, last_loop_wfs, color_lists = \
                  process_tag_elem(tagElem, wavefunctionNumber, \
                                   last_loop_wfs, color_lists)
            # Generate all amplitudes corresponding to the different
            # copies of this diagram
            wavefunctionNumber, amplitudeNumber = create_amplitudes(
                     loop_vertices[-1], wavefunctionNumber, amplitudeNumber)
            # Add now the counter-terms vertices
            if ct_vertices:
                wavefunctionNumber, amplitudeNumber = process_counterterms(\
                  ct_vertices, wavefunctionNumber, amplitudeNumber)
            # Identify among the diagram wavefunctions those from the structures
            # which will fill the 'wavefunctions' list of the diagram
            struct_wfs=helas_objects.HelasWavefunctionList(\
                     [wf for wf in diagram_wavefunctions if not wf['is_loop']])
            loop_wfs=helas_objects.HelasWavefunctionList(\
                         [wf for wf in diagram_wavefunctions if wf['is_loop']])
            # Sort the wavefunctions according to number
            struct_wfs.sort(lambda wf1, wf2: \
                                      wf1.get('number') - wf2.get('number'))
            # After generation of all wavefunctions and amplitudes,
            # add wavefunctions to diagram
            helas_diagram.set('wavefunctions', struct_wfs)
            # Of course we only allow to reuse the struct wavefunctions but
            # never the loop ones which have to be present and reused in each
            # loop diagram, UNLESS we are in the loop_optimized_output mode.
            if optimization:
                wavefunctions.extend(struct_wfs)
                wf_mother_arrays.extend([wf.to_array() for wf in struct_wfs])
                if self.optimized_output:
                    wavefunctions.extend(loop_wfs)
                    wf_mother_arrays.extend([wf.to_array() for wf in loop_wfs])
            else:
                wavefunctionNumber = len(process.get('legs'))
                if self.optimized_output:
                    # Add one for the starting external loop wavefunctions
                    # which is fixed
                    wavefunctionNumber = wavefunctionNumber+1
            # And to the loop helas diagram if under the optimized output.
            # In the default output, one use those stored in the loop amplitude
            # since they are anyway not recycled. Notice that we remove the
            # external L-cut loop wavefunctions from this list since they do
            # not need to be computed.
            if self.optimized_output:
                loop_wfs = helas_objects.HelasWavefunctionList(
                           [lwf for lwf in loop_wfs if len(lwf.get('mothers'))>0])
                helas_diagram.set('loop_wavefunctions',loop_wfs)
            # Return the diagram obtained
            return helas_diagram, wavefunctionNumber, amplitudeNumber
        # Let's first treat the born diagrams
        if has_born:
            for diagram in amplitude.get('born_diagrams'):
                helBornDiag, wf_number, amplitude_number=\
                  process_born_diagram(diagram, wf_number, amplitude_number)
                diagram_number = diagram_number + 1
                helBornDiag.set('number', diagram_number)
                helas_diagrams.append(helBornDiag)
        # Now we treat the loop diagrams
        self.lwf_reused=0
        for diagram in amplitude.get('loop_diagrams'):
            loopHelDiag, wf_number, amplitude_number=\
              process_loop_diagram(diagram, wf_number, amplitude_number)
            diagram_number = diagram_number + 1
            loopHelDiag.set('number', diagram_number)
            helas_diagrams.append(loopHelDiag)
        # We finally turn to the UVCT diagrams
        for diagram in amplitude.get('loop_UVCT_diagrams'):
            loopHelDiag, wf_number, amplitude_number=\
              process_born_diagram(diagram, wf_number, amplitude_number, \
                                   UVCTdiag=True)
            diagram_number = diagram_number + 1
            loopHelDiag.set('number', diagram_number)
            # We must add the UVCT_orders to the regular orders of the
            # LoopHelasUVCTAmplitude
            for lamp in loopHelDiag.get_loop_UVCTamplitudes():
                new_orders = copy.copy(lamp.get('orders'))
                for order, value in lamp.get('UVCT_orders').items():
                    try:
                        new_orders[order] = new_orders[order] + value
                    except KeyError:
                        # Order not present yet in the regular orders.
                        new_orders[order] = value
                lamp.set('orders', new_orders)
            helas_diagrams.append(loopHelDiag)
        self.set('diagrams', helas_diagrams)
        # Check wf order consistency
        if __debug__:
            for diag in self.get('diagrams'):
                # This is just a monitoring function, it will *NOT* affect the
                # wavefunctions list of the diagram, but just raise an Error
                # if the order is inconsistent, namely if a wavefunction in this
                # list has a mother which appears after its position in the list.
                diag.get('wavefunctions').check_wavefunction_numbers_order()
        # Inform how many loop wavefunctions have been reused.
        if self.optimized_output:
            logger.debug('%d loop wavefunctions have been reused'%self.lwf_reused+
                    ', for a total of %d ones'%sum([len(ldiag.get('loop_wavefunctions'))
                                       for ldiag in self.get_loop_diagrams()]))
        # Sort all mothers according to the order wanted in Helas calls
        for wf in self.get_all_wavefunctions():
            wf.set('mothers', helas_objects.HelasMatrixElement.sorted_mothers(wf))
        for amp in self.get_all_amplitudes():
            amp.set('mothers', helas_objects.HelasMatrixElement.sorted_mothers(amp))
            # Not really necessary for the LoopHelasAmplitude as the color
            # indices of the amplitudes should be correct. It is however
            # cleaner like this. For debugging purposes we leave here an assert.
            gen_colors = amp.get('color_indices')
            amp.set('color_indices', amp.get_color_indices())
            if isinstance(amp,LoopHelasAmplitude):
                assert (amp.get('color_indices')==gen_colors), \
                  "Error in the treatment of color in the loop helas diagram "+\
                  "generation. It could be harmless, but report this bug to be sure."+\
                  " The different keys are %s vs %s."%(str(gen_colors),\
                                             str(amp.get('color_indices')))
        for loopdiag in self.get_loop_diagrams():
            for loopamp in loopdiag.get_loop_amplitudes():
                loopamp.set_mothers_and_pairing()
        # As a final step, we compute the analytic information for the loop
        # wavefunctions and amplitudes building this loop matrix element.
        # Because we want to have the same AlohaModel used for various
        # HelasMatrix elements, we instead perform the call below in the
        # export which will use its AlohaModel for several HelasME's.
        # Hence we comment it here.
        # self.compute_all_analytic_information()
def get_split_orders_mapping(self):
"""This function returns a list and a dictionary:
squared_orders, amps_orders
===
The squared_orders lists all contributing squared_orders as tuple whose
elements are the power at which are elevated the couplings orderered as
in the 'split_orders'.
squared_orders : All possible contributing squared orders among those
specified in the process['split_orders'] argument. The elements of
the list are tuples of the format
((OrderValue1,OrderValue2,...),
(max_contrib_ct_amp_number,
max_contrib_uvct_amp_number,
max_contrib_loop_amp_number,
max_contrib_group_id))
with OrderValue<i> correspond to the value of the <i>th order in
process['split_orders'] (the others are summed over and therefore
left unspecified).
Ex for dijet with process['split_orders']=['QCD','QED']:
=> [((4,0),(8,2,3)),((2,2),(10,3,3)),((0,4),(20,5,4))]
'max_contrib_loop_amp_number': For optimization purposes, it is good to
know what is the maximum loop amplitude number contributing to any given
squared order. The fortran output is structured so that if the user
is interested in a given squared order contribution only, then
all the open loop coefficients for the amplitudes with a number above
this value can be skipped.
'max_contrib_(uv)ct_amp_number': Same as above but for the
(uv)ctamplitude number.
'max_contrib_group_id': The same as above, except this time
it is for the loop group id used for the loop reduction.
===
The amps_orders is a *dictionary* with keys
'born_amp_orders',
'loop_amp_orders'
with values being the tuples described below.
If process['split_orders'] is empty, all these tuples are set empty.
'born_amp_orders' : Exactly as for squared order except that this list specifies
the contributing order values for the amplitude (i.e. not 'squared').
Also, the tuple describing the amplitude order is nested with a
second one listing all amplitude numbers contributing to this order.
Ex for dijet with process['split_orders']=['QCD','QED']:
=> [((2, 0), (2,)), ((0, 2), (1, 3, 4))]
The function returns () if the process has no borns.
'loop_amp_orders' : The same as for born_amp_orders but for the loop
type of amplitudes only.
Keep in mind that the orders of the elements of the outter most list is
important as it dictates the order for the corresponding "order indices"
in the fortran code output by the exporters.
"""
split_orders=self.get('processes')[0].get('split_orders')
# If no split_orders are defined, then return the obvious
amps_orders = {'born_amp_orders':[],
'loop_amp_orders':[]}
if len(split_orders)==0:
self.squared_orders = []
return [],amps_orders
# First make sure that the 'split_orders' are ordered according to their
# weight.
self.sort_split_orders(split_orders)
process = self.get('processes')[0]
# First make sure that the 'split_orders' are ordered according to their
# weight.
self.sort_split_orders(split_orders)
loop_amp_orders = self.get_split_orders_mapping_for_diagram_list(\
self.get_loop_diagrams(), split_orders,
get_amplitudes_function = lambda diag: diag.get_loop_amplitudes(),
# We chose at this stage to store not only the amplitude numbers but
# also the reference reduction id in the loop grouping, necessary
# for returning the max_contrib_ref_amp_numbers.
get_amp_number_function = lambda amp:
(amp.get('amplitudes')[0].get('number'),amp.get('loop_group_id')))
ct_amp_orders = self.get_split_orders_mapping_for_diagram_list(\
self.get_loop_diagrams(), split_orders,
get_amplitudes_function = lambda diag: diag.get_ct_amplitudes())
uvct_amp_orders = self.get_split_orders_mapping_for_diagram_list(\
self.get_loop_UVCT_diagrams(), split_orders)
# With this function, we just return the contributing amplitude numbers
# The format is therefore the same as for the born_amp_orders and
# ct_amp_orders
amps_orders['loop_amp_orders'] = dict([(lao[0],
[el[0] for el in lao[1]]) for lao in loop_amp_orders])
# Now add there the ct_amp_orders and uvct_amp_orders
for ct_amp_order in ct_amp_orders+uvct_amp_orders:
try:
amps_orders['loop_amp_orders'][ct_amp_order[0]].extend(\
list(ct_amp_order[1]))
except KeyError:
amps_orders['loop_amp_orders'][ct_amp_order[0]] = \
list(ct_amp_order[1])
# We must now turn it back to a list
amps_orders['loop_amp_orders'] = [
(key, tuple(sorted(amps_orders['loop_amp_orders'][key])))
for key in amps_orders['loop_amp_orders'].keys()]
# and re-sort it to make sure it follows an increasing WEIGHT order.
order_hierarchy = self.get('processes')[0]\
.get('model').get('order_hierarchy')
if set(order_hierarchy.keys()).union(set(split_orders))==\
set(order_hierarchy.keys()):
amps_orders['loop_amp_orders'].sort(key= lambda so:
sum([order_hierarchy[split_orders[i]]*order_power for \
i, order_power in enumerate(so[0])]))
# Finally the born amp orders
if process.get('has_born'):
born_amp_orders = self.get_split_orders_mapping_for_diagram_list(\
self.get_born_diagrams(),split_orders)
amps_orders['born_amp_orders'] = born_amp_orders
# Now we construct the interference splitting order matrix.
# For this we flatten the list of many individual 2-tuples of the form
# (amp_number, ref_amp_number) into one big 2-tuple of the form
# (tuple_of_all_amp_numers, tuple_of_all_ref_amp_numbers).
loop_orders = [(lso[0],tuple(zip(*list(lso[1])))) for lso in loop_amp_orders]
# For the reference orders (against which the loop and ct amps are squared)
# we only need the value of the orders, not the corresponding amp numbers.
if process.get('has_born'):
ref_orders = [bao[0] for bao in born_amp_orders]
else:
ref_orders = [lao[0] for lao in loop_orders+ct_amp_orders]
# Temporarily we set squared_orders to be a dictionary with keys being
# the actual contributing squared_orders and the values are the list
# [max_contrib_uvctamp_number,max_contrib_ct_amp_number,
# max_contrib_loop_amp_number,
# max_contrib_ref_amp_number]
# In the event where they would be no contributing amplitude in one of
# the four class above, then the list on which the function max will be
# called will be empty and we need to have the function not crash but
# return -1 instead.
def smax(AmpNumList):
return -1 if len(AmpNumList)==0 else max(AmpNumList)
squared_orders = {}
for ref_order in ref_orders:
for uvct_order in uvct_amp_orders:
key = tuple([ord1 + ord2 for ord1,ord2 in zip(uvct_order[0],
ref_order)])
try:
# Finding the max_contrib_uvct_amp_number
squared_orders[key][0] = smax([squared_orders[key][0]]+
list(uvct_order[1]))
except KeyError:
squared_orders[key] = [smax(list(uvct_order[1])),-1,-1,-1]
for ct_order in ct_amp_orders:
key = tuple([ord1 + ord2 for ord1,ord2 in zip(ct_order[0],
ref_order)])
try:
# Finding the max_contrib_ct_amp_number
squared_orders[key][1] = smax([squared_orders[key][1]]+
list(ct_order[1]))
except KeyError:
squared_orders[key] = [-1,smax(list(ct_order[1])),-1,-1]
for loop_order in loop_orders:
key = tuple([ord1 + ord2 for ord1,ord2 in zip(loop_order[0],
ref_order)])
try:
# Finding the max_contrib_loop_amp_number
squared_orders[key][2] = smax([squared_orders[key][2]]+
list(loop_order[1][0]))
# Finding the max_contrib_loop_id
squared_orders[key][3] = smax([squared_orders[key][3]]+
list(loop_order[1][1]))
except KeyError:
squared_orders[key] = [-1,-1,smax(list(loop_order[1][0])),
smax(list(loop_order[1][1]))]
# To sort the squared_orders, we now turn it into a list instead of a
# dictionary. Each element of the list as the format
# ( squared_so_powers_tuple,
# (max_uvct_amp_number, max_ct_amp_number,
# max_loop_amp_number, max_loop_id) )
squared_orders = [(sqso[0],tuple(sqso[1])) for sqso in \
squared_orders.items()]
# Sort the squared orders if the hierarchy defines them all.
order_hierarchy = self.get('processes')[0].get('model').get('order_hierarchy')
if set(order_hierarchy.keys()).union(set(split_orders))==\
set(order_hierarchy.keys()):
squared_orders.sort(key= lambda so:
sum([order_hierarchy[split_orders[i]]*order_power for \
i, order_power in enumerate(so[0])]))
# Cache the squared_orders information
self.squared_orders = squared_orders
return squared_orders, amps_orders
def get_squared_order_contribs(self):
"""Return the squared_order contributions as returned by the function
get_split_orders_mapping. It uses the cached value self.squared_orders
if it was already defined during a previous call to get_split_orders_mapping.
"""
if not hasattr(self, "squared_orders"):
self.get_split_orders_mapping()
return self.squared_orders
def find_max_loop_coupling(self):
""" Find the maximum number of loop couplings appearing in any of the
LoopHelasAmplitude in this LoopHelasMatrixElement"""
if len(self.get_loop_diagrams())==0:
return 0
return max([len(amp.get('coupling')) for amp in \
sum([d.get_loop_amplitudes() for d in self.get_loop_diagrams()],[])])
def get_max_loop_vertex_rank(self):
""" Returns the maximum power of loop momentum brought by a loop
interaction. For renormalizable theories, it should be no more than one.
"""
return max([lwf.get_analytic_info('interaction_rank') for lwf in \
self.get_all_loop_wavefunctions()])
def get_max_loop_rank(self):
""" Returns the rank of the contributing loop with maximum rank """
r_list = [lamp.get_analytic_info('wavefunction_rank') for ldiag in \
self.get_loop_diagrams() for lamp in ldiag.get_loop_amplitudes()]
if len(r_list)==0:
return 0
else:
return max(r_list)
def get_max_spin_connected_to_loop(self):
"""Returns the maximum spin that any particle either connected to a loop
or running in it has, among all the loops contributing to this ME"""
# Remember that the loop wavefunctions running in the loop are stored in
# the attribute 'loop_wavefunctions' of the HelasLoopDiagram in the
# optimized mode and in the 'wavefunction' attribute of the LoopHelasAmplitude
# in the default mode.
return max(
max(l.get('spin') for l in lamp.get('mothers')+
lamp.get('wavefunctions')+d.get('loop_wavefunctions'))
for d in self['diagrams'] if isinstance(d,LoopHelasDiagram)
for lamp in d.get_loop_amplitudes()
)
def get_max_loop_particle_spin(self):
""" Returns the spin of the loop particle with maximum spin among all
the loop contributing to this ME"""
return max([lwf.get('spin') for lwf in \
self.get_all_loop_wavefunctions()])
def relabel_loop_amplitudes(self):
"""Give a unique number to each non-equivalent (at the level of the output)
LoopHelasAmplitude """
LoopHelasAmplitudeRecognized=[]
for lamp in \
sum([d.get_loop_amplitudes() for d in self.get_loop_diagrams()],[]):
lamp.set('number',-1)
for lamp2 in LoopHelasAmplitudeRecognized:
if lamp.is_equivalent(lamp2):
# The if statement below would be to turn the optimization off
# if False:
lamp.set('number',lamp2.get('number'))
break;
if lamp.get('number')==-1:
lamp.set('number',(len(LoopHelasAmplitudeRecognized)+1))
LoopHelasAmplitudeRecognized.append(lamp)
def relabel_loop_amplitudes_optimized(self):
"""Give a unique number to each LoopHelasAmplitude. These will be the
number used for the LOOPCOEF array in the optimized output and the
grouping is done in a further stage by adding all the LOOPCOEF sharing
the same denominator to a given one using the 'loop_group_id' attribute
of the LoopHelasAmplitudes. """
lamp_number=1
for lamp in \
sum([d.get_loop_amplitudes() for d in self.get_loop_diagrams()],[]):
lamp.set('number',lamp_number)
lamp_number += 1
def relabel_loop_wfs_and_amps(self,wfnumber):
""" Give the correct number for the default output to the wavefunctions
and amplitudes building the loops """
# We want first the CT amplitudes and only then the loop ones.
CT_ampnumber=1
loop_ampnumber=self.get_number_of_CT_amplitudes()+1
loopwfnumber=1
# Now the loop ones
for loopdiag in self.get_loop_diagrams():
for wf in loopdiag.get('wavefunctions'):
wf.set('number',wfnumber)
wfnumber=wfnumber+1
for loopamp in loopdiag.get_loop_amplitudes():
loopwfnumber=1
for loopwf in loopamp['wavefunctions']:
loopwf.set('number',loopwfnumber)
loopwfnumber=loopwfnumber+1
for amp in loopamp['amplitudes']:
amp.set('number',loop_ampnumber)
loop_ampnumber=loop_ampnumber+1
for ctamp in loopdiag.get_ct_amplitudes():
ctamp.set('number',CT_ampnumber)
CT_ampnumber=CT_ampnumber+1
# Finally the loopUVCT ones
for loopUVCTdiag in self.get_loop_UVCT_diagrams():
for wf in loopUVCTdiag.get('wavefunctions'):
wf.set('number',wfnumber)
wfnumber=wfnumber+1
for amp in loopUVCTdiag.get('amplitudes'):
amp.set('number',CT_ampnumber)
CT_ampnumber=CT_ampnumber+1
    def relabel_loop_wfs_and_amps_optimized(self, wfnumber):
        """ Give the correct number for the optimized output to the wavefunctions
        and amplitudes building the loops.

        'wfnumber' is the next free wavefunction number after the born ones.
        CT amplitudes are numbered first (from 1), loop amplitudes follow. """
        CT_ampnumber=1
        loop_ampnumber=self.get_number_of_CT_amplitudes()+1
        loopwfnumber=1
        # Now the loop ones
        for loopdiag in self.get_loop_diagrams():
            for wf in loopdiag.get('wavefunctions'):
                wf.set('number',wfnumber)
                wfnumber=wfnumber+1
            # Unlike the default output, the optimized output numbers the
            # loop wavefunctions globally across all loop diagrams (they are
            # stored in the diagram's 'loop_wavefunctions' attribute).
            for lwf in loopdiag.get('loop_wavefunctions'):
                lwf.set('number',loopwfnumber)
                loopwfnumber=loopwfnumber+1
            for loopamp in loopdiag.get_loop_amplitudes():
                # Set the number of the starting loop wavefunction (common to
                # all diagrams) to 0.
                loopamp.get_starting_loop_wavefunction().set('number',0)
                for amp in loopamp['amplitudes']:
                    amp.set('number',loop_ampnumber)
                    loop_ampnumber=loop_ampnumber+1
            for ctamp in loopdiag.get_ct_amplitudes():
                ctamp.set('number',CT_ampnumber)
                CT_ampnumber=CT_ampnumber+1
        # Finally the loopUVCT ones
        for loopUVCTdiag in self.get_loop_UVCT_diagrams():
            for wf in loopUVCTdiag.get('wavefunctions'):
                wf.set('number',wfnumber)
                wfnumber=wfnumber+1
            # UVCT amplitudes continue the CT amplitude counter.
            for amp in loopUVCTdiag.get('amplitudes'):
                amp.set('number',CT_ampnumber)
                CT_ampnumber=CT_ampnumber+1
    def relabel_helas_objects(self):
        """After the generation of the helas objects, we can give up on having
        a unique number identifying the helas wavefunction and amplitudes and
        instead use a labeling which is optimal for the output of the loop process.
        Also we tag all the LoopHelasAmplitude which are identical with the same
        'number' attribute."""
        # Number the LoopHelasAmplitude depending of the type of output
        if self.optimized_output:
            self.relabel_loop_amplitudes_optimized()
        else:
            self.relabel_loop_amplitudes()
        # Start with the born diagrams
        wfnumber=1
        ampnumber=1
        for borndiag in self.get_born_diagrams():
            for wf in borndiag.get('wavefunctions'):
                wf.set('number',wfnumber)
                wfnumber=wfnumber+1
            for amp in borndiag.get('amplitudes'):
                amp.set('number',ampnumber)
                ampnumber=ampnumber+1
        # Number the HelasWavefunctions and Amplitudes from the loops
        # depending of the type of output
        if self.optimized_output:
            self.relabel_loop_wfs_and_amps_optimized(wfnumber)
            # Loop wavefunctions reuse their freshly assigned 'number' as
            # their 'me_id' in the optimized output.
            for lwf in [lwf for loopdiag in self.get_loop_diagrams() for \
                        lwf in loopdiag.get('loop_wavefunctions')]:
                lwf.set('me_id',lwf.get('number'))
        else:
            self.relabel_loop_wfs_and_amps(wfnumber)
        # Finally, for loops we do not reuse previously defined wavefunctions to
        # store new ones. So that 'me_id' is always equal to 'number'.
        for wf in self.get_all_wavefunctions():
            wf.set('me_id',wf.get('number'))
def get_number_of_wavefunctions(self):
"""Gives the total number of wavefunctions for this ME, including the
loop ones"""
return len(self.get_all_wavefunctions())
def get_number_of_loop_wavefunctions(self):
""" Gives the total number of loop wavefunctions for this ME."""
return sum([len(ldiag.get('loop_wavefunctions')) for ldiag in \
self.get_loop_diagrams()])
def get_number_of_external_wavefunctions(self):
"""Gives the total number of wavefunctions for this ME, excluding the
loop ones."""
return sum([ len(d.get('wavefunctions')) for d in self.get('diagrams')])
def get_all_wavefunctions(self):
"""Gives a list of all wavefunctions for this ME"""
allwfs=sum([d.get('wavefunctions') for d in self.get('diagrams')], [])
for d in self['diagrams']:
if isinstance(d,LoopHelasDiagram):
for l in d.get_loop_amplitudes():
allwfs += l.get('wavefunctions')
return allwfs
def get_all_loop_wavefunctions(self):
"""Gives a list of all the loop wavefunctions for this ME"""
return helas_objects.HelasWavefunctionList(
# In the default output, this is where the loop wavefunction
# are placed
[lwf for ldiag in self.get_loop_diagrams()
for lamp in ldiag.get_loop_amplitudes()
for lwf in lamp.get('wavefunctions')]+
# In the optimized one they are directly in the
# 'loop_wavefunctions' attribute of the loop diagrams
[lwf for ldiag in self.get_loop_diagrams() for lwf in
ldiag.get('loop_wavefunctions')])
def get_nexternal_ninitial(self):
"""Gives (number or external particles, number of
incoming particles)"""
external_wfs = filter(lambda wf:
not wf.get('mothers') and not wf.get('is_loop'),
self.get_all_wavefunctions())
return (len(set([wf.get('number_external') for wf in \
external_wfs])),
len(set([wf.get('number_external') for wf in \
filter(lambda wf: wf.get('leg_state') == False,
external_wfs)])))
def get_number_of_amplitudes(self):
"""Gives the total number of amplitudes for this ME, including the loop
ones."""
return len(self.get_all_amplitudes())
def get_number_of_CT_amplitudes(self):
"""Gives the total number of CT amplitudes for this ME. (i.e the amplitudes
which are not LoopHelasAmplitudes nor within them.)"""
return sum([len(d.get_ct_amplitudes()) for d in (self.get_loop_diagrams()+
self.get_loop_UVCT_diagrams())])
def get_number_of_external_amplitudes(self):
"""Gives the total number of amplitudes for this ME, excluding those
inside the loop amplitudes. (So only one is counted per loop amplitude.)
"""
return sum([ len(d.get('amplitudes')) for d in \
self.get('diagrams')])
def get_number_of_loop_amplitudes(self):
"""Gives the total number of helas amplitudes for the loop diagrams of this ME,
excluding those inside the loop amplitudes, but including the CT-terms.
(So only one amplitude is counted per loop amplitude.)
"""
return sum([len(d.get('amplitudes')) for d in (self.get_loop_diagrams()+
self.get_loop_UVCT_diagrams())])
def get_number_of_born_amplitudes(self):
"""Gives the total number of amplitudes for the born diagrams of this ME
"""
return sum([len(d.get('amplitudes')) for d in self.get_born_diagrams()])
def get_all_amplitudes(self):
"""Gives a list of all amplitudes for this ME"""
allamps=sum([d.get_regular_amplitudes() for d in self.get('diagrams')], [])
for d in self['diagrams']:
if isinstance(d,LoopHelasDiagram):
for l in d.get_loop_amplitudes():
allamps += l.get('amplitudes')
return allamps
def get_born_diagrams(self):
"""Gives a list of the born diagrams for this ME"""
return helas_objects.HelasDiagramList([hd for hd in self['diagrams'] if\
not isinstance(hd,LoopHelasDiagram)])
def get_loop_diagrams(self):
"""Gives a list of the loop diagrams for this ME"""
return helas_objects.HelasDiagramList([hd for hd in self['diagrams'] if\
isinstance(hd,LoopHelasDiagram) and\
len(hd.get_loop_amplitudes())>=1])
def get_loop_UVCT_diagrams(self):
"""Gives a list of the loop UVCT diagrams for this ME"""
return helas_objects.HelasDiagramList([hd for hd in self['diagrams'] if\
isinstance(hd,LoopHelasDiagram) and\
len(hd.get_loop_UVCTamplitudes())>=1])
def compute_all_analytic_information(self, alohaModel=None):
"""Make sure that all analytic pieces of information about all
loop wavefunctions and loop amplitudes building this loop helas matrix
element are computed so that they can be recycled later, typically
without the need of specifying an alohaModel.
Notice that for now this function is called at the end of the
generat_helas_diagrams function and the alohaModel is created here.
In principle, it might be better to have this function called by the
exporter just after export_v4 because at this stage an alohaModel is
already created and can be specified here instead of being generated.
This can make a difference for very complicated models."""
if alohaModel is None:
# Generate it here
model = self.get('processes')[0].get('model')
myAlohaModel = create_aloha.AbstractALOHAModel(model.get('name'))
myAlohaModel.add_Lorentz_object(model.get('lorentz'))
else:
# Use the one provided
myAlohaModel = alohaModel
for lwf in self.get_all_loop_wavefunctions():
lwf.compute_analytic_information(myAlohaModel)
for diag in self.get_loop_diagrams():
for amp in diag.get_loop_amplitudes():
amp.compute_analytic_information(myAlohaModel)
def get_used_lorentz(self):
"""Return a list of (lorentz_name, tags, outgoing) with
all lorentz structures used by this LoopHelasMatrixElement."""
# Loop version of the function which add to the tuple wether it is a loop
# structure or not so that aloha knows if it has to produce the subroutine
# which removes the denominator in the propagator of the wavefunction created.
output = []
for wa in self.get_all_wavefunctions() + self.get_all_amplitudes():
if wa.get('interaction_id') in [0,-1]:
continue
output.append(wa.get_aloha_info(self.optimized_output));
return output
def get_used_helas_loop_amps(self):
""" Returns the list of the helas loop amplitude of type
CALL LOOP_I_J(_K)(...) used for this matrix element """
# In the optimized output, we don't care about the number of couplings
# in a given loop.
if self.optimized_output:
last_relevant_index=3
else:
last_relevant_index=4
return list(set([lamp.get_call_key()[1:last_relevant_index] \
for ldiag in self.get_loop_diagrams() for lamp in \
ldiag.get_loop_amplitudes()]))
def get_used_wl_updates(self):
""" Returns a list of the necessary updates of the loop wavefunction
polynomials """
return list(set([(lwf.get_analytic_info('wavefunction_rank')-\
lwf.get_analytic_info('interaction_rank'),
lwf.get_analytic_info('interaction_rank'))
for ldiag in self.get_loop_diagrams()
for lwf in ldiag.get('loop_wavefunctions')]))
def get_used_couplings(self):
"""Return a list with all couplings used by this
HelasMatrixElement."""
answer = super(LoopHelasMatrixElement, self).get_used_couplings()
for diag in self.get_loop_UVCT_diagrams():
answer.extend([amp.get_used_UVCT_couplings() for amp in \
diag.get_loop_UVCTamplitudes()])
return answer
def get_color_amplitudes(self):
""" Just to forbid the usage of this generic function in a
LoopHelasMatrixElement"""
raise self.PhysicsObjectError, \
"Usage of get_color_amplitudes is not allowed in a LoopHelasMatrixElement"
def get_born_color_amplitudes(self):
"""Return a list of (coefficient, amplitude number) lists,
corresponding to the JAMPs for this born color basis and the born
diagrams of this LoopMatrixElement. The coefficients are given in the
format (fermion factor, color coeff (frac), imaginary, Nc power)."""
return super(LoopHelasMatrixElement,self).generate_color_amplitudes(\
self['born_color_basis'],self.get_born_diagrams())
    def get_loop_color_amplitudes(self):
        """Return a list of (coefficient, amplitude number) lists,
        corresponding to the JAMPs for this loop color basis and the loop
        diagrams of this LoopMatrixElement. The coefficients are given in the
        format (fermion factor, color coeff (frac), imaginary, Nc power)."""
        diagrams=self.get_loop_diagrams()
        color_basis=self['loop_color_basis']
        if not color_basis:
            # No color, simply add all amplitudes with correct factor
            # for first color amplitude
            col_amp = []
            for diagram in diagrams:
                for amplitude in diagram.get('amplitudes'):
                    col_amp.append(((amplitude.get('fermionfactor'),
                                    1, False, 0),
                                    amplitude.get('number')))
            return [col_amp]
        # There is a color basis - create a list of coefficients and
        # amplitude numbers
        # Remember that with get_base_amplitude of LoopHelasMatrixElement,
        # we get several base_objects.Diagrams for a given LoopHelasDiagram:
        # One for the loop and one for each counter-term.
        # We should then here associate what are the HelasAmplitudes associated
        # to each diagram number using the function
        # get_helas_amplitudes_loop_diagrams().
        LoopDiagramsHelasAmplitudeList=self.get_helas_amplitudes_loop_diagrams()
        # The HelasLoopAmplitudes should be unfolded to the HelasAmplitudes
        # (only one for the current version) they contain.
        for i, helas_amp_list in enumerate(LoopDiagramsHelasAmplitudeList):
            new_helas_amp_list=helas_objects.HelasAmplitudeList()
            for helas_amp in helas_amp_list:
                if isinstance(helas_amp,LoopHelasAmplitude):
                    new_helas_amp_list.extend(helas_amp['amplitudes'])
                else:
                    new_helas_amp_list.append(helas_amp)
            LoopDiagramsHelasAmplitudeList[i]=new_helas_amp_list
        # print "I get LoopDiagramsHelasAmplitudeList="
        # for i, elem in enumerate(LoopDiagramsHelasAmplitudeList):
        #     print "LoopDiagramsHelasAmplitudeList[",i,"]=",[amp.get('number') for amp in LoopDiagramsHelasAmplitudeList[i]]
        col_amp_list = []
        # One JAMP per color basis element, in the canonical (sorted) order
        # of the basis keys.
        for i, col_basis_elem in \
                enumerate(sorted(color_basis.keys())):
            col_amp = []
            # print "color_basis[col_basis_elem]=",color_basis[col_basis_elem]
            for diag_tuple in color_basis[col_basis_elem]:
                # Match the amplitudes of this diagram carrying exactly the
                # color index chain of this basis entry.
                res_amps = filter(lambda amp: \
                          tuple(amp.get('color_indices')) == diag_tuple[1],
                          LoopDiagramsHelasAmplitudeList[diag_tuple[0]])
                if not res_amps:
                    raise self.PhysicsObjectError, \
                          """No amplitude found for color structure
                            %s and color index chain (%s) (diagram %i)""" % \
                            (col_basis_elem,
                             str(diag_tuple[1]),
                             diag_tuple[0])
                for res_amp in res_amps:
                    col_amp.append(((res_amp.get('fermionfactor'),
                                    diag_tuple[2],
                                    diag_tuple[3],
                                    diag_tuple[4]),
                                    res_amp.get('number')))
            col_amp_list.append(col_amp)
        return col_amp_list
def get_helas_amplitudes_loop_diagrams(self):
""" When creating the base_objects.Diagram in get_base_amplitudes(),
each LoopHelasDiagram will lead to one loop_base_objects.LoopDiagram
for its LoopHelasAmplitude and one other for each of its counter-term
(with different interaction id). This function return a list for which
each element is a HelasAmplitudeList corresponding to the HelasAmplitudes
related to a given loop_base_objects.LoopDiagram generated """
amplitudes_loop_diagrams=[]
for diag in self.get_loop_diagrams():
# We start by adding the loop topology
amplitudes_loop_diagrams.append(diag.get_loop_amplitudes())
# Then add a diagram for each counter-term with a different
# interactions id. (because it involves a different interaction
# which possibly brings new color structures).
# This is strictly speaking not necessary since Counter-Terms
# cannot in principle bring new color structures into play.
# The dictionary ctIDs has the ct interactions ID as keys
# and a HelasAmplitudeList of the corresponding HelasAmplitude as
# values.
ctIDs={}
for ctamp in diag.get_ct_amplitudes():
try:
ctIDs[ctamp.get('interaction_id')].append(ctamp)
except KeyError:
ctIDs[ctamp.get('interaction_id')]=\
helas_objects.HelasAmplitudeList([ctamp])
# To have a canonical order of the CT diagrams, we sort them according
# to their interaction_id value.
keys=ctIDs.keys()
keys.sort()
for key in keys:
amplitudes_loop_diagrams.append(ctIDs[key])
for diag in self.get_loop_UVCT_diagrams():
amplitudes_loop_diagrams.append(diag.get_loop_UVCTamplitudes())
return amplitudes_loop_diagrams
    def get_base_amplitude(self):
        """Generate a loop_diagram_generation.LoopAmplitude from a
        LoopHelasMatrixElement. This is used to generate both color
        amplitudes and diagram drawing."""
        # Need to take care of diagram numbering for decay chains
        # before this can be used for those!
        # Optimization is disabled when several wavefunctions carry number 1,
        # i.e. when wavefunction recycling was not applied.
        # NOTE(review): len(filter(...)) only works on Python 2, where
        # filter returns a list; on Python 3 it is an iterator with no len().
        optimization = 1
        if len(filter(lambda wf: wf.get('number') == 1,
                      self.get_all_wavefunctions())) > 1:
            optimization = 0
        model = self.get('processes')[0].get('model')
        wf_dict = {}
        vx_list = []
        diagrams = base_objects.DiagramList()
        # Start with the born
        for diag in self.get_born_diagrams():
            newdiag=diag.get('amplitudes')[0].get_base_diagram(\
                                         wf_dict, vx_list, optimization)
            # Born diagrams are stored as LoopDiagrams of type 0.
            diagrams.append(loop_base_objects.LoopDiagram({
                                'vertices':newdiag['vertices'],'type':0}))
        # Store here the type of the last LoopDiagram encountered to reuse the
        # same value, but negative, for the corresponding counter-terms.
        # It is not strictly necessary, it only has to be negative.
        dtype=1
        for HelasAmpList in self.get_helas_amplitudes_loop_diagrams():
            # We use uniformly the class LoopDiagram for the diagrams stored
            # in LoopAmplitude
            if isinstance(HelasAmpList[0],LoopHelasAmplitude):
                diagrams.append(HelasAmpList[0].get_base_diagram(\
                                         wf_dict, vx_list, optimization))
                dtype=diagrams[-1]['type']
            elif isinstance(HelasAmpList[0],LoopHelasUVCTAmplitude):
                diagrams.append(HelasAmpList[0].\
                                get_base_diagram(wf_dict, vx_list, optimization))
            else:
                # Counter-term amplitudes: tagged with the negative of the
                # type of the loop diagram they belong to.
                newdiag=HelasAmpList[0].get_base_diagram(wf_dict, vx_list, optimization)
                diagrams.append(loop_base_objects.LoopDiagram({
                                'vertices':newdiag['vertices'],'type':-dtype}))
        for diag in diagrams:
            diag.calculate_orders(self.get('processes')[0].get('model'))
        return loop_diagram_generation.LoopAmplitude({\
                                'process': self.get('processes')[0],
                                'diagrams': diagrams})
#===============================================================================
# LoopHelasProcess
#===============================================================================
class LoopHelasProcess(helas_objects.HelasMultiProcess):
    """LoopHelasProcess: Analogous of HelasMultiProcess except that it is suited
    for LoopAmplitude and with the peculiarity that it is always treating only
    one loop amplitude. So this LoopHelasProcess correspond to only one single
    subprocess without multiparticle labels (contrary to HelasMultiProcess)."""

    # Type of HelasMatrixElement to be generated by this class of HelasMultiProcess
    matrix_element_class = LoopHelasMatrixElement

    def __init__(self, argument=None, combine_matrix_elements=True,
                 optimized_output = True, compute_loop_nc = False, matrix_element_opts={}):
        """ Allow for the initialization of the HelasMultiProcess with the
        right argument 'optimized_output' for the helas_matrix_element options.
        """
        # The mutable default for matrix_element_opts is harmless here
        # because it is copied right away and never mutated in place.
        matrix_element_opts = dict(matrix_element_opts)
        matrix_element_opts.update({'optimized_output' : optimized_output})
        super(LoopHelasProcess, self).__init__(argument, combine_matrix_elements,
                                 compute_loop_nc = compute_loop_nc,
                                 matrix_element_opts = matrix_element_opts)

    @classmethod
    def process_color(cls,matrix_element,color_information,compute_loop_nc=False):
        """ Process the color information for a given matrix
        element made of a loop diagrams. It will create a different
        color matrix depending on wether the process has a born or not.
        The compute_loop_nc sets wheter independent tracking of Nc power coming
        from the color loop trace is necessary or not (it is time consuming).
        """
        if matrix_element.get('processes')[0]['has_born']:
            logger.debug('Computing the loop and Born color basis')
        else:
            logger.debug('Computing the loop color basis')
        # Define the objects stored in the contained color_information
        # (binds e.g. list_colorize, list_color_basis and
        # dict_loopborn_matrices as local names).
        # NOTE(review): creating locals via exec is Python-2 specific
        # behavior; on Python 3 exec cannot add function locals.
        for key in color_information:
            exec("%s=color_information['%s']"%(key,key))
        # Now that the Helas Object generation is finished, we must relabel
        # the wavefunction and the amplitudes according to what should be
        # used for the output.
        matrix_element.relabel_helas_objects()
        # Always create an empty color basis, and the
        # list of raw colorize objects (before
        # simplification) associated with amplitude
        new_amp = matrix_element.get_base_amplitude()
        matrix_element.set('base_amplitude', new_amp)
        # Process the loop color basis which is needed anyway
        loop_col_basis = loop_color_amp.LoopColorBasis(
            compute_loop_nc = compute_loop_nc)
        loop_colorize_obj = loop_col_basis.create_loop_color_dict_list(\
                              matrix_element.get('base_amplitude'),
                              )
        try:
            # If the loop color configuration of the ME has
            # already been considered before, recycle
            # the information
            loop_col_basis_index = list_colorize.index(loop_colorize_obj)
            loop_col_basis = list_color_basis[loop_col_basis_index]
        except ValueError:
            # If not, create color basis accordingly
            list_colorize.append(loop_colorize_obj)
            loop_col_basis.build()
            loop_col_basis_index = len(list_color_basis)
            list_color_basis.append(loop_col_basis)
            logger.info(\
              "Processing color information for %s" % \
              matrix_element.get('processes')[0].nice_string(print_weighted=False).\
                             replace('Process', 'loop process'))
        else: # Found identical color
            logger.info(\
              "Reusing existing color information for %s" % \
              matrix_element.get('processes')[0].nice_string(print_weighted=False).\
                             replace('Process', 'loop process'))
        # When a born exists, a separate born color basis is processed (and
        # possibly recycled) in the same way.
        if new_amp['process']['has_born']:
            born_col_basis = loop_color_amp.LoopColorBasis()
            born_colorize_obj = born_col_basis.create_born_color_dict_list(\
                                  matrix_element.get('base_amplitude'))
            try:
                # If the loop color configuration of the ME has
                # already been considered before, recycle
                # the information
                born_col_basis_index = list_colorize.index(born_colorize_obj)
                born_col_basis = list_color_basis[born_col_basis_index]
            except ValueError:
                # If not, create color basis accordingly
                list_colorize.append(born_colorize_obj)
                born_col_basis.build()
                born_col_basis_index = len(list_color_basis)
                list_color_basis.append(born_col_basis)
                logger.info(\
                  "Processing color information for %s" % \
                  matrix_element.get('processes')[0].nice_string(print_weighted=False).\
                  replace('Process', 'born process'))
            else: # Found identical color
                logger.info(\
                  "Reusing existing color information for %s" % \
                  matrix_element.get('processes')[0].nice_string(print_weighted=False).\
                  replace('Process', 'born process'))
            loopborn_matrices_key=(loop_col_basis_index,born_col_basis_index)
        else:
            # Without a born, the color matrix is loop x loop.
            loopborn_matrices_key=(loop_col_basis_index,loop_col_basis_index)
        # Now we try to recycle the color matrix
        try:
            # If the color configuration of the ME has
            # already been considered before, recycle
            # the information
            col_matrix = dict_loopborn_matrices[loopborn_matrices_key]
        except KeyError:
            # If not, create color matrix accordingly
            col_matrix = color_amp.ColorMatrix(\
              list_color_basis[loopborn_matrices_key[0]],
              list_color_basis[loopborn_matrices_key[1]])
            dict_loopborn_matrices[loopborn_matrices_key]=col_matrix
            logger.info(\
              "Creating color matrix %s" % \
              matrix_element.get('processes')[0].nice_string().\
              replace('Process', 'loop process'))
        else: # Found identical color
            logger.info(\
              "Reusing existing color matrix for %s" % \
              matrix_element.get('processes')[0].nice_string().\
              replace('Process', 'loop process'))
        matrix_element.set('loop_color_basis',loop_col_basis)
        if new_amp['process']['has_born']:
            matrix_element.set('born_color_basis',born_col_basis)
        matrix_element.set('color_matrix',col_matrix)
| 50.641502
| 201
| 0.562776
|
4a04399602b824a499a6110729c9b718b54b8b28
| 1,039
|
py
|
Python
|
LeetCode/InterviewQuestions/ZS Associates/interview.py
|
shrey199325/LeetCodeSolution
|
fede0d98d2aeeedc3cd501ac5042f2217494f4c6
|
[
"Unlicense"
] | null | null | null |
LeetCode/InterviewQuestions/ZS Associates/interview.py
|
shrey199325/LeetCodeSolution
|
fede0d98d2aeeedc3cd501ac5042f2217494f4c6
|
[
"Unlicense"
] | null | null | null |
LeetCode/InterviewQuestions/ZS Associates/interview.py
|
shrey199325/LeetCodeSolution
|
fede0d98d2aeeedc3cd501ac5042f2217494f4c6
|
[
"Unlicense"
] | 1
|
2020-09-18T09:38:26.000Z
|
2020-09-18T09:38:26.000Z
|
"""
Technical round
arr1 = ["apple", "mango", "mango", "grapes"] -> n
arr2 = ["mango", "papaya", "papaya"] -> m
output -> ["apple", "mango", "mango", "grapes", "papaya"]
"""
def solution(arr1, arr2):
    """Merge arr2 into arr1: keep arr1 unchanged (duplicates included) and
    append each distinct element of arr2 that does not already occur in
    arr1, preserving first-seen order.

    Time O(n + m), extra space O(n + m) for the membership set and result.

    >>> solution(["apple", "mango", "mango", "grapes"],
    ...          ["mango", "papaya", "papaya"])
    ['apple', 'mango', 'mango', 'grapes', 'papaya']
    """
    seen = set(arr1)                     # O(n): fast membership tests
    extras = []
    for elem in arr2:                    # O(m)
        if elem not in seen:
            extras.append(elem)
            # Record it so repeated new elements of arr2 (e.g. the second
            # "papaya") are not appended twice -- the original missed this
            # and contradicted the documented expected output.
            seen.add(elem)
    return arr1 + extras
def solution2(arr1, arr2):
    """Return a dict mapping each element to its count: full counts of
    arr1, plus every element of arr2 that is new to the dict with count 1
    (repetitions inside arr2 are ignored).

    >>> solution2(["a", "b"], ["b", "c", "c"])
    {'a': 1, 'b': 1, 'c': 1}
    """
    counts = {}
    for elem in arr1:
        counts[elem] = counts.get(elem, 0) + 1
    for elem in arr2:
        # Only genuinely new elements are recorded. The original nested
        # 'if elem in sol_dict' inside the 'not in' branch was unreachable
        # dead code; removing it does not change behavior.
        if elem not in counts:
            counts[elem] = 1
    return counts
# FIX: the original literal read ["apple", "mango", "mango" "grapes"];
# Python's implicit string concatenation silently merged the last two items
# into "mangograpes". The missing comma is restored to match the problem
# statement.
arr1 = ["apple", "mango", "mango", "grapes"]
arr2 = ["mango", "papaya", "papaya"]
sol = solution2(arr1, arr2)
# Sort the (element, count) pairs by descending count.
sol = sorted(sol.items(), key=lambda x: x[1], reverse=True)
| 25.975
| 59
| 0.526468
|
4a043a0a481189e0154f283acf2fa2e8a0aa7bbc
| 10,636
|
py
|
Python
|
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/list_apis_v2_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/list_apis_v2_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/list_apis_v2_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class ListApisV2Request:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'id': 'str',
'name': 'str',
'group_id': 'str',
'req_protocol': 'str',
'req_method': 'str',
'req_uri': 'str',
'auth_type': 'str',
'env_id': 'str',
'type': 'int',
'offset': 'int',
'limit': 'int',
'precise_search': 'str'
}
attribute_map = {
'instance_id': 'instance_id',
'id': 'id',
'name': 'name',
'group_id': 'group_id',
'req_protocol': 'req_protocol',
'req_method': 'req_method',
'req_uri': 'req_uri',
'auth_type': 'auth_type',
'env_id': 'env_id',
'type': 'type',
'offset': 'offset',
'limit': 'limit',
'precise_search': 'precise_search'
}
def __init__(self, instance_id=None, id=None, name=None, group_id=None, req_protocol=None, req_method=None, req_uri=None, auth_type=None, env_id=None, type=None, offset=None, limit=None, precise_search=None):
"""ListApisV2Request - a model defined in huaweicloud sdk"""
self._instance_id = None
self._id = None
self._name = None
self._group_id = None
self._req_protocol = None
self._req_method = None
self._req_uri = None
self._auth_type = None
self._env_id = None
self._type = None
self._offset = None
self._limit = None
self._precise_search = None
self.discriminator = None
self.instance_id = instance_id
if id is not None:
self.id = id
if name is not None:
self.name = name
if group_id is not None:
self.group_id = group_id
if req_protocol is not None:
self.req_protocol = req_protocol
if req_method is not None:
self.req_method = req_method
if req_uri is not None:
self.req_uri = req_uri
if auth_type is not None:
self.auth_type = auth_type
if env_id is not None:
self.env_id = env_id
if type is not None:
self.type = type
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if precise_search is not None:
self.precise_search = precise_search
@property
def instance_id(self):
"""Gets the instance_id of this ListApisV2Request.
实例编号
:return: The instance_id of this ListApisV2Request.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListApisV2Request.
实例编号
:param instance_id: The instance_id of this ListApisV2Request.
:type: str
"""
self._instance_id = instance_id
@property
def id(self):
"""Gets the id of this ListApisV2Request.
API编号
:return: The id of this ListApisV2Request.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ListApisV2Request.
API编号
:param id: The id of this ListApisV2Request.
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ListApisV2Request.
API名称
:return: The name of this ListApisV2Request.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListApisV2Request.
API名称
:param name: The name of this ListApisV2Request.
:type: str
"""
self._name = name
@property
def group_id(self):
"""Gets the group_id of this ListApisV2Request.
API分组编号
:return: The group_id of this ListApisV2Request.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this ListApisV2Request.
API分组编号
:param group_id: The group_id of this ListApisV2Request.
:type: str
"""
self._group_id = group_id
@property
def req_protocol(self):
"""Gets the req_protocol of this ListApisV2Request.
请求协议
:return: The req_protocol of this ListApisV2Request.
:rtype: str
"""
return self._req_protocol
@req_protocol.setter
def req_protocol(self, req_protocol):
"""Sets the req_protocol of this ListApisV2Request.
请求协议
:param req_protocol: The req_protocol of this ListApisV2Request.
:type: str
"""
self._req_protocol = req_protocol
@property
def req_method(self):
"""Gets the req_method of this ListApisV2Request.
请求方法
:return: The req_method of this ListApisV2Request.
:rtype: str
"""
return self._req_method
@req_method.setter
def req_method(self, req_method):
"""Sets the req_method of this ListApisV2Request.
请求方法
:param req_method: The req_method of this ListApisV2Request.
:type: str
"""
self._req_method = req_method
@property
def req_uri(self):
"""Gets the req_uri of this ListApisV2Request.
请求路径
:return: The req_uri of this ListApisV2Request.
:rtype: str
"""
return self._req_uri
@req_uri.setter
def req_uri(self, req_uri):
"""Sets the req_uri of this ListApisV2Request.
请求路径
:param req_uri: The req_uri of this ListApisV2Request.
:type: str
"""
self._req_uri = req_uri
@property
def auth_type(self):
"""Gets the auth_type of this ListApisV2Request.
授权类型
:return: The auth_type of this ListApisV2Request.
:rtype: str
"""
return self._auth_type
@auth_type.setter
def auth_type(self, auth_type):
"""Sets the auth_type of this ListApisV2Request.
授权类型
:param auth_type: The auth_type of this ListApisV2Request.
:type: str
"""
self._auth_type = auth_type
@property
def env_id(self):
"""Gets the env_id of this ListApisV2Request.
发布的环境编号
:return: The env_id of this ListApisV2Request.
:rtype: str
"""
return self._env_id
@env_id.setter
def env_id(self, env_id):
"""Sets the env_id of this ListApisV2Request.
发布的环境编号
:param env_id: The env_id of this ListApisV2Request.
:type: str
"""
self._env_id = env_id
@property
def type(self):
"""Gets the type of this ListApisV2Request.
API类型
:return: The type of this ListApisV2Request.
:rtype: int
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ListApisV2Request.
API类型
:param type: The type of this ListApisV2Request.
:type: int
"""
self._type = type
@property
def offset(self):
"""Gets the offset of this ListApisV2Request.
偏移量,表示从此偏移量开始查询,偏移量小于0时,自动转换为0
:return: The offset of this ListApisV2Request.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListApisV2Request.
偏移量,表示从此偏移量开始查询,偏移量小于0时,自动转换为0
:param offset: The offset of this ListApisV2Request.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListApisV2Request.
每页显示的条目数量
:return: The limit of this ListApisV2Request.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListApisV2Request.
每页显示的条目数量
:param limit: The limit of this ListApisV2Request.
:type: int
"""
self._limit = limit
@property
def precise_search(self):
"""Gets the precise_search of this ListApisV2Request.
指定需要精确匹配查找的参数名称,目前仅支持name、req_uri
:return: The precise_search of this ListApisV2Request.
:rtype: str
"""
return self._precise_search
@precise_search.setter
def precise_search(self, precise_search):
"""Sets the precise_search of this ListApisV2Request.
指定需要精确匹配查找的参数名称,目前仅支持name、req_uri
:param precise_search: The precise_search of this ListApisV2Request.
:type: str
"""
self._precise_search = precise_search
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListApisV2Request):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Return True when the two objects are not equal."""
    equal = self == other
    return not equal
| 24.506912
| 212
| 0.565062
|
4a043a5eee98476c772f49915d96171598e46c14
| 14,399
|
py
|
Python
|
openstack_dashboard/dashboards/project/images/images/forms.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/images/images/forms.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/images/images/forms.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.conf import settings
from django.core import validators
from django.forms import ValidationError
from django.forms.widgets import HiddenInput
from django.template import defaultfilters
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard import policy
IMAGE_BACKEND_SETTINGS = settings.OPENSTACK_IMAGE_BACKEND
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS['image_formats']
class ImageURLField(forms.URLField):
    """URL form field that only accepts http/https image locations."""

    # Restrict to web URLs; the stock URLField validator would also accept
    # other schemes (e.g. ftp://), which this form does not support.
    default_validators = [validators.URLValidator(schemes=["http", "https"])]
# Choose the file field and the form base class at import time, based on how
# this deployment transfers image data to Glance.
if api.glance.get_image_upload_mode() == 'direct':
    # Direct mode: the browser uploads straight to Glance, so the form needs
    # the external-upload field plus the metaclass that wires it up.
    FileField = forms.ExternalFileField
    class CreateParent(forms.SelfHandlingForm,
                       metaclass=forms.ExternalUploadMeta):
        pass
else:
    # Legacy (proxy through Horizon) or uploads disabled: a plain file field
    # and the ordinary self-handling form are sufficient.
    FileField = forms.FileField
    CreateParent = forms.SelfHandlingForm
class CreateImageForm(CreateParent):
    """Form for creating a new Glance image.

    The image data can come either from an external HTTP/HTTPS location or
    from a file uploaded by the user; which sources are offered depends on
    the image upload mode, Horizon settings, and the user's policy checks.
    """

    name = forms.CharField(max_length=255, label=_("Name"))
    description = forms.CharField(
        max_length=255,
        widget=forms.Textarea(attrs={'rows': 4}),
        label=_("Description"),
        required=False)
    source_type = forms.ChoiceField(
        label=_('Image Source'),
        required=False,
        choices=[('url', _('Image Location')),
                 ('file', _('Image File'))],
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switchable',
            'data-slug': 'source'}))
    # Angular (ng-*) attributes bind the URL input to the client-side
    # controller so the disk format can be guessed from the location.
    image_url_attrs = {
        'class': 'switched',
        'data-switch-on': 'source',
        'data-source-url': _('Image Location'),
        'ng-model': 'ctrl.copyFrom',
        'ng-change': 'ctrl.selectImageFormat(ctrl.copyFrom)',
        'placeholder': 'http://example.com/image.img'
    }
    image_url = ImageURLField(label=_("Image Location"),
                              help_text=_("An external (HTTP/HTTPS) URL to "
                                          "load the image from."),
                              widget=forms.TextInput(attrs=image_url_attrs),
                              required=False)
    image_attrs = {
        'class': 'switched',
        'data-switch-on': 'source',
        'data-source-file': _('Image File'),
        'ng-model': 'ctrl.imageFile',
        'ng-change': 'ctrl.selectImageFormat(ctrl.imageFile.name)',
        'image-file-on-change': None
    }
    # FileField is chosen at import time (plain vs. external upload widget).
    image_file = FileField(label=_("Image File"),
                           help_text=_("A local image to upload."),
                           widget=forms.FileInput(attrs=image_attrs),
                           required=False)
    kernel = forms.ChoiceField(
        label=_('Kernel'),
        required=False,
        widget=forms.ThemableSelectWidget(
            transform=lambda x: "%s (%s)" % (
                x.name, defaultfilters.filesizeformat(x.size))))
    ramdisk = forms.ChoiceField(
        label=_('Ramdisk'),
        required=False,
        widget=forms.ThemableSelectWidget(
            transform=lambda x: "%s (%s)" % (
                x.name, defaultfilters.filesizeformat(x.size))))
    disk_format = forms.ChoiceField(label=_('Format'),
                                    choices=[],
                                    widget=forms.ThemableSelectWidget(attrs={
                                        'class': 'switchable',
                                        'ng-model': 'ctrl.diskFormat'}))
    architecture = forms.CharField(
        max_length=255,
        label=_("Architecture"),
        help_text=_('CPU architecture of the image.'),
        required=False)
    min_disk = forms.IntegerField(
        label=_("Minimum Disk (GB)"),
        min_value=0,
        help_text=_('The minimum disk size required to boot the image. '
                    'If unspecified, this value defaults to 0 (no minimum).'),
        required=False)
    min_ram = forms.IntegerField(
        label=_("Minimum RAM (MB)"),
        min_value=0,
        help_text=_('The minimum memory size required to boot the image. '
                    'If unspecified, this value defaults to 0 (no minimum).'),
        required=False)
    is_copying = forms.BooleanField(
        label=_("Copy Data"), initial=True, required=False,
        help_text=_('Specify this option to copy image data to the image '
                    'service. If unspecified, image data will be used in its '
                    'current location.'),
        widget=forms.CheckboxInput(attrs={
            'class': 'switched',
            'data-source-url': _('Image Location'),
            'data-switch-on': 'source'}))
    is_public = forms.BooleanField(
        label=_("Public"),
        help_text=_('Make the image visible across projects.'),
        required=False)
    protected = forms.BooleanField(
        label=_("Protected"),
        help_text=_('Prevent the deletion of the image.'),
        required=False)

    def __init__(self, request, *args, **kwargs):
        """Hide unavailable source options and populate the choice fields.

        Raises ValidationError when neither upload nor location-based
        creation is permitted, since the form would then be unusable.
        """
        super().__init__(request, *args, **kwargs)
        if (api.glance.get_image_upload_mode() == 'off' or
                not policy.check((("image", "upload_image"),), request)):
            self._hide_file_source_type()
        if not policy.check((("image", "set_image_location"),), request):
            self._hide_url_source_type()
        # GlanceV2 feature removals
        if api.glance.VERSIONS.active >= 2:
            # NOTE: GlanceV2 doesn't support copy-from feature, sorry!
            self._hide_is_copying()
            if not settings.IMAGES_ALLOW_LOCATION:
                self._hide_url_source_type()
                if (api.glance.get_image_upload_mode() == 'off' or not
                        policy.check((("image", "upload_image"),), request)):
                    # Neither setting a location nor uploading image data is
                    # allowed, so throw an error.
                    msg = _('The current Horizon settings indicate no valid '
                            'image creation methods are available. Providing '
                            'an image location and/or uploading from the '
                            'local file system must be allowed to support '
                            'image creation.')
                    messages.error(request, msg)
                    raise ValidationError(msg)
        if not policy.check((("image", "publicize_image"),), request):
            self._hide_is_public()
        self.fields['disk_format'].choices = \
            api.glance.get_image_formats(request)
        # Offer existing AKI images as selectable kernels; drop the field
        # entirely when none exist.
        try:
            kernel_images = api.glance.image_list_detailed(
                request, filters={'disk_format': 'aki'})[0]
        except Exception:
            kernel_images = []
            msg = _('Unable to retrieve image list.')
            messages.error(request, msg)
        if kernel_images:
            choices = [('', _("Choose an image"))]
            for image in kernel_images:
                choices.append((image.id, image))
            self.fields['kernel'].choices = choices
        else:
            del self.fields['kernel']
        # Same treatment for ARI (ramdisk) images.
        try:
            ramdisk_images = api.glance.image_list_detailed(
                request, filters={'disk_format': 'ari'})[0]
        except Exception:
            ramdisk_images = []
            msg = _('Unable to retrieve image list.')
            messages.error(request, msg)
        if ramdisk_images:
            choices = [('', _("Choose an image"))]
            for image in ramdisk_images:
                choices.append((image.id, image))
            self.fields['ramdisk'].choices = choices
        else:
            del self.fields['ramdisk']

    def _hide_file_source_type(self):
        """Remove the file-upload option; hide the selector if only one
        choice remains."""
        self.fields['image_file'].widget = HiddenInput()
        source_type = self.fields['source_type']
        source_type.choices = [choice for choice in source_type.choices
                               if choice[0] != 'file']
        if len(source_type.choices) == 1:
            source_type.widget = HiddenInput()

    def _hide_url_source_type(self):
        """Remove the URL option; hide the selector if only one choice
        remains."""
        self.fields['image_url'].widget = HiddenInput()
        source_type = self.fields['source_type']
        source_type.choices = [choice for choice in source_type.choices
                               if choice[0] != 'url']
        if len(source_type.choices) == 1:
            source_type.widget = HiddenInput()

    def _hide_is_public(self):
        """Hide the public checkbox and force images to non-public."""
        self.fields['is_public'].widget = HiddenInput()
        self.fields['is_public'].initial = False

    def _hide_is_copying(self):
        """Hide the copy-data checkbox and disable copy-from behavior."""
        self.fields['is_copying'].widget = HiddenInput()
        self.fields['is_copying'].initial = False

    def clean(self):
        """Require at least one image source (file or URL)."""
        data = super().clean()
        # The image_file key can be missing based on particular upload
        # conditions. Code defensively for it here...
        source_type = data.get('source_type', None)
        image_file = data.get('image_file', None)
        image_url = data.get('image_url', None)
        if not image_url and not image_file:
            msg = _("An image file or an external location must be specified.")
            # Attach the error to whichever source field the user selected.
            if source_type == 'file':
                error_msg = {'image_file': [msg, ]}
            else:
                error_msg = {'image_url': [msg, ]}
            raise ValidationError(error_msg)
        return data

    def handle(self, request, data):
        """Create the image in Glance from the cleaned form data.

        Returns the new image on success, or False after reporting the
        failure to the user.
        """
        meta = api.glance.create_image_metadata(data)
        # Add image source file or URL to metadata
        if (api.glance.get_image_upload_mode() != 'off' and
                policy.check((("image", "upload_image"),), request) and
                data.get('image_file', None)):
            meta['data'] = data['image_file']
        elif data.get('is_copying'):
            meta['copy_from'] = data['image_url']
        else:
            meta['location'] = data['image_url']
        try:
            image = api.glance.image_create(request, **meta)
            messages.info(request,
                          _('Your image %s has been queued for creation.') %
                          meta['name'])
            return image
        except Exception as e:
            msg = _('Unable to create new image')
            # TODO(nikunj2512): Fix this once it is fixed in glance client
            if hasattr(e, 'code') and e.code == 400:
                # Map known 400 responses to friendlier messages.
                if "Invalid disk format" in e.details:
                    msg = _('Unable to create new image: Invalid disk format '
                            '%s for image.') % meta['disk_format']
                elif "Image name too long" in e.details:
                    msg = _('Unable to create new image: Image name too long.')
                elif "not supported" in e.details:
                    msg = _('Unable to create new image: URL scheme not '
                            'supported.')
            exceptions.handle(request, msg)
            return False
class UpdateImageForm(forms.SelfHandlingForm):
    """Form for editing the metadata of an existing Glance image."""

    image_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(max_length=255, label=_("Name"))
    description = forms.CharField(
        max_length=255,
        widget=forms.Textarea(attrs={'rows': 4}),
        label=_("Description"),
        required=False)
    # Kernel, ramdisk, and architecture are rendered read-only: they are
    # displayed for reference but cannot be edited here.
    kernel = forms.CharField(
        max_length=36,
        label=_("Kernel ID"),
        required=False,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
    )
    ramdisk = forms.CharField(
        max_length=36,
        label=_("Ramdisk ID"),
        required=False,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
    )
    architecture = forms.CharField(
        label=_("Architecture"),
        required=False,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
    )
    disk_format = forms.ThemableChoiceField(
        label=_("Format"),
    )
    min_disk = forms.IntegerField(
        label=_("Minimum Disk (GB)"),
        min_value=0,
        help_text=_('The minimum disk size required to boot the image. '
                    'If unspecified, this value defaults to 0 (no minimum).'),
        required=False)
    min_ram = forms.IntegerField(
        label=_("Minimum RAM (MB)"),
        min_value=0,
        help_text=_('The minimum memory size required to boot the image. '
                    'If unspecified, this value defaults to 0 (no minimum).'),
        required=False)
    is_public = forms.BooleanField(label=_("Public"), required=False)
    protected = forms.BooleanField(label=_("Protected"), required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate format choices; lock 'is_public' for users without the
        publicize_image policy."""
        super().__init__(request, *args, **kwargs)
        # Drop the empty placeholder entry from the configured format list.
        self.fields['disk_format'].choices = [(value, name) for value,
                                              name in IMAGE_FORMAT_CHOICES
                                              if value]
        if not policy.check((("image", "publicize_image"),), request):
            # Show the current visibility but prevent changing it.
            self.fields['is_public'].widget = forms.CheckboxInput(
                attrs={'readonly': 'readonly', 'disabled': 'disabled'})
            self.fields['is_public'].help_text = _(
                'Non admin users are not allowed to make images public.')

    def handle(self, request, data):
        """Apply the metadata update in Glance and report the outcome."""
        image_id = data['image_id']
        error_updating = _('Unable to update image "%s".')
        meta = api.glance.create_image_metadata(data)
        try:
            image = api.glance.image_update(request, image_id, **meta)
            messages.success(request, _('Image was successfully updated.'))
            return image
        except Exception:
            exceptions.handle(request, error_updating % image_id)
| 40.22067
| 79
| 0.583096
|
4a043b9e857a6f16915a3f2d05fce82779dbb5ec
| 54
|
py
|
Python
|
py-remote/gui/__init__.py
|
Bulkin/servo-ctl
|
54818a21ad4bd2a7d97e265054318fb050c49c0a
|
[
"BSD-2-Clause"
] | null | null | null |
py-remote/gui/__init__.py
|
Bulkin/servo-ctl
|
54818a21ad4bd2a7d97e265054318fb050c49c0a
|
[
"BSD-2-Clause"
] | null | null | null |
py-remote/gui/__init__.py
|
Bulkin/servo-ctl
|
54818a21ad4bd2a7d97e265054318fb050c49c0a
|
[
"BSD-2-Clause"
] | null | null | null |
__all__ = ["renderer", "twoaxiswidget", "input_devs"]
| 27
| 53
| 0.703704
|
4a043cb54bce8962b79eb571029d43a41fb25dd2
| 37,711
|
py
|
Python
|
test/test_core_pipeline.py
|
gbdrt/lale
|
291f824a6b96f088e787979ca768f50d7758424e
|
[
"Apache-2.0"
] | null | null | null |
test/test_core_pipeline.py
|
gbdrt/lale
|
291f824a6b96f088e787979ca768f50d7758424e
|
[
"Apache-2.0"
] | null | null | null |
test/test_core_pipeline.py
|
gbdrt/lale
|
291f824a6b96f088e787979ca768f50d7758424e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import traceback
import typing
import unittest
import sklearn.datasets
import sklearn.pipeline
from sklearn.metrics import accuracy_score
import lale.datasets.openml
import lale.helpers
import lale.operators
from lale.helpers import import_from_sklearn_pipeline
from lale.lib.autogen import SGDClassifier
from lale.lib.lale import ConcatFeatures, NoOp
from lale.lib.sklearn import (
PCA,
GaussianNB,
KNeighborsClassifier,
LinearRegression,
LinearSVC,
LogisticRegression,
Nystroem,
OneHotEncoder,
PassiveAggressiveClassifier,
StandardScaler,
)
from lale.lib.xgboost import XGBClassifier
class TestCreation(unittest.TestCase):
    """Tests for the various ways of constructing lale pipelines."""

    def setUp(self):
        """Create a fresh train/test split of the iris dataset."""
        from sklearn.model_selection import train_test_split

        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)

    def test_pipeline_create(self):
        """Pipeline built from named (name, op) steps fits and predicts."""
        from lale.operators import Pipeline

        pipeline = Pipeline(([("pca1", PCA()), ("lr1", LogisticRegression())]))
        trained = pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        accuracy_score(self.y_test, predictions)

    def test_pipeline_create_trainable(self):
        """lale.lib.sklearn.Pipeline yields a trainable op whose fitted
        steps are trained individual ops."""
        import lale.lib.sklearn
        import lale.operators

        pipeline = lale.lib.sklearn.Pipeline(
            steps=[("pca1", PCA()), ("lr1", LogisticRegression())]
        )
        self.assertIsInstance(pipeline, lale.operators.TrainableIndividualOp)
        trained = pipeline.fit(self.X_train, self.y_train)
        pca_trained, lr_trained = [op for _, op in trained.hyperparams()["steps"]]
        self.assertIsInstance(pca_trained, lale.operators.TrainedIndividualOp)
        self.assertIsInstance(lr_trained, lale.operators.TrainedIndividualOp)
        predictions = trained.predict(self.X_test)
        accuracy_score(self.y_test, predictions)

    def test_pipeline_create_trained(self):
        """Assembling already-trained steps produces a trained op directly."""
        import lale.lib.sklearn
        import lale.operators

        orig_trainable = PCA() >> LogisticRegression()
        orig_trained = orig_trainable.fit(self.X_train, self.y_train)
        self.assertIsInstance(orig_trained, lale.operators.TrainedPipeline)
        pca_trained, lr_trained = orig_trained.steps()
        pre_trained = lale.lib.sklearn.Pipeline(
            steps=[("pca1", pca_trained), ("lr1", lr_trained)]
        )
        self.assertIsInstance(pre_trained, lale.operators.TrainedIndividualOp)
        predictions = pre_trained.predict(self.X_test)
        accuracy_score(self.y_test, predictions)

    def test_pipeline_clone(self):
        """sklearn.base.clone of a lale pipeline preserves accuracy."""
        from sklearn.base import clone

        from lale.operators import Pipeline

        pipeline = Pipeline(([("pca1", PCA()), ("lr1", LogisticRegression())]))
        trained = pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        orig_acc = accuracy_score(self.y_test, predictions)
        cloned_pipeline = clone(pipeline)
        trained = cloned_pipeline.fit(self.X_train, self.y_train)
        predictions = trained.predict(self.X_test)
        cloned_acc = accuracy_score(self.y_test, predictions)
        self.assertEqual(orig_acc, cloned_acc)

    def test_make_pipeline(self):
        """lale.operators.make_pipeline composes transformer + classifier."""
        tfm = PCA(n_components=10)
        clf = LogisticRegression(random_state=42)
        trainable = lale.operators.make_pipeline(tfm, clf)
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)

    def test_compose2(self):
        """The >> operator composes two steps into a pipeline."""
        tfm = PCA(n_components=10)
        clf = LogisticRegression(random_state=42)
        trainable = tfm >> clf
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)

    def test_compose3(self):
        """Three-step chained composition with >>."""
        nys = Nystroem(n_components=15)
        pca = PCA(n_components=10)
        lr = LogisticRegression(random_state=42)
        trainable = nys >> pca >> lr
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)

    def test_pca_nys_lr(self):
        """make_union feeds parallel transformers into a classifier."""
        from lale.operators import make_union

        nys = Nystroem(n_components=15)
        pca = PCA(n_components=10)
        lr = LogisticRegression(random_state=42)
        trainable = make_union(nys, pca) >> lr
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.predict(digits.data)

    def test_compose4(self):
        """Choice (|) and composition (>>) build a planned search space."""
        digits = sklearn.datasets.load_digits()
        _ = digits
        ohe = OneHotEncoder(handle_unknown=OneHotEncoder.handle_unknown.ignore)
        ohe.get_params()
        no_op = NoOp()
        pca = PCA()
        nys = Nystroem()
        lr = LogisticRegression()
        knn = KNeighborsClassifier()
        step1 = ohe | no_op
        step2 = pca | nys
        step3 = lr | knn
        model_plan = step1 >> step2 >> step3
        _ = model_plan
        # TODO: optimize on this plan and then fit and predict

    def test_compose5(self):
        """An encoder >> classifier pipeline fits and predicts."""
        ohe = OneHotEncoder(handle_unknown=OneHotEncoder.handle_unknown.ignore)
        digits = sklearn.datasets.load_digits()
        lr = LogisticRegression()
        lr_trained = lr.fit(digits.data, digits.target)
        lr_trained.predict(digits.data)
        pipeline1 = ohe >> lr
        pipeline1_trained = pipeline1.fit(digits.data, digits.target)
        pipeline1_trained.predict(digits.data)

    def test_compare_with_sklearn(self):
        """A lale pipeline scores identically to the equivalent raw
        scikit-learn pipeline."""
        tfm = PCA()
        clf = LogisticRegression(
            LogisticRegression.solver.lbfgs, LogisticRegression.multi_class.auto
        )
        trainable = lale.operators.make_pipeline(tfm, clf)
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        predicted = trained.predict(digits.data)
        from sklearn.decomposition import PCA as SklearnPCA
        from sklearn.linear_model import LogisticRegression as SklearnLR

        sklearn_pipeline = sklearn.pipeline.make_pipeline(
            SklearnPCA(), SklearnLR(solver="lbfgs", multi_class="auto")
        )
        sklearn_pipeline.fit(digits.data, digits.target)
        predicted_sklearn = sklearn_pipeline.predict(digits.data)
        lale_score = accuracy_score(digits.target, predicted)
        scikit_score = accuracy_score(digits.target, predicted_sklearn)
        self.assertEqual(lale_score, scikit_score)
class TestImportExport(unittest.TestCase):
def setUp(self):
    """Create a fresh train/test split of the iris dataset for each test."""
    from sklearn.model_selection import train_test_split

    data = sklearn.datasets.load_iris()
    X, y = data.data, data.target
    self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
def assert_equal_predictions(self, pipeline1, pipeline2):
    """Fit both pipelines on the stored train split and assert that they
    produce identical predictions on the test split.

    :param pipeline1: first fittable pipeline (sklearn or lale)
    :param pipeline2: second fittable pipeline to compare against
    """
    trained1 = pipeline1.fit(self.X_train, self.y_train)
    predictions1 = trained1.predict(self.X_test)
    trained2 = pipeline2.fit(self.X_train, self.y_train)
    predictions2 = trained2.predict(self.X_test)
    # The original used a list comprehension purely for its assertEqual
    # side effects, building a throwaway list; a single list comparison is
    # clearer and also catches length mismatches.
    self.assertEqual(list(predictions1), list(predictions2))
def test_import_from_sklearn_pipeline(self):
    """Importing a two-step sklearn Pipeline preserves each step's
    hyperparameters and its predictions."""
    from sklearn.feature_selection import SelectKBest, f_regression
    from sklearn.pipeline import Pipeline
    from sklearn.svm import SVC as SklearnSVC

    anova_filter = SelectKBest(f_regression, k=3)
    clf = SklearnSVC(kernel="linear")
    sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
    lale_pipeline = typing.cast(
        lale.operators.TrainablePipeline,
        import_from_sklearn_pipeline(sklearn_pipeline),
    )
    # Step i of the imported pipeline must wrap the same estimator params.
    for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
        sklearn_step_params = sklearn_pipeline.named_steps[
            pipeline_step
        ].get_params()
        lale_sklearn_params = lale_pipeline.steps()[
            i
        ]._impl._wrapped_model.get_params()
        self.assertEqual(sklearn_step_params, lale_sklearn_params)
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline1(self):
    """Importing a make_pipeline-built PCA+KNN pipeline preserves params
    and predictions."""
    from sklearn.decomposition import PCA as SklearnPCA
    from sklearn.neighbors import KNeighborsClassifier as SklearnKNN

    sklearn_pipeline = sklearn.pipeline.make_pipeline(
        SklearnPCA(n_components=3), SklearnKNN()
    )
    lale_pipeline = typing.cast(
        lale.operators.TrainablePipeline,
        import_from_sklearn_pipeline(sklearn_pipeline),
    )
    for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
        sklearn_step_params = sklearn_pipeline.named_steps[
            pipeline_step
        ].get_params()
        lale_sklearn_params = lale_pipeline.steps()[
            i
        ]._impl._wrapped_model.get_params()
        self.assertEqual(sklearn_step_params, lale_sklearn_params)
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_feature_union(self):
    """A FeatureUnion imports as parallel branches joined by
    ConcatFeatures, with the expected dataflow edges."""
    from sklearn.decomposition import PCA as SklearnPCA
    from sklearn.kernel_approximation import Nystroem as SklearnNystroem
    from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
    from sklearn.pipeline import FeatureUnion

    union = FeatureUnion(
        [
            ("pca", SklearnPCA(n_components=1)),
            ("nys", SklearnNystroem(n_components=2, random_state=42)),
        ]
    )
    sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
    lale_pipeline = typing.cast(
        lale.operators.TrainablePipeline,
        import_from_sklearn_pipeline(sklearn_pipeline),
    )
    self.assertEqual(len(lale_pipeline.edges()), 3)
    from lale.lib.lale.concat_features import ConcatFeaturesImpl
    from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifierImpl
    from lale.lib.sklearn.nystroem import NystroemImpl
    from lale.lib.sklearn.pca import PCAImpl

    # Expected edges: PCA->Concat, Nystroem->Concat, Concat->KNN.
    self.assertEqual(lale_pipeline.edges()[0][0]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[0][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[1][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[1][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[2][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(
        lale_pipeline.edges()[2][1]._impl_class(), KNeighborsClassifierImpl
    )
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_nested_pipeline(self):
    """A pipeline nested inside a FeatureUnion flattens into the expected
    edge structure on import."""
    from sklearn.decomposition import PCA as SklearnPCA
    from sklearn.feature_selection import SelectKBest
    from sklearn.kernel_approximation import Nystroem as SklearnNystroem
    from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
    from sklearn.pipeline import FeatureUnion

    union = FeatureUnion(
        [
            (
                "selectkbest_pca",
                sklearn.pipeline.make_pipeline(
                    SelectKBest(k=3), SklearnPCA(n_components=1)
                ),
            ),
            ("nys", SklearnNystroem(n_components=2, random_state=42)),
        ]
    )
    sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
    lale_pipeline = typing.cast(
        lale.operators.TrainablePipeline,
        import_from_sklearn_pipeline(sklearn_pipeline),
    )
    self.assertEqual(len(lale_pipeline.edges()), 4)
    from lale.lib.lale.concat_features import ConcatFeaturesImpl
    from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifierImpl
    from lale.lib.sklearn.nystroem import NystroemImpl
    from lale.lib.sklearn.pca import PCAImpl
    from lale.lib.sklearn.select_k_best import SelectKBestImpl

    # These assertions assume topological sort
    self.assertEqual(lale_pipeline.edges()[0][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[0][1]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[1][0]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[1][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[2][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[2][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[3][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(
        lale_pipeline.edges()[3][1]._impl_class(), KNeighborsClassifierImpl
    )
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_nested_pipeline1(self):
    """Deeply nested FeatureUnions (a union inside a union's pipeline)
    import with the expected eight dataflow edges."""
    from sklearn.decomposition import PCA as SklearnPCA
    from sklearn.feature_selection import SelectKBest
    from sklearn.kernel_approximation import Nystroem as SklearnNystroem
    from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
    from sklearn.pipeline import FeatureUnion

    union = FeatureUnion(
        [
            (
                "selectkbest_pca",
                sklearn.pipeline.make_pipeline(
                    SelectKBest(k=3),
                    FeatureUnion(
                        [
                            ("pca", SklearnPCA(n_components=1)),
                            (
                                "nested_pipeline",
                                sklearn.pipeline.make_pipeline(
                                    SelectKBest(k=2), SklearnNystroem()
                                ),
                            ),
                        ]
                    ),
                ),
            ),
            ("nys", SklearnNystroem(n_components=2, random_state=42)),
        ]
    )
    sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
    lale_pipeline = typing.cast(
        lale.operators.TrainablePipeline,
        import_from_sklearn_pipeline(sklearn_pipeline),
    )
    self.assertEqual(len(lale_pipeline.edges()), 8)
    # These assertions assume topological sort, which may not be unique. So the assertions are brittle.
    from lale.lib.lale.concat_features import ConcatFeaturesImpl
    from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifierImpl
    from lale.lib.sklearn.nystroem import NystroemImpl
    from lale.lib.sklearn.pca import PCAImpl
    from lale.lib.sklearn.select_k_best import SelectKBestImpl

    self.assertEqual(lale_pipeline.edges()[0][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[0][1]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[1][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[1][1]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[2][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[2][1]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[3][0]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[3][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[4][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[4][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[5][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[5][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[6][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[6][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[7][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(
        lale_pipeline.edges()[7][1]._impl_class(), KNeighborsClassifierImpl
    )
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_nested_pipeline2(self):
    """A pipeline nested inside another pipeline inside a FeatureUnion
    imports with the expected five dataflow edges."""
    from sklearn.decomposition import PCA as SklearnPCA
    from sklearn.feature_selection import SelectKBest
    from sklearn.kernel_approximation import Nystroem as SklearnNystroem
    from sklearn.neighbors import KNeighborsClassifier as SklearnKNN
    from sklearn.pipeline import FeatureUnion

    union = FeatureUnion(
        [
            (
                "selectkbest_pca",
                sklearn.pipeline.make_pipeline(
                    SelectKBest(k=3),
                    sklearn.pipeline.make_pipeline(SelectKBest(k=2), SklearnPCA()),
                ),
            ),
            ("nys", SklearnNystroem(n_components=2, random_state=42)),
        ]
    )
    sklearn_pipeline = sklearn.pipeline.make_pipeline(union, SklearnKNN())
    lale_pipeline = typing.cast(
        lale.operators.TrainablePipeline,
        import_from_sklearn_pipeline(sklearn_pipeline),
    )
    self.assertEqual(len(lale_pipeline.edges()), 5)
    from lale.lib.lale.concat_features import ConcatFeaturesImpl
    from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifierImpl
    from lale.lib.sklearn.nystroem import NystroemImpl
    from lale.lib.sklearn.pca import PCAImpl
    from lale.lib.sklearn.select_k_best import SelectKBestImpl

    self.assertEqual(lale_pipeline.edges()[0][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[0][1]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[1][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[1][1]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[2][0]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[2][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[3][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[3][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[4][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(
        lale_pipeline.edges()[4][1]._impl_class(), KNeighborsClassifierImpl
    )
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
def test_import_from_sklearn_pipeline_noop(self):
    """A sklearn Pipeline containing a bare None step is rejected."""
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.pipeline import Pipeline

    from lale.helpers import import_from_sklearn_pipeline

    pipe = Pipeline([("noop", None), ("gbc", GradientBoostingClassifier())])
    with self.assertRaises(ValueError):
        _ = import_from_sklearn_pipeline(pipe)
def test_import_from_sklearn_pipeline_noop1(self):
    """A pipeline using lale's NoOp placeholder imports successfully."""
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.pipeline import Pipeline

    from lale.helpers import import_from_sklearn_pipeline

    pipe = Pipeline([("noop", NoOp()), ("gbc", GradientBoostingClassifier())])
    _ = import_from_sklearn_pipeline(pipe)
def test_export_to_sklearn_pipeline(self):
    """Exporting a trained lale pipeline preserves per-step params and
    predictions."""
    lale_pipeline = PCA(n_components=3) >> KNeighborsClassifier()
    trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
    sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
    for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
        sklearn_step_params = sklearn_pipeline.named_steps[
            pipeline_step
        ].get_params()
        lale_sklearn_params = trained_lale_pipeline.steps()[
            i
        ]._impl._wrapped_model.get_params()
        self.assertEqual(sklearn_step_params, lale_sklearn_params)
    self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline1(self):
    """Exported steps have the same concrete estimator types as the
    trained lale steps (wrapped model where present)."""
    from sklearn.feature_selection import SelectKBest

    lale_pipeline = SelectKBest(k=3) >> KNeighborsClassifier()
    trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
    sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
    for i, pipeline_step in enumerate(sklearn_pipeline.named_steps):
        sklearn_step_params = type(sklearn_pipeline.named_steps[pipeline_step])
        lale_sklearn_params = (
            type(trained_lale_pipeline.steps()[i]._impl._wrapped_model)
            if hasattr(trained_lale_pipeline.steps()[i]._impl, "_wrapped_model")
            else type(trained_lale_pipeline.steps()[i]._impl)
        )
        self.assertEqual(sklearn_step_params, lale_sklearn_params)
    self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline2(self):
    """Parallel branches joined by ConcatFeatures export as a sklearn
    FeatureUnion feeding the classifier."""
    from sklearn.feature_selection import SelectKBest
    from sklearn.pipeline import FeatureUnion

    lale_pipeline = (
        (
            (
                (PCA(svd_solver="randomized", random_state=42) & SelectKBest(k=3))
                >> ConcatFeatures()
            )
            & Nystroem(random_state=42)
        )
        >> ConcatFeatures()
        >> KNeighborsClassifier()
    )
    trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
    sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
    self.assertIsInstance(
        sklearn_pipeline.named_steps["featureunion"], FeatureUnion
    )
    from sklearn.neighbors import KNeighborsClassifier as SklearnKNN

    self.assertIsInstance(
        sklearn_pipeline.named_steps["kneighborsclassifier"], SklearnKNN
    )
    self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline3(self):
    """Three parallel branches plus a post-concat SelectKBest export to a
    FeatureUnion followed by SelectKBest and LogisticRegression."""
    from sklearn.feature_selection import SelectKBest
    from sklearn.pipeline import FeatureUnion

    lale_pipeline = (
        (
            (PCA() >> SelectKBest(k=2))
            & (Nystroem(random_state=42) >> SelectKBest(k=3))
            & (SelectKBest(k=3))
        )
        >> ConcatFeatures()
        >> SelectKBest(k=2)
        >> LogisticRegression()
    )
    trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
    sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
    self.assertIsInstance(
        sklearn_pipeline.named_steps["featureunion"], FeatureUnion
    )
    self.assertIsInstance(sklearn_pipeline.named_steps["selectkbest"], SelectKBest)
    from sklearn.linear_model import LogisticRegression as SklearnLR

    self.assertIsInstance(
        sklearn_pipeline.named_steps["logisticregression"], SklearnLR
    )
    self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline4(self):
lale_pipeline = lale.operators.make_pipeline(LogisticRegression())
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
from sklearn.linear_model import LogisticRegression as SklearnLR
self.assertIsInstance(
sklearn_pipeline.named_steps["logisticregression"], SklearnLR
)
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline5(self):
lale_pipeline = PCA() >> (XGBClassifier() | SGDClassifier())
with self.assertRaises(ValueError):
_ = lale_pipeline.export_to_sklearn_pipeline()
def test_export_to_pickle(self):
lale_pipeline = lale.operators.make_pipeline(LogisticRegression())
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
pickle.dumps(lale_pipeline)
pickle.dumps(trained_lale_pipeline)
def test_import_from_sklearn_pipeline2(self):
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
sklearn_pipeline.fit(self.X_train, self.y_train)
lale_pipeline = typing.cast(
lale.operators.TrainedPipeline,
import_from_sklearn_pipeline(sklearn_pipeline),
)
lale_pipeline.predict(self.X_test)
def test_import_from_sklearn_pipeline3(self):
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC as SklearnSVC
anova_filter = SelectKBest(f_regression, k=3)
clf = SklearnSVC(kernel="linear")
sklearn_pipeline = Pipeline([("anova", anova_filter), ("svc", clf)])
lale_pipeline = typing.cast(
lale.operators.TrainablePipeline,
import_from_sklearn_pipeline(sklearn_pipeline, fitted=False),
)
with self.assertRaises(
ValueError
): # fitted=False returns a Trainable, so calling predict is invalid.
lale_pipeline.predict(self.X_test)
def test_export_to_sklearn_pipeline_with_noop_1(self):
lale_pipeline = NoOp() >> PCA(n_components=3) >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline_with_noop_2(self):
lale_pipeline = PCA(n_components=3) >> NoOp() >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
def test_export_to_sklearn_pipeline_with_noop_3(self):
# This test is probably unnecessary, but doesn't harm at this point
lale_pipeline = PCA(n_components=3) >> KNeighborsClassifier() >> NoOp()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
_ = trained_lale_pipeline.export_to_sklearn_pipeline()
def test_export_to_sklearn_pipeline_with_noop_4(self):
lale_pipeline = NoOp() >> KNeighborsClassifier()
trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
class TestComposition(unittest.TestCase):
    """Integration tests for lale's composition combinators (>>, &, |,
    ConcatFeatures, NoOp, remove_last) on the iris dataset.

    The exact pipeline expressions ARE the test subject here, so they are
    intentionally spelled out rather than factored into helpers.
    """
    def setUp(self):
        # Fresh random train/test split of iris before every test method.
        from sklearn.model_selection import train_test_split
        data = sklearn.datasets.load_iris()
        X, y = data.data, data.target
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)
    def test_two_estimators_predict(self):
        # An estimator (LogisticRegression) used mid-pipeline as a feature
        # generator alongside transformers, concatenated, then a final estimator.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & LogisticRegression())
            >> ConcatFeatures()
            >> NoOp()
            >> LogisticRegression()
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)
    def test_two_estimators_predict1(self):
        # Same shape as above but with PassiveAggressiveClassifier in both roles.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        trained.predict(self.X_test)
    def test_two_estimators_predict_proba(self):
        # predict_proba must work when the final estimator supports it.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & LogisticRegression())
            >> ConcatFeatures()
            >> NoOp()
            >> LogisticRegression()
        )
        trained = pipeline.fit(self.X_train, self.y_train)
        trained.predict_proba(self.X_test)
    def test_two_estimators_predict_proba1(self):
        # Same, with GaussianNB as both mid-pipeline and final estimator.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & GaussianNB())
            >> ConcatFeatures()
            >> NoOp()
            >> GaussianNB()
        )
        pipeline.fit(self.X_train, self.y_train)
        pipeline.predict_proba(self.X_test)
    def test_multiple_estimators_predict_predict_proba(self):
        # Mixes estimators at two different interior stages; both predict and
        # predict_proba must still be available on the overall pipeline.
        pipeline = (
            StandardScaler()
            >> (LogisticRegression() & PCA())
            >> ConcatFeatures()
            >> (NoOp() & LinearSVC())
            >> ConcatFeatures()
            >> KNeighborsClassifier()
        )
        pipeline.fit(self.X_train, self.y_train)
        _ = pipeline.predict_proba(self.X_test)
        _ = pipeline.predict(self.X_test)
    def test_two_transformers(self):
        # A transformer-only pipeline supports transform (no predict).
        tfm1 = PCA()
        tfm2 = Nystroem()
        trainable = tfm1 >> tfm2
        digits = sklearn.datasets.load_digits()
        trained = trainable.fit(digits.data, digits.target)
        _ = trained.transform(digits.data)
    def test_duplicate_instances(self):
        # Reusing the SAME operator instance twice in one pipeline is rejected.
        tfm = PCA()
        clf = LogisticRegression(
            LogisticRegression.solver.lbfgs, LogisticRegression.multi_class.auto
        )
        with self.assertRaises(ValueError):
            _ = lale.operators.make_pipeline(tfm, tfm, clf)
    def test_increase_num_rows(self):
        # A custom operator may change the number of rows between steps.
        from test.mock_custom_operators import IncreaseRows
        increase_rows = IncreaseRows()
        trainable = increase_rows >> LogisticRegression()
        iris = sklearn.datasets.load_iris()
        X, y = iris.data, iris.target
        trained = trainable.fit(X, y)
        _ = trained.predict(X)
    def test_remove_last1(self):
        # Default remove_last is non-destructive: original keeps all 7 steps.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        new_pipeline = pipeline.remove_last()
        self.assertEqual(len(new_pipeline._steps), 6)
        self.assertEqual(len(pipeline._steps), 7)
    def test_remove_last2(self):
        # remove_last is ambiguous when the pipeline ends in parallel sinks.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> (PassiveAggressiveClassifier() & LogisticRegression())
        )
        with self.assertRaises(ValueError):
            pipeline.remove_last()
    def test_remove_last3(self):
        # The result of remove_last can still be frozen as a trainable.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        pipeline.remove_last().freeze_trainable()
    def test_remove_last4(self):
        # inplace=True mutates the original: both handles now see 6 steps.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        new_pipeline = pipeline.remove_last(inplace=True)
        self.assertEqual(len(new_pipeline._steps), 6)
        self.assertEqual(len(pipeline._steps), 6)
    def test_remove_last5(self):
        # inplace removal composes with freeze_trainable as well.
        pipeline = (
            StandardScaler()
            >> (PCA() & Nystroem() & PassiveAggressiveClassifier())
            >> ConcatFeatures()
            >> NoOp()
            >> PassiveAggressiveClassifier()
        )
        pipeline.remove_last(inplace=True).freeze_trainable()
class TestAutoPipeline(unittest.TestCase):
    """End-to-end tests of lale.lib.lale.AutoPipeline on several datasets,
    covering classification/regression, string categoricals, and (synthetic)
    missing values."""
    def _fit_predict(self, prediction_type, all_X, all_y, verbose=True):
        """Split the data, fit an AutoPipeline (10 evals), predict, and
        (when verbose) print the score and the chosen pipeline.

        Parameters:
            prediction_type (str) : "classification" or "regression".
            all_X, all_y : full dataset; split 75/25 internally.
            verbose (bool) : print caller name, score, and pretty pipeline.
        """
        import sklearn.metrics
        import sklearn.model_selection
        if verbose:
            # Report which test method invoked this helper.
            file_name, line, fn_name, text = traceback.extract_stack()[-2]
            print(f"--- TestAutoPipeline.{fn_name}() ---")
        from lale.lib.lale import AutoPipeline
        train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(
            all_X, all_y
        )
        trainable = AutoPipeline(
            prediction_type=prediction_type, max_evals=10, verbose=verbose
        )
        trained = trainable.fit(train_X, train_y)
        predicted = trained.predict(test_X)
        if prediction_type == "regression":
            score = f"r2 score {sklearn.metrics.r2_score(test_y, predicted):.2f}"
        else:
            score = f"accuracy {sklearn.metrics.accuracy_score(test_y, predicted):.1%}"
        if verbose:
            print(score)
            print(trained.get_pipeline().pretty_print(show_imports=False))
    def test_sklearn_iris(self):
        # classification, only numbers, no missing values
        all_X, all_y = sklearn.datasets.load_iris(return_X_y=True)
        self._fit_predict("classification", all_X, all_y)
    def test_sklearn_digits(self):
        # classification, numbers but some appear categorical, no missing values
        all_X, all_y = sklearn.datasets.load_digits(return_X_y=True)
        self._fit_predict("classification", all_X, all_y)
    def test_sklearn_boston(self):
        # regression, categoricals+numbers, no missing values
        # NOTE(review): load_boston was removed in scikit-learn 1.2 —
        # TODO confirm the pinned sklearn version or migrate this dataset.
        all_X, all_y = sklearn.datasets.load_boston(return_X_y=True)
        self._fit_predict("regression", all_X, all_y)
    def test_sklearn_diabetes(self):
        # regression, categoricals+numbers, no missing values
        all_X, all_y = sklearn.datasets.load_diabetes(return_X_y=True)
        self._fit_predict("regression", all_X, all_y)
    def test_openml_creditg(self):
        import sklearn.model_selection
        # classification, categoricals+numbers incl. string, no missing values
        (orig_train_X, orig_train_y), _ = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=False
        )
        # 5% subsample keeps the 10-eval search fast.
        subsample_X, _, subsample_y, _ = sklearn.model_selection.train_test_split(
            orig_train_X, orig_train_y, train_size=0.05
        )
        self._fit_predict("classification", subsample_X, subsample_y)
    def test_missing_iris(self):
        # classification, only numbers, synthetically added missing values
        all_X, all_y = sklearn.datasets.load_iris(return_X_y=True)
        with_missing_X = lale.helpers.add_missing_values(all_X)
        # Sanity check: a plain estimator must reject NaNs ...
        with self.assertRaisesRegex(ValueError, "Input contains NaN"):
            lr_trainable = LogisticRegression()
            _ = lr_trainable.fit(with_missing_X, all_y)
        # ... while AutoPipeline is expected to impute and succeed.
        self._fit_predict("classification", with_missing_X, all_y)
    def test_missing_boston(self):
        # regression, categoricals+numbers, synthetically added missing values
        # NOTE(review): load_boston was removed in scikit-learn 1.2 — see above.
        all_X, all_y = sklearn.datasets.load_boston(return_X_y=True)
        with_missing_X = lale.helpers.add_missing_values(all_X)
        with self.assertRaisesRegex(ValueError, "Input contains NaN"):
            lr_trainable = LinearRegression()
            _ = lr_trainable.fit(with_missing_X, all_y)
        self._fit_predict("regression", with_missing_X, all_y)
    def test_missing_creditg(self):
        import sklearn.model_selection
        # classification, categoricals+numbers incl. string, synth. missing
        (orig_train_X, orig_train_y), _ = lale.datasets.openml.fetch(
            "credit-g", "classification", preprocess=False
        )
        subsample_X, _, subsample_y, _ = sklearn.model_selection.train_test_split(
            orig_train_X, orig_train_y, train_size=0.05
        )
        with_missing_X = lale.helpers.add_missing_values(subsample_X)
        self._fit_predict("classification", with_missing_X, subsample_y)
class TestOperatorChoice(unittest.TestCase):
    """Tests for the | (choice) combinator; the expression shapes themselves
    are what is under test, so they must stay as written."""
    def test_make_choice_with_instance(self):
        from sklearn.datasets import load_iris
        from lale.operators import make_choice
        iris = load_iris()
        X, y = iris.data, iris.target
        # A bare OperatorChoice has no fit of its own.
        tfm = PCA() | Nystroem() | NoOp()
        with self.assertRaises(AttributeError):
            # we are trying to trigger a runtime error here, so we ignore the static warning
            _ = tfm.fit(X, y)  # type: ignore
        # Choices compose with >> whether built from classes, instances,
        # or the make_choice() function — all three spellings must parse.
        _ = (OneHotEncoder | NoOp) >> tfm >> (LogisticRegression | KNeighborsClassifier)
        _ = (
            (OneHotEncoder | NoOp)
            >> (PCA | Nystroem)
            >> (LogisticRegression | KNeighborsClassifier)
        )
        _ = (
            make_choice(OneHotEncoder, NoOp)
            >> make_choice(PCA, Nystroem)
            >> make_choice(LogisticRegression, KNeighborsClassifier)
        )
| 42.853409
| 107
| 0.663122
|
4a043cc49a5fe70cafc395fa264c7897ad690ba3
| 4,053
|
py
|
Python
|
kubernetes/client/models/v1_capabilities.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 1
|
2019-10-07T13:54:36.000Z
|
2019-10-07T13:54:36.000Z
|
kubernetes/client/models/v1_capabilities.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 8
|
2020-10-28T01:18:36.000Z
|
2021-06-11T01:06:15.000Z
|
kubernetes/client/models/v1_capabilities.py
|
pllsxyc/python
|
442ebc019056c2dc246be94f85cf61f1e1d26a88
|
[
"Apache-2.0"
] | 1
|
2021-03-16T16:05:33.000Z
|
2021-03-16T16:05:33.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1Capabilities(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'add': 'list[str]',
        'drop': 'list[str]'
    }

    attribute_map = {
        'add': 'add',
        'drop': 'drop'
    }

    def __init__(self, add=None, drop=None, local_vars_configuration=None):  # noqa: E501
        """V1Capabilities - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._add = None
        self._drop = None
        self.discriminator = None

        # Only assign through the property setters when a value was given,
        # so unset fields stay None and are distinguishable from [].
        if add is not None:
            self.add = add
        if drop is not None:
            self.drop = drop

    @property
    def add(self):
        """Gets the add of this V1Capabilities.  # noqa: E501

        Added capabilities  # noqa: E501

        :return: The add of this V1Capabilities.  # noqa: E501
        :rtype: list[str]
        """
        return self._add

    @add.setter
    def add(self, add):
        """Sets the add of this V1Capabilities.

        Added capabilities  # noqa: E501

        :param add: The add of this V1Capabilities.  # noqa: E501
        :type: list[str]
        """
        self._add = add

    @property
    def drop(self):
        """Gets the drop of this V1Capabilities.  # noqa: E501

        Removed capabilities  # noqa: E501

        :return: The drop of this V1Capabilities.  # noqa: E501
        :rtype: list[str]
        """
        return self._drop

    @drop.setter
    def drop(self, drop):
        """Sets the drop of this V1Capabilities.

        Removed capabilities  # noqa: E501

        :param drop: The drop of this V1Capabilities.  # noqa: E501
        :type: list[str]
        """
        self._drop = drop

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists, and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1Capabilities):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1Capabilities):
            return True

        return self.to_dict() != other.to_dict()
| 26.84106
| 124
| 0.564027
|
4a043d42b4b6c96db8577ea2b62bea1237993d8c
| 4,306
|
py
|
Python
|
hw4_1.py
|
hollylessthan/ML-3
|
02a11e868d8eb12d02196636e9fa2c27cc62af76
|
[
"Apache-2.0"
] | null | null | null |
hw4_1.py
|
hollylessthan/ML-3
|
02a11e868d8eb12d02196636e9fa2c27cc62af76
|
[
"Apache-2.0"
] | null | null | null |
hw4_1.py
|
hollylessthan/ML-3
|
02a11e868d8eb12d02196636e9fa2c27cc62af76
|
[
"Apache-2.0"
] | null | null | null |
import random as rd
import numpy as np
# Load the dataset from the user's desktop: each line is expected to be
# "f1,f2,f3,f4,label" (iris-style) — presumably; verify against the file.
desk ="/Users/lofangyu/Desktop/"
f = open(desk + "hw4_dataset.txt", "r")
# NOTE(review): 'list' shadows the builtin list; the file handle is never closed.
list = f.readlines()
new_list=[]
# NOTE(review): range(len(list)-1) drops the last line — presumably to skip a
# trailing blank/footer line; TODO confirm against the data file.
for i in range(len(list)-1):
    m = list[i]
    n = m.replace(","," ")
    new_list.append(n.split())
# Weight matrices for a 4-4-4-3 feed-forward network, initialized
# uniformly in [-0.1, 0.1]:
#   initial  : input -> hidden layer 1 (4x4)
#   initial1 : hidden layer 1 -> hidden layer 2 (4x4)
#   initial2 : hidden layer 2 -> output (4x3)
initial = []
initial1 = []
initial2 = []
learn_rate = 0.1
for i in range (16):
    I = rd.uniform(-0.1, 0.1)
    J = rd.uniform(-0.1, 0.1)
    initial.append(I)
    initial1.append(J)
for i in range(12):
    K = rd.uniform(-0.1, 0.1)
    initial2.append(K)
# Reshape the flat weight lists into row-major matrices.
initial = [initial[l:l+4] for l in range(0, len(initial),4)]
initial1 = [initial1[l:l+4] for l in range(0, len(initial1),4)]
initial2 = [initial2[l:l+3] for l in range(0, len(initial2),3)]
# Single pass of online (per-sample) backpropagation over the dataset.
for k in range(len(new_list)):
    hid1 = []     # hidden layer 1 activations
    hid2 = []     # hidden layer 2 activations
    y = []        # output activations
    q1 = []       # output-layer deltas
    q2 = []       # hidden-layer-2 deltas
    q3 = []       # hidden-layer-1 deltas
    target = []
    Delta = []
    Delta1 = []
    # part A: forward pass, input -> hidden layer 1 (sigmoid activation).
    # NOTE(review): eval() on file contents is unsafe and slow; float() would do.
    h0 = eval(new_list[k][0]) * initial[0][0] + eval(new_list[k][1]) * initial[1][0] + eval(new_list[k][2]) * initial[2][0] + eval(new_list[k][3]) * initial[3][0]
    h1 = eval(new_list[k][0]) * initial[0][1] + eval(new_list[k][1]) * initial[1][1] + eval(new_list[k][2]) * initial[2][1] + eval(new_list[k][3]) * initial[3][1]
    h2 = eval(new_list[k][0]) * initial[0][2] + eval(new_list[k][1]) * initial[1][2] + eval(new_list[k][2]) * initial[2][2] + eval(new_list[k][3]) * initial[3][2]
    h3 = eval(new_list[k][0]) * initial[0][3] + eval(new_list[k][1]) * initial[1][3] + eval(new_list[k][2]) * initial[2][3] + eval(new_list[k][3]) * initial[3][3]
    H0 = (1 + np.e** (- h0)) ** (-1)
    H1 = (1 + np.e** (- h1)) ** (-1)
    H2 = (1 + np.e** (- h2)) ** (-1)
    H3 = (1 + np.e** (- h3)) ** (-1)
    hid1.append(H0)
    hid1.append(H1)
    hid1.append(H2)
    hid1.append(H3)
    # part B: hidden layer 1 -> hidden layer 2 (sigmoid activation).
    h0 = hid1[0] * initial1[0][0] + hid1[1] * initial1[1][0] + hid1[2] * initial1[2][0] + hid1[3] * initial1[3][0]
    h1 = hid1[0] * initial1[0][1] + hid1[1] * initial1[1][1] + hid1[2] * initial1[2][1] + hid1[3] * initial1[3][1]
    h2 = hid1[0] * initial1[0][2] + hid1[1] * initial1[1][2] + hid1[2] * initial1[2][2] + hid1[3] * initial1[3][2]
    h3 = hid1[0] * initial1[0][3] + hid1[1] * initial1[1][3] + hid1[2] * initial1[2][3] + hid1[3] * initial1[3][3]
    H0 = (1 + np.e** (- h0)) ** (-1)
    H1 = (1 + np.e** (- h1)) ** (-1)
    H2 = (1 + np.e** (- h2)) ** (-1)
    H3 = (1 + np.e** (- h3)) ** (-1)
    hid2.append(H0)
    hid2.append(H1)
    hid2.append(H2)
    hid2.append(H3)
    # part C: hidden layer 2 -> 3 output units (sigmoid activation).
    h0 = hid2[0] * initial2[0][0] + hid2[1] * initial2[1][0] + hid2[2] * initial2[2][0] + hid2[3] * initial2[3][0]
    h1 = hid2[0] * initial2[0][1] + hid2[1] * initial2[1][1] + hid2[2] * initial2[2][1] + hid2[3] * initial2[3][1]
    h2 = hid2[0] * initial2[0][2] + hid2[1] * initial2[1][2] + hid2[2] * initial2[2][2] + hid2[3] * initial2[3][2]
    H0 = (1 + np.e** (- h0)) ** (-1)
    H1 = (1 + np.e** (- h1)) ** (-1)
    H2 = (1 + np.e** (- h2)) ** (-1)
    y.append(H0)
    y.append(H1)
    y.append(H2)
    # part D: one-hot encode the class label as the target vector.
    if new_list[k][4] == 'Iris-setosa':
        target = [1, 0, 0]
    elif new_list[k][4] == 'Iris-versicolor':
        target = [0, 1, 0]
    else:
        target = [0, 0, 1]
    # part E: output-layer deltas, sigmoid derivative * error.
    for s in range(len(y)):
        q = y[s] * (1 - y[s]) * (target[s] - y[s])
        q1.append(q)
    # part F: backpropagate deltas through hidden layer 2 ...
    for i in range (len(initial2)):
        delt = q1[0] * initial2[i][0] + q1[1] * initial2[i][1] + q1[2] * initial2[i][2]
        Delta.append(delt)
    for r in range (len(hid2)):
        q = hid2[r] * (1 - hid2[r]) * Delta[r]
        q2.append(q)
    # ... and hidden layer 1.
    # NOTE(review): this propagates the OUTPUT deltas q1 through initial1
    # (a 4x4 matrix), reading only 3 of its 4 columns; standard backprop
    # would propagate q2 over all 4 columns of initial1 — TODO confirm.
    for i in range (len(initial1)):
        delt = q1[0] * initial1[i][0] + q1[1] * initial1[i][1] + q1[2] * initial1[i][2]
        Delta1.append(delt)
    for r in range (len(hid1)):
        q = hid1[r] * (1 - hid1[r]) * Delta1[r]
        q3.append(q)
    # part G: gradient-ascent weight updates, layer by layer.
    for j in range(len(initial2[0])):
        for i in range(len(initial2)):
            initial2[i][j] = initial2[i][j] + learn_rate * q1[j] * hid2[i]
    for j in range(len(initial1[0])):
        for i in range(len(initial1)):
            initial1[i][j] = initial1[i][j] + learn_rate * q2[j] * hid1[i]
    for j in range(len(initial[0])):
        for i in range(len(initial)):
            initial[i][j] = initial[i][j] + learn_rate * q3[j] * eval(new_list[k][i])
# Dump the final weight matrices after the single training pass.
print(initial)
print(initial1)
print(initial2)
| 34.725806
| 162
| 0.513934
|
4a043d8b46e597fee5d445e46388bdf0e8ad96d7
| 159,005
|
py
|
Python
|
ClointFusion/ClointFusion.py
|
tlynx538/ClointFusion
|
4f55139cf9177105a5712ea34d6b511b5059858f
|
[
"BSD-4-Clause"
] | null | null | null |
ClointFusion/ClointFusion.py
|
tlynx538/ClointFusion
|
4f55139cf9177105a5712ea34d6b511b5059858f
|
[
"BSD-4-Clause"
] | null | null | null |
ClointFusion/ClointFusion.py
|
tlynx538/ClointFusion
|
4f55139cf9177105a5712ea34d6b511b5059858f
|
[
"BSD-4-Clause"
] | null | null | null |
# Project Name: ClointFusion
# Project Description: A Python based RPA Automation Framework for Desktop GUI, Citrix, Web and basic Excel operations.
# Project Structure
# 1. All imports
# 2. All global variables
# 3. All function definitions
# 4. All test cases
# 5. All default services
# 1. All imports
import subprocess
import os
import sys
import platform
import urllib.request
import emoji
from pandas.core.algorithms import mode
from xlrd.formula import colname
from datetime import datetime
import pyautogui as pg
import time
import pandas as pd
import keyboard as kb
import PySimpleGUI as sg
import xlrd
import numpy
import openpyxl as op
from openpyxl import Workbook
from openpyxl import load_workbook
import datetime
import subprocess
from functools import lru_cache
import threading
from threading import Timer
import socket
from cv2 import cv2
import base64
import imutils
import clipboard
import re
from openpyxl import load_workbook
from openpyxl.styles import Font
from matplotlib.pyplot import axis
import plotly.express as px
from kaleido.scopes.plotly import PlotlyScope
import plotly.graph_objects as go
import zipcodes
import folium
from json import (load as jsonload, dump as jsondump)
from helium import *
from os import link
from selenium.webdriver import ChromeOptions
import dis
import texthero as hero
from texthero import preprocessing
from urllib.request import urlopen
from hashlib import sha256
from PIL import Image
from wordcloud import WordCloud
from bs4 import BeautifulSoup
import requests
import watchdog.events
import watchdog.observers
from PyQt5 import QtWidgets, QtCore, QtGui
import tkinter as tk
from PIL import ImageGrab
from pathlib import Path
from pandas.core.common import flatten
import webbrowser
import logging
import tempfile
from pif.utils import get_public_ip
import pyautogui as pg
from email_validator import validate_email, EmailNotValidError
from skimage.metrics import structural_similarity
import warnings
# Platform name used to gate OS-specific imports below ("windows", "linux", ...).
os_name = str(platform.system()).lower()
# Windows OS specific packages
if os_name == "windows":
    from unicodedata import name
    import pygetwindow as gw
sg.theme('Dark')     # for PySimpleGUI FRONT END
# 2. All global variables
# Paths below start empty and are populated later during initialization.
base_dir = ""
config_folder_path = ""
log_path = ""
img_folder_path = ""
batch_file_path = ""
# NOTE(review): config_folder_path is assigned twice in this block — the
# second assignment is redundant.
config_folder_path = ""
output_folder_path = ""
error_screen_shots_path = ""
status_log_excel_filepath = ""
bot_name = ""
current_working_dir = os.path.dirname(os.path.realpath(__file__)) #get cwd
# Per-run scratch directory (never cleaned up here).
temp_current_working_dir = tempfile.mkdtemp(prefix="cloint_",suffix="_fusion")
temp_current_working_dir = Path(temp_current_working_dir)
chrome_service = ""
browser_driver = ""
cf_icon_file_path = Path(os.path.join(current_working_dir,"Cloint-ICON.ico"))
cf_logo_file_path = Path(os.path.join(current_working_dir,"Cloint-LOGO.PNG"))
# NOTE(review): config_folder_path is still "" at this point, so these two
# screenshot paths are relative to the process CWD — TODO confirm intended.
ss_path_b = Path(os.path.join(config_folder_path,"my_screen_shot_before.png")) #before search
ss_path_a = Path(os.path.join(config_folder_path,"my_screen_shot_after.png")) #after search
enable_semi_automatic_mode = False
Browser_Service_Started = False
ai_screenshot = ""
ai_processes = []
helium_service_launched=False
# 3. All function definitions
#decorator to push a function to background using asyncio
def background(f):
    """Decorator: run the wrapped callable on asyncio's default executor.

    The decorated function no longer returns its result directly; it returns
    an asyncio future, so callers must await it (or use
    ``loop.run_until_complete``) to obtain the value.
    """
    import asyncio
    try:
        from functools import wraps

        @wraps(f)
        def wrapped(*args, **kwargs):
            event_loop = asyncio.get_event_loop()
            if not callable(f):
                raise TypeError('Task must be a callable')
            return event_loop.run_in_executor(None, f, *args, **kwargs)
        return wrapped
    except Exception as ex:
        print("Task pushed to background = "+str(f) + str(ex))
def get_image_from_base64(imgFileName,imgBase64Str):
    """
    Decode a Base64 payload and write it to ``imgFileName``.

    Does nothing when the target file already exists, so repeated calls are
    cheap and never clobber a previously written image.

    Parameters:
        imgFileName (str) : Destination file name (e.g. with .png extension).
        imgBase64Str (str | bytes) : Base64 data. A ``str`` is now accepted
            and encoded to ASCII first — base64.decodebytes requires bytes,
            so the original crashed (and only printed the error) on str input.
    """
    if not os.path.exists(imgFileName) :
        try:
            if isinstance(imgBase64Str, str):
                imgBase64Str = imgBase64Str.encode("ascii")
            img_binary = base64.decodebytes(imgBase64Str)
            # 'with' guarantees the handle is closed even if the write fails.
            with open(imgFileName,"wb") as f:
                f.write(img_binary)
        except Exception as ex:
            print("Error in get_image_from_base64="+str(ex))
# @background
def _download_cloint_ico_png():
    """
    Internal function: download the ClointFusion icon and logo from the
    project's GitHub raw-content URLs into the package directory.

    Files that already exist locally are left untouched, so this is a
    one-time download per installation. Errors (e.g. no network) are
    printed rather than raised.
    """
    try:
        if not os.path.exists(cf_icon_file_path):
            urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Cloint-ICON.ico',cf_icon_file_path)
        if not os.path.exists(cf_logo_file_path):
            urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Cloint-LOGO.PNG',cf_logo_file_path)
    except Exception as ex:
        print("Error while downloading Cloint ICOn/LOGO = "+str(ex))
def show_emoji(strInput="thumbsup"):
    """Return the Unicode emoji for the given alias name.

    Usage:
        print(show_emoji('thumbsup'))
        print("OK", show_emoji('thumbsup'))
    Default: thumbsup
    """
    alias = ":{}:".format(str(strInput).lower())
    return emoji.emojize(alias, use_aliases=True, variant="emoji_type")
def _load_missing_python_packages_windows():
    """
    Installs Windows OS specific python packages (pywin32, PyGetWindow)
    that are missing from the current interpreter, by shelling out to pip.

    Side effects: upgrades pip and installs packages via os.system; any
    failure is printed, not raised.
    """
    list_of_required_packages = ["pywin32","PyGetWindow"]
    try:
        # Ask pip for the installed-package list and keep only the names.
        reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'list'])
        installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
        missing_packages = ' '.join(list(set(list_of_required_packages)-set(installed_packages)))
        if missing_packages:
            print("{} package(s) are missing".format(missing_packages))
            os.system("{} -m pip install --upgrade pip".format(sys.executable))
            # NOTE(review): bare "pip" here may target a different interpreter
            # than sys.executable used above — TODO confirm.
            cmd = "pip install --upgrade {}".format(missing_packages)
            print(cmd)
            os.system(cmd)
    except Exception as ex:
        print("Error in _load_missing_python_packages_windows="+str(ex))
def is_execution_required_today(function_name,execution_type="D",save_todays_date_month=False):
    """
    Gate a caller to at most one execution per day (or per month), using a
    marker file named after the caller that stores the last-run day/month.

    Parameters:
        function_name (str) : name used for the marker file ``<name>.txt``.
        execution_type (str) : "D" = once per day, "M" = once per month.
        save_todays_date_month (bool) : force-write today's marker now.

    Returns:
        tuple (EXECUTE_NOW (bool), last_updated_date_file (Path))
    """
    # Marker lives next to the bot config when available, else the package dir.
    if config_folder_path:
        last_updated_date_file = os.path.join(config_folder_path,function_name + ".txt")
    else:
        last_updated_date_file = os.path.join(current_working_dir,function_name + ".txt")
    last_updated_date_file = Path(last_updated_date_file)
    EXECUTE_NOW = False
    last_updated_on_date = ""
    if save_todays_date_month == False:
        try:
            with open(last_updated_date_file, 'r') as f:
                last_updated_on_date = str(f.read())
        except:
            # Marker missing/unreadable: treat as first run and write it below.
            save_todays_date_month = True
    if save_todays_date_month:
        with open(last_updated_date_file, 'w',encoding="utf-8") as f:
            # Store only the day-of-month ("%d") or month number ("%m").
            if execution_type == "D":
                last_updated_on_date = datetime.date.today().strftime('%d')
            elif execution_type == "M":
                last_updated_on_date = datetime.date.today().strftime('%m')
            f.write(str(last_updated_on_date))
            EXECUTE_NOW = True
    today_date_month = ""
    if execution_type == "D":
        today_date_month = str(datetime.date.today().strftime('%d'))
    elif execution_type == "M":
        today_date_month = str(datetime.date.today().strftime('%m'))
    # Stored value differs from today's => due for execution again.
    if last_updated_on_date != today_date_month:
        EXECUTE_NOW = True
    return EXECUTE_NOW,last_updated_date_file
def _welcome_to_clointfusion():
    """
    Internal Function to display welcome message & push a notification to ClointFusion Slack
    """
    heart = show_emoji("red_heart")
    print("Welcome to ClointFusion, Made in India with " + heart)
def _set_bot_name(strBotName=""):
    """
    Internal function: derive and store the global bot name, and suffix the
    global base_dir with it ("<base_dir>_<bot_name>").

    If a botname is given, it is stripped to alphanumerics and used in the
    log file and in Task Scheduler; otherwise the last component of the
    current working directory is assumed to be the bot name.

    Parameters :
        strBotName (str) : Name of the bot
    """
    global base_dir
    global bot_name
    if not strBotName: #if user has not given bot_name
        # Windows-style separator first; fall back to '/' via the exception.
        try:
            bot_name = current_working_dir[current_working_dir.rindex("\\") + 1 : ] #Assumption that user has given proper folder name and so taking it as BOT name
        except:
            bot_name = current_working_dir[current_working_dir.rindex("/") + 1 : ] #Assumption that user has given proper folder name and so taking it as BOT name
    else:
        strBotName = ''.join(e for e in strBotName if e.isalnum())
        bot_name = strBotName
    base_dir = str(base_dir) + "_" + bot_name
    base_dir = Path(base_dir)
def folder_create(strFolderPath=""):
    """
    Create a folder, including any missing intermediate-level directories.

    Parameters:
        strFolderPath (str) : path of the folder to create; when empty the
            user is prompted for a path via the GUI helper.

    Silently succeeds when the folder already exists. Any error is printed
    rather than raised (best-effort, matching the rest of this module).
    """
    try:
        if not strFolderPath:
            strFolderPath = gui_get_any_input_from_user('folder path to Create folder')
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(strFolderPath, exist_ok=True)
    except Exception as ex:
        print("Error in folder_create="+str(ex))
def _create_status_log_file(xtLogFilePath):
    """
    Internal function: create an empty status-log workbook with
    'Timestamp' and 'Status' columns at the given path, unless it
    already exists. Errors are printed rather than raised.
    """
    try:
        if not os.path.exists(xtLogFilePath):
            df = pd.DataFrame({'Timestamp': [], 'Status':[]})
            # Context manager replaces writer.save(), which was deprecated
            # and then removed in pandas 2.0; it also closes the writer on error.
            with pd.ExcelWriter(xtLogFilePath) as writer:
                df.to_excel(writer, sheet_name='Sheet1', index=False)
    except Exception as ex:
        print("Error in _create_status_log_file = " +str(ex))
# @timeit
def _init_log_file():
    """
    Internal function: turn the global ``status_log_excel_filepath`` (a folder
    path at entry) into the full path of the status-log workbook, creating
    the folder and an empty workbook if needed.

    The log file is named "<bot_name>-StatusLog.xlsx" when a bot name is set,
    else "StatusLog.xlsx". Errors are printed rather than raised.
    """
    global log_path
    global status_log_excel_filepath
    try:
        if bot_name:
            excelFileName = str(bot_name) + "-StatusLog.xlsx"
        else:
            excelFileName = "StatusLog.xlsx"
        # At this point status_log_excel_filepath is still the folder path.
        folder_create(status_log_excel_filepath)
        status_log_excel_filepath = os.path.join(status_log_excel_filepath,excelFileName)
        status_log_excel_filepath = Path(status_log_excel_filepath)
        _create_status_log_file(status_log_excel_filepath)
    except Exception as ex:
        print("ERROR in _init_log_file="+str(ex))
def _folder_read_text_file(txt_file_path=""):
"""
Reads from a given text file and returns entire contents as a single list
"""
try:
with open(txt_file_path) as f:
file_contents = f.read()
return file_contents
except:
return None
def _folder_write_text_file(txt_file_path="",contents=""):
"""
Writes given contents to a text file
"""
try:
f = open(txt_file_path,'w',encoding="utf-8")
f.write(str(contents))
f.close()
except Exception as ex:
print("Error in folder_write_text_file="+str(ex))
def _ask_user_semi_automatic_mode():
    """
    Ask the user (via a PySimpleGUI window) whether to 'Enable Semi Automatic
    Mode', i.e. store GUI responses and replay them on the next run.

    Persists two preferences under the bot's config folder:
      Dont_Ask_Again.txt      - suppress this dialog in the future
      Semi_Automatic_Mode.txt - the chosen mode (True/False)
    and ensures the bot's response workbook "<bot_name>.xlsx" exists.
    Updates the module-global ``enable_semi_automatic_mode``.
    """
    global enable_semi_automatic_mode
    values = []
    file_path = os.path.join(config_folder_path, 'Dont_Ask_Again.txt')
    file_path = Path(file_path)
    stored_do_not_ask_user_preference = _folder_read_text_file(file_path)
    file_path = os.path.join(config_folder_path, 'Semi_Automatic_Mode.txt')
    file_path = Path(file_path)
    enable_semi_automatic_mode = _folder_read_text_file(file_path)
    if enable_semi_automatic_mode:
        # NOTE(review): only the FIRST character of the stored text is kept
        # here ("True"/"False" both index to a truthy 'T'/'F') — TODO confirm.
        enable_semi_automatic_mode = enable_semi_automatic_mode[0]
    bot_config_path = os.path.join(config_folder_path,bot_name + ".xlsx")
    bot_config_path = Path(bot_config_path)
    # Only show the dialog when the user has not opted out previously.
    if stored_do_not_ask_user_preference is None or str(stored_do_not_ask_user_preference[0]).lower() == 'false':
        layout = [[sg.Text('Do you want me to store GUI responses & use them next time when you run this BOT ?',text_color='orange',font='Courier 13')],
        [sg.Submit('Yes',bind_return_key=True,button_color=('white','green'),font='Courier 14'), sg.CloseButton('No', button_color=('white','firebrick'),font='Courier 14')],
        [sg.Checkbox('Do not ask me again', key='-DONT_ASK_AGAIN-',default=False, text_color='yellow',enable_events=True)],
        [sg.Text("To see this message again, goto 'Config_Files' folder of your BOT and change 'Dont_Ask_Again.txt' to False. \n Please find path here: {}".format(Path(os.path.join(config_folder_path, 'Dont_Ask_Again.txt'))),key='-DND-',visible=False,font='Courier 8')]]
        window = sg.Window('ClointFusion - Enable Semi Automatic Mode ?',layout,return_keyboard_events=True,use_default_focus=False,disable_close=False,element_justification='c',keep_on_top=True,finalize=True,icon=cf_icon_file_path)
        file_path = os.path.join(config_folder_path, 'Dont_Ask_Again.txt')
        file_path = Path(file_path)
        _folder_write_text_file(file_path,str(False))
        # GUI event loop: toggle the opt-out hint, record the Yes/No answer.
        while True:
            event, values = window.read()
            if event == '-DONT_ASK_AGAIN-':
                stored_do_not_ask_user_preference = values['-DONT_ASK_AGAIN-']
                file_path = os.path.join(config_folder_path, 'Dont_Ask_Again.txt')
                file_path = Path(file_path)
                _folder_write_text_file(file_path,str(stored_do_not_ask_user_preference))
                if values['-DONT_ASK_AGAIN-']:
                    window.Element('-DND-').Update(visible=True)
                else:
                    window.Element('-DND-').Update(visible=False)
            if event in (sg.WIN_CLOSED, 'No'): #ask me every time
                enable_semi_automatic_mode = False
                break
            elif event == 'Yes': #do not ask me again
                enable_semi_automatic_mode = True
                break
        window.close()
        # Create an empty response workbook for this bot on first use.
        if not os.path.exists(bot_config_path):
            df = pd.DataFrame({'SNO': [],'KEY': [], 'VALUE':[]})
            writer = pd.ExcelWriter(bot_config_path)
            df.to_excel(writer, sheet_name='Sheet1', index=False)
            # NOTE(review): ExcelWriter.save() was removed in pandas 2.0 —
            # TODO migrate to a 'with' block / writer.close().
            writer.save()
        if enable_semi_automatic_mode:
            print("Semi Automatic Mode is ENABLED "+ show_emoji())
        else:
            print("Semi Automatic Mode is DISABLED "+ show_emoji())
        file_path = os.path.join(config_folder_path, 'Semi_Automatic_Mode.txt')
        file_path = Path(file_path)
        _folder_write_text_file(file_path,str(enable_semi_automatic_mode))
def timeit(method):
    """
    Decorator for computing time taken by a function.

    Parameters:
        method : function to time; apply with @timeit just above the def.
    Returns:
        wrapped function that prints "<name> <ms> ms" after each call and
        returns the original result unchanged.
    """
    import functools

    @functools.wraps(method)  # fix: preserve __name__/__doc__ of the wrapped function
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
        return result
    return timed
def read_semi_automatic_log(key):
    """
    Function to read a value from semi_automatic_log for a given key.

    Parameters:
        key (str) : the KEY column entry to look up.
    Returns:
        str value for the key, or None when the log / key is missing or unreadable.
    """
    try:
        # Per-BOT config lives in the config folder; fall back to a local
        # "First_Run.xlsx" when no config folder has been set up.
        if config_folder_path:
            bot_config_path = os.path.join(config_folder_path, bot_name + ".xlsx")
        else:
            bot_config_path = os.path.join(current_working_dir, "First_Run.xlsx")
        bot_config_path = Path(bot_config_path)

        # Create an empty log on first use so the read below never fails on a missing file.
        if not os.path.exists(bot_config_path):
            df = pd.DataFrame({'SNO': [], 'KEY': [], 'VALUE': []})
            writer = pd.ExcelWriter(bot_config_path)
            df.to_excel(writer, sheet_name='Sheet1', index=False)
            writer.save()

        df = pd.read_excel(bot_config_path)
        value = df[df['KEY'] == key]['VALUE'].to_list()
        value = str(value[0])  # IndexError (key absent) is handled below -> None
        return value
    except Exception:  # fix: narrowed from bare except so SystemExit/KeyboardInterrupt propagate
        return None
def _excel_if_value_exists(excel_path="",sheet_name='Sheet1',header=0,usecols="",value=""):
    """
    Check whether *value* occurs anywhere in the given excel sheet.
    Returns True / False (or None when the file cannot be read).
    """
    try:
        read_kwargs = {'sheet_name': sheet_name, 'header': header}
        if usecols:
            read_kwargs['usecols'] = usecols
        df = pd.read_excel(excel_path, **read_kwargs)
        found = value in df.values
        df = ''  # release the frame reference right away, as done elsewhere in this module
        return found
    except Exception as ex:
        print("Error in _excel_if_value_exists="+str(ex))
def message_pop_up(strMsg="",delay=3):
    """
    Specified message will popup on the screen for a specified duration of time.
    Parameters:
        strMsg (str) : message to popup.
        delay (int) : duration of the popup.
    """
    try:
        sg.popup_no_wait(
            strMsg,
            title='ClointFusion',
            auto_close_duration=delay,
            auto_close=True,
            keep_on_top=True,
            background_color="white",
            text_color="black",
        )
    except Exception as ex:
        print("Error in message_pop_up="+str(ex))
def update_semi_automatic_log(key, value):
    """
    Update semi automatic excel log: insert or overwrite a (KEY, VALUE) pair.

    Parameters:
        key : KEY column entry to create or overwrite.
        value : VALUE to store against the key.
    """
    try:
        # Per-BOT config lives in the config folder; otherwise fall back to
        # a local "First_Run.xlsx" in the current working directory.
        if config_folder_path:
            bot_config_path = os.path.join(config_folder_path,bot_name + ".xlsx")
        else:
            bot_config_path = os.path.join(current_working_dir,"First_Run.xlsx")
        bot_config_path = Path(bot_config_path)
        if _excel_if_value_exists(bot_config_path,usecols=['KEY'],value=key):
            # Key already present: load the whole sheet, patch VALUE in place,
            # and rewrite the file.
            df = pd.read_excel(bot_config_path)
            row_index = df.index[df['KEY'] == key].tolist()[0]
            df.loc[row_index,'VALUE'] = value
            df.to_excel(bot_config_path,index=False)
        else:
            # Key absent: append a single row below the existing data. The
            # writer.book / writer.sheets assignments make ExcelWriter reuse
            # the existing workbook instead of truncating it (older pandas /
            # openpyxl append idiom -- TODO confirm it still works on newer pandas).
            reader = pd.read_excel(bot_config_path)
            df = pd.DataFrame({'SNO': [len(reader)+1], 'KEY': [key], 'VALUE':[value]})
            writer = pd.ExcelWriter(bot_config_path, engine='openpyxl')
            writer.book = load_workbook(bot_config_path)
            writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
            df.to_excel(writer,index=False,header=False,startrow=len(reader)+1)
            writer.save()
    except Exception as ex:
        print("Error in update_semi_automatic_log="+str(ex))
def gui_get_any_file_from_user(msgForUser="the file : ",Extension_Without_Dot="*"):
    """
    Generic function to accept file path from user using GUI. Returns the filepath value in string format. Default allows all files i.e *
    Default Text: "Please choose "

    In semi-automatic mode a previously recorded answer (keyed by msgForUser)
    is returned without showing the window; otherwise the chosen path is
    recorded for future runs via update_semi_automatic_log().
    """
    values = []
    try:
        oldValue = ""
        oldKey = msgForUser
        show_gui = False
        # The prompt message doubles as the lookup key in the semi-automatic log.
        existing_value = read_semi_automatic_log(msgForUser)
        if existing_value is None:
            show_gui = True
        # When semi-automatic mode is OFF, still show the GUI but pre-fill the
        # previously stored value as the default.
        if str(enable_semi_automatic_mode).lower() == 'false' and existing_value:
            show_gui = True
            oldValue = existing_value
        if show_gui:
            layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16',text_color='orange')],
            [sg.Text('Please choose '),sg.Text(text=oldKey + " (ending with .{})".format(str(Extension_Without_Dot).lower()),font=('Courier 12'),text_color='yellow'),sg.Input(default_text=oldValue ,key='-FILE-', enable_events=True), sg.FileBrowse(file_types=((".{} File".format(Extension_Without_Dot), "*.{}".format(Extension_Without_Dot)),))],
            [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
            window = sg.Window('ClointFusion',layout, return_keyboard_events=True,use_default_focus=True,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
            while True:
                event, values = window.read()
                if event == sg.WIN_CLOSED or event == 'Close':
                    break
                if event == 'Done':
                    # 'Done' only accepted once a path has actually been entered
                    if values['-FILE-']:
                        break
                    else:
                        message_pop_up("Please enter the required values")
            window.close()
            if values and event == 'Done':
                values['-KEY-'] = msgForUser
                if str(values['-KEY-']) and str(values['-FILE-']):
                    # Persist the answer so semi-automatic mode can replay it.
                    update_semi_automatic_log(str(values['-KEY-']).strip(),str(values['-FILE-']).strip())
            if values is not None and str(values['-FILE-']):
                return str(values['-FILE-']).strip()
            else:
                return None
        else:
            return str(existing_value)
    except Exception as ex:
        print("Error in gui_get_any_file_from_user="+str(ex))
def folder_read_text_file(txt_file_path=""):
    """
    Reads from a given text file and returns entire contents as a single list.

    Parameters:
        txt_file_path (str) : path of the text file; prompted via GUI when empty.
    Returns:
        list of lines (trailing newlines kept), or None on any read error.
    """
    try:
        if not txt_file_path:
            txt_file_path = gui_get_any_file_from_user('the text file to READ from',"txt")
        with open(txt_file_path) as f:
            return f.readlines()
    except Exception:  # fix: narrowed from bare except so SystemExit/KeyboardInterrupt propagate
        return None
def folder_write_text_file(txt_file_path="",contents=""):
    """
    Writes given contents to a text file (UTF-8), replacing any existing content.

    Parameters:
        txt_file_path (str) : path of the text file; prompted via GUI when empty.
        contents : data to write; converted with str() before writing.
    """
    try:
        if not txt_file_path:
            txt_file_path = gui_get_any_file_from_user('the text file to WRITE to',"txt")
        if not contents:
            contents = gui_get_any_input_from_user('text file contents')
        # fix: with-statement closes the handle even if writelines raises
        with open(txt_file_path,'w',encoding="utf-8") as f:
            f.writelines(str(contents))
    except Exception as ex:
        print("Error in folder_write_text_file="+str(ex))
def excel_get_all_sheet_names(excelFilePath=""):
    """
    Gives you all names of the sheets in the given excel sheet.
    Parameters:
        excelFilePath (str) : Full path to the excel file with slashes.
    returns :
        all the names of the excelsheets as a LIST.
    """
    try:
        if not excelFilePath:
            # fix: pass a prompt message AND the extension; previously "xlsx"
            # was passed as the prompt text and all file types were allowed.
            excelFilePath = gui_get_any_file_from_user("the excel file", "xlsx")
        xls = xlrd.open_workbook(excelFilePath, on_demand=True)
        return xls.sheet_names()
    except Exception as ex:
        print("Error in excel_get_all_sheet_names="+str(ex))
def message_counter_down_timer(strMsg="Calling ClointFusion Function in (seconds)",start_value=5):
    """
    Function to show count-down timer. Default is 5 seconds.
    Ex: message_counter_down_timer()

    Parameters:
        strMsg (str) : message shown above the ticking number.
        start_value (int) : seconds to count down from.
    Returns:
        bool : True when the countdown ran to zero, False when the user cancelled.
    """
    CONTINUE = True
    layout = [[sg.Text(strMsg,justification='c')],[sg.Text('',size=(10, 0),font=('Helvetica', 20),justification='c', key='text')],
    [sg.Exit(button_color=('white', 'firebrick4'), key='Cancel')]]
    window = sg.Window('ClointFusion - Countdown Timer', layout, no_titlebar=True, auto_size_buttons=False,keep_on_top=True, grab_anywhere=False, element_justification='c',element_padding=(0, 0),finalize=True,icon=cf_icon_file_path)
    current_value = start_value + 1  # +1 because the loop decrements before first display
    while True:
        # short read timeout keeps the window responsive to the Cancel button
        event, values = window.read(timeout=2)
        current_value = current_value - 1
        time.sleep(1)  # approximately one tick per second
        if current_value == 0:
            CONTINUE = True
            break
        if event in (sg.WIN_CLOSED, 'Cancel'):
            CONTINUE = False
            print("Action cancelled by user")
            break
        window['text'].update(value=current_value)
    window.close()
    return CONTINUE
def gui_get_consent_from_user(msgForUser="Continue ?"):
    """
    Generic function to get consent from user using GUI. Returns 'Yes' or 'No'.
    Default Text: "Do you want to "

    In semi-automatic mode a previously recorded answer (keyed by msgForUser)
    is returned without showing the window; otherwise the answer is recorded
    for future runs. Closing the window is treated as 'No'.
    """
    values = []
    try:
        oldValue = ""
        oldKey = msgForUser
        show_gui = False
        existing_value = read_semi_automatic_log(msgForUser)
        if existing_value is None:
            show_gui = True
        if str(enable_semi_automatic_mode).lower() == 'false' and existing_value:
            show_gui = True
            oldValue = existing_value
        if show_gui:
            layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16',text_color='orange')],
            [sg.Text('Do you want to '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow')],
            [sg.Submit('Yes',button_color=('white','green'),font=('Courier 14'),bind_return_key=True),sg.Submit('No',button_color=('white','firebrick'),font=('Courier 14'))]]
            window = sg.Window('ClointFusion',layout, return_keyboard_events=True,use_default_focus=True,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
            while True:
                event, values = window.read()
                # fix: closing the window previously looped forever and then
                # crashed on values['-KEY-']; treat close as a "No" answer.
                if event in (sg.WIN_CLOSED, None):
                    oldValue = 'No'
                    break
                if event == 'No':
                    oldValue = 'No'
                    break
                if event == 'Yes':
                    oldValue = 'Yes'
                    break
            window.close()
            if values is None:
                values = {}  # window closed before any event produced a dict
            values['-KEY-'] = msgForUser
            if str(values['-KEY-']):
                update_semi_automatic_log(str(values['-KEY-']).strip(),str(oldValue))
            return oldValue
        else:
            return str(existing_value)
    except Exception as ex:
        print("Error in gui_get_consent_from_user="+str(ex))
def gui_get_dropdownlist_values_from_user(msgForUser="",dropdown_list=[],multi_select=True):
    """
    Generic function to accept one of the drop-down value from user using GUI. Returns all chosen values in list format.
    Default Text: "Please choose the item(s) from "

    Parameters:
        msgForUser (str) : prompt text; also the lookup key in the semi-automatic log.
        dropdown_list (list) : choices shown in the listbox; nothing happens when empty.
        multi_select (bool) : allow multiple selections when True, single otherwise.
    """
    values = []
    # NOTE(review): mutable default argument; harmless here because the list
    # is only read / reassigned, never mutated in place.
    dropdown_list = dropdown_list
    if dropdown_list:
        try:
            oldValue = []
            oldKey = msgForUser
            show_gui = False
            existing_value = read_semi_automatic_log(msgForUser)
            if existing_value is None:
                show_gui = True
            # Semi-automatic mode OFF: still show the GUI, pre-selecting the stored answer.
            if str(enable_semi_automatic_mode).lower() == 'false' and existing_value:
                show_gui = True
                oldValue = existing_value
            if show_gui:
                # Same layout either way; only the listbox select mode differs.
                if multi_select:
                    layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16', text_color='orange')],
                    [sg.Text('Please choose the item(s) from '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Listbox(dropdown_list,size=(30, 5),key='-EXCELCOL-',default_values=oldValue,select_mode=sg.LISTBOX_SELECT_MODE_MULTIPLE,enable_events=True,change_submits=True)],
                    [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
                else:
                    layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16', text_color='orange')],
                    [sg.Text('Please choose an item from '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Listbox(dropdown_list,size=(30, 5),key='-EXCELCOL-',default_values=oldValue,select_mode=sg.LISTBOX_SELECT_MODE_SINGLE,enable_events=True,change_submits=True)],
                    [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
                window = sg.Window('ClointFusion',layout, return_keyboard_events=True,use_default_focus=False,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
                while True:
                    event, values = window.read()
                    if event is None or event == 'Cancel' or event == "Escape:27":
                        values = []
                        break
                    if event == 'Done':
                        # 'Done' only accepted once at least one item is selected
                        if values and values['-EXCELCOL-']:
                            break
                        else:
                            message_pop_up("Please enter all the values")
                window.close()
                if values and event == 'Done':
                    values['-KEY-'] = msgForUser
                    if str(values['-KEY-']) and str(values['-EXCELCOL-']):
                        # Persist the selection so semi-automatic mode can replay it.
                        update_semi_automatic_log(str(values['-KEY-']).strip(),str(values['-EXCELCOL-']).strip())
                    return values['-EXCELCOL-']
                else:
                    return oldValue
            else:
                return oldValue
        except Exception as ex:
            print("Error in gui_get_dropdownlist_values_from_user="+str(ex))
    else:
        print('gui_get_dropdownlist_values_from_user - List is empty')
def gui_get_excel_sheet_header_from_user(msgForUser=""):
    """
    Generic function to accept excel path, sheet name and header from user using GUI.
    Default Text: "Please choose the excel "

    Returns:
        (file_path, sheet_name, header_row) tuple; header_row is an int.
        The triple is stored in the semi-automatic log as one comma-joined string.
    """
    values = []
    sheet_namesLst = []
    try:
        # Stored format is "path,sheet,header" -- default to no path, Sheet1, row 0.
        oldValue = "" + "," + "Sheet1" + "," + "0"
        oldKey = msgForUser
        show_gui = False
        existing_value = read_semi_automatic_log(msgForUser)
        if existing_value is None:
            show_gui = True
        if str(enable_semi_automatic_mode).lower() == 'false' and existing_value:
            show_gui = True
            oldValue = existing_value
        if show_gui:
            # NOTE(review): split(",") breaks for file paths containing commas -- confirm acceptable.
            oldFilePath, oldSheet , oldHeader = str(oldValue).split(",")
            layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16', text_color='orange')],
            [sg.Text('Please choose the excel '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Input(default_text=oldFilePath,key="-FILEPATH-",enable_events=True,change_submits=True), sg.FileBrowse(file_types=(("Excel File", "*.xls"),("Excel File", "*.xlsx")))],
            [sg.Text('Sheet Name'), sg.Combo(sheet_namesLst,default_value=oldSheet,size=(20, 0),key="-SHEET-",enable_events=True)],
            [sg.Text('Choose the header row'),sg.Spin(values=('0', '1', '2', '3', '4', '5'),initial_value=int(oldHeader),key="-HEADER-",enable_events=True,change_submits=True)],
            [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
            window = sg.Window('ClointFusion',layout, return_keyboard_events=True,use_default_focus=False,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
            while True:
                # Pre-fill the sheet combo from a previously stored path; the
                # "-FILEPATH-" handler below clears oldFilePath so this runs once.
                if oldFilePath:
                    sheet_namesLst = excel_get_all_sheet_names(oldFilePath)
                    window['-SHEET-'].update(values=sheet_namesLst)
                event, values = window.read()
                if event is None or event == 'Cancel' or event == "Escape:27":
                    values = []
                    break
                if event == 'Done':
                    if values and values['-FILEPATH-'] and values['-SHEET-']:
                        break
                    else:
                        message_pop_up("Please enter all the values")
                if event == '-FILEPATH-':
                    # User picked a new file: refresh the sheet-name choices.
                    sheet_namesLst = excel_get_all_sheet_names(values['-FILEPATH-'])
                    window['-SHEET-'].update(values=sheet_namesLst)
                    window.refresh()
                    oldFilePath = ""
                    if len(sheet_namesLst) >= 1:
                        window['-SHEET-'].update(value=sheet_namesLst[0])
                if event == '-SHEET-':
                    window['-SHEET-'].update(value=values['-SHEET-'])
            window.close()
            # NOTE(review): when the dialog is cancelled, values is [] and the
            # subscriptions below raise; the outer except then prints the error.
            values['-KEY-'] = msgForUser
            concatenated_value = values['-FILEPATH-'] + "," + values ['-SHEET-'] + "," + values['-HEADER-']
            if str(values['-KEY-']) and concatenated_value:
                update_semi_automatic_log(str(values['-KEY-']).strip(),str(concatenated_value))
            return values['-FILEPATH-'] , values ['-SHEET-'] , int(values['-HEADER-'])
        else:
            oldFilePath, oldSheet , oldHeader = str(existing_value).split(",")
            return oldFilePath, oldSheet , int(oldHeader)
    except Exception as ex:
        print("Error in gui_get_excel_sheet_header_from_user="+str(ex))
def gui_get_folder_path_from_user(msgForUser="the folder : "):
    """
    Generic function to accept folder path from user using GUI. Returns the folderpath value in string format.
    Default text: "Please choose "

    In semi-automatic mode a previously recorded answer (keyed by msgForUser)
    is returned without showing the window; otherwise the chosen folder is
    recorded for future runs.
    """
    values = []
    try:
        oldValue = ""
        oldKey = msgForUser
        show_gui = False
        # The prompt message doubles as the lookup key in the semi-automatic log.
        existing_value = read_semi_automatic_log(msgForUser)
        if existing_value is None:
            show_gui = True
        if str(enable_semi_automatic_mode).lower() == 'false' and existing_value:
            show_gui = True
            oldValue = existing_value
        if show_gui:
            layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16',text_color='orange')],
            [sg.Text('Please choose '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Input(default_text=oldValue ,key='-FOLDER-', enable_events=True), sg.FolderBrowse()],
            [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
            window = sg.Window('ClointFusion',layout, return_keyboard_events=True,use_default_focus=True,disable_close=False,element_justification='c',keep_on_top=True,finalize=True,icon=cf_icon_file_path)
            while True:
                event, values = window.read()
                if event == sg.WIN_CLOSED or event == 'Close':
                    break
                if event == 'Done':
                    # 'Done' only accepted once a folder has actually been entered
                    if values and values['-FOLDER-']:
                        break
                    else:
                        message_pop_up("Please enter the required values")
            window.close()
            if values and event == 'Done':
                values['-KEY-'] = msgForUser
                if str(values['-KEY-']) and str(values['-FOLDER-']):
                    # Persist the answer so semi-automatic mode can replay it.
                    update_semi_automatic_log(str(values['-KEY-']).strip(),str(values['-FOLDER-']).strip())
            if values is not None:
                return str(values['-FOLDER-']).strip()
            else:
                return None
        else:
            return str(existing_value)
    except Exception as ex:
        print("Error in gui_get_folder_path_from_user="+str(ex))
def gui_get_any_input_from_user(msgForUser="the value : ",password=False,mandatory_field=True):
    """
    Generic function to accept any input (text / numeric) from user using GUI. Returns the value in string format.
    Please use unique message (key) for each value.
    Default Text: "Please enter "

    Parameters:
        msgForUser (str) : prompt text; also the lookup key in the semi-automatic log.
        password (bool) : mask the input with '*' when True (implies mandatory).
        mandatory_field (bool) : when True the program EXITS (sys.exit) if the
            window is closed with no value available.
    """
    values = []
    try:
        oldValue = ""
        oldKey = msgForUser
        show_gui = False
        existing_value = read_semi_automatic_log(msgForUser)
        # pandas renders empty cells as "nan"; treat that as "no stored value".
        if existing_value == "nan":
            existing_value = None
        if existing_value is None:
            show_gui = True
        if str(enable_semi_automatic_mode).lower() == 'false' and existing_value:
            show_gui = True
            oldValue = existing_value
        layout = ""
        if show_gui:
            # Three layout variants: masked input, plain mandatory, plain optional.
            if password:
                layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16',text_color='orange')],
                [sg.Text('Please enter '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Input(default_text=oldValue,key='-VALUE-', justification='c',password_char='*')],
                [sg.Text('This field is mandatory',text_color='red')],
                [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
            elif not password and mandatory_field:
                layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16',text_color='orange')],
                [sg.Text('Please enter '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Input(default_text=oldValue,key='-VALUE-', justification='c')],
                [sg.Text('This field is mandatory',text_color='red')],
                [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
            elif not password and not mandatory_field:
                layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16',text_color='orange')],
                [sg.Text('Please enter '),sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Input(default_text=oldValue,key='-VALUE-', justification='c')],
                [sg.Submit('Done',button_color=('white','green'),bind_return_key=True),sg.CloseButton('Close',button_color=('white','firebrick'))]]
            window = sg.Window('ClointFusion',layout, return_keyboard_events=True,use_default_focus=True,disable_close=True,element_justification='c',keep_on_top=True,finalize=True,icon=cf_icon_file_path)
            while True:
                event, values = window.read()
                if event == sg.WIN_CLOSED or event == 'Close':
                    # Closing is allowed only when some value is already available.
                    if oldValue or (values and values['-VALUE-']):
                        break
                    else:
                        if mandatory_field:
                            message_pop_up("Its a mandatory field !.. Cannot proceed, exiting now..")
                            print("Exiting ClointFusion, as Mandatory field is missing")
                            sys.exit(0)
                        else:
                            print("Mandatory field is missing, continuing with None/Empty value")
                            break
                if event == 'Done':
                    if values['-VALUE-']:
                        break
                    else:
                        if mandatory_field:
                            message_pop_up("This value is required. Please enter the value..")
                        else:
                            break
            window.close()
            if values and event == 'Done':
                values['-KEY-'] = msgForUser
                if values is not None and str(values['-KEY-']) and str(values['-VALUE-']):
                    # Persist the answer so semi-automatic mode can replay it.
                    update_semi_automatic_log(str(values['-KEY-']).strip(),str(values['-VALUE-']).strip())
            if values is not None and str(values['-VALUE-']):
                return str(values['-VALUE-']).strip()
            else:
                return None
        else:
            return str(existing_value)
    except Exception as ex:
        print("Error in gui_get_any_input_from_user="+str(ex))
def excel_get_all_header_columns(excel_path="",sheet_name="Sheet1",header=0):
    """
    Return the column header names of the given excel sheet as a list.
    """
    col_lst = []
    try:
        if not excel_path:
            excel_path,sheet_name,header = gui_get_excel_sheet_header_from_user('to all header columns as a list')
        # nrows=1 keeps the read cheap -- only the header row matters here
        frame = pd.read_excel(excel_path,sheet_name=sheet_name,header=header,nrows=1,dtype=str)
        col_lst = list(frame.columns)
        return col_lst
    except Exception as ex:
        print("Error in excel_get_all_header_columns="+str(ex))
def _extract_filename_from_filepath(strFilePath=""):
"""
Function which extracts file name from the given filepath
"""
if strFilePath:
try:
strFileName = Path(strFilePath).name
strFileName = str(strFileName).split(".")[0]
# strFileName = strFilePath[strFilePath.rindex("\\") + 1 : ]
# strFileName = strFileName.split(".")[0]
return strFileName
except Exception as ex:
print("Error in _extract_filename_from_filepath="+str(ex))
else:
print("Please enter the value="+str(strFilePath))
def string_remove_special_characters(inputStr=""):
    """
    Removes all the special character.
    Parameters:
        inputStr (str) : string for removing all the special character in it.
    Returns :
        outputStr (str) : returns the alphanumeric string (None when no input given).
    """
    if not inputStr:
        inputStr = gui_get_any_input_from_user('input string to remove Special characters')
    if inputStr:
        return "".join(filter(str.isalnum, inputStr))
# @background
def excel_create_excel_file_in_given_folder(fullPathToTheFolder="",excelFileName="",sheet_name="Sheet1"):
    """
    Creates an excel file in the desired folder with desired filename
    Internally this uses folder_create() method to create folders if the folder/s does not exist.
    Parameters:
        fullPathToTheFolder (str) : Complete path to the folder with double slashes.
        excelFileName (str) : File Name of the excel to be created (.xlsx extension will be added automatically.
        sheet_name (str) : By default it will be "Sheet1".
    Returns:
        returns boolean TRUE if the excel file is created
    """
    try:
        if not fullPathToTheFolder:
            fullPathToTheFolder = gui_get_folder_path_from_user('the folder to create excel file')
        if not excelFileName:
            excelFileName = gui_get_any_input_from_user("excel file name (without extension)")
        folder_create(fullPathToTheFolder)
        # fix: test the suffix, not substring membership, so a name like
        # "report.xlsx.bak" still gets a proper .xlsx extension appended
        if not excelFileName.endswith(".xlsx"):
            excelFileName = excelFileName + ".xlsx"
        excel_path = Path(os.path.join(fullPathToTheFolder, excelFileName))
        wb = Workbook()
        ws = wb.active
        ws.title = sheet_name
        wb.save(filename=excel_path)
        return True
    except Exception as ex:
        print("Error in excel_create_excel_file_in_given_folder="+str(ex))
def folder_create_text_file(textFolderPath="",txtFileName=""):
    """
    Creates an empty text file in the given folder.
    Automatically adds the .txt extension if not given.
    Parameters:
        textFolderPath (str) : Complete path to the folder.
        txtFileName (str) : name of the text file to create.
    """
    try:
        if not textFolderPath:
            textFolderPath = gui_get_folder_path_from_user('the folder to create text file')
        if not txtFileName:
            txtFileName = gui_get_any_input_from_user("text file name")
        # fix: test the suffix rather than substring membership so names
        # like "my.txt.notes" still receive a proper .txt extension
        if not txtFileName.endswith(".txt"):
            txtFileName = txtFileName + ".txt"
        file_path = Path(os.path.join(textFolderPath, txtFileName))
        # fix: with-statement guarantees the handle is closed
        with open(file_path, 'w',encoding="utf-8"):
            pass
    except Exception as ex:
        print("Error in folder_create_text_file="+str(ex))
def excel_if_value_exists(excel_path="",sheet_name='Sheet1',header=0,usecols="",value=""):
    """
    Check if a given value exists in given excel. Returns True / False.

    Prompts via GUI for a missing excel path / value, then delegates to the
    private _excel_if_value_exists() helper so the search logic lives in
    exactly one place (previously this function duplicated it verbatim).
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to search the VALUE')
        if not value:
            value = gui_get_any_input_from_user('VALUE to be searched')
        return _excel_if_value_exists(excel_path, sheet_name, header, usecols, value)
    except Exception as ex:
        print("Error in excel_if_value_exists="+str(ex))
# WatchDog : Monitors the given folder for creation / modification / deletion
class FileMonitor_Handler(watchdog.events.PatternMatchingEventHandler):
    """Watchdog handler that prints create / delete / modify events for a monitored folder."""
    # NOTE(review): this class attribute is shadowed by the local variables in
    # the handlers below and is never actually updated -- confirm whether any
    # caller relies on reading it.
    file_path = ""
    def __init__(self):
        # Match every path (no ignore patterns), include directories, case sensitive.
        watchdog.events.PatternMatchingEventHandler.__init__(self, ignore_patterns = None,
        ignore_directories = False, case_sensitive = True)
    def on_created(self, event):
        # local only; does not update the class-level file_path
        file_path = Path(str(event.src_path))
        print("Created : {}".format(file_path))
    def on_deleted(self, event):
        file_path = Path(str(event.src_path))
        print("Deleted : {}".format(file_path))
    def on_modified(self,event):
        file_path = Path(str(event.src_path))
        print("Modified : {}".format(file_path))
def create_batch_file(application_exe_pyw_file_path=""):
    """
    Creates .bat file for the given application / exe or even .pyw BOT developed by you. This is required in Task Scheduler.
    """
    try:
        if not application_exe_pyw_file_path:
            application_exe_pyw_file_path = gui_get_any_file_from_user('.pyw/.exe file for which .bat is to be made')
        while not (str(application_exe_pyw_file_path).endswith(".exe") or str(application_exe_pyw_file_path).endswith(".pyw")):
            print("Please choose the file ending with .pyw or .exe")
            application_exe_pyw_file_path = gui_get_any_file_from_user('.pyw/.exe file for which .bat is to be made')

        # fix: build the .bat name from the file stem instead of
        # str.replace("exe","bat"), which corrupted any name that merely
        # CONTAINS "exe"/"pyw" (e.g. "executor.exe" -> "batcutor.bat")
        batch_name = _extract_filename_from_filepath(application_exe_pyw_file_path) + ".bat"

        if str(application_exe_pyw_file_path).endswith(".exe"):
            cmd = "start \"\" " + '"' + application_exe_pyw_file_path + '" /popup\n'
        else:
            # .pyw bots are launched through the current Python interpreter
            cmd = "start \"\" " + '"' + sys.executable + '" ' + '"' + application_exe_pyw_file_path + '" /popup\n'

        # fix: keep the module-level batch folder untouched; the old code
        # declared `global batch_file_path` and appended the file name to it,
        # so a second call produced a nested, invalid path
        target_path = Path(os.path.join(batch_file_path, batch_name))
        if not os.path.exists(target_path):
            with open(target_path, 'w',encoding="utf-8") as f:
                f.write("@ECHO OFF\n")
                f.write("timeout 5 > nul\n")
                f.write(cmd)
                f.write("exit")
        print("Batch file saved in " + str(target_path))
    except Exception as ex:
        print("Error in create_batch_file="+str(ex))
def excel_create_file(fullPathToTheFile="",fileName="",sheet_name="Sheet1"):
    """
    Create an excel workbook named *fileName* inside *fullPathToTheFile*.
    Missing folders are created; ".xlsx" is appended when absent.
    Parameters:
        fullPathToTheFile (str) : destination folder (prompted when empty).
        fileName (str) : excel file name without extension (prompted when empty).
        sheet_name (str) : title of the first sheet, "Sheet1" by default.
    Returns:
        True when the workbook is written successfully.
    """
    try:
        if not fullPathToTheFile:
            fullPathToTheFile = gui_get_any_input_from_user('folder path to create excel')
        if not fileName:
            fileName = gui_get_any_input_from_user("Excel File Name (without extension)")
        # exist_ok avoids the check-then-create race of the old exists()/makedirs pair
        os.makedirs(fullPathToTheFile, exist_ok=True)
        # fix: suffix test instead of substring membership ("a.xlsx.bak" was left alone)
        if not fileName.endswith(".xlsx"):
            fileName = fileName + ".xlsx"
        wb = Workbook()
        ws = wb.active
        ws.title = sheet_name
        target = Path(os.path.join(fullPathToTheFile, fileName))
        wb.save(filename=target)
        return True
    except Exception as ex:
        print("Error in excel_create_file="+str(ex))
def folder_get_all_filenames_as_list(strFolderPath="",extension='all'):
    """
    List the entry names inside the given folder.
    Parameters:
        strFolderPath (str) : folder to scan (prompted via GUI when empty).
        extension (str) : keep only names ending with this suffix;
                          'all' (the default) keeps every entry.
    Returns:
        list of matching entry names.
    """
    try:
        if not strFolderPath:
            strFolderPath = gui_get_folder_path_from_user('a folder to get all its filenames')
        entries = os.listdir(strFolderPath)
        if extension == "all":
            return list(entries)
        return [name for name in entries if name.endswith(extension)]
    except Exception as ex:
        print("Error in folder_get_all_filenames_as_list="+str(ex))
def folder_delete_all_files(fullPathOfTheFolder="",file_extension_without_dot="all"):
    """
    Deletes all the files of the given folder
    Parameters:
        fullPathOfTheFolder (str) : Location of the folder.
        file_extension_without_dot (str) : extension of the files to delete,
            accepted with or without the leading dot; "all" (default) deletes
            every file regardless of extension.
    returns:
        count (int) : number of files deleted, or -1 on error.
    """
    try:
        if not fullPathOfTheFolder:
            fullPathOfTheFolder = gui_get_folder_path_from_user('a folder to delete all its files')

        # fix: previously an extension passed WITH a dot (".txt") left the
        # suffix variable empty, which matched -- and deleted -- every file
        ext = str(file_extension_without_dot)
        if not ext.startswith("."):
            ext = "." + ext

        if ext.lower() == ".all":
            filelist = [f for f in os.listdir(fullPathOfTheFolder)]
        else:
            filelist = [f for f in os.listdir(fullPathOfTheFolder) if f.endswith(ext)]

        count = 0
        for f in filelist:
            try:
                os.remove(Path(os.path.join(fullPathOfTheFolder, f)))
                count += 1
            except OSError:
                # best-effort: skip files that are locked or already gone
                pass
        return count
    except Exception as ex:
        print("Error in folder_delete_all_files="+str(ex))
        return -1
def key_hit_enter():
    """
    Enter key will be pressed once.
    """
    # Short pauses around the keystroke give the focused window time to
    # register the event (kb is the `keyboard` module).
    time.sleep(0.5)
    kb.press_and_release('enter')
    time.sleep(0.5)
def message_flash(msg="",delay=3):
    """
    Show *msg* in a modal alert with an OK button that dismisses itself
    after *delay* seconds (a background timer presses Enter for the user).
    Parameters:
        msg (str) : message to popup; prompted via GUI when empty.
        delay (int) : duration of the popup in seconds.
    """
    try:
        if not msg:
            msg = gui_get_any_input_from_user("flash message")
        auto_dismiss = Timer(int(delay), key_hit_enter)
        auto_dismiss.start()
        pg.alert(text=msg, title='ClointFusion', button='OK')
    except Exception as ex:
        print("ERROR in message_flash="+str(ex))
def window_show_desktop():
    """
    Minimizes all the applications and shows Desktop.
    """
    try:
        # Win+D reveals the desktop; brief sleeps let the OS process the chord
        time.sleep(0.5)
        kb.press_and_release('win+d')
        time.sleep(0.5)
    except Exception as ex:
        print("Error in window_show_desktop="+str(ex))
def window_get_all_opened_titles_windows():
    """
    Gives the title of all the existing (open) windows.
    Returns:
        allTitles_lst (list) : returns all the titles of the window as list.
    """
    try:
        seen = []
        for raw_title in gw.getAllTitles():
            title = str(raw_title).strip()
            # skip blank titles and duplicates while preserving order
            if title and title not in seen:
                seen.append(title)
        return seen
    except Exception as ex:
        print("Error in window_get_all_opened_titles="+str(ex))
def _window_find_exact_name(windowName=""):
    """
    Resolve a partial window name to the exact title of an open window.
    Parameters:
        windowName (str) : partial name to search for (prompted when empty).
    Returns:
        win (str) : exact matching window title, "" when not found.
        window_found (boolean) : True when a matching window exists.
    """
    win, window_found = "", False
    if not windowName:
        windowName = gui_get_any_input_from_user("Partial Window Name")
    try:
        needle = str(windowName).lower()
        for title in gw.getAllTitles():
            # case-insensitive substring match on non-blank titles only
            if str(title).strip() and needle in str(title).lower():
                win, window_found = title, True
                break
        return win, window_found
    except Exception as ex:
        print("Error in _window_find_exact_name="+str(ex))
def window_activate_and_maximize_windows(windowName=""):
    """
    Activates and maximizes the desired window.
    Parameters:
        windowName (str) : Name of the window to maximize.
    """
    try:
        if not windowName:
            open_win_list = window_get_all_opened_titles_windows()
            windowName = gui_get_dropdownlist_values_from_user("window titles to Activate & Maximize",dropdown_list=open_win_list,multi_select=False)[0]
        exact_title, found = _window_find_exact_name(windowName)
        if not found:
            print("No window OPEN by name="+str(windowName))
            return
        target = gw.getWindowsWithTitle(exact_title)[0]
        target.activate()
        time.sleep(2)  # give the OS time to bring the window forward
        target.maximize()
        time.sleep(2)
    except Exception as ex:
        print("Error in window_activate_and_maximize="+str(ex))
def window_minimize_windows(windowName=""):
    """
    Activates and minimizes the desired window.
    Parameters:
        windowName (str) : Name of the window to minimize.
    """
    try:
        if not windowName:
            open_win_list = window_get_all_opened_titles_windows()
            windowName = gui_get_dropdownlist_values_from_user("window titles to Minimize",dropdown_list=open_win_list,multi_select=False)[0]
        exact_title, found = _window_find_exact_name(windowName)
        if not found:
            print("No window available to minimize by name="+str(windowName))
            return
        target = gw.getWindowsWithTitle(exact_title)[0]
        target.minimize()
        time.sleep(1)
    except Exception as ex:
        print("Error in window_minimize="+str(ex))
def window_close_windows(windowName=""):
    """
    Close the desired window.
    Parameters:
        windowName (str) : Name of the window to close.
    """
    try:
        if not windowName:
            open_win_list = window_get_all_opened_titles_windows()
            windowName = gui_get_dropdownlist_values_from_user("window titles to Close",dropdown_list=open_win_list,multi_select=False)[0]
        exact_title, found = _window_find_exact_name(windowName)
        if not found:
            print("No window available to close, by name="+str(windowName))
            return
        target = gw.getWindowsWithTitle(exact_title)[0]
        target.close()
        time.sleep(1)
    except Exception as ex:
        print("Error in window_close="+str(ex))
def launch_any_exe_bat_application(pathOfExeFile=""):
    """
    Launch any exe / batch file / excel file etc.

    Parameters:
        pathOfExeFile (str) : location of the file with extension. When
            empty the user is asked to pick a file.
    """
    try:
        if not pathOfExeFile:
            pathOfExeFile = gui_get_any_file_from_user('EXE or BAT file')
        try:
            subprocess.Popen(pathOfExeFile)
        except:
            # Fall back to the OS file association (e.g. for .xlsx files)
            os.startfile(pathOfExeFile)
        time.sleep(2)
        try:
            # Best-effort: maximize whatever window came to the foreground
            import win32gui, win32con
            time.sleep(3)
            win32gui.ShowWindow(win32gui.GetForegroundWindow(), win32con.SW_MAXIMIZE)
        except Exception as ex1:
            print("launch_any_exe_bat_application"+str(ex1))
        time.sleep(1)
    except Exception as ex:
        print("ERROR in launch_any_exe_bat_application="+str(ex))
class myThread1 (threading.Thread):
    """Background thread that flashes the given error message on screen."""
    def __init__(self, err_str):
        super().__init__()
        # Message text flashed when the thread runs
        self.err_str = err_str
    def run(self):
        message_flash(self.err_str)
class myThread2 (threading.Thread):
    """Background thread that grabs a full-screen screenshot and saves it
    as a timestamped PNG under the given folder path prefix."""
    def __init__(self, strFilePath):
        super().__init__()
        # Path prefix the screenshot file name is appended to
        self.strFilePath = strFilePath
    def run(self):
        time.sleep(1)
        snapshot = pg.screenshot()
        time.sleep(1)
        # Build a filesystem-safe timestamp (no spaces, colons or microseconds)
        stamp = str(datetime.datetime.now())
        stamp = stamp.replace(" ", "_").replace(":", "-").split(".")[0]
        snapshot.save(str(self.strFilePath + stamp + ".PNG"))
def take_error_screenshot(err_str):
    """
    Take a screenshot of an error popup in parallel, without blocking the
    main flow: one thread flashes the error message while another saves a
    timestamped screenshot under the log folder.

    Parameters:
        err_str (str) : exception text to flash.
    """
    global error_screen_shots_path
    try:
        flasher = myThread1(err_str)
        shooter = myThread2(error_screen_shots_path)
        flasher.start()
        shooter.start()
        # Wait for both side tasks before resuming the main flow
        flasher.join()
        shooter.join()
    except Exception as ex:
        print("Error in take_error_screenshot="+str(ex))
def update_log_excel_file(message=""):
    """
    Append the given message, with a timestamp, to the excel log file.

    Parameters:
        message (str) : message to update; prompted for when empty.
    Returns:
        bool : True if updated successfully, False otherwise.
    """
    global status_log_excel_filepath
    try:
        if not message:
            message = gui_get_any_input_from_user("message to Update Log file")
        df = pd.DataFrame({'Timestamp': [datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")], 'Status': [message]})
        # Existing row count decides where the new row is appended.
        reader = pd.read_excel(status_log_excel_filepath)
        # mode='a' + overlay writes into the existing sheet.  The previous
        # pattern of assigning writer.book / writer.sheets breaks on
        # pandas >= 1.5 (ExcelWriter.book is read-only) and writer.save()
        # was removed in pandas 2.0.
        with pd.ExcelWriter(status_log_excel_filepath, engine='openpyxl',
                            mode='a', if_sheet_exists='overlay') as writer:
            df.to_excel(writer, index=False, header=False, startrow=len(reader) + 1)
        return True
    except Exception as ex:
        print("Error in update_log_excel_file="+str(ex))
        return False
def string_extract_only_alphabets(inputString=""):
    """
    Return only the alphabetic characters of the given input string.
    Prompts the user for a string when none is supplied.
    """
    if not inputString:
        inputString = gui_get_any_input_from_user("input string to get only Alphabets")
    return "".join(filter(str.isalpha, inputString))
def string_extract_only_numbers(inputString=""):
    """
    Return only the numeric characters of the given input string.
    Prompts the user for a string when none is supplied.
    """
    if not inputString:
        inputString = gui_get_any_input_from_user("input string to get only Numbers")
    return "".join(filter(str.isnumeric, inputString))
@lru_cache(None)
def call_otsu_threshold(img_title, is_reduce_noise=False):
    """
    OpenCV internal function for OCR.

    Binarizes the image file at *img_title* in place using Otsu's threshold,
    optionally Gaussian-blurring it first to reduce noise.

    Parameters:
        img_title (str) : path of the image file; it is overwritten with
            the thresholded result.
        is_reduce_noise (bool) : apply a 5x5 Gaussian blur before
            thresholding when True.
    """
    # NOTE(review): lru_cache memoizes on the path string, so a second call
    # with the same path becomes a no-op even if the file changed on disk —
    # confirm this is intentional.
    image = cv2.imread(img_title, 0)  # 0 = load as grayscale
    if is_reduce_noise:
        image = cv2.GaussianBlur(image, (5, 5), 0)
    _ , image_result = cv2.threshold(
        image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU,
    )
    # Overwrite the source file with the binarized image
    cv2.imwrite(img_title, image_result)
    cv2.destroyAllWindows()
@lru_cache(None)
def read_image_cv2(img_path):
    """
    Read an image from disk in cv2 format.

    Parameters:
        img_path (str) : location of the image.
    Returns:
        image : the loaded cv2 image, or None when the path is missing or
        loading fails.  Results are memoized per path via lru_cache.
    """
    if not (img_path and os.path.exists(img_path)):
        print("File not found="+str(img_path))
        return
    try:
        return cv2.imread(img_path)
    except Exception as ex:
        print("read_image_cv2 = "+str(ex))
def excel_get_row_column_count(excel_path="", sheet_name="Sheet1", header=0):
    """
    Get the row and column count of the provided excel sheet.

    Parameters:
        excel_path (str) : Full path to the excel file with slashes.
        sheet_name (str) : by default it is Sheet1.
        header (int) : row index used as the column header.
    Returns:
        (row, col) : row count (including the header row) and column count,
        or None on error.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user("to get row/column count")
        frame = pd.read_excel(excel_path, sheet_name=sheet_name, header=header)
        rows, cols = frame.shape
        # +1 accounts for the header row, which .shape does not include
        return rows + 1, cols
    except Exception as ex:
        print("Error in excel_get_row_column_count="+str(ex))
def excel_copy_range_from_sheet(excel_path="", sheet_name='Sheet1', startCol=0, startRow=0, endCol=0, endRow=0): #*
    """
    Copy a specific range from the provided excel sheet and return the
    copied data as a list of rows.

    Parameters:
        excel_path : full path of the excel file
        sheet_name : source sheet name the contents are copied from
        startCol   : starting column number (index starts from 1)
        startRow   : starting row number (index starts from 1)
        endCol     : ending column number up to where cells are copied
        endRow     : ending row number up to where cells are copied
    Returns:
        list : the copied range data (list of row lists), or None on error.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to copy range from')
        if startCol == 0 and startRow == 0 and endCol == 0 and endRow == 0:
            raw = gui_get_any_input_from_user('startRow , startCol, endRow, endCol (comma separated, index from 1)')
            if raw:
                startRow, startCol, endRow, endCol = (int(v) for v in str(raw).split(","))
        source_wb = load_workbook(filename=excel_path)
        try:
            source_sheet = source_wb[sheet_name]
        except:
            # Unknown sheet name: fall back to the first worksheet
            source_sheet = source_wb.worksheets[0]
        if endRow < startRow:
            endRow = startRow
        # One inner list per selected row
        return [
            [source_sheet.cell(row=r, column=c).value for c in range(startCol, endCol + 1)]
            for r in range(startRow, endRow + 1)
        ]
    except Exception as ex:
        print("Error in copy_range_from_excel_sheet="+str(ex))
def excel_copy_paste_range_from_to_sheet(excel_path="", sheet_name='Sheet1', startCol=0, startRow=0, endCol=0, endRow=0, copiedData=""):#*
    """
    Pastes the copied data in specific range of the given excel sheet.

    Parameters:
        excel_path : full path of the destination excel file; prompted for
            when empty.  If the workbook/sheet cannot be opened, it is
            created first.
        sheet_name : destination sheet name.
        startCol/startRow/endCol/endRow : paste range (index from 1);
            prompted for when all are 0.
        copiedData : list of row lists (as returned by
            excel_copy_range_from_sheet); prompted/collected when empty.
    Returns:
        int : number of data rows pasted minus one, or None on error.
    """
    try:
        try:
            if not copiedData:
                copiedData = excel_copy_range_from_sheet()
            if not excel_path:
                excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to paste range into')
            if startCol == 0 and startRow ==0 and endCol == 0 and endRow == 0:
                sRow_sCol_eRow_Col = gui_get_any_input_from_user('startRow , startCol, endRow, endCol (comma separated, index from 1)')
                if sRow_sCol_eRow_Col:
                    startRow , startCol, endRow, endCol = str(sRow_sCol_eRow_Col).split(",")
                    startRow = int(startRow)
                    startCol = int(startCol)
                    endRow = int(endRow)
                    endCol = int(endCol)
            to_wb = load_workbook(filename = excel_path)
            toSheet = to_wb[sheet_name]
        except:
            # Workbook or sheet missing: create the excel file first, then
            # re-open it.  The two attempts handle backslash vs forward-slash
            # separators in excel_path.
            try:
                excel_create_excel_file_in_given_folder((str(excel_path[:(str(excel_path).rindex("\\"))])),(str(excel_path[str(excel_path).rindex("\\")+1:excel_path.find(".")])),sheet_name)
            except:
                excel_create_excel_file_in_given_folder((str(excel_path[:(str(excel_path).rindex("/"))])),(str(excel_path[str(excel_path).rindex("/")+1:excel_path.find(".")])),sheet_name)
            to_wb = load_workbook(filename = excel_path)
            toSheet = to_wb[sheet_name]
        if endRow < startRow:
            endRow = startRow
        countRow = 0
        # Walk the destination range, pulling values out of copiedData by
        # zero-based offsets (countRow/countCol).
        for i in range(startRow,endRow+1,1):
            countCol = 0
            for j in range(startCol,endCol+1,1):
                toSheet.cell(row = i, column = j).value = copiedData[countRow][countCol]
                countCol += 1
            countRow += 1
        to_wb.save(excel_path)
        return countRow-1
    except Exception as ex:
        print("Error in excel_copy_paste_range_from_to_sheet="+str(ex))
def _excel_copy_range(startCol=1, startRow=1, endCol=1, endRow=1, sheet='Sheet1'):
"""
Copies the specific range from the given excel sheet.
"""
try:
rangeSelected = []
#Loops through selected Rows
for k in range(startRow,endRow + 1,1):
#Appends the row to a RowSelected list
rowSelected = []
for l in range(startCol,endCol+1,1):
rowSelected.append(sheet.cell(row = k, column = l).value)
#Adds the RowSelected List and nests inside the rangeSelected
rangeSelected.append(rowSelected)
return rangeSelected
except Exception as ex:
print("Error in _excel_copy_range="+str(ex))
def _excel_paste_range(startCol=1, startRow=1, endCol=1, endRow=1, sheetReceiving='Sheet1',copiedData=[]):
"""
Pastes the specific range to the given excel sheet.
"""
try:
countRow = 0
for k in range(startRow,endRow+1,1):
countCol = 0
for l in range(startCol,endCol+1,1):
sheetReceiving.cell(row = k, column = l).value = copiedData[countRow][countCol]
countCol += 1
countRow += 1
return countRow
except Exception as ex:
print("Error in _excel_paste_range="+str(ex))
def excel_split_by_column(excel_path="",sheet_name='Sheet1',header=0,columnName=""):#*
    """
    Split the excel file into one output file per distinct value of the
    given column; files are named after the value and written to the
    module-level output folder.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to split by column')
        if not columnName:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            columnName = gui_get_dropdownlist_values_from_user('this list of Columns (to split)',col_lst)
        frame = pd.read_excel(excel_path, sheet_name=sheet_name, header=header, dtype=str)
        for group_key, group_frame in frame.groupby(columnName):
            target = Path(os.path.join(output_folder_path, str(group_key) + ".xlsx"))
            group_frame.to_excel(target, index=False)
    except Exception as ex:
        print("Error in excel_split_by_column="+str(ex))
def excel_split_the_file_on_row_count(excel_path="", sheet_name = 'Sheet1', rowSplitLimit="", outputFolderPath="", outputTemplateFileName ="Split"):#*
    """
    Splits the excel file as per given row limit.

    Writes a sequence of files "<outputTemplateFileName>-<n>.xlsx" to
    *outputFolderPath*, each containing the source header row plus up to
    *rowSplitLimit* data rows from the first worksheet of *excel_path*.

    Parameters:
        excel_path (str) : source excel file; prompted for when empty.
        sheet_name (str) : sheet title given to each output workbook.
        rowSplitLimit (int) : max data rows per output file; prompted for
            when empty.
        outputFolderPath (str) : destination folder; prompted for when empty.
        outputTemplateFileName (str) : output file name prefix.
    Returns:
        bool : True on success, None on error.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, _ = gui_get_excel_sheet_header_from_user('to split on row count')
        if not rowSplitLimit:
            rowSplitLimit = int(gui_get_any_input_from_user("row split Count/Limit Ex: 20"))
        if not outputFolderPath:
            outputFolderPath = gui_get_folder_path_from_user('output folder to Save split excel files')
        # NOTE(review): 'op' is presumably an openpyxl alias imported at
        # module level — confirm.
        src_wb = op.load_workbook(excel_path)
        src_ws = src_wb.worksheets[0]
        src_ws_max_rows = src_ws.max_row
        src_ws_max_cols= src_ws.max_column
        i = 1
        # Row 1 is the header; data starts at row 2.
        start_row = 2
        while start_row <= src_ws_max_rows:
            dest_wb = Workbook()
            dest_ws = dest_wb.active
            dest_ws.title = sheet_name
            #Copy ROW-1 (Header) from SOURCE to Each DESTINATION file
            selectedRange = _excel_copy_range(1,1,src_ws_max_cols,1,src_ws) #startCol, startRow, endCol, endRow, sheet
            _ =_excel_paste_range(1,1,src_ws_max_cols,1,dest_ws,selectedRange) #startCol, startRow, endCol, endRow, sheetReceiving,copiedData
            selectedRange = ""
            # Copy the next rowSplitLimit data rows into destination rows 2..rowSplitLimit+1
            selectedRange = _excel_copy_range(1,start_row,src_ws_max_cols,start_row + rowSplitLimit - 1,src_ws) #startCol, startRow, endCol, endRow, sheet
            _ =_excel_paste_range(1,2,src_ws_max_cols,rowSplitLimit + 1,dest_ws,selectedRange) #startCol, startRow, endCol, endRow, sheetReceiving,copiedData
            start_row = start_row + rowSplitLimit
            # Both separators tried so backslash- and slash-style folder
            # paths work.
            try:
                dest_file_name = str(outputFolderPath) + "\\" + outputTemplateFileName + "-" + str(i) + ".xlsx"
            except:
                dest_file_name = str(outputFolderPath) + "/" + outputTemplateFileName + "-" + str(i) + ".xlsx"
            dest_file_name = Path(dest_file_name)
            dest_wb.save(dest_file_name)
            i = i + 1
        return True
    except Exception as ex:
        print("Error in excel_split_the_file_on_row_count="+str(ex))
def excel_merge_all_files(input_folder_path="",output_folder_path=""):
    """
    Merge all .xlsx files in the given folder into a single
    'Final-<mm-dd-yyyy>.xlsx' file in the output folder.

    Returns:
        bool : True on success, None on error.
    """
    try:
        if not input_folder_path:
            input_folder_path = gui_get_folder_path_from_user('input folder to MERGE files from')
        if not output_folder_path:
            output_folder_path = gui_get_folder_path_from_user('output folder to store Final merged file')
        frames = [
            pd.read_excel(Path(os.path.join(input_folder_path, name)), dtype=str)
            for name in os.listdir(input_folder_path) if name.endswith(".xlsx")
        ]
        merged = pd.concat(frames)
        stamp = datetime.datetime.now().strftime("%m-%d-%Y")
        destination = Path(os.path.join(output_folder_path, "Final-" + stamp + ".xlsx"))
        merged.to_excel(destination, index=False)
        return True
    except Exception as ex:
        print("Error in excel_merge_all_files="+str(ex))
def excel_drop_columns(excel_path="", sheet_name='Sheet1', header=0, columnsToBeDropped = ""):
    """
    Drop the desired column(s) from the given excel file, rewriting it
    in place.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('input excel to Drop the columns from')
        if not columnsToBeDropped:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            columnsToBeDropped = gui_get_dropdownlist_values_from_user('columns list to drop',col_lst)
        frame = pd.read_excel(excel_path, sheet_name=sheet_name, header=header)
        # Normalize to a list so a single column name is handled too
        targets = columnsToBeDropped if isinstance(columnsToBeDropped, list) else [columnsToBeDropped]
        frame.drop(targets, axis=1, inplace=True)
        with pd.ExcelWriter(excel_path) as writer:
            frame.to_excel(writer, sheet_name=sheet_name, index=False)
    except Exception as ex:
        print("Error in excel_drop_columns="+str(ex))
def excel_sort_columns(excel_path="",sheet_name='Sheet1',header=0,firstColumnToBeSorted=None,secondColumnToBeSorted=None,thirdColumnToBeSorted=None,firstColumnSortType=True,secondColumnSortType=True,thirdColumnSortType=True):#*
    """
    Sort the given excel sheet on up to three columns, rewriting it in place.

    Parameters:
        excel_path (str) : full path to the excel file; prompted for when empty.
        sheet_name (str) : sheet to sort.
        header (int) : header row index.
        first/second/thirdColumnToBeSorted (str) : column names in priority
            order; sorting stops at the first one left as None.
        first/second/thirdColumnSortType (bool) : True for ascending.
    Returns:
        bool : True on success, None on error.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to sort the column')
        if not firstColumnToBeSorted:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            usecols = gui_get_dropdownlist_values_from_user('minimum 1 and maximum 3 columns to sort',col_lst)
            if len(usecols) == 3:
                firstColumnToBeSorted , secondColumnToBeSorted , thirdColumnToBeSorted = usecols
            elif len(usecols) == 2:
                firstColumnToBeSorted , secondColumnToBeSorted = usecols
            elif len(usecols) == 1:
                firstColumnToBeSorted = usecols[0]
        df = pd.read_excel(excel_path, sheet_name=sheet_name, header=header)
        # Collect the sort keys in priority order, stopping at the first
        # missing column (same effect as the previous 3-branch if/elif).
        sort_cols, ascending = [], []
        for col, order in ((firstColumnToBeSorted, firstColumnSortType),
                           (secondColumnToBeSorted, secondColumnSortType),
                           (thirdColumnToBeSorted, thirdColumnSortType)):
            if col is None:
                break
            sort_cols.append(col)
            ascending.append(order)
        if sort_cols:
            df = df.sort_values(sort_cols, ascending=ascending)
        # mode='a' + replace rewrites just this sheet.  The previous pattern
        # of assigning writer.book breaks on pandas >= 1.5 (read-only
        # property) and writer.save() was removed in pandas 2.0; it also
        # left stale rows behind when overwriting in place.
        with pd.ExcelWriter(excel_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
            df.to_excel(writer, sheet_name=sheet_name, index=False)
        return True
    except Exception as ex:
        print("Error in excel_sort_columns="+str(ex))
def excel_clear_sheet(excel_path="",sheet_name="Sheet1", header=0):
    """
    Clear the contents of the given excel sheet, keeping the header row intact.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to clear the sheet')
        # head(0) keeps only the column headers, dropping every data row
        empty_frame = pd.read_excel(excel_path, sheet_name=sheet_name, header=header).head(0)
        with pd.ExcelWriter(excel_path) as writer:
            empty_frame.to_excel(writer, sheet_name=sheet_name, index=False)
    except Exception as ex:
        print("Error in excel_clear_sheet="+str(ex))
def excel_set_single_cell(excel_path="", sheet_name="Sheet1", header=0, columnName="", cellNumber=0, setText=""): #*
    """
    Write the given text into the desired column/row of the excel file.

    Parameters:
        excel_path (str) : full path of the excel file; prompted for when empty.
        sheet_name (str) : sheet to update.
        header (int) : header row index.
        columnName : column to write into; prompted for when empty.
        cellNumber (int) : zero-based row label of the cell.
        setText (str) : value to write; prompted for when empty.
    Returns:
        bool : True on success, None on error.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to set cell')
        if not columnName:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            columnName = gui_get_dropdownlist_values_from_user('list of columns to set vlaue',col_lst,multi_select=False)
        if not setText:
            setText = gui_get_any_input_from_user("text value to set the cell")
        df = pd.read_excel(excel_path, sheet_name=sheet_name, header=header)
        df.at[cellNumber, columnName] = setText
        # mode='a' + replace rewrites just this sheet.  The previous pattern
        # of assigning writer.book breaks on pandas >= 1.5 (read-only
        # property) and writer.save() was removed in pandas 2.0.
        with pd.ExcelWriter(excel_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
            df.to_excel(writer, sheet_name=sheet_name, index=False)
        return True
    except Exception as ex:
        print("Error in excel_set_single_cell="+str(ex))
def excel_get_single_cell(excel_path="",sheet_name="Sheet1",header=0, columnName="",cellNumber=0): #*
    """
    Get the value from the desired column/row of the given excel file.

    Returns:
        the cell value, or None on error.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to get cell')
        if not columnName:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            columnName = gui_get_dropdownlist_values_from_user('list of columns to get vlaue',col_lst,multi_select=False)
        # Normalize to a list so both "Name" and ["Name"] inputs work
        if not isinstance(columnName, list):
            columnName = [columnName]
        target_col = columnName[0]
        frame = pd.read_excel(excel_path, sheet_name=sheet_name, header=header, usecols={target_col})
        return frame.at[cellNumber, target_col]
    except Exception as ex:
        print("Error in excel_get_single_cell="+str(ex))
def excel_remove_duplicates(excel_path="",sheet_name="Sheet1", header=0, columnName="", saveResultsInSameExcel=True, which_one_to_keep="first"): #*
    """
    Drop duplicate rows, judged by the desired column(s), from the given
    excel file.

    Parameters:
        excel_path (str) : full path of the excel file; prompted for when empty.
        sheet_name (str) : sheet to de-duplicate.
        header (int) : header row index.
        columnName : column name(s) used to detect duplicates; prompted for
            when empty.
        saveResultsInSameExcel (bool) : overwrite the input file when True,
            otherwise write to "<name>_DupDropped<ext>" next to it.
        which_one_to_keep : passed to DataFrame.drop_duplicates ("first",
            "last" or False).
    Returns:
        int : number of rows remaining after the drop, or None on error.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to remove duplicates')
        if not columnName:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            columnName = gui_get_dropdownlist_values_from_user('list of columns to remove duplicates',col_lst)
        df = pd.read_excel(excel_path, sheet_name=sheet_name, header=header)
        if saveResultsInSameExcel:
            df.drop_duplicates(subset=columnName, keep=which_one_to_keep, inplace=True)
            out_df, out_path = df, excel_path
        else:
            out_df = df.drop_duplicates(subset=columnName, keep=which_one_to_keep, inplace=False)
            # BUG FIX: str.replace(".", "_DupDropped.") rewrote EVERY dot in
            # the path (breaking dotted folder names); splitext only touches
            # the real extension.
            root, ext = os.path.splitext(str(excel_path))
            out_path = root + "_DupDropped" + ext
        with pd.ExcelWriter(out_path) as writer:
            out_df.to_excel(writer, sheet_name=sheet_name, index=False)
        return out_df.shape[0]
    except Exception as ex:
        print("Error in excel_remove_duplicates="+str(ex))
def excel_vlook_up(filepath_1="", sheet_name_1 = 'Sheet1', header_1 = 0, filepath_2="", sheet_name_2 = 'Sheet1', header_2 = 0, Output_path="", OutputExcelFileName="", match_column_name="",how='left'):#*
    """
    Perform a vlookup-style merge of the two excel files on the given
    column.  Possible values for *how* are "inner", "left", "right", "outer".
    Writes the merged sheet to <Output_path>/<OutputExcelFileName>, or back
    over the first file when no output location is given.

    Returns:
        bool : True on success, None on error.
    """
    try:
        if not filepath_1:
            filepath_1, sheet_name_1, header_1 = gui_get_excel_sheet_header_from_user('(Vlookup) first excel')
        if not filepath_2:
            filepath_2, sheet_name_2, header_2 = gui_get_excel_sheet_header_from_user('(Vlookup) second excel')
        if not match_column_name:
            col_lst = excel_get_all_header_columns(filepath_1, sheet_name_1, header_1)
            match_column_name = gui_get_dropdownlist_values_from_user('Vlookup column name to be matched',col_lst,multi_select=False)
            match_column_name = match_column_name[0]
        left = pd.read_excel(filepath_1, sheet_name=sheet_name_1, header=header_1)
        right = pd.read_excel(filepath_2, sheet_name=sheet_name_2, header=header_2)
        merged = pd.merge(left, right, on=match_column_name, how=how)
        # Strip a trailing ".*" wildcard, then make sure the file name
        # carries the .xlsx extension.
        if str(OutputExcelFileName).endswith(".*"):
            OutputExcelFileName = OutputExcelFileName.split(".")[0]
        if Output_path and OutputExcelFileName:
            if ".xlsx" not in OutputExcelFileName:
                OutputExcelFileName = OutputExcelFileName + ".xlsx"
            output_file_path = os.path.join(Output_path, OutputExcelFileName)
        else:
            # No explicit destination: overwrite the first input file
            output_file_path = filepath_1
        with pd.ExcelWriter(Path(output_file_path)) as writer:
            merged.to_excel(writer, index=False)
        return True
    except Exception as ex:
        print("Error in excel_vlook_up="+str(ex))
def screen_clear_search(delay=0.2):
    """
    Clear any previously found text (ctrl+f highlight) on screen by
    searching for an unlikely character sequence and escaping.
    """
    try:
        for step in ("ctrl+f", None, "esc"):
            if step is None:
                # Replace the previous search term with junk
                pg.typewrite("^%#")
            else:
                kb.press_and_release(step)
            time.sleep(delay)
    except Exception as ex:
        print("Error in screen_clear_search="+str(ex))
def scrape_save_contents_to_notepad(folderPathToSaveTheNotepad="",X=pg.size()[0]/2,Y=pg.size()[1]/2): #"Full path to the folder (with double slashes) where notepad is to be stored"
    """
    Copy all the text available on the screen (ctrl+a / ctrl+c) and save it
    to 'notepad-contents.txt' in the given folder.

    Parameters:
        folderPathToSaveTheNotepad (str) : destination folder; prompted for
            when empty.
        X, Y (int) : screen point clicked to focus the window before
            copying (defaults to the screen center).
    Returns:
        str : confirmation message with the saved file path, or None on error.
    """
    try:
        if not folderPathToSaveTheNotepad:
            folderPathToSaveTheNotepad = gui_get_folder_path_from_user('folder to save notepad contents')
        message_counter_down_timer("Screen scraping in (seconds)",3)
        time.sleep(1)
        pg.click(X,Y)
        time.sleep(0.5)
        kb.press_and_release("ctrl+a")
        time.sleep(1)
        kb.press_and_release("ctrl+c")
        time.sleep(1)
        clipboard_data = clipboard.paste()
        time.sleep(2)
        screen_clear_search()
        notepad_file_path = Path(folderPathToSaveTheNotepad) / 'notepad-contents.txt'
        # BUG FIX: 'with' guarantees the handle is closed even when the
        # write raises (the old code leaked the open file on exceptions).
        with open(notepad_file_path, "w", encoding="utf-8") as f:
            f.write(clipboard_data)
        time.sleep(10)
        clipboard_data = ''
        return "Saved the contents at " + str(notepad_file_path)
    except Exception as ex:
        print("Error in scrape_save_contents_to_notepad = "+str(ex))
def scrape_get_contents_by_search_copy_paste(highlightText=""):
    """
    Focus the screen by searching the given text (ctrl+f), then copy all
    data and return it as a list of non-empty, stripped lines.
    This is useful in Citrix applications.
    """
    output_lst_newline_removed = []
    try:
        if not highlightText:
            highlightText = gui_get_any_input_from_user("text to be searched in Citrix environment")
        time.sleep(1)
        kb.press_and_release("ctrl+f")
        time.sleep(1)
        pg.typewrite(highlightText)
        time.sleep(1)
        kb.press_and_release("enter")
        time.sleep(1)
        kb.press_and_release("esc")
        time.sleep(2)
        # Slow down subsequent pyautogui calls while the selection settles
        pg.PAUSE = 2
        kb.press_and_release("ctrl+a")
        time.sleep(2)
        kb.press_and_release("ctrl+c")
        time.sleep(2)
        clipboard_data = clipboard.paste()
        time.sleep(2)
        screen_clear_search()
        output_lst_newline_removed = [ln.strip() for ln in clipboard_data.splitlines() if ln.strip()]
        clipboard_data = ''
        return output_lst_newline_removed
    except Exception as ex:
        print("Error in scrape_get_contents_by_search_copy_paste="+str(ex))
def mouse_move(x="",y=""):
    """
    Move the cursor to the given X Y co-ordinates.

    Parameters:
        x (int) : screen X co-ordinate; prompted for when empty.
        y (int) : screen Y co-ordinate; prompted for when empty.
    """
    try:
        if not x and not y:
            x_y = str(gui_get_any_input_from_user("X,Y co-ordinates to the move Mouse to. Ex: 200,215"))
            if "," in x_y:
                x, y = x_y.split(",")
            else:
                # BUG FIX: the space-separated branch previously left x/y as
                # strings (no int conversion), unlike mouse_click's handling.
                x, y = x_y.split(" ")[0], x_y.split(" ")[1]
            x = int(x)
            y = int(y)
        if x and y:
            time.sleep(0.2)
            pg.moveTo(x,y)
            time.sleep(0.2)
    except Exception as ex:
        print("Error in mouse_move="+str(ex))
def mouse_get_color_by_position(pos=None):
    """
    Get the pixel color at the given X Y co-ordinates of the screen.

    Parameters:
        pos (tuple) : (x, y) co-ordinates; prompted for when omitted.
            (BUG FIX: the default was a mutable list, a shared-state
            anti-pattern; None behaves identically for callers.)
    Returns:
        tuple : pixel color at *pos*, or None on error.
    """
    try:
        if not pos:
            raw = gui_get_any_input_from_user("X,Y co-ordinates to get its color. Ex: 200,215")
            pos = tuple(map(int, raw.split(',')))
        snapshot = pg.screenshot()
        time.sleep(0.5)
        return snapshot.getpixel(pos)
    except Exception as ex:
        print("Error in mouse_get_color_by_position = "+str(ex))
def mouse_click(x="", y="", left_or_right="left", single_double_triple="single", copyToClipBoard_Yes_No="no"):
    """
    Clicks at the given X Y co-ordinates on the screen using single /
    double / triple click(s).
    Optionally copies selected data to clipboard (works for double / triple clicks).

    Parameters:
        x, y (int) : screen co-ordinates; prompted for when both empty.
        left_or_right (str) : "left" or "right" mouse button.
        single_double_triple (str) : "single", "double" or "triple"
            (right button only supports "single").
        copyToClipBoard_Yes_No (str) : "yes" to press ctrl+c after the
            click and return the clipboard text.
    Returns:
        str : copied text when requested, otherwise "".
    """
    try:
        if not x and not y:
            x_y = str(gui_get_any_input_from_user("X,Y co-ordinates to perform Mouse (Left) Click. Ex: 200,215"))
            # Accept "x,y" (as documented) or "x y" as a fallback
            if "," in x_y:
                x, y = x_y.split(",")
                x = int(x)
                y = int(y)
            else:
                x = int(x_y.split(" ")[0])
                y = int(x_y.split(" ")[1])
        copiedText = ""
        time.sleep(1)
        if x and y:
            # Dispatch on button + click-count; unsupported combinations
            # (e.g. right + double) fall through without clicking.
            if single_double_triple.lower() == "single" and left_or_right.lower() == "left":
                pg.click(x,y)
            elif single_double_triple.lower() == "double" and left_or_right.lower() == "left":
                pg.doubleClick(x,y)
            elif single_double_triple.lower() == "triple" and left_or_right.lower() == "left":
                pg.tripleClick(x,y)
            elif single_double_triple.lower() == "single" and left_or_right.lower() == "right":
                pg.rightClick(x,y)
            time.sleep(1)
            if copyToClipBoard_Yes_No.lower() == "yes":
                kb.press_and_release("ctrl+c")
                time.sleep(1)
                copiedText = clipboard.paste().strip()
                time.sleep(1)
        time.sleep(1)
        return copiedText
    except Exception as ex:
        print("Error in mouseClick="+str(ex))
def mouse_drag_from_to(X1="",Y1="",X2="",Y2="",delay=0.5):
    """
    Click and drag from (X1, Y1) to (X2, Y2) on the screen.

    Parameters:
        X1, Y1 (int) : drag start point; prompted for when empty.
        X2, Y2 (int) : drag end point; prompted for when empty.
        delay (float) : duration of the move and of the drag, in seconds.
    """
    def _ask_point(prompt):
        # Accept "x,y" (as documented) and fall back to "x y", matching
        # mouse_click's parsing.  BUG FIX: previously a non-comma input
        # left the co-ordinates as empty strings, so pg.moveTo failed.
        raw = str(gui_get_any_input_from_user(prompt))
        if "," in raw:
            a, b = raw.split(",")
        else:
            a, b = raw.split(" ")[0], raw.split(" ")[1]
        return int(a), int(b)
    try:
        if not X1 and not Y1:
            X1, Y1 = _ask_point("Mouse Drag FROM Values ex: 200,215")
        if not X2 and not Y2:
            X2, Y2 = _ask_point("Mouse Drag TO Values ex: 200,215")
        time.sleep(0.2)
        pg.moveTo(X1,Y1,duration=delay)
        pg.dragTo(X2,Y2,duration=delay,button='left')
        time.sleep(0.2)
    except Exception as ex:
        print("Error in mouse_drag_from_to="+str(ex))
def search_highlight_tab_enter_open(searchText="",hitEnterKey="Yes",shift_tab='No'):
    """
    Search for a text on screen using ctrl+f and optionally hit enter,
    with an optional tab / shift+tab round-trip first.
    This function is useful in Citrix environment.

    Returns:
        bool : True on success, None on error.
    """
    try:
        if not searchText:
            searchText = gui_get_any_input_from_user("Search Text to Highlight (in Citrix Environment)")
        time.sleep(0.5)
        kb.press_and_release("ctrl+f")
        time.sleep(0.5)
        kb.write(searchText)
        time.sleep(0.5)
        kb.press_and_release("enter")
        time.sleep(0.5)
        kb.press_and_release("esc")
        time.sleep(0.2)
        wants_enter = hitEnterKey.lower() == "yes"
        if wants_enter and shift_tab.lower() == "yes":
            # Tab away and back to refresh focus before opening
            kb.press_and_release("tab")
            time.sleep(0.3)
            kb.press_and_release("shift+tab")
            time.sleep(0.3)
            kb.press_and_release("enter")
            time.sleep(2)
        elif wants_enter and shift_tab.lower() == "no":
            kb.press_and_release("enter")
            time.sleep(2)
        return True
    except Exception as ex:
        print("Error in search_highlight_tab_enter_open="+str(ex))
def key_press(strKeys=""):
    """
    Emulate the given keystroke combination.

    Parameters:
        strKeys (str) : '+'-delimited key combo (e.g. "ctrl+o"); prompted
            for when empty.
    """
    try:
        if not strKeys:
            strKeys = gui_get_any_input_from_user("keys combination using + as delimeter. Ex: ctrl+O")
        strKeys = strKeys.lower()
        # Presumably works around plain-"shift" handling in the keyboard
        # lib by pressing both physical shift keys — confirm.
        if "shift" in strKeys:
            strKeys = strKeys.replace("shift","left shift+right shift")
        time.sleep(0.5)
        kb.press_and_release(strKeys)
        time.sleep(0.5)
    except Exception as ex:
        print("Error in key_press="+str(ex))
def key_write_enter(strMsg="",delay=1,key="e"):
    """
    Write/type the given text, then press enter (default) or tab.

    Parameters:
        strMsg (str) : text to type; prompted for when empty.
        delay (float) : seconds to wait after typing.
        key (str) : "e" for enter, "t" for tab; anything else presses nothing.
    """
    try:
        if not strMsg:
            strMsg = gui_get_any_input_from_user("message / username / any text")
        time.sleep(0.2)
        kb.write(strMsg)
        time.sleep(delay)
        follow_up = {"e": "enter", "t": "tab"}.get(key.lower())
        if follow_up:
            key_press(follow_up)
        time.sleep(1)
    except Exception as ex:
        print("Error in key_write_enter="+str(ex))
def date_convert_to_US_format(input_str=""):
    """
    Convert the given date string to US date format (MM/DD/YYYY).

    Recognized layouts (searched anywhere in the string, in this order):
    YYYY-MM-DD, DD-MM-YYYY (falling back to MM-DD-YYYY), DD/MM/YYYY
    (falling back to MM/DD/YYYY), YYYY/MM/DD and 'Mon DD, YYYY'.

    Returns:
        str : the date formatted as MM/DD/YYYY, or None when nothing parses
        (an error is printed in that case).
    """
    try:
        if not input_str:
            input_str = gui_get_any_input_from_user('Date value Ex: 01/01/2021')
        # (pattern, candidate strptime formats) tried in order; the first
        # matching pattern wins, then its formats are tried left to right.
        attempts = (
            (r'\d{4}-\d{2}-\d{2}', ('%Y-%m-%d',)),
            (r'\d{2}-\d{2}-\d{4}', ('%d-%m-%Y', '%m-%d-%Y')),
            (r'\d{2}/\d{2}/\d{4}', ('%d/%m/%Y', '%m/%d/%Y')),
            (r'\d{4}/\d{2}/\d{2}', ('%Y/%m/%d',)),
            (r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d\d,\s\d{4}', ('%b %d, %Y',)),
        )
        for pattern, formats in attempts:
            found = re.search(pattern, input_str)
            if found is None:
                continue
            last_err = None
            for fmt in formats:
                try:
                    parsed = datetime.datetime.strptime(found.group(), fmt).date()
                    return parsed.strftime('%m/%d/%Y')
                except ValueError as err:
                    last_err = err
            # A pattern matched but no format parsed: surface the parse
            # error to the handler below (matches the original behavior).
            raise last_err
        # Nothing matched at all: mirror the original's empty findall()[0]
        # lookup, which reported "list index out of range".
        return [][0]
    except Exception as ex:
        print("Error in date_convert_to_US_format="+str(ex))
def mouse_search_snip_return_coordinates_x_y(img="", conf=0.9, wait=180,region=(0,0,pg.size()[0],pg.size()[1])):
    """
    Search the given snip image on the screen and return the (x, y) center
    of its match, polling once per second for up to *wait* seconds.

    Returns:
        tuple : (x, y) of the match center, or None when never found.
    """
    try:
        if not img:
            img = gui_get_any_file_from_user("snip image file, to get X,Y coordinates","png")
        time.sleep(1)
        box = pg.locateOnScreen(img, confidence=conf, region=region)
        tries = 0
        while box is None and tries < int(wait):
            box = pg.locateOnScreen(img, confidence=conf, region=region)
            time.sleep(1)
            tries += 1
        time.sleep(1)
        if box:
            # Convert the bounding box to its center point
            return (box.left + int(box.width / 2), box.top + int(box.height / 2))
        return box
    except Exception as ex:
        print("Error in mouse_search_snip_return_coordinates_x_y="+str(ex))
def mouse_search_snips_return_coordinates_x_y(img_lst=[], conf=0.9, wait=180, region=(0,0,pg.size()[0],pg.size()[1])):
    """
    Search the given set of snip images on the screen and return the (x, y)
    center of the FIRST OCCURANCE, polling up to *wait* seconds.

    Parameters:
        img_lst (list) : snip image file paths; when empty the user picks a
            folder and every file inside it is used.
        conf (float) : locateOnScreen confidence.
        wait (int) : maximum seconds to keep polling.
        region (tuple) : screen region to restrict the search to.
    Returns:
        tuple : (x, y) of the first match, or None when nothing is found.
    """
    try:
        if not img_lst:
            folder = gui_get_folder_path_from_user("folder having snip image files, to get X,Y coordinates of any one")
            # BUG FIX: the old code iterated the (empty) img_lst instead of
            # the chosen folder's contents, so no image was ever loaded; it
            # also appended into the mutable default argument.  Build a
            # fresh list from the folder's files instead.
            img_lst = [Path(os.path.join(folder, name)) for name in os.listdir(folder)]
        time.sleep(1)
        if len(img_lst) > 0:
            # First pass: try each image once, immediately.
            pos = None
            for img in img_lst:
                pos = pg.locateOnScreen(img, confidence=conf, region=region)
                if pos is not None:
                    break
            # Retry the whole set once per second until found or timed out.
            i = 0
            while pos is None and i < int(wait):
                for img in img_lst:
                    pos = pg.locateOnScreen(img, confidence=conf, region=region)
                    if pos is not None:
                        break
                time.sleep(1)
                i = i + 1
            time.sleep(1)
            if pos:
                # Convert the bounding box to its center point
                return (pos.left + int(pos.width / 2), pos.top + int(pos.height / 2))
            return pos
    except Exception as ex:
        print("Error in mouse_search_snips_return_coordinates_x_y="+str(ex))
def find_text_on_screen(searchText="",delay=0.1, occurance=1,isSearchToBeCleared=False):
    """
    Clear any previous search, then find the provided text on screen via
    ctrl+f.

    Parameters:
        searchText (str) : text to find; prompted for when empty.
        delay (float) : pause between keystrokes.
        occurance (int) : which match to stop at (extra enters jump ahead).
        isSearchToBeCleared (bool) : clear the search again afterwards.
    """
    screen_clear_search()  # always start from a clean search state
    if not searchText:
        searchText = gui_get_any_input_from_user("search text to Find on screen")
    time.sleep(delay)
    kb.press_and_release("ctrl+f")
    time.sleep(delay)
    pg.typewrite(searchText)
    time.sleep(delay)
    # Each extra 'enter' advances to the next occurrence of the match
    for _ in range(occurance - 1):
        kb.press_and_release("enter")
        time.sleep(delay)
    kb.press_and_release("esc")
    time.sleep(delay)
    if isSearchToBeCleared:
        screen_clear_search()
def mouse_search_snip_return_coordinates_box(img="", conf=0.9, wait=180,region=(0,0,pg.size()[0],pg.size()[1])):
    """
    Search the given snip image on the screen and return its bounding box
    (left, top, width, height), polling once per second for up to *wait*
    seconds.  Returns None when the image is never found.
    """
    try:
        if not img:
            img = gui_get_any_file_from_user("snip image file, to get BOX coordinates","png")
        time.sleep(1)
        box = pg.locateOnScreen(img, confidence=conf, region=region)
        tries = 0
        while box is None and tries < int(wait):
            box = pg.locateOnScreen(img, confidence=conf, region=region)
            time.sleep(1)
            tries += 1
        time.sleep(1)
        return box
    except Exception as ex:
        print("Error in mouse_search_snip_return_coordinates_box="+str(ex))
def mouse_find_highlight_click(searchText="",delay=0.1,occurance=1,left_right="left",single_double_triple="single",copyToClipBoard_Yes_No="no"):
    """
    Searches the given text on the screen, highlights and clicks it.

    Strategy: capture one screenshot with the search highlight cleared and
    one with the text highlighted, diff the two via SSIM, and click the
    centre of the first changed region.

    Args:
        searchText: text to find; prompted from the user when empty.
        delay / occurance: forwarded to find_text_on_screen().
        left_right, single_double_triple, copyToClipBoard_Yes_No:
            forwarded to mouse_click().
    """
    try:
        if not searchText:
            searchText = gui_get_any_input_from_user("search text to Highlight & Click")
        time.sleep(0.2)
        # baseline screenshot: search performed but highlight cleared
        find_text_on_screen(searchText,delay=delay,occurance=occurance,isSearchToBeCleared = True) #clear the search
        img = pg.screenshot()
        img.save(ss_path_b)
        time.sleep(0.2)
        imageA = cv2.imread(ss_path_b)
        time.sleep(0.2)
        # second screenshot: same search, highlight still visible
        find_text_on_screen(searchText,delay=delay,occurance=occurance,isSearchToBeCleared = False) #dont clear the searched text
        img = pg.screenshot()
        img.save(ss_path_a)
        time.sleep(0.2)
        imageB = cv2.imread(ss_path_a)
        time.sleep(0.2)
        # convert both images to grayscale
        grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
        grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
        # compute the Structural Similarity Index (SSIM) between the two
        (_, diff) = structural_similarity(grayA, grayB, full=True)
        diff = (diff * 255).astype("uint8")
        # threshold + contour detection isolates the highlighted region(s)
        thresh = cv2.threshold(diff, 0, 255,
            cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # loop over the contours; only the first contour is clicked (see break)
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            # click the centre point of the changed region
            X = int(x + (w/2))
            Y = int(y + (h/2))
            mouse_click(x=X,y=Y,left_or_right=left_right,single_double_triple=single_double_triple,copyToClipBoard_Yes_No=copyToClipBoard_Yes_No)
            time.sleep(0.5)
            break
    except Exception as ex:
        print("Error in mouse_find_highlight_click="+str(ex))
def schedule_create_task_windows(Weekly_Daily="D",week_day="Sun",start_time_hh_mm_24_hr_frmt="11:00"):#*
    """
    Schedules (weekly & daily options as of now) the current BOT (.bat) using Windows Task Scheduler. Please call create_batch_file() function before using this function to convert .pyw file to .bat
    """
    global batch_file_path
    try:
        if not batch_file_path:
            batch_file_path = gui_get_any_file_from_user('BATCH file to Schedule. Please call create_batch_file() to create one')
        # build the schtasks command for the requested cadence ("D" daily, "W" weekly)
        command = ""
        if Weekly_Daily == "D":
            command = r"powershell.exe Start-Process schtasks '/create /SC DAILY /tn ClointFusion\{} /tr {} /st {}' ".format(bot_name,batch_file_path,start_time_hh_mm_24_hr_frmt)
        elif Weekly_Daily == "W":
            command = r"powershell.exe Start-Process schtasks '/create /SC WEEKLY /D {} /tn ClointFusion\{} /tr {} /st {}' ".format(week_day,bot_name,batch_file_path,start_time_hh_mm_24_hr_frmt)
        subprocess.call(command)
        print("Task Scheduled")
    except Exception as ex:
        print("Error in schedule_create_task_windows="+str(ex))
def schedule_delete_task_windows():
    """
    Deletes already scheduled task. Asks user to supply task_name used during scheduling the task. You can also perform this action from Windows Task Scheduler.
    """
    try:
        # schtasks /delete removes the task created under the ClointFusion folder
        command = r"powershell.exe Start-Process schtasks '/delete /tn ClointFusion\{} ' ".format(bot_name)
        subprocess.call(command)
        print("Task {} Deleted".format(bot_name))
    except Exception as ex:
        print("Error in schedule_delete_task="+str(ex))
@lru_cache(None)
def _get_tabular_data_from_website(Website_URL):
    """
    Internal helper: fetch all HTML tables from Website_URL via pandas.

    Returns:
        list of DataFrames on success, or "" when fetching/parsing fails
        (callers treat a falsy result as "no tables found").
    """
    # BUGFIX: the original used `finally: return all_tables`, which made the
    # `return` inside `try` dead code and silently swallowed any exception
    # raised while handling the error. Plain returns restore clear control flow.
    try:
        return pd.read_html(Website_URL)
    except Exception as ex:
        print("Error in _get_tabular_data_from_website="+str(ex))
        return ""
def browser_get_html_tabular_data_from_website(Website_URL="",table_index=-1,drop_first_row=False,drop_first_few_rows=[0],drop_last_row=False):
    """
    Web Scrape HTML Tables : Gets Website Table Data Easily as an Excel using Pandas. Just pass the URL of Website having HTML Tables.
    If there are 5 tables on that HTML page and you want 4th table, pass table_index as 3
    Ex: browser_get_html_tabular_data_from_website(Website_URL=URL)

    table_index == -1 (default) exports every table as one sheet per table;
    otherwise only the chosen table is exported, optionally trimmed.
    """
    try:
        if not Website_URL:
            Website_URL= gui_get_any_input_from_user("website URL to get HTML Tabular Data ex: https://www.google.com ")
        all_tables = _get_tabular_data_from_website(Website_URL)
        if all_tables:
            # Output file name is derived from the last path segment of the URL
            # (backslash split first, forward slash as a fallback).
            # if no table_index is specified, then get all tables in output
            if table_index == -1:
                try:
                    strFileName = Website_URL[Website_URL.rindex("\\")+1:] + "_All_Tables" + ".xlsx"
                except:
                    strFileName = Website_URL[Website_URL.rindex("/")+1:] + "_All_Tables" + ".xlsx"
                # NOTE(review): the workbook is created here using the bare file
                # name, while strFileName is joined to output_folder_path only
                # below — confirm both resolve to the same location.
                excel_create_excel_file_in_given_folder(output_folder_path,strFileName)
            else:
                try:
                    strFileName = Website_URL[Website_URL.rindex("\\")+1:] + "_" + str(table_index) + ".xlsx"
                except:
                    strFileName = Website_URL[Website_URL.rindex("/")+1:] + "_" + str(table_index) + ".xlsx"
            strFileName = os.path.join(output_folder_path,strFileName)
            strFileName = Path(strFileName)
            if table_index == -1:
                # one sheet per scraped table, named by its index
                for i in range(len(all_tables)):
                    table = all_tables[i] #lool thru table_index values
                    table = table.reset_index(drop=True) #Avoid multi index error in our dataframes
                    with pd.ExcelWriter(strFileName) as writer:
                        table.to_excel(writer, sheet_name=str(i)) #index=False
            else:
                table = all_tables[table_index] #get required table_index
                # NOTE(review): drop_first_row actually drops the rows listed in
                # drop_first_few_rows (default [0]) — confirm the flag/list pairing.
                if drop_first_row:
                    table = table.drop(drop_first_few_rows) # Drop first few rows (passed as list)
                if drop_last_row:
                    table = table.drop(len(table)-1) # Drop last row
                # table.columns = list(table.iloc[0])
                # table = table.drop(len(drop_first_few_rows))
                table = table.reset_index(drop=True)
                table.to_excel(strFileName, index=False)
            print("Table saved as Excel at {} ".format(strFileName))
        else:
            print("No tables found in given website " + str(Website_URL))
    except Exception as ex:
        print("Error in browser_get_html_tabular_data_from_website="+str(ex))
def excel_draw_charts(excel_path="",sheet_name='Sheet1', header=0, x_col="", y_col="", color="", chart_type='bar', title='ClointFusion', show_chart=False):
    """
    Interactive data visualization function, which accepts excel file, X & Y column.
    Chart types accepted are bar , scatter , pie , sun , histogram , box , strip.
    You can pass color column as well, having a boolean value.
    Image gets saved as .PNG in the same path as excel file.
    Usage: excel_charts(<excel path>,x_col='Name',y_col='Age', chart_type='bar',show_chart=True)
    """
    try:
        # prompt for anything the caller did not supply
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('for data visualization')
        if not x_col:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            x_col = gui_get_dropdownlist_values_from_user('X Axis Column',col_lst,multi_select=False)[0]
        if not y_col:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            y_col = gui_get_dropdownlist_values_from_user('Y Axis Column',col_lst,multi_select=False)[0]
        if x_col and y_col:
            # read only the columns actually needed for the chart
            if color:
                df = pd.read_excel(excel_path,sheet_name=sheet_name,header=header,usecols={x_col,y_col,color})
            else:
                df = pd.read_excel(excel_path,sheet_name=sheet_name,header=header,usecols={x_col,y_col})
            fig = go.Figure()
            if chart_type == 'bar':
                # px.bar builds the complete figure; the original also added two
                # throwaway go.Bar traces that px.bar immediately replaced.
                if color:
                    fig = px.bar(df, x=x_col, y=y_col, barmode="group",color=color)
                else:
                    fig = px.bar(df, x=x_col, y=y_col, barmode="group")
            elif chart_type == 'scatter':
                fig.add_trace(go.Scatter(x=df[x_col].values.tolist()))
                # BUGFIX: the second trace previously plotted x_col on the Y axis
                fig.add_trace(go.Scatter(y=df[y_col].values.tolist()))
            elif chart_type =='pie':
                if color:
                    fig = px.pie(df, names=x_col, values=y_col, title=title,color=color)#,hover_data=df.columns)
                else:
                    fig = px.pie(df, names=x_col, values=y_col, title=title)#,hover_data=df.columns)
            elif chart_type =='sun':
                if color:
                    fig = px.sunburst(df, path=[x_col], values=y_col,hover_data=df.columns,color=color)
                else:
                    fig = px.sunburst(df, path=[x_col], values=y_col,hover_data=df.columns)
            elif chart_type == 'histogram':
                if color:
                    fig = px.histogram(df, x=x_col, y=y_col, marginal="rug",color=color, hover_data=df.columns)
                else:
                    fig = px.histogram(df, x=x_col, y=y_col, marginal="rug",hover_data=df.columns)
            elif chart_type == 'box':
                if color:
                    fig = px.box(df, x=x_col, y=y_col, notched=True,color=color)
                else:
                    fig = px.box(df, x=x_col, y=y_col, notched=True)
            elif chart_type == 'strip':
                if color:
                    fig = px.strip(df, x=x_col, y=y_col, orientation="h",color=color)
                else:
                    fig = px.strip(df, x=x_col, y=y_col, orientation="h")
            fig.update_layout(title = title)
            if show_chart:
                fig.show()
            # render the figure to PNG via kaleido and save next to the outputs
            strFileName = _extract_filename_from_filepath(excel_path)
            strFileName = os.path.join(output_folder_path,strFileName + ".PNG")
            strFileName = Path(strFileName)
            scope = PlotlyScope()
            with open(strFileName, "wb") as f:
                f.write(scope.transform(fig, format="png"))
            print("Chart saved at " + str(strFileName))
        else:
            print("Please supply all the required values")
    except Exception as ex:
        print("Error in excel_draw_charts=" + str(ex))
def get_long_lat(strZipCode=0):
    """
    Function takes zip_code as input (int) and returns longitude, latitude, state, city, county.
    """
    try:
        if not strZipCode:
            strZipCode = str(gui_get_any_input_from_user("USA Zip Code ex: 77429"))
        # zipcodes.matching returns a list of candidate records; use the first
        record = zipcodes.matching(str(strZipCode))[0]
        return (record['long'], record['lat'], record['state'],
                record['city'], record['county'])
    except Exception as ex:
        print("Error in get_long_lat="+str(ex))
def excel_geotag_using_zipcodes(excel_path="",sheet_name='Sheet1',header=0,zoom_start=5,zip_code_column="",data_columns_as_list=[],color_boolean_column=""):
    """
    Function takes Excel file having ZipCode column as input. Takes one data column at present.
    Creates .html file having geo-tagged markers/baloons on the page.
    Ex: excel_geotag_using_zipcodes()

    Marker colors: green when color_boolean_column is True for the row,
    red when a data column is supplied, blue otherwise.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('for geo tagging (Note: As of now, works only for USA Zip codes)')
        if not zip_code_column:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            zip_code_column = gui_get_dropdownlist_values_from_user('having Zip Codes',col_lst,multi_select=False)[0]
        # centre the map roughly on the continental USA
        m = folium.Map(location=[40.178877,-100.914253 ], zoom_start=zoom_start)
        if len(data_columns_as_list) == 1:
            data_columns_as_str = str(data_columns_as_list).replace("[","").replace("]","").replace("'","")
        else:
            data_columns_as_str = str(data_columns_as_list).replace("[","").replace("]","")
            data_columns_as_str = data_columns_as_str[1:-1]
        # BUGFIX: copy before appending — the original appended into the mutable
        # default argument, so repeated calls accumulated stale column names.
        use_cols = list(data_columns_as_list)
        use_cols.append(zip_code_column)
        if color_boolean_column:
            use_cols.append(color_boolean_column)
        df = pd.read_excel(excel_path,sheet_name=sheet_name,header=header,usecols=use_cols)
        for _, row in df.iterrows():
            # skip blank / non-numeric zip codes
            if not pd.isna(row[zip_code_column]) and str(row[zip_code_column]).isnumeric():
                long, lat, state, city, county = get_long_lat(str(row[zip_code_column]))
                county = str(county).replace("County","")
                if color_boolean_column and data_columns_as_str and row[color_boolean_column] == True:
                    folium.Marker(location=[lat, long], popup='State: ' + state + ',\nCity:' + city + ',\nCounty:' + county + ',\nDevice:' + row[data_columns_as_str], icon=folium.Icon(color='green', icon='info-sign')).add_to(m)
                elif data_columns_as_str:
                    folium.Marker(location=[lat, long], popup='State: ' + state + ',\nCity:' + city + ',\nCounty:' + county + ',\nDevice:' + row[data_columns_as_str], icon=folium.Icon(color='red', icon='info-sign')).add_to(m)
                else:
                    folium.Marker(location=[lat, long], popup='State: ' + state + ',\nCity:' + city + ',\nCounty:' + county, icon=folium.Icon(color='blue', icon='info-sign')).add_to(m)
        graphFileName = _extract_filename_from_filepath(excel_path)
        graphFileName = os.path.join(output_folder_path,graphFileName + ".html")
        graphFileName = Path(graphFileName)
        m.save(graphFileName)
        # BUGFIX: "str" + Path raised TypeError before the map was ever saved;
        # print via str() and after the save so the message is truthful.
        print("GeoTagged Graph saved at "+ str(graphFileName))
    except Exception as ex:
        print("Error in excel_geotag_using_zipcodes="+str(ex))
def _accept_cookies_h():
    """
    Internal function to accept cookies.
    """
    try:
        # dismiss the consent banner only when it is actually shown
        banner_visible = Text('Accept cookies?').exists()
        if banner_visible:
            click('I accept')
    except Exception as ex:
        print("Error in _accept_cookies_h="+str(ex))
def launch_website_h(URL="",dp=False,dn=True,igc=True,smcp=True,i=False,headless=False):
    """
    Internal function to launch browser.

    Flag-to-Chrome-switch mapping: dp=disable popup blocking, dn=disable
    notifications, igc=ignore certificate errors, smcp=suppress message
    center popups, i=incognito. Falls back to Firefox when Chrome fails.
    """
    # NOTE: docstring moved out of the try block where the original kept it.
    try:
        if not URL:
            URL = gui_get_any_input_from_user("website URL to Launch Website using Helium functions. Ex https://www.google.com")
        global helium_service_launched
        helium_service_launched=True
        options = ChromeOptions()
        if dp:
            options.add_argument("--disable-popup-blocking")
        if dn:
            options.add_argument("--disable-notifications")
        if igc:
            options.add_argument("--ignore-certificate-errors")
        if smcp:
            options.add_argument("--suppress-message-center-popups")
        if i:
            options.add_argument("--incognito")
        options.add_argument("--disable-translate")
        options.add_argument("--start-maximized")
        options.add_argument("--ignore-autocomplete-off-autofill")
        options.add_argument("--no-first-run")
        #options.add_argument("--window-size=1920,1080")
        try:
            start_chrome(url=URL,options=options,headless=headless)
        except:
            try:
                start_firefox(url=URL,options=options,headless=headless) #to be tested
            except Exception as ex:
                # BUGFIX: message previously read "Chrome & Forefox" (typo, wrong conjunction)
                print('Either Chrome or Firefox is required='+str(ex))
        Config.implicit_wait_secs = 120
        _accept_cookies_h()
    except Exception as ex:
        print("Error in launch_website_h = "+str(ex))
        # BUGFIX: guard kill_browser() — when no browser ever started it raises,
        # which previously skipped resetting helium_service_launched.
        try:
            kill_browser()
        except Exception:
            pass
        helium_service_launched = False
def browser_navigate_h(url="",dp=False,dn=True,igc=True,smcp=True,i=False,headless=False):
    """
    Navigates to Specified URL.
    """
    try:
        if not url:
            url = gui_get_any_input_from_user("website URL to Navigate using Helium functions. Ex: https://www.google.com")
        global helium_service_launched
        if not helium_service_launched:
            # no browser yet: launching also performs the navigation
            launch_website_h(URL=url,dp=dp,dn=dn,igc=igc,smcp=smcp,i=i,headless=headless)
        else:
            go_to(url.lower())
            _accept_cookies_h()
    except Exception as ex:
        print("Error in browser_navigate_h = "+str(ex))
        helium_service_launched = False
def browser_write_h(Value="",User_Visible_Text_Element="",alert=False):
    """
    Write a string on the given element.
    """
    try:
        if not User_Visible_Text_Element:
            User_Visible_Text_Element = gui_get_any_input_from_user('visible element (placeholder) to WRITE your value. Ex: Username')
        if not Value:
            Value= gui_get_any_input_from_user('Value to be Written')
        if Value and User_Visible_Text_Element:
            # target is either the plain placeholder text or a browser alert box
            target = Alert(User_Visible_Text_Element) if alert else User_Visible_Text_Element
            write(Value, into=target)
    except Exception as ex:
        print("Error in browser_write_h = "+str(ex))
def browser_mouse_click_h(User_Visible_Text_Element="",element="d"):
    """
    click on the given element.
    """
    try:
        if not User_Visible_Text_Element:
            User_Visible_Text_Element = gui_get_any_input_from_user("visible text element (button/link/checkbox/radio etc) to Click")
        if not User_Visible_Text_Element:
            return
        # map the element-kind code to the helium locator wrapper
        wrappers = {
            "d": lambda t: t,                 # default: raw visible text
            "l": link,                        # hyperlink
            "b": Button,
            "t": TextField,
            "c": CheckBox,
            "r": RadioButton,
            "i": lambda t: Image(alt=t),      # image located by ALT text
        }
        wrap = wrappers.get(element.lower())
        if wrap is not None:                  # unknown codes are ignored, as before
            click(wrap(User_Visible_Text_Element))
    except Exception as ex:
        print("Error in browser_mouse_click_h = "+str(ex))
def browser_mouse_double_click_h(User_Visible_Text_Element=""):
    """
    Doubleclick on the given element.
    """
    try:
        target = User_Visible_Text_Element or gui_get_any_input_from_user("visible text element (button/link/checkbox/radio etc) to Double Click")
        if target:
            doubleclick(target)
    except Exception as ex:
        print("Error in browser_mouse_double_click_h = "+str(ex))
def browser_locate_element_h(element="",get_text=False):
    """
    Find the element by Xpath, id or css selection.
    """
    try:
        if not element:
            element = gui_get_any_input_from_user('browser element to locate (Helium)')
        web_el = S(element).web_element
        # return either the element's text or the element itself
        return web_el.text if get_text else web_el
    except Exception as ex:
        print("Error in browser_locate_element_h = "+str(ex))
def browser_locate_elements_h(element="",get_text=False):
    """
    Find the elements by Xpath, id or css selection.
    """
    try:
        if not element:
            element = gui_get_any_input_from_user('browser ElementS to locate (Helium)')
        if get_text:
            # NOTE(review): this passes a single element's .text string into
            # find_all, which looks unintended (find_all expects a locator, not
            # a string) — confirm against helium's find_all API before relying
            # on get_text=True here.
            return find_all(S(element).web_element.text)
        return find_all(S(element).web_element)
    except Exception as ex:
        print("Error in browser_locate_elements_h = "+str(ex))
def browser_wait_until_h(text="",element="t"):
    """
    Wait until a specific element is found.
    """
    try:
        if not text:
            text = gui_get_any_input_from_user("visible text element to Search & Wait for")
        kind = element.lower()
        # wait up to 10 seconds for a text ("t") or button ("b") element
        if kind == "t":
            wait_until(Text(text).exists,10)
        elif kind == "b":
            wait_until(Button(text).exists,10)
    except Exception as ex:
        print("Error in browser_wait_until_h = "+str(ex))
def browser_refresh_page_h():
    """
    Refresh the page.
    """
    try:
        refresh()  # helium's page reload
    except Exception as ex:
        print("Error in browser_refresh_page_h = "+str(ex))
def browser_hit_enter_h():
    """
    Hits enter KEY using Browser Helium Functions
    """
    try:
        press(ENTER)  # sends Enter to the focused browser element
    except Exception as ex:
        print("Error in browser_hit_enter_h="+str(ex))
def browser_quit_h():
    """
    Close the Helium browser.
    """
    try:
        kill_browser()  # terminates the helium-driven browser session
    except Exception as ex:
        print("Error in browser_quit_h = "+str(ex))
#Utility Functions
def dismantle_code(strFunctionName=""):
    """
    This functions dis-assembles given function and shows you column-by-column summary to explain the output of disassembled bytecode.
    Ex: dismantle_code(show_emoji)
    """
    try:
        target = strFunctionName or gui_get_any_input_from_user('Exact function name to dis-assemble. Ex: show_emoji')
        print("Code dismantling {}".format(target))
        # dis.dis prints the disassembly to stdout and returns None
        return dis.dis(target)
    except Exception as ex:
        print("Error in dismantle_code="+str(ex))
def excel_clean_data(excel_path="",sheet_name='Sheet1',header=0,column_to_be_cleaned="",cleaning_pipe_line="Default"):
    """
    Cleans a text column of the given excel sheet and writes the result
    back into a new 'Clean_<column>' column of the same workbook.

    Default pipeline (texthero hero.clean): fillna, lowercase, remove
    digits / diacritics / stop words / extra whitespace. Any other value
    for cleaning_pipe_line applies only fillna + lowercase.
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user('to clean the data')
        if not column_to_be_cleaned:
            col_lst = excel_get_all_header_columns(excel_path, sheet_name, header)
            column_to_be_cleaned = gui_get_dropdownlist_values_from_user('column list to Clean (removes digits/puntuation/stop words etc)',col_lst,multi_select=False)
            column_to_be_cleaned = column_to_be_cleaned[0]
        if column_to_be_cleaned:
            df = pd.read_excel(excel_path,sheet_name=sheet_name,header=header)
            cleaned_col = "Clean_" + column_to_be_cleaned
            if 'Default' in cleaning_pipe_line:
                df[cleaned_col] = df[column_to_be_cleaned].pipe(hero.clean)
            else:
                custom_pipeline = [preprocessing.fillna, preprocessing.lowercase]
                df[cleaned_col] = df[column_to_be_cleaned].pipe(hero.clean,custom_pipeline)
            with pd.ExcelWriter(path=excel_path) as writer:
                df.to_excel(writer,index=False)
            print("Data Cleaned. Please see the output in {}".format(cleaned_col))
    except Exception as ex:
        print("Error in excel_clean_data="+str(ex))
def compute_hash(inputData=""):
    """
    Returns the hash of the inputData
    """
    try:
        if not inputData:
            inputData = gui_get_any_input_from_user('input string to compute Hash')
        # SHA-256 over the UTF-8 encoding, rendered as a hex string
        digest = sha256(inputData.encode())
        return digest.hexdigest()
    except Exception as ex:
        print("Error in compute_hash="+str(ex))
def browser_get_html_text(url=""):
    """
    Function to get HTML text without tags using Beautiful soup
    """
    try:
        if not url:
            url = gui_get_any_input_from_user("website URL to get HTML Text (without tags). Ex: https://www.cloint.com")
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'lxml')
        # strip tags, then collapse all runs of whitespace to single spaces
        return ' '.join(str(soup.text).strip().split())
    except Exception as ex:
        print("Error in browser_get_html_text="+str(ex))
def word_cloud_from_url(url=""):
    """
    Function to create word cloud from a given website
    """
    try:
        page_text = browser_get_html_text(url=url)
        cloud = WordCloud(max_words=2000, width=800, height=600,background_color='white',max_font_size=40, random_state=None, relative_scaling=0)
        cloud.generate(page_text)
        # save the rendered cloud into the BOT's output folder
        out_path = Path(os.path.join(output_folder_path,"URL_WordCloud.png"))
        cloud.to_file(out_path)
        print("URL WordCloud saved at {}".format(out_path))
    except Exception as ex:
        print("Error in word_cloud_from_url="+str(ex))
def excel_describe_data(excel_path="",sheet_name='Sheet1',header=0):
    """
    Describe statistical data for the given excel
    """
    try:
        if not excel_path:
            excel_path, sheet_name, header = gui_get_excel_sheet_header_from_user("to Statistically Describe excel data")
        frame = pd.read_excel(excel_path, sheet_name=sheet_name, header=header)
        # pandas default describe(): summary statistics for numeric columns
        return frame.describe()
    except Exception as ex:
        print("Error in excel_describe_data="+str(ex))
def camera_capture_image(user_name=""):
    """
    With user consent, shows a full-screen webcam preview with a 5-second
    on-screen countdown, then saves one frame as <user_name>.PNG in the
    BOT's output folder.
    """
    try:
        user_consent = gui_get_consent_from_user("turn ON camera & take photo ?")
        if user_consent == 'Yes':
            SECONDS = 5
            TIMER = int(SECONDS)
            window_name = "ClointFusion"
            # camera index 0 = default system webcam
            cap = cv2.VideoCapture(0)
            if not cap.isOpened():
                print("Error in opening camera")
            cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            font = cv2.FONT_HERSHEY_SIMPLEX
            if not user_name:
                user_name = gui_get_any_input_from_user("your name")
            while True:
                ret, img = cap.read()
                cv2.imshow(window_name, img)
                prev = time.time()
                # NOTE(review): .format() here has no {} placeholder in the
                # string, so TIMER is never interpolated — confirm intent.
                text = "Taking selfie in 5 second(s)".format(str(TIMER))
                textsize = cv2.getTextSize(text, font, 1, 2)[0]
                print(str(textsize))
                # centre the countdown text on the frame
                textX = int((img.shape[1] - textsize[0]) / 2)
                textY = int((img.shape[0] + textsize[1]) / 2)
                # countdown loop: redraw the overlay ~8x/sec, decrement once/sec
                while TIMER >= 0:
                    ret, img = cap.read()
                    cv2.putText(img, "Saving image in {} second(s)".format(str(TIMER)),
                                        (textX, textY ), font,
                                        1, (255, 0, 255),
                                        2)
                    cv2.imshow(window_name, img)
                    cv2.waitKey(125)
                    cur = time.time()
                    if cur-prev >= 1:
                        prev = cur
                        TIMER = TIMER-1
                # countdown finished: grab the final frame and persist it
                ret, img = cap.read()
                cv2.imshow(window_name, img)
                cv2.waitKey(1000)
                file_path = os.path.join(output_folder_path,user_name + ".PNG")
                file_path = Path(file_path)
                cv2.imwrite(file_path, img)
                print("Image saved at {}".format(file_path))
                cap.release()
                cv2.destroyAllWindows()
                break
        else:
            print("Operation cancelled by user")
    except Exception as ex:
        print("Error in camera_capture_image="+str(ex))
def convert_csv_to_excel(csv_path="",sep=""):
    """
    Function to convert CSV to Excel
    Ex: convert_csv_to_excel()

    The workbook is written to the BOT's output folder using the CSV's
    base name with an .xlsx extension.
    """
    try:
        if not csv_path:
            csv_path = gui_get_any_file_from_user("CSV to convert to EXCEL","csv")
        if not sep:
            sep = gui_get_any_input_from_user("Delimeter Ex: |")
        csv_file_name = _extract_filename_from_filepath(csv_path)
        excel_file_path = Path(os.path.join(output_folder_path, csv_file_name + ".xlsx"))
        df = pd.read_csv(csv_path, sep=sep)
        # BUGFIX: use ExcelWriter as a context manager — writer.save() is
        # deprecated and was removed in pandas 2.0, which broke this function.
        with pd.ExcelWriter(excel_file_path) as writer:
            df.to_excel(writer, sheet_name='Sheet1', index=False)
        print("Excel file saved : "+str(excel_file_path))
    except Exception as ex:
        print("Error in convert_csv_to_excel="+str(ex))
# Class related to capture_snip_now
class CaptureSnip(QtWidgets.QWidget):
    """
    Full-screen translucent Qt overlay that lets the user drag-select a
    rectangle; on mouse release the region is grabbed with PIL.ImageGrab
    and saved as a numbered *_snip.PNG in the BOT's image folder.
    """
    def __init__(self):
        super().__init__()
        # tkinter is used only to query the physical screen size
        root = tk.Tk()
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        self.setGeometry(0, 0, screen_width, screen_height)
        self.setWindowTitle(' ')
        # begin/end track the drag rectangle's corners
        self.begin = QtCore.QPoint()
        self.end = QtCore.QPoint()
        self.setWindowOpacity(0.3)
        QtWidgets.QApplication.setOverrideCursor(
            QtGui.QCursor(QtCore.Qt.CrossCursor)
        )
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        print('Capture now...')
        self.show()
    def paintEvent(self, event):
        """Draw the current selection rectangle over the overlay."""
        qp = QtGui.QPainter(self)
        qp.setPen(QtGui.QPen(QtGui.QColor('black'), 3))
        qp.setBrush(QtGui.QColor(128, 128, 255, 128))
        qp.drawRect(QtCore.QRect(self.begin, self.end))
    def mousePressEvent(self, event):
        """Start a new selection at the press position."""
        self.begin = event.pos()
        self.end = self.begin
        self.update()
    def mouseMoveEvent(self, event):
        """Grow the selection while the mouse is dragged."""
        self.end = event.pos()
        self.update()
    def mouseReleaseEvent(self, event):
        """Close the overlay, grab the selected region and save it."""
        self.close()
        # normalise the corners so the bbox is valid for any drag direction
        x1 = min(self.begin.x(), self.end.x())
        y1 = min(self.begin.y(), self.end.y())
        x2 = max(self.begin.x(), self.end.x())
        y2 = max(self.begin.y(), self.end.y())
        img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
        # continuous numbering: next index = count of files already present
        file_num = str(len(os.listdir(img_folder_path)))
        file_name = os.path.join(img_folder_path,file_num + "_snip.PNG" )
        file_name = Path(file_name)
        print("Snip saved at " + str(file_name))
        img.save(file_name)
def capture_snip_now():
    """
    Captures the snip and stores in Image Folder of the BOT by giving continous numbering
    Ex: capture_snip_now()
    """
    app = ""
    try:
        # 3-second visual countdown before the overlay appears
        if message_counter_down_timer("Capturing snip in (seconds)",3):
            app = QtWidgets.QApplication(sys.argv)
            window = CaptureSnip()
            window.activateWindow()
            app.aboutToQuit.connect(app.deleteLater)
            # NOTE(review): sys.exit raises SystemExit, which is NOT an
            # Exception subclass, so it propagates past the handler below —
            # confirm callers expect this function to exit the interpreter.
            sys.exit(app.exec_())
    except Exception as ex:
        print("Error in capture_snip_now="+str(ex))
        # best-effort second run of the event loop; when app was never
        # created (still the "" sentinel) this raises and is swallowed
        try:
            sys.exit(app.exec_())
        except:
            pass
def ON_semi_automatic_mode():
    """
    This function sets semi_automatic_mode as True => ON
    """
    global enable_semi_automatic_mode
    # the mode is persisted as a text flag in the BOT's config folder
    config_file = Path(os.path.join(config_folder_path,"Semi_Automatic_Mode.txt"))
    try:
        with open(config_file, 'w') as f:
            f.write('True')
        enable_semi_automatic_mode = True
        print("Semi Automatic Mode is ENABLED "+ show_emoji())
    except Exception as ex:
        print("Error in ON_semi_automatic_mode="+str(ex))
def OFF_semi_automatic_mode():
    """
    This function sets semi_automatic_mode as False => OFF
    """
    global enable_semi_automatic_mode
    # the mode is persisted as a text flag in the BOT's config folder
    config_file = Path(os.path.join(config_folder_path,"Semi_Automatic_Mode.txt"))
    try:
        with open(config_file, 'w') as f:
            f.write('False')
        enable_semi_automatic_mode = False
        print("Semi Automatic Mode is DISABLED "+ show_emoji())
    except Exception as ex:
        print("Error in OFF_semi_automatic_mode="+str(ex))
def _init_cf_quick_test_log_file(log_path_arg):
    """
    Internal function to generates the log and saves it to the file in the given base directory.

    Creates a timestamped .txt log inside log_path_arg and points the root
    logger at it, then records host/public IP in the log header.
    """
    global log_path
    log_path = log_path_arg
    from pif import get_public_ip
    try:
        # timestamp sanitised for use as a file name (no spaces/colons)
        dt_tm= str(datetime.datetime.now())
        dt_tm = dt_tm.replace(" ","_")
        dt_tm = dt_tm.replace(":","-")
        dt_tm = dt_tm.split(".")[0]
        log_path = Path(os.path.join(log_path, str(dt_tm) + ".txt"))
        # drop any handlers a previous basicConfig attached, so the new
        # file handler actually takes effect
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(filename=log_path, level=logging.INFO, format='%(asctime)s : %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
    except Exception as ex:
        print("ERROR in _init_log_file="+str(ex))
    finally:
        # NOTE(review): this finally block does a DNS lookup and a network
        # call (get_public_ip) and runs even when setup above failed —
        # confirm that is intentional.
        host_ip = socket.gethostbyname(socket.gethostname())
        logging.info("{} ClointFusion Self Testing initiated".format(os_name))
        logging.info("{}/{}".format(host_ip,str(get_public_ip())))
def _download_cloint_quick_test_png():
    """
    Internal function to download ClointFusion ICON from GitHub
    """
    try:
        # (url, destination) pairs; each is fetched only when missing locally
        downloads = (
            ('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Cloint-ICON.ico', cf_icon_file_path),
            ('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Cloint-LOGO.PNG', cf_logo_file_path),
        )
        for url, dest in downloads:
            if not os.path.exists(dest):
                urllib.request.urlretrieve(url, dest)
    except Exception as ex:
        print("Error while downloading Cloint ICOn/LOGO = "+str(ex))
def _rerun_clointfusion_first_run(ex):
    """
    Internal recovery hook: alerts the user to re-run, then rewinds the
    'clointfusion_self_test' month marker by one so the monthly self test
    is re-triggered on the next start.
    """
    pg.alert("Please Re-run..."+str(ex))
    _,last_updated_date_file = is_execution_required_today('clointfusion_self_test',execution_type="M",save_todays_date_month=False)
    with open(last_updated_date_file, 'w',encoding="utf-8") as f:
        # NOTE(review): in January this writes 0, which is not a valid month
        # number — confirm is_execution_required_today tolerates that value.
        last_updated_on_date = int(datetime.date.today().strftime('%m')) - 1
        f.write(str(last_updated_on_date))
def clointfusion_self_test_cases(user_chosen_test_folder):
    """
    Main function for Self Test, which is called by GUI.

    Exercises the folder, window (Windows only), string, keyboard, excel,
    screen-scraping, mouse and browser (Helium) helper functions end to end,
    printing and logging the outcome of each section.  Each section runs in
    its own try/except so one failure does not stop the remaining sections.

    Args:
        user_chosen_test_folder: base directory under which the
            "ClointFusion_Self_Tests" working folder is created.

    Returns:
        True — always (returned from the ``finally`` block).
    """
    global os_name
    # Reference screenshots used for image-based UI element searches.
    chrome_close_PNG_1 = temp_current_working_dir / "Chrome-Close_1.PNG"
    chrome_close_PNG_2 = temp_current_working_dir / "Chrome-Close_2.PNG"
    chrome_close_PNG_3 = temp_current_working_dir / "Chrome-Close_3.PNG"
    twenty_PNG_1 = temp_current_working_dir / "Twenty_1.PNG"
    twenty_PNG_2 = temp_current_working_dir / "Twenty_2.PNG"
    twenty_PNG_3 = temp_current_working_dir / "Twenty_3.PNG"
    # Download any missing reference screenshots from GitHub.
    if not os.path.exists(chrome_close_PNG_1):
        urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Chrome-Close_1.png',chrome_close_PNG_1)
    if not os.path.exists(chrome_close_PNG_2):
        urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Chrome-Close_2.png',chrome_close_PNG_2)
    if not os.path.exists(chrome_close_PNG_3):
        urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Chrome-Close_3.png',chrome_close_PNG_3)
    if not os.path.exists(twenty_PNG_1):
        urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Twenty_1.png',twenty_PNG_1)
    if not os.path.exists(twenty_PNG_2):
        urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Twenty_2.png',twenty_PNG_2)
    if not os.path.exists(twenty_PNG_3):
        urllib.request.urlretrieve('https://raw.githubusercontent.com/ClointFusion/Image_ICONS_GIFs/main/Twenty_3.png',twenty_PNG_3)
    test_folder_path = Path(os.path.join(user_chosen_test_folder,"ClointFusion_Self_Tests"))
    test_run_excel_path = Path(os.path.join(test_folder_path,'Quick_Self_Test_Excel.xlsx'))
    # NOTE(review): the three re-wraps below are redundant (already Path /
    # built from Path inputs) but harmless; kept as-is.
    user_chosen_test_folder = Path(user_chosen_test_folder)
    test_folder_path = Path(test_folder_path)
    test_run_excel_path = Path(test_run_excel_path)
    try:
        message_pop_up('Importing ClointFusion')
        print('Importing ClointFusion')
        print()
        print('ClointFusion imported successfully '+ show_emoji())
        print("____________________________________________________________")
        print()
        logging.info('ClointFusion imported successfully')
        # --- Workspace sub-folder creation ---
        try:
            base_dir = Path(user_chosen_test_folder)
            folder_create(base_dir)
            print('Test folder location {}'.format(base_dir))
            logging.info('Test folder location {}'.format(base_dir))
            img_folder_path = os.path.join(base_dir, "Images")
            batch_file_path = os.path.join(base_dir, "Batch_File")
            config_folder_path = os.path.join(base_dir, "Config_Files")
            output_folder_path = os.path.join(base_dir, "Output")
            error_screen_shots_path = os.path.join(base_dir, "Error_Screenshots")
            try:
                print('Creating sub folders viz. img/batch/config/output/error_screen_shot at {}'.format(base_dir))
                folder_create(img_folder_path)
                folder_create(batch_file_path)
                folder_create(config_folder_path)
                folder_create(error_screen_shots_path)
                folder_create(output_folder_path)
            except Exception as ex:
                print('Unable to create basic sub-folders for img/batch/config/output/error_screen_shot=' + str(ex))
                logging.info('Unable to create basic sub-folders for img/batch/config/output/error_screen_shot')
            print()
            print('ClointFusion Self Testing Initiated '+show_emoji())
            logging.info('ClointFusion Self Testing Initiated')
        except Exception as ex:
            print('Error while creating sub-folders='+str(ex))
            logging.info('Error while creating sub-folders='+str(ex))
        # --- Folder operations ---
        try:
            print()
            print('Testing folder operations')
            folder_create(Path(os.path.join(test_folder_path,"My Test Folder")))
            folder_create_text_file(test_folder_path, "My Text File")
            excel_create_excel_file_in_given_folder(test_folder_path,'Quick_Self_Test_Excel')
            excel_create_excel_file_in_given_folder(test_folder_path,'My Excel-1')
            excel_create_excel_file_in_given_folder(test_folder_path,'My Excel-2')
            try:
                # Create two throw-away workbooks, then bulk-delete them by extension.
                excel_create_excel_file_in_given_folder(os.path.join(test_folder_path,"Delete Excel"),'Delete-Excel-1')
                excel_create_excel_file_in_given_folder(os.path.join(test_folder_path,"Delete Excel"),'Delete-Excel-2')
                folder_delete_all_files(os.path.join(test_folder_path,'Delete Excel'), "xlsx")
            except Exception as ex:
                print('Unable to delete files in test folder='+str(ex))
                logging.info('Unable to delete files in test folder='+str(ex))
            folder_create(Path(test_folder_path / 'Split_Merge'))
            print(folder_get_all_filenames_as_list(test_folder_path))
            print(folder_get_all_filenames_as_list(test_folder_path, extension="xlsx"))
            print('Folder operations tested successfully '+show_emoji())
            print("____________________________________________________________")
            logging.info('Folder operations tested successfully')
        except Exception as ex:
            print('Error while testing Folder operations='+str(ex))
            logging.info('Error while testing Folder operations='+str(ex))
        # --- Window operations (Windows-only helpers) ---
        if os_name == 'windows':
            try:
                print()
                print('Testing window based operations')
                window_show_desktop()
                launch_any_exe_bat_application(test_run_excel_path)
                window_minimize_windows('Quick_Self_Test_Excel')
                window_activate_and_maximize_windows('Quick_Self_Test_Excel')
                window_close_windows('Quick_Self_Test_Excel')
                print(window_get_all_opened_titles_windows())
                print('Window based operations tested successfully '+show_emoji())
                print("____________________________________________________________")
                logging.info('Window based operations tested successfully')
            except Exception as ex:
                print('Error while testing window based operations='+str(ex))
                logging.info('Error while testing window based operations='+str(ex))
        else:
            print('Skipping window operations as it is Windows OS specific')
            logging.info('Skipping window operations as it is Windows OS specific')
        # --- String / date helpers ---
        try:
            print()
            print('Testing String Operations')
            print(string_remove_special_characters("C!@loin#$tFu*(sion"))
            print(string_extract_only_alphabets(inputString="C1l2o#%^int&*Fus12i5on"))
            print(string_extract_only_numbers("C1l2o3i4n5t6F7u8i9o0n"))
            print(date_convert_to_US_format("31-01-2021"))
            print('String operations tested successfully '+show_emoji())
            print("____________________________________________________________")
            logging.info('String operations tested successfully')
        except Exception as ex:
            print('Error while testing string operations='+str(ex))
            logging.info('Error while testing string operations='+str(ex))
        # --- Keyboard operations (types into a scratch text editor) ---
        try:
            print()
            print('Testing keyboard operations')
            if os_name == 'windows':
                launch_any_exe_bat_application("notepad")
            else:
                launch_any_exe_bat_application("gedit") #Ubuntu / macOS ?
            if os_name == 'windows':
                key_write_enter("Performing ClointFusion Self Test for Notepad")
                key_hit_enter()
                # alt+f4 closes Notepad, 'n' answers "Don't save".
                key_press('alt+f4,n')
            else:
                pg.write("Performing ClointFusion Self Test for Text Editor / GEDIT")
                pg.press('enter')
                pg.hotkey('alt','f4')
                time.sleep(2)
                # alt+w dismisses gedit's "save changes?" dialog without saving.
                pg.hotkey('alt','w')
            message_counter_down_timer("Starting Keyboard Operations in (seconds)",3)
            print('Keyboard operations tested successfully '+show_emoji())
            print("____________________________________________________________")
            logging.info('Keyboard operations tested successfully')
        except Exception as ex:
            print('Error in keyboard operations='+str(ex))
            logging.info('Error in keyboard operations='+str(ex))
        # Best-effort: close whatever window is still focused.
        try:
            key_press('alt+f4')
        except:
            pg.hotkey('alt','f4')
        message_counter_down_timer("Starting Excel Operations in (seconds)",3)
        # --- Excel operations ---
        try:
            print()
            print('Testing excel operations')
            excel_create_excel_file_in_given_folder(test_folder_path, "Test_Excel_File", "Test_Sheet")
            print(excel_get_row_column_count(test_run_excel_path))
            excel_create_excel_file_in_given_folder(test_folder_path,excelFileName="Excel_Test_Data")
            test_excel_path = test_folder_path / "Excel_Test_Data.xlsx"
            # Populate a small Name/Age table used by the operations below.
            excel_set_single_cell(test_excel_path,columnName="Name",cellNumber=0,setText="A")
            excel_set_single_cell(test_excel_path,columnName="Name",cellNumber=1,setText="B")
            excel_set_single_cell(test_excel_path,columnName="Name",cellNumber=2,setText="C")
            excel_set_single_cell(test_excel_path,columnName="Name",cellNumber=3,setText="D")
            excel_set_single_cell(test_excel_path,columnName="Name",cellNumber=4,setText="E")
            excel_set_single_cell(test_excel_path,columnName="Age",cellNumber=0,setText="1")
            excel_set_single_cell(test_excel_path,columnName="Age",cellNumber=1,setText="2")
            excel_set_single_cell(test_excel_path,columnName="Age",cellNumber=2,setText="4")
            excel_set_single_cell(test_excel_path,columnName="Age",cellNumber=3,setText="3")
            excel_set_single_cell(test_excel_path,columnName="Age",cellNumber=4,setText="5")
            print(excel_get_single_cell(test_excel_path,sheet_name='Sheet1',columnName='Name'))
            excel_create_file(test_folder_path,"My New Paste Excel")
            excel_create_excel_file_in_given_folder(test_folder_path,'My Excel-3','CF-Sheet-1')
            excel_file_path = test_folder_path / 'My Excel-3.xlsx'
            print(excel_get_all_sheet_names(excel_file_path))
            print(excel_get_all_sheet_names(test_run_excel_path))
            # Copy/paste a range, then exercise split/merge/sort/dedupe/vlookup.
            excel_copied_Data=excel_copy_range_from_sheet(test_excel_path, sheet_name="Sheet1", startCol=1, startRow=1, endCol=2, endRow=6)
            print(excel_copied_Data)
            excel_copy_paste_range_from_to_sheet(Path(os.path.join(test_folder_path,"My New Paste Excel.xlsx")), sheet_name="Sheet1", startCol=1, startRow=1, endCol=2, endRow=6, copiedData=excel_copied_Data)
            excel_split_by_column(excel_path=Path(os.path.join(test_folder_path,"My New Paste Excel.xlsx")), sheet_name="Sheet1", header=0, columnName="Name")
            folder_create(Path(test_folder_path / 'Split_Merge'))
            excel_split_the_file_on_row_count(excel_path=Path(test_folder_path / "My New Paste Excel.xlsx"), sheet_name="Sheet1", rowSplitLimit=1, outputFolderPath=os.path.join(test_folder_path,'Split_Merge'), outputTemplateFileName="Split")
            excel_merge_all_files(input_folder_path=test_folder_path / "Split_Merge", output_folder_path=Path(test_folder_path,'Split_Merge'))
            excel_drop_columns(Path(test_folder_path / "My New Paste Excel.xlsx"), columnsToBeDropped ="Age")
            excel_sort_columns(excel_path=test_excel_path, sheet_name="Sheet1", header=0, firstColumnToBeSorted="Age", secondColumnToBeSorted="Name")
            excel_clear_sheet(Path(test_folder_path / "My New Paste Excel.xlsx"), sheet_name="Sheet1", header=0)
            # Add a duplicate "E" row so excel_remove_duplicates has work to do.
            excel_set_single_cell(test_excel_path,columnName="Name",cellNumber=5,setText="E")
            excel_set_single_cell(test_excel_path,columnName="Age",cellNumber=5,setText="5")
            excel_remove_duplicates(excel_path=test_excel_path, sheet_name="Sheet1", header=0,columnName="Name", which_one_to_keep="first")
            excel_create_file(test_folder_path,"My VLookUp Excel")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Name",cellNumber=0,setText="A")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Name",cellNumber=1,setText="B")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Name",cellNumber=2,setText="C")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Name",cellNumber=3,setText="D")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Name",cellNumber=4,setText="E")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Salary",cellNumber=0,setText="1")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Salary",cellNumber=1,setText="2")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Salary",cellNumber=2,setText="4")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Salary",cellNumber=3,setText="3")
            excel_set_single_cell(Path(test_folder_path,"My VLookUp Excel.xlsx"),columnName="Salary",cellNumber=4,setText="5")
            excel_vlook_up(filepath_1=test_excel_path,filepath_2=Path(test_folder_path,"My VLookUp Excel.xlsx"),match_column_name="Name")
            print('Excel operations tested successfully '+show_emoji())
            print("____________________________________________________________")
            logging.info('Excel operations tested successfully')
        except Exception as ex:
            print("Error while testing Excel Operations="+str(ex))
            logging.info("Error while testing Excel Operations="+str(ex))
        message_counter_down_timer("Starting Screen Scraping Operations in (seconds)",3)
        # --- Screen scraping (image search on a live web page) ---
        try:
            print()
            print("Testing screen-scraping functions")
            webbrowser.open('https://sites.google.com/view/clointfusion-hackathon')
            message_counter_down_timer("Waiting for page to load in (seconds)",5)
            pos=mouse_search_snip_return_coordinates_x_y(str(twenty_PNG_3),conf=0.5,wait=5)
            print(pos)
            pos=mouse_search_snip_return_coordinates_x_y(str(twenty_PNG_2),conf=0.5,wait=5)
            print(pos)
            pos=mouse_search_snip_return_coordinates_x_y(str(twenty_PNG_1),conf=0.5,wait=5)
            print(pos)
            pos=mouse_search_snips_return_coordinates_x_y([str(twenty_PNG_1),str(twenty_PNG_2),str(twenty_PNG_3)],conf=0.5,wait=10)
            print(pos)
            folder_create(os.path.join(test_folder_path,'Screen_scrape'))
            scrape_save_contents_to_notepad(test_folder_path / 'Screen_scrape')
            print("Screen-scraping functions tested successfully "+ show_emoji())
            print("____________________________________________________________")
            logging.info("Screen-scraping functions tested successfully")
        except Exception as ex:
            print('Error while testing screenscraping functions='+str(ex))
            logging.info('Error while testing screenscraping functions='+str(ex))
        # --- Mouse operations ---
        try:
            print()
            print("Testing mouse operations")
            mouse_move(850,600)
            print(mouse_get_color_by_position((800,500)))
            time.sleep(2)
            mouse_drag_from_to(600,510,1150,680)
            message_counter_down_timer("Testing Mouse Operations in (seconds)",3)
            search_highlight_tab_enter_open("chat.whatsapp")
            # Try to click the browser's close button twice (any of 3 snapshots).
            pos = mouse_search_snips_return_coordinates_x_y([str(chrome_close_PNG_1),str(chrome_close_PNG_2),str(chrome_close_PNG_3)],conf=0.8,wait=3)
            print(pos)
            if pos is not None:
                mouse_click(*pos)
            pos = mouse_search_snips_return_coordinates_x_y([str(chrome_close_PNG_1),str(chrome_close_PNG_2),str(chrome_close_PNG_3)],conf=0.8,wait=3)
            print(pos)
            if pos is not None:
                mouse_click(*pos)
            mouse_click(int(pg.size()[0]/2),int(pg.size()[1]/2)) #Click at center of the screen
            print('Mouse operations tested successfully ' + show_emoji())
            print("____________________________________________________________")
            logging.info('Mouse operations tested successfully')
        except Exception as ex:
            print('Error in mouse operations='+str(ex))
            logging.info('Error in mouse operations='+str(ex))
        key_press('ctrl+w')
        message_counter_down_timer("Calling Helium Functions in (seconds)",3)
        # --- Browser automation via Helium wrappers ---
        try:
            print()
            print("Testing Browser's Helium functions")
            launch_website_h("https://pypi.org")
            browser_write_h("ClointFusion",User_Visible_Text_Element="Search projects")
            browser_hit_enter_h()
            browser_mouse_click_h("ClointFusion 0.")
            browser_mouse_double_click_h("RPA")
            browser_mouse_click_h("Open in Colab")
            browser_quit_h()
            print("Tested Browser's Helium functions successfully " + show_emoji())
            print("____________________________________________________________")
            logging.info("Tested Browser's Helium functions successfully")
        except Exception as ex:
            print("Error while Testing Browser Helium functions="+str(ex))
            logging.info("Error while Testing Browser Helium functions="+str(ex))
        key_press('ctrl+w') #to close any open browser
        message_counter_down_timer("Almost Done... Please Wait... (in seconds)",3)
        # --- Final flash message ---
        try:
            print("____________________________________________________________")
            print()
            print("Congratulations - ClointFusion is compatible with your computer " + show_emoji('clap') + show_emoji('clap'))
            message_pop_up("Congratulations !!!\n\nClointFusion is compatible with your computer settings")
            logging.info("Flash message tested successfully")
        except Exception as ex:
            print("Error while testing Flash message="+str(ex))
            logging.info("Error while testing Flash message="+str(ex))
    except Exception as ex:
        print("ClointFusion Automated Testing Failed "+str(ex))
        logging.info("ClointFusion Automated Testing Failed "+str(ex))
    finally:
        # Clear the "self test running" marker file.
        _folder_write_text_file(Path(os.path.join(current_working_dir,'Running_ClointFusion_Self_Tests.txt')),str(False))
        print("____________________________________________________________")
        print("____________________________________________________________")
        print()
        print("ClointFusion Self Testing Completed")
        logging.info("ClointFusion Self Testing Completed")
        # NOTE(review): returning from ``finally`` suppresses any in-flight
        # exception, so this function always reports success.
        return True
def clointfusion_self_test():
    """
    First-run GUI wrapper around :func:`clointfusion_self_test_cases`.

    Collects the user's name/e-mail/role, runs the automated self-test, then
    on Close uploads the captured log (plus host info and elapsed time) via a
    pre-filled Google Form URL.  Always terminates the process via
    ``sys.exit(0)`` in the ``finally`` block.
    """
    global os_name
    strEmail = ""
    start_time = time.monotonic()
    try:
        layout = [ [sg.Text("ClointFusion's First Run Setup",justification='c',font='Courier 18',text_color='orange')],
            [sg.T("Please enter your name",text_color='white'),sg.In(key='-NAME-',text_color='blue')],
            [sg.T("Please enter your email",text_color='white'),sg.In(key='-EMAIL-',text_color='blue')],
            [sg.T("I am",text_color='white'),sg.Combo(values=['Student','Hobbyist','Professor','Professional','Others'], size=(20, 20), key='-ROLE-',text_color='blue')],
            [sg.Text("We will be collecting & using ClointFusion's Self Test Report, to improve ClointFusion",justification='c',text_color='green',font='Courier 12')],
            [sg.Text('Its highly recommended to close all open files/folders/browsers before running this self test',size=(0, 1),justification='l',text_color='red',font='Courier 12')],
            [sg.Text('This Automated Self Test, takes around 4-5 minutes...Kindly do not move the mouse or type anything.',size=(0, 1),justification='l',text_color='red',font='Courier 12')],
            [sg.Output(size=(140,20), key='-OUTPUT-')],
            [sg.Button('Start',bind_return_key=True,button_color=('white','green'),font='Courier 14'), sg.Button('Close',button_color=('white','firebrick'),font='Courier 14')] ]
        # Windows allows disabling the minimize button; other platforms don't.
        if os_name == 'windows':
            window = sg.Window('Welcome to ClointFusion - Made in India with LOVE', layout, return_keyboard_events=True,use_default_focus=False,disable_minimize=True,grab_anywhere=False, disable_close=False,element_justification='c',keep_on_top=False,finalize=True,icon=cf_icon_file_path)
        else:
            window = sg.Window('Welcome to ClointFusion - Made in India with LOVE', layout, return_keyboard_events=True,use_default_focus=False,disable_minimize=False,grab_anywhere=False, disable_close=False,element_justification='c',keep_on_top=False,finalize=True,icon=cf_icon_file_path)
        while True:
            event, values = window.read()
            if event == 'Start':
                # Validate the e-mail; a failure leaves strEmail empty so the
                # "enter all the values" branch below fires.
                try:
                    if values['-EMAIL-']:
                        valid = validate_email(str(values['-EMAIL-']))
                        strEmail = valid.email
                except EmailNotValidError as e:
                    pg.alert("Sorry, "+str(e))
                if strEmail and values['-NAME-'] and values['-ROLE-']:
                    # Lock the form while the (long-running) self test executes.
                    window['Start'].update(disabled=True)
                    window['Close'].update(disabled=True)
                    window['-NAME-'].update(disabled=True)
                    window['-EMAIL-'].update(disabled=True)
                    window['-ROLE-'].update(disabled=True)
                    _folder_write_text_file(os.path.join(current_working_dir,'Running_ClointFusion_Self_Tests.txt'),str(True))
                    print("Starting ClointFusion's Automated Self Testing Module")
                    print('This may take several minutes to complete...')
                    print('During this test, some excel file, notepad, browser etc may be opened & closed automatically')
                    print('Please sitback & relax till all the test-cases are run...')
                    print()
                    _init_cf_quick_test_log_file(temp_current_working_dir)
                    if clointfusion_self_test_cases(temp_current_working_dir):
                        window['Close'].update(disabled=False)
                else:
                    pg.alert("Please enter all the values")
            if event in (sg.WIN_CLOSED, 'Close'):
                file_contents = ''
                try:
                    with open(log_path,encoding="utf-8") as f:
                        file_contents = f.readlines()
                except:
                    file_contents = 'Unable to read the file'
                if file_contents and file_contents != 'Unable to read the file':
                    from datetime import timedelta
                    time_taken= timedelta(seconds=time.monotonic() - start_time)
                    my_ip = "HN:{}".format(socket.gethostname()) + ",IP:" + str(socket.gethostbyname(socket.gethostname())) + "/" + str(get_public_ip())
                    my_id = values['-NAME-'] + ";" + strEmail + ";" + values['-ROLE-']
                    # NOTE(review): os_name (a module global) is permanently
                    # mutated here to carry the report metadata.
                    os_name = str(os_name) + ";" + str(my_ip) + ";" + str(my_id)
                    # Pre-filled Google Form submission carrying the log contents.
                    URL = 'https://docs.google.com/forms/d/e/1FAIpQLSehRuz_RWJDcqZMAWRPMOfV7CVZB7PjFruXZtQKXO1Q81jOgw/formResponse?usp=pp_url&entry.1012698071={}&entry.705740227={}&submit=Submit'.format(os_name + ";" + str(time_taken),file_contents)
                    webbrowser.open(URL)
                    message_counter_down_timer("Closing browser (in seconds)",10)
                    #Ensure to close all browser if left open by this self test
                    time.sleep(2)
                    try:
                        key_press('alt+f4')
                    except:
                        pg.hotkey('alt','f4')
                    time.sleep(2)
                # Record that the monthly self test ran, then leave the loop.
                is_execution_required_today('clointfusion_self_test',execution_type="M",save_todays_date_month=True)
                break
    except Exception as ex:
        pg.alert('Error in Clointfusion Self Test = '+str(ex))
        _rerun_clointfusion_first_run(str(ex))
    finally:
        print('Thank you !')
        sys.exit(0)
# 4. All default services
# All new functions to be added before this line
# ########################
# ClointFusion's DEFAULT SERVICES
#
# Module import-time behavior: show the welcome banner, then either run the
# (monthly) first-run self test, or set up the user's BOT workspace folders.
_welcome_to_clointfusion()
EXECUTE_SELF_TEST_NOW,last_updated_date_file = is_execution_required_today('clointfusion_self_test',execution_type="M")
if EXECUTE_SELF_TEST_NOW :
    try:
        # NOTE: clointfusion_self_test() calls sys.exit(0) in its finally
        # block, so execution never continues past this branch.
        clointfusion_self_test()
    except Exception as ex:
        print("Error in Self Test="+str(ex))
        _rerun_clointfusion_first_run(str(ex))
else:
    base_dir = gui_get_folder_path_from_user('Workspace Folder')
    if base_dir:
        base_dir = os.path.join(base_dir,"ClointFusion_BOT")
        base_dir = Path(base_dir)
        _set_bot_name()
        _download_cloint_ico_png()
        folder_create(base_dir)
        # Standard workspace layout under <chosen folder>/ClointFusion_BOT.
        log_path = Path(os.path.join(base_dir, "Logs"))
        img_folder_path = Path(os.path.join(base_dir, "Images"))
        batch_file_path = Path(os.path.join(base_dir, "Batch_File"))
        config_folder_path = Path(os.path.join(base_dir, "Config_Files"))
        output_folder_path = Path(os.path.join(base_dir, "Output"))
        error_screen_shots_path = Path(os.path.join(base_dir, "Error_Screenshots"))
        status_log_excel_filepath = Path(os.path.join(base_dir,"StatusLogExcel"))
        folder_create(log_path)
        folder_create(img_folder_path)
        folder_create(batch_file_path)
        folder_create(config_folder_path)
        folder_create(error_screen_shots_path)
        folder_create(output_folder_path)
        _init_log_file()
        update_log_excel_file(bot_name +'- BOT initiated')
        _ask_user_semi_automatic_mode()
    else:
        pg.alert('Please re-run & select the Workspace Folder')
        sys.exit(0)
# assumes the package check applies on every non-exiting start-up —
# TODO confirm original nesting (whitespace was lost in extraction).
if os_name == 'windows':
    _load_missing_python_packages_windows()
# ########################
# NOTE(review): these filters only apply inside this (empty) with-block and
# are reverted on exit — presumably intended to silence import-time warnings.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    warnings.filterwarnings("ignore", category=DeprecationWarning)
| 39.910894
| 348
| 0.621817
|
4a043dd92f3a6dd8c1e0aef19a71dbbe07234bd2
| 598
|
py
|
Python
|
python-pass.py
|
R1vEnCS/GIZ-pass-python
|
9e90d911febe1f74c72d8006f762f0b994a1597f
|
[
"MIT"
] | null | null | null |
python-pass.py
|
R1vEnCS/GIZ-pass-python
|
9e90d911febe1f74c72d8006f762f0b994a1597f
|
[
"MIT"
] | null | null | null |
python-pass.py
|
R1vEnCS/GIZ-pass-python
|
9e90d911febe1f74c72d8006f762f0b994a1597f
|
[
"MIT"
] | null | null | null |
class Solution:
    """LeetCode 5 — Longest Palindromic Substring."""

    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of *s*.

        Fixes two defects in the original:
          * ``s is ""`` compared identity with a literal (unreliable and a
            SyntaxWarning) — replaced by a truthiness check;
          * the brute-force scan of every substring was O(n^3) time and
            O(n^2) space — replaced by expand-around-center, which is
            O(n^2) time and O(1) extra space.

        Returns "" for the empty string; on ties the earliest (left-most)
        longest palindrome wins, matching the original's behavior.
        """
        if not s:
            return ""

        def _expand(lo, hi):
            # Grow outward while s[lo:hi+1] remains a palindrome and the
            # window stays in bounds; return the final inclusive bounds.
            while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
                lo -= 1
                hi += 1
            return lo + 1, hi - 1

        best_lo, best_hi = 0, 0
        for center in range(len(s)):
            # Odd-length palindromes centered at `center`, then even-length
            # palindromes centered between `center` and `center + 1`.
            for lo, hi in (_expand(center, center), _expand(center, center + 1)):
                if hi - lo > best_hi - best_lo:
                    best_lo, best_hi = lo, hi
        return s[best_lo:best_hi + 1]
| 27.181818
| 53
| 0.414716
|
4a043e70e5ca36bffe125eff8339b3e63530466c
| 9,314
|
py
|
Python
|
testing/test_remote.py
|
ohmu/pytest-xdist
|
aac3941500e5502b392662611448c5165e2506c8
|
[
"MIT"
] | null | null | null |
testing/test_remote.py
|
ohmu/pytest-xdist
|
aac3941500e5502b392662611448c5165e2506c8
|
[
"MIT"
] | null | null | null |
testing/test_remote.py
|
ohmu/pytest-xdist
|
aac3941500e5502b392662611448c5165e2506c8
|
[
"MIT"
] | null | null | null |
import py
from xdist.slavemanage import SlaveController, unserialize_report
from xdist.remote import serialize_report
import execnet
# py2/py3 compatibility: bind the stdlib queue module under either name.
queue = py.builtin._tryimport("queue", "Queue")
import marshal

# Upper bound (seconds) the tests wait for an event from the slave channel.
WAIT_TIMEOUT = 10.0
def check_marshallable(d):
    """Raise ValueError if *d* cannot be marshalled (i.e. sent over execnet).

    On failure the offending structure is pretty-printed first to aid
    debugging, then a ValueError with a uniform message is raised.
    """
    try:
        marshal.dumps(d)
    except ValueError:
        py.std.pprint.pprint(d)
        raise ValueError("not marshallable")
class EventCall:
    """A named event received from a slave, together with its keyword payload."""

    def __init__(self, eventcall):
        name, kwargs = eventcall
        self.name = name
        self.kwargs = kwargs

    def __str__(self):
        return "<EventCall %s(**%s)>" % (self.name, self.kwargs)
class SlaveSetup:
    """Test harness wrapping a SlaveController talking over a real execnet gateway."""

    # When True, events are delivered through self.events (a Queue) via the
    # putevent callback instead of being read directly off the channel.
    use_callback = False

    def __init__(self, request):
        self.testdir = request.getfuncargvalue("testdir")
        self.request = request
        self.events = queue.Queue()

    def setup(self, ):
        """Create the gateway + SlaveController and start the remote slave."""
        self.testdir.chdir()
        #import os ; os.environ['EXECNET_DEBUG'] = "2"
        self.gateway = execnet.makegateway()
        self.config = config = self.testdir.parseconfigure()
        # With use_callback, events are pushed into the queue; otherwise the
        # controller gets no callback and popevent reads the channel directly.
        putevent = self.use_callback and self.events.put or None
        self.slp = SlaveController(None, self.gateway, config, putevent)
        # Guarantee slave teardown even if a test fails midway.
        self.request.addfinalizer(self.slp.ensure_teardown)
        self.slp.setup()

    def popevent(self, name=None):
        """Return the next event; if *name* is given, skip events until it matches."""
        while 1:
            if self.use_callback:
                data = self.events.get(timeout=WAIT_TIMEOUT)
            else:
                data = self.slp.channel.receive(timeout=WAIT_TIMEOUT)
            ev = EventCall(data)
            if name is None or ev.name == name:
                return ev
            print("skipping %s" % (ev,))

    def sendcommand(self, name, **kwargs):
        # Forward a command verbatim to the slave controller.
        self.slp.sendcommand(name, **kwargs)
def pytest_funcarg__slave(request):
    # Old-style pytest funcarg hook: provides the `slave` fixture used below.
    return SlaveSetup(request)
def test_remoteinitconfig(testdir):
    """remote_initconfig must rebuild an equivalent config from option-dict + args,
    with the terminal reporter plugin blocked/absent on the remote side."""
    from xdist.remote import remote_initconfig
    config1 = testdir.parseconfig()
    config2 = remote_initconfig(config1.option.__dict__, config1.args)
    assert config2.option.__dict__ == config1.option.__dict__
    # -1 means "blocked" in the plugin manager; None means never registered.
    assert config2.pluginmanager.getplugin("terminal") in (-1, None)
class TestReportSerialization:
    """Round-trip tests: serialize_report -> marshal -> unserialize_report."""

    def test_itemreport_outcomes(self, testdir):
        # Cover every runtest outcome: pass, fail, skipif/imperative skip,
        # xfail marker and imperative xfail.
        reprec = testdir.inline_runsource("""
            import py
            def test_pass(): pass
            def test_fail(): 0/0
            @py.test.mark.skipif("True")
            def test_skip(): pass
            def test_skip_imperative():
                py.test.skip("hello")
            @py.test.mark.xfail("True")
            def test_xfail(): 0/0
            def test_xfail_imperative():
                py.test.xfail("hello")
        """)
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 17 # with setup/teardown "passed" reports
        for rep in reports:
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("testreport", d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if newrep.skipped and not hasattr(newrep, "wasxfail"):
                # Plain skips carry a (path, lineno, reason) triple.
                assert len(newrep.longrepr) == 3
            assert newrep.outcome == rep.outcome
            assert newrep.when == rep.when
            assert newrep.keywords == rep.keywords
            if rep.failed:
                # longrepr survives only as its string form.
                assert newrep.longrepr == str(rep.longrepr)

    def test_collectreport_passed(self, testdir):
        reprec = testdir.inline_runsource("def test_func(): pass")
        reports = reprec.getreports("pytest_collectreport")
        for rep in reports:
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("collectreport", d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped

    def test_collectreport_fail(self, testdir):
        # Invalid syntax makes collection itself fail.
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("collectreport", d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_extended_report_deserialization(self, testdir):
        # Extra attributes attached to a report must survive the round trip.
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            rep.extra = True
            d = serialize_report(rep)
            check_marshallable(d)
            newrep = unserialize_report("collectreport", d)
            assert newrep.extra
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)
class TestSlaveInteractor:
    """End-to-end tests of the master<->slave event protocol via SlaveSetup."""

    def test_basic_collect_and_runtests(self, slave):
        slave.testdir.makepyfile("""
            def test_func():
                pass
        """)
        slave.setup()
        ev = slave.popevent()
        assert ev.name == "slaveready"
        ev = slave.popevent()
        assert ev.name == "collectionstart"
        assert not ev.kwargs
        ev = slave.popevent("collectionfinish")
        assert ev.kwargs['topdir'] == slave.testdir.tmpdir
        ids = ev.kwargs['ids']
        assert len(ids) == 1
        # Schedule every collected test, then tell the slave to shut down.
        slave.sendcommand("runtests", indices=list(range(len(ids))))
        slave.sendcommand("shutdown")
        ev = slave.popevent("logstart")
        assert ev.kwargs["nodeid"].endswith("test_func")
        assert len(ev.kwargs["location"]) == 3
        ev = slave.popevent("testreport") # setup
        ev = slave.popevent("testreport")
        assert ev.name == "testreport"
        rep = unserialize_report(ev.name, ev.kwargs['data'])
        assert rep.nodeid.endswith("::test_func")
        assert rep.passed
        assert rep.when == "call"
        ev = slave.popevent("slavefinished")
        assert 'slaveoutput' in ev.kwargs

    def test_remote_collect_skip(self, slave):
        # Module-level skip: collection produces a skipped collect report
        # and an empty id list.
        slave.testdir.makepyfile("""
            import py
            py.test.skip("hello")
        """)
        slave.setup()
        ev = slave.popevent("collectionstart")
        assert not ev.kwargs
        ev = slave.popevent()
        assert ev.name == "collectreport"
        ev = slave.popevent()
        assert ev.name == "collectreport"
        rep = unserialize_report(ev.name, ev.kwargs['data'])
        assert rep.skipped
        ev = slave.popevent("collectionfinish")
        assert not ev.kwargs['ids']

    def test_remote_collect_fail(self, slave):
        # Syntactically invalid module: collection fails, no ids collected.
        slave.testdir.makepyfile("""aasd qwe""")
        slave.setup()
        ev = slave.popevent("collectionstart")
        assert not ev.kwargs
        ev = slave.popevent()
        assert ev.name == "collectreport"
        ev = slave.popevent()
        assert ev.name == "collectreport"
        rep = unserialize_report(ev.name, ev.kwargs['data'])
        assert rep.failed
        ev = slave.popevent("collectionfinish")
        assert not ev.kwargs['ids']

    def test_runtests_all(self, slave):
        slave.testdir.makepyfile("""
            def test_func(): pass
            def test_func2(): pass
        """)
        slave.setup()
        ev = slave.popevent()
        assert ev.name == "slaveready"
        ev = slave.popevent()
        assert ev.name == "collectionstart"
        assert not ev.kwargs
        ev = slave.popevent("collectionfinish")
        ids = ev.kwargs['ids']
        assert len(ids) == 2
        slave.sendcommand("runtests_all", )
        slave.sendcommand("shutdown", )
        for func in "::test_func", "::test_func2":
            for i in range(3): # setup/call/teardown
                ev = slave.popevent("testreport")
                assert ev.name == "testreport"
                rep = unserialize_report(ev.name, ev.kwargs['data'])
                assert rep.nodeid.endswith(func)
        ev = slave.popevent("slavefinished")
        assert 'slaveoutput' in ev.kwargs

    def test_happy_run_events_converted(self, testdir, slave):
        # Placeholder: intentionally xfailed until event production is covered.
        py.test.xfail("implement a simple test for event production")
        assert not slave.use_callback
        slave.testdir.makepyfile("""
            def test_func():
                pass
        """)
        slave.setup()
        hookrec = testdir.getreportrecorder(slave.config)
        for data in slave.slp.channel:
            slave.slp.process_from_remote(data)
        slave.slp.process_from_remote(slave.slp.ENDMARK)
        py.std.pprint.pprint(hookrec.hookrecorder.calls)
        hookrec.hookrecorder.contains([
            ("pytest_collectstart", "collector.fspath == aaa"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.collector.fspath == aaa"),
            ("pytest_collectstart", "collector.fspath == bbb"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.collector.fspath == bbb"),
        ])
| 37.405622
| 72
| 0.602963
|
4a043f29fb80d515781f563f0d983cc2f2a5c0cc
| 846
|
py
|
Python
|
src/examples/gpiozero/servo_example.py
|
grayerbeard/aiy
|
1a9868d90b8dfcdde2d2fde81e415a222f2642b1
|
[
"Apache-2.0"
] | null | null | null |
src/examples/gpiozero/servo_example.py
|
grayerbeard/aiy
|
1a9868d90b8dfcdde2d2fde81e415a222f2642b1
|
[
"Apache-2.0"
] | null | null | null |
src/examples/gpiozero/servo_example.py
|
grayerbeard/aiy
|
1a9868d90b8dfcdde2d2fde81e415a222f2642b1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Demonstrates simultaneous control of two servos on the hat.
One servo uses the simple default configuration, the other servo is tuned to
ensure the full range is reachable.
"""
from time import sleep
from gpiozero import Servo
from aiy.pins import PIN_A
from aiy.pins import PIN_B
# Create a default servo that will not be able to use quite the full range.
simple_servo = Servo(PIN_A)
# Create a servo with the custom values to give the full dynamic range.
tuned_servo = Servo(PIN_B, min_pulse_width=.0005, max_pulse_width=.0019)

# Sweep the servos in opposite directions until the user terminates the
# example: simple goes min -> mid -> max while tuned goes max -> mid -> min,
# pausing one second at each position.
while True:
    for move_simple, move_tuned in (
        (simple_servo.min, tuned_servo.max),
        (simple_servo.mid, tuned_servo.mid),
        (simple_servo.max, tuned_servo.min),
    ):
        move_simple()
        move_tuned()
        sleep(1)
| 29.172414
| 77
| 0.72104
|
4a043f8d308651ecdd1ba69996a099c0ab70c191
| 825
|
py
|
Python
|
scripts/stl/hlt/hlt_4vlans.py
|
timgates42/trex-core
|
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
|
[
"Apache-2.0"
] | 956
|
2015-06-24T15:04:55.000Z
|
2022-03-30T06:25:04.000Z
|
scripts/stl/hlt/hlt_4vlans.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 782
|
2015-09-20T15:19:00.000Z
|
2022-03-31T23:52:05.000Z
|
scripts/stl/hlt/hlt_4vlans.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 429
|
2015-06-27T19:34:21.000Z
|
2022-03-23T11:02:51.000Z
|
from trex.stl.trex_stl_hltapi import STLHltStream
class STLS1(object):
    """Eth/802.1Q/802.1Q/802.1Q/802.1Q/IPv6/TCP HLT stream without VM.

    Missing values will be filled with defaults.
    """

    def get_streams(self, direction=0, **kwargs):
        """Build the single quad-VLAN HLT stream for *direction*."""
        stream_kwargs = dict(
            frame_size=100,
            vlan_id=[1, 2, 3, 4],  # can be either array or string separated by spaces
            vlan_protocol_tag_id='8100 0x8100',  # hex with optional prefix '0x'
            vlan_user_priority='4 3 2',  # fourth will be default
            l3_protocol='ipv6',
            l4_protocol='tcp',
            direction=direction,
        )
        return STLHltStream(**stream_kwargs)
# dynamic load - used for trex console or simulator
def register():
    """Entry point: return the profile instance for the TRex console/simulator."""
    return STLS1()
| 33
| 103
| 0.557576
|
4a0442a0a632b1a13dad638bb9730b69df3d9a9d
| 9,974
|
py
|
Python
|
playground/analysis/analysis.py
|
murlokito/playground
|
405a7091bbfd6705db967e872ed6c4591bd892e6
|
[
"MIT"
] | null | null | null |
playground/analysis/analysis.py
|
murlokito/playground
|
405a7091bbfd6705db967e872ed6c4591bd892e6
|
[
"MIT"
] | null | null | null |
playground/analysis/analysis.py
|
murlokito/playground
|
405a7091bbfd6705db967e872ed6c4591bd892e6
|
[
"MIT"
] | null | null | null |
__title__ = "playground"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "murlux@protonmail.com"
import pandas as pd
from queue import Queue
from logging import Logger
from talib import abstract as abstract_ta
from talib import MA_Type
from playground import settings as s
from playground.analysis.mrfi import MRFI
from playground.util import setup_logger
class Analysis:
    """Technical-analysis pipeline for one asset pair / timeframe.

    Lifecycle (see ``run``): load the CSV dataset from disk
    (``prepare_dataset``), attach indicator columns and derive boolean
    signal columns (``analyse``), then write the enriched dataset back
    to disk (``save_dataset``).
    """

    logger: Logger = None         # per-instance logger, created in __init__
    dataset: pd.DataFrame = None  # raw dataset as read from the initial file
    df: pd.DataFrame = None       # analysed dataset (indicators + signals)
    initial_file: str = None      # input CSV path
    final_file: str = None        # output (analysed) CSV path
    read_queue: Queue = None      # exposed via get_read_queue(); unused here

    def __init__(self, item: dict = None) -> None:
        """Initialize and, when *item* is given, immediately run the pipeline.

        :param item: dict with at least 'pair' and 'timeframe' keys.
        """
        self.logger = setup_logger(name=__name__)
        self.read_queue = Queue()
        if item is not None:
            self.logger.info('Starting analysis of {} - {}'.format(item.get('pair'), item.get('timeframe')))
            self.run(item=item)

    def run(self, item: dict = None) -> None:
        """Run the full load -> analyse -> save pipeline for *item*."""
        self.prepare_dataset(item=item)
        self.analyse(df=self.dataset)
        self.save_dataset(item=item)

    def get_read_queue(self) -> Queue:
        """Return the reading Queue."""
        return self.read_queue

    def prepare_dataset(self, item: dict) -> None:
        """Resolve input/output paths for *item* and load the input CSV.

        The dataset is indexed by its 'time' column and sorted ascending.
        """
        self.initial_file = s.DATASET_FOLDER + '{}_{}.csv'.format(item.get('pair'), item.get('timeframe')).replace(' ', '')
        self.final_file = s.DATASET_FOLDER + '{}_{}_analyzed_v1.csv'.format(item.get('pair'), item.get('timeframe')).replace(' ', '')
        # NOTE(review): error_bad_lines is deprecated/removed in newer pandas
        # (replaced by on_bad_lines); kept for compatibility with the pinned
        # pandas version -- confirm before upgrading.
        try:
            # First try reading a truncated dataset (MAX_ROWS rows).
            self.dataset = pd.read_csv(self.initial_file, nrows=s.MAX_ROWS, error_bad_lines=False).set_index('time')
        except KeyError:
            # Fall back to reading the whole file.
            self.dataset = pd.read_csv(self.initial_file, error_bad_lines=False).set_index('time')
        self.dataset.sort_index(inplace=True, ascending=True)
        if s.ANALYSIS_VERBOSITY:
            self.logger.info('-------'*20 + str(item))
            self.logger.info(self.initial_file)
            self.logger.info(self.final_file)

    def save_dataset(self, item: dict) -> None:
        """Write the analysed dataframe to the final file, newest row first."""
        if s.ANALYSIS_VERBOSITY:
            self.logger.info( '{} - {} - DATASET FINAL -\n {} '.format(item['pair'], item['timeframe'], str(self.df)) + '-------'*20)
        self.df.sort_index(inplace=True, ascending=False)
        self.df.to_csv(self.final_file)

    def analyse(self, df: pd.DataFrame) -> pd.DataFrame:
        """Attach raw indicators, derive signal columns, and return the result."""
        self.df = self.add_indicators(df=df)
        self.df = self.process_indicators(df=self.df)
        return self.df

    def crossover(
        self, df: pd.DataFrame = None, crossing_col: str = '', crossed_col: str = '', new_col: str = '',
    ) -> pd.Series:
        """Flag rows where *crossing_col* crosses above *crossed_col*.

        A row is True when the crossing series is above the crossed series
        on that row and was below it on the previous row; all other rows
        are False.  The flags are written to df[new_col] and returned.
        """
        previous_crossing_col = df[crossing_col].shift(1)
        previous_crossed_col = df[crossed_col].shift(1)
        crossing = ((df[crossing_col] > df[crossed_col]) & (previous_crossing_col < previous_crossed_col))
        df.loc[crossing, new_col] = True
        # BUGFIX: fill with the boolean False -- the original filled with the
        # *string* 'False', which is truthy and would corrupt any later
        # boolean use of this signal column.
        df[new_col] = df[new_col].fillna(value=False)
        return df[new_col]

    def process_indicators(self, df: pd.DataFrame = None) -> pd.DataFrame:
        """Derive boolean signal columns from the raw indicator columns."""
        # EMA Cross
        df['ema20_50_cross'] = self.crossover(df=df, crossing_col='ema20', crossed_col='ema50', new_col='ema20_50_cross')
        df['ema20_100_cross'] = self.crossover(df=df, crossing_col='ema20', crossed_col='ema100', new_col='ema20_100_cross')
        df['ema50_100_cross'] = self.crossover(df=df, crossing_col='ema50', crossed_col='ema100', new_col='ema50_100_cross')
        df['ema100_200_cross'] = self.crossover(df=df, crossing_col='ema100', crossed_col='ema200', new_col='ema100_200_cross')
        df['ema100_300_cross'] = self.crossover(df=df, crossing_col='ema100', crossed_col='ema300', new_col='ema100_300_cross')
        # EMA Cross-under (same helper with the column roles swapped)
        df['ema50_20_cross'] = self.crossover(df=df, crossing_col='ema50', crossed_col='ema20', new_col='ema50_20_cross')
        df['ema100_20_cross'] = self.crossover(df=df, crossing_col='ema100', crossed_col='ema20', new_col='ema100_20_cross')
        df['ema100_50_cross'] = self.crossover(df=df, crossing_col='ema100', crossed_col='ema50', new_col='ema100_50_cross')
        df['ema200_100_cross'] = self.crossover(df=df, crossing_col='ema200', crossed_col='ema100', new_col='ema200_100_cross')
        df['ema300_100_cross'] = self.crossover(df=df, crossing_col='ema300', crossed_col='ema100', new_col='ema300_100_cross')
        # Bollinger Bands Crossing
        df['touch_upper'] = df.high >= df.upper
        df['touch_lower'] = df.low <= df.lower
        df['crossing_dn'] = (df.close < df.middle) & (df.open > df.middle)
        df['crossing_up'] = (df.close > df.middle) & (df.open < df.middle)
        # Medivh Relative Flow Index overbought/oversold thresholds
        df['smrfi_ob'] = df.smrfi > 70
        df['smrfi_os'] = df.smrfi < 30
        df['mrfi_ob'] = df.mrfi > 75
        df['mrfi_os'] = df.mrfi < 25
        df['mfi_os'] = df.mfi < 20
        df['mfi_ob'] = df.mfi > 80
        df['rsi_os'] = df.rsi < 30
        df['rsi_ob'] = df.rsi > 70
        # Stoch Crossover SMRFI / MRFI
        df['slow_stoch_crossover_smrfi'] = self.crossover(df=df, crossing_col='slow_stoch', crossed_col='smrfi', new_col='slow_stoch_crossover_smrfi')
        df['slow_stoch_crossover_mrfi'] = self.crossover(df=df, crossing_col='slow_stoch', crossed_col='mrfi', new_col='slow_stoch_crossover_mrfi')
        df['slow_stoch14_crossover_smrfi'] = self.crossover(df=df, crossing_col='slow_stoch_sma14', crossed_col='smrfi', new_col='slow_stoch14_crossover_smrfi')
        df['slow_stoch14_crossover_mrfi'] = self.crossover(df=df, crossing_col='slow_stoch_sma14', crossed_col='mrfi', new_col='slow_stoch14_crossover_mrfi')
        df['slow_stoch26_crossover_smrfi'] = self.crossover(df=df, crossing_col='slow_stoch_sma26', crossed_col='smrfi', new_col='slow_stoch26_crossover_smrfi')
        df['slow_stoch26_crossover_mrfi'] = self.crossover(df=df, crossing_col='slow_stoch_sma26', crossed_col='mrfi', new_col='slow_stoch26_crossover_mrfi')
        # Stoch Crossunder SMRFI / MRFI
        df['slow_stoch_crossunder_smrfi'] = self.crossover(df=df, crossing_col='smrfi', crossed_col='slow_stoch', new_col='slow_stoch_crossunder_smrfi')
        df['slow_stoch_crossunder_mrfi'] = self.crossover(df=df, crossing_col='mrfi', crossed_col='slow_stoch', new_col='slow_stoch_crossunder_mrfi')
        df['slow_stoch14_crossunder_smrfi'] = self.crossover(df=df, crossing_col='smrfi', crossed_col='slow_stoch_sma14', new_col='slow_stoch14_crossunder_smrfi')
        df['slow_stoch14_crossunder_mrfi'] = self.crossover(df=df, crossing_col='mrfi', crossed_col='slow_stoch_sma14', new_col='slow_stoch14_crossunder_mrfi')
        df['slow_stoch26_crossunder_smrfi'] = self.crossover(df=df, crossing_col='smrfi', crossed_col='slow_stoch_sma26', new_col='slow_stoch26_crossunder_smrfi')
        df['slow_stoch26_crossunder_mrfi'] = self.crossover(df=df, crossing_col='mrfi', crossed_col='slow_stoch_sma26', new_col='slow_stoch26_crossunder_mrfi')
        return df

    def add_indicators(self, df: pd.DataFrame = None) -> pd.DataFrame:
        """Compute and attach all raw indicator columns to *df*."""
        cols = ['high', 'low', 'open', 'close', 'volume']
        HLOCV = {key: df[key].values for key in df if key in cols}
        try:
            # Some data sources deliver the volume column as 'volumeto'.
            df['volume'] = df['volumeto']
        except KeyError:  # was a bare except; only a missing column is expected
            pass
        # Moving Averages
        df['sma'] = abstract_ta.SMA(df, timeperiod=25)
        df['ema20'] = abstract_ta.EMA(df, timeperiod=20)
        df['ema50'] = abstract_ta.EMA(df, timeperiod=50)
        df['ema100'] = abstract_ta.EMA(df, timeperiod=100)
        df['ema200'] = abstract_ta.EMA(df, timeperiod=200)
        df['ema300'] = abstract_ta.EMA(df, timeperiod=300)
        # Bollinger Bands
        u, m, l = abstract_ta.BBANDS(HLOCV, timeperiod=24, nbdevup=2.5, nbdevdn=2.5, matype=MA_Type.T3)
        df['upper'] = u
        df['middle'] = m
        df['lower'] = l
        # Stochastic
        # uses high, low, close (default)
        slowk, slowd = abstract_ta.STOCH(HLOCV, 5, 3, 0, 3, 0)  # uses high, low, close by default
        df['slowk'] = slowk
        df['slowd'] = slowd
        df['slow_stoch'] = (slowk + slowd)/2
        df['slow_stoch_sma14'] = df.slow_stoch.rolling(window=14).mean()
        df['slow_stoch_sma26'] = df.slow_stoch.rolling(window=26).mean()
        # Relative Strength Index
        rsi = abstract_ta.RSI(df, timeperiod=14)
        df['rsi'] = rsi
        # Money Flow Index
        mfi = abstract_ta.MFI(df, timeperiod=14)
        df['mfi'] = mfi
        # Medivh Relative Flow Index
        mrfi_df = MRFI(df)
        df['mrfi'] = mrfi_df['mrfi'].astype(float)
        df['smrfi'] = mrfi_df['smrfi'].astype(float)
        df['mrfi_basis'] = mrfi_df['mrfi_basis'].astype(float)
        df['mrfi_inverse'] = mrfi_df['mrfi_inverse'].astype(float)
        return df
| 45.543379
| 175
| 0.64237
|
4a04448e8049fb6d6b10300165ace61a96ce682f
| 7,274
|
py
|
Python
|
PolygonLineTools/Scripts/split_by_sector.py
|
Dan-Patterson/Tools_for_ArcGIS_Pro
|
b5c253d59d57bd1abe7e2433a77aed7d3ea22567
|
[
"Info-ZIP"
] | 23
|
2020-05-15T18:40:25.000Z
|
2022-03-31T08:44:39.000Z
|
PolygonLineTools/Scripts/split_by_sector.py
|
Dan-Patterson/Tools_for_ArcGIS_Pro
|
b5c253d59d57bd1abe7e2433a77aed7d3ea22567
|
[
"Info-ZIP"
] | 1
|
2021-12-14T16:47:00.000Z
|
2021-12-15T03:06:26.000Z
|
PolygonLineTools/Scripts/split_by_sector.py
|
Dan-Patterson/Tools_for_ArcGIS_Pro
|
b5c253d59d57bd1abe7e2433a77aed7d3ea22567
|
[
"Info-ZIP"
] | 3
|
2021-08-09T05:42:19.000Z
|
2022-03-31T08:44:59.000Z
|
# -*- coding: UTF-8 -*-
"""
split_by_sector
===============
Script : split_by_sector.py
Author : Dan.Patterson@carleton.ca
Modified : 2018-08-30
Purpose : tools for working with numpy arrays
Source :
References:
----------
`<https://stackoverflow.com/questions/3252194/numpy-and-line-intersections>`_.
`<https://community.esri.com/message/627051?commentID=627051#comment-627051>`
`<https://community.esri.com/message/779043-re-how-to-divide-irregular-
polygon-into-equal-areas-using-arcgis-105?commentID=779043#comment-779043>`
This is a good one
`<https://tereshenkov.wordpress.com/2017/09/10/dividing-a-polygon-into-a-given
-number-of-equal-areas-with-arcpy/>`
"""
# ---- imports, formats, constants ----
import sys
import numpy as np
from arcpytools_plt import(tweet, fc_info, get_polys)
import arcpy
ft={'bool': lambda x: repr(x.astype('int32')),
'float': '{: 0.3f}'.format}
np.set_printoptions(edgeitems=10, linewidth=80, precision=2,
suppress=True, threshold=100,
formatter=ft)
script = sys.argv[0]
__all__ = ["plot_",
"to_polygon",
"cal_sect",
"sectors",
"process"
]
#---- functions ----
def plot_(pnts):
    """Plot filled polygons (circle, arc sector, etc.) with matplotlib."""
    import matplotlib.pyplot as plt
    import matplotlib
    from matplotlib.patches import Polygon
    from matplotlib.collections import PatchCollection
    fig, ax = plt.subplots()
    # Points need to form a closed loop; set closed to True if the first
    # and last points aren't equal.
    patches = [Polygon(ring, closed=False) for ring in pnts]
    coll = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=1.0)
    coll.set_array(np.array(100*np.random.rand(len(patches))))
    ax.add_collection(coll)
    plt.axis('equal')
    plt.show()
def to_polygon(pnts):
    """Build arcpy Polygons from a list/array of point pairs per ring.

    Pass [pnts] if you are creating a single polygon; multiple polygons
    are already represented as a list of lists of points (i.e. this
    expects a 3d array or its list equivalent).
    """
    return [
        arcpy.Polygon(arcpy.Array([arcpy.Point(*xy) for xy in ring]))
        for ring in pnts
    ]
def cal_sect(poly, cutters, pnts, factor):
    """Clip each sector against *poly* and find equal-area split indices.

    Returns (idxs, splits): the indices where the cumulative area fraction
    reaches each of the *factor* equal-area targets, and the clipped
    sectors grouped at those indices.
    """
    # ---- intersect every sector with the polygon here
    clipped = [cutter.intersect(poly, 4) for cutter in cutters]
    whole = poly.area
    cum_fraction = np.cumsum([piece.area/whole for piece in clipped])
    targets = np.linspace(0.0, 1.0, factor, endpoint=False)[1:]
    idxs = [np.argwhere(cum_fraction <= t)[-1][0] for t in targets]
    return idxs, np.split(clipped, idxs, axis=0)
def sectors(radius=100, theta=0.5, xc=0.0, yc=0.0):
    """Create pie-slice sectors radiating out from the centre (xc, yc).

    A circle outline is generated at *theta*-degree steps from 180 down to
    -180, then each consecutive pair of outline points is joined with the
    centre to form a triangular sector.

    Returns (sect, pnts): sect is an (n, 3, 2) array of sector triangles
    (centre, point i, point i+1); pnts is the circle outline shifted to
    the centre.
    """
    angles = np.deg2rad(np.arange(180.0, -180.0 - theta, step=-theta))
    ring = np.column_stack((radius*np.cos(angles), radius*np.sin(angles)))
    centre = np.array([[xc, yc]])
    # each sector: (origin, from-point, to-point), all translated to centre
    apexes = np.zeros((len(ring) - 1, 2))
    sect = np.array(list(zip(apexes, ring[:-1], ring[1:]))) + centre
    return sect, ring + centre
def process(in_polys, split_factor=None):
    """Split each input polygon into sectors of roughly equal area.

    Parameters
    ----------
    in_polys : list
        arcpy Polygon geometries to split.
    split_factor : int, optional
        Number of pieces per polygon.  Defaults to the module-level
        ``s_fac`` setting -- the original implementation read that global
        silently (and its docstring described parameters the function did
        not take); the explicit parameter keeps callers backward
        compatible while making the dependency visible.

    Returns
    -------
    List of arcpy Polygons, ``split_factor`` per input polygon.

    Requires:
    ---------
    sectors, to_polygon, cal_sect

    Notes:
    ------
    You can fine-tune the analysis by changing the theta value from 1.0 to a
    smaller value: 360 circle sectors result when theta = 1, 720 when it
    equals 0.5.  Processing time changes minimally on a per polygon basis.
    """
    if split_factor is None:
        split_factor = s_fac  # module-level script/tool setting
    result_ = []
    for poly in in_polys:
        # radius large enough to cover the polygon from its centroid
        ext = max(poly.extent.width, poly.extent.height)
        xc, yc = cent = [poly.centroid.X, poly.centroid.Y]
        sect, pnts = sectors(radius=ext, theta=0.5, xc=xc, yc=yc)  # theta=1
        cutters = to_polygon(sect)
        idxs, splits = cal_sect(poly, cutters, pnts, split_factor)
        ps = np.split(pnts, np.array(idxs) + 1)
        # NOTE(review): for i == 0, ps[i-1] wraps to the last group --
        # presumably intentional to close the ring; confirm.
        new_polys = [np.vstack((cent, ps[i-1], ps[i][0], cent))
                     for i in range(0, len(ps))]
        r = to_polygon(new_polys)
        rs = [piece.intersect(poly, 4) for piece in r]
        result_.extend(rs)
    return result_
# ---- demo and tool section -------------------------------------------------
# large Canada Can_0_sp_lcc
if len(sys.argv) == 1:
    # No arguments: run in demo/test mode against the bundled geodatabase.
    testing = True
    in_pth = script.split("/")[:-2] + ["Polygon_lineTools.gdb"]
    in_fc = "/".join(in_pth) + "/shapes_mtm9"  # "/Big" #
    out_fc = "/".join(in_pth) + "/s1"
    s_fac = 4
else:
    # Arguments supplied: running as an ArcGIS script tool.
    testing = False
    in_fc = sys.argv[1]
    out_fc = sys.argv[2]
    s_fac = int(sys.argv[3])
# ---- for both
#
shp_fld, oid_fld, shp_type, SR = fc_info(in_fc)
# ---- instant bail if not projected
if SR.type == 'Projected':
    in_polys, out_ids = get_polys(in_fc)
    out_polys = process(in_polys)
    if not testing:
        if arcpy.Exists(out_fc):
            arcpy.Delete_management(out_fc)
        arcpy.CopyFeatures_management(out_polys, out_fc)
        # Attach the original polygon id (repeated s_fac times) to each piece.
        out_ids = np.repeat(out_ids, s_fac)
        id_fld = np.zeros((len(out_polys),),
                          dtype=[("key", "<i4"), ("Old_ID", "<i4")])
        id_fld["key"] = np.arange(1, len(out_polys) + 1)
        id_fld["Old_ID"] = out_ids
        arcpy.da.ExtendTable(out_fc, oid_fld, id_fld, "key")
else:
    msg = """
    -----------------------------------------------------------------
    Input data is not in a projected coordinate system....
    bailing...
    -----------------------------------------------------------------
    """
    tweet(msg)
#----------------------
if __name__=="__main__":
    """Uncomment what you want to see"""
    #print("Script... {}".format(script))
    #circ_pnts = _circle(radius=1, theta=30, xc=5, yc=5)
    #print("\ncircle points...\n{}".format(circ_pnts))
    #arc_pnts = _arc(radius=10, start=0, stop=90.5, step=5, xc=0.0, yc=0.0)
    #print("\narc points...\n{}".format(arc_pnts))
    #pnts = arc_sector()
    #pnts = buffer_ring()
    #multi_sector_demo()
    #multiring_buffer_demo()
| 31.626087
| 79
| 0.569425
|
4a0444f56c1f77b529aa07e01eff517d53a83b93
| 4,951
|
py
|
Python
|
pogom/models.py
|
charbec1/pokemapfuntimesyay
|
d8301930c7733041114ca33fe26117d7157d9149
|
[
"MIT"
] | null | null | null |
pogom/models.py
|
charbec1/pokemapfuntimesyay
|
d8301930c7733041114ca33fe26117d7157d9149
|
[
"MIT"
] | null | null | null |
pogom/models.py
|
charbec1/pokemapfuntimesyay
|
d8301930c7733041114ca33fe26117d7157d9149
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from peewee import Model, SqliteDatabase, InsertQuery, IntegerField,\
CharField, FloatField, BooleanField, DateTimeField
from datetime import datetime
from base64 import b64encode
from .utils import get_pokemon_name
db = SqliteDatabase('pogom.db')
log = logging.getLogger(__name__)
class BaseModel(Model):
    """Peewee base model binding every model to the module-level SQLite db."""
    class Meta:
        database = db
class Pokemon(BaseModel):
    """A sighted wild pokemon with its location and despawn time."""

    # Optional class-level filters consulted by get_active(); entries may
    # be lowercase names or string pokemon ids.
    IGNORE = None
    ONLY = None

    # We are base64 encoding the ids delivered by the api
    # because they are too big for sqlite to handle
    encounter_id = CharField(primary_key=True)
    spawnpoint_id = CharField()
    pokemon_id = IntegerField()
    latitude = FloatField()
    longitude = FloatField()
    disappear_time = DateTimeField()
    detect_time = DateTimeField()

    @classmethod
    def get_active(cls, stamp):
        """Return dicts for pokemon that have not despawned and were
        detected at or after *stamp* (a unix timestamp in milliseconds),
        filtered by the IGNORE/ONLY class attributes."""
        r_stamp = datetime.fromtimestamp(int(stamp)/1e3)  # ms -> seconds
        query = (Pokemon
                 .select()
                 .where(Pokemon.disappear_time > datetime.now(), Pokemon.detect_time >= r_stamp)
                 .dicts())
        log.info("Get Pokemons for stamp: {}".format(r_stamp))
        pokemons = []
        for p in query:
            p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
            # Filters match on lowercase name or stringified id.
            pokemon_name = p['pokemon_name'].lower()
            pokemon_id = str(p['pokemon_id'])
            if cls.IGNORE:
                if pokemon_name in cls.IGNORE or pokemon_id in cls.IGNORE:
                    continue
            if cls.ONLY:
                if pokemon_name not in cls.ONLY and pokemon_id not in cls.ONLY:
                    continue
            pokemons.append(p)
        return pokemons
class Pokestop(BaseModel):
    """A pokestop fort; lure_expiration is null when no lure is active."""
    pokestop_id = CharField(primary_key=True)
    enabled = BooleanField()
    latitude = FloatField()
    longitude = FloatField()
    last_modified = DateTimeField()
    lure_expiration = DateTimeField(null=True)
class Gym(BaseModel):
    """A gym fort and the team currently holding it."""

    # Possible team_id values.
    UNCONTESTED = 0
    TEAM_MYSTIC = 1
    TEAM_VALOR = 2
    TEAM_INSTINCT = 3

    gym_id = CharField(primary_key=True)
    team_id = IntegerField()
    guard_pokemon_id = IntegerField()
    enabled = BooleanField()
    latitude = FloatField()
    longitude = FloatField()
    last_modified = DateTimeField()
def parse_map(map_dict):
    """Parse a GET_MAP_OBJECTS API response and upsert results into the DB.

    Collects wild pokemon, pokestops and gyms from every map cell, then
    bulk-upserts pokemon and gyms.  Pokestop upserting is commented out
    below (reason unknown -- left as found).
    """
    pokemons = {}
    pokestops = {}
    gyms = {}
    # Single detection timestamp shared by every pokemon in this response.
    detect_time = datetime.now()
    cells = map_dict['responses']['GET_MAP_OBJECTS']['map_cells']
    for cell in cells:
        for p in cell.get('wild_pokemons', []):
            pokemons[p['encounter_id']] = {
                # b64-encoded: raw ids are too big for sqlite (see Pokemon)
                'encounter_id': b64encode(str(p['encounter_id'])),
                'spawnpoint_id': p['spawnpoint_id'],
                'pokemon_id': p['pokemon_data']['pokemon_id'],
                'latitude': p['latitude'],
                'longitude': p['longitude'],
                # API timestamps are in milliseconds.
                'disappear_time': datetime.fromtimestamp(
                    (p['last_modified_timestamp_ms'] +
                     p['time_till_hidden_ms']) / 1000.0),
                'detect_time': detect_time
            }
        for f in cell.get('forts', []):
            if f.get('type') == 1:  # Pokestops
                if 'lure_info' in f:
                    lure_expiration = datetime.fromtimestamp(
                        f['lure_info']['lure_expires_timestamp_ms'] / 1000.0)
                else:
                    lure_expiration = None
                pokestops[f['id']] = {
                    'pokestop_id': f['id'],
                    'enabled': f['enabled'],
                    'latitude': f['latitude'],
                    'longitude': f['longitude'],
                    'last_modified': datetime.fromtimestamp(
                        f['last_modified_timestamp_ms'] / 1000.0),
                    'lure_expiration': lure_expiration
                }
            else:  # Currently, there are only stops and gyms
                # NOTE(review): assumes 'owned_by_team' and
                # 'guard_pokemon_id' are always present on gym forts --
                # confirm against the API payload.
                gyms[f['id']] = {
                    'gym_id': f['id'],
                    'team_id': f['owned_by_team'],
                    'guard_pokemon_id': f['guard_pokemon_id'],
                    'enabled': f['enabled'],
                    'latitude': f['latitude'],
                    'longitude': f['longitude'],
                    'last_modified': datetime.fromtimestamp(
                        f['last_modified_timestamp_ms'] / 1000.0),
                }
    if pokemons:
        log.info("Upserting {} pokemon".format(len(pokemons)))
        InsertQuery(Pokemon, rows=pokemons.values()).upsert().execute()
    #if pokestops:
    #    log.info("Upserting {} pokestops".format(len(pokestops)))
    #    InsertQuery(Pokestop, rows=pokestops.values()).upsert().execute()
    if gyms:
        log.info("Upserting {} gyms".format(len(gyms)))
        InsertQuery(Gym, rows=gyms.values()).upsert().execute()
def create_tables():
    """Create all model tables if they do not already exist (safe=True)."""
    db.connect()
    db.create_tables([Pokemon, Pokestop, Gym], safe=True)
    db.close()
| 32.572368
| 96
| 0.561907
|
4a0445052b1ded1ea39bbd737a8822710f41140c
| 2,525
|
py
|
Python
|
bikeshed/shorthands/section.py
|
svgeesus/bikeshed
|
d994f2176e9af0cdea0bf01899356b5c50cdedca
|
[
"CC0-1.0"
] | null | null | null |
bikeshed/shorthands/section.py
|
svgeesus/bikeshed
|
d994f2176e9af0cdea0bf01899356b5c50cdedca
|
[
"CC0-1.0"
] | null | null | null |
bikeshed/shorthands/section.py
|
svgeesus/bikeshed
|
d994f2176e9af0cdea0bf01899356b5c50cdedca
|
[
"CC0-1.0"
] | null | null | null |
import re
from ..h import E, outerHTML
from . import steps
class SectionShorthand:
    """State machine for the ``[[spec/page#section]]`` autolink shorthand.

    Stages: 'start' (opening match) -> optional 'link text' (when a '|'
    introduces custom link text) -> 'end' (closing ``]]``).  Produces a
    local <a section> link, or a <span spec-section> placeholder for
    foreign links that get resolved later.
    """

    def __init__(self):
        self.stage = "start"        # current state of the machine
        self.escapedText = None     # set when the shorthand was \-escaped
        self.linkText = []          # custom link text nodes, if any
        self.bsAutolink = ""        # raw source text, for bs-autolink-syntax

    def respond(self, match, dom=None):
        """Dispatch *match* to the handler for the current stage."""
        if self.stage == "start":
            return self.respondStart(match)
        elif self.stage == "link text":
            return self.respondLinkText(match, dom)
        elif self.stage == "end":
            return self.respondEnd()

    def respondStart(self, match):
        """Handle the opening match; its five groups are: escape backslash,
        spec name, /page#section, /page-only, and the '|' link-text flag."""
        self.bsAutolink = match.group(0)
        escape, self.spec, self.section, self.justPage, hasLinkText = match.groups()
        if escape:
            self.escapedText = match.group(0)[1:]
        if hasLinkText:
            self.stage = "link text"
            return steps.NextBody(endRe)
        else:
            self.stage = "end"
            return steps.NextLiteral(endRe)

    def respondLinkText(self, match, dom):
        """Capture the custom link text, then finish."""
        self.linkText = dom
        self.bsAutolink += outerHTML(dom)
        return self.respondEnd()

    def respondEnd(self):
        """Emit the final node (or the escaped literal text)."""
        if self.escapedText:
            # NOTE(review): escapedText already had the leading character
            # stripped in respondStart; the extra [1:] here drops another
            # one -- verify this is intended.
            return steps.Success(
                skips=["["], nodes=[self.escapedText[1:], *self.linkText, "]]"]
            )
        self.bsAutolink += "]]"
        if not self.linkText:
            self.linkText = ""  # will get filled in by a later step
        if self.spec is None:
            # local section link
            attrs = {
                "section": "",
                "href": self.section,
                "bs-autolink-syntax": self.bsAutolink,
            }
            return steps.Success(E.a(attrs, self.linkText))
        elif self.justPage is not None:
            # foreign link, to an actual page from a multipage spec
            attrs = {
                "spec-section": self.justPage + "#",
                "spec": self.spec,
                "bs-autolink-syntax": self.bsAutolink,
            }
            return steps.Success(E.span(attrs, self.linkText))
        else:
            # foreign link
            attrs = {
                "spec-section": self.section,
                "spec": self.spec,
                "bs-autolink-syntax": self.bsAutolink,
            }
            return steps.Success(E.span(attrs, self.linkText))
SectionShorthand.startRe = re.compile(
r"""
(\\)?
\[\[
([\w.+-]+)?
(?:
((?:\/[\w.+-]*)?(?:\#[\w.+-]+)) |
(\/[\w.+-]+)
)
(\|)?
""",
re.X,
)
endRe = re.compile(r"]]")
| 27.747253
| 84
| 0.503762
|
4a04454d0a5d8d79c54fa54896b04e467e485546
| 1,142
|
py
|
Python
|
RTK/scripts/alvinxy/gpstoxy.py
|
ScarecrowStraw/JunFlyBot
|
0bdd27fdcfff3972941538730f390ce2a27595d8
|
[
"MIT"
] | null | null | null |
RTK/scripts/alvinxy/gpstoxy.py
|
ScarecrowStraw/JunFlyBot
|
0bdd27fdcfff3972941538730f390ce2a27595d8
|
[
"MIT"
] | null | null | null |
RTK/scripts/alvinxy/gpstoxy.py
|
ScarecrowStraw/JunFlyBot
|
0bdd27fdcfff3972941538730f390ce2a27595d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from data_collection.msg import rtk
from geometry_msgs.msg import Point
import alvinxy
reload(alvinxy)
# Specify an origin
origin = [21.0454563, 105.7859686]
def callback(data):
    """Handle one RTK fix: convert lat/lon to local x/y and republish it.

    :param data: ``rtk`` message with latitude, longitude, altitude fields.
    """
    rospy.loginfo(rospy.get_caller_id() + 'I heard %s %s %s', str(data.latitude), str(data.longitude), str(data.altitude))
    xx, yy = alvinxy.ll2xy(data.latitude, data.longitude, origin[0], origin[1])
    # Create the publisher once and cache it on the function: constructing a
    # new rospy.Publisher per message is wasteful, and freshly created
    # publishers can drop messages before subscribers finish connecting.
    if not hasattr(callback, '_pub'):
        callback._pub = rospy.Publisher('gps_to_point/', Point, queue_size=10)
    msg = Point()
    msg.x = xx
    msg.y = yy
    msg.z = data.altitude
    callback._pub.publish(msg)
def listener():
    """Start the converter node and subscribe to the RTK topic."""
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    rospy.init_node('converter', anonymous=True)
    rospy.Subscriber('ublox_rtk', rtk, callback)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
if __name__ == '__main__':
listener()
| 29.282051
| 122
| 0.704904
|
4a0445f8b36bb42e908d982d7de9cbd128d8631d
| 14,783
|
py
|
Python
|
airmozilla/manage/tests/views/test_eventtweets.py
|
mozilla/airmozilla
|
fa6acbbbacc1e22553457807bcea7ce7a9ef6fe3
|
[
"BSD-3-Clause"
] | 115
|
2015-01-06T18:45:39.000Z
|
2022-02-07T10:56:49.000Z
|
airmozilla/manage/tests/views/test_eventtweets.py
|
april/airmozilla
|
ee357f5396cdcb50147c72ff1e81a610f9cb292c
|
[
"BSD-3-Clause"
] | 321
|
2015-01-02T15:19:25.000Z
|
2018-07-05T14:58:50.000Z
|
airmozilla/manage/tests/views/test_eventtweets.py
|
april/airmozilla
|
ee357f5396cdcb50147c72ff1e81a610f9cb292c
|
[
"BSD-3-Clause"
] | 101
|
2015-01-13T17:59:15.000Z
|
2020-12-15T02:58:38.000Z
|
import datetime
import json
from nose.tools import eq_, ok_
import mock
from django.conf import settings
from django.contrib.auth.models import Group
from django.utils import timezone
from django.core.urlresolvers import reverse
from airmozilla.main.models import (
Event,
EventTweet,
Location,
Approval
)
from .base import ManageTestCase
from airmozilla.base.tests.test_utils import Response
class TestEventTweets(ManageTestCase):
    """Tests for the manage-app views that prepare, edit and list event tweets."""

    # Baseline POST payload for the event edit form; tests override or
    # extend individual fields as needed.
    event_base_data = {
        'status': Event.STATUS_SCHEDULED,
        'description': '...',
        'privacy': 'public',
        'location': '1',
        'channels': '1',
        'tags': 'xxx',
        'template': '1',
        'start_time': '2012-3-4 12:00',
        'estimated_duration': '3600',
        'timezone': 'US/Pacific'
    }
    # Real image file uploaded as the event placeholder in tests.
    placeholder = 'airmozilla/manage/tests/firefox.png'
    @mock.patch('requests.get')
    def test_prepare_new_tweet(self, rget):
        """The new-tweet form pre-fills from the event (with a bitly-shortened
        URL) and enforces the 140-character limit before creating an
        EventTweet queued for immediate sending."""
        def mocked_read(url, params):
            # Stub out the bitly URL-shortener API.
            assert url == settings.BITLY_URL
            return Response({
                u'status_code': 200,
                u'data': {
                    u'url': u'http://mzl.la/1adh2wT',
                    u'hash': u'1adh2wT',
                    u'global_hash': u'1adh2wU',
                    u'long_url': u'https://air.mozilla.org/it-buildout/',
                    u'new_hash': 0
                },
                u'status_txt': u'OK'
            })
        rget.side_effect = mocked_read

        event = Event.objects.get(title='Test event')
        # the event must have a real placeholder image
        with open(self.placeholder) as fp:
            response = self.client.post(
                reverse('manage:event_edit', args=(event.pk,)),
                dict(self.event_base_data,
                     title=event.title,
                     short_description="Check out <b>This!</b>",
                     description="Something longer",
                     placeholder_img=fp)
            )
            assert response.status_code == 302, response.status_code

        # on the edit page, there should be a link
        response = self.client.get(
            reverse('manage:event_edit', args=(event.pk,))
        )
        assert response.status_code == 200
        url = reverse('manage:new_event_tweet', args=(event.pk,))
        ok_(url in response.content)

        response = self.client.get(url)
        eq_(response.status_code, 200)
        # Crude extraction of the pre-filled tweet text from the rendered form.
        textarea = (
            response.content
            .split('<textarea')[1]
            .split('>')[1]
            .split('</textarea')[0]
        )
        ok_(textarea.strip().startswith('Check out This!'))
        event = Event.objects.get(pk=event.pk)
        event_url = 'http://testserver'
        event_url += reverse('main:event', args=(event.slug,))
        # The long event URL must have been replaced by the shortened one.
        ok_('http://mzl.la/1adh2wT' in textarea)
        ok_(event_url not in textarea)

        # Sometimes, due to...
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1167211
        # the session is cleared out here in this test, so we
        # really make sure we're signed in
        assert self.client.login(username='fake', password='fake')
        assert self.client.session.items()

        # load the form
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # try to submit it with longer than 140 characters
        response = self.client.post(url, {
            'text': 'x' * 141,
            'include_placeholder': True,
        })
        eq_(response.status_code, 200)
        assert not EventTweet.objects.all().count()
        ok_('it has 141' in response.content)
        # try again
        response = self.client.post(url, {
            'text': 'Bla bla #tag',
            'include_placeholder': True,
        })
        eq_(response.status_code, 302)
        ok_(EventTweet.objects.all().count())
        now = timezone.now()
        event_tweet, = EventTweet.objects.all()
        # To avoid being unlucky about the second ticking over
        # just before we compare these, make it OK to be up to 2 seconds
        # apart.
        diff = abs(event_tweet.send_date - now)
        ok_(diff < datetime.timedelta(seconds=2))
        ok_(not event_tweet.sent_date)
        ok_(not event_tweet.error)
        ok_(not event_tweet.tweet_id)
    @mock.patch('requests.get')
    def test_prepare_new_tweet_on_future_event(self, rget):
        """For a future event, the new-tweet form's help text shows the
        event's start time in the location's local timezone."""
        def mocked_read(url, params):
            # Stub out the bitly URL-shortener API.
            assert url == settings.BITLY_URL
            return Response({
                u'status_code': 200,
                u'data': {
                    u'url': u'http://mzl.la/1adh2wT',
                    u'hash': u'1adh2wT',
                    u'global_hash': u'1adh2wU',
                    u'long_url': u'https://air.mozilla.org/it-buildout/',
                    u'new_hash': 0
                },
                u'status_txt': u'OK'
            })
        rget.side_effect = mocked_read

        event = Event.objects.get(title='Test event')
        # Push the event 10 days into the future.
        event.start_time = timezone.now() + datetime.timedelta(days=10)
        event.save()
        assert event.is_scheduled()
        assert event.location
        assert event.location.timezone

        # on the edit page, there should be a link
        url = reverse('manage:new_event_tweet', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        help_text_part = 'This event starts %s' % (
            event.location_time.strftime('%Y-%m-%d %H:%M')
        )
        ok_(help_text_part in response.content)
    def test_edit_event_tweet(self):
        """Editing an existing EventTweet updates its text (trimmed) while
        keeping roughly the same send date."""
        event = Event.objects.get(title='Test event')
        assert event.location and event.location.timezone == 'US/Pacific'
        tomorrow = timezone.now() + datetime.timedelta(days=1)
        tweet = EventTweet.objects.create(
            event=event,
            text='Something something',
            creator=self.user,
            include_placeholder=True,
            send_date=tomorrow,
        )
        url = reverse('manage:edit_event_tweet', args=(event.id, tweet.id))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Something something' in response.content)

        # tz = pytz.timezone(event.location.timezone)
        data = {
            # trailing whitespace should be stripped on save
            'text': 'Different Bla ',
            'include_placeholder': True,
            'send_date': tweet.send_date.strftime('%Y-%m-%d %H:%M'),
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        tweet = EventTweet.objects.get(id=tweet.id)
        eq_(tweet.text, 'Different Bla')
        # because we round but they won't be equal, but close
        ok_(abs(tomorrow - tweet.send_date) <= datetime.timedelta(hours=1))
def test_event_tweets_empty(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
def test_event_tweets_states(self):
    """The event tweets page reflects a tweet's lifecycle states:
    blocked on approval, successfully sent, and failed."""
    event = Event.objects.get(title='Test event')
    assert event in Event.objects.approved()
    # Creating a pending Approval makes the event unapproved again.
    group = Group.objects.create(name='testapprover')
    Approval.objects.create(
        event=event,
        group=group,
    )
    assert event not in Event.objects.approved()

    url = reverse('manage:event_tweets', args=(event.pk,))
    response = self.client.get(url)
    eq_(response.status_code, 200)

    tweet = EventTweet.objects.create(
        event=event,
        text='Bla bla',
        send_date=timezone.now(),
    )
    # Unapproved event: the tweet is listed but flagged as blocked.
    response = self.client.get(url)
    eq_(response.status_code, 200)
    ok_('Bla bla' in response.content)
    ok_('Needs to be approved first' in response.content)
    from airmozilla.main.templatetags.jinja_helpers import js_date
    # Not sent yet, so no sent-date is rendered on the page.
    ok_(
        js_date(tweet.send_date.replace(microsecond=0))
        not in response.content
    )

    # also check that 'Bla bla' is shown on the Edit Event page
    edit_url = reverse('manage:event_edit', args=(event.pk,))
    response = self.client.get(edit_url)
    eq_(response.status_code, 200)
    ok_('Bla bla' in response.content)

    # Mark the tweet as successfully sent.
    tweet.tweet_id = '1234567890'
    tweet.sent_date = (
        timezone.now() -
        datetime.timedelta(days=1)
    )
    tweet.save()
    response = self.client.get(url)
    eq_(response.status_code, 200)
    ok_('Bla bla' in response.content)
    # A sent tweet links to its page on Twitter and shows its sent date.
    ok_(
        'https://twitter.com/%s/status/1234567890'
        % settings.TWITTER_USERNAME
        in response.content
    )
    ok_(
        js_date(tweet.sent_date.replace(microsecond=0))
        in response.content
    )

    # Now mark the tweet as failed.
    tweet.tweet_id = None
    tweet.error = "Some error"
    tweet.save()
    response = self.client.get(url)
    eq_(response.status_code, 200)
    ok_('Bla bla' in response.content)
    # No Twitter link for a failed tweet...
    ok_(
        'https://twitter.com/%s/status/1234567890'
        % settings.TWITTER_USERNAME
        not in response.content
    )
    ok_(
        js_date(tweet.sent_date.replace(microsecond=0))
        in response.content
    )
    # ...but the failure is surfaced.
    ok_('Failed to send' in response.content)
def test_all_event_tweets_states(self):
    """The all-tweets JSON endpoint exposes the same lifecycle states
    (needs-approval, sent, failed) plus the creator's email."""
    event = Event.objects.get(title='Test event')
    assert event in Event.objects.approved()
    # Creating a pending Approval makes the event unapproved again.
    group = Group.objects.create(name='testapprover')
    Approval.objects.create(
        event=event,
        group=group,
    )
    assert event not in Event.objects.approved()

    url = reverse('manage:all_event_tweets_data')
    response = self.client.get(url)
    eq_(response.status_code, 200)

    tweet = EventTweet.objects.create(
        event=event,
        text='Bla bla',
        send_date=timezone.now(),
    )
    response = self.client.get(url)
    eq_(response.status_code, 200)
    data = json.loads(response.content)
    # Exactly one tweet is expected in the payload (tuple unpacking
    # doubles as a length check).
    first_tweet, = data['tweets']
    eq_(first_tweet['text'], 'Bla bla')
    ok_(first_tweet['event']['_needs_approval'])

    # also check that 'Bla bla' is shown on the Edit Event page
    edit_url = reverse('manage:event_edit', args=(event.pk,))
    response = self.client.get(edit_url)
    eq_(response.status_code, 200)
    ok_('Bla bla' in response.content)

    # Mark the tweet as successfully sent.
    tweet.tweet_id = '1234567890'
    tweet.sent_date = timezone.now() - datetime.timedelta(days=1)
    tweet.save()
    response = self.client.get(url)
    eq_(response.status_code, 200)
    data = json.loads(response.content)
    first_tweet, = data['tweets']
    tweet_url = (
        'https://twitter.com/%s/status/1234567890'
        % settings.TWITTER_USERNAME
    )
    eq_(first_tweet['full_tweet_url'], tweet_url)

    # A failed tweet loses its Twitter URL in the payload.
    tweet.tweet_id = None
    tweet.error = "Some error"
    tweet.save()
    response = self.client.get(url)
    eq_(response.status_code, 200)
    data = json.loads(response.content)
    first_tweet, = data['tweets']
    ok_('full_tweet_url' not in first_tweet)

    # 'creator' only appears once set on the tweet.
    ok_('creator' not in first_tweet)
    assert self.user.email
    tweet.creator = self.user
    tweet.save()
    response = self.client.get(url)
    eq_(response.status_code, 200)
    data = json.loads(response.content)
    first_tweet, = data['tweets']
    eq_(first_tweet['creator'], {'email': self.user.email})
@mock.patch('airmozilla.manage.views.events.send_tweet')
def test_force_send_now(self, mocked_send_tweet):
    """POSTing 'send' triggers an immediate send of the tweet."""
    event = Event.objects.get(title='Test event')
    tweet = EventTweet.objects.create(
        event=event,
        text='Bla bla',
        send_date=timezone.now(),
    )

    def fake_send(event_tweet):
        # Pretend the tweet went out and got assigned an id.
        event_tweet.tweet_id = '1234567890'
        event_tweet.save()

    mocked_send_tweet.side_effect = fake_send

    response = self.client.post(
        reverse('manage:event_tweets', args=(event.pk,)),
        {'send': tweet.pk},
    )
    eq_(response.status_code, 302)
    eq_(EventTweet.objects.get(pk=tweet.pk).tweet_id, '1234567890')
def test_view_tweet_error(self):
    """POSTing 'error' returns the tweet's stored error as plain text."""
    event = Event.objects.get(title='Test event')
    failed_tweet = EventTweet.objects.create(
        event=event,
        text='Bla bla',
        send_date=timezone.now(),
        error='Crap!'
    )
    response = self.client.post(
        reverse('manage:event_tweets', args=(event.pk,)),
        {'error': failed_tweet.pk},
    )
    eq_(response.status_code, 200)
    eq_(response['content-type'], 'text/plain')
    ok_('Crap!' in response.content)
def test_cancel_event_tweet(self):
    """POSTing 'cancel' deletes the pending tweet entirely."""
    event = Event.objects.get(title='Test event')
    pending = EventTweet.objects.create(
        event=event,
        text='Bla bla',
        send_date=timezone.now(),
    )
    response = self.client.post(
        reverse('manage:event_tweets', args=(event.pk,)),
        {'cancel': pending.pk},
    )
    eq_(response.status_code, 302)
    ok_(not EventTweet.objects.all().count())
def test_create_event_tweet_with_location_timezone(self):
    """A send time entered in the event location's timezone is stored
    as UTC on the EventTweet."""
    event = Event.objects.get(title='Test event')
    event.location = Location.objects.create(
        name='Paris',
        timezone='Europe/Paris'
    )
    event.save()

    # the event must have a real placeholder image
    with open(self.placeholder) as fp:
        response = self.client.post(
            reverse('manage:event_edit', args=(event.pk,)),
            dict(self.event_base_data,
                 title=event.title,
                 short_description="Check out <b>This!</b>",
                 description="Something longer",
                 placeholder_img=fp)
        )
    assert response.status_code == 302, response.status_code

    url = reverse('manage:new_event_tweet', args=(event.pk,))
    now = datetime.datetime.utcnow()
    # The form's send_date is interpreted in the event's timezone
    # (Europe/Paris), so ask for "noon".
    response = self.client.post(url, {
        'text': 'Bla bla #tag',
        'include_placeholder': True,
        'send_date': now.strftime('%Y-%m-%d 12:00'),
    })
    eq_(response.status_code, 302)

    event_tweet, = EventTweet.objects.all()
    # we specified it as noon in Paris, but the save time
    # will be UTC
    ok_(event_tweet.send_date.hour != 12)
    assert event_tweet.send_date.strftime('%Z') == 'UTC'
| 34.783529
| 75
| 0.575797
|
4a044707b5aab87dfb243a87b64f27e4829c1a61
| 1,904
|
py
|
Python
|
worldmap/utils/zip_extract.py
|
jvzsec/worldmap
|
437364e3814e6775d3b25da64afb583a4d6dbd6c
|
[
"MIT"
] | 1
|
2022-02-18T00:13:30.000Z
|
2022-02-18T00:13:30.000Z
|
worldmap/utils/zip_extract.py
|
jvzsec/worldmap
|
437364e3814e6775d3b25da64afb583a4d6dbd6c
|
[
"MIT"
] | 3
|
2020-03-29T22:24:39.000Z
|
2022-03-12T11:03:24.000Z
|
worldmap/utils/zip_extract.py
|
jvzsec/worldmap
|
437364e3814e6775d3b25da64afb583a4d6dbd6c
|
[
"MIT"
] | 3
|
2020-03-22T14:49:52.000Z
|
2021-04-28T13:27:05.000Z
|
""" This function extracts the content of a zipfile into a tmp directory
A= zip_extract(path_of_file, <optional>)
INPUT:
path_of_file: String, e.g.,
'./my_directory/deeper/myfile.zip'
OPTIONAL
verbose: Boolean [True,False]
False: No (default)
True: Yes
OUTPUT
output
DESCRIPTION
Extracts the content of a zipfile into a tmp directory
A = extract('./mydir/files.zip')
"""
#--------------------------------------------------------------------------
# Version : 1.0
# Author : E.Taskesen
# Contact : erdogant@gmail.com
#--------------------------------------------------------------------------
#from matplotlib.pyplot import plot
import os
import zipfile
#%%
def zip_extract(path_of_file, unpack=True, verbose=3):
    """Extract a zip archive into the directory it resides in.

    Parameters:
    path_of_file -- path to the archive, e.g. './mydir/myfile.zip'
    unpack -- when True (default) the archive is actually extracted;
              when False only the path bookkeeping is performed
    verbose -- verbosity level; progress is printed when >= 3

    Returns a dict with keys:
    'dir' (directory of the archive), 'file' (archive file name),
    'file_clean' (file name up to the first dot), 'path' (input path).
    """
    out = dict()

    # Split the input path into directory and file name.
    pathname, filenameRAW = os.path.split(path_of_file)
    # Take the name up to the first dot. str.partition is safe for
    # names without a dot; the old `filenameRAW[:filenameRAW.find('.')]`
    # dropped the last character in that case because find() returns -1.
    filename = filenameRAW.partition('.')[0]

    # Make sure the target directory exists. makedirs also creates
    # missing intermediate directories; an empty pathname means the
    # archive sits in the current working directory (nothing to create).
    if pathname and not os.path.isdir(pathname):
        os.makedirs(pathname)
        if verbose >= 3:
            print('[EXTRACT FILES] Directory is created: %s' % pathname)
    else:
        if verbose >= 3:
            print('[EXTRACT FILES] Directory already exists and will be used: %s' % pathname)

    # Extracting files
    if unpack:
        if verbose >= 3:
            print('[EXTRACT FILES] Extracting %s..' % (filenameRAW))
        # Context manager guarantees the archive handle is closed even
        # when extraction fails.
        with zipfile.ZipFile(path_of_file, 'r') as zip_ref:
            zip_ref.extractall(pathname)

    # Return info
    out['dir'] = pathname
    out['file'] = filenameRAW
    out['file_clean'] = filename
    out['path'] = path_of_file
    if verbose >= 3:
        print('[EXTRACT FILES] Done!')
    return out
| 27.594203
| 113
| 0.578256
|
4a0447e2337b554ed6c6cc3472497ba114121ec8
| 967
|
py
|
Python
|
src/olympia/translations/tests/test_widgets.py
|
Osmose/olympia
|
774c3b927ec05ef971e4206e2669b4291b8b4f17
|
[
"BSD-3-Clause"
] | 1
|
2020-04-07T07:21:25.000Z
|
2020-04-07T07:21:25.000Z
|
src/olympia/translations/tests/test_widgets.py
|
Osmose/olympia
|
774c3b927ec05ef971e4206e2669b4291b8b4f17
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/translations/tests/test_widgets.py
|
Osmose/olympia
|
774c3b927ec05ef971e4206e2669b4291b8b4f17
|
[
"BSD-3-Clause"
] | 2
|
2018-03-04T00:11:22.000Z
|
2019-12-14T09:45:55.000Z
|
from pyquery import PyQuery as pq
from olympia.amo.tests import TestCase
from olympia.translations import models, widgets
class TestWidget(TestCase):

    def test_avoid_purified_translation(self):
        """Rendering a LinkifiedTranslation must not escape its HTML."""
        # Even if we pass in a LinkifiedTranslation the widget switches to a
        # normal Translation before rendering.
        textarea_widget = widgets.TransTextarea.widget()
        translation = models.LinkifiedTranslation(
            localized_string='<b>yum yum</b>', locale='fr', id=10)
        translation.clean()
        rendered = textarea_widget.render('name', translation)
        assert pq(rendered).html().strip() == '<b>yum yum</b>'

    def test_default_locale(self):
        """The textarea's lang attribute follows default_locale."""
        textarea = widgets.TransTextarea()
        rendered = textarea.render('name', '')
        assert pq(rendered)('textarea:not([lang=init])').attr('lang') == 'en-us'

        textarea.default_locale = 'pl'
        rendered = textarea.render('name', '')
        assert pq(rendered)('textarea:not([lang=init])').attr('lang') == 'pl'
| 35.814815
| 78
| 0.611169
|
4a0449ab42da70dcfbe3481b4f0dd2641d60d8a8
| 4,066
|
py
|
Python
|
tutorials/plot_brainstorm_phantom_elekta.py
|
britta-wstnr/mne-python
|
b69afd1ff3337ac84f219b26c53537a5c8ceb1b9
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/plot_brainstorm_phantom_elekta.py
|
britta-wstnr/mne-python
|
b69afd1ff3337ac84f219b26c53537a5c8ceb1b9
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T19:53:46.000Z
|
2020-10-29T19:53:46.000Z
|
tutorials/plot_brainstorm_phantom_elekta.py
|
Sinaxist/mne-python
|
33146156f2660f122ecc04fa0d5b3fd3c34b549e
|
[
"BSD-3-Clause"
] | 1
|
2017-12-05T05:13:56.000Z
|
2017-12-05T05:13:56.000Z
|
# -*- coding: utf-8 -*-
"""
==========================================
Brainstorm Elekta phantom tutorial dataset
==========================================
Here we compute the evoked from raw for the Brainstorm Elekta phantom
tutorial dataset. For comparison, see [1]_ and:
http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID
879716, 13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
print(__doc__)
###############################################################################
# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
# are read to construct instances of :class:`mne.io.Raw`.
# Path to the Brainstorm Elekta phantom dataset (downloaded on demand).
data_path = bst_phantom_elekta.data_path()

raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)

###############################################################################
# Data channel array consisted of 204 MEG planar gradiometers,
# 102 axial magnetometers, and 3 stimulus channels. Let's get the events
# for the phantom, where each dipole (1-32) gets its own event:

events = find_events(raw, 'STI201')
raw.plot(events=events)
# Mark a known-bad channel for this recording.
raw.info['bads'] = ['MEG2421']

###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds
# to save memory:

raw.plot_psd(tmax=60.)

###############################################################################
# Let's use Maxwell filtering to clean the data a bit.
# Ideally we would have the fine calibration and cross-talk information
# for the site of interest, but we don't, so we just do:

raw.fix_mag_coil_types()
raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))

###############################################################################
# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.

raw.filter(None, 40., fir_design='firwin')
raw.plot(events=events)

###############################################################################
# Now we epoch our data, average it, and look at the first dipole response.
# The first peak appears around 3 ms. Because we low-passed at 40 Hz,
# we can also decimate our data to save memory.

tmin, tmax = -0.1, 0.1
event_id = list(range(1, 33))
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),
                    decim=5, preload=True)
epochs['1'].average().plot()

###############################################################################
# Let's do some dipole fits. The phantom is properly modeled by a single-shell
# sphere with origin (0., 0., 0.). We compute covariance, then do the fits.

t_peak = 60e-3  # ~60 ms at the largest peak
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
cov = mne.compute_covariance(epochs, tmax=0)
# Build one "evoked" whose columns are the per-dipole responses at t_peak.
data = []
for ii in range(1, 33):
    evoked = epochs[str(ii)].average().crop(t_peak, t_peak)
    data.append(evoked.data[:, 0])
evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
del epochs, raw  # free memory before fitting
dip = fit_dipole(evoked, cov, sphere, n_jobs=1)[0]

###############################################################################
# Now we can compare to the actual locations, taking the difference in mm:

actual_pos = mne.dipole.get_phantom_dipoles()[0]
diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
print('Differences (mm):\n%s' % diffs[:, np.newaxis])
print('μ = %s' % (np.mean(diffs),))
| 38.358491
| 79
| 0.593458
|
4a044a65b116cdf1316c8d998185773ac39ec7d0
| 1,570
|
py
|
Python
|
tips/management/commands/createtips.py
|
Ichinga-Samuel/Python-Tips-API
|
65f5d6bae6fe1f9b45285cbdfd0410190b73ec6d
|
[
"MIT"
] | null | null | null |
tips/management/commands/createtips.py
|
Ichinga-Samuel/Python-Tips-API
|
65f5d6bae6fe1f9b45285cbdfd0410190b73ec6d
|
[
"MIT"
] | null | null | null |
tips/management/commands/createtips.py
|
Ichinga-Samuel/Python-Tips-API
|
65f5d6bae6fe1f9b45285cbdfd0410190b73ec6d
|
[
"MIT"
] | null | null | null |
import re
import csv
from django.core.management.base import BaseCommand, CommandError
from datetime import datetime as dt
from tips.models import Tips, Links, Tags
# Matches http(s) URLs embedded in the tweet text.
link = re.compile(r"\bhttp(?:s)?:\S*\b")
# Matches '#hashtag' words; used to derive Tag names.
tag = re.compile(r"#\b.+?\b")
class Command(BaseCommand):
    """Import tips from the 'Daily Python Tips.csv' export into the DB."""

    def add_arguments(self, parser):
        # No custom arguments yet; kept for future use.
        pass
        # parser.add_argument('path', nargs='+', type=str)

    def handle(self, *args, **options):
        """Parse the CSV and create Tips/Tags/Links records per row."""
        with open('Daily Python Tips.csv', 'r', newline='', encoding='utf-8') as fh:
            for row in csv.reader(fh, delimiter=','):
                # Timestamps appear both with and without seconds.
                try:
                    ts = dt.strptime(row[0], '%m/%d/%Y %H:%M:%S')
                except (TypeError, ValueError):
                    ts = dt.strptime(row[0], '%m/%d/%Y %H:%M')
                try:
                    tags = re.findall(tag, row[1])
                    links = re.findall(link, row[1])
                    tags = [t.strip('#').title() for t in tags]
                    tagmds = [Tags.objects.update_or_create(name=t, defaults={'name': t})[0] for t in tags]
                    tipmd = Tips.objects.create(timestamp=ts, tip=row[1], account=row[2].strip('@'), email=row[3])
                    for name in links:
                        Links.objects.update_or_create(name=name, tip=tipmd, defaults={'name': name, 'tip': tipmd})
                    tipmd.tags.add(*tagmds)
                    tipmd.save()
                except Exception as err:
                    # BaseCommand.stderr expects text; passing the exception
                    # object itself (as before) raises a TypeError.
                    self.stderr.write(str(err))
| 40.25641
| 144
| 0.506369
|
4a044b2de3ee7f6d19e778b1405927b3df36ee09
| 2,896
|
py
|
Python
|
parsers/pyBSRP/build/lib/bsrp/protocols/bysykkel.py
|
uky-transport-data-science/BSR_parsers
|
2d3a34b76481b94c74df8ab406340eb62aac28ab
|
[
"Apache-2.0"
] | 4
|
2019-12-27T13:33:33.000Z
|
2022-01-20T14:08:22.000Z
|
parsers/pyBSRP/build/lib/bsrp/protocols/bysykkel.py
|
uky-transport-data-science/BSR_parsers
|
2d3a34b76481b94c74df8ab406340eb62aac28ab
|
[
"Apache-2.0"
] | null | null | null |
parsers/pyBSRP/build/lib/bsrp/protocols/bysykkel.py
|
uky-transport-data-science/BSR_parsers
|
2d3a34b76481b94c74df8ab406340eb62aac28ab
|
[
"Apache-2.0"
] | 1
|
2021-03-16T16:20:40.000Z
|
2021-03-16T16:20:40.000Z
|
# gbfs.py
# Parser: General Bikeshare Feed Specification
import json, re, urllib2, requests
from bsrp import bsrputil
def scrape(df, apikey):
    """Download the station information and status feeds for one system.

    Parameters:
    df -- feed descriptor dict with 'feedurl', 'feedurl2' and 'bssid' keys
    apikey -- unused for this protocol; kept for parser interface parity

    Returns {'information': ..., 'status': ...} or False on failure.
    """
    # get the GBFS 'pointer' file that indicates paths to the key files
    try:
        info_req = requests.get(df['feedurl'])
        info_json = json.loads(info_req.text)
    except (requests.exceptions.RequestException, ValueError):
        # Previously urllib2.URLError was caught here, which requests
        # never raises, so network/JSON failures would propagate.
        print("Couldn't access info feed for " + df['bssid'] + ".")
        return False
    # Get the station statuses
    try:
        status_req = requests.get(df['feedurl2'])
        status_json = json.loads(status_req.text)
    except (requests.exceptions.RequestException, ValueError):
        print("Couldn't access station status for " + df['bssid'] + ".")
        return False
    # Return both parts
    return {'information': info_json, 'status': status_json}
def parse(df, data, utc):
    """Combine the information and status feeds into clean station rows.

    Parameters:
    df -- feed descriptor dict; only 'bssid' is used here (for logging)
    data -- dict with 'information' and 'status' payloads as returned
            by scrape()
    utc -- timestamp string used only for log output

    Returns a list of rows
    [stnid, lat, lng, docks+bikes, bikes, spaces, name, active]
    or False when no usable station data was found.
    """
    clean_stations_dict = dict()
    # Seed the dict with static station information.
    for stn in data['information']['stations']:
        clean_stations_dict[stn['id']] = {
            'stnid': stn['id'],
            'lat': stn['center']['latitude'],
            'lon': stn['center']['longitude'],
            'name': stn['title'].replace('\n', ''),
        }
    # Merge in live status. Two possible bad outcomes:
    # a) no status for a station, b) status for an unknown station.
    for stn in data['status']['stations']:
        try:
            clean_stations_dict[stn['id']]['bikes'] = stn['availability']['bikes']
        except KeyError:
            # Status refers to a station missing from the information
            # feed; drop it from consideration.
            continue
        clean_stations_dict[stn['id']]['docks'] = stn['availability']['locks']
        clean_stations_dict[stn['id']]['active'] = 'yes'
    # Drop stations that never received any status info. Iterate over a
    # snapshot of the keys: popping while iterating the live view
    # raises RuntimeError on Python 3.
    for stn_id in list(clean_stations_dict.keys()):
        if 'active' not in clean_stations_dict[stn_id]:
            # Expected for stations absent from the status feed.
            clean_stations_dict.pop(stn_id)
    # capture clean results in clean_stations_list
    # stnid, lat, lng, docks, bikes, spaces, name, active
    clean_stations_list = []
    for stn_id in clean_stations_dict:
        stn = clean_stations_dict[stn_id]
        clean_stations_list.append([
            stn['stnid'], stn['lat'], stn['lon'],
            int(stn['docks']) + int(stn['bikes']),
            stn['bikes'], stn['docks'], stn['name'], stn['active'],
        ])
    # check if we have some data
    if len(clean_stations_list) == 0:
        print(utc + ' ' + df['bssid'] + " Parser did not find any station's data.")
        return False
    return clean_stations_list
| 40.788732
| 170
| 0.618094
|
4a044c201f91a9b47184c03e6a3e7c25204549bd
| 908
|
py
|
Python
|
Python/Sorting/rooms.py
|
mukulgarg10/DataStruture-and-algroithms-program
|
f772db82cd41bbe8f1ae85eef8c0bdba02d716a2
|
[
"MIT"
] | 1
|
2021-11-09T10:46:57.000Z
|
2021-11-09T10:46:57.000Z
|
Python/Sorting/rooms.py
|
mukulgarg10/DataStruture-and-algroithms-program
|
f772db82cd41bbe8f1ae85eef8c0bdba02d716a2
|
[
"MIT"
] | 2
|
2021-10-07T20:06:45.000Z
|
2021-11-18T16:26:25.000Z
|
Python/Sorting/rooms.py
|
mukulgarg10/DataStruture-and-algroithms-program
|
f772db82cd41bbe8f1ae85eef8c0bdba02d716a2
|
[
"MIT"
] | 1
|
2022-02-25T19:06:08.000Z
|
2022-02-25T19:06:08.000Z
|
"""
Given an array of meeting time intervals consisting of
start and end times [[s1,e1],[s2,e2],...] (si < ei)],
determine if a person could attend all meetings.
Solution
We will sort all the meetings by start time,
then check if there is a case where one meeting starts before the previous meeting stops.
If such a case exists,
then it is not possible to attend all the meetings.
Otherwise, it is possible to attend all the meetings.
For example,
Given
[[0, 30],[5, 10],[15, 20]]
return false.
[[4,6],[7,10]
return true
"""
def canAttendMeetings(intervals):
    """Return True if none of the meeting intervals overlap.

    Sorts *intervals* in place by start time, then verifies every
    meeting ends no later than the next one begins.
    """
    intervals.sort(key=lambda interval: interval[0])
    return all(
        earlier[-1] <= later[0]
        for earlier, later in zip(intervals, intervals[1:])
    )
# Demo: the overlapping example prints False, the disjoint one True.
overlapping_example = [[0, 30], [5, 10], [15, 20]]
disjoint_example = [[4, 6], [7, 10]]
print(canAttendMeetings(overlapping_example))
print(canAttendMeetings(disjoint_example))
| 22.7
| 90
| 0.678414
|
4a044d401e11fb7f386b8f382c039031c5464e99
| 66,665
|
py
|
Python
|
mip.py
|
mkiol/MobileIP
|
85eb4262a02ab94a85d45d093564201586c6ffbd
|
[
"MIT"
] | 8
|
2017-01-13T15:58:19.000Z
|
2020-11-27T18:21:44.000Z
|
mip.py
|
mkiol/MobileIP
|
85eb4262a02ab94a85d45d093564201586c6ffbd
|
[
"MIT"
] | null | null | null |
mip.py
|
mkiol/MobileIP
|
85eb4262a02ab94a85d45d093564201586c6ffbd
|
[
"MIT"
] | 3
|
2018-09-10T17:07:46.000Z
|
2020-11-27T18:21:53.000Z
|
# The MIT License (MIT)
#
# Copyright (C) 2016 Michal Kosciesza <michal@mkiol.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Mobile IP implementation.
RFC 5944 implementation of the Mobile IP protocol, Home Agent and
Mobile Node Agent for Linux.
"""
import struct
import socket
import time
import hmac
import logging
import threading
import os
import sys
from ntplib import _to_int as timestamp_to_int
from ntplib import _to_frac as timestamp_to_frac
from ntplib import _to_time as timestamp_to_time
from ntplib import system_to_ntp_time, ntp_to_system_time
from pyroute2 import IPRoute, IPRouteRequest
from netaddr import IPAddress, IPNetwork
# Registration lifetime value meaning "infinity" (all ones in a 16-bit
# lifetime field).
INFINITE_LIFETIME = 65535

# Single module-wide pyroute2 netlink handle shared by the helpers below.
_ipr = IPRoute()

# Some Python builds do not expose SO_BINDTODEVICE; define it manually
# with the value this module relies on (25 -- the Linux constant; confirm
# before using on other platforms).
if not hasattr(socket, 'SO_BINDTODEVICE'):
    socket.SO_BINDTODEVICE = 25
def get_ifname(address):
    """Look up the interface holding *address*.

    Returns a tuple (interface name, subnet prefix length); both
    elements are None when no interface has that IP address assigned.
    """
    matches = _ipr.get_addr(address=address)
    if not matches:
        return (None, None)
    entry = matches[0]
    return (entry.get_attr("IFA_LABEL"), entry["prefixlen"])
def get_address(ifname):
    """Look up the IP configuration of the interface named *ifname*.

    Returns a tuple (IP address, subnet prefix length); both elements
    are None when no such interface (or no address) exists.
    """
    matches = _ipr.get_addr(label=ifname)
    if not matches:
        return (None, None)
    entry = matches[0]
    return (entry.get_attr("IFA_ADDRESS"), entry["prefixlen"])
def get_interfaces_states(interfaces=None):
    """Return a dict mapping interface name to "UP" or "DOWN".

    An interface counts as "UP" when it has at least one IPv4 address
    assigned. When *interfaces* is given, only those names are reported.
    """
    states = {}
    for link in _ipr.get_links():
        ifname = link.get_attr("IFLA_IFNAME")
        if interfaces is not None and ifname not in interfaces:
            continue
        has_ip = bool(_ipr.get_addr(family=socket.AF_INET, label=ifname))
        states[ifname] = "UP" if has_ip else "DOWN"
    return states
def _get_default_gw():
    """Return tuple (IP address, interface name, metric) that describes
    the first default gateway configured in the OS, or
    (None, None, None) when there is none."""
    dr_list = _ipr.get_default_routes(family=socket.AF_INET)
    if len(dr_list) > 0:
        dr = dr_list[0]
        ip = dr.get_attr("RTA_GATEWAY")
        oif = dr.get_attr("RTA_OIF")
        # Fixed: this previously read `dr.get_attr(...)` before `dr`
        # was ever bound, raising NameError on every call.
        met = dr.get_attr("RTA_PRIORITY")
        ifname = _ipr.get_links(oif)[0].get_attr("IFLA_IFNAME")
        return (ip, ifname, met)
    return (None, None, None)
def _get_default_gws():
    """Return a list of tuples (IP address, interface name, metric),
    one per default gateway configured in the OS."""
    gateways = []
    for route in _ipr.get_default_routes(family=socket.AF_INET):
        oif = route.get_attr("RTA_OIF")
        gateways.append((
            route.get_attr("RTA_GATEWAY"),
            _ipr.get_links(oif)[0].get_attr("IFLA_IFNAME"),
            route.get_attr("RTA_PRIORITY"),
        ))
    return gateways
def is_address_in_subnet(address, network):
    """Return True if given IP address belongs to given network."""
    return IPAddress(address) in IPNetwork(network)
def is_address_reachable(address):
    """Return True if given IP address belongs to any network configured
    on the OS interfaces (i.e. is directly reachable)."""
    links = _ipr.get_links()
    for link in links:
        ifname = link.get_attr("IFLA_IFNAME")
        # Renamed from `list`, which shadowed the builtin.
        addr_list = _ipr.get_addr(family=socket.AF_INET, label=ifname)
        for ipo in addr_list:
            ifaddress = ipo.get_attr("IFA_ADDRESS")
            ifprefixlen = ipo["prefixlen"]
            # A /0 prefix would match everything; skip it.
            if (ifprefixlen > 0 and
                    is_address_in_subnet(address, "%s/%s" % (ifaddress, ifprefixlen))):
                return True
    return False
def _is_route_exists(dst):
    """Return True when the OS route table already contains a route
    whose destination equals *dst* ("address/prefixlen" string)."""
    for route in _ipr.get_routes(family=socket.AF_INET):
        existing = "%s/%d" % (route.get_attr("RTA_DST"), route["dst_len"])
        if existing == dst:
            return True
    return False
def _add_route(dst, gw):
    """Add route entry to the OS route table.

    Parameters:
    dst -- destination: "default", "0.0.0.0" or "address/prefixlen"
    gw -- gateway IP address, an interface name, or "default" to use
          the currently configured default gateway

    Raises:
    Error -- when routing the default destination via the default
             gateway is requested, or no default gateway is known.
    """
    if dst == "default" or dst == "0.0.0.0":
        if gw == "default":
            # Would be circular: default dst via the default gateway.
            logging.error("Can't add default destination to default gateway.")
            raise Error("Can't add default destination to default gateway.")
    if gw == "default":
        # Resolve "default" to the address of the current default gateway.
        gw = _get_default_gw()[0]
        if gw is None:
            logging.error("Address of default gateway is unknown.")
            raise Error("Address of default gateway is unknown.")
    # Treat gw as an interface name when a link with that name exists.
    gw_is_dev = len(_ipr.link_lookup(ifname=gw)) > 0
    if not gw_is_dev:
        # A gateway address must be on a directly connected network.
        if not is_address_reachable(gw):
            logging.warning("Gateway address is not reachable. Not adding.")
            return
    if _is_route_exists(dst):
        # Replace semantics: drop the existing route first.
        logging.warning("Route for dst=%s already exists. " +
                        "Deleting existing route.", dst)
        _ipr.route("del", dst=dst)
    if dst == "default" or dst == "0.0.0.0":
        # Deleting all existing default routes
        _del_all_default_routes()
    # Adding new route
    logging.debug("Adding route: %s -> %s.", dst, gw)
    if gw_is_dev:
        # Device routes are added via the `ip` CLI; the pyroute2 call
        # below is only used for gateway-address routes.
        os.system("ip route add %s dev %s" % (dst, gw))
    else:
        _ipr.route("add", dst=dst, gateway=gw)
def _del_all_default_routes():
    """Delete every default route entry from the OS route table."""
    for ip, ifname, met in _get_default_gws():
        # Four shapes of the `ip route del` command depending on which
        # of gateway address / metric are known for the entry.
        if ip is None and met is None:
            logging.debug("Deleting default route via %s interface.", ifname)
            os.system("ip route del default dev %s" % ifname)
        elif ip is None:
            logging.debug("Deleting default route via %s interface with metric %d.", ifname, met)
            os.system("ip route del default dev %s metric %d" % (ifname, met))
        elif met is None:
            logging.debug("Deleting default route to %s via %s interface.", ip, ifname)
            os.system("ip route del default via %s dev %s" % (ip, ifname))
        else:
            logging.debug("Deleting default route to %s via %s interface with metric %d.", ip, ifname, met)
            os.system("ip route del default via %s dev %s metric %d" % (ip, ifname, met))
def _del_route(dst, gw=None):
    """Delete a route entry from the OS route table.

    When *gw* is an existing interface name the `ip` CLI is used;
    when it is a gateway address the netlink API is used instead.
    """
    if gw is None:
        logging.debug("Deleting route: %s", dst)
        os.system("ip route del %s" % dst)
        return
    logging.debug("Deleting route: %s -> %s.", dst, gw)
    if _ipr.link_lookup(ifname=gw):
        os.system("ip route del %s dev %s" % (dst, gw))
    else:
        _ipr.route("del", dst=dst, gateway=gw)
def _create_tunnel(name, ip, gre_local, gre_remote, route_dst=None):
    """Create GRE tunnel interface with given name and IP address.

    Parameters:
    name -- name for the new GRE interface
    ip -- IP address to assign to the new interface
    gre_local -- local tunnel endpoint address
    gre_remote -- remote tunnel endpoint address
    route_dst -- optional destination to route via the new interface
    """
    logging.debug("Creating %s interface.", name)
    _ipr.link("add", ifname=name, kind="gre",
              gre_local=gre_local,
              gre_remote=gre_remote,
              gre_ttl=255)
    logging.debug("Assigning %s address to %s interface.", ip, name)
    index = _ipr.link_lookup(ifname=name)[0]
    # Keep the link down while assigning the address, then bring it up.
    _ipr.link("set", index=index, state="down")
    _ipr.addr("add", index=index, address=ip)
    _ipr.link("set", index=index, state="up")
    if route_dst is not None:
        # Adding new route
        _add_route(route_dst, name)
def _create_interface(name, ip, route_dst=None):
    """Create dummy interface with given name and IP address.

    Parameters:
    name -- name for the new dummy interface
    ip -- IP address to assign to the new interface
    route_dst -- optional destination to route via the new interface
    """
    logging.debug("Creating %s interface.", name)
    _ipr.link("add", ifname=name, kind="dummy")
    logging.debug("Assigning %s address to %s interface.", ip, name)
    index = _ipr.link_lookup(ifname=name)[0]
    # Keep the link down while assigning the address, then bring it up.
    _ipr.link("set", index=index, state="down")
    _ipr.addr("add", index=index, address=ip)
    _ipr.link("set", index=index, state="up")
    if route_dst is not None:
        # Adding new route
        _add_route(route_dst, name)
def _destroy_interface(name):
    """Destroy interface with given name.

    Before removing the link, delete every route that points at one of
    the interface's addresses or leaves through the interface itself.
    """
    links = _ipr.link_lookup(ifname=name)
    if len(links) == 0:
        logging.warning("Can't destroy %s interface. It doesn't exist.", name)
        return
    index = links[0]
    # IP addresses assigned to interface
    ip_list = _ipr.get_addr(family=socket.AF_INET, label=name)
    for ipo in ip_list:
        ip = ipo.get_attr("IFA_ADDRESS")
        # Deleting routes whose gateway is this address.
        route_list = _ipr.get_routes(family=socket.AF_INET, gateway=ip)
        for route in route_list:
            rip = route.get_attr("RTA_DST")  # route["dst_len"] <- mask
            if rip is not None:
                _del_route("%s/%d" % (rip, route["dst_len"]), ip)
        # scope=253: link-scope routes (presumably RT_SCOPE_LINK --
        # confirm against rtnetlink constants) leaving via this link.
        route_list = _ipr.get_routes(family=socket.AF_INET, scope=253)
        for route in route_list:
            if route.get_attr("RTA_OIF") == index:
                rip = route.get_attr("RTA_DST")  # route["dst_len"] <- mask
                if rip is not None:
                    _del_route("%s/%d" % (rip, route["dst_len"]), name)
    # Deleting interface
    logging.debug("Destroying %s interface.", name)
    _ipr.link("set", index=index, state="down")
    _ipr.link("del", index=index)
def _destroy_interfaces(name_prefix):
    """Destroy all interfaces whose name starts with *name_prefix*."""
    for link in _ipr.get_links():
        name = link.get_attr('IFLA_IFNAME')
        # Fixed: `name[0:3] == name_prefix` only worked for prefixes of
        # exactly three characters; startswith matches any length.
        if name.startswith(name_prefix):
            _destroy_interface(name)
def get_ip_forward():
    """Return True if IP-Forward is enabled in the OS."""
    with open("/proc/sys/net/ipv4/ip_forward", "r") as f:
        value = f.read(1) == "1"
    logging.debug("IP forward is %s.", "enabled" if value else "disabled")
    return value
def set_ip_forward(value):
    """Enable or disable IP-Forward in the OS."""
    flag = "1\n" if value else "0\n"
    with open("/proc/sys/net/ipv4/ip_forward", "w") as f:
        f.write(flag)
    logging.debug("IP forward has been %s.", "enabled" if value else "disabled")
def get_proxy_arp(ifname):
    """Return True if Proxy-ARP is enabled in the OS for the interface
    named *ifname*."""
    path = "/proc/sys/net/ipv4/conf/%s/proxy_arp" % ifname
    with open(path, "r") as f:
        value = f.read(1) == "1"
    logging.debug("Proxy ARP for %s interface is %s.", ifname,
                  "enabled" if value else "disabled")
    return value
def set_proxy_arp(ifname, value):
    """Enable or disable Proxy-ARP for the interface named *ifname*."""
    path = "/proc/sys/net/ipv4/conf/%s/proxy_arp" % ifname
    flag = "1\n" if value else "0\n"
    with open(path, "w") as f:
        f.write(flag)
    logging.debug("Proxy ARP for %s interface has been %s.", ifname,
                  "enabled" if value else "disabled")
def set_proxy_arp_for_all(value):
    """Enable or disable Proxy-ARP for every interface that is UP."""
    for link in _ipr.get_links():
        if link.get_attr("IFLA_OPERSTATE") != "UP":
            continue
        set_proxy_arp(link.get_attr("IFLA_IFNAME"), value)
def ip_to_int(value):
    """Return the integer form of a dotted-quad IP address string."""
    packed = socket.inet_aton(value)
    return struct.unpack("!I", packed)[0]
def int_to_ip(value):
    """Convert given IP address in integer representation to dot notation."""
    raw = struct.pack("!I", value)
    return socket.inet_ntoa(raw)
def str_to_hex(string):
    """Convert given string to hex string (colon-separated byte values)."""
    pieces = ["{:02x}".format(ord(ch)) for ch in string]
    return ":".join(pieces)
class Error(Exception):
    """Unspecified exception raised by MIP module.

    Serves as the root of the module's exception hierarchy.
    """
    pass
class RegistrationFailed(Error):
    """Raised when a Mobile Node Agent registration attempt fails."""
    pass
class Extension:
    """Mobile IP Extension class.

    Carries a type code, a payload length and optional raw payload
    bytes for a Mobile IP message extension.
    """

    # Known extension type codes.
    TYPE_MHAE = 32  # Mobile-Home Authentication Extension
    TYPE_MFAE = 33  # Mobile-Foreign Authentication Extension
    TYPE_FHAE = 34  # Foreign-Home Authentication Extension

    _TYPE_DESC_TABLE = {
        32: "Mobile-Home Authentication Extension",
        33: "Mobile-Foreign Authentication Extension",
        34: "Foreign-Home Authentication Extension"
    }

    def __init__(self, type, length, data=None):
        """MIP Extension constructor.

        Parameters:
        type -- type of extension (e.g. Extension.TYPE_MHAE)
        length -- lenght of data (number of bytes) in the extension
        data -- data in the extension (optional)
        """
        if data is not None:
            # Declared length must match the actual payload size.
            if len(data) != length:
                logging.error("Length of data is invalid.")
                raise Error("Length of data is invalid.")
        self.type = type
        self.length = length
        self.data = data

    def __str__(self):
        return "<MobileIP Extension, Type: %d, Length: %d>" % (
            self.type, self.length)
class MobileHomeAuthExtension(Extension):
    """Mobile IP Mobile-Home Authentication Extension class for 128-bit
    HMAC-MD5."""

    # Fixed payload size: 4-byte SPI + 16-byte HMAC-MD5 authenticator.
    _LENGTH = 20

    def __init__(self, spi, authenticator=None):
        """MHAE constructor.

        Parameters:
        spi -- SPI value
        authenticator -- Authentication data (optional)
        """
        Extension.__init__(self, Extension.TYPE_MHAE,
                           MobileHomeAuthExtension._LENGTH)
        self.spi, self.authenticator = spi, authenticator

    def __str__(self):
        return "<MobileIP Mobile-Home Auth Extension, SPI: %d>" % self.spi
class Packet:
    """Mobile IP packet base class.

    Concrete packets are RegRequestPacket and RegReplyPacket; this base
    class implements the extension handling (parsing, serialization and
    Mobile-Home Authentication Extension computation) shared by both.
    """
    TYPE_REG_REQUEST = 1
    TYPE_REG_REPLY = 3
    _TYPE_DESC_TABLE = {
        1: "Registration Request",
        3: "Registration Reply"
    }
    _FORMAT = "!B" # MIP packet format: first byte defines packet Type

    def __init__(self, type, extensions=None):
        """MIP packet constructor.

        Parameters:
        type -- type of MIP packet (e.g. Packet.TYPE_REG_REQUEST)
        extensions -- list of Extension instances (optional)
        """
        self.type = type
        self.extensions = [] if extensions is None else extensions

    def __str__(self):
        return "<MobileIP packet, Type: %i (%s), Extensions: %s>" % (
            self.type, Packet._TYPE_DESC_TABLE[self.type], self.extensions)

    def to_data(self):
        """Return byte array representation of the packet.

        Subclasses must override this; the base class has no wire format.
        """
        logging.error("Unable to get data.")
        raise Error("Unable to get data.")

    def _calculate_mhae(self, spi, key):
        """Create and return MobileHomeAuthExtension of this packet.

        The authenticator is HMAC-MD5 over the serialized packet
        (including currently attached extensions) followed by the MHAE
        type, length and SPI fields.
        """
        packed = self.to_data()
        extension = MobileHomeAuthExtension(spi)
        try:
            packed += struct.pack("!2BI", extension.type, extension.length, spi)
        except struct.error:
            logging.error("Invalid MIP Mobile-Home Auth Extension fields.")
            raise Error("Invalid MIP Mobile-Home Auth Extension fields.")
        extension.authenticator = hmac.new(key, packed).digest()
        return extension

    def add_mhae(self, spi, key):
        """Create and add MobileHomeAuthExtension of this packet
        with given SPI and KEY."""
        # Deleting existing MHAE (iterate over a copy while removing)
        for extension in self.extensions[:]:
            if extension.type == Extension.TYPE_MHAE:
                self.extensions.remove(extension)
        self.extensions.append(self._calculate_mhae(spi, key))

    def get_mhae(self):
        """Return MobileHomeAuthExtension of this packet, or None if
        the packet carries no MHAE."""
        for extension in self.extensions:
            if extension.type == Extension.TYPE_MHAE:
                return extension
        return None

    def verify_mhae(self, spi, key):
        """Return True if MobileHomeAuthExtension in this packet is valid
        for given SPI and KEY. Return False when no MHAE with a matching
        SPI is present."""
        mhae = None
        new_extensions = []
        for extension in self.extensions:
            if extension.type == Extension.TYPE_MHAE and extension.spi == spi:
                mhae = extension
                break
            new_extensions.append(extension)
        if mhae is None:
            # Bug fix: previously `mhae` stayed unbound here and the
            # final comparison raised NameError instead of failing
            # verification cleanly.
            return False
        # The authenticator only covers extensions preceding the MHAE,
        # so recompute it with the MHAE and trailing extensions stripped.
        old_extensions = self.extensions
        self.extensions = new_extensions
        try:
            authenticator = self._calculate_mhae(spi, key).authenticator
        finally:
            # Always restore the original extension list, even if
            # serialization fails.
            self.extensions = old_extensions
        return mhae.authenticator == authenticator

    @staticmethod
    def from_data(data):
        """Create and return MIP packet based on given byte data.

        Dispatches on the first byte (packet Type) to the matching
        subclass parser. Raises Error for malformed or unknown data.
        """
        try:
            unpacked = struct.unpack(Packet._FORMAT,
                                     data[0:struct.calcsize(Packet._FORMAT)])
        except struct.error:
            logging.error("Invalid MIP packet.")
            raise Error("Invalid MIP packet.")
        if unpacked[0] == Packet.TYPE_REG_REQUEST:
            return RegRequestPacket.from_data(data)
        if unpacked[0] == Packet.TYPE_REG_REPLY:
            return RegReplyPacket.from_data(data)
        logging.error("Unknown MIP packet type.")
        raise Error("Unknown MIP packet type.")

    @staticmethod
    def _extensions_from_data(data):
        """Create and return list of Extension instances based on
        given byte data (sequence of type/length/value records)."""
        extensions = []
        i = 0
        while i < len(data):
            try:
                unpacked = struct.unpack("!2B", data[i:i+2])
            except struct.error:
                logging.error("Invalid MIP Extension data.")
                raise Error("Invalid MIP Extension data.")
            # Renamed from `type`/`length` to avoid shadowing builtins.
            ext_type = unpacked[0]
            ext_length = unpacked[1]
            if ext_type == Extension.TYPE_MHAE:
                try:
                    unpacked = struct.unpack("!I", data[i+2:i+2+4])
                except struct.error:
                    logging.error("Invalid MIP Mobile-Home Auth Extension data.")
                    raise Error("Invalid MIP Mobile-Home Auth Extension data.")
                spi = unpacked[0]
                authenticator = data[i+2+4:i+2+ext_length]
                extensions.append(MobileHomeAuthExtension(spi,
                    authenticator=authenticator))
            else:
                extensions.append(Extension(ext_type, ext_length,
                    data[i+2:i+2+ext_length]))
            i += 2+ext_length
        return extensions

    def _extensions_to_data(self, packed):
        """Append the serialized extensions to `packed` and return it."""
        for extension in self.extensions:
            if isinstance(extension, MobileHomeAuthExtension):
                try:
                    packed += struct.pack("!2BI", extension.type,
                        extension.length, extension.spi)
                    # Authenticator occupies the remaining length-4 bytes.
                    packed += extension.authenticator[0:extension.length-4]
                except struct.error:
                    logging.error("Invalid MIP Mobile-Home Auth Extension fields.")
                    raise Error("Invalid MIP Mobile-Home Auth Extension fields.")
            else:
                try:
                    packed += struct.pack("!2B", extension.type,
                        extension.length) + extension.data[0:extension.length]
                except struct.error:
                    logging.error("Invalid MIP Extension fields.")
                    raise Error("Invalid MIP Extension fields.")
        return packed
class RegRequestPacket(Packet):
    """Mobile IP Registration Request packet class."""
    # Flag bits carried in the second byte of the request on the wire.
    FLAG_S = 0B10000000 # Simultaneous bindings
    FLAG_B = 0B01000000 # Broadcast datagrams
    FLAG_D = 0B00100000 # Decapsulation by mobile node
    FLAG_M = 0B00010000 # Minimal encapsulation
    FLAG_G = 0B00001000 # GRE encapsulation
    FLAG_r = 0B00000100 # reserved
    FLAG_T = 0B00000010 # Reverse Tunneling requested
    FLAG_x = 0B00000001 # reserved
    # Maps each flag bit to the single letter used in __str__ output.
    _FLAG_DESC_TABLE = {
        0B10000000: "S",
        0B01000000: "B",
        0B00100000: "D",
        0B00010000: "M",
        0B00001000: "G",
        0B00000100: "r",
        0B00000010: "T",
        0B00000001: "x"
    }
    # Wire layout: Type (B), Flags (B), Lifetime (H), then 5 32-bit
    # words: Home address, Home agent, Care-of address, Identification
    # integer part, Identification fractional part.
    _FORMAT = "!2B H 5I"
    def _print_flags_desc(self):
        """Return string of letters for all flags set in this request."""
        desc = ""
        for key, value in RegRequestPacket._FLAG_DESC_TABLE.iteritems():
            desc += value if self.flags & key else ""
        return desc
    def __init__(
        self,
        flags,
        lifetime,
        home_address,
        home_agent,
        care_of_address,
        identification = None, # timestamp
        extensions = None
    ):
        """MIP Registration Request constructor.
        Parameters:
        flags -- flags that will be included in to request
        lifetime -- Lifetime value
        home_address -- Home IP address (dot notation)
        home_agent -- Home Agent IP address (dot notation)
        care_of_address -- Care-of IP address (dot notation)
        identification -- Identification value
        extensions -- list of Extension instances
        """
        Packet.__init__(self, Packet.TYPE_REG_REQUEST, extensions)
        self.flags = flags
        self.lifetime = lifetime
        self.home_address = home_address
        self.home_agent = home_agent
        self.care_of_address = care_of_address
        # Default identification is the current time as an NTP timestamp.
        self.identification = (system_to_ntp_time(time.time())
            if identification is None else identification)
        self.expiration_date = 0 # timestamp when binding will expire
    def __str__(self):
        return ("<MobileIP Reg Request, Flags: %d (%s), Lifetime: %d, " +
            "Home address: %s, Home agent: %s, Care-of address: %s, " +
            "Identification: %f, Extensions: %s>") % (
                self.flags,
                self._print_flags_desc(),
                self.lifetime,
                self.home_address,
                self.home_agent,
                self.care_of_address,
                self.identification,
                self.extensions
            )
    def is_update_request(self, reg_req_packet):
        """Return True if given RegRequestPacket is an update.

        A request is an update when home address, home agent and
        care-of address all match this request.
        """
        return (self.home_address == reg_req_packet.home_address and
            self.home_agent == reg_req_packet.home_agent and
            self.care_of_address == reg_req_packet.care_of_address)
    def update_identification(self):
        """Update Identification value in the request."""
        self.identification = system_to_ntp_time(time.time())
    @staticmethod
    def from_data(data):
        """Create and return RegRequestPacket based on given byte data.

        Raises Error when the fixed header cannot be unpacked.
        """
        try:
            unpacked = struct.unpack(
                RegRequestPacket._FORMAT,
                data[0:struct.calcsize(RegRequestPacket._FORMAT)])
        except struct.error:
            logging.error("Invalid MIP Registration Request packet.")
            raise Error("Invalid MIP Registration Request packet.")
        # Everything after the fixed-size header is extension records.
        extensions = Packet._extensions_from_data(
            data[struct.calcsize(RegRequestPacket._FORMAT):len(data)])
        # unpacked[0] is the packet Type, implied by this class.
        return RegRequestPacket(
            unpacked[1],
            unpacked[2],
            int_to_ip(unpacked[3]),
            int_to_ip(unpacked[4]),
            int_to_ip(unpacked[5]),
            timestamp_to_time(unpacked[6], unpacked[7]),
            extensions
        )
    def to_data(self):
        """Return byte array representation.

        Raises Error when any field does not fit the wire format.
        """
        try:
            packed = struct.pack(RegRequestPacket._FORMAT,
                self.type,
                self.flags,
                self.lifetime,
                ip_to_int(self.home_address),
                ip_to_int(self.home_agent),
                ip_to_int(self.care_of_address),
                timestamp_to_int(self.identification),
                timestamp_to_frac(self.identification)
            )
        except struct.error:
            logging.error("Invalid Registration Request packet fields.")
            raise Error("Invalid Registration Request packet fields.")
        return self._extensions_to_data(packed)
class RegReplyPacket(Packet):
    """Mobile IP Registration Reply packet class."""
    # Reply code constants used by this module; the full table below
    # also covers codes that may be received but are never sent here.
    CODE_ACCEPTED = 0
    CODE_DENIED_BY_FA = 64
    CODE_DENIED_BY_HA = 128
    CODE_MN_FAILED_AUTH = 131
    CODE_IDENT_MISMATCH = 133
    # Human-readable descriptions for reply codes (used in __str__).
    _CODE_DESC_TABLE = {
        0: "Registration accepted",
        1: "Registration accepted, mobility bindings unsupported",
        64: "Reason unspecified",
        65: "Administratively prohibited",
        66: "Insufficient resources",
        67: "Mobile node failed authentication",
        68: "Home agent failed authentication",
        69: "Requested Lifetime too long",
        70: "Poorly formed Request",
        71: "Poorly formed Reply",
        72: "Requested encapsulation unavailable",
        73: "Reserved and unavailable",
        77: "Invalid care-of address",
        78: "Registration timeout",
        80: "Home network unreachable (ICMP error received)",
        81: "Home agent host unreachable (ICMP error received)",
        82: "Home agent port unreachable (ICMP error received)",
        88: "Home agent unreachable (other ICMP error received)",
        194: "Invalid Home Agent Address",
        128: "Reason unspecified",
        129: "Administratively prohibited",
        130: "Insufficient resources",
        131: "Mobile node failed authentication",
        132: "Foreign agent failed authentication",
        133: "Registration Identification mismatch",
        134: "Poorly formed Request",
        135: "Too many simultaneous mobility bindings",
        136: "Unknown home agent address"
    }
    # Wire layout: Type (B), Code (B), Lifetime (H), then 4 32-bit
    # words: Home address, Home agent, Identification integer part,
    # Identification fractional part.
    _FORMAT = "!2B H 4I"
    def __init__(
        self,
        code,
        lifetime,
        home_address,
        home_agent,
        identification,
        extensions = None,
    ):
        """MIP Registration Reply constructor.
        Parameters:
        code -- code of the reply (e.g. RegReplyPacket.CODE_ACCEPTED)
        lifetime -- Lifetime value
        home_address -- Home IP address (dot notation)
        home_agent -- Home Agent IP address (dot notation)
        identification -- Identification value
        extensions -- list of Extension instances
        """
        Packet.__init__(self, Packet.TYPE_REG_REPLY, extensions)
        self.code = code
        self.lifetime = lifetime
        self.home_address = home_address
        self.home_agent = home_agent
        self.identification = identification
        self.expiration_date = 0 # timestamp when binding will expire
    def __str__(self):
        return ("<MobileIP Reg Reply, Code: %d (%s), Lifetime: %d, " +
            "Home address: %s, Home agent: %s, Identification: %f, " +
            "Extensions: %s>") % (
                self.code,
                RegReplyPacket._CODE_DESC_TABLE[self.code],
                self.lifetime,
                self.home_address,
                self.home_agent,
                self.identification,
                self.extensions
            )
    @staticmethod
    def from_data(data):
        """Create and return RegReplyPacket based on given byte data.

        Raises Error when the fixed header cannot be unpacked.
        """
        try:
            unpacked = struct.unpack(
                RegReplyPacket._FORMAT,
                data[0:struct.calcsize(RegReplyPacket._FORMAT)])
        except struct.error:
            logging.error("Invalid MIP Registration Reply packet.")
            raise Error("Invalid MIP Registration Reply packet.")
        # Everything after the fixed-size header is extension records.
        extensions = Packet._extensions_from_data(
            data[struct.calcsize(RegReplyPacket._FORMAT):len(data)])
        # unpacked[0] is the packet Type, implied by this class.
        return RegReplyPacket(
            unpacked[1],
            unpacked[2],
            int_to_ip(unpacked[3]),
            int_to_ip(unpacked[4]),
            timestamp_to_time(unpacked[5], unpacked[6]),
            extensions
        )
    def to_data(self):
        """Return byte array representation.

        Raises Error when any field does not fit the wire format.
        """
        try:
            packed = struct.pack(RegReplyPacket._FORMAT,
                self.type,
                self.code,
                self.lifetime,
                ip_to_int(self.home_address),
                ip_to_int(self.home_agent),
                timestamp_to_int(self.identification),
                timestamp_to_frac(self.identification)
            )
        except struct.error:
            logging.error("Invalid MIP Registration Reply packet fields.")
            raise Error("Invalid MIP Registration Reply packet fields.")
        return self._extensions_to_data(packed)
class _BindingChecker(threading.Thread):
    """Binding checker class.

    Background daemon thread that periodically scans the registration
    binding table and invokes a handler for every binding whose
    lifetime has expired.
    """

    _SLEEP_TIME = 1  # seconds between table scans

    def __init__(self, lock, binding_table, lifetime_expired_handler):
        """Binding checker constructor.

        Parameters:
        lock -- lock guarding access to binding_table
        binding_table -- dict mapping home addresses to RegRequestPackets
        lifetime_expired_handler -- callable invoked with each expired
            RegRequestPacket
        """
        threading.Thread.__init__(self)
        # Equivalent to setDaemon(True); works on Python 2.6+ and 3.
        self.daemon = True
        self.binding_table = binding_table
        self.lifetime_expired_handler = lifetime_expired_handler
        self.active = False
        self.lock = lock

    def start(self):
        """Start the checker thread."""
        self.active = True
        threading.Thread.start(self)

    def stop(self):
        """Ask the checker thread to stop after the current scan."""
        if self.is_alive():
            self.active = False

    def run(self):
        while self.active:
            expired_packets = []
            self.lock.acquire()
            try:
                now = time.time()
                for packet in self.binding_table.values():
                    # expiration_date == 0 marks an infinite-lifetime
                    # binding (see HomeAgent._create_binding), which must
                    # never expire. Bug fix: the old `0 <= ...` test
                    # expired such bindings immediately.
                    if 0 < packet.expiration_date <= now:
                        expired_packets.append(packet)
            finally:
                self.lock.release()
            # Handler is called outside the lock to avoid deadlocks if
            # the handler takes the same lock.
            for packet in expired_packets:
                # Bug fix: the old code called the handler with the last
                # packet iterated in the table, not the expired one.
                self.lifetime_expired_handler(packet)
            time.sleep(_BindingChecker._SLEEP_TIME)
class _Timer(threading.Thread):
    """Call a function after a specified number of seconds.

    Unlike threading.Timer, exceptions raised by the function are
    logged and optionally forwarded to an exception handler callback.
    """

    def __init__(self, interval, function, exception_handler=None,
                 args=None, kwargs=None):
        """Timer constructor.

        Parameters:
        interval -- delay in seconds before calling function
        function -- callable to invoke
        exception_handler -- callable invoked with the exception if
            function raises (optional)
        args -- positional arguments for function (optional)
        kwargs -- keyword arguments for function (optional)
        """
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.exception_handler = exception_handler
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.finished = threading.Event()

    def cancel(self):
        """Stop the timer if it hasn't finished yet."""
        self.finished.set()

    def run(self):
        self.finished.wait(self.interval)
        if not self.finished.is_set():
            try:
                self.function(*self.args, **self.kwargs)
            # Bug fix: was py2-only "except Exception,e"; the "as"
            # form works on Python 2.6+ and 3.
            except Exception as e:
                logging.error("Exception has been thrown in the Timer thread.")
                logging.exception(e)
                if self.exception_handler is not None:
                    self.exception_handler(e)
        self.finished.set()
class MobileNodeAgent:
    """Mobile IP Mobile Node agent"""
    def __init__(self, mhae_spi, mhae_key, home_agent, home_address,
            interfaces,
            port=434,
            flags=(RegRequestPacket.FLAG_D |
                   RegRequestPacket.FLAG_G |
                   RegRequestPacket.FLAG_T),
            timeout=3,
            num_of_retr=2,
            rereg_time=0.8,
            wait_for_dereg_reply=True,
            ):
        """Mobile Node Agent constructor.
        Parameters:
        mhae_spi -- SPI value needed for MHAE calculation (decimal integer)
        mhae_key -- KEY value needed for MHAE calculation (string)
        home_agent -- Home Agent IP address (dot notation)
        home_address -- Home IP address (dot notation)
        interfaces -- dict that contains interface names as keys
                and default gateway IP addresses as values,
                e.g. {"eth0": "10.1.2.5", "wlan0": "10.1.3.5"}
        port -- Home Agent service UDP port number (default is 434)
        flags -- flags included in MIP requests
                (default is FLAG_D | FLAG_G | FLAG_T)
        timeout -- maximum waiting time (seconds) for
                the HA response (default is 3)
        num_of_retr -- number of request retries (default is 2)
        rereg_time -- requested time of reregistration,
                e.g. 0.5 means that reregistration will be
                after 0.5*lifetime (default is 0.8)
        wait_for_dereg_reply -- indicator if agent should wait for
                deregistration reply from HA (default is True)
        """
        # Only co-located care-of address is supported, so
        # D flag (Decapsulation by mobile node) is mandatory.
        # Only GRE tunnel is supported, so G flag is mandatory and M is not allowed.
        # Reverse Tunneling is mandatory, so T flag is mandatory
        if not flags & RegRequestPacket.FLAG_D:
            raise Error("D flag is not set but is mandatory.")
        if not flags & RegRequestPacket.FLAG_G:
            raise Error("G flag is not set but is mandatory.")
        if not flags & RegRequestPacket.FLAG_T:
            raise Error("T flag is not set but is mandatory.")
        if flags & RegRequestPacket.FLAG_M:
            raise Error("M flag is set but is not supported.")
        self.mhae_spi = mhae_spi
        self.mhae_key = mhae_key
        self.home_agent = home_agent
        self.home_address = home_address
        self.port = port
        self.flags = flags
        self.timeout = timeout
        self.num_of_retr = num_of_retr
        self.rereg_time = rereg_time
        self.wait_for_dereg_reply = wait_for_dereg_reply
        self._listening = False
        self._rereg_timer = None
        self._socket = None
        self._sent_reg_reqest = None
        self._received_reg_reply = None
        self._num_of_retr_done = 0
        self._closing = False
        self._is_rereg = False
        self._exception_handler = None
        self._gateway = None
        self._interfaces = interfaces
        self._lock = threading.Lock()
        # Create dummy interface with home address
        _destroy_interfaces("mip")
        _del_route(home_agent+"/32")
        _create_interface("mip0", home_address)

    def __del__(self):
        """Mobile Node Agent destructor"""
        # Destroying all mipX interfaces
        _destroy_interfaces("mip")
        if self._gateway is not None:
            # Recreating original default routing
            _add_route(dst="default", gw=self._gateway)
            self._gateway = None

    def _update_routes(self, ifname):
        """Create or update static route to Home Agent IP address."""
        gw = self._interfaces[ifname]
        if gw is None:
            logging.error("Unknown gateway address.")
            raise Error("Unknown gateway address.")
        # Creating static route to home agent
        _add_route(self.home_agent+"/32", gw)

    def _create_tunnel(self, reg_req_packet):
        """Create GRE tunnel to Home Agent IP address."""
        #ifname, prefixlen = get_ifname(reg_req_packet.care_of_address)
        #gw = self._interfaces[ifname]
        #if gw is None:
        #gw = _get_default_gw()[0]
        #if gw is None:
        #    raise Error("Unknown gateway address.")
        #self._gateway = gw # Saving default route gateway
        # Creating static route to home agent
        #_add_route(self.home_agent+"/32", gw)
        _create_tunnel(name="mip1",
                       ip=self.home_address,
                       gre_local=reg_req_packet.care_of_address,
                       gre_remote=reg_req_packet.home_agent,
                       route_dst="default")

    def _destroy_tunnel(self):
        """Destroy GRE tunnel to Home Agent IP address."""
        _destroy_interface("mip1")
        #if self._gateway is not None:
        #    # Recreating original default routing
        #    _add_route(dst="default", gw=self._gateway)
        #    self._gateway = None
        # Recreating default routing
        #for ifname in self._interfaces.keys():
        #    if is_address_reachable(self._interfaces[ifname]):
        #        logging.info("Setting default route for %s interface.", ifname)
        #        _add_route(dst="default", gw=self._interfaces[ifname])
        #        break
        # Deleting static route to home agent
        #_del_route(self.home_agent+"/32")

    def _stop_listening_stuff(self):
        """Tear down registration state and stop the listening loop."""
        # Destroying tunnel
        if self.is_registered():
            self._destroy_tunnel()
            #_del_route(self.home_agent+"/32")
        self._sent_reg_reqest = None
        self._is_rereg = False
        self._stop_listening()

    def _data_handler(self, data, addr):
        """Handle received data.

        Validates a Registration Reply (identification, MHAE, code,
        lifetime) and, on success, saves it, arms the reregistration
        timer and creates the tunnel on first registration.
        """
        try:
            in_packet = Packet.from_data(data)
        except Error:
            logging.error("Invalid data received.")
            return
        logging.debug("Connected by %s host on %d port.", addr[0], addr[1])
        logging.debug("Received: %s", in_packet)
        #logging.debug("Extensions:")
        #for extension in in_packet.extensions:
        #    logging.info(extension)
        if not isinstance(in_packet, RegReplyPacket):
            logging.error("Invalid packet type has been received. " +
                "Discarding packet.")
            return
        # Registration Reply received
        logging.info("Registration Reply has been received.")
        # Identification verification
        if in_packet.identification != self._sent_reg_reqest.identification:
            logging.warning("Reply has unknown identification. " +
                "Discarding packet.")
            return
        # MHAE verification
        mhae = in_packet.get_mhae()
        if mhae is None or mhae.spi != self.mhae_spi:
            # Can't find matching SPI
            logging.warning("Can't find matching MHAE SPI in reply. " +
                "Discarding packet.")
            return
        if not in_packet.verify_mhae(self.mhae_spi, self.mhae_key):
            # Authorization failed
            logging.warning("Reply authorization is failed.")
            self._stop_listening_stuff()
            self._lock.release()
            raise RegistrationFailed("Reply authorization is failed.")
        # Registration Reply code verification
        # Bug fix: was `is not`, an identity comparison that only works
        # for small ints by CPython implementation accident.
        if in_packet.code != RegReplyPacket.CODE_ACCEPTED:
            # Registration is not accepted
            logging.warning("Registration request has not been accepted.")
            self._stop_listening_stuff()
            self._lock.release()
            raise RegistrationFailed("Registration has not been accepted.")
        # Registration Reply lifetime verification
        if in_packet.lifetime <= 0:
            # Registration lifetime is 0
            if self._sent_reg_reqest.lifetime != 0:
                logging.warning("Reply lifetime is 0, but 0 wasn't requested.")
            logging.debug("Reply lifetime is 0, so reply for deregistration.")
            self._stop_listening_stuff()
            return
        # Registration is accepted
        logging.info("Registration request has been accepted.")
        # Verifing reply lifetime
        if in_packet.lifetime > self._sent_reg_reqest.lifetime:
            logging.warning("Lifetime in reply is longer than requested.")
            # Bug fix: previously assigned the whole request packet
            # object (missing `.lifetime`), which later broke the
            # `lifetime * rereg_time` timer computation.
            in_packet.lifetime = self._sent_reg_reqest.lifetime
        # Saving reply
        self._received_reg_reply = in_packet
        # Setting up reregistration timer
        if self._rereg_timer is not None:
            logging.error("Rereg timer is not empty.")
        self._rereg_timer = _Timer(
            in_packet.lifetime * self.rereg_time, self._reregister,
            exception_handler=self._exception_handler)
        self._rereg_timer.start()
        # Creating tunnel
        if not self._is_rereg:
            #if self._sent_reg_reqest.flags & RegRequestPacket.FLAG_T:
            self._create_tunnel(self._sent_reg_reqest)
        self._stop_listening()
        self._is_rereg = False

    def _send_packet(self, packet, addr):
        """Send given packet to given IP address."""
        logging.debug("Sending: %s", packet)
        self._socket.sendto(packet.to_data(), addr)

    def is_registered(self):
        """Return True if agent is registered."""
        if self._is_rereg:
            return True
        return self._received_reg_reply is not None

    def get_status(self):
        """Return dict containing status information."""
        if not self.is_registered():
            return {"registered": False}
        ifname, prefixlen = get_ifname(address=self._sent_reg_reqest.care_of_address)
        if ifname is None:
            logging.error("Care-of address %s is not assigned " +
                "to any interface.", self._sent_reg_reqest.care_of_address)
        return {
            "registered": True,
            "home_address": self.home_address,
            "home_agent": self.home_agent,
            "care_of_address": self._sent_reg_reqest.care_of_address,
            "ifname": ifname
        }

    def register(self, care_of_address=None, dereg_existing_reg=True,
            lifetime=INFINITE_LIFETIME, ifname=None,
            exception_handler=None):
        """Register Mobile Node Agent in Home Agent.
        Parameters:
        care_of_address -- Care-of address (optional if ifname is provided)
        dereg_existing_reg -- if True, deregistration will be done
                before new registration (default is True)
        lifetime -- requested registration lifetime value
        ifname -- name of network interface for the registration
                (optional if care_of_address is provided)
        exception_handler -- function that will be called when exception
                occures in Mobile Node Agent thread
        """
        self._lock.acquire()
        prefixlen = None
        # Addresses verification
        if care_of_address is None and ifname is None:
            logging.error("At least care-of address or interface " +
                "name needs to be provided.")
            self._lock.release()
            raise Error("Care-of address or interface name not provided")
        if care_of_address is None:
            care_of_address, prefixlen = get_address(ifname=ifname)
            if care_of_address is None or prefixlen is None:
                logging.error("Interface %s has no address assigned or " +
                    "doesn't exist.", ifname)
                self._lock.release()
                raise RegistrationFailed("Interface has no address assigned.")
        if ifname is None or prefixlen is None:
            ifname, prefixlen = get_ifname(address=care_of_address)
            if ifname is None or prefixlen is None:
                logging.error("Care-of address %s is not assigned " +
                    "to any interface.", care_of_address)
                self._lock.release()
                raise RegistrationFailed("Care-of address is not assigned to any interface.")
        if is_address_in_subnet(self.home_address,
                "%s/%d"%(care_of_address, prefixlen)):
            logging.error("Home address (%s) belongs to " +
                "care-of address subnet (%s/%d), so you are in " +
                "the home network.", self.home_address,
                care_of_address, prefixlen)
            self._lock.release()
            raise RegistrationFailed("Home address belongs to care-of address subnet.")
        # Check if already registered
        if self.is_registered():
            if (self._sent_reg_reqest.care_of_address == care_of_address and
                    self.rereg_time is not None):
                self._exception_handler = exception_handler # updating handler
                logging.warning("Care-of address is already registered. "+
                    "Request will not be sent.")
                self._lock.release()
                return
        # Disabling rereg timer
        if self._rereg_timer is not None:
            self._rereg_timer.cancel()
            self._rereg_timer = None
        # Updating routes for home gateway
        self._update_routes(ifname)
        # Auto deregistration
        if self.is_registered():
            if dereg_existing_reg:
                self.deregister(ifname=ifname)
            else:
                self.cancel()
        # Resets
        #self._destroy_tunnel()
        self._closing = False
        self._received_reg_reply = None
        self._num_of_retr_done = 0
        self._is_rereg = False
        # Creating Registration Request
        out_packet = RegRequestPacket(
            flags=self.flags,
            lifetime=lifetime,
            home_address=self.home_address,
            home_agent=self.home_agent,
            care_of_address=care_of_address
        )
        out_packet.add_mhae(self.mhae_spi, self.mhae_key)
        # Saving reg request
        self._sent_reg_reqest = out_packet
        self._exception_handler = exception_handler
        logging.info("Sending Registration Request to %s (Home Agent) " +
            "using %s interface.", self.home_agent, ifname)
        #logging.debug("care_of_address: %s, ifname: %s, prefixlen: %s",
        #    care_of_address, ifname, prefixlen)
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.bind((care_of_address, 0))
        self._socket.setsockopt(socket.SOL_SOCKET,
            socket.SO_BINDTODEVICE, ifname)
        self._send_packet(out_packet, (self.home_agent, self.port))
        # Listening for reply
        self._start_listening()
        self._lock.release()

    def deregister(self, ifname=None, wait_for_reply=None):
        """Deregister Mobile Node Agent.
        Parameters:
        ifname -- name of network interface for the deregistration,
                if not provided ifname will the same as for
                the registration (optional)
        wait_for_reply -- if True, positive reply from Home Agent is required
                to accept deregistration (default is
                wait_for_dereg_reply provided in constructor)
        """
        # Disabling rereg timer
        if self._rereg_timer is not None:
            self._rereg_timer.cancel()
            self._rereg_timer = None
        if not self.is_registered():
            logging.warning("There is nothing to deregister.")
            return
        # Resets
        self._received_reg_reply = None
        self._num_of_retr_done = self.num_of_retr # disable retransmissions
        self._rereg_timer = None
        self._closing = False
        self._is_rereg = True
        # Creating Deregistration Request
        self._sent_reg_reqest.update_identification()
        self._sent_reg_reqest.lifetime = 0 # Deregistration
        self._sent_reg_reqest.add_mhae(self.mhae_spi, self.mhae_key)
        care_of_address = self._sent_reg_reqest.care_of_address
        difname, prefixlen = get_ifname(address=care_of_address)
        if ifname is None and difname is None:
            logging.error("Care-of address %s is not assigned " +
                "to any interface. Cancelling registration.",
                care_of_address)
            self.cancel()
            # NOTE(review): deregister() never acquires self._lock, so
            # this release is only safe when called from register()
            # while it holds the lock — confirm callers before changing.
            self._lock.release()
            return
        if ifname is None or difname == ifname:
            logging.debug("Care-of address %s is assigned " +
                "to interface.", care_of_address)
        else:
            address, prefixlen = get_address(ifname=ifname)
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._socket.bind((address, 0))
            self._socket.setsockopt(socket.SOL_SOCKET,
                socket.SO_BINDTODEVICE, ifname)
        logging.info("Sending Deregistration Request to %s (Home Agent) " +
            "via %s interface.",
            self._sent_reg_reqest.home_agent, ifname)
        self._send_packet(self._sent_reg_reqest,
            (self._sent_reg_reqest.home_agent, self.port))
        if (self.wait_for_dereg_reply if wait_for_reply is None
                else wait_for_reply):
            # Waiting for reply
            #logging.info("Waiting for deregistration reply.")
            self._start_listening()
        else:
            self._is_rereg = False
            # Not waiting for reply, so destroying reverse tunnel immediately
            #if self._sent_reg_reqest.flags & RegRequestPacket.FLAG_T:
            self._destroy_tunnel()

    def _handle_listening_timeout(self):
        """Handle reply timeout: retransmit or give up the registration."""
        logging.warning("Request has timeout.")
        if self.num_of_retr > self._num_of_retr_done and not self._closing:
            # Doing retransmission
            self._num_of_retr_done += 1 # increasing counter
            logging.warning("Repeating request, #%d attempt.",
                self._num_of_retr_done)
            #self._sent_reg_reqest.update_identification()
            self._sent_reg_reqest.add_mhae(self.mhae_spi, self.mhae_key)
            self._send_packet(self._sent_reg_reqest,
                (self.home_agent, self.port))
        else:
            # Reg request is failed
            logging.error("Registration Request is failed due to timeout.")
            self.cancel()
            return

    def _reregister(self):
        """Refresh the current registration (rereg timer callback)."""
        self._lock.acquire()
        logging.info("Refreshing registration.")
        care_of_address = self._sent_reg_reqest.care_of_address
        ifname, prefixlen = get_ifname(address=care_of_address)
        if ifname is None or prefixlen is None:
            logging.error("Care-of address %s is not assigned " +
                "to any interface. Cancelling registration.",
                care_of_address)
            self.cancel()
            self._lock.release()
            return
        # Resets
        self._received_reg_reply = None
        self._num_of_retr_done = 0
        self._rereg_timer = None
        self._is_rereg = True
        # Updating Registration Request
        self._sent_reg_reqest.update_identification()
        self._sent_reg_reqest.add_mhae(self.mhae_spi, self.mhae_key)
        logging.info("Sending Registration Request to %s (Home Agent) " +
            "using %s interface.", self._sent_reg_reqest.home_agent,
            ifname)
        #logging.debug("care_of_address: %s, ifname: %s, prefixlen: %s",
        #    care_of_address, ifname, prefixlen)
        self._send_packet(self._sent_reg_reqest,
            (self._sent_reg_reqest.home_agent, self.port))
        self._start_listening()
        self._lock.release()

    def _start_listening(self):
        """Block reading the socket until a reply is handled or closing."""
        self._listening = True
        # Staring listening for reply
        while self._listening and not self._closing:
            self._socket.settimeout(self.timeout) # Setting up timeout
            try:
                data, addr = self._socket.recvfrom(1024)
            except socket.timeout:
                self._handle_listening_timeout() # Timeout
            else:
                self._data_handler(data, addr) # Data received
        self._listening = False

    def _stop_listening(self):
        """Ask the listening loop to exit."""
        self._listening = False

    def cancel(self):
        """Cancel any ongoing registrations or registration attempts."""
        if self.is_registered():
            logging.info("Cancelling registration.")
            #_del_route(self.home_agent+"/32")
        self._closing = True
        if self._rereg_timer is not None:
            self._rereg_timer.cancel()
            self._rereg_timer = None
        self._destroy_tunnel()
        self._received_reg_reply = None
        self._sent_reg_reqest = None
        self._is_rereg = False
        self._gateway = None
class HomeAgent:
    """Mobile IP Home Agent class.

    Accepts Mobile IP Registration Requests over UDP, authenticates them
    via the Mobile-Home Authentication Extension (MHAE), keeps a binding
    table of registered mobile nodes, and maintains one GRE tunnel
    ("mipX" interface) per binding.
    """
    def __init__(self,
                 auth_table,
                 address="0.0.0.0",
                 port=434,
                 max_lifetime=INFINITE_LIFETIME, # Maximum acceptable Lifetime for registration
                 max_ident_mismatch=7, # Accepted timestamp mismatch in sec for identification
                 ip_pool="172.16.0.0/24"):
        """Home Agent constructor.
        Parameters:
        auth_table -- dict that contains SPIs as keys and
                      authorization KEYs as values
                      (e.g. {256: "1234567812345678"})
        address -- Home Agent binding IP address (default is 0.0.0.0,
                   HA will listen on all network interfaces)
        port -- Home Agent listening UDP port number (default is 434)
        max_lifetime -- maximum acceptable Lifetime for registration
                        (default is INFINITE_LIFETIME)
        max_ident_mismatch -- accepted timestamp mismatch in seconds for
                              identification (default is 7)
        ip_pool -- IP pool used for tunnel interfaces
                   (default is 172.16.0.0/24)
        """
        # Check if auth table is valid.
        # (Fixed: was `len(auth_table) is 0` -- `is` compares object
        # identity and must not be used for integer comparison.)
        if not auth_table:
            raise Error("Auth table is empty.")
        self.auth_table = auth_table
        self.address = address
        self.port = port
        self.max_lifetime = max_lifetime
        self.max_ident_mismatch = max_ident_mismatch
        self._ip_pool = IPNetwork(ip_pool)
        self._socket = None
        # Maps home address -> RegRequestPacket of the active registration.
        self._binding_table = {}
        self._binding_table_lock = threading.Lock()
        # Background checker that expires bindings whose lifetime passed.
        self._binding_checker = _BindingChecker(
            lock=self._binding_table_lock,
            binding_table=self._binding_table,
            lifetime_expired_handler=self._lifetime_expired_handler)
    def __del__(self):
        # Fixed: the `self` parameter was missing (`def __del__():`), so
        # the destructor raised TypeError at GC time and never ran.
        """Home Agent destructor"""
        _destroy_interfaces("mip") # Destroying all mipX interfaces
        set_ip_forward(False) # Disabling kernel IP forwarding
        set_proxy_arp_for_all(False) # Disabling Proxy ARP
    def _lifetime_expired_handler(self, reg_req_packet):
        """Handle registration expiration reported by the binding checker."""
        logging.warning("Binding [home address=%s, CoA=%s] has expired.",
                        reg_req_packet.home_address,
                        reg_req_packet.care_of_address)
        self._destroy_binding(reg_req_packet)
    def _print_binding_table(self):
        """Return registration binding table description."""
        # Python 2 idiom (dict.iteritems); keep while the rest of the
        # module targets Python 2.
        desc = "{"
        for key, value in self._binding_table.iteritems():
            desc += "[home address=%s, CoA=%s]" % (key, value.care_of_address)
        return desc + "}"
    def _get_binding(self, home_address):
        """Return RegRequestPacket used in the registration for
        given Home address, or None when no binding exists."""
        if home_address in self._binding_table:
            return self._binding_table[home_address]
        return None
    def _destroy_binding(self, reg_req_packet):
        """Destroy registration binding for given RegRequestPacket."""
        if reg_req_packet.home_address in self._binding_table:
            self._destroy_tunnel(reg_req_packet)
            self._binding_table_lock.acquire()
            logging.debug("Destroying [home address=%s, CoA=%s] binding.",
                          reg_req_packet.home_address,
                          reg_req_packet.care_of_address)
            del self._binding_table[reg_req_packet.home_address]
            self._binding_table_lock.release()
        else:
            # Fixed: this branch referenced the undefined bare name
            # `home_address`, raising NameError whenever it was taken.
            logging.warning("Unable to find binding for home address=%s.",
                            reg_req_packet.home_address)
    def _create_binding(self, reg_req_packet):
        """Create (or refresh) a registration binding for the request."""
        # Computing new expiration date (0 marks a never-expiring binding)
        expiration_date = (0 if reg_req_packet.lifetime == INFINITE_LIFETIME
                           else time.time() + reg_req_packet.lifetime)
        # Handling existing binding
        existing_reg_req_packet = self._get_binding(reg_req_packet.home_address)
        if existing_reg_req_packet is not None:
            if existing_reg_req_packet.is_update_request(reg_req_packet):
                # reg_req_packet is an update, so updating only expiration_date
                logging.debug("Updating [home address=%s, CoA=%s] binding.",
                              existing_reg_req_packet.home_address,
                              existing_reg_req_packet.care_of_address)
                existing_reg_req_packet.expiration_date = expiration_date
                return
            # reg_req_packet is not an update, so destroying existing binding
            self._destroy_binding(existing_reg_req_packet)
        # Creating new binding
        self._binding_table_lock.acquire()
        logging.debug("Creating new binding [home address=%s, CoA=%s].",
                      reg_req_packet.home_address,
                      reg_req_packet.care_of_address)
        reg_req_packet.expiration_date = expiration_date
        self._binding_table[reg_req_packet.home_address] = reg_req_packet
        self._binding_table_lock.release()
        # Create tunnel (done outside the lock: tunnel setup shells out)
        self._create_tunnel(reg_req_packet)
    def _get_binding_id(self, home_address):
        """Return id of registration binding for given Home Address."""
        # NOTE(review): the id is the positional index of the key in the
        # binding dict, so ids (and the mipX interface names derived from
        # them) can shift when an earlier binding is removed -- confirm
        # tunnel create/destroy stay consistent under churn.
        return self._binding_table.keys().index(home_address)
    def _create_tunnel(self, reg_req_packet):
        """Create GRE tunnel for given RegRequestPacket."""
        tid = self._get_binding_id(reg_req_packet.home_address)
        _create_tunnel(name="mip"+str(tid),
                       ip=str(self._ip_pool[tid+1]),
                       gre_local=self.address,
                       gre_remote=reg_req_packet.care_of_address,
                       route_dst=reg_req_packet.home_address+"/32")
    def _destroy_tunnel(self, reg_req_packet):
        """Destroy GRE tunnel for given RegRequestPacket."""
        tid = self._get_binding_id(reg_req_packet.home_address)
        _destroy_interface(name="mip"+str(tid))
    def _send_packet(self, packet, addr):
        """Send packet to given address."""
        logging.info("Sending: %s", packet)
        self._socket.sendto(packet.to_data(), addr)
    def _check_flags(self, flags):
        """Return True, if given flags are supported."""
        # Flags verification. Some capabilities are not implemented yet...
        # Only co-located care-of address are supported, so
        # D flag (Decapsulation by mobile node) is mandatory.
        # S (Simultaneous bindings) and B (Broadcast datagrams) are
        # not supported.
        # Only GRE tunnel is supported, so G is mandatory and M is not allowed.
        is_ok = True
        if not flags & RegRequestPacket.FLAG_D:
            logging.warning("D flag is not set but is mandatory.")
            is_ok = False
        if flags & RegRequestPacket.FLAG_S:
            logging.warning("S flag is set but is not supported.")
            is_ok = False
        if flags & RegRequestPacket.FLAG_B:
            logging.warning("B flag is set but is not supported.")
            is_ok = False
        if not flags & RegRequestPacket.FLAG_G:
            logging.warning("G flag is not set but is mandatory.")
            is_ok = False
        if flags & RegRequestPacket.FLAG_M:
            logging.warning("M flag is set but is not supported.")
            is_ok = False
        return is_ok
    def _data_handler(self, data, addr):
        """Validate a received Registration Request and send a Reply."""
        in_packet = Packet.from_data(data)
        logging.debug("Connected by: %s", addr)
        logging.debug("Received: %s", in_packet)
        #logging.debug("Extensions:")
        #for extension in in_packet.extensions:
        #    logging.info(extension)
        if not isinstance(in_packet, RegRequestPacket):
            logging.warning("Invalid packet type has been received. " +
                            "Discarding packet.")
            return
        # Registration Request received
        logging.info("Registration Request has been received.")
        logging.debug("Bindings table: %s", self._print_binding_table())
        # MHAE verification
        mhae = in_packet.get_mhae()
        if mhae is None or mhae.spi not in self.auth_table:
            # Can't find matching SPI, so silently discarding
            logging.warning("Can't find matching SPI in request. " +
                            "Discarding request.")
            return
        key = self.auth_table[mhae.spi]
        if not in_packet.verify_mhae(mhae.spi, key):
            # Authorization failed
            logging.warning("Request authorization failed.")
            # Sending Registration Reply
            out_packet = RegReplyPacket(
                RegReplyPacket.CODE_MN_FAILED_AUTH,
                0x0000,
                in_packet.home_address,
                in_packet.home_agent,
                in_packet.identification)
            out_packet.add_mhae(mhae.spi, key)
            self._send_packet(out_packet, addr)
            return
        # Determining if duplicate (retransmission of the same request)
        existing_reg_req_packet = self._get_binding(in_packet.home_address)
        if existing_reg_req_packet is not None:
            if (existing_reg_req_packet.identification == in_packet.identification
                    and existing_reg_req_packet.care_of_address == in_packet.care_of_address):
                logging.warning("Request is a retransmission. " +
                                "Discarding request.")
                return
        # Timestamp verification
        ha_time = time.time()
        mn_time = ntp_to_system_time(in_packet.identification)
        if abs(int(ha_time-mn_time)) > self.max_ident_mismatch:
            # Registration ID mismatch
            logging.warning("Registration identification mismatch.")
            out_packet = RegReplyPacket(
                RegReplyPacket.CODE_IDENT_MISMATCH,
                0x0000,
                in_packet.home_address,
                in_packet.home_agent,
                in_packet.identification)
            out_packet.add_mhae(mhae.spi, key)
            self._send_packet(out_packet, addr)
            return
        # Flags verification
        if not self._check_flags(in_packet.flags):
            out_packet = RegReplyPacket(
                RegReplyPacket.CODE_DENIED_BY_HA,
                0x0000,
                in_packet.home_address,
                in_packet.home_agent,
                in_packet.identification)
            out_packet.add_mhae(mhae.spi, key)
            self._send_packet(out_packet, addr)
            return
        # Addresses verification
        if in_packet.care_of_address == in_packet.home_address:
            logging.warning("Care-of address is the same as home address. " +
                            "Mobile node is in the home network.")
            if in_packet.lifetime > 0:
                logging.error("Mobile node is in the home network, " +
                              "but registration is requested.")
                # TODO: Perhaps request should be rejected...
        # Registration Request accepted
        logging.info("Registration Request is valid.")
        # Updating lifetime if lifetime > max_lifetime
        if in_packet.lifetime > self.max_lifetime:
            logging.warning("Requested lifetime is greater than maximum.")
            in_packet.lifetime = self.max_lifetime
        # Creating or destroying binding
        if in_packet.lifetime > 0:
            # Registration
            self._create_binding(in_packet)
        else:
            # Deregistration
            logging.info("Deregistration is requested.")
            self._destroy_binding(in_packet)
        # Sending Registration Reply
        out_packet = RegReplyPacket(
            RegReplyPacket.CODE_ACCEPTED,
            in_packet.lifetime,
            in_packet.home_address,
            in_packet.home_agent,
            in_packet.identification)
        out_packet.add_mhae(mhae.spi, key)
        self._send_packet(out_packet, addr)
    def start(self):
        """Start Home Agent server (blocks the calling thread)."""
        if self._socket is not None:
            logging.warning("Home Agent is already started.")
            return
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.bind((self.address, self.port))
        self._binding_checker.start()
        _destroy_interfaces("mip") # Destroying all mipX interfaces
        set_proxy_arp_for_all(True) # Enabling Proxy ARP
        set_ip_forward(True) # Enabling kernel IP forwarding
        logging.info("Home Agent is started.")
        # NOTE(review): when stop() closes the socket from another thread,
        # the blocking recvfrom() below raises instead of returning --
        # confirm callers handle that shutdown path.
        while self._socket is not None:
            data, addr = self._socket.recvfrom(1024)
            self._data_handler(data, addr)
    def stop(self):
        """Stop Home Agent server."""
        self._stopping = True # NOTE(review): flag never read in this class
        self._binding_checker.stop()
        _destroy_interfaces("mip") # Destroying all mipX interfaces
        set_ip_forward(False) # Disabling kernel IP forwarding
        set_proxy_arp_for_all(False) # Disabling Proxy ARP
        if self._socket is not None:
            self._socket.close()
            self._socket = None
            logging.info("Home Agent is stopped.")
        else:
            logging.warning("Home Agent is already stopped.")
| 36.349509
| 111
| 0.604905
|
4a044d493dbe665fcf7dd895e4e0dc60579654bf
| 1,117
|
py
|
Python
|
python/pyxir/contrib/target/DPUCVDX8G.py
|
Xilinx/pyxir
|
bef661d6d77adcdbd2cf4163f2cf3a1d31d40406
|
[
"Apache-2.0"
] | 25
|
2020-06-17T22:41:13.000Z
|
2022-03-22T16:28:22.000Z
|
python/pyxir/contrib/target/DPUCVDX8G.py
|
Xilinx/pyxir
|
bef661d6d77adcdbd2cf4163f2cf3a1d31d40406
|
[
"Apache-2.0"
] | 25
|
2021-03-16T06:26:44.000Z
|
2022-03-18T11:28:33.000Z
|
python/pyxir/contrib/target/DPUCVDX8G.py
|
Xilinx/pyxir
|
bef661d6d77adcdbd2cf4163f2cf3a1d31d40406
|
[
"Apache-2.0"
] | 19
|
2020-07-30T10:03:02.000Z
|
2021-06-29T01:18:16.000Z
|
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""Register DPUCVDX8G target"""
import pyxir
from .components.DPUCVDX8G.dpucvdx8g import xgraph_dpu_quantizer
from .components.DPUCVDX8G.dpucvdx8g import xgraph_dpu_optimizer
from .components.DPUCVDX8G.dpucvdx8g import xgraph_dpu_build_func
from .components.DPUCVDX8G.dpucvdx8g import xgraph_dpu_compiler
# Register target
pyxir.register_target(
"DPUCVDX8G",
xgraph_dpu_optimizer,
xgraph_dpu_quantizer,
xgraph_dpu_compiler,
xgraph_dpu_build_func,
)
# Register op support
from .components.DPUCVDX8G import op_support
| 31.914286
| 74
| 0.79051
|
4a044e312836b61e6a92e1fece0daa1d977fa5f6
| 80
|
py
|
Python
|
Python-Course/Workshops/October3rd/factorial.py
|
cmlimm/uni-projects
|
b63ac71cc0b971c7f035096a6bd15b0cbb5bb9f6
|
[
"MIT"
] | null | null | null |
Python-Course/Workshops/October3rd/factorial.py
|
cmlimm/uni-projects
|
b63ac71cc0b971c7f035096a6bd15b0cbb5bb9f6
|
[
"MIT"
] | null | null | null |
Python-Course/Workshops/October3rd/factorial.py
|
cmlimm/uni-projects
|
b63ac71cc0b971c7f035096a6bd15b0cbb5bb9f6
|
[
"MIT"
] | 1
|
2020-10-29T18:31:32.000Z
|
2020-10-29T18:31:32.000Z
|
def factorial(n):
    """Return n! for a non-negative integer n.

    Implemented iteratively so large n does not hit the recursion limit
    (the original recursive version also recursed forever for n < 0).

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
| 16
| 29
| 0.525
|
4a044e9f2f7a90bbf3ed4e6cc118b16a47a52af7
| 2,446
|
py
|
Python
|
doc/source/conf.py
|
mcx/ADS
|
b2f94f4daff02438f0b2cffd670be8051244ceec
|
[
"MIT"
] | null | null | null |
doc/source/conf.py
|
mcx/ADS
|
b2f94f4daff02438f0b2cffd670be8051244ceec
|
[
"MIT"
] | null | null | null |
doc/source/conf.py
|
mcx/ADS
|
b2f94f4daff02438f0b2cffd670be8051244ceec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'adstool manual'
copyright = '2015 - 2021, Beckhoff Automation GmbH & Co. KG'
author = 'Patrick Brünn'
# The short X.Y version (intentionally empty: the manual is unversioned)
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. (None needed for this manual.)
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('adstool', 'adstool', 'ads, adstool, tcadstool Documentation', [author], 1),
]
| 30.962025
| 81
| 0.670482
|
4a044f8a279b74293bbad722b55535394ed7fd02
| 2,353
|
py
|
Python
|
sgui/Examples/layoutExample.py
|
DGriffin91/sgui
|
43e4d5142a8d5e967de4792c93d1b5d40db1511d
|
[
"MIT"
] | null | null | null |
sgui/Examples/layoutExample.py
|
DGriffin91/sgui
|
43e4d5142a8d5e967de4792c93d1b5d40db1511d
|
[
"MIT"
] | 2
|
2020-02-08T05:20:44.000Z
|
2020-02-08T05:21:40.000Z
|
sgui/Examples/layoutExample.py
|
DGriffin91/sgui
|
43e4d5142a8d5e967de4792c93d1b5d40db1511d
|
[
"MIT"
] | null | null | null |
import sgui
#Start TK window (root widget; the event loop starts at the bottom of the file)
window = sgui.Tk()
#Layout frame: a horizontal box that every demo section is packed into
mainBox = sgui.HBox(window)
# --- Callback functions ---
def buttonPress(btn):
    """Button callback: announce the press and toggle the test checkbox."""
    message = btn.string + " pressed!"
    print(message)
    # Show the checkbox state as it was *before* toggling it.
    state = testCheckbox.checked
    print(state)
    testCheckbox.checked = not state
def checkBoxed(chb):
    """Checkbox callback: print the widget label and its new state."""
    print("%s %s" % (chb.string, chb.checked))
def printList(listb):
    """List-box callback: dump the items, the selected index and the
    item at that index."""
    entries = listb.items
    chosen = listb.selection
    print(entries)
    print(chosen)
    print(entries[chosen])
def printText(textb):
    """Text-box callback: dump the widget's current string."""
    print(textb.string)
# --- Labels ---
# Each section below shows both configuration styles the toolkit
# supports: constructor keywords and attribute assignment after creation.
testLabel = sgui.Label(mainBox, string = "Stuff!")
testLabel2 = sgui.Label(mainBox)
testLabel2.string = "Stuff2!"
# --- Buttons ---
#myStyle = ttk.Style()
buttonBox = sgui.VBox(mainBox)
testLabel = sgui.Label(buttonBox, string = "Some buttons:")
testButton = sgui.Button(buttonBox, string = "Generic Fungus", command = buttonPress)
testButton2 = sgui.Button(buttonBox)
testButton2.string = "Jungle Rot"
testButton2.command = buttonPress
testButton3 = sgui.Button(buttonBox)
testButton3.string = "Episodic Appendage Spawning"
testButton3.command = buttonPress
# --- Checkboxes ---
checkBoxBox = sgui.VBox(mainBox)
testCheckbox = sgui.Checkbox(checkBoxBox, string = "Check me!", command = checkBoxed)
testCheckbox2 = sgui.Checkbox(checkBoxBox)
testCheckbox2.string = "No, check me!!"
testCheckbox2.command = checkBoxed
# --- Listboxes ---
listbox1 = sgui.Listbox(mainBox, items = ["AAA","BBB","CCC","DDD"], command = printList)
listbox2 = sgui.Listbox(mainBox)
listbox2.items = ["FFF","GGG","HHH","III"]
listbox2.command = printList
# --- Textboxes ---
textBox1 = sgui.Textbox(mainBox, command = printText)
textBox2 = sgui.Textbox(mainBox)
textBox2.command = printText
# --- Radiobuttons ---
radio1 = sgui.VBox(mainBox)
radiobuttons = sgui.Radiobuttons(radio1, items = ["JJJ","LLL","MMM","NNN"], command = printList)
radio2 = sgui.VBox(mainBox)
radiobuttons = sgui.Radiobuttons(radio2)
radiobuttons.items = ["OOO","PPP","QQQ","RRR"]
radiobuttons.command = printList
# --- TextEntry ---
textEntryBox = sgui.VBox(mainBox)
textEntry = sgui.Textentry(textEntryBox)
textEntry.command = printText
textEntry = sgui.Textentry(textEntryBox, string = "TEST!!", command = printText)
#Start GUI (blocks: enters the toolkit's event loop)
window.startGUI()
| 23.29703
| 98
| 0.681258
|
4a0450d915784ec1f5bd67963bbb27a743e71044
| 2,974
|
py
|
Python
|
mmdet/core/bbox/bbox_target.py
|
droseger/mmdetection
|
355da53ea7c4b061c62c5a8430adce7641bc2894
|
[
"Apache-2.0"
] | 632
|
2019-04-10T02:05:03.000Z
|
2022-03-29T01:58:55.000Z
|
mmdet/core/bbox/bbox_target.py
|
droseger/mmdetection
|
355da53ea7c4b061c62c5a8430adce7641bc2894
|
[
"Apache-2.0"
] | 49
|
2019-04-12T14:42:39.000Z
|
2022-01-22T07:59:51.000Z
|
mmdet/core/bbox/bbox_target.py
|
droseger/mmdetection
|
355da53ea7c4b061c62c5a8430adce7641bc2894
|
[
"Apache-2.0"
] | 112
|
2019-04-10T12:01:44.000Z
|
2022-03-29T01:58:49.000Z
|
import torch
from .transforms import bbox2delta
from ..utils import multi_apply
def bbox_target(pos_bboxes_list,
                neg_bboxes_list,
                pos_gt_bboxes_list,
                pos_gt_labels_list,
                cfg,
                reg_classes=1,
                target_means=(.0, .0, .0, .0),
                target_stds=(1.0, 1.0, 1.0, 1.0),
                concat=True):
    """Compute classification and bbox-regression targets per image.

    Applies ``bbox_target_single`` to every image's sampled proposals and
    optionally concatenates the per-image results into single tensors.

    Args:
        pos_bboxes_list: per-image tensors of sampled positive proposals.
        neg_bboxes_list: per-image tensors of sampled negative proposals.
        pos_gt_bboxes_list: per-image GT boxes matched to the positives.
        pos_gt_labels_list: per-image GT labels matched to the positives.
        cfg: training config; ``cfg.pos_weight`` weights positive labels.
        reg_classes: number of regression classes (>1 expands targets to
            class-specific slots).
        target_means: delta normalization means. Defaults are tuples, not
            lists, to avoid the shared-mutable-default pitfall.
        target_stds: delta normalization standard deviations.
        concat: when True, concatenate per-image results along dim 0.

    Returns:
        Tuple ``(labels, label_weights, bbox_targets, bbox_weights)``.
    """
    labels, label_weights, bbox_targets, bbox_weights = multi_apply(
        bbox_target_single,
        pos_bboxes_list,
        neg_bboxes_list,
        pos_gt_bboxes_list,
        pos_gt_labels_list,
        cfg=cfg,
        reg_classes=reg_classes,
        target_means=target_means,
        target_stds=target_stds)
    if concat:
        labels = torch.cat(labels, 0)
        label_weights = torch.cat(label_weights, 0)
        bbox_targets = torch.cat(bbox_targets, 0)
        bbox_weights = torch.cat(bbox_weights, 0)
    return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single(pos_bboxes,
                       neg_bboxes,
                       pos_gt_bboxes,
                       pos_gt_labels,
                       cfg,
                       reg_classes=1,
                       target_means=(.0, .0, .0, .0),
                       target_stds=(1.0, 1.0, 1.0, 1.0)):
    """Compute targets for the sampled proposals of one image.

    Positives occupy the first ``num_pos`` rows of every returned tensor,
    negatives the rest.

    Args:
        pos_bboxes (Tensor): sampled positive proposals, shape (num_pos, 4).
        neg_bboxes (Tensor): sampled negative proposals, shape (num_neg, 4).
        pos_gt_bboxes (Tensor): GT boxes assigned to the positives.
        pos_gt_labels (Tensor): GT labels assigned to the positives.
        cfg: training config; ``cfg.pos_weight <= 0`` means weight 1.0.
        reg_classes (int): >1 expands targets into per-class slots.
        target_means/target_stds: delta normalization statistics. Tuples
            (not lists) so the defaults cannot be mutated across calls.

    Returns:
        Tuple ``(labels, label_weights, bbox_targets, bbox_weights)``,
        each with ``num_pos + num_neg`` rows.
    """
    num_pos = pos_bboxes.size(0)
    num_neg = neg_bboxes.size(0)
    num_samples = num_pos + num_neg
    # Negatives keep label 0 (background) and zero bbox targets/weights.
    labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
    label_weights = pos_bboxes.new_zeros(num_samples)
    bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
    bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
    if num_pos > 0:
        labels[:num_pos] = pos_gt_labels
        pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
        label_weights[:num_pos] = pos_weight
        pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means,
                                      target_stds)
        bbox_targets[:num_pos, :] = pos_bbox_targets
        bbox_weights[:num_pos, :] = 1
    if num_neg > 0:
        label_weights[-num_neg:] = 1.0
    if reg_classes > 1:
        bbox_targets, bbox_weights = expand_target(bbox_targets, bbox_weights,
                                                   labels, reg_classes)
    return labels, label_weights, bbox_targets, bbox_weights
def expand_target(bbox_targets, bbox_weights, labels, num_classes):
    """Scatter class-agnostic bbox targets/weights into per-class slots.

    Each row gets ``4 * num_classes`` columns; a positive row (label > 0)
    has its 4 values copied into the 4-column slot of its class, every
    other column stays zero.
    """
    num_rows = bbox_targets.size(0)
    expanded_targets = bbox_targets.new_zeros((num_rows, 4 * num_classes))
    expanded_weights = bbox_weights.new_zeros((num_rows, 4 * num_classes))
    positive_rows = torch.nonzero(labels > 0).squeeze(-1)
    for row in positive_rows:
        begin = labels[row] * 4
        stop = begin + 4
        expanded_targets[row, begin:stop] = bbox_targets[row, :]
        expanded_weights[row, begin:stop] = bbox_weights[row, :]
    return expanded_targets, expanded_weights
| 38.623377
| 78
| 0.589442
|
4a04526f23e292762af03a8d7702203476bafe27
| 8,919
|
py
|
Python
|
docs/conf.py
|
TariqTNO/OpenQL
|
8da802cae3a7b8c244edb4d3b74be9e60250528c
|
[
"Apache-2.0"
] | 61
|
2019-04-24T08:25:41.000Z
|
2022-03-01T22:23:23.000Z
|
docs/conf.py
|
TariqTNO/OpenQL
|
8da802cae3a7b8c244edb4d3b74be9e60250528c
|
[
"Apache-2.0"
] | 141
|
2019-03-27T16:19:06.000Z
|
2022-03-03T10:11:47.000Z
|
docs/conf.py
|
TariqTNO/OpenQL
|
8da802cae3a7b8c244edb4d3b74be9e60250528c
|
[
"Apache-2.0"
] | 43
|
2019-03-27T13:40:45.000Z
|
2022-01-14T12:48:51.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../swig'))
# -- Doxygen build -----------------------------------------------------------
import subprocess
import os
import sys
# Run doxygen once per build (output is cached as doxygen/doxy) so the
# generated API reference ships alongside the Sphinx HTML output.
original_workdir = os.getcwd()
docs_dir = os.path.dirname(__file__)
try:
    os.chdir(docs_dir)
    if not os.path.exists('doxygen/doxy'):
        subprocess.check_call(['doxygen'])
        subprocess.check_call(['mv', '-f', 'doxygen/html', 'doxygen/doxy'])
finally:
    # Always restore the working directory, even if doxygen fails.
    os.chdir(original_workdir)
html_extra_path = ['doxygen']
# -- Generate RST files from runtime docs ------------------------------------
import openql as ql
import m2r2
import re
def docs_to_rst_magic(text, header_level=1):
    """Convert OpenQL's runtime documentation dump format to RST.

    Parameters:
        text -- raw documentation text as dumped by the OpenQL runtime
                (indented sections, `* title *` headers, NOTE:/WARNING:
                blocks, markdown inline syntax).
        header_level -- RST heading level used for the outermost sections.

    Returns the converted RST text.
    """
    # Perform conversion magic.
    output = []
    rst_block = False   # currently inside a .. note:: / .. warning:: body
    remove = False      # currently inside a dropped NOTE*: section
    blank = True        # previous line was blank (start of a text block)
    indent = 0          # current section nesting depth
    for line in text.split('\n'):
        # Handle blank lines.
        if not line.strip():
            rst_block = False
            remove = False
            blank = True
            continue
        # Drop lines if we're in a removed section.
        if remove:
            continue
        # Strip section indentation.
        while not line.startswith(' '*indent):
            indent -= 1
            header_level -= 1
        line = line[indent*2:]
        # Handle the first line of a block of text, i.e. after a blank line.
        if blank:
            blank = False
            output.append('')
            # Handle section header.
            if line.startswith('* ') and line.endswith(' *'):
                output.append('#'*header_level + ' ' + line[2:-2])
                indent += 1
                header_level += 1
                continue
            # Handle "note" block.
            elif line.startswith('NOTE: '):
                output.append('.. note::')
                rst_block = True
                line = line[6:]
            # Handle removed note blocks. This is used for "X is not included
            # in this build" notifications that are naturally the case when
            # ReadTheDocs is building OpenQL.
            elif line.startswith('NOTE*: '):
                remove = True
                continue
            # Handle "warning" block.
            elif line.startswith('WARNING: '):
                output.append('.. warning::')
                rst_block = True
                line = line[9:]
            # A new RST block (note or warning) was opened, which means we need
            # to capitalize the first letter.
            if rst_block:
                output.append(' ' + m2r2.convert(line[:1].upper() + line[1:]).strip())
                continue
        # Indent followup lines of RST blocks.
        if rst_block:
            line = ' ' + m2r2.convert(line).strip()
        # Finished converting stuff.
        output.append(line)
    # Convert back to normal text.
    text = '\n'.join(output) + '\n'
    # Convert markdown syntax to RST.
    text = m2r2.convert(text)
    # m2r2 is a bit overzealous about things that look like HTML tags. After
    # all, markdown permits insertion of raw HTML, and RST doesn't, so it
    # does its best to convert things. That's not what we want; syntax like
    # <stuff> is used all over the place as placeholders within code blocks
    # and such, and there absolutely should never be raw HTML in the
    # docstrings anyway. So we just revert m2r2's hard work in this respect
    # by stripping all :raw-html-m2r:`` blocks.
    text = re.sub(r'(?:\\ )?:raw-html-m2r:`([^`]+)`(?:\\ )?', r'\1', text)
    return text
def get_version(verbose=0):
    """Extract the OpenQL version string from include/ql/version.h.

    The *verbose* argument is accepted for backward compatibility and is
    not used. Raises Exception when no OPENQL_VERSION_STRING define is
    found.
    """
    pattern = re.compile(r'[\t ]*#define[\t ]+OPENQL_VERSION_STRING[\t ]+"(.*)"')
    header_path = os.path.join('..', 'include', 'ql', 'version.h')
    with open(header_path, 'r') as header:
        for line in header:
            match = pattern.match(line)
            if match:
                return match.group(1)
    raise Exception('failed to parse version string from include/ql/version.h')
# Generated RST output lives under gen/; create it on the first build.
if not os.path.exists('gen'):
    os.makedirs('gen')
# Version in installation instructions.
with open('manual/installation.rst.template', 'r') as f:
    docs = f.read()
docs = docs.replace('{version}', get_version())
with open('gen/manual_installation.rst', 'w') as f:
    f.write(docs)
# Architecture list.
with open('reference/architectures.rst.template', 'r') as f:
    docs = f.read()
docs = docs.replace('{architectures}', docs_to_rst_magic(ql.dump_architectures(), 2))
with open('gen/reference_architectures.rst', 'w') as f:
    f.write(docs)
# Global option list.
with open('reference/options.rst.template', 'r') as f:
    docs = f.read()
docs = docs.replace('{options}', docs_to_rst_magic(ql.dump_options(), 2))
with open('gen/reference_options.rst', 'w') as f:
    f.write(docs)
# Pass list.
with open('reference/passes.rst.template', 'r') as f:
    docs = f.read()
docs = docs.replace('{passes}', docs_to_rst_magic(ql.dump_passes(), 2))
with open('gen/reference_passes.rst', 'w') as f:
    f.write(docs)
# Resource list.
with open('reference/resources.rst.template', 'r') as f:
    docs = f.read()
docs = docs.replace('{resources}', docs_to_rst_magic(ql.dump_resources(), 2))
with open('gen/reference_resources.rst', 'w') as f:
    f.write(docs)
# Configuration file reference.
with open('reference/configuration.rst.template', 'r') as f:
    docs = f.read()
docs = docs.replace('{platform}', docs_to_rst_magic(ql.dump_platform_docs(), 3))
docs = docs.replace('{compiler}', docs_to_rst_magic(ql.dump_compiler_docs(), 3))
with open('gen/reference_configuration.rst', 'w') as f:
    f.write(docs)
# Output of simple.py: run the example so its QASM output can be included.
import shutil
original_workdir = os.getcwd()
examples_dir = os.path.join(os.path.dirname(__file__), '..', 'examples')
try:
    os.chdir(examples_dir)
    subprocess.check_call([sys.executable, os.path.join(examples_dir, 'simple.py')])
finally:
    os.chdir(original_workdir)
shutil.copyfile(os.path.join(examples_dir, 'output', 'my_program.qasm'), 'gen/my_program.qasm')
shutil.copyfile(os.path.join(examples_dir, 'output', 'my_program_scheduled.qasm'), 'gen/my_program_scheduled.qasm')
# -- Project information -----------------------------------------------------
project = 'OpenQL'
copyright = '2016-2021, QuTech, TU Delft'
author = 'QuTech, TU Delft'
master_doc = 'index'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'm2r2',
    'sphinx.ext.todo',
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.autosummary'
]
autodoc_default_flags = ['members']
# autosummary_generate = True
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Some or just temporary files,
# other ones files 'include::'d by another .rst file.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store',
    'platform_*.rst', 'mapping.rst', 'scheduling.rst', 'decomposition.rst',
    'optimization.rst', 'scheduling_ccl.rst', 'scheduling_cc.rst']
def skip(app, what, name, obj, would_skip, options):
    """autodoc-skip-member hook: always document __init__, otherwise
    defer to Sphinx's own decision."""
    return False if name == "__init__" else would_skip
def setup(app):
    """Sphinx extension entry point: register the autodoc skip hook."""
    app.connect("autodoc-skip-member", skip)
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# (A stray `[extensions]` expression statement was removed here; it only
# built a throwaway list and discarded it.)
todo_include_todos = True
# to enable figure numbering
numfig = True
autodoc_member_order = 'bysource'
| 115
| 0.625743
|
4a045297e9891e1cfb61219082a89bf9c316a946
| 2,756
|
py
|
Python
|
sdk/python/lib/pulumi/automation/_cmd.py
|
alex-shafer-1001/pulumi
|
5ec0b2ffe6db08290a5c680f86e8e6b8ccf7d9b5
|
[
"Apache-2.0"
] | 12,004
|
2018-06-17T23:56:29.000Z
|
2022-03-31T18:00:09.000Z
|
sdk/python/lib/pulumi/automation/_cmd.py
|
alex-shafer-1001/pulumi
|
5ec0b2ffe6db08290a5c680f86e8e6b8ccf7d9b5
|
[
"Apache-2.0"
] | 6,263
|
2018-06-17T23:27:24.000Z
|
2022-03-31T19:20:35.000Z
|
sdk/python/lib/pulumi/automation/_cmd.py
|
alex-shafer-1001/pulumi
|
5ec0b2ffe6db08290a5c680f86e8e6b8ccf7d9b5
|
[
"Apache-2.0"
] | 706
|
2018-06-17T23:56:50.000Z
|
2022-03-31T11:20:23.000Z
|
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import tempfile
from typing import List, Mapping, Optional, Callable, Any
from .errors import create_command_error
OnOutput = Callable[[str], Any]
class CommandResult:
    """Captured output and exit status of one finished CLI invocation."""

    def __init__(self, stdout: str, stderr: str, code: int) -> None:
        self.stdout = stdout
        self.stderr = stderr
        self.code = code

    def __repr__(self):
        return "CommandResult(stdout={!r}, stderr={!r}, code={!r})".format(
            self.stdout, self.stderr, self.code)

    def __str__(self) -> str:
        return "\n code: {}\n stdout: {}\n stderr: {}".format(
            self.code, self.stdout, self.stderr)
def _run_pulumi_cmd(args: List[str],
                    cwd: str,
                    additional_env: Mapping[str, str],
                    on_output: Optional[OnOutput] = None) -> CommandResult:
    """Run the ``pulumi`` CLI and capture its output.

    Args:
        args: CLI arguments (the ``pulumi`` executable is prepended).
        cwd: working directory for the child process.
        additional_env: extra environment variables layered over os.environ.
        on_output: optional callback invoked with every stdout line.

    Returns:
        CommandResult with the captured stdout, stderr and exit code.

    Raises:
        The error built by ``create_command_error`` when the CLI exits
        with a non-zero code.
    """
    # Work on a copy so the caller's argument list is never mutated
    # (the original appended "--non-interactive" to the caller's list).
    args = list(args)
    # All commands should be run in non-interactive mode.
    # This causes commands to fail rather than prompting for input (and thus hanging indefinitely).
    if "--non-interactive" not in args:
        args.append("--non-interactive")
    env = {**os.environ, **additional_env}
    cmd = ["pulumi"]
    cmd.extend(args)
    stdout_chunks: List[str] = []
    # stderr goes to a temp file so stdout can be streamed line-by-line
    # without risking a deadlock on two full pipes.
    with tempfile.TemporaryFile() as stderr_file:
        with subprocess.Popen(cmd,
                              stdout=subprocess.PIPE,
                              stderr=stderr_file,
                              cwd=cwd,
                              env=env) as process:
            assert process.stdout is not None
            while True:
                output = process.stdout.readline().decode(encoding="utf-8")
                # Empty read plus a set return code means the stream ended.
                if output == "" and process.poll() is not None:
                    break
                if output:
                    text = output.rstrip()
                    if on_output:
                        on_output(text)
                    stdout_chunks.append(text)
            code = process.returncode
        stderr_file.seek(0)
        stderr_contents = stderr_file.read().decode("utf-8")
    result = CommandResult(stderr=stderr_contents, stdout='\n'.join(stdout_chunks), code=code)
    if code != 0:
        raise create_command_error(result)
    return result
| 34.886076
| 99
| 0.615747
|
4a0452f053f0f824c9924afec7b8efbcf9a837d5
| 7,792
|
py
|
Python
|
lc5/lc4_rand_test.py
|
sqf-ice/ECC-Verilog
|
415639c467f1d5865e974139a1a5ab3f629e399f
|
[
"MIT"
] | 2
|
2021-06-14T09:18:11.000Z
|
2021-08-02T10:52:51.000Z
|
lc5/lc4_rand_test.py
|
sqf-ice/ECC-Verilog
|
415639c467f1d5865e974139a1a5ab3f629e399f
|
[
"MIT"
] | null | null | null |
lc5/lc4_rand_test.py
|
sqf-ice/ECC-Verilog
|
415639c467f1d5865e974139a1a5ab3f629e399f
|
[
"MIT"
] | 1
|
2021-06-26T06:39:30.000Z
|
2021-06-26T06:39:30.000Z
|
#!/usr/bin/python
# ---------------
# Address Spaces:
# ---------------
# USER_MIN = 0x0000
# USER_MAX = 0x7FFF
# USER_CODE_MIN = 0x0000
# USER_CODE_MAX = 0x1FFF
# OS_MIN = 0x8000
# OS_MAX = 0xFFFF
# OS_CODE_MIN 0x8000
# OS_CODE_MAX 0x9FFF
import sys, os, random
addr_lst = (0xB010, 0xC010, 0xD010)  # data addresses used by generated memory ops
# NOTE(review): Python-2 style -- parse_symbol() concatenates
# addr_regs + alu_regs, which requires range() to return lists;
# confirm before porting to Python 3.
addr_regs = range(0, 4)   # R0-R3: registers used to hold addresses
alu_regs = range(4, 8)    # R4-R7: registers used as ALU destinations
label_counter = 0         # index of the next label to emit
max_label_counter = 0     # furthest forward label referenced so far
def next_label():
    """Return the current label name and advance the global counter."""
    global label_counter
    current = label_counter
    label_counter = current + 1
    return "LBL_%04x" % current
def relative_label(offset):
    """Return the label `offset` instructions ahead of (or behind) the
    current position, remembering the farthest forward target seen."""
    global label_counter, max_label_counter
    target = label_counter + offset
    if target > max_label_counter:
        max_label_counter = target
    return "LBL_%04x" % target
def align_label():
    """Return a label at a 16-aligned address strictly ahead of the current
    position (used for JSR targets, which must be aligned)."""
    global max_label_counter
    # Round (label_counter + 32) down to a multiple of 16; the +32 ensures
    # the result is always past the current instruction.
    num = (label_counter + 32) & (~0xf)
    max_label_counter = max(num, max_label_counter)
    return "LBL_%04x" % num
def is_converged():
    """True once emission has passed every forward label handed out so far,
    i.e. no outstanding branch targets lie ahead of the current position."""
    return (label_counter > max_label_counter)
def orig(addr):
    """Reset the instruction address counter and return the matching
    .ADDR assembler directive."""
    global label_counter
    label_counter = addr
    return ".ADDR x" + format(addr, "04x")
def parse_symbol(sym):
    """Expand one operand-template token into concrete assembly text.

    Register tokens pick a random register from the matching bucket, the
    label tokens allocate forward branch targets, and "sN"/"uN" tokens
    become random signed/unsigned N-bit immediates.  Any other token
    (e.g. a literal "#0") is returned verbatim.
    """
    if sym == "reg":
        return "R%d" % random.choice(addr_regs + alu_regs)
    if sym == "addr_reg":
        return "R%d" % random.choice(addr_regs)
    if sym == "alu_reg":
        return "R%d" % random.choice(alu_regs)
    if sym == "label":
        return relative_label(random.randint(2, 5))
    if sym == "align_label":
        return align_label()
    if sym == "jmpr_label":
        return relative_label(random.randint(8, 12))
    if sym[0] == "s":  # signed immediate, e.g. "s5"
        bits = int(sym[1:])
        lo, hi = -2 ** (bits - 1), 2 ** (bits - 1) - 1
        return "#%d" % random.randint(lo, hi)
    if sym[0] == "u":  # unsigned immediate, e.g. "u7"
        bits = int(sym[1:])
        return "#%d" % random.randint(0, (2 ** bits) - 1)
    return sym
def make_insn(opcode, template):
    """Emit one labelled instruction, expanding every template token via
    parse_symbol().  LEA assembles to two machine words, so the address
    counter gets an extra bump for it."""
    global label_counter
    lst = [parse_symbol(s) for s in template.split()]
    insn = "%s %s %s" % (next_label(), opcode, ", ".join(lst))
    if opcode == "LEA":
        label_counter += 1 # LEA expands to two instructions
    return insn
# Instruction templates, grouped into buckets: the generator first picks a
# bucket, then an opcode within it, so each bucket is equally likely.
# Each entry is (opcode, operand-template); see parse_symbol() for tokens.

# Arithmetic / logic instructions (results go to ALU registers).
alu_ops = (
    (("ADD", "alu_reg reg reg"),
     ("MUL", "alu_reg reg reg"),
     ("SUB", "alu_reg reg reg"),
     ("ADD", "alu_reg reg s5"),),
    (("CMP" , "reg reg"),
     ("CMPU" , "reg reg"),
     ("CMPI" , "reg s7"),
     ("CMPIU", "reg u7")),
    (("AND" , "alu_reg reg reg"),
     ("NOT" , "alu_reg reg"),
     ("OR" , "alu_reg reg reg"),
     ("XOR" , "alu_reg reg reg"),
     ("AND" , "alu_reg reg s5"),),
    (("CONST", "alu_reg s9"),),
    (("SLL" , "alu_reg reg u4"),
     ("SRA" , "alu_reg reg u4"),
     ("SRL" , "alu_reg reg u4"),),
    (("HICONST" , "alu_reg u8"),),
    (("DIV", "alu_reg reg reg"),
     ("MOD", "alu_reg reg reg")),
)
# Memory instructions; only address registers are used so every access
# stays inside the initialized regions of addr_lst.
mem_ops = (
    (("LDR", "addr_reg addr_reg #0"),),
    (("STR", "addr_reg addr_reg #0"),),
    (("LDR", "addr_reg addr_reg s2"),),
    (("STR", "addr_reg addr_reg s2"),),
    (("ADD", "addr_reg addr_reg s2"),),
    (("SLL", "addr_reg addr_reg #0"),
     ("SRA", "addr_reg addr_reg #0"),
     ("SRL", "addr_reg addr_reg #0")),
)
# Control-flow instructions; JSRR/JMPR/LOOP are expanded specially in
# generate() rather than as single templates.
br_ops = (
    (("BRn", "label"),
     ("BRnz", "label"),
     ("BRnp", "label"),
     ("BRz", "label"),
     ("BRzp", "label"),
     ("BRp", "label"),
     ("BRnzp", "label"),
     ("JMP", "label"),
     ("JSR", "align_label"),
     ("TRAP", "u8"),
     ("JSRR", ""),
     ("JMPR", ""),),
    (("LOOP", ""),),
)
# Mix of loads/stores and branches for load-use / branch hazard testing.
br_ld_ops = (
    (("LDR", "addr_reg addr_reg #0"),),
    (("LDR", "addr_reg addr_reg s2"),),
    (("ADD", "addr_reg addr_reg s2"),
     ("STR", "addr_reg addr_reg s2")),
    (("BRn", "label"),
     ("BRnz", "label"),
     ("BRnp", "label"),
     ("BRz", "label"),
     ("BRzp", "label"),
     ("BRp", "label"),
     ("BRnzp", "label"),),
)
def generate(filename, insn_lst):
    """Write one random LC4 assembly test program to `filename`.

    `insn_lst` is a tuple of opcode buckets (alu_ops, mem_ops, ...);
    instructions are drawn until the OS code region is nearly full, then
    data regions and a dummy TRAP table are appended.
    NOTE: this is Python 2 code (print >>file, xrange, integer "/").
    """
    random.seed(1)
    output = open(filename, 'w')
    print >>output, ".OS"
    print >>output, ".CODE"
    print >>output, orig(0x8200)
    # Init all registers with valid addresses
    for reg in addr_regs:
        value = random.choice(addr_lst)
        print >>output, make_insn("CONST", "R%d #%d" % (reg, value % 256))
        print >>output, make_insn("HICONST", "R%d #%d" % (reg, value / 256))  # Py2: "/" is integer division here
    # Generate random instructions
    global label_counter
    while label_counter <= (0xA000 - 100):
        (opcode, format) = random.choice(random.choice(insn_lst))
        # JSR writes R7 with the PC, so make sure it is already in the alu_regs bucket
        if (opcode in ["JSR", "JSRR", "TRAP"]) and (7 not in alu_regs):
            continue
        if opcode in ["JSRR", "JMPR"]:
            if not is_converged(): continue # avoid some other insn jumping to middle of LEA
            # Reserve a register to hold the jump target computed by LEA.
            lea_reg = random.choice(alu_regs);
            alu_regs.remove(lea_reg)
            print >>output, make_insn("LEA", "R%d jmpr_label" % lea_reg)
            # Put in a random number of spacing instructions
            for i in xrange(0, random.randint(0, 5)):
                (op, format) = random.choice(random.choice(alu_ops))
                print >>output, make_insn(op, format)
            print >>output, make_insn(opcode, "R%d" % lea_reg)
            alu_regs.append(lea_reg)
            continue
        if opcode == "LOOP":
            if not is_converged(): continue
            # Emit a small counted loop with a backwards conditional branch.
            count_reg = random.choice(alu_regs)
            alu_regs.remove(count_reg)
            print >>output, make_insn("CONST", "R%d #0" % count_reg)
            print >>output, make_insn("ADD", "R%d R%d #1" % (count_reg, count_reg))
            loopsize = random.randint(1, 10)
            # Put in a random number of insn in loop
            for i in xrange(0, loopsize):
                (op, format) = random.choice(random.choice(alu_ops))
                print >>output, make_insn(op, format)
            loopcount = random.randint(1, 7)
            print >>output, make_insn("CMPI", "R%d #%d" % (count_reg, loopcount))
            print >>output, make_insn("BRnp", relative_label(-(loopsize+2)))
            alu_regs.append(count_reg)
            continue
        #### Generate a normal instruction
        print >>output, make_insn(opcode, format)
        # Swap a register between the two buckets, which can only be
        # done if we know we aren't going to skip past it
        if is_converged() and random.randint(0, 20) == 1:
            new_addr_reg = random.choice(alu_regs)
            new_alu_reg = random.choice(addr_regs)
            print >>output, make_insn("ADD", "R%d R%d 0" % (new_addr_reg, new_alu_reg))
            alu_regs.remove(new_addr_reg)
            alu_regs.append(new_alu_reg)
            addr_regs.remove(new_alu_reg)
            addr_regs.append(new_addr_reg)
    # stop label
    while not is_converged():
        print >>output, make_insn("ADD", "reg reg reg")
    print >>output, "END_LABEL ADD R1, R1, R1";
    # Initialize 100 words before and after addresses
    for addr in addr_lst:
        print >>output, "\n.DATA"
        print >>output, ".ADDR x%04x" % (addr-200)
        for i in xrange(400):
            print >>output, ".FILL x%04x" % (random.choice(addr_lst) + random.randint(-1, 1))
    # TRAP table
    print >>output, ".CODE"
    print >>output, orig(0x8000)
    for i in xrange(64):
        print >>output, make_insn("CMP", "reg reg")
        print >>output, make_insn("CMP", "reg reg")
        print >>output, make_insn("CMP", "reg reg")
        print >>output, make_insn("RET", "")
    output.close()
def main():
    """Emit one random test program for each instruction-class mix."""
    suites = (
        ("test_alu.asm", alu_ops),
        ("test_mem.asm", mem_ops),
        ("test_br.asm", br_ops + alu_ops),
        ("test_all.asm", mem_ops + alu_ops + br_ops),
        ("test_ld_br.asm", br_ld_ops),
    )
    for asm_file, ops in suites:
        generate(asm_file, ops)

if __name__ == "__main__":
    main()
| 29.403774
| 93
| 0.558008
|
4a04549a20b9866037880833e67a5ebcd0601836
| 10,861
|
py
|
Python
|
DotsAndBoxes/DotsAndBoxes/dots_and_boxes/dots_and_boxes.py
|
SCIJLab/Dots-and-Boxes-GUI
|
28b0af7a34cb3345b38088d0d479020bc2372b7d
|
[
"MIT"
] | 4
|
2020-02-01T02:08:37.000Z
|
2020-07-21T01:09:39.000Z
|
DotsAndBoxes/DotsAndBoxes/dots_and_boxes/dots_and_boxes.py
|
Everyb0dyLies/Dots-and-Boxes
|
28b0af7a34cb3345b38088d0d479020bc2372b7d
|
[
"MIT"
] | null | null | null |
DotsAndBoxes/DotsAndBoxes/dots_and_boxes/dots_and_boxes.py
|
Everyb0dyLies/Dots-and-Boxes
|
28b0af7a34cb3345b38088d0d479020bc2372b7d
|
[
"MIT"
] | 5
|
2018-07-31T15:29:00.000Z
|
2019-07-01T05:54:32.000Z
|
# -*- coding: UTF-8 -*-
import json, time
from .game import *
from .player import *
class DotsAndBoxes:
    """Game controller: owns the current Game, both players, and a move
    history that supports undo/redo, stepping to arbitrary positions, and
    saving/loading games as JSON files."""

    def __init__(self, window_controller=None):
        self._current_game = None
        self._history = None
        self._current_step = None
        self._red_player = None
        self._blue_player = None
        self._window_controller = window_controller
        self._update_time = time.time()

    @property
    def current_game(self):
        return self._current_game

    @property
    def history(self):
        # Return a copy so callers cannot mutate the internal move list.
        return self._history.copy()

    @property
    def last_move(self):
        # The most recently played piece, or None before the first move.
        if (self._current_game == None or self._current_step == 0):
            return None
        return self._history[self._current_step-1]

    @property
    def red_player(self):
        return self._red_player

    @red_player.setter
    def red_player(self, value):
        # Players may only be replaced between games, never mid-game.
        if (value.color != Color.red):
            raise DBError("Invalid players", value)
        if (self._current_game != None):
            if (not self._current_game.is_end):
                raise DBError("Current game is not over")
        self._red_player = value

    @property
    def blue_player(self):
        return self._blue_player

    @blue_player.setter
    def blue_player(self, value):
        if (value.color != Color.blue):
            raise DBError("Invalid players", value)
        if (self._current_game != None):
            if (not self._current_game.is_end):
                raise DBError("Current game is not over")
        self._blue_player = value

    @property
    def current_player(self):
        # The Player object whose turn it currently is.
        return (self._red_player if self._current_game.current_player_color == Color.red else self._blue_player)

    @property
    def current_step(self):
        # Return the current step count (int).
        return self._current_step

    @property
    def need_update(self, last_update_time):
        # NOTE(review): a property getter cannot take an extra argument, so
        # reading `self.need_update` raises TypeError.  This probably should
        # be a plain method (drop @property) -- confirm with callers.
        return self._update_time > last_update_time

    def _update(self):
        # Record the change, refresh the GUI (if any), then notify players:
        # game-over callbacks when finished, otherwise feed AI players the
        # latest move so they can respond.
        self._update_time = time.time()
        if (self._window_controller != None):
            self._window_controller.update()
        if self.current_game.is_end:
            self.red_player._game_is_over(self.current_game.winner == Color.red)
            self.blue_player._game_is_over(self.current_game.winner == Color.blue)
        else:
            if isinstance(self.red_player, AIPlayer):
                self.red_player.last_move(self.last_move, self._current_game.board, self._current_game.history, self.current_player.color)
            if isinstance(self.blue_player, AIPlayer):
                self.blue_player.last_move(self.last_move, self._current_game.board, self._current_game.history, self.current_player.color)

    def new_game(self):
        """Start a new game; requires both players set and no game running."""
        if (self._current_game != None):
            if (not self._current_game.is_end):
                raise DBError("Current game is not over")
        if (self._red_player == None or self._blue_player == None):
            raise DBError("Lack of player")
        self._new_game()
        self._update()

    def _new_game(self):
        self._current_game = Game(self._red_player, self._blue_player)
        self._history = []
        self._current_step = 0

    def end_game(self):
        """Discard the current game and its history."""
        if (self._current_game == None):
            raise DBError("Do not have current game")
        self._current_game = None
        self._history = None
        self._current_step = None

    def _move(self, piece):
        # Apply the move and reconcile it with the redo history.
        self._current_game.move(piece)
        if (self._current_step < len(self._history)): # playing a new move from a historical step (checking first avoids indexing past _history)
            if (piece != self._history[self._current_step]): # the new move differs from the recorded next move
                while (self._current_step < len(self._history)): # discard the stale future history first
                    self._history.pop()
                self._history.append(piece)
        else:
            self._history.append(piece)
        self._current_step = self._current_step + 1

    def move(self, piece):
        """Play `piece` for the player whose turn it is."""
        if (self._current_game == None):
            raise DBError("Do not have current game")
        if (piece.color != self._current_game.current_player_color):
            raise MoveError("Player color is wrong")
        self._move(piece)
        self._update()

    def move_with_str(self, input_str):
        """Play a move given in text form, e.g. "r(a1,v)" (see save mode 0)."""
        (color, user_coordinate) = self._str_to_coordinate(input_str)
        if (color != self._current_game.current_player_color):
            raise MoveError("Player color is wrong")
        self.move(Piece(color, user_coordinate))

    def _str_to_coordinate(self, input_str):
        # Parse fixed-position text like "r(a1,v)": index 0 = color (r/b),
        # index 2 = column (a-f), index 3 = row digit (0-6), index 5 = v/h.
        color = x = y = type = None
        try:
            if (input_str[0] == 'r' or input_str[0] == 'R'):
                color = Color.red
            elif (input_str[0] == 'b' or input_str[0] == 'B'):
                color = Color.blue
            else:
                raise ValueError()
            if (input_str[2] == 'a' or input_str[2] == 'A'):
                x = 'a'
            elif (input_str[2] == 'b' or input_str[2] == 'B'):
                x = 'b'
            elif (input_str[2] == 'c' or input_str[2] == 'C'):
                x = 'c'
            elif (input_str[2] == 'd' or input_str[2] == 'D'):
                x = 'd'
            elif (input_str[2] == 'e' or input_str[2] == 'E'):
                x = 'e'
            elif (input_str[2] == 'f' or input_str[2] == 'F'):
                x = 'f'
            else:
                raise ValueError()
            y = int(input_str[3])
            if (y < 0 or y > 6):
                raise ValueError()
            if (input_str[5] == 'v' or input_str[5] == 'V'):
                type = 'v'
            elif (input_str[5] == 'h' or input_str[5] == 'H'):
                type = 'h'
            else:
                raise ValueError
        except (IndexError, ValueError, TypeError):
            raise DBError("Invalid input", input_str)
        return (color, (x, str(y), type))

    def _back(self):
        self._current_game.back()
        self._current_step = self._current_step - 1

    def back(self):
        """Undo the last move (history is kept so it can be replayed)."""
        if (self._current_game == None):
            raise DBError("Do not have current game")
        if (self._current_step == 0):
            raise DBError("Do not have step")
        self._back()
        self._update()

    def turn_to_step(self, step_num):
        """Jump to an arbitrary step by undoing/replaying history moves."""
        if (self._current_game == None):
            raise DBError("Do not have current game")
        if (step_num < 0 or step_num > len(self._history) or step_num == self._current_step):
            raise DBError("Invalid step num")
        while (self._current_step > step_num):
            self._back()
        while (self._current_step < step_num):
            self._move(self._history[self._current_step])
        self._update()

    def _data_as_dict(self):
        # Serialize the current game (players, pieces, result) to a dict
        # matching the default (mode 1) save format.
        if (self._current_game == None):
            raise DBError("Do not have current game")
        if (self._current_step == 0):
            raise DBError("Do not have step data")
        pieces = []
        for piece in self._current_game.history:
            piece_dict = {"timestamp": piece.datetime.timestamp(),
                          "player": "r" if piece.color == Color.red else "b",
                          "coordinate": "".join(piece.user_coordinate)}
            if piece.annotation != "":
                piece_dict["annotation"] = piece.annotation
            pieces.append(piece_dict)
        dict = {"R": self._red_player.name,
                "B": self._blue_player.name,
                "is_end": self._current_game.is_end,
                "timestamp": self._current_game.datetime.timestamp(),
                "pieces": pieces}
        if (self._current_game.is_end):
            dict["winner"] = "R" if self._current_game.winner == Color.red else "B"
        return dict

    def save_to_file(self, file_path, mode=1, event=None):
        """Save the game as JSON.  mode 1 is the native format; mode 0 is a
        legacy plain-text piece format and also derives the file name itself
        (requires a finished game and an `event` string)."""
        dict = self._data_as_dict()
        #'''
        if (mode == 0): # legacy save format
            if (not self._current_game.is_end):
                raise DBError("Current game is not over")
            if (event == None):
                raise DBError("Invalid event")
            pieces_arr = []
            for piece in self._current_game.history:
                piece_str = ""
                if (piece.color == Color.red):
                    piece_str = piece_str + "r"
                else:
                    piece_str = piece_str + "b"
                piece_str = piece_str + "(" + "".join(piece.user_coordinate[0:2]) + "," + "".join(piece.user_coordinate[2]) + ")"
                piece_dict = {"piece": piece_str}
                if piece.annotation != "":
                    piece_dict["annotation"] = piece.annotation
                pieces_arr.append(piece_dict)
            dict = {"R": self._red_player.name,
                    "B": self._blue_player.name,
                    "winner": "R" if self._current_game.winner == Color.red else "B",
                    "RScore": self._red_player.score,
                    "BScore": self._blue_player.score,
                    "Date": self._current_game.datetime.strftime("%Y-%m-%d"),
                    "Event": event,
                    "game": pieces_arr}
            file_path = file_path + "DB:" + self._red_player.name + " vs " + self._blue_player.name + ":"
            file_path = file_path + ("先手胜" if self._current_game.winner == Color.red else "后手胜")
            file_path = file_path + ".txt"#'''
        f = open(file_path, 'w')
        f.write(json.dumps(dict))
        f.close()
        return True

    def load_from_file(self, file_path, mode=1):
        """Load a saved game (mode must match the format it was saved in)
        and replay all of its moves."""
        f = open(file_path, 'r')
        file_data = f.read()
        f.close()
        if (mode == 0): # legacy save format
            data = json.loads(file_data)
            self._red_player = HumanPlayer(Color.red, data['R'], self)
            self._blue_player = HumanPlayer(Color.blue, data['B'], self)
            self._new_game()
            for step in data['game']:
                self.move_with_str(step["piece"])
        else:
            data = json.loads(file_data)
            self._red_player = HumanPlayer(Color.red, data['R'], self)
            self._blue_player = HumanPlayer(Color.blue, data['B'], self)
            self._new_game()
            for step_data in data['pieces']:
                piece = Piece(Color.red if step_data['player'] == 'r' else Color.blue, (step_data['coordinate'][0], step_data['coordinate'][1], step_data['coordinate'][2]))
                self.move(piece)

    def set_piece_annotation(self, step_num, annotation):
        """Attach a text annotation to the move at `step_num`."""
        if (self._current_game == None):
            raise DBError("Do not have current game")
        if (step_num < 0 or step_num > len(self._history)):
            raise DBError("Invalid step num")
        self._history[step_num].annotation = annotation
class DBError(DBException):
    """Generic error raised by the DotsAndBoxes controller.

    Takes a message plus optional context objects, e.g.
    ``DBError("Invalid input", input_str)``.
    """
    def __init__(self, *args, **kwargs):
        # Bug fix: the previous code called super().__init__(args, kwargs),
        # which wrapped the arguments into two extra tuples and made
        # str(exc) read like "(('msg', ...), {})".  Forward the positional
        # arguments unpacked so the message survives intact; kwargs are
        # still accepted for backward compatibility but are not stored by
        # the base Exception.
        super(DBError, self).__init__(*args)
| 35.963576
| 172
| 0.56063
|
4a04549e7c58d5285ad1bb454f04e5dedc213040
| 2,264
|
py
|
Python
|
cas/registration/registration.py
|
ced-mos/cas-assignment
|
0b72c69afd6d91a0da554db99d6897cd7ffd7052
|
[
"MIT"
] | null | null | null |
cas/registration/registration.py
|
ced-mos/cas-assignment
|
0b72c69afd6d91a0da554db99d6897cd7ffd7052
|
[
"MIT"
] | null | null | null |
cas/registration/registration.py
|
ced-mos/cas-assignment
|
0b72c69afd6d91a0da554db99d6897cd7ffd7052
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import numpy as np
np.set_printoptions(suppress=True)
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import cas.registration.util as util
from assignments.registration import registration
def test_icp():
    """Visual smoke test for ICP: align the template cloud to the target
    cloud and plot both for manual inspection."""
    target_points = util.read_data('data/registration/TargetPoints.csv')
    print(target_points)
    template_points = util.read_data('data/registration/TemplatePoints.csv')
    print(template_points)
    # Seed ICP with an initial pose estimate, then refine.
    T_rot_x = registration.get_initial_pose(template_points, target_points)
    T, d, error = registration.icp(template_points, target_points, init_pose=T_rot_x)
    # Apply the recovered transform to the template (homogeneous coords).
    template_points_T = util.make_homogenous(template_points)
    template_points_T = np.dot(T, template_points_T.T).T[:, :3]
    print(template_points_T)
    print(T)
    print(error)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Plot only a random subsample of the target so the figure stays legible.
    sample_choice = np.random.choice(target_points.shape[0], 1000)
    # print(sample_choice)
    samples = target_points[sample_choice, :]
    ax.scatter(samples[:, 0], samples[:, 1], samples[:, 2], c='b', marker='.')
    # ax.scatter(template_points[:, 0], template_points[:, 1], template_points[:, 2], c='g', marker='x')
    ax.scatter(template_points_T[:, 0], template_points_T[:, 1], template_points_T[:, 2], c='r', marker='x')
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    # plt.axis([-3, 3, -3, 3])
    plt.axis('equal')
    plt.show()
def test_paired_points_matching():
    """Smoke test for paired-point matching: generate N corresponding points
    related by a random rigid transform, recover the transform, and report
    the residual alignment error."""
    N = 5
    T_random = util.get_random_transformation_matrix()
    target, source = util.get_random_point_clouds(N, T_random)
    print('Target point cloud\n', target, '\n')
    T, R, t = registration.paired_points_matching(source, target)
    # Apply the recovered transform to the source cloud (homogeneous coords).
    source_H = util.make_homogenous(source)
    source_T = np.dot(T, source_H.T).T[:, :3]
    print('Source point cloud\n', source_T, '\n')
    error = np.linalg.norm(source_T - target)
    print('Transformation\n', T, '\n')
    print('Error\n', error, '\n')
    # Heuristic pass/fail threshold on the Frobenius-norm residual.
    if error < 0.1:
        print("Successful")
    else:
        print("Check again")
    return

if __name__ == "__main__":
    test_paired_points_matching()
    test_icp()
| 27.950617
| 108
| 0.681537
|
4a0455214db3427bbe58794f93de468d74de9e7d
| 14,022
|
py
|
Python
|
docs/conf.py
|
lhutton1/tvm
|
e9380e47f0b97c0b98b97f082b075eaa1308038b
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1
|
2021-06-03T20:20:24.000Z
|
2021-06-03T20:20:24.000Z
|
docs/conf.py
|
lhutton1/tvm
|
e9380e47f0b97c0b98b97f082b075eaa1308038b
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
docs/conf.py
|
lhutton1/tvm
|
e9380e47f0b97c0b98b97f082b075eaa1308038b
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import gc
import sys
import inspect
import os, subprocess
import shlex
import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, "../python/"))
sys.path.insert(0, os.path.join(curr_path, "../vta/python"))
# -- General configuration ------------------------------------------------
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
copyright = "2020 - 2021, %s" % author
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
def git_describe_version(original_version):
    """Get git describe version."""
    ver_py = os.path.join(curr_path, "..", "version.py")
    # Execute version.py in an isolated namespace to reach its
    # git_describe_version() helper without importing it as a module.
    libver = {"__file__": ver_py}
    exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
    _, gd_version = libver["git_describe_version"]()
    if gd_version != original_version:
        print("Use git describe based version %s" % gd_version)
    return gd_version
# Version information.
import tvm
from tvm import topi
from tvm import te
from tvm import testing
version = git_describe_version(tvm.__version__)
release = version
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_gallery.gen_gallery",
"autodocsumm",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get("TVM_THEME", "rtd")
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == "rtd":
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "UA-75982049-2",
"logo_only": True,
}
html_logo = "_static/img/tvm-logo-small.png"
html_favicon = "_static/img/tvm-logo-square.png"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
"matplotlib": ("https://matplotlib.org/", None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = ["../tutorials/", "../vta/tutorials/"]
gallery_dirs = ["tutorials", "vta/tutorials"]
subsection_order = ExplicitOrder(
[
"../tutorials/get_started",
"../tutorials/frontend",
"../tutorials/language",
"../tutorials/optimize",
"../tutorials/autotvm",
"../tutorials/auto_scheduler",
"../tutorials/dev",
"../tutorials/topi",
"../tutorials/deployment",
"../tutorials/micro",
"../vta/tutorials/frontend",
"../vta/tutorials/optimize",
"../vta/tutorials/autotvm",
]
)
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
within_subsection_order = {
"get_started": [
"introduction.py",
"install.py",
"tvmc_command_line_driver.py",
"autotvm_relay_x86.py",
"tensor_expr_get_started.py",
"autotvm_matmul_x86.py",
"auto_scheduler_matmul_x86.py",
"cross_compilation_and_rpc.py",
"relay_quick_start.py",
],
"frontend": [
"from_pytorch.py",
"from_tensorflow.py",
"from_mxnet.py",
"from_onnx.py",
"from_keras.py",
"from_tflite.py",
"from_coreml.py",
"from_darknet.py",
"from_caffe2.py",
],
"language": [
"schedule_primitives.py",
"reduction.py",
"intrin_math.py",
"scan.py",
"extern_op.py",
"tensorize.py",
"tuple_inputs.py",
"tedd.py",
],
"optimize": [
"opt_gemm.py",
"opt_conv_cuda.py",
"opt_conv_tensorcore.py",
],
"autotvm": [
"tune_simple_template.py",
"tune_conv2d_cuda.py",
"tune_relay_cuda.py",
"tune_relay_x86.py",
"tune_relay_arm.py",
"tune_relay_mobile_gpu.py",
],
"auto_scheduler": [
"tune_matmul_x86.py",
"tune_conv2d_layer_cuda.py",
"tune_network_x86.py",
"tune_network_cuda.py",
],
"dev": [
"low_level_custom_pass.py",
"use_pass_infra.py",
"use_pass_instrument.py",
"bring_your_own_datatypes.py",
],
}
class WithinSubsectionOrder:
    """Sort key for tutorials within one gallery subsection.

    Files listed in ``within_subsection_order`` come first, in the listed
    order; every other file sorts alphabetically after them.
    """

    def __init__(self, src_dir):
        # Only the final path component identifies the subsection.
        self.src_dir = src_dir.split("/")[-1]

    def __call__(self, filename):
        explicit = within_subsection_order.get(self.src_dir, ())
        if filename not in explicit:
            # Unlisted files sort by plain filename.
            return filename
        index = explicit.index(filename)
        assert index < 1e10
        # "\0" sorts before any printable character, so listed files
        # always precede the unlisted ones.
        return "\0%010d" % index
# When running the tutorials on GPUs we are dependent on the Python garbage collector
# collecting TVM packed function closures for any device memory to also be released. This
# is not a good setup for machines with lots of CPU ram but constrained GPU ram, so force
# a gc after each example.
def force_gc(gallery_cong, fname):
    """Sphinx-gallery reset hook: force a full garbage collection after each
    example so TVM packed-function closures release device memory promptly."""
    message = "(Forcing Python gc after '{}' to avoid lag in reclaiming CUDA memory)"
    print(message.format(fname))
    gc.collect()
    print("(Remaining garbage: {})".format(gc.garbage))
sphinx_gallery_conf = {
"backreferences_dir": "gen_modules/backreferences",
"doc_module": ("tvm", "numpy"),
"reference_url": {
"tvm": None,
"matplotlib": "https://matplotlib.org/",
"numpy": "https://numpy.org/doc/stable",
},
"examples_dirs": examples_dirs,
"within_subsection_order": WithinSubsectionOrder,
"gallery_dirs": gallery_dirs,
"subsection_order": subsection_order,
"filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
"find_mayavi_figures": False,
"download_all_examples": False,
"min_reported_time": 60,
"expected_failing_examples": [],
"reset_modules": (force_gc, "matplotlib", "seaborn"),
}
autodoc_default_options = {
"member-order": "bysource",
}
# Maps the original namespace to list of potential modules
# that we can import alias from.
tvm_alias_check_map = {
"tvm.te": ["tvm.tir"],
"tvm.tir": ["tvm.ir", "tvm.runtime"],
"tvm.relay": ["tvm.ir", "tvm.tir"],
}
## Setup header and other configs
import tlcpack_sphinx_addon
footer_copyright = "© 2020 Apache Software Foundation | All right reserved"
footer_note = " ".join(
"""
Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split(
"\n"
)
).strip()
header_logo = "https://tvm.apache.org/assets/images/logo.svg"
header_logo_link = "https://tvm.apache.org/"
header_links = [
("Community", "https://tvm.apache.org/community"),
("Download", "https://tvm.apache.org/download"),
("VTA", "https://tvm.apache.org/vta"),
("Blog", "https://tvm.apache.org/blog"),
("Docs", "https://tvm.apache.org/docs"),
("Conference", "https://tvmconf.org"),
("Github", "https://github.com/apache/tvm/"),
]
header_dropdown = {
"name": "ASF",
"items": [
("Apache Homepage", "https://apache.org/"),
("License", "https://www.apache.org/licenses/"),
("Sponsorship", "https://www.apache.org/foundation/sponsorship.html"),
("Security", "https://www.apache.org/security/"),
("Thanks", "https://www.apache.org/foundation/thanks.html"),
("Events", "https://www.apache.org/events/current-event"),
],
}
html_context = {
"footer_copyright": footer_copyright,
"footer_note": footer_note,
"header_links": header_links,
"header_dropdown": header_dropdown,
"header_logo": header_logo,
"header_logo_link": header_logo_link,
}
# add additional overrides
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
html_static_path += [tlcpack_sphinx_addon.get_static_path()]
def update_alias_docstring(name, obj, lines):
    """Update the docstring of alias functions.

    This function checks if the obj is an alias of another documented object
    in a different module.

    If it is an alias, then it will append the alias information to the docstring.

    Parameters
    ----------
    name : str
        The full name of the object in the doc.

    obj : object
        The original object.

    lines : list
        The docstring lines, need to be modified inplace.
    """
    parts = name.rsplit(".", 1)
    if len(parts) != 2:
        return
    target_mod, target_name = parts
    if target_mod not in tvm_alias_check_map:
        return
    if not hasattr(obj, "__module__"):
        return
    obj_mod = obj.__module__

    for candidate_mod in tvm_alias_check_map[target_mod]:
        if not obj_mod.startswith(candidate_mod):
            continue
        if hasattr(sys.modules[candidate_mod], target_name):
            role = ":py:func" if callable(obj) else ":py:class"
            lines.append(".. rubric:: Alias of %s:`%s.%s`" % (role, candidate_mod, target_name))
def process_docstring(app, what, name, obj, options, lines):
    """Sphinx ``autodoc-process-docstring`` callback: annotate aliases."""
    # Only callables and classes can be aliases worth annotating.
    if inspect.isclass(obj) or callable(obj):
        update_alias_docstring(name, obj, lines)
def setup(app):
    """Sphinx extension entry point: register the docstring post-processor."""
    app.connect("autodoc-process-docstring", process_docstring)
| 31.510112
| 96
| 0.671659
|
4a0456420903c9d13fc95aa7b478dc9a183f1780
| 4,505
|
bzl
|
Python
|
def.bzl
|
howson/bazel-gazelle
|
f3d57a478ca0043905f818766b60bb21674eaaad
|
[
"Apache-2.0"
] | null | null | null |
def.bzl
|
howson/bazel-gazelle
|
f3d57a478ca0043905f818766b60bb21674eaaad
|
[
"Apache-2.0"
] | null | null | null |
def.bzl
|
howson/bazel-gazelle
|
f3d57a478ca0043905f818766b60bb21674eaaad
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"@bazel_skylib//lib:shell.bzl",
"shell",
)
load(
"//internal:go_repository.bzl",
_go_repository = "go_repository",
)
load(
"//internal:overlay_repository.bzl",
_git_repository = "git_repository",
_http_archive = "http_archive",
)
load(
"//internal:gazelle_binary.bzl",
_gazelle_binary = "gazelle_binary_wrapper",
)
# Re-export the internal repository rules under their public names.
go_repository = _go_repository
git_repository = _git_repository
http_archive = _http_archive
gazelle_binary = _gazelle_binary

# Language extensions linked into the default gazelle binary.
DEFAULT_LANGUAGES = [
    "@bazel_gazelle//language/proto:go_default_library",
    "@bazel_gazelle//language/go:go_default_library",
]
def _gazelle_runner_impl(ctx):
    """Rule implementation: expands a bash template into a runner script.

    The generated script invokes the Gazelle binary with flags derived from
    the rule's attributes, and carries the Go SDK's `go` tool plus any
    `data` files in its runfiles.
    """
    # Assemble the Gazelle command line from the rule attributes.
    args = [ctx.attr.command]
    if ctx.attr.mode:
        args.extend(["-mode", ctx.attr.mode])
    if ctx.attr.external:
        args.extend(["-external", ctx.attr.external])
    if ctx.attr.prefix:
        args.extend(["-go_prefix", ctx.attr.prefix])
    if ctx.attr.build_tags:
        args.extend(["-build_tags", ",".join(ctx.attr.build_tags)])
    # extra_args may reference `data` files via $(location ...) expansion.
    args.extend([ctx.expand_location(arg, ctx.attr.data) for arg in ctx.attr.extra_args])
    out_file = ctx.actions.declare_file(ctx.label.name + ".bash")
    # The `go` tool from the registered Go toolchain; the script needs it.
    go_tool = ctx.toolchains["@io_bazel_rules_go//go:toolchain"].sdk.go
    substitutions = {
        "@@ARGS@@": shell.array_literal(args),
        "@@GAZELLE_LABEL@@": shell.quote(str(ctx.attr.gazelle.label)),
        "@@GAZELLE_SHORT_PATH@@": shell.quote(ctx.executable.gazelle.short_path),
        "@@GENERATED_MESSAGE@@": """
# Generated by {label}
# DO NOT EDIT
""".format(label = str(ctx.label)),
        "@@RUNNER_LABEL@@": shell.quote(str(ctx.label)),
        "@@GOTOOL@@": shell.quote(go_tool.short_path),
        "@@WORKSPACE_NAME@@": shell.quote(ctx.workspace_name),
    }
    ctx.actions.expand_template(
        template = ctx.file._template,
        output = out_file,
        substitutions = substitutions,
        is_executable = True,
    )
    # The runner needs the gazelle binary, the go tool, and any data files at
    # runtime, merged with gazelle's own default runfiles.
    runfiles = ctx.runfiles(files = [
        ctx.executable.gazelle,
        go_tool,
    ] + ctx.files.data).merge(
        ctx.attr.gazelle[DefaultInfo].default_runfiles,
    )
    return [DefaultInfo(
        files = depset([out_file]),
        runfiles = runfiles,
        executable = out_file,
    )]
# Private rule that materializes the runner script. It is wrapped by the
# `gazelle` macro below, which pairs it with an executable sh_binary.
_gazelle_runner = rule(
    implementation = _gazelle_runner_impl,
    attrs = {
        # The Gazelle binary to run; built for the host configuration.
        "gazelle": attr.label(
            default = "@bazel_gazelle//cmd/gazelle",
            executable = True,
            cfg = "host",
        ),
        # Gazelle subcommand to execute.
        "command": attr.string(
            values = [
                "fix",
                "update",
                "update-repos",
            ],
            default = "update",
        ),
        # Output mode; empty string means Gazelle's default.
        "mode": attr.string(
            values = ["", "print", "fix", "diff"],
            default = "",
        ),
        # How external Go imports are resolved.
        "external": attr.string(
            values = ["", "external", "vendored"],
            default = "",
        ),
        "build_tags": attr.string_list(),
        "prefix": attr.string(),
        "extra_args": attr.string_list(),
        "data": attr.label_list(allow_files = True),
        # Bash template expanded by _gazelle_runner_impl.
        "_template": attr.label(
            default = "@bazel_gazelle//internal:gazelle.bash.in",
            allow_single_file = True,
        ),
    },
    executable = True,
    toolchains = ["@io_bazel_rules_go//go:toolchain"],
)
def gazelle(name, **kwargs):
    """Macro: declares a runnable Gazelle target.

    Creates a private `<name>-runner` rule that produces the runner script
    and wraps it in a `sh_binary` named *name* so it can be `bazel run`.
    Both targets are tagged "manual" so wildcard builds skip them.
    """
    if "args" in kwargs:
        # The args attribute has special meaning for executable rules, but we
        # always want extra_args here instead.
        if "extra_args" in kwargs:
            fail("{}: both args and extra_args were provided".format(name))
        kwargs["extra_args"] = kwargs["args"]
        kwargs.pop("args")
    runner_name = name + "-runner"
    _gazelle_runner(
        name = runner_name,
        tags = ["manual"],
        **kwargs
    )
    native.sh_binary(
        name = name,
        srcs = [runner_name],
        tags = ["manual"],
    )
| 31.503497
| 89
| 0.611765
|
4a045896c6bcb4974698ffe2d97561f28ec9e2fe
| 2,361
|
py
|
Python
|
python-project/experiments/continuous/Exp_Geo_music.py
|
ferjorosa/bayesian-latent-forests
|
3d9e19f1d0be1e4cca0b390866589061a670cc20
|
[
"Apache-2.0"
] | null | null | null |
python-project/experiments/continuous/Exp_Geo_music.py
|
ferjorosa/bayesian-latent-forests
|
3d9e19f1d0be1e4cca0b390866589061a670cc20
|
[
"Apache-2.0"
] | null | null | null |
python-project/experiments/continuous/Exp_Geo_music.py
|
ferjorosa/bayesian-latent-forests
|
3d9e19f1d0be1e4cca0b390866589061a670cc20
|
[
"Apache-2.0"
] | null | null | null |
from experiments.continuous import ContinuousExperiment
from spn.structure.StatisticalTypes import MetaType
class Exp_Geo_music(ContinuousExperiment.ContinuousExperiment):
    """Continuous-data experiment for the geo_music dataset."""

    # 70 data attributes after filtering with 10 folds; all real-valued.
    meta_types = [MetaType.REAL] * 70

    var_types_string = "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"

    def run(self, run: int, n_folds: int, fold_log: bool):
        """Print an identifying banner, then delegate to the base-class run."""
        print("\n------------------------------------------------------------------")
        print("------------------------------------------------------------------")
        print("---------------------------- GEO_MUSIC ---------------------------")
        print("------------------------------------------------------------------")
        print("------------------------------------------------------------------\n")
        super().run(run, n_folds, fold_log)
def main():
    """Run the geo_music experiment: run id 1, 10-fold CV, per-fold logging."""
    experiment = Exp_Geo_music("geo_music")
    experiment.run(1, 10, True)
# Script entry point: run the experiment when executed directly.
if __name__ == "__main__":
    main()
| 50.234043
| 95
| 0.573062
|
4a0458ac5fc6af149aaebba28b53b613103ed7f4
| 862
|
py
|
Python
|
isi_sdk_8_2_0/test/test_copy_errors.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_0/test/test_copy_errors.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_0/test/test_copy_errors.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_0
from isi_sdk_8_2_0.models.copy_errors import CopyErrors # noqa: E501
from isi_sdk_8_2_0.rest import ApiException
class TestCopyErrors(unittest.TestCase):
    """CopyErrors unit test stubs (swagger-codegen generated placeholder)."""

    def setUp(self):
        # No fixtures needed for this generated stub.
        pass

    def tearDown(self):
        pass

    def testCopyErrors(self):
        """Test CopyErrors"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_0.models.copy_errors.CopyErrors()  # noqa: E501
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 21.02439
| 79
| 0.691415
|
4a0458f87288d9d46da985cf5037f62d9ac3bd4b
| 12,749
|
py
|
Python
|
example/ssd/symbol/common.py
|
xudong-sun/mxnet
|
fe42d30d5885dd576cb871fd70594c53efce9b42
|
[
"Apache-2.0"
] | 399
|
2017-05-30T05:12:48.000Z
|
2022-01-29T05:53:08.000Z
|
example/ssd/symbol/common.py
|
greenpea0104/incubator-mxnet
|
fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf
|
[
"Apache-2.0"
] | 58
|
2017-05-30T23:25:32.000Z
|
2019-11-18T09:30:54.000Z
|
example/ssd/symbol/common.py
|
greenpea0104/incubator-mxnet
|
fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf
|
[
"Apache-2.0"
] | 107
|
2017-05-30T05:53:22.000Z
|
2021-06-24T02:43:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
        stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    the activation mx.Symbol of the Conv[-BN]-Act group
    """
    out = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad,
                                stride=stride, num_filter=num_filter,
                                name="{}_conv".format(name))
    # Batch norm, when requested, sits between the convolution and the
    # activation.
    if use_batchnorm:
        out = mx.symbol.BatchNorm(data=out, name="{}_bn".format(name))
    activated = mx.symbol.Activation(data=out, act_type=act_type,
                                     name="{}_{}".format(name, act_type))
    return activated
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
        stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group (legacy naming scheme)

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        must be False; batch normalization is not supported here

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    # Batchnorm is rejected up front, so the `if use_batchnorm:` branch
    # further down is unreachable dead code kept for historical reasons.
    assert not use_batchnorm, "batchnorm not yet supported"
    # Explicit bias variable so its learning rate can be doubled (__lr_mult__).
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
    """Wrapper function to extract features from base network, attaching extra
    layers and SSD specific layers

    Parameters
    ----------
    body : mx.symbol
        the base network whose internal layers are extracted from
    from_layers : list of str
        feature extraction layers, use '' for add extra layers
        For example:
        from_layers = ['relu4_3', 'fc7', '', '', '', '']
        which means extract feature from relu4_3 and fc7, adding 4 extra layers
        on top of fc7
    num_filters : list of int
        number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale is applied, the number of filter for
        that layer must be provided.
        For example:
        num_filters = [512, -1, 512, 256, 256, 256]
    strides : list of int
        strides for the 3x3 convolution appended, -1 can be used for extracted
        feature layers
    pads : list of int
        paddings for the 3x3 convolution, -1 can be used for extracted layers
    min_filter : int
        minimum number of filters used in 1x1 convolution

    Returns
    -------
    list of mx.Symbols
    """
    # arguments check
    assert len(from_layers) > 0
    assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
    assert len(from_layers) == len(num_filters) == len(strides) == len(pads)
    internals = body.get_internals()
    layers = []
    for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
        from_layer, num_filter, s, p = params
        if from_layer.strip():
            # extract from base network: mxnet exposes internal outputs under
            # the "<layer>_output" suffix
            layer = internals[from_layer.strip() + '_output']
            layers.append(layer)
        else:
            # attach from last feature layer; a 1x1 bottleneck (at least
            # min_filter channels) followed by a stride-s 3x3 convolution
            assert len(layers) > 0
            assert num_filter > 0
            layer = layers[-1]
            num_1x1 = max(min_filter, num_filter // 2)
            conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
                num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu')
            conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
                num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu')
            layers.append(conv_3x3)
    return layers
def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
                   ratios=[1], normalization=-1, num_channels=[],
                   clip=False, interm_layer=0, steps=[]):
    """
    the basic aggregation module for SSD detection. Takes in multiple layers,
    generate multiple object detection targets by customized layers

    Parameters:
    ----------
    from_layers : list of mx.symbol
        generate multibox detection from layers
    num_classes : int
        number of classes excluding background, will automatically handle
        background in this function
    sizes : list or list of list
        [min_size, max_size] for all layers or [[], [], []...] for specific layers
    ratios : list or list of list
        [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
    normalization : int or list of int
        use normalization value for all layers or [...] for specific layers,
        -1 indicate no normalizations and scales
    num_channels : list of int
        number of input layer channels, used when normalization is enabled, the
        length of list should equals to number of normalization layers
    clip : bool
        whether to clip out-of-image boxes
    interm_layer : int
        if > 0, will add a intermediate Convolution layer
    steps : list
        specify steps for each MultiBoxPrior layer, leave empty, it will calculate
        according to layer dimensions

    Returns:
    ----------
    list of outputs, as [loc_preds, cls_preds, anchor_boxes]
    loc_preds : localization regression prediction
    cls_preds : classification prediction
    anchor_boxes : generated anchor boxes
    """
    assert len(from_layers) > 0, "from_layers must not be empty list"
    assert num_classes > 0, \
        "num_classes {} must be larger than 0".format(num_classes)

    assert len(ratios) > 0, "aspect ratios must not be empty list"
    if not isinstance(ratios[0], list):
        # provided only one ratio list, broadcast to all from_layers
        ratios = [ratios] * len(from_layers)
    assert len(ratios) == len(from_layers), \
        "ratios and from_layers must have same length"

    assert len(sizes) > 0, "sizes must not be empty list"
    if len(sizes) == 2 and not isinstance(sizes[0], list):
        # provided size range, we need to compute the sizes for each layer
        assert sizes[0] > 0 and sizes[0] < 1
        assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
        tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
        # BUGFIX: start_offset was referenced below but never defined,
        # raising NameError whenever a size *range* was given. 0.1 is the
        # smallest anchor scale used by the reference SSD implementation.
        start_offset = 0.1
        min_sizes = [start_offset] + tmp.tolist()
        max_sizes = tmp.tolist() + [tmp[-1] + start_offset]
        # BUGFIX: materialize the pairs so len() and sizes[k] work on
        # Python 3, where zip() returns a lazy iterator.
        sizes = list(zip(min_sizes, max_sizes))
    assert len(sizes) == len(from_layers), \
        "sizes and from_layers must have same length"

    if not isinstance(normalization, list):
        normalization = [normalization] * len(from_layers)
    assert len(normalization) == len(from_layers)

    assert sum(x > 0 for x in normalization) <= len(num_channels), \
        "must provide number of channels for each normalized layer"
    # Work on a copy: the loop below pops from num_channels, and mutating a
    # caller-supplied list (or the shared mutable default) is an unwanted
    # side effect.
    num_channels = list(num_channels)

    if steps:
        assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"

    loc_pred_layers = []
    cls_pred_layers = []
    anchor_layers = []
    num_classes += 1  # always use background as label 0

    for k, from_layer in enumerate(from_layers):
        from_name = from_layer.name
        # optional channel-wise L2 normalization with a learnable scale
        if normalization[k] > 0:
            from_layer = mx.symbol.L2Normalization(data=from_layer, \
                mode="channel", name="{}_norm".format(from_name))
            scale = mx.symbol.Variable(name="{}_scale".format(from_name),
                shape=(1, num_channels.pop(0), 1, 1),
                init=mx.init.Constant(normalization[k]),
                attr={'__wd_mult__': '0.1'})
            from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
        if interm_layer > 0:
            from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \
                stride=(1,1), pad=(1,1), num_filter=interm_layer, \
                name="{}_inter_conv".format(from_name))
            from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \
                name="{}_inter_relu".format(from_name))

        # estimate number of anchors per location
        # here I follow the original version in caffe
        # TODO: better way to shape the anchors??
        size = sizes[k]
        assert len(size) > 0, "must provide at least one size"
        size_str = "(" + ",".join([str(x) for x in size]) + ")"
        ratio = ratios[k]
        assert len(ratio) > 0, "must provide at least one ratio"
        ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
        num_anchors = len(size) - 1 + len(ratio)

        # create location prediction layer (4 offsets per anchor)
        num_loc_pred = num_anchors * 4
        bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name),
            init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
        loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
            stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \
            name="{}_loc_pred_conv".format(from_name))
        loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
        loc_pred = mx.symbol.Flatten(data=loc_pred)
        loc_pred_layers.append(loc_pred)

        # create class prediction layer (num_classes scores per anchor)
        num_cls_pred = num_anchors * num_classes
        bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name),
            init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
        cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
            stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \
            name="{}_cls_pred_conv".format(from_name))
        cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
        cls_pred = mx.symbol.Flatten(data=cls_pred)
        cls_pred_layers.append(cls_pred)

        # create anchor generation layer; '(-1.0, -1.0)' asks MultiBoxPrior
        # to infer the steps from the layer dimensions
        if steps:
            step = (steps[k], steps[k])
        else:
            step = '(-1.0, -1.0)'
        anchors = mx.symbol.contrib.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str,
            clip=clip, name="{}_anchors".format(from_name),
            steps=step)
        anchors = mx.symbol.Flatten(data=anchors)
        anchor_layers.append(anchors)

    # concatenate the per-layer predictions into single flat symbols
    loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \
        dim=1, name="multibox_loc_pred")
    cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \
        dim=1)
    cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
    cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
    anchor_boxes = mx.symbol.Concat(*anchor_layers, \
        num_args=len(anchor_layers), dim=1)
    anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
    return [loc_preds, cls_preds, anchor_boxes]
| 42.215232
| 98
| 0.631893
|
4a0459ef87df6d749469b3cc0c2fc53ecc82feb4
| 2,259
|
py
|
Python
|
modules/video_encoder.py
|
AIM3-RUC/video-paragraph
|
072e7447a6bc12080f0baa3d41e9e96d3d240221
|
[
"MIT"
] | 53
|
2021-05-31T10:20:57.000Z
|
2022-03-30T02:48:42.000Z
|
modules/video_encoder.py
|
AIM3-RUC/video-paragraph
|
072e7447a6bc12080f0baa3d41e9e96d3d240221
|
[
"MIT"
] | 13
|
2021-06-21T08:46:54.000Z
|
2022-03-14T08:31:10.000Z
|
modules/video_encoder.py
|
AIM3-RUC/video-paragraph
|
072e7447a6bc12080f0baa3d41e9e96d3d240221
|
[
"MIT"
] | 3
|
2021-09-21T13:12:28.000Z
|
2022-03-09T04:47:16.000Z
|
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import numpy as np
import framework.ops
from framework.ops import l2norm
import framework.configbase
class VideoEncoderConfig(framework.configbase.ModuleConfig):
  """Hyper-parameter container for :class:`VideoEncoder`."""
  def __init__(self):
    super(VideoEncoderConfig, self).__init__()
    self.rnn_type = 'gru' # lstm, gru
    self.ft_dim = 4096          # dimension of the input frame features
    self.hidden_size = 512      # RNN hidden size (per direction)
    self.num_layers = 1         # number of stacked RNN layers
    self.bidirectional = True   # run the RNN in both directions
    self.dropout = 0.2          # dropout prob for the RNN and pooled output
class VideoEncoder(nn.Module):
  """Encode a sequence of frame features into one L2-normalized 1024-d vector.

  Pipeline: linear embedding -> RNN -> masked mean-pool over time ->
  dropout -> fc -> tanh -> l2norm.
  """
  def __init__(self, config):
    super(VideoEncoder, self).__init__()
    self.config = config
    # Project raw frame features to 2*hidden_size before the RNN.
    self.embedding = nn.Linear(self.config.ft_dim, self.config.hidden_size*2)
    self.rnn = framework.ops.rnn_factory(self.config.rnn_type,
      input_size=self.config.hidden_size*2, hidden_size=self.config.hidden_size,
      num_layers=self.config.num_layers, dropout=self.config.dropout,
      bidirectional=self.config.bidirectional, bias=True, batch_first=True)
    # A bidirectional RNN outputs hidden_size per direction, hence the *2.
    input_size = self.config.hidden_size*2 if self.config.bidirectional else self.config.hidden_size
    self.fc = nn.Linear(input_size, 1024)
    self.dropout = nn.Dropout(p=self.config.dropout)
    self.init_weights()

  def xavier_init_fc(self, fc):
    """Xavier initialization for the fully connected layer
    """
    r = np.sqrt(6.) / np.sqrt(fc.in_features +
                             fc.out_features)
    fc.weight.data.uniform_(-r, r)
    fc.bias.data.fill_(0)

  def init_weights(self):
    """Xavier initialization for the fully connected layer
    """
    self.xavier_init_fc(self.embedding)
    self.xavier_init_fc(self.fc)

  def forward(self, inputs, seq_masks, init_states=None):
    """Encode *inputs* into embeddings.

    inputs: frame features; assumed (batch, seq_len, ft_dim) given
      batch_first=True above — TODO confirm against callers.
    seq_masks: (batch, seq_len) 0/1 mask of valid frames.
    Returns an L2-normalized (batch, 1024) embedding.
    """
    # outs.size = (batch, seq_len, num_directions * hidden_size)
    #seq_masks = framework.ops.sequence_mask(seq_lens, max_len=inputs.size(1)).float()
    embeds = self.embedding(inputs)
    seq_lens = seq_masks.sum(dim=1).long()
    self.rnn.flatten_parameters()
    outs, states = framework.ops.calc_rnn_outs_with_sort(self.rnn, embeds, seq_lens, init_states)
    # Masked mean-pool over the time dimension (padding positions zeroed out).
    outs = torch.sum(outs * seq_masks.float().unsqueeze(-1), 1) / seq_lens.unsqueeze(-1).float()
    outs = self.dropout(outs)
    embeds = l2norm(torch.tanh(self.fc(outs)))
    return embeds
| 35.857143
| 100
| 0.718459
|
4a0459fedb96c6dacddab3e092b12112130de29e
| 1,926
|
py
|
Python
|
tool/model/transformers_model.py
|
GoniaW/protagonist_tagger
|
68451cf5e0fc671510188cd439f888ffbdad6722
|
[
"BSD-3-Clause"
] | null | null | null |
tool/model/transformers_model.py
|
GoniaW/protagonist_tagger
|
68451cf5e0fc671510188cd439f888ffbdad6722
|
[
"BSD-3-Clause"
] | null | null | null |
tool/model/transformers_model.py
|
GoniaW/protagonist_tagger
|
68451cf5e0fc671510188cd439f888ffbdad6722
|
[
"BSD-3-Clause"
] | null | null | null |
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForTokenClassification
import numpy as np
from tool.model.ner_model import NERModel
class TransformerModel(NERModel):
    """NER model backed by a HuggingFace token-classification pipeline.

    Extracts PERSON entities ("PER" groups) from text, optionally fixing
    spans that start with a personal title.
    """

    def __init__(self, model_path, save_personal_titles, fix_personal_titles):
        super().__init__(save_personal_titles, fix_personal_titles)
        # The 40-language model needs the TF framework and a fast tokenizer;
        # every other model path goes through the standard PyTorch route.
        if model_path == 'jplu/tf-xlm-r-ner-40-lang':
            self.model = pipeline("ner", model=model_path,
                                  tokenizer=(model_path, {"use_fast": True}),
                                  framework="tf",
                                  aggregation_strategy='simple')
        else:
            tokenizer = AutoTokenizer.from_pretrained(model_path)
            model = AutoModelForTokenClassification.from_pretrained(model_path)
            self.model = pipeline(
                "token-classification",
                aggregation_strategy="simple",
                model=model,
                tokenizer=tokenizer)
        print('Transformers model loaded.')

    def get_doc_entities(self, text):
        """Run NER over *text*; return {'content': text, 'entities': [...]}.

        Each entity is [start, end, "PERSON"] (plus a trailing None when
        save_personal_titles is set).
        """
        results = self.model(text)
        entities = []
        for index, ent in enumerate(results):
            if ent['entity_group'] == "PER":
                start, end = ent['start'], ent['end']
                # Trim a leading space the aggregation may include in the span.
                if text[start] == ' ':
                    start += 1
                # Extend the span to the end of a word the tokenizer may have
                # truncated mid-way.
                while end < len(text) and text[end].isalpha():
                    end += 1
                ent_text = text[start:end]
                if self.fix_personal_titles and ent_text.startswith(
                        self.personal_titles):
                    # Skip past the personal title (first word) and the
                    # following space character.
                    start += (1 + len(ent_text.split(' ')[0]))
                if self.save_personal_titles:
                    # NOTE(review): 4th element looks like a personal-title
                    # slot but is always None here — confirm against NERModel.
                    entities.append([start, end, "PERSON", None])
                else:
                    entities.append([start, end, "PERSON"])
        return {'content': text, 'entities': entities}
| 40.125
| 79
| 0.553479
|
4a045c3389205574764d51b6e1830fbb9dc00520
| 6,429
|
py
|
Python
|
app/app/settings.py
|
sudodoki/doccano
|
4bfa60185dd40b477b1941e590e530250d13a8e1
|
[
"MIT"
] | 2
|
2019-04-01T03:55:10.000Z
|
2020-05-06T01:39:24.000Z
|
app/app/settings.py
|
sudodoki/doccano
|
4bfa60185dd40b477b1941e590e530250d13a8e1
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
sudodoki/doccano
|
4bfa60185dd40b477b1941e590e530250d13a8e1
|
[
"MIT"
] | 1
|
2021-02-04T14:52:58.000Z
|
2021-02-04T14:52:58.000Z
|
"""
Django settings for app project.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
Any setting that is configured via an environment variable may
also be set in a `.env` file in the project base directory.
"""
from os import path
import django_heroku
import dj_database_url
from environs import Env
# Build paths inside the project like this: path.join(BASE_DIR, ...)
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))

# Load optional configuration overrides from a `.env` file in BASE_DIR.
env = Env()
env.read_env(BASE_DIR, recurse=False)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the fallback key below is public (checked into source
# control) — SECRET_KEY must be provided via the environment in production.
SECRET_KEY = env('SECRET_KEY',
                 'v8sk33sy82!uw3ty=!jjv5vp7=s2phrzw(m(hrn^f7e_#1h2al')

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG defaults to True unless explicitly disabled.
DEBUG = env.bool('DEBUG', True)

# ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'whitenoise.runserver_nostatic',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'server.apps.ServerConfig',
    'widget_tweaks',
    'rest_framework',
    'django_filters',
    'social_django',
    'polymorphic',
    'webpack_loader',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'social_django.middleware.SocialAuthExceptionMiddleware',
    'applicationinsights.django.ApplicationInsightsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [path.join(BASE_DIR, 'server/templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
            ],
            'libraries': {
                'analytics': 'server.templatetags.analytics',
            },
        },
    },
]

STATICFILES_DIRS = [
    path.join(BASE_DIR, 'server/static'),
]

# Whitenoise serves compressed, hash-named static files.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

WEBPACK_LOADER = {
    'DEFAULT': {
        'CACHE': not DEBUG,
        'BUNDLE_DIR_NAME': 'bundle/',
        'STATS_FILE': path.join(BASE_DIR, 'server', 'webpack-stats.json'),
        'POLL_INTERVAL': 0.1,
        'TIMEOUT': None,
        'IGNORE': [r'.*\.hot-update.js', r'.+\.map']
    }
}

WSGI_APPLICATION = 'app.wsgi.application'

# Social (OAuth) login backends in addition to Django's model backend.
AUTHENTICATION_BACKENDS = [
    'social_core.backends.github.GithubOAuth2',
    'social_core.backends.azuread_tenant.AzureADTenantOAuth2',
    'django.contrib.auth.backends.ModelBackend',
]

# OAuth credentials; all default to None (provider disabled) when unset.
SOCIAL_AUTH_GITHUB_KEY = env('OAUTH_GITHUB_KEY', None)
SOCIAL_AUTH_GITHUB_SECRET = env('OAUTH_GITHUB_SECRET', None)
SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY = env('OAUTH_AAD_KEY', None)
SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET = env('OAUTH_AAD_SECRET', None)
SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID = env('OAUTH_AAD_TENANT', None)

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# (sqlite default; overridden by DATABASE_URL further below)

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
        'rest_framework.permissions.IsAuthenticated',
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 5,
    'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
    'SEARCH_PARAM': 'q',
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework_xml.renderers.XMLRenderer'
    )
}

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'

LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/projects/'
LOGOUT_REDIRECT_URL = '/'

# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
# ALLOWED_HOSTS = ['*']

# Size of the batch for creating documents
# on the import phase
IMPORT_BATCH_SIZE = 500

GOOGLE_TRACKING_ID = env('GOOGLE_TRACKING_ID', 'UA-125643874-2')

AZURE_APPINSIGHTS_IKEY = env('AZURE_APPINSIGHTS_IKEY', None)
APPLICATION_INSIGHTS = {
    'ikey': AZURE_APPINSIGHTS_IKEY if AZURE_APPINSIGHTS_IKEY else None,
}

# django_heroku mutates locals(): configures DATABASES, static files and
# logging for Heroku deployments.
django_heroku.settings(locals(), test_runner=False)
| 29.490826
| 91
| 0.713175
|
4a045d6909aa525ec2c6af87ff3fdbf9a8b3aef3
| 4,996
|
py
|
Python
|
tests/test_result.py
|
shawnbrown/squint
|
a9d326ff8edb2e2b740c4355fd953edd2c0cf114
|
[
"Apache-2.0"
] | 3
|
2020-01-11T23:29:15.000Z
|
2020-05-30T09:39:15.000Z
|
tests/test_result.py
|
shawnbrown/squint
|
a9d326ff8edb2e2b740c4355fd953edd2c0cf114
|
[
"Apache-2.0"
] | 3
|
2019-10-15T13:23:31.000Z
|
2020-07-23T22:13:11.000Z
|
tests/test_result.py
|
shawnbrown/squint
|
a9d326ff8edb2e2b740c4355fd953edd2c0cf114
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
from .common import unittest
from squint._compatibility.itertools import islice
from squint._utils import IterItems
from squint.result import Result
class TestFetch(unittest.TestCase):
    """Result.fetch() should evaluate the wrapped iterable into *evaltype*."""

    def test_nonmappings(self):
        """Check collection types (i.e., sized, iterable containers)."""
        result = Result([1, 2, 3], list)
        self.assertEqual(result.fetch(), [1, 2, 3])

        result = Result([1, 2, 3], set)
        self.assertEqual(result.fetch(), set([1, 2, 3]))

        # An exhaustible iterator behaves the same as a container.
        result = Result(iter([1, 2, 3]), set)
        self.assertEqual(result.fetch(), set([1, 2, 3]))

    def test_mappings(self):
        """Mappings may be given as dict, IterItems, or iterators of pairs."""
        result = Result({'a': 1, 'b': 2}, dict)
        self.assertEqual(result.fetch(), {'a': 1, 'b': 2})

        result = Result(IterItems([('a', 1), ('b', 2)]), dict)
        self.assertEqual(result.fetch(), {'a': 1, 'b': 2})

        result = Result(iter([iter(['a', 1]), iter(['b', 2])]), dict)
        self.assertEqual(result.fetch(), {'a': 1, 'b': 2})

        # A malformed item ('b' is not a key/value pair) is only detected
        # lazily, when the iterable is actually consumed.
        with self.assertRaises(ValueError):
            result = Result([('a', 1), 'b'], dict)
            result.fetch()  # <- Fails late (on fetch, only)

    def test_bad_evaltype(self):
        """evaltype must be a class object, not an instance."""
        regex = 'evaltype must be a type, found instance of list'
        with self.assertRaisesRegex(TypeError, regex):
            typed = Result([1, 2, 3], [1])
class TestSharedIterator(unittest.TestCase):
    def test_shared_iterator(self):
        """Dict result should not assume independent source iterators."""
        def generate_items():  # <- Generator that reads from single iterator.
            shared = iter([
                'x', 1, 1, 1, 2, 2, 2, 3, 3, 3,
                'y', 4, 4, 4, 5, 5, 5, 6, 6, 6,
            ])
            # Both values slice the *same* underlying iterator; consuming
            # them out of order would interleave the groups.
            yield next(shared), Result(islice(shared, 9), evaltype=list)
            yield next(shared), Result(islice(shared, 9), evaltype=list)

        result = Result(generate_items(), evaltype=dict)
        expected = {
            'x': [1, 1, 1, 2, 2, 2, 3, 3, 3],
            'y': [4, 4, 4, 5, 5, 5, 6, 6, 6],
        }
        self.assertEqual(result.fetch(), expected)
class TestClosing(unittest.TestCase):
    """closefunc should be invoked exactly once, on any termination path."""

    def setUp(self):
        # self.log records each invocation of the close callback.
        self.log = []

        def closefunc():
            self.log.append('closed')

        self.closefunc = closefunc

    def test_explicit_close(self):
        result = Result(iter([1, 2, 3]), set, closefunc=self.closefunc)
        self.assertEqual(self.log, [], msg='verify log is empty')
        result.close()
        self.assertEqual(self.log, ['closed'], msg='see if close was called')
        result.close()  # <- Second call.
        self.assertEqual(self.log, ['closed'], msg='multiple calls pass without error')

    def test_stopiteration(self):
        """Should call close() method when iterable is exhausted."""
        result = Result(iter([1, 2, 3]), set, closefunc=self.closefunc)
        self.assertEqual(self.log, [], msg='verify log is empty')
        list(result)  # Exhaust iterable.
        self.assertEqual(self.log, ['closed'])

    def test_delete(self):
        """Should call close() when object is garbage collected."""
        result = Result(iter([1, 2, 3]), set, closefunc=self.closefunc)
        self.assertEqual(self.log, [], msg='verify log is empty')
        result.__del__()  # Call __del__() directly.
        self.assertEqual(self.log, ['closed'])
class TestGetCache(unittest.TestCase):
    def test_tuple(self):
        # _get_cache() should reflect only the elements consumed so far
        # by _next_cache(); fetch() must still return the full sequence.
        result = Result(iter([1, 2, 3, 4]), evaltype=tuple)
        self.assertEqual(result._get_cache(), ())
        result._next_cache()
        self.assertEqual(result._get_cache(), (1,))
        result._next_cache()
        result._next_cache()
        result._next_cache()
        self.assertEqual(result._get_cache(), (1, 2, 3, 4))
        # A fifth advance has nothing left to consume.
        with self.assertRaises(StopIteration):
            result._next_cache()
        self.assertEqual(result.fetch(), (1, 2, 3, 4))

    def test_mapping(self):
        # For dict results, each _next_cache() advances through the
        # nested value Results one element at a time before moving on
        # to the next key.
        iterable = IterItems([
            ('a', Result(iter([1, 2]), list)),
            ('b', Result(iter([3, 4]), list)),
            ('c', Result(iter([5, 6]), list)),
        ])
        result = Result(iterable, dict)
        self.assertEqual(result._get_cache(), {})
        result._next_cache()
        # Internal cache holds the key plus its partially-consumed value.
        self.assertEqual(result._cache[0][0], 'a')
        self.assertEqual(result._cache[0][1]._cache[0], 1)
        self.assertEqual(result._get_cache(), {'a': [1]})
        result._next_cache()
        self.assertEqual(result._get_cache(), {'a': [1, 2]})
        result._next_cache()
        self.assertEqual(result._get_cache(), {'a': [1, 2], 'b': [3]})
        result._next_cache()
        result._next_cache()
        result._next_cache()
        self.assertEqual(result._get_cache(), {'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
        with self.assertRaises(StopIteration):
            result._next_cache()
        self.assertEqual(result.fetch(), {'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
| 33.986395
| 87
| 0.571257
|
4a045e593e12cc45f27eff7a772acb061a78cdc8
| 668
|
py
|
Python
|
Python3/35.search-insert-position.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/35.search-insert-position.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/35.search-insert-position.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=35 lang=python3
#
# [35] Search Insert Position
#
# @lc code=start
class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Return the index of *target* in sorted *nums*, or the index
        at which it would be inserted to keep *nums* sorted.

        Uses bisect_left, so with duplicate values the *leftmost* valid
        position is returned (the previous hand-rolled search returned
        an arbitrary matching index on duplicates). An empty list
        yields 0. Runs in O(log n).
        """
        # Local import keeps the module's import surface unchanged.
        from bisect import bisect_left
        return bisect_left(nums, target)
# @lc code=end
| 20.875
| 57
| 0.420659
|
4a045f03611fd776f535e4938d4e72a3a2189691
| 7,084
|
py
|
Python
|
mmn.py
|
uwmadison-chm/paper-fin-lott-2020
|
81e30126981e73883dfd630c4e3915445dfbf97b
|
[
"MIT"
] | null | null | null |
mmn.py
|
uwmadison-chm/paper-fin-lott-2020
|
81e30126981e73883dfd630c4e3915445dfbf97b
|
[
"MIT"
] | null | null | null |
mmn.py
|
uwmadison-chm/paper-fin-lott-2020
|
81e30126981e73883dfd630c4e3915445dfbf97b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import argparse
import logging
import coloredlogs
import mne
from mne.preprocessing import ICA, create_ecg_epochs
import matplotlib.pyplot as plt
from eeg_shared import BDFWithMetadata
# ---- Command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(description='Automate FMed study artifact rejection and analysis of MMN. By default loads the file for viewing')
parser.add_argument('input', help='Path to input file.')
parser.add_argument('-v', '--verbose', action='count', default=0)
parser.add_argument('--skip-view', action='store_true', help="Skip viewing file and editing artifact mask")
parser.add_argument('--topo', action='store_true', help="Topo map")
parser.add_argument('--shell', action='store_true', help="Drop into an interactive ipython environment")
parser.add_argument('--dms', metavar='ELECTRODE', action='store', help="Deviant minus standard of a specified electrode (Cz, Fz, T8, Pz)")
parser.add_argument('--dms-mean', action='store_true', help="Mean deviant minus standard across all 4 electrodes")
parser.add_argument('--epoch-image', action='store_true', help="Very slow colormap image of epochs")
parser.add_argument('--epoch-view', action='store_true', help="Simple linear view of epochs, default end view")
parser.add_argument('--psd', metavar='HZ', action='store', help="Plot power spectral density up to HZ")
parser.add_argument('--force', action='store_true', help="Force running outside of raw-data/subjects, saving masks to current directory")
parser.add_argument('--save-average', action='store_true', help="Save averaged evoked epochs in a standard MNE file")
parser.add_argument('--all', action='store_true', help="Generate all plots and save average evoked epochs")
parser.add_argument('--initial-laptop', action='store_true', help="Data is from 2013I (initial settings) north laptop after restore")
parser.add_argument('--bandpass-from', metavar='HZ', action='store', help="Lower frequency of bandpass (default is 1)")
parser.add_argument('--bandpass-to', metavar='HZ', action='store', help="Higher frequency of bandpass (default is 35)")
parser.add_argument('--no-reference', action='store_true', help="Do not reference mastoids")
parser.add_argument('--reference-o1', action='store_true', help="Only reference o1 mastoid")
parser.add_argument('--reference-o2', action='store_true', help="Only reference o2 mastoid")
parser.add_argument('--no-events', action='store_true', help="Do not show events")
parser.add_argument('--display-huge', action='store_true', help="Zoom way out to display entire file")
parser.add_argument('--no-crop', action='store_true', help="Do not crop file")
parser.add_argument('--no-notch', action='store_true', help="Do not notch filter at 50Hz")
args = parser.parse_args()

# Any -v flag switches colored logging from INFO to DEBUG.
if args.verbose > 0:
    coloredlogs.install(level='DEBUG')
else:
    coloredlogs.install(level='INFO')

raw_file = args.input

# Wrap the BDF recording with study metadata, referencing, notch and crop
# options taken from the CLI flags above.
f = BDFWithMetadata(raw_file, "mmn", args.force, is_2013I=args.initial_laptop, no_reference=args.no_reference, reference_o1=args.reference_o1, reference_o2=args.reference_o2, no_notch=(args.no_notch or args.skip_view), no_crop=args.no_crop)
f.load()

# Optional bandpass overrides; values arrive as strings from argparse.
if args.bandpass_from:
    f.highpass = float(args.bandpass_from)
    logging.info(f"Overriding highpass frequency of band to {f.highpass}Hz")
if args.bandpass_to:
    f.lowpass = float(args.bandpass_to)
    logging.info(f"Overriding lowpass frequency of band to {f.lowpass}Hz")

# Interactive artifact-mask editing unless explicitly skipped.
if not args.skip_view:
    f.artifact_rejection(args.display_huge, args.no_events)

# --psd takes an upper frequency; defaults to 120 Hz when run via --all.
if args.psd or args.all:
    f.psd(int(args.psd or 120))

epochs = f.build_epochs()

# Only decimate if srate is high
if f.raw.info['sfreq'] > 16000:
    # All the data was just reduced by a factor of 3 because that fits in memory better
    # In the future, we probably want to reduce down to 512hz as the manual process did
    # factor = f.raw.info['sfreq'] / 512
    factor = 3
    logging.info(f"Decimating epochs in memory by a factor of {factor}")
    epochs.decimate(factor)
else:
    logging.info("File already decimated, not decimating")
# ---- Evoked analysis: deviant-minus-standard (DMS) plots -------------------
if args.dms or args.dms_mean or args.all:
    # Plot standard and deviant on one figure, plus plot the deviant minus standard difference, like original matlab
    deviant = epochs["Deviant"].average()
    standard = epochs["Standard"].average()
    difference = mne.combine_evoked([deviant, -standard], weights='equal')

    evoked = dict()
    evoked["Standard"] = standard
    evoked["Deviant"] = deviant
    evoked["Difference"] = difference

    # Shell here (before plotting) exposes the evoked dict and exits.
    if args.shell:
        logging.warning("Dropping into shell, epochs are in `epochs`, evoked dict is `evoked`, and the raw file wrapper is in `f`")
        from IPython import embed
        embed()
        sys.exit()

    colors = dict(Standard="Green", Deviant="Red", Difference="Black")

    # TODO: Figure out what we need to change about the evoked data so we get confidence intervals displayed
    # @agramfort in mne-tools/mne-python gitter said: "to have confidence intervals you need repetitions which I think is a list of evoked or not epochs you need to pass"
    # May want to do something more like: https://mne.tools/stable/auto_examples/stats/plot_sensor_regression.html?highlight=plot_compare_evokeds
    def plot_dms(electrode, scale=2.5, auto=False):
        # Compare Standard/Deviant/Difference at one electrode and save
        # the figure. When auto=True, MNE chooses the y-limits; otherwise
        # the y-axis is fixed symmetrically at +/- scale.
        # NOTE(review): units of `scale` assumed to match MNE's eeg ylim
        # convention -- confirm against plot_compare_evokeds docs.
        pick = standard.ch_names.index(electrode)
        fig, ax = plt.subplots(figsize=(6, 4))
        kwargs = dict(axes=ax, picks=pick,
                      truncate_yaxis=False,
                      truncate_xaxis=False,
                      colors=colors,
                      split_legend=True,
                      legend='lower right',
                      show_sensors=False,
                      ci=0.95,
                      show=False)
        if auto:
            name = "auto"
            mne.viz.plot_compare_evokeds(evoked, **kwargs)
        else:
            name = str(scale)
            mne.viz.plot_compare_evokeds(evoked, ylim=dict(eeg=[-1 * scale, scale]), **kwargs)
        f.save_figure(fig, f"dms_{name}_{electrode}")

    # --all: fixed 2.5 and 6.0 scales plus auto-scaled plots for all
    # four electrodes of interest.
    if args.all:
        plot_dms("Cz", 2.5)
        plot_dms("Fz", 2.5)
        plot_dms("Pz", 2.5)
        plot_dms("T8", 2.5)
        plot_dms("Cz", 6.0)
        plot_dms("Fz", 6.0)
        plot_dms("Pz", 6.0)
        plot_dms("T8", 6.0)
        plot_dms("Cz", auto=True)
        plot_dms("Fz", auto=True)
        plot_dms("Pz", auto=True)
        plot_dms("T8", auto=True)

    # --dms ELECTRODE: single-electrode plot, validated against the data.
    if args.dms:
        if args.dms in standard.ch_names:
            plot_dms(args.dms)
        else:
            logging.warning(f"Could not find electrode '{args.dms}'")

    # --dms-mean: one plot averaging across the four electrodes.
    if args.dms_mean:
        picks = ['Cz', 'Fz', 'Pz', 'T8']
        fig = mne.viz.plot_compare_evokeds(evoked, picks=picks,
                                           colors=colors, combine='mean', ci=0.95, show=False)
        f.save_figure(fig[0], f"dms_mean")

# --shell without any DMS flag: no evoked dict exists yet.
elif args.shell:
    logging.warning("Dropping into shell, epochs are in `epochs` and the raw file wrapper is in `f`")
    from IPython import embed
    embed()

# ---- Remaining optional outputs --------------------------------------------
if args.epoch_image:
    f.epoch_images()

if args.topo:
    f.topo()

if args.epoch_view:
    f.epoch_view()

if args.save_average or args.all:
    f.save_average()
| 42.166667
| 240
| 0.691982
|
4a045faf3aeb7010cea9b148af307e4f53bed60e
| 1,829
|
py
|
Python
|
demo/experiment.py
|
DAIRLab/contact-nets
|
b0e197cbb0ab5550628d71d851a6de1dab616fb6
|
[
"BSD-3-Clause"
] | 16
|
2020-11-18T01:33:05.000Z
|
2022-02-15T17:52:55.000Z
|
demo/experiment.py
|
DAIRLab/contact-nets
|
b0e197cbb0ab5550628d71d851a6de1dab616fb6
|
[
"BSD-3-Clause"
] | null | null | null |
demo/experiment.py
|
DAIRLab/contact-nets
|
b0e197cbb0ab5550628d71d851a6de1dab616fb6
|
[
"BSD-3-Clause"
] | 1
|
2021-01-27T20:48:46.000Z
|
2021-01-27T20:48:46.000Z
|
"""Provide a simple way to run different Block3D methods."""
import distutils.dir_util
import pdb  # noqa
import shutil

import click

from contactnets.experiments import split
from contactnets.experiments.block3d import train
from contactnets.experiments.block3d.train import Block3DTraining
from contactnets.utils import dirs
@click.command()
@click.option('--method', type=click.Choice(['e2e', 'poly', 'deep', 'deepvertex']),
              default='e2e', help='Which method to train with.')
@click.option('--tosses', default=100,
              help='Number of training tosses. Data split is 50% training, 30% validation,'
                   '20% test (so total number of tosses used will be 2x what is specified).')
def main(method: str, tosses: int):
    """Train a Block3D model with the selected method and toss count."""
    assert method is not None
    assert tosses <= 284, 'Number of training tosses must be less than half the dataset size'

    # Copy the tosses data and processing parameters into the working directory.
    # shutil.copytree(..., dirs_exist_ok=True) merges into an existing tree,
    # replacing distutils.dir_util.copy_tree (deprecated by PEP 632 and
    # removed in Python 3.12).
    shutil.copytree(dirs.data_path('tosses_processed'),
                    dirs.out_path('data', 'all'), dirs_exist_ok=True)
    shutil.copytree(dirs.data_path('params_processed'),
                    dirs.out_path('params'), dirs_exist_ok=True)

    epochs = 500
    # Train on 2x the requested tosses (50/30/20 split), at least 3 total;
    # patience shrinks as the dataset grows.
    total_tosses = max(3, 2 * tosses)
    patience = int(500.0 / total_tosses) + 12

    print(f'Executing method {method} with training tosses={tosses}, '
          f'patience={patience}, and epochs={epochs}')

    split.do_split('50,30,20', num_tosses=total_tosses)

    args = {'epochs': epochs, 'batch': 1, 'patience': patience, 'resume': False}
    if method == 'e2e':
        train.do_train_e2e(**args)  # type: ignore
    else:
        training = Block3DTraining(net_type=method)
        train.do_train_structured(**args, training=training)  # type: ignore


if __name__ == '__main__':
    main()
| 37.326531
| 93
| 0.667578
|
4a045fe386f751f87914dd905714ce7528b34060
| 49,050
|
bzl
|
Python
|
tensorflow/workspace.bzl
|
stagedml/tensorflow
|
2ffb2fbed31bd0e43673b767bf4622d737bfab62
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
stagedml/tensorflow
|
2ffb2fbed31bd0e43673b767bf4622d737bfab62
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
stagedml/tensorflow
|
2ffb2fbed31bd0e43673b767bf4622d737bfab62
|
[
"Apache-2.0"
] | 1
|
2021-01-29T08:12:57.000Z
|
2021-01-29T08:12:57.000Z
|
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/mkl:build_defs.bzl", "mkl_repository")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/FXdiv:workspace.bzl", FXdiv = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
load("//third_party/psimd:workspace.bzl", psimd = "repo")
load("//third_party/pthreadpool:workspace.bzl", pthreadpool = "repo")
load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo")
def initialize_third_party():
    """Load third party repositories. See above load() statements."""

    # One repo-rule callable per vendored project, invoked in the same
    # order as before.
    third_party_repos = [
        FP16,
        FXdiv,
        aws,
        clog,
        cpuinfo,
        flatbuffers,
        hexagon_nn,
        highwayhash,
        hwloc,
        icu,
        kissfft,
        jpeg,
        nasm,
        opencl_headers,
        pasta,
        psimd,
        pthreadpool,
        sobol_data,
    ]
    for configure_repo in third_party_repos:
        configure_repo()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
    """Return the absolute label string for *dep*.

    Wrapping in Label() resolves the dep relative to the repository this
    .bzl file belongs to, so the path stays correct when TensorFlow is
    vendored as a submodule of another workspace.
    """
    return str(Label(dep))
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is thought to be under consideration.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
    """Declare TensorFlow's external repositories and bind() aliases.

    Args:
      path_prefix: Deprecated; no longer used (kept for compatibility).
      tf_repo_name: Deprecated; kept for compatibility.
    """
    tf_repositories(path_prefix, tf_repo_name)
    tf_bind()
# Define all external repositories required by TensorFlow
def tf_repositories(path_prefix = "", tf_repo_name = ""):
"""All external dependencies for TF builds."""
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
sycl_configure(name = "local_config_sycl")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo = "../arm_compiler",
)
mkl_repository(
name = "mkl_linux",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "a936d6b277a33d2a027a024ea8e65df62bd2e162c7ca52c48486ed9d5dc27160",
strip_prefix = "mklml_lnx_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
],
)
mkl_repository(
name = "mkl_windows",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "33cc27652df3b71d7cb84b26718b5a2e8965e2c864a502347db02746d0430d57",
strip_prefix = "mklml_win_2020.0.20190813",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
],
)
mkl_repository(
name = "mkl_darwin",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "2fbb71a0365d42a39ea7906568d69b1db3bfc9914fee75eedb06c5f32bf5fa68",
strip_prefix = "mklml_mac_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
],
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
tf_http_archive(
name = "XNNPACK",
sha256 = "24b6285c679dece8805d2a7d63cc567413b7670279bc0c66a99e555123fe4700",
strip_prefix = "XNNPACK-9a88efe2d84fef93eb2b8acb6f0ac8f3cacee8b5",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/9a88efe2d84fef93eb2b8acb6f0ac8f3cacee8b5.zip",
"https://github.com/google/XNNPACK/archive/9a88efe2d84fef93eb2b8acb6f0ac8f3cacee8b5.zip",
],
)
# Important: If you are upgrading MKL-DNN, then update the version numbers
# in third_party/mkl_dnn/mkldnn.BUILD. In addition, the new version of
# MKL-DNN might require upgrading MKL ML libraries also. If they need to be
# upgraded then update the version numbers on all three versions above
# (Linux, Mac, Windows).
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "ed56652dd237deb86ee9bf102c18de5f2625c059e5ab1d7512c8dc01e316b694",
strip_prefix = "mkl-dnn-0.21.2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v0.21.2.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v0.21.2.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "fcc2d951f7170eade0cfdd0d8d1d58e3e7785bd326bca6555f3722f8cba71811",
strip_prefix = "mkl-dnn-1.0-pc2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v1.0-pc2.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v1.0-pc2.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
# TODO: Remove the patch when https://github.com/abseil/abseil-cpp/issues/326 is resolved
# and when TensorFlow is build against CUDA 10.2
patch_file = clean_dep("//third_party:com_google_absl_fix_mac_and_nvcc_build.patch"),
sha256 = "acd93f6baaedc4414ebd08b33bebca7c7a46888916101d8c0b8083573526d070",
strip_prefix = "abseil-cpp-43ef2148c0936ebf7cb4be6b19927a9d9d145b8f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz",
],
)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "26ea0481c517ea11c7afd1d2655fdcbefcc90fd5b4ff8a5313b78edd49170f6d",
strip_prefix = "eigen-4217a9f09018b1eb3ce800919a69c7c3df47f9cb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/4217a9f09018b1eb3ce800919a69c7c3df47f9cb/eigen-4217a9f09018b1eb3ce800919a69c7c3df47f9cb.tar.gz",
"https://gitlab.com/libeigen/eigen/-/archive/4217a9f09018b1eb3ce800919a69c7c3df47f9cb/eigen-4217a9f09018b1eb3ce800919a69c7c3df47f9cb.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76",
strip_prefix = "libxsmm-1.14",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.14.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "e86a7190e87371259083595d756399f494b2257706a2b773c2917ec796f41d9a",
strip_prefix = "google-cloud-cpp-0.16.0",
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v0.16.0.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v0.16.0.tar.gz",
],
)
tf_http_archive(
name = "com_github_googleapis_googleapis",
build_file = clean_dep("//third_party:googleapis.BUILD"),
sha256 = "824870d87a176f26bcef663e92051f532fac756d1a06b404055dc078425f4378",
strip_prefix = "googleapis-f81082ea1e2f85c43649bee26e0d9871d4b41cdb",
system_build_file = clean_dep("//third_party/systemlibs:googleapis.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
"https://github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "6678b484d929f2d0d3229d8ac4e3b815a950c86bb9f17851471d143f6d4f7834",
strip_prefix = "gemmlowp-12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
"https://github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0",
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "adf051d4c10781ea5cfabbbc4a2577b6ceca68590d23b58b8260a8e24cc5f081",
strip_prefix = "sqlite-amalgamation-3300100",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2019/sqlite-amalgamation-3300100.zip",
"https://www.sqlite.org/2019/sqlite-amalgamation-3300100.zip",
],
)
tf_http_archive(
name = "gif",
build_file = clean_dep("//third_party:gif.BUILD"),
patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"),
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73",
strip_prefix = "six-1.12.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "functools32_archive",
build_file = clean_dep("//third_party:functools32.BUILD"),
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "fe939df4583692f0512161ec1c880e0a10e71e6a232da045ab8edd3756fbadf0",
strip_prefix = "gast-0.2.2",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz",
"https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = clean_dep("//third_party:opt_einsum.BUILD"),
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "280c76ec0c9ab7a1dff550cdc37b7c7cd28551103dc3955202760ea8e381aa9d",
strip_prefix = "abseil-py-pypi-v0.8.0",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.8.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.8.0.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
# 310ba5ee72661c081129eb878c1bbcec936b20f0 is based on 3.8.0 with a fix for protobuf.bzl.
PROTOBUF_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz",
]
PROTOBUF_SHA256 = "b9e92f9af8819bbbc514e2902aec860415b70209f31dfc8c4fa72515a5df9d59"
PROTOBUF_STRIP_PREFIX = "protobuf-310ba5ee72661c081129eb878c1bbcec936b20f0"
# protobuf depends on @zlib, it has to be renamed to @zlib_archive because "zlib" is already
# defined using bind for grpc.
PROTOBUF_PATCH = "//third_party/protobuf:protobuf.patch"
tf_http_archive(
name = "com_google_protobuf",
patch_file = clean_dep(PROTOBUF_PATCH),
sha256 = PROTOBUF_SHA256,
strip_prefix = PROTOBUF_STRIP_PREFIX,
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = PROTOBUF_URLS,
)
tf_http_archive(
name = "nsync",
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz",
"https://github.com/google/nsync/archive/1.22.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
strip_prefix = "pcre-8.42",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
"http://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
],
)
tf_http_archive(
name = "swig",
build_file = clean_dep("//third_party:swig.BUILD"),
sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
strip_prefix = "swig-3.0.8",
system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "d0393da38ac74ffac67313072d7fe75b1fa1010eb5987f63f349b024a36b7ffb",
strip_prefix = "curl-7.66.0",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.66.0.tar.gz",
"https://curl.haxx.se/download/curl-7.66.0.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "grpc",
sha256 = "67a6c26db56f345f7cee846e681db2c23f919eba46dd639b09462d1b6203d28c",
strip_prefix = "grpc-4566c2a29ebec0835643b972eb99f4306c4234a3",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/4566c2a29ebec0835643b972eb99f4306c4234a3.tar.gz",
"https://github.com/grpc/grpc/archive/4566c2a29ebec0835643b972eb99f4306c4234a3.tar.gz",
],
)
tf_http_archive(
name = "com_github_nanopb_nanopb",
sha256 = "8bbbb1e78d4ddb0a1919276924ab10d11b631df48b657d960e0c795a25515735",
build_file = "@grpc//third_party:nanopb.BUILD",
strip_prefix = "nanopb-f8ac463766281625ad710900479130c7fcb4d63b",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
"https://github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
# Check out LLVM and MLIR from llvm-project.
LLVM_COMMIT = "11552433ebfc7243c0b66367bdffaba52e74b354"
LLVM_SHA256 = "bbdba20f1b44661b55062b449b5df6491c7272ab980827ff68fc8621fa180a3e"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
]
tf_http_archive(
name = "llvm-project",
# TODO: Remove when llvm revision at https://reviews.llvm.org/rG6656e961c08393c3949412ef945ade0272b66fca is
# integrated into TF.
patch_file = clean_dep("//third_party/llvm:windows_build_fix.patch"),
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = LLVM_URLS,
additional_build_files = {
clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"): "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
strip_prefix = "jsoncpp-1.8.4",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "1188e29000013ed6517168600fc35a010d58c5d321846d6a6dfee74e4c788b45",
strip_prefix = "boringssl-7f634429a04abc48e2eb041c81c5235816c96514",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
"https://github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
],
)
tf_http_archive(
name = "zlib_archive",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "ada7e99087c4ed477bfdf11413f2ba8db8a840ba9bbf8ac94f4f3972e2a7cec9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
"http://www.kurims.kyoto-u.ac.jp/~ooura/fft2d.tgz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
strip_prefix = "snappy-1.1.7",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.7.tar.gz",
"https://github.com/google/snappy/archive/1.1.7.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
patch_file = clean_dep("//third_party/nccl:archive.patch"),
sha256 = "9a7633e224982e2b60fa6b397d895d20d6b7498e3e02f46f98a5a4e187c5a44c",
strip_prefix = "nccl-0ceaec9cee96ae7658aa45686853286651f36384",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/0ceaec9cee96ae7658aa45686853286651f36384.tar.gz",
"https://github.com/nvidia/nccl/archive/0ceaec9cee96ae7658aa45686853286651f36384.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"http://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"http://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
"http://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"http://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = clean_dep("//third_party:pprof.BUILD"),
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
tf_http_archive(
name = "cub_archive",
build_file = clean_dep("//third_party:cub.BUILD"),
patch_file = clean_dep("//third_party:cub.pr170.patch"),
sha256 = "6bfa06ab52a650ae7ee6963143a0bbc667d6504822cbd9670369b598f18c58c3",
strip_prefix = "cub-1.8.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.8.0.zip",
"https://github.com/NVlabs/cub/archive/1.8.0.zip",
],
)
tf_http_archive(
name = "cython",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa",
strip_prefix = "cython-0.28.4",
system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz",
"https://github.com/cython/cython/archive/0.28.4.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = clean_dep("//third_party:double_conversion.BUILD"),
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "rules_cc",
sha256 = "cf3b76a90c86c0554c5b10f4b160f05af71d252026b71362c4674e2fb9936cf9",
strip_prefix = "rules_cc-01d4a48911d5e7591ecb1c06d3b8af47fe872371",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
"https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
tf_http_archive(
name = "tbb",
build_file = clean_dep("//third_party/ngraph:tbb.BUILD"),
sha256 = "c3245012296f09f1418b78a8c2f17df5188b3bd0db620f7fd5fabe363320805a",
strip_prefix = "tbb-2019_U1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/01org/tbb/archive/2019_U1.zip",
"https://github.com/01org/tbb/archive/2019_U1.zip",
],
)
tf_http_archive(
name = "ngraph",
build_file = clean_dep("//third_party/ngraph:ngraph.BUILD"),
sha256 = "a1780f24a1381fc25e323b4b2d08b6ef5129f42e011305b2a34dcf43a48030d5",
strip_prefix = "ngraph-0.11.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
"https://github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = clean_dep("//third_party/ngraph:nlohmann_json.BUILD"),
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "ngraph_tf",
build_file = clean_dep("//third_party/ngraph:ngraph_tf.BUILD"),
sha256 = "742a642d2c6622277df4c902b6830d616d0539cc8cd843d6cdb899bb99e66e36",
strip_prefix = "ngraph-tf-0.9.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
"https://github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.3.0.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.3.0.tar.gz",
],
sha256 = "0f34838f2c8024a6765168227ba587b3687729ebf03dc912f88ff75c7aa9cfe8",
strip_prefix = "pybind11-2.3.0",
build_file = clean_dep("//third_party:pybind11.BUILD"),
)
tf_http_archive(
name = "wrapt",
build_file = clean_dep("//third_party:wrapt.BUILD"),
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
"https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
],
)
def tf_bind():
    """Bind targets for some external repositories.

    Please do not add bind() definitions unless we have no other choice.
    If that ends up being the case, please leave a comment explaining
    why we can't depend on the canonical build target.
    """

    # (name, actual) pairs, registered below in this exact order.
    bindings = [
        # gRPC wants a cares dependency but its contents is not actually
        # important since we have set GRPC_ARES=0 in .bazelrc
        ("cares", "@com_github_nanopb_nanopb//:nanopb"),
        # Needed by Protobuf
        ("grpc_cpp_plugin", "@grpc//:grpc_cpp_plugin"),
        ("grpc_python_plugin", "@grpc//:grpc_python_plugin"),
        ("grpc_lib", "@grpc//:grpc++"),
        ("grpc_lib_unsecure", "@grpc//:grpc++_unsecure"),
        # Needed by gRPC
        ("libssl", "@boringssl//:ssl"),
        # Needed by gRPC
        ("nanopb", "@com_github_nanopb_nanopb//:nanopb"),
        # Needed by gRPC
        ("protobuf", "@com_google_protobuf//:protobuf"),
        # gRPC expects //external:protobuf_clib and //external:protobuf_compiler
        # to point to Protobuf's compiler library.
        ("protobuf_clib", "@com_google_protobuf//:protoc_lib"),
        # Needed by gRPC
        ("protobuf_headers", "@com_google_protobuf//:protobuf_headers"),
        # Needed by Protobuf
        ("python_headers", clean_dep("//third_party/python_runtime:headers")),
        # Needed by Protobuf
        ("six", "@six_archive//:six"),
        # Needed by gRPC
        ("zlib", "@zlib_archive//:zlib"),
    ]
    for name, actual in bindings:
        native.bind(name = name, actual = actual)
| 47.027804
| 203
| 0.684098
|
4a046145fe958c902e50f8d0e4ef11add52b81e8
| 1,109
|
py
|
Python
|
topology/simple.py
|
lsk567/Serverlessnet
|
63ddc3d0a340284e9c81595ce89d43c5d3a9820f
|
[
"MIT"
] | null | null | null |
topology/simple.py
|
lsk567/Serverlessnet
|
63ddc3d0a340284e9c81595ce89d43c5d3a9820f
|
[
"MIT"
] | null | null | null |
topology/simple.py
|
lsk567/Serverlessnet
|
63ddc3d0a340284e9c81595ce89d43c5d3a9820f
|
[
"MIT"
] | null | null | null |
"""
Simple topology to simulate three containers (d1, d2, d3),
1 switch, and one controller:
(c)
|
- (s1)-
| | |
(d1) (d2) (d3)
sender open receiver
lambda
"""
from mininet.net import Containernet
from mininet.node import Controller
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.log import info, setLogLevel
setLogLevel('info')
net = Containernet(controller=Controller)
info('*** Adding controller\n')
net.addController('c0')
info('*** Adding docker containers using ubuntu:trusty images\n')
d1 = net.addDocker('d1', ip='10.0.0.251', dimage="ubuntu:trusty")
d2 = net.addDocker('d2', ip='10.0.0.252', dimage="ubuntu:trusty")
d3 = net.addDocker('d3', ip='10.0.0.253', dimage="ubuntu:trusty")
info('*** Adding switches\n')
s1 = net.addSwitch('s1')
info('*** Creating links\n')
net.addLink(d1, s1)
net.addLink(d2, s1)
net.addLink(d3, s1)
info('*** Starting network\n')
net.start()
info('*** Testing connectivity\n')
net.ping([d1, d2, d3])
info('*** Running CLI\n')
CLI(net)
info('*** Stopping network')
net.stop()
| 27.725
| 65
| 0.65284
|
4a0461b9d1e1e339d40ba735fde2f017c0da01e2
| 9,511
|
py
|
Python
|
scripts/53_denovo.py
|
xiamaz/flowCat
|
5fea92eff3112ea3bb669595b469735b2bfa3938
|
[
"MIT"
] | 4
|
2020-03-06T14:06:12.000Z
|
2021-06-25T15:03:54.000Z
|
scripts/53_denovo.py
|
xiamaz/flowCat
|
5fea92eff3112ea3bb669595b469735b2bfa3938
|
[
"MIT"
] | 3
|
2020-03-25T10:54:52.000Z
|
2020-11-26T19:06:23.000Z
|
scripts/53_denovo.py
|
xiamaz/flowCat
|
5fea92eff3112ea3bb669595b469735b2bfa3938
|
[
"MIT"
] | 2
|
2020-04-14T11:26:25.000Z
|
2021-04-02T19:25:52.000Z
|
#!/usr/bin/env python3
"""
Train de-novo classifier for berlin som data.
"""
import numpy as np
from sklearn import metrics
from sklearn.preprocessing import LabelBinarizer
from tensorflow import keras
from tensorflow.keras import layers, regularizers, models # pylint: disable=import-error
# from keras import layers, regularizers, models
from argmagic import argmagic
from flowcat import utils, io_functions, mappings
from flowcat.som_dataset import SOMDataset, SOMSequence
def create_model_early_merge(input_shapes, yshape, global_decay=5e-6):
    """Build a CNN that merges all SOM input tubes before convolution.

    The tube inputs are concatenated on the channel axis first ("early
    merge") and a single convolutional stack processes the joint tensor.

    Args:
        input_shapes: Iterable of per-tube input shapes (H, W, C). Since
            the inputs are concatenated channel-wise, H and W must match
            across tubes.
        yshape: Number of output classes (softmax units).
        global_decay: L2 regularization factor applied to every conv and
            dense kernel.

    Returns:
        An uncompiled ``keras`` Model mapping the list of tube inputs to
        class probabilities.
    """
    inputs = [layers.Input(shape=xshape) for xshape in input_shapes]

    # Early merge: concatenate raw inputs, then convolve jointly.
    x = layers.concatenate(inputs)
    x = layers.Conv2D(
        filters=64, kernel_size=4, activation="relu", strides=3,
        kernel_regularizer=regularizers.l2(global_decay))(x)
    x = layers.Conv2D(
        filters=96, kernel_size=3, activation="relu", strides=2,
        kernel_regularizer=regularizers.l2(global_decay))(x)
    x = layers.Conv2D(
        filters=128, kernel_size=1, activation="relu", strides=1,
        kernel_regularizer=regularizers.l2(global_decay))(x)
    # Collapse spatial dimensions into one feature vector.
    x = layers.GlobalAveragePooling2D()(x)

    # Two fully-connected layers ahead of the softmax classifier.
    x = layers.Dense(
        units=128, activation="relu",
        kernel_regularizer=regularizers.l2(global_decay)
    )(x)
    x = layers.Dense(
        units=64, activation="relu",
        kernel_regularizer=regularizers.l2(global_decay)
    )(x)
    x = layers.Dense(
        units=yshape, activation="softmax"
    )(x)

    model = models.Model(inputs=inputs, outputs=x)
    # Debug aid: show the shape of every layer in the assembled model.
    for layer in model.layers:
        print(layer.output_shape)
    return model
def create_model_multi_input(input_shapes, yshape, global_decay=5e-6):
    """Build a CNN with one convolutional branch per SOM input tube.

    Each tube gets its own conv stack ending in a global max-pool; the
    pooled per-tube feature vectors are concatenated ("late merge") and
    classified by shared dense layers.

    Args:
        input_shapes: Iterable of per-tube input shapes (H, W, C). Tubes
            may differ in shape since each has an independent branch.
        yshape: Number of output classes (softmax units).
        global_decay: L2 regularization factor applied to every conv and
            dense kernel.

    Returns:
        An uncompiled ``keras`` Model mapping the list of tube inputs to
        class probabilities.
    """
    print(input_shapes)
    inputs = []
    segments = []
    for xshape in input_shapes:
        ix = layers.Input(shape=xshape)
        inputs.append(ix)
        # Per-tube convolutional feature extractor.
        x = layers.Conv2D(
            filters=32, kernel_size=4, activation="relu", strides=1,
            kernel_regularizer=regularizers.l2(global_decay),
        )(ix)
        x = layers.Conv2D(
            filters=48, kernel_size=3, activation="relu", strides=1,
            kernel_regularizer=regularizers.l2(global_decay),
        )(x)
        x = layers.Conv2D(
            filters=48, kernel_size=2, activation="relu", strides=1,
            kernel_regularizer=regularizers.l2(global_decay),
        )(x)
        x = layers.Conv2D(
            filters=64, kernel_size=2, activation="relu", strides=1,
            kernel_regularizer=regularizers.l2(global_decay),
        )(x)
        # Collapse spatial dims into one 64-d feature vector per tube.
        x = layers.GlobalMaxPooling2D()(x)
        segments.append(x)

    # Late merge: concatenate per-tube features, then classify.
    x = layers.concatenate(segments)
    x = layers.Dense(
        units=128, activation="relu",
        kernel_regularizer=regularizers.l2(global_decay)
    )(x)
    x = layers.Dense(
        units=64, activation="relu",
        kernel_regularizer=regularizers.l2(global_decay)
    )(x)
    x = layers.Dense(
        units=yshape, activation="softmax"
    )(x)

    model = models.Model(inputs=inputs, outputs=x)
    # Debug aid: show the shape of every layer in the assembled model.
    for layer in model.layers:
        print(layer.output_shape)
    return model
def get_model(channel_config, groups, **kwargs):
    """Create and compile the multi-input classifier plus a label binarizer.

    Args:
        channel_config: Mapping of tube name to a dict with "dims"
            (H, W, C) and "channels" (selected markers); the channel
            count replaces the last dim when building input shapes.
        groups: Ordered collection of class labels.
        **kwargs: Forwarded to ``create_model_multi_input``
            (e.g. ``global_decay``).

    Returns:
        Tuple of (fitted ``LabelBinarizer``, compiled keras model).
    """
    inputs = tuple(
        [*d["dims"][:-1], len(d["channels"])] for d in channel_config.values())
    output = len(groups)
    model = create_model_multi_input(inputs, output, **kwargs)
    model.compile(
        loss="categorical_crossentropy",
        optimizer="adam",
        metrics=[
            "acc",
        ]
    )

    # Binarizer maps group labels to one-hot vectors for training.
    binarizer = LabelBinarizer()
    binarizer.fit(groups)
    return binarizer, model
def main(data: utils.URLPath, meta: utils.URLPath, output: utils.URLPath):
    """Train the de-novo SOM classifier and print validation metrics.

    Args:
        data: Path to som dataset
        meta: Path to dataset metadata. NOTE(review): currently unused
            (the load_case_collection call consuming it is commented
            out) — kept for CLI compatibility.
        output: Output path
    """
    tubes = ("2", "3", "4")
    pad_width = 1

    # Collapse fine-grained diagnoses into the 8-class scheme.
    group_mapping = mappings.GROUP_MAPS["8class"]
    mapping = group_mapping["map"]
    groups = group_mapping["groups"]

    # dataset = io_functions.load_case_collection(data, meta)
    dataset = SOMDataset.from_path(data)
    if mapping:
        dataset = dataset.map_groups(mapping)

    # LPL and MZL are excluded from both training and validation.
    dataset = dataset.filter(groups=[g for g in groups if g not in ("LPL", "MZL")])

    dataset_groups = {d.group for d in dataset}

    # if set(groups) != dataset_groups:
    #     raise RuntimeError(f"Group mismatch: {groups}, but got {dataset_groups}")

    validate, train = dataset.create_split(10, stratify=True)

    # Per-class loss weights inversely proportional to class size,
    # normalized so the smallest weight is 1.0; keyed by class index
    # as required by keras class_weight.
    group_count = train.group_count
    num_cases = sum(group_count.values())
    balanced_nums = num_cases / len(dataset_groups)
    balanced_loss_weights = [balanced_nums / group_count.get(g, balanced_nums) for g in groups]
    min_ratio = min(balanced_loss_weights)
    balanced_loss_weights = {i: v / min_ratio for i, v in enumerate(balanced_loss_weights)}
    print(balanced_loss_weights)

    # train = train.balance(2000)
    # train = train.balance_per_group({
    #     "CM": 6000,
    #     # "CLL": 4000,
    #     # "MBL": 2000,
    #     "MCL": 1000,
    #     "PL": 1000,
    #     "LPL": 1000,
    #     "MZL": 1000,
    #     "FL": 1000,
    #     "HCL": 1000,
    #     "normal": 6000,
    # })

    # Persist the split so results can be reproduced later.
    io_functions.save_json(train.labels, output / "ids_train.json")
    io_functions.save_json(validate.labels, output / "ids_validate.json")

    som_config = io_functions.load_json(data + "_config.json")
    selected_tubes = {tube: som_config[tube] for tube in tubes}

    config = {
        "tubes": selected_tubes,
        "groups": groups,
        "pad_width": pad_width,
        "mapping": group_mapping,
    }
    io_functions.save_json(config, output / "config.json")

    # Grow each tube's spatial dims by pad_width on every side so the
    # model input matches the padded arrays produced by SOMSequence.
    for tube in tubes:
        x, y, z = selected_tubes[tube]["dims"]
        selected_tubes[tube]["dims"] = (x + 2 * pad_width, y + 2 * pad_width, z)

    binarizer, model = get_model(selected_tubes, groups=groups, global_decay=5e-7)

    def getter_fun(sample, tube):
        # Extract one tube's SOM array from a dataset sample.
        return sample.get_tube(tube)

    trainseq = SOMSequence(
        train, binarizer,
        tube=tubes,
        get_array_fun=getter_fun,
        batch_size=32,
        pad_width=pad_width)
    validseq = SOMSequence(
        validate, binarizer,
        tube=tubes,
        get_array_fun=getter_fun,
        batch_size=128,
        pad_width=pad_width)

    tensorboard_dir = str(output / "tensorboard")
    tensorboard_callback = keras.callbacks.TensorBoard(
        log_dir=str(tensorboard_dir),
        histogram_freq=5,
        write_grads=True,
        write_images=True,
    )
    # Abort training as soon as the loss becomes NaN.
    nan_callback = keras.callbacks.TerminateOnNaN()

    model.fit_generator(
        epochs=15, shuffle=True,
        callbacks=[tensorboard_callback, nan_callback],
        class_weight=balanced_loss_weights,
        generator=trainseq, validation_data=validseq)

    model.save(str(output / "model.h5"))
    io_functions.save_joblib(binarizer, output / "binarizer.joblib")

    # Evaluate on the validation split and print a confusion matrix
    # plus balanced accuracy.
    preds = []
    for pred in model.predict_generator(validseq):
        preds.append(pred)
    pred_arr = np.array(preds)
    pred_labels = binarizer.inverse_transform(pred_arr)
    true_labels = validseq.true_labels

    confusion = metrics.confusion_matrix(true_labels, pred_labels, labels=groups)
    print(groups)
    print(confusion)
    balanced = metrics.balanced_accuracy_score(true_labels, pred_labels)
    print(balanced)

    # preds = []
    # for pred in model.predict_generator(validseq):
    #     preds.append(pred)
    # args.output.local.mkdir(parents=True, exist_ok=True)
# Entry point: argmagic builds a command-line interface from main()'s
# signature and docstring.
if __name__ == "__main__":
    argmagic(main)
| 32.571918
| 95
| 0.642835
|
4a04629f22426dfb22953f08abfe6e6050028667
| 468
|
py
|
Python
|
MECboard/migrations/0015_auto_20190803_1538.py
|
mec-by-shp-wbl-hjk/MEC
|
da722701ee1673bf3477219f81208c28fa0a3d18
|
[
"MIT"
] | 1
|
2019-07-18T15:43:12.000Z
|
2019-07-18T15:43:12.000Z
|
MECboard/migrations/0015_auto_20190803_1538.py
|
mec-by-shp-wbl-hjk/MEC
|
da722701ee1673bf3477219f81208c28fa0a3d18
|
[
"MIT"
] | null | null | null |
MECboard/migrations/0015_auto_20190803_1538.py
|
mec-by-shp-wbl-hjk/MEC
|
da722701ee1673bf3477219f81208c28fa0a3d18
|
[
"MIT"
] | 6
|
2019-07-17T07:59:14.000Z
|
2019-09-12T04:08:37.000Z
|
# Generated by Django 2.2.3 on 2019-08-03 06:38
from django.db import migrations
import imagekit.models.fields
class Migration(migrations.Migration):
    """Auto-generated migration altering Board.image_thumbnail.

    Changes the field to ProcessedImageField(null=True,
    upload_to='media/thumbnail'). NOTE(review): the exact delta relative to
    migration 0014 is not visible here — presumably this makes the field
    nullable; confirm against the previous migration.
    """

    dependencies = [
        ('MECboard', '0014_auto_20190803_1512'),
    ]

    operations = [
        migrations.AlterField(
            model_name='board',
            name='image_thumbnail',
            field=imagekit.models.fields.ProcessedImageField(null=True, upload_to='media/thumbnail'),
        ),
    ]
| 23.4
| 101
| 0.647436
|
4a04631b991f0d8a8be89b7f6360d2fd4efcd897
| 3,839
|
py
|
Python
|
SDis_Self-Training/plotting/plotBoxPlot.py
|
mgeorgati/DasymetricMapping
|
d87b97a076cca3e03286c6b27b118904e03315c0
|
[
"BSD-3-Clause"
] | null | null | null |
SDis_Self-Training/plotting/plotBoxPlot.py
|
mgeorgati/DasymetricMapping
|
d87b97a076cca3e03286c6b27b118904e03315c0
|
[
"BSD-3-Clause"
] | null | null | null |
SDis_Self-Training/plotting/plotBoxPlot.py
|
mgeorgati/DasymetricMapping
|
d87b97a076cca3e03286c6b27b118904e03315c0
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import numpy as np
def compareCounts(fileList, column):
    """Join `column` from several vector files on Amsterdam buurt codes and
    derive per-file comparison columns against the reference column
    "ams_l1_totalpop_2018".

    Args:
        fileList: paths readable by geopandas; each must contain 'BU_CODE'
            and `column`. One file's stem must be "ams_l1_totalpop_2018"
            (the reference counts) — TODO confirm callers guarantee this.
        column: attribute column to extract from every file.

    Returns:
        DataFrame with one column per file (named by file stem) plus
        dif_/Error_/Accuracy_/PrE_ columns for each non-reference column.
    """
    df = pd.DataFrame()
    for i in fileList:
        path =Path(i)
        name = path.stem
        src = gpd.read_file(i)
        #print(src)
        # Keep Amsterdam only (buurt codes containing 'BU0363').
        src = src.loc[src['BU_CODE'].str.contains('BU0363')]
        if 'BU_CODE' not in df.columns:
            df['BU_CODE'] = src['BU_CODE']
            df['{}'.format(name)] = src['{}'.format(column)]
            #df = df.join(src.set_index('BU_CODE'), lsuffix='_l')
        else:
            df['{}'.format(name)] = src['{}'.format(column)]
    print(df.head(3))
    # NOTE: df.columns is captured once when the loop starts, so the
    # dif_/Error_/Accuracy_/PrE_ columns appended inside the loop body are
    # not themselves iterated — do not "simplify" this into df.items().
    for col in df.columns:
        if col!="ams_l1_totalpop_2018" and col!= "BU_CODE":
            print(col)
            # Buurten missing from a file become 0 before the int cast.
            df['{}'.format(col)] = df['{}'.format(col)].fillna(0).astype(np.int64)
            # Absolute difference per buurt.
            df["dif_{}".format(col)] = df[col] - df["ams_l1_totalpop_2018"]
            # Error as percentage of the total reference population.
            df["Error_{}".format(col)] = (df[col] - df["ams_l1_totalpop_2018"]) / df["ams_l1_totalpop_2018"].sum() * 100
            # Accuracy in percent; yields inf/NaN where the reference is 0.
            df["Accuracy_{}".format(col)] = (df[col] / df["ams_l1_totalpop_2018"]) * 100
            # Percentage error weighted by the buurt's population share.
            df["PrE_{}".format(col)] = (df[col] - df["ams_l1_totalpop_2018"]) * (df["ams_l1_totalpop_2018"] / df["ams_l1_totalpop_2018"].sum()) * 100#
    #frame['dif_sum_2018_l1_totalpop'][frame.sum_2018_l1_totalpop == 0] = 0
    #frame['Error_sum_2018_l1_totalpop'][frame.sum_2018_l1_totalpop == 0] = 0
    return df
def BoxPlotCBS(directory, df, xLegend):
    """Draw a horizontal box plot of every column in *df* and save it.

    Each column becomes one "Type" row; values run along the *xLegend*
    axis. The figure is written to *directory* at 300 dpi, then closed.
    """
    melted = pd.melt(df, var_name='Type', value_name=xLegend)
    sns.boxplot(x=xLegend, y="Type", data=melted, linewidth=1.0)
    plt.savefig(directory, dpi=300, bbox_inches='tight',)
    plt.cla()
    plt.close()
def BoxPlotCBS_NO(directory, df, xLegend):
    """Box-plot all columns of *df* after removing outliers, then save.

    For every column, rows outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] of that
    column are dropped. Filters are applied cumulatively (each column's
    filter acts on the rows surviving the previous ones). The remaining
    data is melted into (Type, value) pairs and drawn as a horizontal box
    plot saved to *directory* at 300 dpi.
    """
    # df.columns is evaluated once here, so reassigning df (row drops only)
    # inside the loop does not change what is iterated.
    for column in df.columns:
        Q1 = df[column].quantile(0.25)
        Q3 = df[column].quantile(0.75)
        IQR = Q3 - Q1  # interquartile range
        # 'mask' rather than shadowing the builtin `filter`.
        mask = (df[column] >= Q1 - 1.5 * IQR) & (df[column] <= Q3 + 1.5 * IQR)
        df = df.loc[mask]
    sns.boxplot(x=xLegend, y="Type",
                data=pd.melt(df, var_name='Type', value_name=xLegend),
                linewidth=1.0)
    plt.savefig(directory, dpi=300, bbox_inches='tight')
    plt.cla()
    plt.close()
def BoxPlot(directory, fileList, column):
    """Collect *column* from each vector file and box-plot the distributions.

    Every file contributes one column named after the file stem; the
    combined frame is melted into (Type, Accuracy) pairs and drawn as a
    horizontal box plot, saved to *directory* at 300 dpi.
    """
    df = pd.DataFrame()
    for filepath in fileList:
        stem = Path(filepath).stem
        layer = gpd.read_file(filepath)
        df[stem] = layer[column]
    print(df.head(3))
    melted = pd.melt(df, var_name='Type', value_name='Accuracy')
    sns.boxplot(x="Accuracy", y="Type", data=melted, linewidth=1.0)
    plt.savefig(directory, dpi=300, bbox_inches='tight',)
    plt.cla()
    plt.close()
def BoxPlotNoOutliers(directory, fileList, column):
    """Box-plot *column* from each file with per-file outlier removal.

    For each input file, values of *column* outside the 1.5*IQR whisker
    bounds are dropped before the column (named after the file stem) is
    added to the frame — so each distribution is trimmed independently.
    The melted frame is drawn as a horizontal box plot and saved to
    *directory* at 300 dpi.
    """
    df = pd.DataFrame()
    for i in fileList:
        name = Path(i).stem
        src = gpd.read_file(i)
        Q1 = src[column].quantile(0.25)
        Q3 = src[column].quantile(0.75)
        IQR = Q3 - Q1  # interquartile range
        # 'mask' rather than shadowing the builtin `filter`.
        mask = (src[column] >= Q1 - 1.5 * IQR) & (src[column] <= Q3 + 1.5 * IQR)
        src = src.loc[mask]
        df[name] = src[column]
    print(df.head(3))
    sns.boxplot(x="Accuracy", y="Type",
                data=pd.melt(df, var_name='Type', value_name='Accuracy'),
                linewidth=1.0)
    plt.savefig(directory, dpi=300, bbox_inches='tight')
    plt.cla()
    plt.close()
| 40.840426
| 150
| 0.586351
|
4a04631bad94205ac2535fc5b1fe99e7ebe67272
| 27
|
py
|
Python
|
intake/catalog/tests/catalog_search/example_packages/ep/__init__.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 578
|
2019-02-22T11:45:28.000Z
|
2022-03-31T08:32:22.000Z
|
intake/catalog/tests/catalog_search/example_packages/ep/__init__.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 336
|
2019-02-21T16:24:33.000Z
|
2022-03-30T09:23:53.000Z
|
intake/catalog/tests/catalog_search/example_packages/ep/__init__.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 99
|
2019-02-22T18:31:09.000Z
|
2022-03-22T03:27:54.000Z
|
class TestCatalog:
    """Empty placeholder class; intentionally defines no behavior."""
| 9
| 18
| 0.592593
|
4a0463a0b67c6a7beba7178a88654ec77a74b29a
| 7,866
|
py
|
Python
|
modules/dials/command_line/plot_Fo_vs_Fc.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/dials/command_line/plot_Fo_vs_Fc.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/dials/command_line/plot_Fo_vs_Fc.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | 1
|
2020-02-04T15:39:06.000Z
|
2020-02-04T15:39:06.000Z
|
# LIBTBX_SET_DISPATCHER_NAME dials.plot_Fo_vs_Fc
"""
Create a plot of Fo vs Fc similar to that shown by Figure 6 in
https://doi.org/10.1107/S2059798317010348
Usage: dials.plot_Fo_vs_Fc hklin=refined.mtz
"""
from __future__ import absolute_import, division, print_function
import sys
from dials.util import Sorry, show_mail_on_error
from dials.util.options import OptionParser
# from libtbx.table_utils import simple_table
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from iotbx import mtz
from scitbx.array_family import flex
from scitbx.lstbx import normal_eqns, normal_eqns_solving
from math import sqrt
class HyperbolaFit(normal_eqns.non_linear_ls, normal_eqns.non_linear_ls_mixin):
    """Fit the function y = sqrt(x^2 + a^2) by non-linear regression. There is
    just one parameter, a^2."""

    # Initial guess for the value of a^2
    a_sq0 = flex.double([1000])

    def __init__(self, x, y):
        # x, y: flex.double arrays of equal length (here Fc and Fo amplitudes).
        super(HyperbolaFit, self).__init__(n_parameters=1)
        self.x = x
        self.y = y
        self.n_data = len(self.x)
        assert len(self.y) == self.n_data
        self.restart()

    def restart(self):
        # Reset the parameter vector to the initial guess.
        self.param = self.a_sq0.deep_copy()
        self.old_param = None

    def parameter_vector_norm(self):
        return self.param.norm()

    def build_up(self, objective_only=False):
        """Accumulate the normal equations at the current parameter value.

        Residuals are model_y - y with model_y = sqrt(x^2 + a_sq). When
        objective_only is False, the (n_data x 1) Jacobian
        d(model_y)/d(a_sq) = 0.5 / model_y is also accumulated.
        """
        a_sq = self.param[0]
        model_y = flex.sqrt(flex.pow2(self.x) + a_sq)
        residuals = model_y - self.y
        self.reset()
        if objective_only:
            self.add_residuals(residuals, weights=None)
        else:
            dy_dp = 0.5 / model_y
            jacobian = flex.double(flex.grid(self.n_data, 1))
            jacobian.matrix_paste_column_in_place(dy_dp, 0)
            self.add_equations(residuals, jacobian, weights=None)

    def step_forward(self):
        # Keep the previous parameter so step_backward can undo a bad step.
        self.old_param = self.param.deep_copy()
        self.param += self.step()

    def step_backward(self):
        assert self.old_param is not None
        self.param, self.old_param = self.old_param, None

    def goodness_of_fit(self):
        """Calculate various goodness of fit metrics (assumes fit has been
        performed already)"""
        a_sq = self.param[0]
        model_y = flex.sqrt(flex.pow2(self.x) + a_sq)
        resid = model_y - self.y
        resid2 = flex.pow2(resid)
        sse = flex.sum(resid2)
        # NOTE: R^2 here is computed against the variance of the *model*
        # values, not the observations.
        sst = flex.sum(flex.pow2(model_y - flex.mean(model_y)))
        r_sq = 1 - sse / sst
        rmse = sqrt(sse / (self.n_data - 1))
        return {"SSE": sse, "R-square": r_sq, "RMSE": rmse}
class Script(object):
    """A class for running the script: parses parameters, reads Fo/Fc from
    an MTZ file, optionally fits the hyperbola |Fo| = sqrt(|Fc|^2 + |Fe|^2)
    and writes the scatter plot."""

    def __init__(self):
        """Initialise the script."""
        from libtbx.phil import parse

        # The phil scope
        phil_scope = parse(
            """
            hklin = None
                .type = path
                .help = "MTZ file (containing observed and calculated structure "
                        "factors)"

            Fo = F
                .type = str
                .help = "MTZ column name for Fobs"

            Fc = FC_ALL
                .type = str
                .help = "MTZ column name for Fcalc (FC_ALL from Refmac includes the "
                        "bulk solvent contribution)"

            max_Fc = 300
                .type = float
                .help = "Set plot limits to display data up to this value of Fc"

            plot_filename = Fo_vs_Fc.pdf
                .type = str
                .help = "Filename for plot"

            fit_hyperbola = True
                .type = bool
                .help = "Calculate and show the fit of a hyperbolic function given by "
                        "|Fo|^2 = |Fc|^2 + |Fe|^2, where |Fe| describes the error term "
                        "containing information about dynamic scattering and other "
                        "effects"

            show_y_eq_x = True
                .type = bool
                .help = "Plot y=x as a dashed line"
            """,
            process_includes=True,
        )

        # The script usage
        usage = "usage: dials.plot_Fo_vs_Fc hklin=refined.mtz"

        # Create the parser
        self.parser = OptionParser(usage=usage, phil=phil_scope, epilog=__doc__)

        # Set by run() after the optional hyperbola fit: a callable mapping
        # Fc to fitted Fo, or None if no fit was performed.
        self.model_fit = None
        return

    def _extract_data_from_mtz(self):
        """Read the Fo and Fc columns of params.hklin into self.fobs/self.fc.

        Raises Sorry if the file cannot be read or a column is missing.
        """
        try:
            m = mtz.object(self.params.hklin)
        except RuntimeError:
            raise Sorry("Could not read {0}".format(self.params.hklin))

        # Re-key the miller arrays by their final column label only.
        mad = m.as_miller_arrays_dict()
        mad = {k[-1]: v for (k, v) in mad.items()}
        fobs = mad.get(self.params.Fo)
        fc = mad.get(self.params.Fc)
        if [fobs, fc].count(None) > 0:
            raise Sorry(
                "Columns {0} not found in available labels: {1}".format(
                    ", ".join([self.params.Fo, self.params.Fc]),
                    ", ".join(m.column_labels()),
                )
            )

        # Find common reflections (some fobs might be missing)
        fobs, fc = fobs.common_sets(fc)

        self.fobs = fobs.data()
        self.fc = fc.amplitudes().data()
        return

    def _plot(self):
        """Draw the Fo-vs-Fc scatter (plus optional y=x line and fitted
        curve) and save it to params.plot_filename."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        minor_loc = MultipleLocator(10)
        ax.yaxis.set_minor_locator(minor_loc)
        ax.xaxis.set_minor_locator(minor_loc)
        ax.grid(True, which="minor")
        ax.set_axisbelow(True)
        ax.set_aspect("equal")
        ax.set_xlabel(r"$F_c$")
        ax.set_ylabel(r"$F_o$")
        ax.scatter(self.fc, self.fobs, s=1, c="indianred")

        if self.params.max_Fc:
            ax.set_xlim((0, self.params.max_Fc))
            ax.set_ylim((0, self.params.max_Fc))

        if self.params.show_y_eq_x:
            ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.0", linewidth=0.8)

        if self.model_fit:
            x = flex.double_range(0, int(ax.get_xlim()[1]))
            y = self.model_fit(x)
            ax.plot(x, y, c="0.0", linewidth=0.8)

        print("Saving plot to {0}".format(self.params.plot_filename))
        plt.savefig(self.params.plot_filename)

    def run(self):
        """Execute the script."""
        # Parse the command line
        self.params, _ = self.parser.parse_args(show_diff_phil=True)

        if self.params.hklin is None:
            self.parser.print_help()
            sys.exit()

        self._extract_data_from_mtz()

        if self.params.fit_hyperbola:
            # fit by NLLS Levenberg Marquardt algorithm
            hyperbola_fit = HyperbolaFit(self.fc, self.fobs)
            hyperbola_fit.restart()
            normal_eqns_solving.levenberg_marquardt_iterations(
                hyperbola_fit,
                track_all=True,
                gradient_threshold=1e-8,
                step_threshold=1e-8,
                tau=1e-4,
                n_max_iterations=200,
            )
            intercept = hyperbola_fit.param[0]

            print("Model fit described by the formula: |Fo|^2 = sqrt(|Fc|^2 + |Fe|^2)")
            print("where |Fe| = {:.5f}\n".format(sqrt(intercept)))

            print("Goodness of fit:")
            gof = hyperbola_fit.goodness_of_fit()
            print("SSE: {:.5g}".format(gof["SSE"]))
            print("R-square: {:.5f}".format(gof["R-square"]))
            print("RMSE: {:.2f}".format(gof["RMSE"]))
            print()

            # Set the model_fit function using the determined intercept
            def hyperbola(x, c):
                return flex.sqrt(flex.pow2(x) + c)

            from functools import partial

            self.model_fit = partial(hyperbola, c=intercept)

        if self.params.plot_filename:
            self._plot()

        return
if __name__ == "__main__":
    # show_mail_on_error() wraps execution so uncaught exceptions produce a
    # user-facing bug-report message instead of a raw traceback.
    with show_mail_on_error():
        script = Script()
        script.run()
| 31.338645
| 88
| 0.569794
|
4a04641d89ce30c0b1fa07bb4746e2cb1afe1765
| 173,392
|
py
|
Python
|
electroncash/wallet.py
|
damascene/Electron-Cash-SLP
|
3819046ecbd585aa8f5fac1c58550046dd027266
|
[
"MIT"
] | null | null | null |
electroncash/wallet.py
|
damascene/Electron-Cash-SLP
|
3819046ecbd585aa8f5fac1c58550046dd027266
|
[
"MIT"
] | null | null | null |
electroncash/wallet.py
|
damascene/Electron-Cash-SLP
|
3819046ecbd585aa8f5fac1c58550046dd027266
|
[
"MIT"
] | null | null | null |
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import re
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from functools import partial
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp, ExcessiveFee, PrintError,
UserCancelled, profiler, format_satoshis, format_time, finalization_print_error, to_string,
TimeoutException, is_verbose)
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .paymentrequest import InvoiceStore
from .contacts import Contacts
from . import cashacct
from .slp import SlpMessage, SlpParsingError, SlpUnsupportedSlpTokenType, SlpNoMintingBatonFound, OpreturnError
from . import slp_validator_0x01, slp_validator_0x01_nft1
from .slp_graph_search import slp_gs_mgr
# Deferred-translation idiom: a temporary no-op `_` marks the strings below
# for collection by the gettext tooling without translating them at import
# time; the real `_` is restored immediately afterwards.
def _(message): return message

TX_STATUS = [
    _('Unconfirmed parent'),
    _('Low fee'),
    _('Unconfirmed'),
    _('Not Verified'),
]

del _
from .i18n import _

DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
    """Return the relay fee (satoshis) to use, clamped to a sane maximum.

    Falls back to a 5000-satoshi default when no network is connected or
    the server reported no (or a zero) relay fee; never returns more than
    50000 satoshis regardless of what the server claims.
    """
    default_fee = 5000
    ceiling = 50000
    if network and network.relay_fee:
        fee = network.relay_fee
    else:
        fee = default_fee
    return fee if fee < ceiling else ceiling
def dust_threshold(network):
    """Return the dust threshold in satoshis.

    Change below this amount is added to the transaction fee instead of
    creating an output. The `network` argument is unused: as of Sept. 2018
    this is the hard-coded Bitcoin Cash dust limit of 546 satoshis (earlier
    code derived it from the relay fee, then used 1).
    """
    return 546
def sweep_preparations(privkeys, network, imax=100):
    """Collect spendable UTXOs for a list of WIF private keys.

    For every key, the server is queried for unspent outputs of each
    address derivable from it (p2pk as well as p2pkh for p2pkh-style WIFs;
    both compressed and uncompressed pubkeys for minikeys). Collection
    stops once `imax` inputs have been gathered.

    Returns:
        (inputs, keypairs): transaction-input dicts ready for signing, and
        a map of pubkey -> (privkey, compressed).

    Raises:
        ValueError: for p2sh WIF keys, or when no inputs were found.
    """
    class InputsMaxxed(Exception):
        # Internal control-flow exception: stops collecting at imax inputs.
        pass

    def append_utxos_to_inputs(inputs, pubkey, txin_type):
        if txin_type == 'p2pkh':
            address = Address.from_pubkey(pubkey)
        else:
            address = PublicKey.from_pubkey(pubkey)
        sh = address.to_scripthash_hex()
        # Blocking server round-trip for this address's unspent outputs.
        u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
        for item in u:
            if len(inputs) >= imax:
                raise InputsMaxxed()
            item['address'] = address
            item['type'] = txin_type
            item['prevout_hash'] = item['tx_hash']
            item['prevout_n'] = item['tx_pos']
            item['pubkeys'] = [pubkey]
            item['x_pubkeys'] = [pubkey]
            item['signatures'] = [None]
            item['num_sig'] = 1
            inputs.append(item)

    def find_utxos_for_privkey(txin_type, privkey, compressed):
        pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
        append_utxos_to_inputs(inputs, pubkey, txin_type)
        keypairs[pubkey] = privkey, compressed

    inputs = []
    keypairs = {}
    try:
        for sec in privkeys:
            txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
            find_utxos_for_privkey(txin_type, privkey, compressed)
            # do other lookups to increase support coverage
            if is_minikey(sec):
                # minikeys don't have a compressed byte
                # we lookup both compressed and uncompressed pubkeys
                find_utxos_for_privkey(txin_type, privkey, not compressed)
            elif txin_type == 'p2pkh':
                # WIF serialization does not distinguish p2pkh and p2pk
                # we also search for pay-to-pubkey outputs
                find_utxos_for_privkey('p2pk', privkey, compressed)
            elif txin_type == 'p2sh':
                raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
    except InputsMaxxed:
        pass
    if not inputs:
        raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
    return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
    """Build and sign a transaction sweeping all funds of `privkeys` to `recipient`.

    If `fee` is None it is estimated from the transaction's size.

    Raises:
        NotEnoughFunds: if the fee exceeds the balance, or the remainder
            would be below the dust threshold.
    """
    inputs, keypairs = sweep_preparations(privkeys, network, imax)
    total = sum(i.get('value') for i in inputs)
    if fee is None:
        # Build a throwaway zero-fee tx just to estimate the final size.
        outputs = [(TYPE_ADDRESS, recipient, total)]
        tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
        fee = config.estimate_fee(tx.estimated_size())
    if total - fee < 0:
        raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
    if total - fee < dust_threshold(network):
        raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
    outputs = [(TYPE_ADDRESS, recipient, total - fee)]
    # Lock the tx to the current chain height (anti-fee-sniping style).
    locktime = network.get_local_height()
    tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
    tx.BIP_LI01_sort()
    tx.sign(keypairs)
    return tx
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
    def __init__(self, storage):
        """Construct the wallet from a WalletStorage instance.

        Loads all persisted state (history, transactions, labels, frozen
        addresses/coins, payment requests, invoices, contacts, SLP data)
        into memory. Network-dependent components (synchronizer, SPV
        verifier, CashAccounts network layer) are started later in
        start_threads().
        """
        self.electrum_version = PACKAGE_VERSION
        self.pre_release_tag = PRE_RELEASE_TAG
        self.storage = storage
        self.thread = None  # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
        self.network = None
        # verifier (SPV) and synchronizer are started in start_threads
        self.synchronizer = None
        self.verifier = None

        # CashAccounts subsystem. Its network-dependent layer is started in
        # start_threads. Note: object instantiation should be lightweight here.
        # self.cashacct.load() is called later in this function to load data.
        self.cashacct = cashacct.CashAcct(self)
        finalization_print_error(self.cashacct)  # debug object lifecycle

        # slp graph databases for token type 1 and NFT1
        self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None

        self.weak_window = None  # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves.  This should always be a weakref.ref (Weak.ref), or None

        # Removes defunct entries from self.pruned_txo asynchronously
        self.pruned_txo_cleaner_thread = None

        # Cache of Address -> (c,u,x) balance. This cache is used by
        # get_addr_balance to significantly speed it up (it is called a lot).
        # Cache entries are invalidated when tx's are seen involving this
        # address (address history chages). Entries to this cache are added
        # only inside get_addr_balance.
        # Note that this data structure is touched by the network and GUI
        # thread concurrently without the use of locks, because Python GIL
        # allows us to get away with such things. As such do not iterate over
        # this dict, but simply add/remove items to/from it in 1-liners (which
        # Python's GIL makes thread-safe implicitly).
        self._addr_bal_cache = {}

        # We keep a set of the wallet and receiving addresses so that is_mine()
        # checks are O(logN) rather than O(N). This creates/resets that cache.
        self.invalidate_address_set_cache()

        self.gap_limit_for_change = 20  # constant

        # saved fields
        self.use_change            = storage.get('use_change', True)
        self.multiple_change       = storage.get('multiple_change', False)
        self.labels                = storage.get('labels', {})

        # Frozen addresses
        frozen_addresses = storage.get('frozen_addresses',[])
        self.frozen_addresses = set(Address.from_string(addr)
                                    for addr in frozen_addresses)
        # Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
        # The two types of freezing are flagged independently of each other and 'spendable' is defined as a coin that satisfies
        # BOTH levels of freezing.
        self.frozen_coins = set(storage.get('frozen_coins', []))
        self.frozen_coins_tmp = set()  # in-memory only

        # address -> list(txid, height)
        history = storage.get('addr_history',{})
        self._history = self.to_Address_dict(history)

        # there is a difference between wallet.up_to_date and interface.is_up_to_date()
        # interface.is_up_to_date() returns true when all requests have been answered and processed
        # wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
        self.up_to_date = False

        # The only lock. We used to have two here. That was more technical debt
        # without much purpose. 1 lock is sufficient. In particular data
        # structures that are touched by the network thread as well as the GUI
        # (such as self.transactions, history, etc) need to be synchronized
        # using this mutex.
        self.lock = threading.RLock()

        # load requests
        requests = self.storage.get('payment_requests', {})
        for key, req in requests.items():
            req['address'] = Address.from_string(key)
        self.receive_requests = {req['address']: req
                                 for req in requests.values()}

        # Transactions pending verification.  A map from tx hash to transaction
        # height.  Access is contended so a lock is needed. Client code should
        # use get_unverified_tx to get a thread-safe copy of this dict.
        self.unverified_tx = defaultdict(int)

        # Verified transactions.  Each value is a (height, timestamp, block_pos) tuple.  Access with self.lock.
        self.verified_tx = storage.get('verified_tx3', {})

        # save wallet type the first time
        if self.storage.get('wallet_type') is None:
            self.storage.put('wallet_type', self.wallet_type)

        # invoices and contacts
        self.invoices = InvoiceStore(self.storage)
        self.contacts = Contacts(self.storage)

        # cashacct is started in start_threads, but it needs to have relevant
        # data here, before the below calls happen
        self.cashacct.load()

        # Now, finally, after object is constructed -- we can do this
        self.load_keystore_wrapper()
        self.load_addresses()
        self.load_transactions()
        self.build_reverse_history()
        self.check_history()

        # Print debug message on finalization
        finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
    @property
    def is_slp(self):
        ''' True if this wallet handles SLP tokens.

        Note that the various Slp_* classes explicitly write to storage
        to set the proper wallet_type on construction unconditionally, so
        this should always be valid for SLP wallets. '''
        return "slp_" in self.storage.get('wallet_type', '')
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Adddress objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
    def get_master_public_key(self):
        """Base-class default: no master public key available. Subclasses
        with deterministic keystores override this."""
        return None
    def load_keystore_wrapper(self):
        """ Loads the keystore, but also tries to preserve derivation(s). Older
        Electron Cash versions would not save the derivation for all keystore
        types. So this function ensures:

        1. That on first run, we store the keystore_derivations to top-level
           storage (which is preserved always).
        2. On subsequent runs we try and load the keystore_derivations from
           storage and restore them if the individual keystore.derivation data
           items were lost (because user loaded wallet with older Electron
           Cash).

        This function is provided to allow users to switch between old and new
        EC versions. In the future if we deprecate the wallet format, or if
        enough time has passed, this function may be removed and the simple
        self.load_keystore() may be used instead. """
        self.load_keystore()
        # Imported wallets have no get_keystores(); nothing to preserve.
        if not hasattr(self, 'get_keystores'):
            return
        from .keystore import Deterministic_KeyStore, Old_KeyStore
        keystores = self.get_keystores()
        keystore_derivations = self.storage.get('keystore_derivations', [])
        if len(keystore_derivations) != len(keystores):
            # Stale/absent top-level record; start fresh with one slot per keystore.
            keystore_derivations = [None] * len(keystores)
        # updated: top-level derivations changed; updated_ks: keystore derivation
        # restored; updated_st: keystore seed_type restored.
        updated, updated_ks, updated_st = False, False, False
        for i, keystore in enumerate(keystores):
            if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
                # Attempt to update keystore.seed_type
                if isinstance(keystore, Old_KeyStore):
                    keystore.seed_type = 'old'
                    updated_st = True
                else:
                    # attempt to restore the seed_type based on wallet saved "seed_type"
                    typ = self.storage.get('seed_type')
                    if typ in ('standard', 'electrum'):
                        keystore.seed_type = 'electrum'
                        updated_st = True
                    elif typ == 'bip39':
                        keystore.seed_type = 'bip39'
                        updated_st = True
            saved_der = keystore_derivations[i]
            der = (keystore.has_derivation() and keystore.derivation) or None
            if der != saved_der:
                if der:
                    # keystore had a derivation, but top-level storage did not
                    # (this branch is typically taken on first run after
                    # restoring from seed or creating a new wallet)
                    keystore_derivations[i] = saved_der = der
                    updated = True
                elif saved_der:
                    # we had a derivation but keystore did not. This branch is
                    # taken if the user has loaded this wallet with an older
                    # version of Electron Cash. Attempt to restore their
                    # derivation item in keystore.
                    keystore.derivation = der  # write to keystore
                    updated_ks = True  # tell it to re-save
        if updated:
            self.print_error("Updated keystore_derivations")
            self.storage.put('keystore_derivations', keystore_derivations)
        if updated_ks or updated_st:
            if updated_ks:
                self.print_error("Updated keystore (lost derivations restored)")
            if updated_st:
                self.print_error("Updated keystore (lost seed_type restored)")
            self.save_keystore()
        if any((updated, updated_ks, updated_st)):
            self.storage.write()
    @profiler
    def load_transactions(self):
        """Load transaction state (txi/txo maps, fees, pruned outputs, raw
        transactions) and SLP token state from storage, dropping any
        transaction no longer referenced by txi, txo or pruned_txo."""
        txi = self.storage.get('txi', {})
        self.txi = {tx_hash: self.to_Address_dict(value)
                    for tx_hash, value in txi.items()
                    # skip empty entries to save memory and disk space
                    if value}
        txo = self.storage.get('txo', {})
        self.txo = {tx_hash: self.to_Address_dict(value)
                    for tx_hash, value in txo.items()
                    # skip empty entries to save memory and disk space
                    if value}
        self.tx_fees = self.storage.get('tx_fees', {})
        self.pruned_txo = self.storage.get('pruned_txo', {})
        self.pruned_txo_values = set(self.pruned_txo.values())
        tx_list = self.storage.get('transactions', {})
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
                self.print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)
                self.cashacct.remove_transaction_hook(tx_hash)
        self.slpv1_validity = self.storage.get('slpv1_validity', {})
        self.token_types = self.storage.get('token_types', {})
        self.tx_tokinfo = self.storage.get('tx_tokinfo', {})
        # load up slp_txo as defaultdict-of-defaultdict-of-dicts
        self._slp_txo = defaultdict(lambda: defaultdict(dict))
        for addr, addrdict in self.to_Address_dict(self.storage.get('slp_txo',{})).items():
            for txid, txdict in addrdict.items():
                # need to do this iteration since json stores int keys as decimal strings.
                self._slp_txo[addr][txid] = {int(idx):d for idx,d in txdict.items()}
        ok = self.storage.get('slp_data_version', False)
        if ok != 3:
            # Stored SLP data is from an older format version -- rebuild it.
            self.rebuild_slp()
    @profiler
    def save_transactions(self, write=False):
        """Serialize transactions, txi/txo maps, fees, pruned outputs,
        address history and SLP state into storage, under self.lock.

        Args:
            write: if True, also flush storage to disk immediately.
        """
        with self.lock:
            tx = {}
            for k,v in self.transactions.items():
                tx[k] = str(v)
            self.storage.put('transactions', tx)
            txi = {tx_hash: self.from_Address_dict(value)
                   for tx_hash, value in self.txi.items()
                   # skip empty entries to save memory and disk space
                   if value}
            txo = {tx_hash: self.from_Address_dict(value)
                   for tx_hash, value in self.txo.items()
                   # skip empty entries to save memory and disk space
                   if value}
            self.storage.put('txi', txi)
            self.storage.put('txo', txo)
            self.storage.put('tx_fees', self.tx_fees)
            self.storage.put('pruned_txo', self.pruned_txo)
            history = self.from_Address_dict(self._history)
            self.storage.put('addr_history', history)
            ### SLP stuff
            self.storage.put('slpv1_validity', self.slpv1_validity)
            self.storage.put('token_types', self.token_types)
            self.storage.put('slp_txo', self.from_Address_dict(self._slp_txo))
            self.storage.put('tx_tokinfo', self.tx_tokinfo)
            # Version marker checked by load_transactions / rebuild_slp.
            self.storage.put('slp_data_version', 3)
            if write:
                self.storage.write()
    def activate_slp(self):
        """(Re)start SLP validation for all known token transactions."""
        # This gets called in two situations:
        # - Upon wallet startup, it checks config to see if SLP should be enabled.
        # - During wallet operation, on a network reconnect, to "wake up" the validator -- According to JSCramer this is required. TODO: Investigate why that is
        with self.lock:
            for tx_hash, tti in self.tx_tokinfo.items():
                # Fire up validation on unvalidated txes
                try:
                    tx = self.transactions[tx_hash]
                    self.slp_check_validation(tx_hash, tx)
                except KeyError:
                    # Transaction not (yet) in self.transactions; skip it.
                    continue
    # Canonical token-id form: exactly 64 lowercase hex characters.
    _add_token_hex_re = re.compile('^[a-f0-9]{64}$')

    def add_token_type(self, token_id, entry, check_validation=True):
        """Register (or replace) an SLP token type and, if requested, kick
        off validation of all known transactions carrying that token_id.

        Args:
            token_id: 64-char lowercase hex token id.
            entry: dict describing the token (copied before storing).
            check_validation: if True, re-validate matching transactions.

        Raises:
            ValueError: if token_id is not a 64-char lowercase hex string.
        """
        if not isinstance(token_id, str) or not self._add_token_hex_re.match(token_id):
            # Paranoia: we enforce canonical hex string as lowercase to avoid
            # problems with the same token-id being added as upper or lowercase
            # by client code. This is because token_id becomes a dictionary key
            # in various places and it not being identical would create chaos.
            raise ValueError('token_id must be a lowercase hex string of exactly 64 characters!')
        with self.lock:
            self.token_types[token_id] = dict(entry)
            self.storage.put('token_types', self.token_types)
            for tx_hash, tti in self.tx_tokinfo.items():
                # Fire up validation on unvalidated txes of matching token_id
                try:
                    if tti['token_id'] == token_id and check_validation:
                        tx = self.transactions[tx_hash]
                        self.slp_check_validation(tx_hash, tx)
                except KeyError:  # This catches the case where tx_tokinfo was set to {}
                    continue
    def add_token_safe(self, token_class: str, token_id: str, token_name: str,
                       decimals_divisibility: int,
                       *, error_callback=None, allow_overwrite=False,
                       write_storage=True) -> bool:
        ''' This code was refactored from main_window.py to allow other
        subsystems (eg CLI/RPC, other platforms, etc) to add tokens.
        This function does some minimal sanity checks and returns True
        on success or False on failure. The optional error_callback
        is called on False return. The callback takes a single translated string
        argument which is an error message (suitable for display to the user).
        On success (True) return, this method ends up calling
        self.add_token_type(), and also will end up saving the changes to
        wallet storage if write_storage=True (the default).
        This function is thread-safe. '''
        token_name = token_name.strip()
        # normalize id to canonical lowercase form before any lookup
        token_id = token_id.strip().lower()
        # Check for duplication error
        d = self.token_types.get(token_id)
        group_id = d.get('group_id', None) if d else None
        if d is not None and not allow_overwrite:
            if error_callback:
                error_callback(_('Token with this hash id already exists'))
            return False
        for tid, d in self.token_types.copy().items(): # <-- must take a snapshot-copy here since we aren't holding locks and other threads may modify this dict as we iterate
            # disambiguate a clashing display name by suffixing part of the id
            if d['name'] == token_name and tid != token_id:
                token_name = token_name + "-" + token_id[:3]
                break
        #Hash id validation
        gothex = self._add_token_hex_re.match(token_id)
        if not gothex:
            if error_callback:
                error_callback(_('Invalid token_id hash'))
            return False
        #token name validation
        # if len(token_name) < 1 or len(token_name) > 20:
        #     if error_callback:
        #         error_callback(_('Token name should be 1-20 characters'))
        #     return False
        new_entry = {
            'class' : token_class,
            'name' : token_name,
            'decimals' : decimals_divisibility,
        }
        if token_class == "SLP65":
            # NFT child tokens track their parent group; '?' means unknown yet
            if group_id is None:
                new_entry['group_id'] = "?"
            else:
                new_entry['group_id'] = group_id
        self.add_token_type(token_id, new_entry)
        self.save_transactions(bool(write_storage))
        return True
    def add_token_from_genesis_tx(self, tx_or_raw, *, error_callback=None, allow_overwrite=True) -> SlpMessage:
        ''' Returns None on failure, optionally calling error_callback
        with a translated UI-suitable error message. Returns a valid
        SlpMessage object on success. In exceptional circumstances (garbage
        inputs), may raise.

        Note that unlike the other add_token_* functions, this version defaults
        to allow_overwrite = True.'''
        tx = tx_or_raw
        if not isinstance(tx, Transaction):
            # accept serialized tx input and deserialize it
            tx = Transaction(tx)
        def fail(msg):
            # report via the optional callback and signal failure to our caller
            if error_callback:
                error_callback(msg)
            return None
        # for a GENESIS tx the token id *is* the txid
        token_id = tx.txid()
        try:
            slpMsg = SlpMessage.parseSlpOutputScript(tx.outputs()[0][1])
        except SlpUnsupportedSlpTokenType as e:
            return fail(_("Unsupported SLP token version/type - %r.")%(e.args[0],))
        except SlpInvalidOutputMessage as e:
            return fail(_("This transaction does not contain a valid SLP message.\nReason: %r.")%(e.args,))
        if slpMsg.transaction_type != 'GENESIS':
            return fail(_("This is an SLP transaction, however it is not a genesis transaction."))
        # prefer the short ticker; fall back to the full token name
        token_name = slpMsg.op_return_fields['ticker'].decode('utf-8') or slpMsg.op_return_fields['token_name'].decode('utf-8')
        decimals = slpMsg.op_return_fields['decimals']
        token_class = 'SLP%d' % (slpMsg.token_type,)
        if self.add_token_safe(token_class, token_id, token_name, decimals, error_callback=fail, allow_overwrite=allow_overwrite):
            return slpMsg
        else:
            return None
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
    @profiler
    def build_reverse_history(self):
        """Rebuild tx_addr_hist: a map of tx_hash -> set of wallet addresses
        whose history mentions that tx."""
        self.tx_addr_hist = defaultdict(set)
        for addr, hist in self._history.items():
            for tx_hash, h in hist:
                self.tx_addr_hist[tx_hash].add(addr)
    @profiler
    def check_history(self):
        """Sanity-check self._history: drop entries for addresses that are
        not ours, and re-run add_transaction for wallet txes whose txi/txo
        entries are missing; persist if anything changed."""
        save = False
        my_addrs = [addr for addr in self._history if self.is_mine(addr)]
        # prune history entries for addresses we do not own
        for addr in set(self._history) - set(my_addrs):
            self._history.pop(addr)
            save = True
        for addr in my_addrs:
            hist = self._history[addr]
            for tx_hash, tx_height in hist:
                # already accounted for (or pruned) -- nothing to repair
                if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
                    continue
                tx = self.transactions.get(tx_hash)
                if tx is not None:
                    self.add_transaction(tx_hash, tx)
                    save = True
        if save:
            self.save_transactions()
            self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
    def load_addresses(self):
        """Load receiving/change address lists from storage ('addresses' key)."""
        d = self.storage.get('addresses', {})
        if not isinstance(d, dict):
            # tolerate corrupt/legacy storage contents
            d = {}
        self.receiving_addresses = Address.from_strings(d.get('receiving', []))
        self.change_addresses = Address.from_strings(d.get('change', []))
    def synchronize(self):
        # no-op in this class -- presumably overridden by subclasses that
        # generate addresses on demand; TODO confirm
        pass
def is_deterministic(self):
return self.keystore.is_deterministic()
    def set_up_to_date(self, up_to_date):
        """Set the wallet's synced flag; when it becomes up-to-date, persist
        addresses, transactions and (if the verifier is idle) verified txes."""
        with self.lock:
            self.up_to_date = up_to_date
            if up_to_date:
                self.save_addresses()
                self.save_transactions()
                # if the verifier is also up to date, persist that too;
                # otherwise it will persist its results when it finishes
                if self.verifier and self.verifier.is_up_to_date():
                    self.save_verified_tx()
                self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
    def set_label(self, name, text=None, save=True):
        """Set or clear the label for *name* (an Address or a tx-hash string).

        Empty/None *text* deletes any existing label. Returns True iff the
        label map changed; fires the 'set_label' hook and (optionally)
        persists to storage on change."""
        with self.lock:
            if isinstance(name, Address):
                # labels are keyed by the storage-string form of addresses
                name = name.to_storage_string()
            changed = False
            old_text = self.labels.get(name)
            if text:
                # labels are single-line
                text = text.replace("\n", " ")
                if old_text != text:
                    self.labels[name] = text
                    changed = True
            else:
                if old_text:
                    self.labels.pop(name)
                    changed = True
        if changed:
            run_hook('set_label', self, name, text)
            if save:
                self.save_labels()
        return changed
def save_labels(self):
self.storage.put('labels', self.labels)
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do a 2 x O(logN) lookup using sets rather than 2 x O(N) lookups
# if we were to use the address lists (this was the previous way).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
# addresses, it starts to add up siince is_mine() is called frequently
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
    def export_private_key(self, address, password):
        """ Return *address*'s private key serialized in extended WIF format.

        Returns [] for watching-only wallets (no private keys available). """
        if self.is_watching_only():
            return []
        index = self.get_address_index(address)
        pk, compressed = self.keystore.get_private_key(index, password)
        return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
    def add_unverified_tx(self, tx_hash, tx_height):
        """Record *tx_hash* (seen at *tx_height*) as awaiting SPV verification."""
        with self.lock:
            if tx_height == 0 and tx_hash in self.verified_tx:
                # the tx dropped back to the mempool (e.g. after a reorg):
                # forget its verification and any cached merkle root
                self.verified_tx.pop(tx_hash)
                if self.verifier:
                    self.verifier.merkle_roots.pop(tx_hash, None)
            # tx will be verified only if height > 0
            if tx_hash not in self.verified_tx:
                self.unverified_tx[tx_hash] = tx_height
            self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
    def add_verified_tx(self, tx_hash, info, header):
        """Mark *tx_hash* as SPV-verified; *info* is (tx_height, timestamp, pos)."""
        # Remove from the unverified map and add to the verified map and
        with self.lock:
            self.unverified_tx.pop(tx_hash, None)
            self.verified_tx[tx_hash] = info  # (tx_height, timestamp, pos)
        height, conf, timestamp = self.get_tx_height(tx_hash)
        self.cashacct.add_verified_tx_hook(tx_hash, info, header)
        # notify listeners (e.g. GUI history tab) of the new confirmation state
        self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
    def undo_verifications(self, blockchain, height):
        '''Used by the verifier when a reorg has happened.

        Un-verifies every tx at or above *height* whose header no longer
        matches the (new) chain; returns the set of affected tx hashes.'''
        txs = set()
        with self.lock:
            for tx_hash, item in list(self.verified_tx.items()):
                tx_height, timestamp, pos = item
                if tx_height >= height:
                    header = blockchain.read_header(tx_height)
                    # fixme: use block hash, not timestamp
                    if not header or header.get('timestamp') != timestamp:
                        self.verified_tx.pop(tx_hash, None)
                        txs.add(tx_hash)
            if txs: self.cashacct.undo_verifications_hook(txs)
            if txs:
                self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
        return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
    def get_block_hash(self, height):
        '''Convenience method equivalent to Blockchain.get_height(), except our
        version returns None instead of NULL_HASH_HEX on 'not found' header. '''
        ret = None
        # only attempt the lookup for a plausible height we actually have
        if self.network and height is not None and height >= 0 and height <= self.get_local_height():
            bchain = self.network.blockchain()
            if bchain:
                ret = bchain.get_hash(height)
                if ret == NULL_HASH_HEX:
                    # if hash was NULL (all zeroes), prefer to return None
                    ret = None
        return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
assert isinstance(address, Address)
"effect of tx on address"
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# substract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
    # (is_relevant, is_mine, v, fee): summary of a tx's effect on the wallet
    WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
    # WalletDelta plus the list of wallet-owned outpoints the tx spends
    WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
    def get_wallet_delta(self, tx) -> WalletDelta:
        """Back-compat wrapper: the v1 WalletDelta for *tx*."""
        return self._get_wallet_delta(tx, ver=1)
    def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
        """ Effect of tx on wallet.

        ver=1 returns WalletDelta; ver=2 returns WalletDelta2, which also
        lists the wallet-owned outpoints ('txid:n') this tx spends. """
        assert ver in (1, 2)
        is_relevant = False
        is_mine = False
        is_pruned = False
        is_partial = False
        v_in = v_out = v_out_mine = 0
        spends_coins_mine = list()
        for item in tx.inputs():
            addr = item['address']
            if self.is_mine(addr):
                is_mine = True
                is_relevant = True
                prevout_hash = item['prevout_hash']
                prevout_n = item['prevout_n']
                # look up the value of the spent output in our txo map
                d = self.txo.get(prevout_hash, {}).get(addr, [])
                for n, v, cb in d:
                    if n == prevout_n:
                        value = v
                        if ver == 2:
                            spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
                        break
                else:
                    # our input, but we don't know the prevout -> pruned
                    value = None
                if value is None:
                    is_pruned = True
                else:
                    v_in += value
            else:
                is_partial = True
        if not is_mine:
            is_partial = False
        for _type, addr, value in tx.outputs():
            v_out += value
            if self.is_mine(addr):
                v_out_mine += value
                is_relevant = True
        if is_pruned:
            # some inputs are mine:
            fee = None
            if is_mine:
                v = v_out_mine - v_out
            else:
                # no input is mine
                v = v_out_mine
        else:
            v = v_out_mine - v_in
            if is_partial:
                # some inputs are mine, but not all
                fee = None
            else:
                # all inputs are mine
                fee = v_in - v_out
        if not is_mine:
            fee = None
        if ver == 1:
            return self.WalletDelta(is_relevant, is_mine, v, fee)
        return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
    # per-tx display summary; see _get_tx_info for how each field is derived
    TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
    class StatusEnum(Enum):
        """Machine-readable counterpart of the TxInfo.status display string."""
        Unconfirmed = auto()
        NotVerified = auto()
        Confirmed = auto()
        Signed = auto()
        Unsigned = auto()
        PartiallySigned = auto()
    # TxInfo plus a StatusEnum member (v2 API)
    TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
    def get_tx_info(self, tx) -> TxInfo:
        """ Return information for a transaction (v1 API: status string only). """
        return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
    def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
        """ Get extended information for a transaction, combined into 1 call (for performance) """
        # compute the delta once and reuse it for the info computation
        delta2 = self._get_wallet_delta(tx, ver=2)
        info2 = self._get_tx_info(tx, delta2, ver=2)
        return (delta2, info2)
    def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
        """ get_tx_info implementation.

        *delta* is the WalletDelta/WalletDelta2 already computed for *tx*;
        ver=2 additionally fills in the StatusEnum member. """
        assert ver in (1, 2)
        if isinstance(delta, self.WalletDelta):
            is_relevant, is_mine, v, fee = delta
        else:
            is_relevant, is_mine, v, fee, __ = delta
        exp_n = None
        can_broadcast = False
        label = ''
        height = conf = timestamp = None
        status_enum = None
        tx_hash = tx.txid()
        if tx.is_complete():
            if tx_hash in self.transactions:
                # a known wallet tx: report its confirmation status
                label = self.get_label(tx_hash)
                height, conf, timestamp = self.get_tx_height(tx_hash)
                if height > 0:
                    if conf:
                        status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
                        status_enum = self.StatusEnum.Confirmed
                    else:
                        status = _('Not verified')
                        status_enum = self.StatusEnum.NotVerified
                else:
                    status = _('Unconfirmed')
                    status_enum = self.StatusEnum.Unconfirmed
                    if fee is None:
                        fee = self.tx_fees.get(tx_hash)
                    if fee and self.network and self.network.config.has_fee_estimates():
                        # NB: this branch will not be taken as has_fee_estimates()
                        # will always return false since we disabled querying
                        # the fee histogram as it's useless for BCH anyway.
                        size = tx.estimated_size()
                        fee_per_kb = fee * 1000 / size
                        exp_n = self.network.config.reverse_dynfee(fee_per_kb)
            else:
                # complete but not ours yet: it can be broadcast if online
                status = _("Signed")
                status_enum = self.StatusEnum.Signed
                can_broadcast = self.network is not None
        else:
            # incomplete tx: report how many signatures it has vs. needs
            s, r = tx.signature_count()
            if s == 0:
                status = _("Unsigned")
                status_enum = self.StatusEnum.Unsigned
            else:
                status =_('Partially signed') + ' (%d/%d)'%(s,r)
                status_enum = self.StatusEnum.PartiallySigned
        if is_relevant:
            if is_mine:
                if fee is not None:
                    amount = v + fee
                else:
                    amount = v
            else:
                amount = v
        else:
            amount = None
        if ver == 1:
            return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
        assert status_enum is not None
        return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
                            status_enum)
def get_addr_io(self, address):
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_slp_token_info(self, tokenid):
with self.lock:
return self.tx_tokinfo[tokenid]
    def get_slp_token_baton(self, slpTokenId, cache=True):
        """Find and return the valid minting-baton utxo for *slpTokenId*;
        raises SlpNoMintingBatonFound if no such utxo exists.

        NOTE(review): the `cache` parameter is currently unused -- kept for
        interface compatibility; confirm before removing."""
        with self.lock:
            # snapshot so we can iterate without holding the lock
            slp_txos = copy.deepcopy(self._slp_txo)
        # look for a minting baton
        for addr, addrdict in slp_txos.items():
            for txid, txdict in addrdict.items():
                for idx, txo in txdict.items():
                    if txo['qty'] == 'MINT_BATON' and txo['token_id'] == slpTokenId:
                        try:
                            coins = self.get_slp_utxos(slpTokenId, domain = [addr], exclude_frozen = False, confirmed_only = False, slp_include_baton=True)
                            with self.lock:
                                val = self.tx_tokinfo[txid]['validity']
                            # only accept the baton if still unspent AND DAG-valid
                            baton_utxo = [ utxo for utxo in coins if utxo['prevout_hash'] == txid and utxo['prevout_n'] == idx and val == 1][0]
                        except IndexError:
                            continue
                        return baton_utxo
        raise SlpNoMintingBatonFound()
    # This method is updated for SLP to prevent tokens from being spent
    # in normal txn or txns with token_id other than the one specified
    def get_addr_utxo(self, address, *, exclude_slp = True):
        """Return the unspent coins of *address* as a dict 'txid:n' -> coin dict.

        With exclude_slp (the default) every known SLP-carrying txo is
        removed, so plain BCH spends cannot accidentally burn tokens."""
        coins, spent = self.get_addr_io(address)
        # removes spent coins
        for txi in spent:
            coins.pop(txi)
            # cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
            self.frozen_coins.discard(txi)
            self.frozen_coins_tmp.discard(txi)
        """
        SLP -- removes ALL SLP UTXOs that are either unrelated, or unvalidated
        """
        if exclude_slp:
            with self.lock:
                addrdict = self._slp_txo.get(address,{})
                for txid, txdict in addrdict.items():
                    for idx, txo in txdict.items():
                        coins.pop(txid + ":" + str(idx), None)
        out = {}
        for txo, v in coins.items():
            tx_height, value, is_cb = v
            prevout_hash, prevout_n = txo.split(':')
            x = {
                'address':address,
                'value':value,
                'prevout_n':int(prevout_n),
                'prevout_hash':prevout_hash,
                'height':tx_height,
                'coinbase':is_cb,
                'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp
            }
            out[txo] = x
        return out
    """ SLP -- keeps ONLY SLP UTXOs that are either unrelated, or unvalidated """
    def get_slp_addr_utxo(self, address, slpTokenId, slp_include_invalid=False, slp_include_baton=False, ):
        """Return *address*'s utxos carrying valid tokens of *slpTokenId* as a
        dict 'txid:n' -> coin dict (with token_* fields added).

        slp_include_invalid / slp_include_baton widen the filter to also keep
        invalid-DAG txos or the minting baton (for deliberate burns/minting)."""
        with self.lock:
            coins, spent = self.get_addr_io(address)
            # snapshot the SLP txo map so iteration below is lock-free
            addrdict = copy.deepcopy(self._slp_txo.get(address,{}))
        # removes spent coins
        for txi in spent:
            coins.pop(txi)
            # cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
            self.frozen_coins.discard(txi)
            self.frozen_coins_tmp.discard(txi)
        coins_to_pop = []
        for coin in coins.items():
            if coin != None:
                txid = coin[0].split(":")[0]
                idx = coin[0].split(":")[1]
                try:
                    slp_txo = addrdict[txid][int(idx)]
                    with self.lock:
                        slp_tx_info = self.tx_tokinfo[txid]
                    # handle special burning modes
                    if slp_txo['token_id'] == slpTokenId:
                        # allow inclusion and possible burning of a valid minting baton
                        if slp_include_baton and slp_txo['qty'] == "MINT_BATON" and slp_tx_info['validity'] == 1:
                            continue
                        # allow inclusion and possible burning of invalid SLP txos
                        if slp_include_invalid and slp_tx_info['validity'] != 0:
                            continue
                    # normal remove any txos that are not valid for this token ID
                    if slp_txo['token_id'] != slpTokenId or slp_tx_info['validity'] != 1 or slp_txo['qty'] == "MINT_BATON":
                        coins_to_pop.append(coin[0])
                except KeyError:
                    # not an SLP txo (or unknown tx) -- exclude it
                    coins_to_pop.append(coin[0])
        for c in coins_to_pop:
            coins.pop(c, None)
        out = {}
        for txo, v in coins.items():
            tx_height, value, is_cb = v
            prevout_hash, prevout_n = txo.split(':')
            with self.lock:
                tok_info = self.tx_tokinfo[prevout_hash]
            x = {
                'address': address,
                'value': value,
                'prevout_n': int(prevout_n),
                'prevout_hash': prevout_hash,
                'height': tx_height,
                'coinbase': is_cb,
                'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp,
                'token_value': addrdict[prevout_hash][int(prevout_n)]['qty'],
                'token_id_hex': tok_info['token_id'],
                'token_type': tok_info['type'],
                'token_validation_state': tok_info['validity']
            }
            out[txo] = x
        return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
    def get_addr_balance(self, address, exclude_frozen_coins=False):
        ''' Returns the balance of a bitcoin address as a tuple of:
        (confirmed_matured, unconfirmed, unmatured)
        Note that 'exclude_frozen_coins = True' only checks for coin-level
        freezing, not address-level. '''
        assert isinstance(address, Address)
        mempoolHeight = self.get_local_height() + 1
        if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
            cached = self._addr_bal_cache.get(address)
            if cached is not None:
                return cached
        received, sent = self.get_addr_io(address)
        c = u = x = 0
        had_cb = False
        for txo, (tx_height, v, is_cb) in received.items():
            if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
                continue
            had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
            if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
                # immature coinbase output
                x += v
            elif tx_height > 0:
                c += v
            else:
                u += v
            if txo in sent:
                # subtract spent value from the bucket matching the spend's height
                if sent[txo] > 0:
                    c -= v
                else:
                    u -= v
        result = c, u, x
        if not exclude_frozen_coins and not had_cb:
            # Cache the results.
            # Cache needs to be invalidated if a transaction is added to/
            # removed from addr history. (See self._addr_bal_cache calls
            # related to this littered throughout this file).
            #
            # Note that as a performance tweak we don't ever cache balances for
            # addresses involving coinbase coins. The rationale being as
            # follows: Caching of balances of the coinbase addresses involves
            # a dynamic quantity: maturity of the coin (which considers the
            # ever-changing block height).
            #
            # There wasn't a good place in this codebase to signal the maturity
            # happening (and thus invalidate the cache entry for the exact
            # address that holds the coinbase coin in question when a new
            # block is found that matures a coinbase coin).
            #
            # In light of that fact, a possible approach would be to invalidate
            # this entire cache when a new block arrives (this is what Electrum
            # does). However, for Electron Cash with its focus on many addresses
            # for future privacy features such as integrated CashShuffle --
            # being notified in the wallet and invalidating the *entire* cache
            # whenever a new block arrives (which is the exact time you do
            # the most GUI refreshing and calling of this function) seems a bit
            # heavy-handed, just for sake of the (relatively rare, for the
            # average user) coinbase-carrying addresses.
            #
            # It's not a huge performance hit for the coinbase addresses to
            # simply not cache their results, and have this function recompute
            # their balance on each call, when you consider that as a
            # consequence of this policy, all the other addresses that are
            # non-coinbase can benefit from a cache that stays valid for longer
            # than 1 block (so long as their balances haven't changed).
            self._addr_bal_cache[address] = result
        return result
    def get_spendable_coins(self, domain, config, isInvoice = False):
        """Spendable (non-frozen, mature) BCH utxos over *domain* addresses."""
        confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
        # if (isInvoice):
        #     confirmed_only = True
        return self.get_utxos(domain=domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only)
    def get_slp_spendable_coins(self, slpTokenId, domain, config, isInvoice = False):
        """Spendable (non-frozen) SLP utxos of *slpTokenId* over *domain*."""
        confirmed_only = config.get('confirmed_only', False)
        # if (isInvoice):
        #     confirmed_only = True
        return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=True, confirmed_only=confirmed_only)
    def get_slp_coins(self, slpTokenId, domain, config, isInvoice = False):
        """All SLP utxos of *slpTokenId* over *domain* (frozen included)."""
        confirmed_only = config.get('confirmed_only', False)
        # if (isInvoice):
        #     confirmed_only = True
        return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=False, confirmed_only=confirmed_only)
    def get_slp_token_balance(self, slpTokenId, config):
        """Token-unit balances for *slpTokenId* as a 5-tuple:
        (valid, unvalidated, invalid, unfrozen_valid, frozen_valid)."""
        valid_token_bal = 0
        unvalidated_token_bal = 0
        invalid_token_bal = 0
        unfrozen_valid_token_bal = 0
        slp_coins = self.get_slp_coins(slpTokenId, None, config)
        for coin in slp_coins:
            txid = coin['prevout_hash']
            validity = self.tx_tokinfo[txid]['validity']
            if validity == 1: # Valid DAG
                valid_token_bal += coin['token_value']
                if not coin['is_frozen_coin'] and coin['address'] not in self.frozen_addresses:
                    unfrozen_valid_token_bal += coin['token_value']
            elif validity > 1: # Invalid DAG (2=bad slpmessage, 3=inputs lack enough tokens / missing mint baton, 4=change token_type or bad NFT parent)
                invalid_token_bal += coin['token_value']
            elif validity == 0: # Unknown DAG status (should be in processing queue)
                unvalidated_token_bal += coin['token_value']
        return (valid_token_bal, unvalidated_token_bal, invalid_token_bal, unfrozen_valid_token_bal, valid_token_bal - unfrozen_valid_token_bal)
    def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
                  *, addr_set_out = None, exclude_slp = True):
        '''Return a list of coin dicts over *domain* (default: all wallet
        addresses), applying frozen/maturity/confirmation filters.

        Note that exclude_frozen = True checks for BOTH address-level and
        coin-level frozen status.
        Optional kw-only arg `addr_set_out` specifies a set in which to add all
        addresses encountered in the utxos returned. '''
        with self.lock:
            mempoolHeight = self.get_local_height() + 1
            coins = []
            if domain is None:
                domain = self.get_addresses()
            if exclude_frozen:
                domain = set(domain) - self.frozen_addresses
            for addr in domain:
                utxos = self.get_addr_utxo(addr, exclude_slp=exclude_slp)
                len_before = len(coins)
                for x in utxos.values():
                    if exclude_frozen and x['is_frozen_coin']:
                        continue
                    if confirmed_only and x['height'] <= 0:
                        continue
                    # A note about maturity: Previous versions of Electrum
                    # and Electron Cash were off by one. Maturity is
                    # calculated based off mempool height (chain tip height + 1).
                    # See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
                    # and also txmempool.cpp CTxMemPool::removeForReorg.
                    if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
                        continue
                    coins.append(x)
                if addr_set_out is not None and len(coins) > len_before:
                    # add this address to the address set if it has results
                    addr_set_out.add(addr)
            return coins
def get_slp_utxos(self, slpTokenId, domain = None, exclude_frozen = False, confirmed_only = False, slp_include_invalid=False, slp_include_baton=False,
*, addr_set_out = None):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_slp_addr_utxo(addr, slpTokenId, slp_include_invalid=slp_include_invalid, slp_include_baton=slp_include_baton)
len_before = len(coins)
for x in utxos.values():
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
coins.append(x)
continue
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
    def get_slp_locked_balance(self):
        """Total BCH (satoshi) value sitting in unspent SLP token outputs."""
        bch = 0
        with self.lock:
            for addr, addrdict in self._slp_txo.items():
                _, spent = self.get_addr_io(addr)
                for txid, txdict in addrdict.items():
                    for idx, txo in txdict.items():
                        if (txid + ":" + str(idx)) in spent:
                            continue
                        try:
                            # find the matching txo's BCH value in self.txo
                            for i, a, _ in self.txo[txid][addr]:
                                if i == idx:
                                    bch+=a
                        except KeyError:
                            # tx/addr unknown to the txo map -- skip
                            pass
        return bch
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
    def _clean_pruned_txo_thread(self):
        ''' Runs in the thread self.pruned_txo_cleaner_thread which is only
        active if self.network. Cleans the self.pruned_txo dict and the
        self.pruned_txo_values set of spends that are not relevant to the
        wallet. The processing below is needed because as of 9/16/2019, Electron
        Cash temporarily puts all spends that pass through add_transaction and
        have an unparseable address (txi['address'] is None) into the dict
        self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
        scriptSigs and detecting balance changes properly for txins
        containing such scriptSigs. See #895. '''
        # 'ser' strings throughout are of the form "prevout_hash:prevout_n".
        def deser(ser):
            # Split a "hash:n" string back into (str, int) components.
            prevout_hash, prevout_n = ser.split(':')
            prevout_n = int(prevout_n)
            return prevout_hash, prevout_n
        def mkser(prevout_hash, prevout_n):
            # Inverse of deser().
            return f'{prevout_hash}:{prevout_n}'
        def rm(ser, pruned_too=True, *, tup = None):
            # Remove ser from the local txid_n tracking dict; optionally also
            # remove it from self.pruned_txo / self.pruned_txo_values.
            h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
            s = txid_n[h]
            s.discard(n)
            if not s:
                txid_n.pop(h, None)
            if pruned_too:
                with self.lock:
                    tx_hash = self.pruned_txo.pop(ser, None)
                    self.pruned_txo_values.discard(tx_hash)
        def add(ser):
            # Track ser in the local txid_n dict for later examination.
            prevout_hash, prevout_n = deser(ser)
            txid_n[prevout_hash].add(prevout_n)
        def keep_running():
            # Thread should exit once the network goes away or we are replaced.
            return bool(self.network and self.pruned_txo_cleaner_thread is me)
        def can_do_work():
            # Only process when we have pending items and wallet is synced.
            return bool(txid_n and self.is_up_to_date())
        debug = False  # set this to true here to get more verbose output
        me = threading.current_thread()
        q = me.q
        me.txid_n = txid_n = defaultdict(set)  # dict of prevout_hash -> set of prevout_n (int)
        last = time.time()
        try:
            self.print_error(f"{me.name}: thread started")
            with self.lock:
                # Setup -- grab whatever was already in pruned_txo at thread
                # start
                for ser in self.pruned_txo:
                    h, n = deser(ser)
                    txid_n[h].add(n)
            while keep_running():
                try:
                    # Block waiting for work; use a shorter timeout when there
                    # is pending work so the periodic scan below runs sooner.
                    ser = q.get(timeout=5.0 if can_do_work() else 20.0)
                    if ser is None:
                        # quit thread
                        return
                    if ser.startswith('r_'):
                        # remove requested
                        rm(ser[2:], False)
                    else:
                        # ser was added
                        add(ser)
                    del ser
                except queue.Empty:
                    pass
                if not can_do_work():
                    continue
                t0 = time.time()
                if t0 - last < 1.0:  # run no more often than once per second
                    continue
                last = t0
                # Pass 1: drop entries that already left self.pruned_txo.
                defunct_ct = 0
                for prevout_hash, s in txid_n.copy().items():
                    for prevout_n in s.copy():
                        ser = mkser(prevout_hash, prevout_n)
                        with self.lock:
                            defunct = ser not in self.pruned_txo
                        if defunct:
                            #self.print_error(f"{me.name}: skipping already-cleaned", ser)
                            rm(ser, False, tup=(prevout_hash, prevout_n))
                            defunct_ct += 1
                            continue
                if defunct_ct and debug:
                    self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
                # Pass 2: fetch each prevout tx (wallet, cache, or network) and
                # drop pruned_txo entries whose output address is not ours.
                ct = 0
                for prevout_hash, s in txid_n.copy().items():
                    try:
                        with self.lock:
                            tx = self.transactions.get(prevout_hash)
                        if tx is None:
                            tx = Transaction.tx_cache_get(prevout_hash)
                        if isinstance(tx, Transaction):
                            tx = Transaction(tx.raw)  # take a copy
                        else:
                            if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
                            t1 = time.time()
                            tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
                            if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
                            # Paranoia; intended side effect of the below assert
                            # is to also deserialize the tx (by calling the slow
                            # .txid()) which ensures the tx from the server
                            # is not junk.
                            assert prevout_hash == tx.txid(), "txid mismatch"
                            Transaction.tx_cache_put(tx, prevout_hash)  # will cache a copy
                    except Exception as e:
                        self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
                        if not keep_running():  # in case we got a network timeout *and* the wallet was closed
                            return
                        continue
                    if not keep_running():
                        return
                    for prevout_n in s.copy():
                        ser = mkser(prevout_hash, prevout_n)
                        try:
                            txo = tx.outputs()[prevout_n]
                        except IndexError:
                            self.print_error(f"{me.name}: ERROR -- could not find output", ser)
                            rm(ser, True, tup=(prevout_hash, prevout_n))
                            continue
                        _typ, addr, v = txo
                        rm_pruned_too = False
                        with self.lock:
                            mine = self.is_mine(addr)
                            if not mine and ser in self.pruned_txo:
                                ct += 1
                                rm_pruned_too = True
                            rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
                        if rm_pruned_too and debug:
                            self.print_error(f"{me.name}: DEBUG removed", ser)
                if ct:
                    with self.lock:
                        # Save changes to storage -- this is cheap and doesn't
                        # actually write to file yet, just flags storage as
                        # 'dirty' for when wallet.storage.write() is called
                        # later.
                        self.storage.put('pruned_txo', self.pruned_txo)
                    self.print_error(f"{me.name}: removed", ct,
                                     "(non-relevant) pruned_txo's in",
                                     f'{time.time()-t0:3.2f}', "seconds")
        except:
            # Log the full traceback before letting the exception propagate,
            # since uncaught exceptions in daemon threads die silently.
            import traceback
            self.print_error(f"{me.name}:", traceback.format_exc())
            raise
        finally:
            self.print_error(f"{me.name}: thread exiting")
    def add_transaction(self, tx_hash, tx):
        """Record *tx* (with id *tx_hash*) in the wallet's txi/txo bookkeeping.

        Handles out-of-order arrival (a spend seen before its funding tx) via
        the self.pruned_txo mechanism, invalidates affected balance-cache
        entries, detects Cash Accounts registrations, and finally hands the tx
        to the SLP subsystem via handleSlpTransaction().
        """
        if not tx.inputs():
            # bad tx came in off the wire -- all 0's or something, see #987
            self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
                             " Bad server? Ignoring tx:", tx_hash)
            return
        is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
        with self.lock:
            # HELPER FUNCTIONS
            def add_to_self_txi(tx_hash, addr, ser, v):
                ''' addr must be 'is_mine' '''
                d = self.txi.get(tx_hash)
                if d is None:
                    self.txi[tx_hash] = d = {}
                l = d.get(addr)
                if l is None:
                    d[addr] = l = []
                l.append((ser, v))
            def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
                """Returns a tuple of the (Address,value) for a given
                prevout_hash:prevout_n, or (None, None) if not found. If valid
                return, the Address object is found by scanning self.txo. The
                lookup below is relatively fast in practice even on pathological
                wallets."""
                dd = self.txo.get(prevout_hash, {})
                for addr2, item in dd.items():
                    for n, v, is_cb in item:
                        if n == prevout_n:
                            return addr2, v
                return (None, None)
            def txin_get_info(txi):
                # Extract (prevout_hash, prevout_n, "hash:n") from a txin dict.
                prevout_hash = txi['prevout_hash']
                prevout_n = txi['prevout_n']
                ser = f'{prevout_hash}:{prevout_n}'
                return prevout_hash, prevout_n, ser
            def put_pruned_txo(ser, tx_hash):
                # Flag ser as "spent by tx_hash, funding tx not yet seen" and
                # notify the background cleaner thread.
                self.pruned_txo[ser] = tx_hash
                self.pruned_txo_values.add(tx_hash)
                t = self.pruned_txo_cleaner_thread
                if t and t.q: t.q.put(ser)
            def pop_pruned_txo(ser):
                # Un-flag ser; returns the spending tx_hash if it was flagged.
                next_tx = self.pruned_txo.pop(ser, None)
                if next_tx:
                    self.pruned_txo_values.discard(next_tx)
                    t = self.pruned_txo_cleaner_thread
                    if t and t.q: t.q.put('r_' + ser)  # notify of removal
                return next_tx
            # /HELPER FUNCTIONS
            # add inputs
            self.txi[tx_hash] = d = {}
            for txi in tx.inputs():
                if txi['type'] == 'coinbase':
                    continue
                addr = txi.get('address')
                # find value from prev output
                if self.is_mine(addr):
                    prevout_hash, prevout_n, ser = txin_get_info(txi)
                    dd = self.txo.get(prevout_hash, {})
                    for n, v, is_cb in dd.get(addr, []):
                        if n == prevout_n:
                            add_to_self_txi(tx_hash, addr, ser, v)
                            break
                    else:
                        # Coin's spend tx came in before its receive tx: flag
                        # the spend for when the receive tx will arrive into
                        # this function later.
                        put_pruned_txo(ser, tx_hash)
                    self._addr_bal_cache.pop(addr, None)  # invalidate cache entry
                    del dd, prevout_hash, prevout_n, ser
                elif addr is None:
                    # Unknown/unparsed address.. may be a strange p2sh scriptSig
                    # Try and find it in txout's if it's one of ours.
                    # See issue #895.
                    prevout_hash, prevout_n, ser = txin_get_info(txi)
                    # Find address in self.txo for this prevout_hash:prevout_n
                    addr2, v = find_in_self_txo(prevout_hash, prevout_n)
                    if addr2 is not None and self.is_mine(addr2):
                        add_to_self_txi(tx_hash, addr2, ser, v)
                        self._addr_bal_cache.pop(addr2, None)  # invalidate cache entry
                    else:
                        # Not found in self.txo. It may still be one of ours
                        # however since tx's can come in out of order due to
                        # CTOR, etc, and self.txo may not have it yet. So we
                        # flag the spend now, and when the out-of-order prevout
                        # tx comes in later for this input (if it's indeed one
                        # of ours), the real address for this input will get
                        # picked up then in the "add outputs" section below in
                        # this function. At that point, self.txi will be
                        # properly updated to indicate the coin in question was
                        # spent via an add_to_self_txi call.
                        #
                        # If it's *not* one of ours, however, the below will
                        # grow pruned_txo with an irrelevant entry. However, the
                        # irrelevant entry will eventually be reaped and removed
                        # by the self.pruned_txo_cleaner_thread which runs
                        # periodically in the background.
                        put_pruned_txo(ser, tx_hash)
                    del addr2, v, prevout_hash, prevout_n, ser
            # don't keep empty entries in self.txi
            if not d:
                self.txi.pop(tx_hash, None)
            # add outputs
            self.txo[tx_hash] = d = {}
            op_return_ct = 0
            deferred_cashacct_add = None
            for n, txo in enumerate(tx.outputs()):
                ser = tx_hash + ':%d'%n
                _type, addr, v = txo
                mine = False
                if isinstance(addr, ScriptOutput):
                    if addr.is_opreturn():
                        op_return_ct += 1
                    if isinstance(addr, cashacct.ScriptOutput):
                        # auto-detect CashAccount registrations we see,
                        # and notify cashacct subsystem of that fact. But we
                        # can only do it after making sure it's the *only*
                        # OP_RETURN in the tx.
                        deferred_cashacct_add = (
                            lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
                            self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
                        )
                elif self.is_mine(addr):
                    # add coin to self.txo since it's mine.
                    mine = True
                    l = d.get(addr)
                    if l is None:
                        d[addr] = l = []
                    l.append((n, v, is_coinbase))
                    del l
                    self._addr_bal_cache.pop(addr, None)  # invalidate cache entry
                # give v to txi that spends me
                next_tx = pop_pruned_txo(ser)
                if next_tx is not None and mine:
                    add_to_self_txi(next_tx, addr, ser, v)
            # don't keep empty entries in self.txo
            if not d:
                self.txo.pop(tx_hash, None)
            # save
            self.transactions[tx_hash] = tx
            # Invoke the cashacct add hook (if defined) here at the end, with
            # the lock held. We accept the cashacct.ScriptOutput only iff
            # op_return_ct == 1 as per the Cash Accounts spec.
            # See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
            if op_return_ct == 1 and deferred_cashacct_add:
                deferred_cashacct_add()
            ### SLP: Handle incoming SLP transaction outputs here
            self.handleSlpTransaction(tx_hash, tx)
"""
Callers are expected to take lock(s). We take no locks
"""
    def handleSlpTransaction(self, tx_hash, tx):
        """Parse *tx*'s first output as an SLP OP_RETURN message and record any
        token outputs paying to our addresses in self._slp_txo / self.tx_tokinfo.

        Callers are expected to hold the wallet lock; we take no locks here.
        Non-SLP (unparseable) tx's are silently ignored.
        """
        txouts = tx.outputs()
        try:
            slpMsg = SlpMessage.parseSlpOutputScript(txouts[0][1])
        except SlpUnsupportedSlpTokenType as e:
            # Unknown future token type: remember our outputs as type-tagged
            # but with unknown qty/token so funds are not accidentally spent.
            token_type = 'SLP%d'%(e.args[0],)
            for i, (_type, addr, _) in enumerate(txouts):
                if _type == TYPE_ADDRESS and self.is_mine(addr):
                    self._slp_txo[addr][tx_hash][i] = {
                        'type': token_type,
                        'qty': None,
                        'token_id': None,
                        }
            return
        except (SlpParsingError, IndexError, OpreturnError):
            # Not an SLP transaction at all -- nothing to do.
            return
        if slpMsg.transaction_type == 'SEND':
            token_id_hex = slpMsg.op_return_fields['token_id_hex']
            # truncate outputs list
            amounts = slpMsg.op_return_fields['token_output'][:len(txouts)]
            for i, qty in enumerate(amounts):
                _type, addr, _ = txouts[i]
                if _type == TYPE_ADDRESS and qty > 0 and self.is_mine(addr):
                    self._slp_txo[addr][tx_hash][i] = {
                        'type': 'SLP%d'%(slpMsg.token_type,),
                        'token_id': token_id_hex,
                        'qty': qty,
                        }
        elif slpMsg.transaction_type == 'GENESIS':
            # For GENESIS the token id is the genesis tx's own hash.
            token_id_hex = tx_hash
            try:
                # vout 1 carries the initial mint quantity by spec.
                _type, addr, _ = txouts[1]
                if _type == TYPE_ADDRESS:
                    if slpMsg.op_return_fields['initial_token_mint_quantity'] > 0 and self.is_mine(addr):
                        self._slp_txo[addr][tx_hash][1] = {
                            'type': 'SLP%d'%(slpMsg.token_type,),
                            'token_id': token_id_hex,
                            'qty': slpMsg.op_return_fields['initial_token_mint_quantity'],
                            }
                if slpMsg.op_return_fields['mint_baton_vout'] is not None:
                    i = slpMsg.op_return_fields['mint_baton_vout']
                    _type, addr, _ = txouts[i]
                    if _type == TYPE_ADDRESS:
                        self._slp_txo[addr][tx_hash][i] = {
                            'type': 'SLP%d'%(slpMsg.token_type,),
                            'token_id': token_id_hex,
                            'qty': 'MINT_BATON',
                            }
            except IndexError: # if too few outputs (compared to mint_baton_vout)
                pass
        elif slpMsg.transaction_type == "MINT":
            token_id_hex = slpMsg.op_return_fields['token_id_hex']
            try:
                # vout 1 carries the newly minted quantity by spec.
                _type, addr, _ = txouts[1]
                if _type == TYPE_ADDRESS:
                    if slpMsg.op_return_fields['additional_token_quantity'] > 0 and self.is_mine(addr):
                        self._slp_txo[addr][tx_hash][1] = {
                            'type': 'SLP%d'%(slpMsg.token_type,),
                            'token_id': token_id_hex,
                            'qty': slpMsg.op_return_fields['additional_token_quantity'],
                            }
                if slpMsg.op_return_fields['mint_baton_vout'] is not None:
                    i = slpMsg.op_return_fields['mint_baton_vout']
                    _type, addr, _ = txouts[i]
                    if _type == TYPE_ADDRESS:
                        self._slp_txo[addr][tx_hash][i] = {
                            'type': 'SLP%d'%(slpMsg.token_type,),
                            'token_id': token_id_hex,
                            'qty': 'MINT_BATON',
                            }
            except IndexError: # if too few outputs (compared to mint_baton_vout)
                pass
        elif slpMsg.transaction_type == 'COMMIT':
            # ignore COMMs, they aren't producing any tokens.
            return
        else:
            raise RuntimeError(slpMsg.transaction_type)
        # On receiving a new SEND, MINT, or GENESIS always add entry to token_types if wallet hasn't seen tokenId yet
        if slpMsg.transaction_type in [ 'SEND', 'MINT', 'GENESIS' ]:
            if slpMsg.transaction_type == 'GENESIS':
                tokenid = tx_hash
            else:
                tokenid = slpMsg.op_return_fields['token_id_hex']
            new_token = True
            for k, v in self.tx_tokinfo.items():
                try:
                    if v['token_id'] == tokenid:
                        new_token = False
                except KeyError:
                    pass
            if new_token and tokenid not in self.token_types:
                # Placeholder entry -- decimals/name unknown until metadata
                # for this token is fetched elsewhere.
                tty = { 'class': 'SLP%d'%(slpMsg.token_type,),
                        'decimals': "?",
                        'name': 'unknown-' + tokenid[:6]
                        }
                if slpMsg.token_type == 65:
                    tty['group_id'] = "?"
                self.token_types[tokenid] = tty
        # Always add entry to tx_tokinfo
        tti = { 'type':'SLP%d'%(slpMsg.token_type,),
                'transaction_type':slpMsg.transaction_type,
                'token_id': token_id_hex,
                'validity': 0,
                }
        self.tx_tokinfo[tx_hash] = tti
        if self.is_slp: # Only start up validation if SLP enabled
            self.slp_check_validation(tx_hash, tx)
def slp_check_validation(self, tx_hash, tx):
""" Callers are expected to take lock(s). We take no locks """
tti = self.tx_tokinfo[tx_hash]
try:
is_new = self.token_types[tti['token_id']]['decimals'] == '?'
except:
is_new = False
if tti['validity'] == 0 and tti['token_id'] in self.token_types and not is_new and tti['type'] in ['SLP1','SLP65','SLP129']:
def callback(job):
(txid,node), = job.nodes.items()
val = node.validity
tti['validity'] = val
if slp_gs_mgr.slp_validity_signal is not None:
slp_gs_mgr.slp_validity_signal.emit(txid, val)
if tti['type'] == 'SLP1':
job = self.slp_graph_0x01.make_job(tx, self, self.network,
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
elif tti['type'] in ['SLP65', 'SLP129']:
job = self.slp_graph_0x01_nft.make_job(tx, self, self.network, nft_type=tti['type'],
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
if job is not None:
job.add_callback(callback)
# This was commented out because it spammed the log so badly
# it impacted performance. SLP validation can create a *lot* of jobs!
#finalization_print_error(job, f"[{self.basename()}] Job for {tx_hash} type {tti['type']} finalized")
def rebuild_slp(self,):
"""Wipe away old SLP transaction data and rerun on the entire tx set.
"""
with self.lock:
self._slp_txo = defaultdict(lambda: defaultdict(dict))
self.tx_tokinfo = {}
for txid, tx in self.transactions.items():
self.handleSlpTransaction(txid, tx)
    def remove_transaction(self, tx_hash):
        """Undo the txi/txo bookkeeping for *tx_hash* (inverse of add_transaction).

        The raw tx stays in self.transactions; it is garbage-collected at next
        wallet load. Spends of this tx's outputs are re-flagged in pruned_txo
        so they can be re-linked if the tx ever comes back.
        """
        with self.lock:
            self.print_error("removing tx from history", tx_hash)
            # Note that we don't actually remove the tx_hash from
            # self.transactions, but instead rely on the unreferenced tx being
            # removed the next time the wallet is loaded in self.load_transactions()
            # Drop any pruned_txo entries that pointed at this tx as spender.
            for ser, hh in list(self.pruned_txo.items()):
                if hh == tx_hash:
                    self.pruned_txo.pop(ser)
                    self.pruned_txo_values.discard(hh)
            # add tx to pruned_txo, and undo the txi addition
            for next_tx, dd in self.txi.items():
                for addr, l in list(dd.items()):
                    ll = l[:]
                    for item in ll:
                        ser, v = item
                        prev_hash, prev_n = ser.split(':')
                        if prev_hash == tx_hash:
                            self._addr_bal_cache.pop(addr, None)  # invalidate cache entry
                            l.remove(item)
                            # Re-flag: next_tx spends an output of the tx we
                            # are removing; remember that for a possible re-add.
                            self.pruned_txo[ser] = next_tx
                            self.pruned_txo_values.add(next_tx)
                    if l == []:
                        dd.pop(addr)
                    else:
                        dd[addr] = l
            # invalidate addr_bal_cache for outputs involving this tx
            d = self.txo.get(tx_hash, {})
            for addr in d:
                self._addr_bal_cache.pop(addr, None)  # invalidate cache entry
            self.txi.pop(tx_hash, None)
            self.txo.pop(tx_hash, None)
            self.tx_fees.pop(tx_hash, None)
            # SLP records are emptied (not popped) -- keys remain present.
            self.tx_tokinfo[tx_hash] = {}
            for addr, addrdict in self._slp_txo.items():
                if tx_hash in addrdict: addrdict[tx_hash] = {}
            # do this with the lock held
            self.cashacct.remove_transaction_hook(tx_hash)
    def receive_tx_callback(self, tx_hash, tx, tx_height):
        """Network callback: record a newly seen tx and mark it unverified at *tx_height*."""
        self.add_transaction(tx_hash, tx)
        self.add_unverified_tx(tx_hash, tx_height)
    def receive_history_callback(self, addr, hist, tx_fees):
        """Network callback: replace *addr*'s history with *hist*.

        Removes tx's no longer referenced by any address, registers new ones,
        updates fee info, and notifies listeners via 'on_history'.
        """
        with self.lock:
            old_hist = self.get_address_history(addr)
            for tx_hash, height in old_hist:
                if (tx_hash, height) not in hist:
                    # This (tx, height) pair disappeared from addr's history.
                    s = self.tx_addr_hist.get(tx_hash)
                    if s:
                        s.discard(addr)
                    if not s:
                        # if no address references this tx anymore, kill it
                        # from txi/txo dicts.
                        if s is not None:
                            # We won't keep empty sets around.
                            self.tx_addr_hist.pop(tx_hash)
                        # note this call doesn't actually remove the tx from
                        # storage, it merely removes it from the self.txi
                        # and self.txo dicts
                        self.remove_transaction(tx_hash)
            self._addr_bal_cache.pop(addr, None)  # unconditionally invalidate cache entry
            self._history[addr] = hist
            for tx_hash, tx_height in hist:
                # add it in case it was previously unconfirmed
                self.add_unverified_tx(tx_hash, tx_height)
                # add reference in tx_addr_hist
                self.tx_addr_hist[tx_hash].add(addr)
                # if addr is new, we have to recompute txi and txo
                tx = self.transactions.get(tx_hash)
                if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
                    self.add_transaction(tx_hash, tx)
        # Store fees
        self.tx_fees.update(tx_fees)
        if self.network:
            self.network.trigger_callback('on_history', self)
def get_slp_history(self, domain=None, validities_considered=(None,0,1)):
history = []
histories = self.get_slp_histories(domain=domain, validities_considered=validities_considered)
# Take separate token histories and flatten them, then sort them.
for token_id,t_history in histories.items():
for tx_hash, height, conf, timestamp, delta in t_history:
history.append((tx_hash, height, conf, timestamp, delta, token_id))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
return history
    def get_slp_histories(self, domain=None, validities_considered=(0,1)):
        """Return {token_id: [(tx_hash, height, conf, timestamp, delta), ...]}.

        delta is the net token quantity change for our addresses in each tx.
        Only tx's whose validity is in *validities_considered* contribute.
        """
        # Based on get_history.
        # We return a dict of histories, one history per token_id.
        # get domain
        if domain is None:
            domain = self.get_addresses()
        #1. Big iteration to find all deltas and put them in the right place.
        token_tx_deltas = defaultdict(lambda: defaultdict(int)) # defaultdict of defaultdicts of ints :)
        for addr in domain:
            h = self.get_address_history(addr)
            with self.lock:
                addrslptxo = self._slp_txo[addr]
                for tx_hash, height in h:
                    if tx_hash in self.pruned_txo.values():
                        continue
                    tti = self.tx_tokinfo.get(tx_hash)
                    if tti and tti['validity'] in validities_considered:
                        txdict = addrslptxo.get(tx_hash,{})
                        for idx,d in txdict.items():
                            # qty may be 'MINT_BATON' or None -- only count ints.
                            if isinstance(d['qty'],int):
                                token_tx_deltas[d['token_id']][tx_hash] += d['qty'] # received!
                    # scan over all txi's, trying to find if they were tokens, which tokens, and how much
                    # (note that non-SLP txes can spend (burn) SLP --- and SLP of tokenA can burn tokenB)
                    for n, _ in self.txi.get(tx_hash, {}).get(addr, ()):
                        prevtxid, prevout_str = n.rsplit(':',1)
                        tti = self.tx_tokinfo.get(prevtxid)
                        if not (tti and tti['validity'] in validities_considered):
                            continue
                        prevout = int(prevout_str)
                        d = addrslptxo.get(prevtxid,{}).get(prevout,{})
                        if isinstance(d.get('qty',None),int):
                            token_tx_deltas[d['token_id']][tx_hash] -= d['qty'] # spent!
        # 2. create history (no sorting needed since balances won't be computed)
        histories = {}
        for token_id, tx_deltas in token_tx_deltas.items():
            history = histories[token_id] = []
            for tx_hash in tx_deltas:
                delta = tx_deltas[tx_hash]
                height, conf, timestamp = self.get_tx_height(tx_hash)
                history.append((tx_hash, height, conf, timestamp, delta))
        # 3. At this point we could compute running balances, but let's not.
        return histories
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
TxHistory = namedtuple("TxHistory", "tx_hash, height, conf, timestamp, amount, balance")
    def get_history(self, domain=None, *, reverse=False):
        """Return the wallet's BCH history over *domain* as TxHistory tuples.

        Entries are oldest-first unless reverse=True. amount/balance are None
        for tx's whose delta could not be determined.
        """
        # get domain
        if domain is None:
            domain = self.get_addresses()
        # 1. Get the history of each address in the domain, maintain the
        #    delta of a tx as the sum of its deltas on domain addresses
        tx_deltas = defaultdict(int)
        for addr in domain:
            h = self.get_address_history(addr)
            for tx_hash, height in h:
                delta = self.get_tx_delta(tx_hash, addr)
                # None "poisons" a tx's delta: once any address's delta is
                # unknown, the whole tx's delta stays None.
                if delta is None or tx_deltas[tx_hash] is None:
                    tx_deltas[tx_hash] = None
                else:
                    tx_deltas[tx_hash] += delta
        # 2. create sorted history
        history = []
        for tx_hash in tx_deltas:
            delta = tx_deltas[tx_hash]
            height, conf, timestamp = self.get_tx_height(tx_hash)
            history.append((tx_hash, height, conf, timestamp, delta))
        history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
        # 3. add balance (walk newest-first, subtracting deltas as we go)
        c, u, x = self.get_balance(domain)
        balance = c + u + x
        h2 = []
        for tx_hash, height, conf, timestamp, delta in history:
            h2.append(self.TxHistory(tx_hash, height, conf, timestamp, delta, balance))
            if balance is None or delta is None:
                balance = None
            else:
                balance -= delta
        if not reverse:
            h2.reverse()
        return h2
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Node the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactoins since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because because of the Python lists, dict, etc they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab bch history
h = self.get_history(domain, reverse=True)
out = []
# grab slp history
_slp_h = self.get_slp_history(domain=domain, validities_considered=(None,0,1,2,3,4))
def fmt_slp_amt(v, decimals):
if v is None:
return '--'
if decimals == "?":
decimals = 0
return format_satoshis(v, decimal_point=int(decimals), is_diff=True)
def get_token_info(token_id):
return self.token_types.get(token_id, {
'class': '?',
'decimals': 0,
'name': 'unknown'
})
slp_h = dict((tx_hash, { \
'value': fmt_slp_amt(delta, get_token_info(token_id)['decimals']), \
'token_id': token_id, \
'name': get_token_info(token_id)['name'] \
}) for tx_hash, _, _, _, delta, token_id in _slp_h)
def get_slp_tx(tx_hash):
if slp_h.get(tx_hash) is None:
return { 'value': '--', 'name': '--', 'token_id': '--' }
return slp_h.get(tx_hash)
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
slp_info = get_slp_tx(tx_hash)
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
'slp_value' : slp_info['value'],
'slp_name' : slp_info['name'],
'slp_token_id' : slp_info['token_id']
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr == None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
    def get_tx_status(self, tx_hash, height, conf, timestamp):
        """Return (status_code, status_string) for a tx.

        Codes: 0=local/orphaned (height<0), 1=unconfirmed low-fee,
        2=unconfirmed, 3=unknown/just-confirmed, 4..9 = confirmed with
        min(conf, 6) confirmations. For status >= 4 the string is the
        formatted timestamp.
        """
        if conf == 0:
            tx = self.transactions.get(tx_hash)
            if not tx:
                return 3, 'unknown'
            fee = self.tx_fees.get(tx_hash)
            # we disable fee estimates in BCH for now.
            #if fee and self.network and self.network.config.has_fee_estimates():
            #    size = len(tx.raw)/2
            #    low_fee = int(self.network.config.dynfee(0)*size/1000)
            #    is_lowfee = fee < low_fee * 0.5
            #else:
            #    is_lowfee = False
            # and instead if it's less than 1.0 sats/B we flag it as low_fee
            try:
                # NB len(tx.raw) is 2x the byte size as it's hex encoded.
                is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0  # if less than 1.0 sats/B, complain. otherwise don't.
            except (TypeError, ValueError):  # If for some reason fee was None or invalid, just pass on through.
                is_lowfee = False
            # /
            if height < 0:
                status = 0
            elif height == 0 and is_lowfee:
                status = 1
            elif height == 0:
                status = 2
            else:
                status = 3
        else:
            status = 3 + min(conf, 6)
        time_str = format_time(timestamp) if timestamp else _("unknown")
        status_str = _(TX_STATUS[status]) if status < 4 else time_str
        return status, status_str
    def relayfee(self):
        # Delegates to the module-level relayfee() helper for our network.
        return relayfee(self.network)
    def dust_threshold(self):
        # Delegates to the module-level dust_threshold() helper for our network.
        return dust_threshold(self.network)
def check_sufficient_slp_balance(self, slpMessage, config):
if self.is_slp:
if slpMessage.transaction_type == 'SEND':
total_token_out = sum(slpMessage.op_return_fields['token_output'])
valid_token_balance, _, _, valid_unfrozen_token_balance, _ = self.get_slp_token_balance(slpMessage.op_return_fields['token_id_hex'], config)
if total_token_out > valid_token_balance:
raise NotEnoughFundsSlp()
elif total_token_out > valid_unfrozen_token_balance:
raise NotEnoughUnfrozenFundsSlp()
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None, *, mandatory_coins=[]):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
for item in mandatory_coins:
self.add_input_info(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [self.get_addresses()[0]]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr,
mandatory_coins=mandatory_coins)
else:
inputs = mandatory_coins + inputs
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes=tx.estimated_size()
fee_in_satoshis=tx.get_fee()
sats_per_byte=fee_in_satoshis/tx_in_bytes
if (sats_per_byte > 50):
raise ExcessiveFee()
return
# Sort the inputs and outputs deterministically
if not mandatory_coins:
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def make_unsigned_transaction_for_bitcoinfiles(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info_for_bitcoinfiles(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [inputs[0]['address']]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
# determine if this transaction should utilize all available inputs
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes=tx.estimated_size()
fee_in_satoshis=tx.get_fee()
sats_per_byte=fee_in_satoshis/tx_in_bytes
if (sats_per_byte > 50):
raise ExcessiveFee()
return
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
    def is_frozen(self, addr):
        """ Address-level frozen query. Note: this is set/unset independent of
        'coin' level freezing. """
        assert isinstance(addr, Address)
        return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
    def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
        """Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
        is a (possibly mixed) list of either "prevout:n" strings and/or
        coin-dicts as returned from get_utxos(). Note that if passing prevout:n
        strings as input, 'is_mine()' status is not checked for the specified
        coin. Also note that coin-level freezing is set/unset independent of
        address-level freezing, however both must be satisfied for a coin to be
        defined as spendable.

        The `temporary` flag only applies if `freeze = True`. In that case,
        freezing coins will only affect the in-memory-only frozen set, which
        doesn't get saved to storage. This mechanism was added so that plugins
        (such as CashFusion) have a mechanism for ephemeral coin freezing that
        doesn't persist across sessions.

        Note that setting `freeze = False` effectively unfreezes both the
        temporary and the permanent frozen coin sets all in 1 call. Thus after a
        call to `set_frozen_coin_state(utxos, False), both the temporary and the
        persistent frozen sets are cleared of all coins in `utxos`."""
        # Freezing targets one set (perma or temp); unfreezing clears both.
        add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
        def add(utxo):
            add_set.add(utxo)
        def discard(utxo):
            self.frozen_coins.discard(utxo)
            self.frozen_coins_tmp.discard(utxo)
        apply_operation = add if freeze else discard
        original_size = len(self.frozen_coins)
        with self.lock:
            ok = 0  # count of utxos successfully processed (returned to caller)
            for utxo in utxos:
                if isinstance(utxo, str):
                    apply_operation(utxo)
                    ok += 1
                elif isinstance(utxo, dict):
                    # Note: we could do an is_mine check here for each coin dict here,
                    # but since all code paths leading to this branch always pass valid
                    # coins that are "mine", we removed the check to save CPU cycles.
                    #
                    # So an O(M logN) algorithm becomes O(M) without the is_mine check,
                    # where M = number of coins and N = number of addresses.
                    txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
                    apply_operation(txo)
                    utxo['is_frozen_coin'] = bool(freeze)
                    ok += 1
            if original_size != len(self.frozen_coins):
                # Performance optimization: only set storage if the perma-set
                # changed.
                self.storage.put('frozen_coins', list(self.frozen_coins))
        return ok
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def _slp_callback_on_status(self, event, *args):
if self.is_slp and args[0] == 'connected':
self.activate_slp()
    def start_threads(self, network):
        """Attach the wallet to *network* and start its network-dependent
        subsystems (SLP validators, txo cleaner, SPV verifier, synchronizer,
        cash accounts). With no network, verifier/synchronizer stay None."""
        self.network = network
        if self.network:
            if self.is_slp:
                # Note: it's important that SLP data structures are defined
                # before the network (SPV/Synchronizer) callbacks are installed
                # otherwise we may receive a tx from the network thread
                # before SLP objects are properly constructed.
                self.slp_graph_0x01 = slp_validator_0x01.shared_context
                self.slp_graph_0x01_nft = slp_validator_0x01_nft1.shared_context_nft1
                self.activate_slp()
                self.network.register_callback(self._slp_callback_on_status, ['status'])
            self.start_pruned_txo_cleaner_thread()
            self.prepare_for_verifier()
            self.verifier = SPV(self.network, self)
            self.synchronizer = Synchronizer(self, network)
            finalization_print_error(self.verifier)
            finalization_print_error(self.synchronizer)
            network.add_jobs([self.verifier, self.synchronizer])
            self.cashacct.start(self.network) # start cashacct network-dependent subsystem, nework.add_jobs, etc
        else:
            self.verifier = None
            self.synchronizer = None
    def stop_threads(self):
        """Detach from the network: release verifier/synchronizer, stop the
        txo cleaner and SLP jobs, then persist height and network state."""
        if self.network:
            # Note: synchronizer and verifier will remove themselves from the
            # network thread the next time they run, as a result of the below
            # release() calls.
            # It is done this way (as opposed to an immediate clean-up here)
            # because these objects need to do their clean-up actions in a
            # thread-safe fashion from within the thread where they normally
            # operate on their data structures.
            self.cashacct.stop()
            self.synchronizer.release()
            self.verifier.release()
            self.synchronizer = None
            self.verifier = None
            self.stop_pruned_txo_cleaner_thread()
            # Now no references to the synchronizer or verifier
            # remain so they will be GC-ed
            if self.is_slp:
                # NB: it's important this be done here after network
                # callbacks are torn down in the above lines.
                self.network.unregister_callback(self._slp_callback_on_status)
                jobs_stopped = self.slp_graph_0x01.stop_all_for_wallet(self, timeout=2.0)
                self.print_error("Stopped", len(jobs_stopped), "slp_0x01 jobs")
                #jobs_stopped = self.slp_graph_0x01_nft.stop_all_for_wallet(self)
                #self.print_error("Stopped", len(jobs_stopped), "slp_0x01_nft jobs")
                self.slp_graph_0x01_nft.kill()
                self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None
        self.storage.put('stored_height', self.get_local_height())
        self.save_network_state()
    def save_network_state(self):
        """Save all the objects which are updated by the network thread. This is called
        periodically by the Android app during long synchronizations.
        """
        with self.lock:
            self.save_addresses()
            self.save_transactions()
            self.save_verified_tx()  # implicit cashacct.save
            self.storage.put('frozen_coins', list(self.frozen_coins))
            # Single write at the end so storage is flushed atomically.
            self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
    def wait_until_synchronized(self, callback=None, *, timeout=None):
        """Block until the wallet is fully synchronized (or, with no network,
        run a single local synchronize()). *callback*, if given, receives
        progress strings. Raises TimeoutException if *timeout* seconds pass."""
        tstart = time.time()
        def check_timed_out():
            if timeout is not None and time.time() - tstart > timeout:
                raise TimeoutException()
        def wait_for_wallet():
            # Force a resync and poll until the synchronizer catches up.
            self.set_up_to_date(False)
            while not self.is_up_to_date():
                if callback:
                    msg = "%s\n%s %d"%(
                        _("Please wait..."),
                        _("Addresses generated:"),
                        len(self.addresses(True)))
                    callback(msg)
                time.sleep(0.1)
                check_timed_out()
        def wait_for_network():
            while not self.network.is_connected():
                if callback:
                    msg = "%s \n" % (_("Connecting..."))
                    callback(msg)
                time.sleep(0.1)
                check_timed_out()
        # wait until we are connected, because the user
        # might have selected another server
        if self.network:
            wait_for_network()
            wait_for_wallet()
        else:
            self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
    def is_empty(self, address):
        """Return True iff *address* has zero balance (all components)."""
        assert isinstance(address, Address)
        return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
break # ok, it's old. not need to keep looping
return age > age_limit
    def cpfp(self, tx, fee, sign_schnorr=None):
        ''' Build a child-pays-for-parent transaction spending the first
        output of *tx* that belongs to us, paying *fee* out of its value back
        to the same address. Returns None if no suitable output/coin exists.

        sign_schnorr is a bool or None for auto '''
        sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
        txid = tx.txid()
        # for/else: find the first output paying to one of our addresses;
        # `i`, `address` and `value` from the matching iteration are used below.
        for i, o in enumerate(tx.outputs()):
            otype, address, value = o
            if otype == TYPE_ADDRESS and self.is_mine(address):
                break
        else:
            return
        coins = self.get_addr_utxo(address)
        item = coins.get(txid+':%d'%i)
        if not item:
            return
        self.add_input_info(item)
        inputs = [item]
        outputs = [(TYPE_ADDRESS, address, value - fee)]
        locktime = self.get_local_height()
        # note: no need to call tx.BIP_LI01_sort() here - single input/output
        return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def add_input_info_for_bitcoinfiles(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
self.add_input_sig_info(txin, address)
    def can_sign(self, tx):
        """Return True iff at least one of our keystores can sign *tx*.
        As a side effect, primes BIP32 keystores with "wallet advice"
        (address -> derivation) for 'fd'-type inputs."""
        if tx.is_complete():
            return False
        for k in self.get_keystores():
            # setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
            # by giving them the sequence number ahead of time
            if isinstance(k, BIP32_KeyStore):
                for txin in tx.inputs():
                    for x_pubkey in txin['x_pubkeys']:
                        # '02'/'03'/'04' prefixes are plain pubkeys; anything
                        # else is an extended/fd-style key needing resolution.
                        if not x_pubkey[0:2] in ['02', '03', '04']:
                            _, addr = xpubkey_to_address(x_pubkey)
                            try:
                                c, index = self.get_address_index(addr)
                            except:
                                continue
                        else:
                            # NOTE(review): if scan_for_pubkey_index returns a
                            # chain other than 0/1, `addr` from a previous
                            # iteration (or none at all) is used below —
                            # presumably c is always 0 or 1 here; verify.
                            c, index = k.scan_for_pubkey_index(x_pubkey)
                            if c == 0:
                                addr = self.receiving_addresses[index]
                            elif c == 1:
                                addr = self.change_addresses[index]
                        if index is not None:
                            k.set_wallet_advice(addr, [c,index])
            if k.can_sign(tx):
                return True
        return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
    def add_hw_info(self, tx):
        """Attach extra data hardware keystores need to sign *tx*: previous
        transactions for each input (when required) and derivation/xpub info
        for our own change outputs."""
        # add previous tx for hw wallets, if needed and not already there
        if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
            for txin in tx.inputs():
                if 'prev_tx' not in txin:
                    txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
        # add output info for hw wallets
        info = {}
        xpubs = self.get_master_public_keys()
        for txout in tx.outputs():
            _type, addr, amount = txout
            if self.is_change(addr):
                index = self.get_address_index(addr)
                pubkeys = self.get_public_keys(addr)
                # sort xpubs using the order of pubkeys
                sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
                info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
        tx.output_info = info
    def sign_transaction(self, tx, password, *, use_cache=False, anyonecanpay=False):
        """ Sign a transaction, requires password (may be None for password-less
        wallets). If `use_cache` is enabled then signing will be much faster.

        For transactions with N inputs and M outputs, calculating all sighashes
        takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
        the cache.

        Warning: If you modify non-signature parts of the transaction
        afterwards, do not use `use_cache`! """

        if self.is_watching_only():
            return
        # add input values for signing
        self.add_input_values_to_tx(tx)
        # hardware wallets require extra info
        if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
            self.add_hw_info(tx)
        # sign
        for k in self.get_keystores():
            try:
                if k.can_sign(tx):
                    k.sign_transaction(tx, password, use_cache=use_cache, anyonecanpay=anyonecanpay)
            except UserCancelled:
                # User declined on a hardware device: try remaining keystores.
                continue
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
    def has_payment_request(self, addr):
        ''' Returns True iff Address addr has any extant payment requests
        (even if expired), False otherwise. '''
        assert isinstance(addr, Address)
        return bool(self.receive_requests.get(addr))
    def get_payment_request(self, addr, config):
        """Return a display-ready copy of the payment request stored for
        *addr*, augmented with a payment URI, status/confirmations, and (when
        a requests_dir is configured and the BIP70 file exists) request/index
        URLs plus websocket info. Returns None if no request exists."""
        assert isinstance(addr, Address)
        r = self.receive_requests.get(addr)
        if not r:
            return
        out = copy.copy(r)
        addr_text = addr.to_ui_string()
        # SLP token requests carry raw token amounts; BCH requests are
        # formatted as satoshi strings.
        if r.get('token_id', None):
            amount_text = str(r['amount'])
        else:
            amount_text = format_satoshis(r['amount'])
        if addr.FMT_UI == addr.FMT_CASHADDR:
            out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
                                                  addr_text, amount_text)
        elif addr.FMT_UI == addr.FMT_SLPADDR:
            if r.get('token_id', None):
                token_id = r['token_id']
                out['URI'] = '{}:{}?amount={}-{}'.format(networks.net.SLPADDR_PREFIX,
                                                      addr_text, amount_text, token_id)
            else:
                out['URI'] = '{}:{}?amount={}'.format(networks.net.SLPADDR_PREFIX,
                                                    addr_text, amount_text)
        status, conf = self.get_request_status(addr)
        out['status'] = status
        if conf is not None:
            out['confirmations'] = conf
        # check if bip70 file exists
        rdir = config.get('requests_dir')
        if rdir:
            key = out.get('id', addr.to_storage_string())
            # Requests are sharded on disk by the first two chars of the key.
            path = os.path.join(rdir, 'req', key[0], key[1], key)
            if os.path.exists(path):
                baseurl = 'file://' + rdir
                rewrite = config.get('url_rewrite')
                if rewrite:
                    baseurl = baseurl.replace(*rewrite)
                out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
                out['URI'] += '&r=' + out['request_url']
                if not 'index_url' in out:
                    out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
                websocket_server_announce = config.get('websocket_server_announce')
                if websocket_server_announce:
                    out['websocket_server'] = websocket_server_announce
                else:
                    out['websocket_server'] = config.get('websocket_server', 'localhost')
                websocket_port_announce = config.get('websocket_port_announce')
                if websocket_port_announce:
                    out['websocket_port'] = websocket_port_announce
                else:
                    out['websocket_port'] = config.get('websocket_port', 9999)
        return out
    def get_request_status(self, key):
        """Return (status, conf) for the payment request stored under *key*.
        Status is one of PR_PAID / PR_UNPAID / PR_EXPIRED / PR_UNKNOWN; conf
        is the confirmation count when paid, else None."""
        r = self.receive_requests.get(key)
        if r is None:
            return PR_UNKNOWN
        address = r['address']
        amount = r.get('amount')
        timestamp = r.get('time', 0)
        # Defensive: ignore malformed (non-int) timestamps/expirations.
        if timestamp and type(timestamp) != int:
            timestamp = 0
        expiration = r.get('exp')
        if expiration and type(expiration) != int:
            expiration = 0
        conf = None
        if amount:
            if self.up_to_date:
                paid, conf = self.get_payment_status(address, amount)
                status = PR_PAID if paid else PR_UNPAID
                if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
                    status = PR_EXPIRED
            else:
                # Not synced yet -- we can't tell whether it was paid.
                status = PR_UNKNOWN
        else:
            status = PR_UNKNOWN
        return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, token_id=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if token_id:
d['token_id'] = token_id
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self, write=True):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.save_labels() # In case address labels were set or cleared.
if write:
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
    def add_payment_request(self, req, config, set_address_label=True, save=True):
        """Store payment request *req* (keyed by its address), optionally set
        the address label from the memo, and — when a requests_dir is
        configured — write BIP70 (.bip70-style) and JSON files to disk."""
        addr = req['address']
        addr_text = addr.to_storage_string()
        amount = req['amount']
        message = req['memo']
        self.receive_requests[addr] = req
        if save:
            self.save_payment_requests()
        if set_address_label:
            self.set_label(addr_text, message, save=save) # should be a default label

        rdir = config.get('requests_dir')
        if rdir and amount is not None:
            key = req.get('id', addr_text)
            pr = paymentrequest.make_request(config, req)
            # Requests are sharded on disk by the first two chars of the key.
            path = os.path.join(rdir, 'req', key[0], key[1], key)
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except OSError as exc:
                    # Tolerate a concurrent mkdir; re-raise anything else.
                    if exc.errno != errno.EEXIST:
                        raise
            with open(os.path.join(path, key), 'wb') as f:
                f.write(pr.SerializeToString())
            # reload (picks up URI/status fields added by get_payment_request)
            req = self.get_payment_request(addr, config)
            req['address'] = req['address'].to_ui_string()
            with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
                f.write(json.dumps(req))
    def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True,
                               save=True):
        """Delete the payment request for *addr* (Address or string), its
        on-disk files (if any), and optionally its auto-set address label.
        Returns True if a request existed and was removed, else False."""
        if isinstance(addr, str):
            addr = Address.from_string(addr)
        if addr not in self.receive_requests:
            return False
        r = self.receive_requests.pop(addr)
        if clear_address_label_if_no_tx and not self.get_address_history(addr):
            memo = r.get('memo')
            # clear it only if the user didn't overwrite it with something else
            if memo and memo == self.labels.get(addr.to_storage_string()):
                self.set_label(addr, None, save=save)

        rdir = config.get('requests_dir')
        if rdir:
            key = r.get('id', addr.to_storage_string())
            # Remove both the JSON mirror and the raw request file.
            for s in ['.json', '']:
                n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
                if os.path.exists(n):
                    os.unlink(n)
        if save:
            self.save_payment_requests()
        return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
def f(x):
try:
addr = x['address']
return self.get_address_index(addr) or addr
except:
return addr
return sorted(m, key=f)
except TypeError:
# See issue #1231 -- can get inhomogenous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
    def get_fingerprint(self):
        """Abstract: subclasses return a wallet-identifying fingerprint."""
        raise NotImplementedError()
    def can_import_privkey(self):
        """Base default: private keys cannot be imported (subclasses override)."""
        return False
    def can_import_address(self):
        """Base default: addresses cannot be imported (subclasses override)."""
        return False
    def can_delete_address(self):
        """Base default: addresses cannot be deleted (subclasses override)."""
        return False
    def is_multisig(self):
        """Return True for multisig wallets (base default: False)."""
        # Subclass Multisig_Wallet overrides this
        return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
    def add_address(self, address):
        """Register *address* with the wallet: invalidate caches, create an
        empty history slot, subscribe it on the synchronizer, and notify the
        cash-accounts subsystem."""
        assert isinstance(address, Address)
        self._addr_bal_cache.pop(address, None)  # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
        self.invalidate_address_set_cache()
        if address not in self._history:
            self._history[address] = []
        if self.synchronizer:
            self.synchronizer.add(address)
        self.cashacct.on_address_addition(address)
    def has_password(self):
        """Return True iff this wallet's storage is password-protected."""
        return self.storage.get('use_encryption', False)
    def check_password(self, password):
        """Raise (via the keystore) if *password* is incorrect."""
        self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
    def rebuild_history(self):
        ''' This is an advanced function for use in the GUI when the user
        wants to resynch the whole wallet from scratch, preserving labels
        and contacts.

        Stops network threads, clears tx/SLP/history state (and, for standard
        wallets, trims addresses back to the gap limit), saves, then restarts
        the threads so the synchronizer re-downloads everything. '''
        if not self.network or not self.network.is_connected():
            raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
        if not self.synchronizer or not self.verifier:
            raise RuntimeError('Refusing to rebuild a stopped wallet!')
        network = self.network
        self.stop_threads()
        do_addr_save = False
        with self.lock:
            self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
            self._slp_txo.clear(); self.slpv1_validity.clear(); self.token_types.clear(); self.tx_tokinfo.clear()
            self.clear_history()
            if isinstance(self, Standard_Wallet):
                # reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
                self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
                do_addr_save = True
            self.invalidate_address_set_cache()
        if do_addr_save:
            self.save_addresses()
        self.save_transactions()
        self.save_verified_tx()  # implicit cashacct.save
        self.storage.write()
        self.start_threads(network)
        self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is compatible.
`reason` is an optional list where you would like a translated string
of why Schnorr isn't possible placed (on False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
    def is_schnorr_enabled(self) -> bool:
        ''' Returns whether schnorr is enabled AND possible for this wallet.
        Schnorr is enabled per-wallet. '''
        if not self.is_schnorr_possible():
            # Short-circuit out of here -- it's not even possible with this
            # wallet type.
            return False
        ss_cfg = self.storage.get('sign_schnorr', None)
        if ss_cfg is None:
            # Schnorr was not set in config; figure out intelligent defaults,
            # preferring Schnorr if it's at least as fast as ECDSA (based on
            # which libs user has installed). Note for watching-only we default
            # to off if unspecified regardless, to not break compatibility
            # with air-gapped signing systems that have older EC installed
            # on the signing system. This is to avoid underpaying fees if
            # signing system doesn't use Schnorr. We can turn on default
            # Schnorr on watching-only sometime in the future after enough
            # time has passed that air-gapped systems are unlikely to not
            # have Schnorr enabled by default.
            # TO DO: Finish refactor of txn serialized format to handle this
            # case better!
            if (not self.is_watching_only()
                    and (schnorr.has_fast_sign()
                         or not ecc_fast.is_using_fast_ecc())):
                # Prefer Schnorr, all things being equal.
                # - If not watching-only & schnorr possible AND
                #   - Either Schnorr is fast sign (native, ABC's secp256k1),
                #     so use it by default
                #   - Or both ECDSA & Schnorr are slow (non-native);
                #     so use Schnorr in that case as well
                ss_cfg = 2
            else:
                # This branch is reached if Schnorr is slow but ECDSA is fast
                # (core's secp256k1 lib was found which lacks Schnorr) -- so we
                # default it to off. Also if watching only we default off.
                ss_cfg = 0
        # ss_cfg: 0 = off, 2 = on (1 reserved for a future 'ask per tx').
        return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
''' Enable schnorr for this wallet. Note that if Schnorr is not possible,
(due to missing libs or invalid wallet type) is_schnorr_enabled() will
still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def get_change_addresses(self):
return []
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class Slp_ImportedAddressWallet(ImportedAddressWallet):
# Watch-only wallet of imported addresses
wallet_type = 'slp_imported_addr'
def __init__(self, storage):
self._sorted = None
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Slp_ImportedPrivkeyWallet(ImportedPrivkeyWallet):
# wallet made of imported private keys
wallet_type = 'slp_imported_privkey'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
Abstract_Wallet.__init__(self, storage)
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
if self.storage.get('auto_maintain_gap', True):
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def __init__(self, storage):
super().__init__(storage)
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Slp_Standard_Wallet(Standard_Wallet):
wallet_type = 'slp_standard'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
return not any([not k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'slp_standard', 'multisig', 'slp_multisig', 'imported', 'slp_imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'slp_standard': Slp_Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'slp_imported_privkey': Slp_ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
'slp_imported_addr': Slp_ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
# Convert 'bip39-slp' wallet type to 'slp_standard' wallet type
if storage.get('wallet_type', '') == 'bip39-slp' or storage.get('wallet_type', '') == 'standard_slp':
storage.put('wallet_type', 'slp_standard')
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
elif keystore.is_private_key_list(text,):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
| 43.686571
| 234
| 0.582876
|
4a046420cd0114f5633a25bd95664c666f577e31
| 1,253
|
py
|
Python
|
Cryptography/CAPTCHA Simulation/1411113_CAPTCHA.py
|
kartik2112/Interesting-Codes
|
423c93b7b2b66ab46d3c188e357d1f8af31251b2
|
[
"MIT"
] | null | null | null |
Cryptography/CAPTCHA Simulation/1411113_CAPTCHA.py
|
kartik2112/Interesting-Codes
|
423c93b7b2b66ab46d3c188e357d1f8af31251b2
|
[
"MIT"
] | null | null | null |
Cryptography/CAPTCHA Simulation/1411113_CAPTCHA.py
|
kartik2112/Interesting-Codes
|
423c93b7b2b66ab46d3c188e357d1f8af31251b2
|
[
"MIT"
] | null | null | null |
# Prerequisite:
# 1.
# captcha must be installed before running this program
# To do this type in terminal / Powershell:
# pip3 install captcha
# If this doesnt work
# Type these:
# python3
# import pip
# pip.main(['install','captcha'])
# 2.
# Image module must also be installed
# To do this type in terminal / Powershell:
# pip3 install Image
# If this doesnt work
# Type these:
# python3
# import pip
# pip.main(['install','Image'])
from io import BytesIO
from captcha.audio import AudioCaptcha
from captcha.image import ImageCaptcha
from PIL import Image
import random
R1 = random.randint(4,10)
str1 = ""
while R1>0:
R2 = random.randint(4,10)
if R2<6:
R3 = str(random.randint(4,10))
else:
R3 = chr(ord('A')+random.randint(0,25))
str1 += R3
R1-=1
# print("Generated Captcha is:",str1)
image = ImageCaptcha(fonts=['arial.ttf','times.ttf'])
data = image.generate(str1)
assert isinstance(data, BytesIO)
image.write(str1, 'out.png')
Image.open('out.png').show()
UInputCaptcha = input("Captcha Calculated and Image generated from text.\nEnter captcha text: ")
if UInputCaptcha == str1:
print("Correct captcha entered!")
else:
print("Incorrect captcha entered!!!")
| 22.781818
| 96
| 0.674381
|
4a046501f769bc555dd395e891b322e311e3c8d6
| 6,324
|
py
|
Python
|
backend/test/test_models.py
|
romic-kid/project-kfsystem
|
3ed63c5c063493dc0dd7e0c4b62ba7481bf63311
|
[
"BSD-3-Clause"
] | 2
|
2018-03-22T08:42:41.000Z
|
2018-07-03T09:22:28.000Z
|
backend/test/test_models.py
|
romic-kid/project-kfsystem
|
3ed63c5c063493dc0dd7e0c4b62ba7481bf63311
|
[
"BSD-3-Clause"
] | 2
|
2019-04-25T02:10:10.000Z
|
2022-03-02T01:11:28.000Z
|
backend/test/test_models.py
|
romic-kid/project-kfsystem
|
3ed63c5c063493dc0dd7e0c4b62ba7481bf63311
|
[
"BSD-3-Clause"
] | 1
|
2019-03-14T03:13:05.000Z
|
2019-03-14T03:13:05.000Z
|
from django.test import TestCase
from django.test import Client
from ..models import Admin, CustomerService, ChattingLog, SerialNumber, EnterpriseDisplayInfo, RobotInfo
from django.utils import timezone
class TestModelAdmin(TestCase):
def test(self):
time_now = timezone.now()
Admin.objects.create(id=1, email="admin1@test.com", nickname="a_nick1", password="a_pass1", web_url="a_weburl1", widget_url="a_weidgeturl1", mobile_url="a_mobileurl1", communication_key="a_key1", vid="a_vid1", vid_createtime=time_now)
instance = Admin.objects.get(id=1)
self.assertEqual(instance.id, 1)
self.assertEqual(instance.email, 'admin1@test.com')
self.assertEqual(instance.nickname, 'a_nick1')
self.assertEqual(instance.password, 'a_pass1')
self.assertEqual(instance.web_url, 'a_weburl1')
self.assertEqual(instance.widget_url, 'a_weidgeturl1')
self.assertEqual(instance.mobile_url, 'a_mobileurl1')
self.assertEqual(instance.communication_key, 'a_key1')
self.assertEqual(instance.vid, 'a_vid1')
class TestModelCs(TestCase):
def test(self):
time_now = timezone.now()
Admin.objects.create(id=1, email="admin1@test.com", nickname="a_nick1", password="a_pass1", web_url="a_weburl1", widget_url="a_weidgeturl1", mobile_url="a_mobileurl1", communication_key="a_key1", vid="a_vid1", vid_createtime=time_now)
admin_instance = Admin.objects.get(id=1)
CustomerService.objects.create(id=1, email="cs1@test.com", enterprise=admin_instance, nickname="c_nick1", password="c_pass1", is_register=False, is_online=False, connection_num=0, vid="c_vid1", vid_createtime=time_now)
cs_instance = CustomerService.objects.get(id=1)
self.assertEqual(cs_instance.id, 1)
self.assertEqual(cs_instance.email, 'cs1@test.com')
self.assertEqual(cs_instance.enterprise, admin_instance)
self.assertEqual(cs_instance.nickname, 'c_nick1')
self.assertEqual(cs_instance.password, 'c_pass1')
self.assertEqual(cs_instance.is_register, False)
self.assertEqual(cs_instance.is_online, False)
self.assertEqual(cs_instance.connection_num, 0)
self.assertEqual(cs_instance.vid, 'c_vid1')
class TestModelCl(TestCase):
def test(self):
Admin.objects.create(id=1, email="admin1@test.com", nickname="a_nick1", password="a_pass1", web_url="a_weburl1", widget_url="a_weidgeturl1", mobile_url="a_mobileurl1", communication_key="a_key1", vid="a_vid1")
admin_instance = Admin.objects.get(id=1)
CustomerService.objects.create(id=1, email="cs1@test.com", enterprise=admin_instance, nickname="c_nick1", password="c_pass1", is_register=False, is_online=False, connection_num=0, vid="c_vid1")
cs_instance = CustomerService.objects.get(id=1)
ChattingLog.objects.create(id=1, client_id='cid1', service_id=cs_instance, content='this is content', is_client=False)
cl_instance = ChattingLog.objects.get(id=1)
self.assertEqual(cl_instance.id, 1)
self.assertEqual(cl_instance.client_id, 'cid1')
self.assertEqual(cl_instance.service_id, cs_instance)
self.assertEqual(cl_instance.content, 'this is content')
self.assertEqual(cl_instance.is_client, False)
# self.assertEqual(cl_instance.time, False)
class TestModelSn(TestCase):
def test(self):
SerialNumber.objects.create(id=1, serials='s1', is_used=False)
sn_instance = SerialNumber.objects.get(id=1)
self.assertEqual(sn_instance.id, 1)
self.assertEqual(sn_instance.serials, 's1')
self.assertEqual(sn_instance.is_used, False)
'''
class TestModelImagelog(TestCase):
def test(self):
Admin.objects.create(id=1, email="admin1@test.com", nickname="a_nick1", password="a_pass1", web_url="a_weburl1", widget_url="a_weidgeturl1", mobile_url="a_mobileurl1", communication_key="a_key1", vid="a_vid1")
admin_instance = Admin.objects.get(id=1)
CustomerService.objects.create(id=1, email="cs1@test.com", enterprise=admin_instance, nickname="c_nick1", password="c_pass1", is_register=False, is_online=False, connection_num=0, vid="c_vid1")
cs_instance = CustomerService.objects.get(id=1)
ImageLog.objects.create(id=1, client_id='cid1', service_id=cs_instance, is_client=False)
image_instance = ImageLog.objects.get(id=1)
self.assertEqual(image_instance.id, 1)
self.assertEqual(image_instance.client_id, 'cid1')
self.assertEqual(image_instance.service_id, cs_instance)
self.assertEqual(image_instance.is_client, False)
'''
class TestModelEnterpriseDisplayInfo(TestCase):
def test(self):
Admin.objects.create(id=1, email="admin1@test.com", nickname="a_nick1", password="a_pass1", web_url="a_weburl1", widget_url="a_weidgeturl1", mobile_url="a_mobileurl1", communication_key="a_key1", vid="a_vid1")
admin_instance = Admin.objects.get(id=1)
EnterpriseDisplayInfo.objects.create(id=1, enterprise=admin_instance, name="info1", comment="this is info1")
displayinfo_instance = EnterpriseDisplayInfo.objects.get(id=1)
self.assertEqual(displayinfo_instance.id, 1)
self.assertEqual(displayinfo_instance.enterprise, admin_instance)
self.assertEqual(displayinfo_instance.name, "info1")
self.assertEqual(displayinfo_instance.comment, "this is info1")
class TestModelRobotInfo(TestCase):
def test(self):
Admin.objects.create(id=1, email="admin1@test.com", nickname="a_nick1", password="a_pass1", web_url="a_weburl1", widget_url="a_weidgeturl1", mobile_url="a_mobileurl1", communication_key="a_key1", vid="a_vid1")
admin_instance = Admin.objects.get(id=1)
RobotInfo.objects.create(id=1, enterprise=admin_instance, question="question1", answer="this is answer1", keyword='keyword1', weight=0)
robotinfo_instance = RobotInfo.objects.get(id=1)
self.assertEqual(robotinfo_instance.id, 1)
self.assertEqual(robotinfo_instance.enterprise, admin_instance)
self.assertEqual(robotinfo_instance.question, "question1")
self.assertEqual(robotinfo_instance.answer, "this is answer1")
self.assertEqual(robotinfo_instance.keyword, "keyword1")
self.assertEqual(robotinfo_instance.weight, 0)
| 56.972973
| 242
| 0.726913
|
4a04661abec5965df908fff5697dcff4e2d50b44
| 463
|
py
|
Python
|
server/utils/frozen.py
|
griseduardo/Facial-Recognition-Database-Management-System
|
5d451791cb131164930cc4f886ec6300d88a8c67
|
[
"MIT"
] | 6
|
2021-09-13T13:45:49.000Z
|
2021-12-20T15:36:10.000Z
|
server/utils/frozen.py
|
griseduardo/Facial-Recognition-Database-Management-System
|
5d451791cb131164930cc4f886ec6300d88a8c67
|
[
"MIT"
] | 31
|
2021-09-11T05:52:56.000Z
|
2021-11-07T14:35:41.000Z
|
server/utils/frozen.py
|
griseduardo/Facial-Recognition-Database-Management-System
|
5d451791cb131164930cc4f886ec6300d88a8c67
|
[
"MIT"
] | 2
|
2021-09-13T04:08:05.000Z
|
2021-09-26T04:06:53.000Z
|
import os
import sys
def isfrozen():
"""Check if program is running from a Pyinstaller bundle."""
if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
return True
else:
return False
def frozen_basedir():
"""Get base directory of the executable when running in frozen mode."""
if isfrozen():
return os.path.dirname(sys.executable)
else:
raise Exception("Python is not running in frozen mode.")
| 24.368421
| 75
| 0.654428
|
4a0466645ae0156bcf4ad4c967955d51d7aa6e86
| 861
|
py
|
Python
|
python/1094_car_pooling.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | 17
|
2016-03-01T22:40:53.000Z
|
2021-04-19T02:15:03.000Z
|
python/1094_car_pooling.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | null | null | null |
python/1094_car_pooling.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | 3
|
2019-03-07T03:48:43.000Z
|
2020-04-05T01:11:36.000Z
|
class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
capacity_changes = []
for psg, start, end in trips:
# capacity decrease at pickup and increase at dropoff
# for overlapping intervals, we should drop-off first then pickup.
# as a result, the second element should also play a role in ordering
# Note: we must priroitize the dropoff before pickup
capacity_changes.append((start, psg))
capacity_changes.append((end, -psg))
heapq.heapify(capacity_changes)
used_capacity = 0
while capacity_changes:
timestamp, capacity_delta = heapq.heappop(capacity_changes)
used_capacity += capacity_delta
if used_capacity > capacity:
return False
return True
| 31.888889
| 82
| 0.620209
|
4a0466ed4ff054354ea0048f6e7d8fbf51957214
| 18,873
|
py
|
Python
|
hphp/hack/test/integration/test_save_state.py
|
compositor/hhvm
|
ac037244608e96b461b2d95cbc1ab95cbfa7cc88
|
[
"PHP-3.01",
"Zend-2.0"
] | null | null | null |
hphp/hack/test/integration/test_save_state.py
|
compositor/hhvm
|
ac037244608e96b461b2d95cbc1ab95cbfa7cc88
|
[
"PHP-3.01",
"Zend-2.0"
] | null | null | null |
hphp/hack/test/integration/test_save_state.py
|
compositor/hhvm
|
ac037244608e96b461b2d95cbc1ab95cbfa7cc88
|
[
"PHP-3.01",
"Zend-2.0"
] | null | null | null |
# pyre-strict
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import shlex
import shutil
import sqlite3
import stat
import sys
import time
import unittest
from typing import Optional, TextIO
import common_tests
import hierarchy_tests
from hh_paths import hh_client
from saved_state_test_driver import (
SavedStateClassicTestDriver,
SavedStateTestDriver,
SaveStateResult,
)
from test_case import TestCase
def write_echo_json(f: TextIO, obj: object) -> None:
f.write("echo %s\n" % shlex.quote(json.dumps(obj)))
class LazyInitTestDriver(SavedStateTestDriver):
def write_local_conf(self) -> None:
with open(os.path.join(self.repo_dir, "hh.conf"), "w") as f:
f.write(
r"""
# some comment
use_mini_state = true
use_watchman = true
watchman_subscribe_v2 = true
lazy_decl = true
lazy_parse = true
lazy_init2 = true
incremental_init = true
enable_fuzzy_search = false
max_workers = 2
"""
)
class LazyInitCommonTests(common_tests.CommonTests):
@classmethod
def get_test_driver(cls) -> LazyInitTestDriver:
return LazyInitTestDriver()
class LazyInitHeirarchyTests(hierarchy_tests.HierarchyTests):
@classmethod
def get_test_driver(cls) -> LazyInitTestDriver:
return LazyInitTestDriver()
class SavedStateCommonTests(common_tests.CommonTests):
@classmethod
def get_test_driver(cls) -> SavedStateTestDriver:
return SavedStateTestDriver()
class SavedStateBarebonesTestsClassic(common_tests.BarebonesTests):
@classmethod
def get_test_driver(cls) -> SavedStateClassicTestDriver:
return SavedStateClassicTestDriver()
class SavedStateHierarchyTests(hierarchy_tests.HierarchyTests):
@classmethod
def get_test_driver(cls) -> SavedStateTestDriver:
return SavedStateTestDriver()
class SavedStateTests(TestCase[SavedStateTestDriver]):
    """Integration tests for dumping hh_server saved states, loading them,
    and incrementally regenerating them after edits.

    Each test drives a real hh_server instance through self.test_driver;
    expected error lists are matched verbatim against `hh_client` output.
    """
    @classmethod
    def get_test_driver(cls) -> SavedStateTestDriver:
        return SavedStateTestDriver()
    def test_hhconfig_change(self) -> None:
        """
        Start hh_server, then change .hhconfig and check that the server
        restarts itself
        """
        self.test_driver.start_hh_server()
        self.test_driver.check_cmd(["No errors!"])
        with open(os.path.join(self.test_driver.repo_dir, ".hhconfig"), "w") as f:
            f.write(
                r"""
# some comment
assume_php = true
"""
            )
        # Server may take some time to kill itself.
        time.sleep(2)
        # The sleep(2) above also almost-always ensures another race condition
        # goes the way we want: The informant-directed restart doesn't happen
        # *during* processing of a new client connection. The ambiguity of that
        # situation (whether or not the newly-connected client did read the
        # new hhconfig file contents or not) means that the Monitor can't safely
        # start a new server instance until the *next* client connects. Just in
        # case the race doesn't go the way we want, add another "check_cmd"
        # call here to force the Monitor into the state we want.
        self.test_driver.check_cmd(None, assert_loaded_saved_state=False)
        # this should start a new server
        self.test_driver.check_cmd(["No errors!"])
        # check how the old one exited
        log_file = (
            self.test_driver.proc_call(
                [hh_client, "--logname", self.test_driver.repo_dir]
            )[0].strip()
            + ".old"
        )
        with open(log_file) as f:
            logs = f.read()
        self.assertIn(".hhconfig changed in an incompatible way", logs)
    def test_watchman_timeout(self) -> None:
        """A watchman binary that stalls past watchman_init_timeout should
        surface a timeout in the server logs."""
        with open(os.path.join(self.test_driver.repo_dir, "hh.conf"), "a") as f:
            f.write(
                r"""
watchman_init_timeout = 1
"""
            )
        # Shadow the real watchman with a script that sleeps longer than the
        # configured 1s timeout.
        with open(os.path.join(self.test_driver.bin_dir, "watchman"), "w") as f:
            f.write(r"""sleep 2""")
            os.fchmod(f.fileno(), stat.S_IRWXU)
        self.test_driver.run_check()
        # Stop the server, ensuring that its logs get flushed
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        self.assertIn("Watchman_sig.Types.Timeout", self.test_driver.get_server_logs())
    def test_save_partial_state(self) -> None:
        """Partial saves should record only the dependency edges reachable
        from the requested files (or file-prefix ranges)."""
        self.test_driver.start_hh_server()
        result1 = self.test_driver.save_partial(
            files_to_check=["class_1.php"], assert_edges_added=True, filename="partial1"
        )
        self.assertTrue(
            result1.returned_values.get_edges_added() == 0,
            "class_1 has no dependencies",
        )
        result2 = self.test_driver.save_partial(
            files_to_check=["class_2.php"], assert_edges_added=True, filename="partial2"
        )
        assert result2.returned_values.get_edges_added() > 0
        result3 = self.test_driver.save_partial(
            files_to_check=["class_3.php"], assert_edges_added=True, filename="partial3"
        )
        assert result3.returned_values.get_edges_added() > 0
        # Saving all three files adds the same edges as class_3 alone —
        # class_3's dependencies subsume the others.
        result4 = self.test_driver.save_partial(
            files_to_check=["class_1.php", "class_2.php", "class_3.php"],
            assert_edges_added=True,
            filename="partial4",
        )
        assert (
            result4.returned_values.get_edges_added()
            == result3.returned_values.get_edges_added()
        )
        # A prefix range [class_1, class_3) covers class_1 and class_2 only.
        result5 = self.test_driver.save_partial(
            files_to_check=[
                {"from_prefix_incl": "class_1.php", "to_prefix_excl": "class_3.php"}
            ],
            assert_edges_added=True,
            filename="partial5",
        )
        assert (
            result5.returned_values.get_edges_added()
            == result2.returned_values.get_edges_added()
        )
    def test_incrementally_generated_saved_state(self) -> None:
        """A state dumped after adding class_3b.php must include its edges;
        a server started from the older dump misses class_3b's error."""
        old_saved_state: SaveStateResult = self.test_driver.dump_saved_state()
        new_file = os.path.join(self.test_driver.repo_dir, "class_3b.php")
        self.add_file_that_depends_on_class_a(new_file)
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=False)
        new_saved_state: SaveStateResult = self.test_driver.dump_saved_state(
            assert_edges_added=True
        )
        assert new_saved_state.returned_values.get_edges_added() > 0
        self.change_return_type_on_base_class(
            os.path.join(self.test_driver.repo_dir, "class_1.php")
        )
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
                "{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
                "  {root}class_3b.php:4:26,28: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ],
            assert_loaded_saved_state=False,
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        # Start server with the original saved state. Will be missing the
        # second error because of the missing edge.
        self.test_driver.start_hh_server(
            changed_files=["class_1.php"], saved_state_path=old_saved_state.path
        )
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ]
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        # Start another server with the new saved state. Will have both errors.
        self.test_driver.start_hh_server(
            changed_files=["class_1.php"], saved_state_path=new_saved_state.path
        )
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
                "{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
                "  {root}class_3b.php:4:26,28: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ]
        )
    def test_incrementally_generated_saved_state_after_loaded_saved_state(self) -> None:
        # Same as the above test, except we begin the test by starting up
        # a Hack Server that loads a saved state.
        self.test_driver.start_hh_server()
        # Hack server is now started with a saved state
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=True)
        old_saved_state = self.test_driver.dump_saved_state()
        new_file = os.path.join(self.test_driver.repo_dir, "class_3b.php")
        self.add_file_that_depends_on_class_a(new_file)
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=True)
        new_saved_state = self.test_driver.dump_saved_state(assert_edges_added=True)
        assert new_saved_state.returned_values.get_edges_added() > 0
        self.change_return_type_on_base_class(
            os.path.join(self.test_driver.repo_dir, "class_1.php")
        )
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
                "{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
                "  {root}class_3b.php:4:26,28: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ],
            assert_loaded_saved_state=True,
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        # Start server with the original saved state. Will be missing the
        # second error because of the missing edge.
        self.test_driver.start_hh_server(
            changed_files=["class_1.php"], saved_state_path=old_saved_state.path
        )
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ]
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        # Start another server with the new saved state. Will have both errors.
        self.test_driver.start_hh_server(
            changed_files=["class_1.php"], saved_state_path=new_saved_state.path
        )
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
                "{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
                "  {root}class_3b.php:4:26,28: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ]
        )
    def test_incrementally_generated_saved_state_with_errors(self) -> None:
        """Saved states dumped with ignore_errors must replay their recorded
        errors when loaded, and clear them once the source is fixed."""
        # Introduce an error in "master"
        self.change_return_type_on_base_class(
            os.path.join(self.test_driver.repo_dir, "class_1.php")
        )
        saved_state_with_1_error: SaveStateResult = self.test_driver.dump_saved_state(
            ignore_errors=True
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        # Start server with the saved state, assume there are no local changes.
        self.test_driver.start_hh_server(
            changed_files=None, saved_state_path=saved_state_with_1_error.path
        )
        # We still expect that the error from the saved state shows up.
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ]
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        new_file = os.path.join(self.test_driver.repo_dir, "class_3b.php")
        self.add_file_that_depends_on_class_a(new_file)
        # Start server with the saved state, the only change is in the new file.
        self.test_driver.start_hh_server(
            changed_files=["class_3b.php"],
            saved_state_path=saved_state_with_1_error.path,
        )
        # Now we expect 2 errors - one from the saved state and one
        # from the change.
        self.test_driver.check_cmd(
            [
                "{root}class_3.php:5:12,19: Invalid return type (Typing[4110])",
                "  {root}class_3.php:4:28,30: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
                "{root}class_3b.php:5:8,15: Invalid return type (Typing[4110])",
                "  {root}class_3b.php:4:26,28: Expected `int`",
                "  {root}class_1.php:5:33,38: But got `string`",
            ],
            assert_loaded_saved_state=False,
        )
        saved_state_with_2_errors = self.test_driver.dump_saved_state(
            ignore_errors=True
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        # Let's fix the error
        self.change_return_type_on_base_class(
            filename=os.path.join(self.test_driver.repo_dir, "class_1.php"),
            type="int",
            value="11",
        )
        # Start another server with the new saved state. Will have both errors.
        self.test_driver.start_hh_server(
            changed_files=["class_1.php"],
            saved_state_path=saved_state_with_2_errors.path,
        )
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=True)
    def test_replace_state_after_saving(self) -> None:
        """With replace_state_after_saving, consecutive dumps should report
        zero new edges until the repo actually changes."""
        # Save state
        result = self.test_driver.dump_saved_state(assert_edges_added=True)
        assert result.returned_values.get_edges_added() > 0
        # Save state again - confirm the same number of edges is dumped
        result2 = self.test_driver.dump_saved_state(assert_edges_added=True)
        self.assertEqual(
            result.returned_values.get_edges_added(),
            result2.returned_values.get_edges_added(),
        )
        # Save state with the 'replace' arg
        replace_result1 = self.test_driver.dump_saved_state(
            assert_edges_added=True, replace_state_after_saving=True
        )
        self.assertEqual(
            result.returned_values.get_edges_added(),
            replace_result1.returned_values.get_edges_added(),
        )
        # Save state with the new arg - confirm there are 0 new edges
        replace_result2 = self.test_driver.dump_saved_state(
            assert_edges_added=True, replace_state_after_saving=True
        )
        self.assertEqual(replace_result2.returned_values.get_edges_added(), 0)
        # Make a change
        # Save state - confirm there are only the # of new edges
        # corresponding to the one change
        new_file = os.path.join(self.test_driver.repo_dir, "class_3b.php")
        self.add_file_that_depends_on_class_a(new_file)
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=False)
        replace_incremental = self.test_driver.dump_saved_state(
            assert_edges_added=True, replace_state_after_saving=True
        )
        assert (
            replace_incremental.returned_values.get_edges_added()
            < result.returned_values.get_edges_added()
        )
        assert replace_incremental.returned_values.get_edges_added() > 0
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=False)
    def add_file_that_depends_on_class_a(self, filename: str) -> None:
        """Write a Hack file at *filename* that calls A::foo(), creating a
        new dependency edge on class A."""
        with open(filename, "w") as f:
            f.write(
                """<?hh // strict
class UsesAToo {
    public function test() : int {
        return A::foo();
    }
}
"""
            )
    def change_return_type_on_base_class(
        self, filename: str, type: str = "string", value: str = '"Hello"'
    ) -> None:
        # Change the return type
        # NOTE: the parameter is named `type` (shadowing the builtin) and is
        # passed by keyword above, so it cannot be renamed safely.
        with open(filename, "w") as f:
            f.write(
                """<?hh // strict
class B {
    public static function foo () : %s {
        return %s;
    }
}
"""
                % (type, value)
            )
class ReverseNamingTableFallbackTestDriver(SavedStateTestDriver):
    # Driver whose hh.conf additionally sets enable_naming_table_fallback.
    enable_naming_table_fallback = True
    def write_local_conf(self) -> None:
        """Write an hh.conf enabling lazy init plus the naming-table fallback."""
        with open(os.path.join(self.repo_dir, "hh.conf"), "w") as f:
            f.write(
                r"""
# some comment
use_mini_state = true
use_watchman = true
watchman_subscribe_v2 = true
lazy_decl = true
lazy_parse = true
lazy_init2 = true
enable_naming_table_fallback = true
"""
            )
class ReverseNamingTableSavedStateCommonTests(common_tests.CommonTests):
    # CommonTests suite with the naming-table fallback driver.
    @classmethod
    def get_test_driver(cls) -> ReverseNamingTableFallbackTestDriver:
        return ReverseNamingTableFallbackTestDriver()
class ReverseNamingTableSavedStateHierarchyTests(hierarchy_tests.HierarchyTests):
    # Hierarchy suite with the naming-table fallback driver.
    @classmethod
    def get_test_driver(cls) -> ReverseNamingTableFallbackTestDriver:
        return ReverseNamingTableFallbackTestDriver()
class ReverseNamingTableSavedStateTests(SavedStateTests):
    """Re-runs all SavedStateTests with the naming-table fallback enabled,
    plus naming-table-specific scenarios."""
    @classmethod
    def get_test_driver(cls) -> ReverseNamingTableFallbackTestDriver:
        return ReverseNamingTableFallbackTestDriver()
    def test_file_moved(self) -> None:
        """Renaming a file while the server is stopped should be resolved via
        the naming saved state once the new path is reported."""
        new_file = os.path.join(self.test_driver.repo_dir, "class_3b.php")
        self.add_file_that_depends_on_class_a(new_file)
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=False)
        naming_table_path = self.test_driver.dump_naming_saved_state(
            self.test_driver.repo_dir,
            saved_state_path=os.path.join(self.test_driver.repo_dir, "new"),
        )
        self.test_driver.proc_call([hh_client, "stop", self.test_driver.repo_dir])
        # Move the file while no server is watching.
        new_file2 = os.path.join(self.test_driver.repo_dir, "class_3c.php")
        shutil.move(new_file, new_file2)
        self.test_driver.start_hh_server(
            changed_files=[],
            changed_naming_files=["class_3c.php"],
            naming_saved_state_path=naming_table_path,
        )
        self.test_driver.check_cmd(["No errors!"], assert_loaded_saved_state=True)
| 37.520875
| 88
| 0.63816
|
4a046737a56637b08a3295c7e9bcd5ad9d80f5e2
| 5,430
|
py
|
Python
|
monte-carlo/first_visit_mc.py
|
RaphSku/Reinforcement-Learning
|
fe043864dea0f22ad4afc58962cdbe8d018cdb11
|
[
"MIT-0"
] | null | null | null |
monte-carlo/first_visit_mc.py
|
RaphSku/Reinforcement-Learning
|
fe043864dea0f22ad4afc58962cdbe8d018cdb11
|
[
"MIT-0"
] | null | null | null |
monte-carlo/first_visit_mc.py
|
RaphSku/Reinforcement-Learning
|
fe043864dea0f22ad4afc58962cdbe8d018cdb11
|
[
"MIT-0"
] | null | null | null |
import numpy as np
from typing import Sequence
class LinearWorld:
    """One-dimensional grid world for tabular RL experiments.

    The world is a row of fields; field 0 is the single terminal state.
    Every non-terminal field starts with a value drawn from a uniform
    distribution, while the terminal field's value is fixed at 0.

    Attributes
    ----------
    number_of_fields : int
        Total number of fields in the row.
    states : np.ndarray
        Current value estimate for every field.
    start_position : int
        Field index the agent occupies at the start of each episode.
    current_position : int
        Field index the agent currently occupies.
    """
    def __init__(self, number_of_fields: int, start_position: int):
        self.number_of_fields = number_of_fields
        self.start_position = start_position
        self.current_position = start_position
        self.states = np.random.rand(number_of_fields)
        self.states[0] = 0
    def reset(self) -> None:
        """Put the agent back on its start position."""
        self.current_position = self.start_position
class Policy:
    """Indexable wrapper around a fixed sequence of actions.

    Parameters
    ----------
    policy : array-like[int]
        Action per field of the Linear World: 0 moves left, 1 moves right.
    """
    def __init__(self, policy: Sequence[int]):
        self.policy = policy
    def __getitem__(self, index):
        return self.policy[index]
class FirstVisitMC:
    """First-visit Monte Carlo estimation of the state-value function.

    Parameters
    ----------
    world : LinearWorld
        Environment whose per-field state values are updated in place.
    policy : Policy
        Policy to evaluate; action 0 moves left, action 1 moves right.

    Methods
    -------
    estimate_value(max_iterations)
        Run episodes under the policy and return the estimated values.
    """
    def __init__(self, world: LinearWorld, policy: Policy):
        self.world = world
        self.policy = policy
    def estimate_value(self, max_iterations: int) -> Sequence[float]:
        """Estimate the value of every state under the given policy.

        Runs `max_iterations` episodes; at each first visit of a state the
        undiscounted return from that point is recorded, and the state's
        value becomes the running mean of all recorded returns.

        Parameters
        ----------
        max_iterations : int
            Number of episodes to sample before stopping.

        Returns
        -------
        array-like[float]
            The world's state values after the final episode; states never
            visited under the policy are set to -inf.
        """
        # NOTE(review): each return list is seeded with 0.0, which biases the
        # running mean towards zero; kept unchanged for backward compatibility.
        returns = {state: [0.0] for state in range(self.world.number_of_fields)}
        for _ in range(max_iterations):
            self.world.reset()
            episode = self.__generate_episode()
            visited = set()
            for index, (state, reward) in enumerate(episode):
                if state in visited:
                    continue
                visited.add(state)
                # Return G = this reward plus all later rewards in the episode
                # (no discounting). The original re-checked the slice length
                # inside the loop, which is redundant: the loop body never
                # runs for an empty slice.
                G = reward + sum(future_reward for _, future_reward in episode[index + 1:])
                returns[state].append(G)
                self.world.states[state] = np.mean(returns[state])
            # States the policy never reaches get no meaningful estimate.
            for state in returns:
                if state not in visited:
                    self.world.states[state] = -np.inf
        return self.world.states
    def __generate_episode(self) -> list[list[int]]:
        """Roll out one episode by following the policy.

        Returns
        -------
        2d-array[int]
            [state, reward] pairs in visit order. Reaching field 0 ends the
            episode with reward 0; every other step costs -1. Moving right at
            the right edge also ends the episode (without a terminal entry).
        """
        episode = []
        while True:
            position = self.world.current_position
            if position == 0:
                # Terminal state: reward 0, episode over.
                episode.append([position, 0])
                break
            episode.append([position, -1])
            if self.policy[position] == 0:
                # Action 0: step left.
                self.world.current_position -= 1
            elif self.policy[position] == 1:
                # Action 1: step right, unless already at the right edge.
                if position == self.world.number_of_fields - 1:
                    break
                self.world.current_position += 1
        return episode
def main():
    """Demo: evaluate a hand-written policy on a 10-field Linear World with
    first-visit Monte Carlo and print the estimated state values."""
    WORLDLENGTH = 10
    START_POSITION = 4
    # Left half moves left (towards the terminal field 0), right half moves right.
    POLICY = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    linear_world = LinearWorld(number_of_fields = WORLDLENGTH, start_position = START_POSITION)
    policy = Policy(policy = POLICY)
    first_visit_mc = FirstVisitMC(world = linear_world, policy = policy)
    value = first_visit_mc.estimate_value(max_iterations = 1000)
    # Fixed typo in the user-facing message ("Esimated" -> "Estimated").
    print(f"Estimated value for the Linear World:\n{value}")
if __name__ == "__main__":
    main()
| 33.9375
| 103
| 0.585267
|
4a0467e0da7d42ca62866e1db0787ed404a6affc
| 102
|
py
|
Python
|
django_full_stack/favorite_books_project/apps/favorite_books/apps.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | null | null | null |
django_full_stack/favorite_books_project/apps/favorite_books/apps.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | 6
|
2020-06-06T01:50:21.000Z
|
2022-02-10T11:33:02.000Z
|
django_full_stack/favorite_books_project/apps/favorite_books/apps.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class FavoriteBooksConfig(AppConfig):
    # Registers the favorite_books app with Django's application registry.
    name = 'favorite_books'
| 17
| 37
| 0.784314
|
4a0469dea62b70a8da8b0decf60c7acf695fe592
| 7,257
|
py
|
Python
|
codegen_workspace/detection2onnx.py
|
gbxu/vision
|
b9f963d7b0af8b4c7e904d16493c5fb0b30bd457
|
[
"BSD-3-Clause"
] | null | null | null |
codegen_workspace/detection2onnx.py
|
gbxu/vision
|
b9f963d7b0af8b4c7e904d16493c5fb0b30bd457
|
[
"BSD-3-Clause"
] | null | null | null |
codegen_workspace/detection2onnx.py
|
gbxu/vision
|
b9f963d7b0af8b4c7e904d16493c5fb0b30bd457
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import sys
# import os
# sys.path.append(os.environ["HOME"]+"/vision/")
import torchvision # https://pytorch.org/vision/stable/models.html
from pathlib import Path
from torch.onnx import TrainingMode
import onnx
import argparse
import warnings
# Maps model name -> (torchvision detection model instance,
#                     dummy input shape (N, C, H, W),
#                     (num_classes, ground-truth boxes per image)).
# NOTE: every model is instantiated eagerly at import time, which is slow;
# the trailing comments record which export paths are known to fail.
get_model={
    # Object Detection, Instance Segmentation and Person Keypoint Detection
    "fasterrcnn_resnet50_fpn": (torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 600, 1000), (91, 11)), # pytorch->onnx fails: randperm
    "fasterrcnn_mobilenet_v3_large_fpn": (torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 600, 1000), (91, 11)), # pytorch->onnx fails: randperm
    "fasterrcnn_mobilenet_v3_large_320_fpn": (torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 600, 1000), (91, 11)), # pytorch->onnx fails: randperm
    "retinanet_resnet50_fpn": (torchvision.models.detection.retinanet_resnet50_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 224, 224), (91, 11)), # pytorch->onnx fails: l1_loss
    "ssd300_vgg16": (torchvision.models.detection.ssd300_vgg16(pretrained=False, pretrained_backbone=False, ), (4, 3, 300, 300), (91, 11)), # pytorch->onnx fails when training: smooth_l1_loss; fails when eval: resolve_conj
    "ssdlite320_mobilenet_v3_large": (torchvision.models.detection.ssdlite320_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, ), (24, 3, 320, 320), (91, 11)), # pytorch->onnx fails: randperm; fails when eval: resolve_conj
    "maskrcnn_resnet50_fpn": (torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False, ), (2, 3, 224, 224), (91, 11)), # pytorch->onnx fails: randperm; fails when eval: resolve_conj
    # in .local/lib/python3.7/site-packages/torchvision/models/detection/transform.py:
    # resized_data[:, :, 0] --> resized_data[:, 0] due to no batch dimension
    # "keypointrcnn_resnet50_fpn": (torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False, trainable_backbone_layers=5), (2, 3, 224, 224), (2, 11)), # pytorch->onnx fails: randperm; fails when eval: resolve_conj
}
def infer_shapes(model, inputs, batch):
    """Build the `dynamic_axes` mapping for torch.onnx.export.

    For every named input (and the model's scalar-or-tensor output, recorded
    under the key 'loss'), the axis whose extent equals *batch* is labelled
    "batch". Raises IndexError if a non-scalar tensor has no such axis.
    """
    def build_shape_dict(name, tensor, is_input, batch):
        print(name, "'s shape", tensor[0].shape)
        if isinstance(tensor, (tuple, list)):
            # Recurse over containers of tensors.
            return [build_shape_dict(name, element, is_input, batch) for element in tensor]
        if len(tensor.shape) > 0:
            # assumes exactly one axis has `batch` elements — TODO confirm
            batch_axes = [axis for axis, numel in enumerate(tensor.shape) if numel == batch]
            axes = {batch_axes[0]: "batch"}
        else:
            # Scalar output (e.g. a reduced loss): nothing to mark dynamic.
            axes = {}
        print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
        return axes
    # Generate input names & axes
    input_dynamic_axes = {name: build_shape_dict(name, value, True, batch) for name, value in inputs.items()}
    print("input_dynamic_axes", input_dynamic_axes)
    # Generate output names & axes (a forward pass is required to see them).
    loss = model(**inputs)
    outputs = {'loss': loss}
    output_dynamic_axes = {name: build_shape_dict(name, value, False, batch) for name, value in outputs.items()}
    print("output_dynamic_axes", output_dynamic_axes)
    # Merge both mappings into the final dynamic-axes specification.
    dynamic_axes = {**input_dynamic_axes, **output_dynamic_axes}
    print("dynamic_axes:", dynamic_axes)
    return dynamic_axes
class WrapperModel(torch.nn.Module):
    """Wraps a detection model so the forward pass yields one scalar loss.

    In training mode the wrapped model returns a dict of loss components,
    which are summed. In eval mode the output may be either a loss dict or a
    list of prediction tensors; prediction tensors are collapsed via .sum().
    """
    def __init__(self, model):
        super(WrapperModel, self).__init__()
        self._model = model
        # NOTE(review): this criterion is never used in forward — presumably
        # left over from an earlier revision; kept to preserve behavior.
        self.loss = torch.nn.BCEWithLogitsLoss(reduction='sum')
    def forward(self, images, targets):
        out = self._model(images, targets)
        if self.training or isinstance(out, dict):
            # Loss dictionary: accumulate every component loss.
            total = 0
            for component_loss in out.values():
                total += component_loss
            return total
        # Eval-mode predictions: reduce each output tensor and accumulate.
        total = 0
        for prediction in out:
            total += prediction.sum()
        return total
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default=None, help="torchvision model name")
    parser.add_argument("--batch_size", type=int, default=0, help="batch size")
    args = parser.parse_args()
    # No --model_name: export every model in the table; otherwise accept a
    # comma-separated list of model names.
    if args.model_name == None:
        model_names = get_model.keys()
    else:
        model_names = args.model_name.split(',')
    for args.model_name in model_names:
        torchvision_model, (batch_size, channels, height, width), (num_classes, ground_truth_box) = get_model[args.model_name]
        # CLI batch size (if given) overrides the table's default.
        if args.batch_size > 0:
            batch_size = args.batch_size
        dummy_images = torch.randn(batch_size, channels, height, width)
        # Dummy boxes are (x1, y1, x2, y2); clamp the far corner to the
        # shorter image side so every box stays inside the image.
        dummy_boxes = torch.zeros((batch_size, ground_truth_box, 4))
        if height < width:
            dummy_boxes[:,:,2:] = height
        else:
            dummy_boxes[:,:,2:] = width
        # Labels in [1, num_classes); 0 is reserved for background.
        dummy_labels = torch.randint(1, num_classes, (batch_size, ground_truth_box))
        if args.model_name in ["maskrcnn_resnet50_fpn"]:
            dummy_masks = torch.randint(0, 1, (batch_size, 1, height, width))
        if args.model_name in ["keypointrcnn_resnet50_fpn"]:
            num_keypoints=17
            dummy_keypoints = torch.randn(batch_size, num_keypoints, 3, dtype = torch.float32) # 3: (x, y, visibility)
            dummy_keypoints[:,:,-1:] = 1
        # Detection models take a list of image tensors plus a per-image
        # target dict (boxes/labels, optionally masks/keypoints).
        dummy_images = list(image for image in dummy_images)
        dummy_targets = []
        for i in range(len(dummy_images)):
            d = {}
            d['boxes'] = dummy_boxes[i]
            d['labels'] = dummy_labels[i]
            if args.model_name in ["maskrcnn_resnet50_fpn"]:
                d['masks'] = dummy_masks[i]
            if args.model_name in ["keypointrcnn_resnet50_fpn"]:
                d['keypoints'] = dummy_keypoints[i]
            dummy_targets.append(d)
        inputs = {}
        inputs['images'] = dummy_images
        inputs['targets'] = dummy_targets
        input_args = (inputs['images'],
                      inputs['targets'])
        ordered_input_names = ['images', 'targets']
        output_names = ['loss', ]
        model=WrapperModel(torchvision_model)
        model.train()
        # model.eval()
        # dynamic_axes=infer_shapes(model, inputs, batch_size)
        # NOTE(review): _retain_param_name and enable_onnx_checker were
        # removed from torch.onnx.export in newer PyTorch releases — this
        # call presumably targets an older torch; confirm before upgrading.
        torch.onnx.export(
            model=model,
            args=input_args,
            f=Path(args.model_name+'.onnx').as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            # dynamic_axes=dynamic_axes,
            do_constant_folding=False,
            _retain_param_name=True,
            enable_onnx_checker=True,
            opset_version=12,
            training=TrainingMode.PRESERVE
        )
        # Round-trip the exported file: infer shapes, validate, re-save.
        model = onnx.load(args.model_name+'.onnx')
        model = onnx.shape_inference.infer_shapes(model)
        onnx.checker.check_model(model)
        onnx.save(model, args.model_name+'.onnx')
| 47.743421
| 256
| 0.652611
|
4a046a307a11574e4e326e13e25091f3a8c201c1
| 131
|
py
|
Python
|
transformer/executor/__init__.py
|
SimpleConstructs/fixed-width-parser
|
b9bb4953e66dcea2b6ce3aeb2fed58a3f34a784b
|
[
"Apache-2.0"
] | null | null | null |
transformer/executor/__init__.py
|
SimpleConstructs/fixed-width-parser
|
b9bb4953e66dcea2b6ce3aeb2fed58a3f34a784b
|
[
"Apache-2.0"
] | 3
|
2021-07-18T14:34:38.000Z
|
2021-07-25T12:49:09.000Z
|
transformer/executor/__init__.py
|
SimpleConstructs/fixed-width-transformer
|
b9bb4953e66dcea2b6ce3aeb2fed58a3f34a784b
|
[
"Apache-2.0"
] | null | null | null |
from transformer.executor.executor_config import ExecutorConfig
from transformer.executor.executor import LambdaFixedWidthExecutor
| 43.666667
| 66
| 0.908397
|
4a046b432c9c43a85cecf0f3899b72c7600c4890
| 1,866
|
py
|
Python
|
test/homemade/bank.py
|
kesaribath47/imPENNetrables
|
499cf4e4e38c99391ef309865bb0f0382e738ef1
|
[
"MIT"
] | 1
|
2019-07-19T14:55:57.000Z
|
2019-07-19T14:55:57.000Z
|
test/homemade/bank.py
|
kesaribath47/imPENNetrables
|
499cf4e4e38c99391ef309865bb0f0382e738ef1
|
[
"MIT"
] | null | null | null |
test/homemade/bank.py
|
kesaribath47/imPENNetrables
|
499cf4e4e38c99391ef309865bb0f0382e738ef1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__author__ = "Hung Nguyen"
import select
import subprocess
import threading
import time
class Bank:
    """Test harness that runs the `bank` binary in a background thread and
    exposes non-blocking line readers for its stdout/stderr.

    Unix-only: the readers rely on select.poll().
    """
    BANK_PATH = '../../bin/bank'
    class BankThread(threading.Thread):
        """Background thread that owns the bank subprocess for its lifetime."""
        def __init__(self, path, port, auth):
            self.path = path
            self.port = port
            self.auth = auth
            self.bank = None
            threading.Thread.__init__(self)
        def run(self):
            # Build argv as a list instead of concatenating a string and
            # splitting it: a path or auth value containing whitespace would
            # otherwise be broken into extra arguments.
            command = [self.path, '-p', str(self.port), '-s', self.auth]
            # NOTE(review): bufsize=1 requests line buffering, which is not
            # supported for binary-mode pipes in Python 3.8+ (it warns and
            # falls back to the default); kept to preserve behavior.
            self.bank = subprocess.Popen(command,
                                         shell=False,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         bufsize=1)
            self.bank.wait()
        def terminate(self):
            self.bank.terminate()
    def __init__(self, port, auth):
        self.thread = None
        self.port = port
        self.auth = auth
    def start(self):
        """Launch the bank process and give it a second to come up."""
        self.thread = self.BankThread(self.BANK_PATH, self.port, self.auth)
        self.thread.start()
        time.sleep(1)
    def stop(self):
        """Terminate the bank process started by start()."""
        self.thread.terminate()
    def _read_line(self, stream):
        """Return one decoded, rstripped line from *stream* if data is
        immediately available, otherwise ''. Shared by both readers."""
        poller = select.poll()
        poller.register(stream, select.POLLIN)
        if poller.poll(0):
            return stream.readline().decode('UTF-8').rstrip()
        return ''
    def read_line_stdout(self):
        """Non-blocking read of one line from the bank's stdout."""
        return self._read_line(self.thread.bank.stdout)
    def read_line_stderr(self):
        """Non-blocking read of one line from the bank's stderr."""
        return self._read_line(self.thread.bank.stderr)
| 29.15625
| 83
| 0.535906
|
4a046c0529743795a350b2c1315c5eb134c8f5ca
| 1,174
|
py
|
Python
|
backend/tests/integration/test_changgung_chiayi.py
|
msunji/vaccinate
|
60c41fc8302b285b849c6b0ad4e87125dd86e623
|
[
"MIT"
] | 1
|
2021-05-21T22:15:50.000Z
|
2021-05-21T22:15:50.000Z
|
backend/tests/integration/test_changgung_chiayi.py
|
circlebeams/vaccinate
|
3c8105a3a4b7604e3e8d6843be46e12c13736ab3
|
[
"MIT"
] | null | null | null |
backend/tests/integration/test_changgung_chiayi.py
|
circlebeams/vaccinate
|
3c8105a3a4b7604e3e8d6843be46e12c13736ab3
|
[
"MIT"
] | null | null | null |
import unittest
import os
from Parsers.changgung_chiayi import *
from hospital_types import AppointmentAvailability, HospitalAvailabilitySchema
class TestChanggungChiayi(unittest.TestCase):
    # Parser tests against saved copies of the hospital's booking page.
    # The expected tuple's first element (21) is presumably this hospital's
    # id — verify against the parser module.
    def test_full(self) -> None:
        """Fully-booked page: self-paid appointments report UNAVAILABLE."""
        with open("backend/tests/saved_pages/changgung_chiayi_full.html") as html_file:
            expected_availability: HospitalAvailabilitySchema = {
                "self_paid": AppointmentAvailability.UNAVAILABLE,
                "government_paid": AppointmentAvailability.NO_DATA,
            }
            availability = parse_changgung_chiayi(html_file.read())
            self.assertEqual(availability, (21, expected_availability))
    def test_available(self) -> None:
        """Page with open slots: self-paid appointments report AVAILABLE."""
        with open(
            "backend/tests/saved_pages/changgung_chiayi_available.html"
        ) as html_file:
            expected_availability: HospitalAvailabilitySchema = {
                "self_paid": AppointmentAvailability.AVAILABLE,
                "government_paid": AppointmentAvailability.NO_DATA,
            }
            availability = parse_changgung_chiayi(html_file.read())
            self.assertEqual(availability, (21, expected_availability))
| 41.928571
| 87
| 0.693356
|
4a046d2c3a8df918366d524e739923a9d7ee0f91
| 2,123
|
py
|
Python
|
runtime/models/api/optim_request.py
|
onap/optf-osdf
|
2b9e7f4fca3d510a201283a8561f6ff3424f5fd6
|
[
"Apache-2.0"
] | 3
|
2019-04-15T13:33:57.000Z
|
2019-10-21T17:19:19.000Z
|
runtime/models/api/optim_request.py
|
onap/optf-osdf
|
2b9e7f4fca3d510a201283a8561f6ff3424f5fd6
|
[
"Apache-2.0"
] | null | null | null |
runtime/models/api/optim_request.py
|
onap/optf-osdf
|
2b9e7f4fca3d510a201283a8561f6ff3424f5fd6
|
[
"Apache-2.0"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) 2020 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from schematics.types import BaseType, DictType, StringType, IntType
from schematics.types.compound import ModelType
from osdf.models.api.common import OSDFModel
"""
"""
class RequestInfo(OSDFModel):
    """Info for northbound request from client """
    transactionId = StringType(required=True)  # caller's transaction correlation id
    requestID = StringType(required=True)  # id of this specific request
    callbackUrl = StringType()  # presumably where async results are delivered — confirm with callers
    sourceId = StringType(required=True)  # identifier of the requesting system
    timeout = IntType()  # optional timeout (units not specified here)
class DataInfo(OSDFModel):
    """Optimization data info"""
    text = StringType()  # payload supplied as raw text
    json = DictType(BaseType)  # payload supplied as a JSON-like dict
class OptimInfo(OSDFModel):
    """Optimizer request info details.

    Exactly one of modelId or modelContent is expected to carry the model —
    presumably; confirm against the request handler.
    """
    # ModelId from the database, if its not populated,
    # assume that solverModel will be populated.
    modelId = StringType()
    # type of solver (mzn, or-tools, etc.)
    solver = StringType()
    # Arguments for solver
    solverArgs = DictType(BaseType)
    # NOTE: a large blob string containing the model (which is not that
    # problematic since models are fairly small).
    modelContent = StringType()
    # Data Payload, input data for the solver
    optData = ModelType(DataInfo)
class OptimizationAPI(OSDFModel):
"""Request for Optimizer API (specific to optimization and additional metadata"""
requestInfo = ModelType(RequestInfo, required=True)
optimInfo = ModelType(OptimInfo, required=True)
| 34.803279
| 85
| 0.671691
|
4a046e354dd8ad09554437e7b18e91763cc153c9
| 1,043
|
py
|
Python
|
awx/playbooks/library/project_archive.py
|
sumit-21/awx
|
966a62c6bf2ec0c672e076684341bc6bd75827af
|
[
"Apache-2.0"
] | 17
|
2021-04-03T01:40:17.000Z
|
2022-03-03T11:45:20.000Z
|
awx/playbooks/library/project_archive.py
|
Saurabh-Thakre/awx
|
8eb377a3ea8303c394ad4c958cc828c7239c1e11
|
[
"Apache-2.0"
] | 24
|
2021-05-18T21:13:35.000Z
|
2022-03-29T10:23:52.000Z
|
awx/playbooks/library/project_archive.py
|
Saurabh-Thakre/awx
|
8eb377a3ea8303c394ad4c958cc828c7239c1e11
|
[
"Apache-2.0"
] | 24
|
2020-11-27T08:37:35.000Z
|
2021-03-08T13:27:15.000Z
|
ANSIBLE_METADATA = {
"metadata_version": "1.0",
"status": ["stableinterface"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: project_archive
short_description: unpack a project archive
description:
- Unpacks an archive that contains a project, in order to support handling versioned
artifacts from (for example) GitHub Releases or Artifactory builds.
- Handles projects in the archive root, or in a single base directory of the archive.
version_added: "2.9"
options:
src:
description:
- The source archive of the project artifact
required: true
project_path:
description:
- Directory to write the project archive contents
required: true
force:
description:
- Files in the project_path will be overwritten by matching files in the archive
default: False
author:
- "Philip Douglass" @philipsd6
"""
EXAMPLES = """
- project_archive:
src: "{{ project_path }}/.archive/project.tar.gz"
project_path: "{{ project_path }}"
force: "{{ scm_clean }}"
"""
| 25.439024
| 87
| 0.698945
|
4a046ea68edc436df0274492f659b670edd210d3
| 32,351
|
py
|
Python
|
quex/engine/state_machine/character_counter.py
|
smmckay/quex-mirror
|
7d75ed560e9f3a591935e59243188676eecb112a
|
[
"MIT"
] | null | null | null |
quex/engine/state_machine/character_counter.py
|
smmckay/quex-mirror
|
7d75ed560e9f3a591935e59243188676eecb112a
|
[
"MIT"
] | null | null | null |
quex/engine/state_machine/character_counter.py
|
smmckay/quex-mirror
|
7d75ed560e9f3a591935e59243188676eecb112a
|
[
"MIT"
] | null | null | null |
# (C) 2012 Frank-Rene Schaefer
from quex.engine.counter import CountActionMap
from quex.engine.misc.tree_walker import TreeWalker
from quex.engine.misc.tools import typed
from quex.constants import E_Count
from quex.engine.operations.operation_list import Op
from quex.blackboard import setup as Setup, Lng
class SmLineColumnCountInfo(object):
"""Information on character counting characteristics of lexeme that match a
given state machine.
.line_n_increment
The number of lines that appear in the pattern.
E_Count.VOID => cannot be determined from the pattern off-line.
.line_n_increment_by_lexeme_length
If the line number increment is proportional to the length of the
lexeme which is matched, then this variable contains the factor.
E_Count.VOID => lexeme length not proportional to line_n_increment.
.column_index
If the column index after match has a specific value, then this
member variable contains its value.
E_Count.VOID => No specific value for '.column_index'.
(This makes sense for pattern that have a 'begin-of-line' pre-
context. Or, when it contains a newline such as "\\notto".)
.column_n_increment
The number of columns that appear in the pattern
E_Count.VOID => it cannot be determined from the pattern off-line.
.column_n_increment_by_lexeme_length
If the column number increment is proportional to the length of the
lexeme which is matched, then this variable contains the factor.
E_Count.VOID => if there is no relation between lexeme length and
column number increment.
"""
__slots__ = (
"column_n_increment",
"line_n_increment",
"column_index",
"grid_step_n",
"column_n_increment_by_lexeme_length",
"line_n_increment_by_lexeme_length",
"grid_step_size_by_lexeme_length",
"run_time_counter_required_f"
)
def __init__(self, Result=None, CodecTrafoInfo=None, SM=None):
if Result is None:
self.column_n_increment = 0
self.line_n_increment = 0
self.column_index = 0
self.grid_step_n = 0
self.column_n_increment_by_lexeme_length = 0
self.line_n_increment_by_lexeme_length = 0
self.grid_step_size_by_lexeme_length = 0
else:
self.column_n_increment = SmLineColumnCountInfo.get_real(Result.column_n_increment)
self.line_n_increment = SmLineColumnCountInfo.get_real(Result.line_n_increment)
self.column_index = SmLineColumnCountInfo.get_real(Result.column_index,
ValueOfVirginity=E_Count.VOID)
self.grid_step_n = SmLineColumnCountInfo.get_real(Result.grid_step_n)
self.column_n_increment_by_lexeme_length = SmLineColumnCountInfo.get_real(Count.column_n_increment_by_lexeme_length)
self.line_n_increment_by_lexeme_length = SmLineColumnCountInfo.get_real(Count.line_n_increment_by_lexeme_length)
self.grid_step_size_by_lexeme_length = SmLineColumnCountInfo.get_real(Count.grid_step_size_by_lexeme_length)
if CodecTrafoInfo is not None and CodecTrafoInfo.variable_character_sizes_f():
self._consider_variable_character_sizes(SM, CodecTrafoInfo)
self.run_time_counter_required_f = self.__run_time_counter_required_f()
def __run_time_counter_required_f(self):
if ( self.line_n_increment_by_lexeme_length == E_Count.VOID \
and self.line_n_increment == E_Count.VOID):
return True
elif ( self.column_n_increment_by_lexeme_length == E_Count.VOID \
and self.column_n_increment == E_Count.VOID \
and self.column_index == E_Count.VOID \
and self.grid_step_size_by_lexeme_length == E_Count.VOID):
return True
else:
return False
@staticmethod
@typed(CaMap=CountActionMap)
def from_DFA(CaMap, SM, BeginOfLineF=False, CodecTrafoInfo=None):
"""LINE AND COLUMN NUMBER ANALYSIS ________________________________________
Given a pattern as a state machine 'SM' this function analyses the
increments of line and column numbers. Depending on whether those
values can be determined from the state machine or only during run-
time, a SmLineColumnCountInfo object is provided.
NOTES _____________________________________________________________________
DFA shall not contain pre- or post-contexts.
DEPENDS ON: CaMap providing three databases:
.newline
.grid
.column
RESTRICTION _______________________________________________________________
* The current approach does consider the column count to be void as soon *
* as a state is reached with two different column counts. *
Disadvantage:
Sub-optimal results for a very restricted amount of patterns. In these
cases, a 'count' is implemented where a plain addition or setting would be
sufficient.
Sub-Optimal Scenario:
If there is more than one path to one node in the state machine with
different column counts AND after that node there comes a newline from
whereon the pattern behaves 'deterministic'.
Reason for restriction left in place:
To fix this, another approach would have to be implemented where the state
machine is inverted and then the column counts starts from rear to front
until the first newline. This tremendous computation time overhead is shied
away from, because of the aforementioned low expected value add.
RETURNS: 'None', if SM is empty (no transitions).
SmLineColumnCountInfo, else.
___________________________________________________________________________
"""
tracer = CharacterCountTracer(SM)
init_state = SM.get_init_state()
init_state_target_map = init_state.target_map.get_map()
if not init_state_target_map:
return None # Do not count anything
if BeginOfLineF: column_index = 0
else: column_index = E_Count.VOID
count = Count(0, 0, ColumnIndex=column_index, GridStepN=E_Count.VIRGIN)
# Next Node: [0] state index of target state
# [1] character set that triggers to it
# [2] count information (initialized to 'virgin')
initial = [
(target_state_index, character_set, count.clone()) \
for target_state_index, character_set in init_state_target_map.iteritems()
]
Count.init(CaMap)
tracer.do(initial)
# If there was an acceptance state, the result cannot be None
assert tracer.result is not None
# (*) Determine detailed count information
if tracer.abort_f and Count.grid_step_size_by_lexeme_length.value == E_Count.VIRGIN:
# If the count procedure was aborted, possibly NOT all character
# transitions have been investigated. So the value for 'grid' must
# determined now, independently of the 'tracer.do()'.
Count.grid_step_size_by_lexeme_length <<= \
_get_grid_step_size_by_lexeme_length(SM, CaMap)
return SmLineColumnCountInfo(tracer.result, CodecTrafoInfo, SM)
@staticmethod
def get_real(Object, ValueOfVirginity=0):
if Object.value == E_Count.VIRGIN: return ValueOfVirginity
elif Object.value == E_Count.NONE: return 0
return Object.value
@classmethod
def get_OpList(cls, LCCI, ModeName):
"""RETURN: [0] Verdict
[1] CounterCode
Verdict == True --> Run-time counter implementation required!!
Pattern's length cannot be determined beforehand.
False --> No run-time counting is necessary!!
It was possible to determine the increments
based on the pattern's structure.
Default Character Counter is used when the increments and/or setting
cannot be derived from the pattern itself.
"""
if not (Setup.count_line_number_f or Setup.count_column_number_f):
return False, []
elif LCCI is None:
return True, [ Op.PasspartoutCounterCall(ModeName) ]
elif LCCI.run_time_counter_required_f:
return True, [ Op.PasspartoutCounterCall(ModeName) ]
# (*) Determine Line and Column Number Count ______________________________
#
# Both, for line and column number considerations the same rules hold.
# Those rules are defined in 'get_offset()' as shown below.
#
def get_offset(Increment, IncrementByLexemeLength):
if IncrementByLexemeLength == 0 or Increment == 0:
return None, None
elif Increment != E_Count.VOID:
return Increment, 1
else:
return Lng.LEXEME_LENGTH(), IncrementByLexemeLength
# Column and line counts must be shifted (begin=end) even if only
# columns are counted. For example, even if only columns are modified
# the old line_number_at_begin must be adapted to the current.
cmd_list = [ Op.ColumnCountShift() , Op.LineCountShift() ]
# -- Line Number Count
offset, factor = get_offset(LCCI.line_n_increment,
LCCI.line_n_increment_by_lexeme_length)
if offset is not None:
cmd_list.append(Op.LineCountAdd(offset, factor))
# -- Column Number Count
if LCCI.column_index != E_Count.VOID:
cmd_list.append(Op.ColumnCountSet(LCCI.column_index + 1))
elif LCCI.column_n_increment_by_lexeme_length != E_Count.VOID:
offset, factor = get_offset(LCCI.column_n_increment,
LCCI.column_n_increment_by_lexeme_length)
if offset is not None:
cmd_list.append(Op.ColumnCountAdd(offset, factor))
else:
# Following assert results from entry check against 'VOID'
assert LCCI.grid_step_size_by_lexeme_length != E_Count.VOID
if LCCI.grid_step_n == E_Count.VOID: grid_step_n = Lng.LEXEME_LENGTH()
elif LCCI.grid_step_n != 0: grid_step_n = LCCI.grid_step_n
else: grid_step_n = None
if grid_step_n is not None:
cmd_list.append(Op.ColumnCountGridAdd(LCCI.grid_step_size_by_lexeme_length,
grid_step_n))
return False, cmd_list
def _consider_variable_character_sizes(self, SM, CodecTrafoInfo):
"""UTF8 and UTF16 counters may have different numbers of chunks that
represent a single character. In such cases, it cannot be concluded from
the LexemeL to the number of involved characters. In such cases, column and
line number counter have to be done at run-time. This is signalized by
self.column_n_increment_by_lexeme_length = E_Count.VOID
self.grid_step_size_by_lexeme_length = E_Count.VOID
and respectively
self.line_n_increment_by_lexeme_length = E_Count.VOID
If the number of chunks per character is the same for all involved characters,
then the factor can be adapted by this number. For example, if all characters
involve 2 bytes and the buffer element is 'byte', then the character number
is equal to number of bytes divide by two, i.e.
self.column_n_increment_by_lexeme_length /= 2.
"""
lexatom_n_per_char = -1
# If the internal engine is not running on Unicode, considerations
# may be made about the byte number per character (e.g. UTF8).
if self.column_n_increment != E_Count.VOID \
or self.column_index != E_Count.VOID:
# No problem in this case; increment does not depend on the lexeme length.
pass
elif self.column_n_increment_by_lexeme_length != E_Count.VOID \
or self.grid_step_size_by_lexeme_length != E_Count.VOID:
# In this case, the column number increment is a function of
# the lexeme length. This is only valid if all characters in the
# pattern actually have the same number of 'chunks' (e.g. bytes in UTF8).
lexatom_n_per_char = CodecTrafoInfo.lexatom_n_per_character_in_state_machine(SM)
if lexatom_n_per_char is None:
# One cannot conclude from the number of bytes of a lexeme to
# the number of columns to be incremented.
self.column_n_increment_by_lexeme_length = E_Count.VOID
self.grid_step_size_by_lexeme_length = E_Count.VOID
else:
if self.column_n_increment_by_lexeme_length != E_Count.VOID:
self.column_n_increment_by_lexeme_length = float(self.column_n_increment_by_lexeme_length) / lexatom_n_per_char
elif self.grid_step_size_by_lexeme_length != E_Count.VOID:
self.grid_step_size_by_lexeme_length = float(self.grid_step_size_by_lexeme_length) / lexatom_n_per_char
if self.line_n_increment != E_Count.VOID:
# No problem in this case; increment does not depend on the lexeme length.
pass
elif self.line_n_increment_by_lexeme_length != E_Count.VOID:
if lexatom_n_per_char == -1: # If not yet determined, determine!
lexatom_n_per_char = CodecTrafoInfo.lexatom_n_per_character_in_state_machine(SM)
if lexatom_n_per_char is None:
self.line_n_increment_by_lexeme_length = E_Count.VOID
elif self.line_n_increment_by_lexeme_length != E_Count.VOID:
self.line_n_increment_by_lexeme_length = float(self.line_n_increment_by_lexeme_length) / lexatom_n_per_char
def __str__(self):
return \
"line_n_increment = %s;\n" \
"column_index = %s;\n" \
"column_n_increment = %s;\n" \
"grid_step_n = %s;\n" \
"line_n_increment_by_lexeme_length = %s;\n" \
"column_n_increment_by_lexeme_length = %s;\n" \
"grid_step_size_by_lexeme_length = %s;\n" \
% (
self.line_n_increment,
self.column_index,
self.column_n_increment,
self.grid_step_n,
self.line_n_increment_by_lexeme_length,
self.column_n_increment_by_lexeme_length,
self.grid_step_size_by_lexeme_length
)
class UniqueValue(object):
"""A simple class to hold a value that is:
-- VIRGIN, if it has not been assigned.
-- VOID, if it has been assigned with different values.
-- Some Value, if the assignment was always the same.
"""
__slots__ = ('value')
def __init__(self, InitValue=E_Count.VIRGIN):
assert InitValue is not None
if isinstance(InitValue, UniqueValue): self.value = InitValue.value
else: self.value = InitValue
def clone(self):
return UniqueValue(self.value)
def __ilshift__(self, Value):
"""Use 'injection' to fulfill the task of virgin, void, coherent assignment."""
assert not isinstance(Value, UniqueValue)
assert Value is not None
if self.value == E_Count.VIRGIN: self.value = Value
elif Value == E_Count.NONE: self.value = E_Count.NONE
elif self.value == E_Count.VOID: pass
elif self.value != Value: self.value = E_Count.VOID
else: pass # No change
return self
def __iadd__(self, Value):
assert not isinstance(Value, UniqueValue)
if self.value == E_Count.VIRGIN: self.value = Value
elif self.value == E_Count.VOID: pass
elif self.value == E_Count.NONE: pass
else: self.value += Value
return self
def __eq__(self, Other):
if isinstance(Other, UniqueValue): other_value = Other.value
else: other_value = Other
if self.value == other_value:
return True
# NONE == VIRGIN
if (self.value == E_Count.NONE and other_value == E_Count.VIRGIN) \
or (self.value == E_Count.VIRGIN and other_value == E_Count.NONE):
return True
return False
def __ne__(self, Other):
return not (self == Other)
def __str__(self):
return str(self.value)
def is_number(self):
if self.value == E_Count.VIRGIN or self.value == E_Count.VOID:
return False
if isinstance(self.value, (str, unicode)):
return False
return True
class CharacterCountTracer(TreeWalker):
"""________________________________________________________________________
Recursive Algorithm to count the number of newlines, characters, or spaces
for each state in the state machine. It is done for each state, so that
path walking can be aborted as soon as a known state is hit.
-- A loop makes a count either (i) void if the counted character appears,
or (ii) is unimportant. If (i) happens, then the counter is globally
void. In case of (ii) no change happened so any analysis starting from
the loop's knot point is still valid and does not have to be made
again.
-- A node is met through another path. Exactly the same consideration as
for loops holds again. The break-up here is also essential to avoid
exponential time (The total number of paths multiplies with the number
of branches through each knot on the path).
ONLY PATTERNS WITHOUT PRE- AND POST-CONTEXT ARE HANDLED HERE!
___________________________________________________________________________
"""
def __init__(self, SM):
self.sm = SM
self.depth = 0
self.result = Count(E_Count.VIRGIN, E_Count.VIRGIN, E_Count.VIRGIN, E_Count.VIRGIN)
if SM.get_init_state().is_acceptance():
self.result = Count(ColumnN=0, LineN=0, ColumnIndex=E_Count.VIRGIN, GridStepN=E_Count.VIRGIN)
else:
self.result = Count(E_Count.VIRGIN, E_Count.VIRGIN, E_Count.VIRGIN, E_Count.VIRGIN)
self.known_db = {} # state_index --> count
TreeWalker.__init__(self)
def on_enter(self, Info):
"""Info = (state_index of what is entered, character set that triggers to it)"""
StateIndex, CharacterSet, count = Info
if not count.compute(CharacterSet):
self.result.register_result(count)
self.abort_f = True
return None
state = self.sm.states[StateIndex]
known = self.known_db.get(StateIndex)
if known is not None:
# Set elements to 'VOID' if the previous count deviates from current count.
# 'voidify' returns 'True' if all elements have been set to void.
if self.result.voidify_deviants(known, count): self.abort_f = True
# Rest of paths starting from this state has been walked along before
subsequent = None
else:
known = count.clone()
self.known_db[StateIndex] = known
subsequent = [ (state_index, character_set, count.clone()) \
for state_index, character_set in state.target_map ]
if state.is_acceptance():
if not self.result.register_result(known): self.abort_f = True
return subsequent
def on_finished(self, node):
pass
def _get_grid_step_size_by_lexeme_length(SM, CaMap):
"""The CharacterCountTracer has been aborted (which is a good thing). Now,
the grid information has to be determined extra. As mentioned in the calling
function 'grid' can have the following three values:
N > 0, if ONLY grid characters of size 'N' are involved.
E_Count.VIRGIN, if no grid character is involved.
E_Count.VOID, if some grid characters are involved, but increase of
column_n_increment must be determined at run-time.
"""
assert False, "Supposed to trigger to show that this function is still used!"
prototype = E_Count.VIRGIN
for state in SM.states.itervalues():
for character_set in state.target_map.get_map().itervalues():
for grid_size, grid_character_set in CaMap.grid.iteritems():
if grid_character_set.is_superset(character_set):
# All characters of the transition are in 'grid_character_set'
if prototype == E_Count.VIRGIN: prototype = grid_size
elif prototype != grid_size: return E_Count.VOID
elif grid_character_set.has_intersection(character_set):
# Some characters are form 'grid_character_set' others not.
return E_Count.VOID
return prototype
class Count(object):
"""________________________________________________________________________
Contains increment of line and column number of a pattern as soon as one
particular state has been reached.
___________________________________________________________________________
"""
__slots__ = ('column_n_increment', 'line_n_increment', 'column_index', 'grid_step_n')
# (*) Increment per step:
#
# If the increment per step is the same 'C' for any character that
# appears in the pattern, then the length of the pattern can be
# computed at run- time by a simple subtraction:
#
# length = (LexemeEnd - LexemeBegin) * C
#
# provided that there is no newline in the pattern this is at the same
# time the column increment. Same holds for line number increments.
column_n_increment_by_lexeme_length = None
# Just for info, in Unicode there are the following candidates which may
# possibly have assigned a separate line number increment: Line Feed, 0x0A;
# Vertical Tab, 0x0B; Form Feed, 0x0C; Carriage Return, 0x0D; Next Line,
# 0x85; Line Separator, 0x28; Paragraph Separator, 0x2029;
line_n_increment_by_lexeme_length = None
grid_step_size_by_lexeme_length = None
# Line/Column count information
count_map = None
@staticmethod
@typed(CountMap=CountActionMap)
def init(CountMap):
"""Initialize global objects in namespace 'Count'."""
Count.column_n_increment_by_lexeme_length = UniqueValue()
Count.line_n_increment_by_lexeme_length = UniqueValue()
Count.grid_step_size_by_lexeme_length = UniqueValue()
Count.count_map = CountMap
def __init__(self, ColumnN, LineN, ColumnIndex, GridStepN=0):
self.line_n_increment = UniqueValue(LineN)
self.column_n_increment = UniqueValue(ColumnN)
self.column_index = UniqueValue(ColumnIndex)
self.grid_step_n = UniqueValue(GridStepN)
def clone(self):
return Count(self.column_n_increment,
self.line_n_increment,
self.column_index,
self.grid_step_n)
def compute(self, CharacterSet):
"""CharacterSet -- If a given input character lies in the CharacterSet,
then a state transition happens.
This function determines the influence of such a state transition
on the line and column numbers.
RETURNS:
True -- If it is worth to consider subsequent transitions, because the
line or column numbering is still deterministic.
False -- If all further consideration may be dropped, because line and
column numbering cannot be determined deterministically from
the state machine.
"""
def void_line_count():
Count.line_n_increment_by_lexeme_length <<= E_Count.VOID
self.line_n_increment <<= E_Count.VOID
def void_grid_steps():
Count.grid_step_size_by_lexeme_length <<= E_Count.VOID
self.grid_step_n <<= E_Count.VOID
def void_column_count():
Count.column_n_increment_by_lexeme_length <<= E_Count.VOID
self.column_n_increment <<= E_Count.VOID
column, grid, line = Count.count_map.get_count_commands(CharacterSet)
if line == -1:
void_line_count()
elif line is not None:
delta = line
# 'CharacterSet' does not contain anything beyond 'character_set'
Count.line_n_increment_by_lexeme_length <<= delta
Count.column_n_increment_by_lexeme_length <<= 0
Count.grid_step_size_by_lexeme_length <<= 0
self.column_n_increment <<= 0
self.column_index = 0 # UniqueValue(E_Count.VIRGIN)
self.grid_step_n = UniqueValue(E_Count.VIRGIN)
if isinstance(delta, (str, unicode)):
void_line_count()
else:
self.line_n_increment += delta
if grid == -1:
void_grid_steps()
elif grid is not None:
grid_size = grid
# 'CharacterSet' does not contain anything beyond 'character_set'
Count.line_n_increment_by_lexeme_length <<= 0
Count.grid_step_size_by_lexeme_length <<= grid_size
if isinstance(grid_size, (str, unicode)):
void_grid_steps()
else:
self.grid_step_n += 1 # Remains VOID, if it is already
if self.column_index.is_number():
delta = grid_size - (self.column_index.value % grid_size)
Count.column_n_increment_by_lexeme_length <<= delta
self.column_index += delta
self.column_n_increment += delta
else:
void_column_count()
if column == -1:
void_column_count()
self.column_index <<= E_Count.VOID
elif column is not None:
delta = column
# 'CharacterSet' does not contain anything beyond 'character_set'
Count.line_n_increment_by_lexeme_length <<= 0
Count.column_n_increment_by_lexeme_length <<= delta
Count.grid_step_size_by_lexeme_length <<= 0
# Assign 'NONE' which prevents increment to it.
# (Assinging '0' would not prevent increment.)
self.grid_step_n <<= E_Count.NONE
if isinstance(delta, (str, unicode)):
void_column_count()
self.column_index <<= E_Count.VOID
else:
self.column_index += delta
self.column_n_increment += delta
if self.all_void(): return False # Abort if all void
else: return True # Do not abort, yet
def all_void(self):
"""Determine whether all values are void. In that case the counting
may be aborted.
"""
return \
Count.column_n_increment_by_lexeme_length == E_Count.VOID \
and Count.line_n_increment_by_lexeme_length == E_Count.VOID \
and Count.grid_step_size_by_lexeme_length == E_Count.VOID \
and self.column_index == E_Count.VOID \
and self.column_n_increment == E_Count.VOID \
and self.line_n_increment == E_Count.VOID \
and self.grid_step_n == E_Count.VOID
def register_result(self, Result):
"""Register the counted numbers for one path to an acceptance state.
If any parameter has not yet been set before, set it. The call to
'voidify_deviants()' ensures that parameters which have different
counts for different paths are set to VOID.
RETURNS: False, if analysis should be aborted, because all has
been detected as being VOID.
True, if analysis may continue, since it may still be
possible that one or the other value remains.
"""
if self.line_n_increment == E_Count.VIRGIN: self.line_n_increment = Result.line_n_increment.clone()
if self.column_index == E_Count.VIRGIN: self.column_index = Result.column_index.clone()
if self.column_n_increment == E_Count.VIRGIN: self.column_n_increment = Result.column_n_increment.clone()
if self.grid_step_n == E_Count.VIRGIN: self.grid_step_n = Result.grid_step_n.clone()
return not self.voidify_deviants(self, Result)
def voidify_deviants(self, Known, New):
"""Compares elements of 'Know' with 'New' representing counts of
different paths through the state machine. If counts differ
for different paths, they are 'VOID' and cannot be determined
beforehand.
RETURNS: 'True' if all counts are 'VOID', thus the investigation
can be aborted.
'False' else.
"""
if Known.line_n_increment != New.line_n_increment: self.line_n_increment <<= E_Count.VOID
if Known.column_index != New.column_index: self.column_index <<= E_Count.VOID
if Known.column_n_increment != New.column_n_increment: self.column_n_increment <<= E_Count.VOID
if Known.grid_step_n != New.grid_step_n: self.grid_step_n <<= E_Count.VOID
return self.column_index == E_Count.VOID \
and self.column_n_increment == E_Count.VOID \
and self.grid_step_n == E_Count.VOID \
and self.line_n_increment == E_Count.VOID
#and Count.line_n_increment_by_lexeme_length == E_Count.VOID \
#and Count.column_n_increment_by_lexeme_length == E_Count.VOID \
#and Count.grid_step_size_by_lexeme_length == E_Count.VOID
def __str__(self):
return "general: {\n" \
" column_n_increment_by_lexeme_length = %s;\n" \
" grid_step_size_by_lexeme_length = %s;\n" \
" line_n_increment_by_lexeme_length = %s;\n" \
"}\n" \
"column_index = %s;\n" \
"column_n_increment = %s;\n" \
"line_n_increment = %s;\n" \
% (Count.column_n_increment_by_lexeme_length,
Count.grid_step_size_by_lexeme_length,
Count.line_n_increment_by_lexeme_length,
self.column_index,
self.column_n_increment,
self.line_n_increment)
| 46.149786
| 131
| 0.61386
|
4a046edee8dbc75af494805568b2f0152a4377e2
| 1,645
|
py
|
Python
|
metaparser/demo_metaparser_nxos.py
|
xiaoxinz-cisco/examples
|
ce1d1526346665bf797effb7b372a5030d2f9bfd
|
[
"Apache-2.0"
] | 81
|
2019-08-07T09:00:15.000Z
|
2022-03-17T23:23:51.000Z
|
metaparser/demo_metaparser_nxos.py
|
xiaoxinz-cisco/examples
|
ce1d1526346665bf797effb7b372a5030d2f9bfd
|
[
"Apache-2.0"
] | 2
|
2019-07-30T03:09:50.000Z
|
2021-09-28T13:08:00.000Z
|
metaparser/demo_metaparser_nxos.py
|
xiaoxinz-cisco/examples
|
ce1d1526346665bf797effb7b372a5030d2f9bfd
|
[
"Apache-2.0"
] | 41
|
2019-08-21T22:43:11.000Z
|
2022-03-30T03:22:35.000Z
|
#
# imports
#
import pprint
import argparse
from pyats.topology import loader
from genie.libs.parser.nxos.show_bgp import ShowBgpProcessVrfAll
def load(testbed, device_name):
tb = loader.load(testbed)
try:
return tb.devices[device_name]
except KeyError as e:
raise KeyError("Could not find '{d}' within "
"testbed '{tb}'".format(d=device, tb=testbed))
def parse_cli(device):
# By the default it will take cli
return ShowBgpProcessVrfAll(device).parse()
def parse_xml(device):
return ShowBgpProcessVrfAll(device, context=['xml']).parse()
def parse_xml_cli(device):
# Parse Xml, and if any key is missing, complete it with cli output
return ShowBgpProcessVrfAll(device, context=['xml', 'cli']).parse()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Arguments for '
'demo8_metaparser')
parser.add_argument('-testbed_file',
help='Location of the testbed file',
default = 'virl.yaml')
parser.add_argument('-device',
help='Name or alias of the device to parse on',
default = 'uut')
custom_args = parser.parse_known_args()[0]
testbed_file = custom_args.testbed_file
device_name = custom_args.device
device = load(testbed_file, device_name)
device.connect()
cli_parsed = parse_cli(device)
pprint.pprint(cli_parsed)
xml_parsed = parse_xml(device)
pprint.pprint(xml_parsed)
xml_cli_parsed = parse_xml_cli(device)
pprint.pprint(xml_cli_parsed)
| 28.859649
| 71
| 0.646201
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.