Dataset schema (one record per source file):
- hexsha: string (length 40)
- size: int64 (2 to 1.02M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 4 to 245)
- max_stars_repo_name: string (length 6 to 130)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 245)
- max_issues_repo_name: string (length 6 to 130)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 245)
- max_forks_repo_name: string (length 6 to 130)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 2 to 1.02M)
- avg_line_length: float64 (1 to 417k)
- max_line_length: int64 (1 to 987k)
- alphanum_fraction: float64 (0 to 1)
- content_no_comment: string (length 0 to 1.01M)
- is_comment_constant_removed: bool (1 class)
- is_sharp_comment_removed: bool (1 class)
hexsha: 1c437841a3b01daa9a2600a43c0462b0bfe4c09c | size: 10,803 | ext: py | lang: Python
max_stars: qiskit/extensions/standard/equivalence_library.py in diego-plan9/qiskit-terra @ a4120d70bd631ad2add228fdb1f86706bc5f2339, licenses ["Apache-2.0"], count 1, events 2018-05-29T03:58:03.000Z to 2018-05-29T03:58:03.000Z
max_issues: same path, repo, and head; licenses ["Apache-2.0"], count 3, events 2018-11-13T17:33:37.000Z to 2018-12-03T09:35:00.000Z
max_forks: same path, repo, and head; licenses ["Apache-2.0"], count 2, events 2017-12-03T15:48:14.000Z to 2018-03-11T13:08:03.000Z
content:
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Standard gates."""
# pylint: disable=invalid-name
from qiskit.qasm import pi
from qiskit.circuit import EquivalenceLibrary, Parameter, QuantumCircuit, QuantumRegister
from . import (
    HGate,
    CHGate,
    MSGate,
    RGate,
    RCCXGate,
    RCCCXGate,
    RXGate,
    CRXGate,
    RXXGate,
    RYGate,
    CRYGate,
    RZGate,
    CRZGate,
    RZZGate,
    SGate,
    SdgGate,
    SwapGate,
    CSwapGate,
    TGate,
    TdgGate,
    U1Gate,
    CU1Gate,
    U2Gate,
    U3Gate,
    CU3Gate,
    XGate,
    CXGate,
    CCXGate,
    YGate,
    CYGate,
    RYYGate,
    ZGate,
    CZGate,
)
_sel = StandardEquivalenceLibrary = EquivalenceLibrary()
# Import existing gate definitions
# HGate
q = QuantumRegister(1, 'q')
def_h = QuantumCircuit(q)
def_h.append(U2Gate(0, pi), [q[0]], [])
_sel.add_equivalence(HGate(), def_h)
# CHGate
q = QuantumRegister(2, 'q')
def_ch = QuantumCircuit(q)
for inst, qargs, cargs in [
        (SGate(), [q[1]], []),
        (HGate(), [q[1]], []),
        (TGate(), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (TdgGate(), [q[1]], []),
        (HGate(), [q[1]], []),
        (SdgGate(), [q[1]], [])
]:
    def_ch.append(inst, qargs, cargs)
_sel.add_equivalence(CHGate(), def_ch)
# MSGate
for num_qubits in range(2, 20):
    q = QuantumRegister(num_qubits, 'q')
    theta = Parameter('theta')
    def_ms = QuantumCircuit(q)
    for i in range(num_qubits):
        for j in range(i + 1, num_qubits):
            def_ms.append(RXXGate(theta), [q[i], q[j]])
    _sel.add_equivalence(MSGate(num_qubits, theta), def_ms)
# RGate
q = QuantumRegister(1, 'q')
theta = Parameter('theta')
phi = Parameter('phi')
def_r = QuantumCircuit(q)
def_r.append(U3Gate(theta, phi - pi / 2, -phi + pi / 2), [q[0]])
_sel.add_equivalence(RGate(theta, phi), def_r)
# RCCXGate
q = QuantumRegister(3, 'q')
def_rccx = QuantumCircuit(q)
for inst, qargs, cargs in [
        (HGate(), [q[2]], []),
        (TGate(), [q[2]], []),
        (CXGate(), [q[1], q[2]], []),
        (TdgGate(), [q[2]], []),
        (CXGate(), [q[0], q[2]], []),
        (TGate(), [q[2]], []),
        (CXGate(), [q[1], q[2]], []),
        (TdgGate(), [q[2]], []),
        (HGate(), [q[2]], []),
]:
    def_rccx.append(inst, qargs, cargs)
_sel.add_equivalence(RCCXGate(), def_rccx)
# RCCCXGate
q = QuantumRegister(4, 'q')
def_rcccx = QuantumCircuit(q)
for inst, qargs, cargs in [
        (HGate(), [q[3]], []),
        (TGate(), [q[3]], []),
        (CXGate(), [q[2], q[3]], []),
        (TdgGate(), [q[3]], []),
        (HGate(), [q[3]], []),
        (CXGate(), [q[0], q[3]], []),
        (TGate(), [q[3]], []),
        (CXGate(), [q[1], q[3]], []),
        (TdgGate(), [q[3]], []),
        (CXGate(), [q[0], q[3]], []),
        (TGate(), [q[3]], []),
        (CXGate(), [q[1], q[3]], []),
        (TdgGate(), [q[3]], []),
        (HGate(), [q[3]], []),
        (TGate(), [q[3]], []),
        (CXGate(), [q[2], q[3]], []),
        (TdgGate(), [q[3]], []),
        (HGate(), [q[3]], []),
]:
    def_rcccx.append(inst, qargs, cargs)
_sel.add_equivalence(RCCCXGate(), def_rcccx)
# RXGate
q = QuantumRegister(1, 'q')
theta = Parameter('theta')
def_rx = QuantumCircuit(q)
def_rx.append(RGate(theta, 0), [q[0]], [])
_sel.add_equivalence(RXGate(theta), def_rx)
# CRXGate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
def_crx = QuantumCircuit(q)
for inst, qargs, cargs in [
        (U1Gate(pi / 2), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U3Gate(-theta / 2, 0, 0), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U3Gate(theta / 2, -pi / 2, 0), [q[1]], [])
]:
    def_crx.append(inst, qargs, cargs)
_sel.add_equivalence(CRXGate(theta), def_crx)
# RXXGate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
def_rxx = QuantumCircuit(q)
for inst, qargs, cargs in [
        (HGate(), [q[0]], []),
        (HGate(), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U1Gate(theta), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (HGate(), [q[1]], []),
        (HGate(), [q[0]], []),
]:
    def_rxx.append(inst, qargs, cargs)
_sel.add_equivalence(RXXGate(theta), def_rxx)
# RYGate
q = QuantumRegister(1, 'q')
theta = Parameter('theta')
def_ry = QuantumCircuit(q)
def_ry.append(RGate(theta, pi / 2), [q[0]], [])
_sel.add_equivalence(RYGate(theta), def_ry)
# CRYGate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
def_cry = QuantumCircuit(q)
for inst, qargs, cargs in [
        (U3Gate(theta / 2, 0, 0), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U3Gate(-theta / 2, 0, 0), [q[1]], []),
        (CXGate(), [q[0], q[1]], [])
]:
    def_cry.append(inst, qargs, cargs)
_sel.add_equivalence(CRYGate(theta), def_cry)
# RYYGate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
def_ryy = QuantumCircuit(q)
for inst, qargs, cargs in [
        (RXGate(pi / 2), [q[0]], []),
        (RXGate(pi / 2), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (RZGate(theta), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (RXGate(-pi / 2), [q[0]], []),
        (RXGate(-pi / 2), [q[1]], []),
]:
    def_ryy.append(inst, qargs, cargs)
_sel.add_equivalence(RYYGate(theta), def_ryy)
# RZGate
q = QuantumRegister(1, 'q')
theta = Parameter('theta')
def_rz = QuantumCircuit(q)
def_rz.append(U1Gate(theta), [q[0]], [])
_sel.add_equivalence(RZGate(theta), def_rz)
# CRZGate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
def_crz = QuantumCircuit(q)
for inst, qargs, cargs in [
        (U1Gate(theta / 2), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U1Gate(-theta / 2), [q[1]], []),
        (CXGate(), [q[0], q[1]], [])
]:
    def_crz.append(inst, qargs, cargs)
_sel.add_equivalence(CRZGate(theta), def_crz)
# RZZGate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
def_rzz = QuantumCircuit(q)
for inst, qargs, cargs in [
        (CXGate(), [q[0], q[1]], []),
        (U1Gate(theta), [q[1]], []),
        (CXGate(), [q[0], q[1]], [])
]:
    def_rzz.append(inst, qargs, cargs)
_sel.add_equivalence(RZZGate(theta), def_rzz)
# SGate
q = QuantumRegister(1, 'q')
def_s = QuantumCircuit(q)
def_s.append(U1Gate(pi / 2), [q[0]], [])
_sel.add_equivalence(SGate(), def_s)
# SdgGate
q = QuantumRegister(1, 'q')
def_sdg = QuantumCircuit(q)
def_sdg.append(U1Gate(-pi / 2), [q[0]], [])
_sel.add_equivalence(SdgGate(), def_sdg)
# SwapGate
q = QuantumRegister(2, 'q')
def_swap = QuantumCircuit(q)
for inst, qargs, cargs in [
        (CXGate(), [q[0], q[1]], []),
        (CXGate(), [q[1], q[0]], []),
        (CXGate(), [q[0], q[1]], [])
]:
    def_swap.append(inst, qargs, cargs)
_sel.add_equivalence(SwapGate(), def_swap)
# CSwapGate
q = QuantumRegister(3, 'q')
def_cswap = QuantumCircuit(q)
for inst, qargs, cargs in [
        (CXGate(), [q[2], q[1]], []),
        (CCXGate(), [q[0], q[1], q[2]], []),
        (CXGate(), [q[2], q[1]], [])
]:
    def_cswap.append(inst, qargs, cargs)
_sel.add_equivalence(CSwapGate(), def_cswap)
# TGate
q = QuantumRegister(1, 'q')
def_t = QuantumCircuit(q)
def_t.append(U1Gate(pi / 4), [q[0]], [])
_sel.add_equivalence(TGate(), def_t)
# TdgGate
q = QuantumRegister(1, 'q')
def_tdg = QuantumCircuit(q)
def_tdg.append(U1Gate(-pi / 4), [q[0]], [])
_sel.add_equivalence(TdgGate(), def_tdg)
# U2Gate
q = QuantumRegister(1, 'q')
phi = Parameter('phi')
lam = Parameter('lam')
def_u2 = QuantumCircuit(q)
def_u2.append(U3Gate(pi / 2, phi, lam), [q[0]], [])
_sel.add_equivalence(U2Gate(phi, lam), def_u2)
# CU1Gate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
def_cu1 = QuantumCircuit(q)
for inst, qargs, cargs in [
        (U1Gate(theta / 2), [q[0]], []),
        (CXGate(), [q[0], q[1]], []),
        (U1Gate(-theta / 2), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U1Gate(theta / 2), [q[1]], [])
]:
    def_cu1.append(inst, qargs, cargs)
_sel.add_equivalence(CU1Gate(theta), def_cu1)
# U1Gate
q = QuantumRegister(1, 'q')
theta = Parameter('theta')
def_u1 = QuantumCircuit(q)
def_u1.append(U3Gate(0, 0, theta), [q[0]], [])
_sel.add_equivalence(U1Gate(theta), def_u1)
# U3Gate
# CU3Gate
q = QuantumRegister(2, 'q')
theta = Parameter('theta')
phi = Parameter('phi')
lam = Parameter('lam')
def_cu3 = QuantumCircuit(q)
for inst, qargs, cargs in [
        (U1Gate((lam + phi) / 2), [q[0]], []),
        (U1Gate((lam - phi) / 2), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U3Gate(-theta / 2, 0, -(phi + lam) / 2), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (U3Gate(theta / 2, phi, 0), [q[1]], [])
]:
    def_cu3.append(inst, qargs, cargs)
_sel.add_equivalence(CU3Gate(theta, phi, lam), def_cu3)
# XGate
q = QuantumRegister(1, 'q')
def_x = QuantumCircuit(q)
def_x.append(U3Gate(pi, 0, pi), [q[0]], [])
_sel.add_equivalence(XGate(), def_x)
# CXGate
# CCXGate
q = QuantumRegister(3, 'q')
def_ccx = QuantumCircuit(q)
for inst, qargs, cargs in [
        (HGate(), [q[2]], []),
        (CXGate(), [q[1], q[2]], []),
        (TdgGate(), [q[2]], []),
        (CXGate(), [q[0], q[2]], []),
        (TGate(), [q[2]], []),
        (CXGate(), [q[1], q[2]], []),
        (TdgGate(), [q[2]], []),
        (CXGate(), [q[0], q[2]], []),
        (TGate(), [q[1]], []),
        (TGate(), [q[2]], []),
        (HGate(), [q[2]], []),
        (CXGate(), [q[0], q[1]], []),
        (TGate(), [q[0]], []),
        (TdgGate(), [q[1]], []),
        (CXGate(), [q[0], q[1]], [])
]:
    def_ccx.append(inst, qargs, cargs)
_sel.add_equivalence(CCXGate(), def_ccx)
# YGate
q = QuantumRegister(1, 'q')
def_y = QuantumCircuit(q)
def_y.append(U3Gate(pi, pi / 2, pi / 2), [q[0]], [])
_sel.add_equivalence(YGate(), def_y)
# CYGate
q = QuantumRegister(2, 'q')
def_cy = QuantumCircuit(q)
for inst, qargs, cargs in [
        (SdgGate(), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (SGate(), [q[1]], [])
]:
    def_cy.append(inst, qargs, cargs)
_sel.add_equivalence(CYGate(), def_cy)
# ZGate
q = QuantumRegister(1, 'q')
def_z = QuantumCircuit(q)
def_z.append(U1Gate(pi), [q[0]], [])
_sel.add_equivalence(ZGate(), def_z)
# CZGate
q = QuantumRegister(2, 'q')
def_cz = QuantumCircuit(q)
for inst, qargs, cargs in [
        (HGate(), [q[1]], []),
        (CXGate(), [q[0], q[1]], []),
        (HGate(), [q[1]], [])
]:
    def_cz.append(inst, qargs, cargs)
_sel.add_equivalence(CZGate(), def_cz)
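A hedged usage sketch for the library built above: qiskit's EquivalenceLibrary exposes get_entry for consumers such as the transpiler's basis translation; the import paths below match this file's vintage of qiskit-terra and are an assumption, as is the loop.

# Hedged sketch: querying the equivalence library defined above.
# `get_entry` returns every QuantumCircuit registered as equivalent
# to the given gate; for H that is the single U2-based definition.
from qiskit.extensions.standard.equivalence_library import StandardEquivalenceLibrary
from qiskit.extensions.standard import HGate
for circ in StandardEquivalenceLibrary.get_entry(HGate()):
    print(circ)  # each circuit is a drop-in replacement for an H gate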
avg_line_length: 24.496599 | max_line_length: 89 | alphanum_fraction: 0.544386
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c43796bccaf30510d15fc9c0745a62bf63bf637 | size: 248 | ext: py | lang: Python
max_stars: src/tfi/doc/example_code.py in ajbouh/tfi @ 6e89e8c8f1ca3b285c788cc6b802fc44f9001290, licenses ["MIT"], count 160, events 2017-09-13T00:32:05.000Z to 2018-05-21T18:17:32.000Z
max_issues: src/tfi/doc/example_code.py in tesserai/tfi @ 6e89e8c8f1ca3b285c788cc6b802fc44f9001290, licenses ["MIT"], count 6, events 2017-09-14T17:54:21.000Z to 2018-01-27T19:31:18.000Z
max_forks: src/tfi/doc/example_code.py in ajbouh/tfi @ 6e89e8c8f1ca3b285c788cc6b802fc44f9001290, licenses ["MIT"], count 11, events 2017-09-13T00:37:08.000Z to 2018-03-05T08:03:34.000Z
content:
class ExampleCode(object):
    def __init__(self, name, label, lines):
        self.name = name
        self.label = label
        self.lines = lines
class ExampleCodeSet(object):
    def __init__(self, examples):
        self.examples = examples
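These two classes are plain value holders. A short hedged sketch of how a caller might assemble them; every field value below is invented for illustration:

# Hypothetical usage of the containers above; names and lines are invented.
snippet = ExampleCode(
    name="python",
    label="Python",
    lines=["import tfi", "model = tfi.load('model.tfi')"],
)
examples = ExampleCodeSet([snippet])
print(examples.examples[0].label)  # -> Python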
avg_line_length: 24.8 | max_line_length: 43 | alphanum_fraction: 0.641129
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c43798e4f90c184b28ff96bd2cdb011086d62f3 | size: 8,511 | ext: py | lang: Python
max_stars: packages/python/plotly/plotly/graph_objs/scattergeo/marker/colorbar/_tickfont.py in mastermind88/plotly.py @ efa70710df1af22958e1be080e105130042f1839, licenses ["MIT"], count null, events null to null
max_issues: same path, repo, and head; licenses ["MIT"], count null, events null to null
max_forks: same path, repo, and head; licenses ["MIT"], count null, events null to null
content:
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
    # class properties
    # --------------------
    _parent_path_str = "scattergeo.marker.colorbar"
    _path_str = "scattergeo.marker.colorbar.tickfont"
    _valid_props = {"color", "family", "size"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
        Returns
        -------
        str
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
        Returns
        -------
        int|float
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        """
    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object
        Sets the color bar's tick label font
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattergeo.mar
            ker.colorbar.Tickfont`
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.marker.colorbar.Tickfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
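In practice this generated class is reached through plotly's public graph_objects namespace rather than instantiated from this private module; a hedged sketch follows, with invented figure data:

# Hedged sketch: setting the colorbar tick font on a Scattergeo trace.
import plotly.graph_objects as go
fig = go.Figure(go.Scattergeo(
    lon=[-73.57, 2.35],
    lat=[45.50, 48.86],
    marker=dict(
        color=[1, 2],
        showscale=True,
        colorbar=dict(tickfont=dict(family="Arial", size=12, color="black")),
    ),
))
fig.show()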
avg_line_length: 37.328947 | max_line_length: 82 | alphanum_fraction: 0.568793
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c4379b36a66a2a9b70d4fc271af6a45e07df62d | size: 519 | ext: py | lang: Python
max_stars: verification/config.py in vinthedark/snet-marketplace-service @ 66ed9d093b00f09d3e28ef4d86c4e4c125037d06, licenses ["MIT"], count null, events null to null
max_issues: same path, repo, and head; licenses ["MIT"], count null, events null to null
max_forks: same path, repo, and head; licenses ["MIT"], count null, events null to null
content:
NETWORK_ID = 0
NETWORKS = {
    0: {
        "name": "test",
        "http_provider": "https://ropsten.infura.io",
        "ws_provider": "wss://ropsten.infura.io/ws",
        "db": {
            "DB_HOST": "localhost",
            "DB_USER": "unittest_root",
            "DB_PASSWORD": "unittest_pwd",
            "DB_NAME": "unittest_db",
            "DB_PORT": 3306,
        },
    }
}
SLACK_HOOK = {}
# TRULIOO
TRULIOO_BASE_URL = ""
TRULIOO_API_KEY = ""
# Passbase
PASSBASE_BASE_URL = ""
PASSBASE_API_SECRET = ""
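A hedged sketch of how such a config module is typically consumed; the import path assumes the repository's verification package is importable, and the DSN format is purely illustrative:

# Hypothetical consumer: pick the active network and build a DB DSN.
from verification import config
db = config.NETWORKS[config.NETWORK_ID]["db"]
dsn = "mysql://%s:%s@%s:%d/%s" % (
    db["DB_USER"], db["DB_PASSWORD"], db["DB_HOST"], db["DB_PORT"], db["DB_NAME"])
print(dsn)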
avg_line_length: 20.76 | max_line_length: 53 | alphanum_fraction: 0.527938
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c437a608b0328b405a63a31a1f68dc38ea065f4 | size: 1,462 | ext: py | lang: Python
max_stars: HW5/181041025/code/Question1.py in perihanmirkelam/CSE505_Algorithms @ 07f58c27e7869697c5fdadc6fa2fffbdd8f18c69, licenses ["Apache-2.0"], count null, events null to null
max_issues: same path, repo, and head; licenses ["Apache-2.0"], count null, events null to null
max_forks: same path, repo, and head; licenses ["Apache-2.0"], count null, events null to null
content:
#!/usr/bin/env python
# coding: utf-8
# In[29]:
def mergeSort(jobs):
    if len(jobs) > 1:
        mid = len(jobs) // 2
        lefthalf = jobs[:mid]
        righthalf = jobs[mid:]
        mergeSort(lefthalf)
        mergeSort(righthalf)
        i = 0
        j = 0
        k = 0
        while i < len(lefthalf) and j < len(righthalf):
            if lefthalf[i][weight]/lefthalf[i][time] > righthalf[j][weight]/righthalf[j][time]:  # sort by weight/time ratio, largest first
                jobs[k] = lefthalf[i]
                i = i + 1
            else:
                jobs[k] = righthalf[j]
                j = j + 1
            k = k + 1
        while i < len(lefthalf):
            jobs[k] = lefthalf[i]
            i = i + 1
            k = k + 1
        while j < len(righthalf):
            jobs[k] = righthalf[j]
            j = j + 1
            k = k + 1
def findOptimalCompletionTime(jobs):
    mergeSort(jobs)
    print("Scheduled jobs list: ", jobs)
    completionTime = 0
    for i in range(0, len(jobs)):
        tempTime = 0
        for j in range(0, i + 1):
            tempTime += jobs[j][time]
        completionTime += tempTime * jobs[i][weight]
        print("Job ", i + 1, ":", jobs[i], "has completed at time:", tempTime)
    print("Minimized weighted sum of completion time: ", completionTime)
time = 0
weight = 1
jobs = [[3, 5], [1, 2], [4, 5], [2, 8]]  # jobs[i] = [time_i, weight_i]
print("Let's schedule these jobs: ", jobs)
findOptimalCompletionTime(jobs)
# In[ ]:
# In[ ]:
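The merge sort above implements the classic greedy rule for minimizing the weighted sum of completion times: order jobs by decreasing weight/time ratio. A minimal sketch of the same ordering with Python's built-in sort, using the same jobs[i] = [time_i, weight_i] layout; the function name is ours, not the assignment's:

# Same greedy rule via sorted(); yields the minimized weighted sum.
def weighted_completion_time(jobs):
    ordered = sorted(jobs, key=lambda job: job[1] / job[0], reverse=True)
    total, clock = 0, 0
    for t, w in ordered:
        clock += t          # completion time of this job
        total += clock * w  # weighted contribution
    return total
print(weighted_completion_time([[3, 5], [1, 2], [4, 5], [2, 8]]))  # -> 102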
avg_line_length: 20.885714 | max_line_length: 116 | alphanum_fraction: 0.509576
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c437ba6984eef38fe6b7adf59af9db10fb5a08d | size: 1,643 | ext: py | lang: Python
max_stars: testprograms/MainLarge.py in jchmrt/ev3dev2simulator @ 3a8968162d1658a82860a613caf9986c5428b124, licenses ["MIT"], count 2, events 2020-07-14T01:31:14.000Z to 2021-02-22T19:14:12.000Z
max_issues: same path, repo, and head; licenses ["MIT"], count 19, events 2020-02-16T08:11:23.000Z to 2020-12-10T10:06:36.000Z
max_forks: same path, repo, and head; licenses ["MIT"], count 10, events 2020-03-02T08:37:29.000Z to 2022-03-06T03:49:07.000Z
content:
#!/usr/bin/env python3
from time import sleep
from ev3dev2._platform.ev3 import INPUT_1, INPUT_4, INPUT_2, OUTPUT_B
from ev3dev2.led import Leds
from ev3dev2.motor import MoveTank, OUTPUT_A, OUTPUT_D, SpeedPercent, MoveDifferential, MediumMotor
from ev3dev2.sensor.lego import ColorSensor
from ev3dev2.sensor.lego import TouchSensor
from ev3dev2.unit import STUD_MM
from ev3dev2.wheel import EV3EducationSetTire
from testprograms.BluetoothHelper import BluetoothHelper
def reverseRotations(rotations):
    tank_drive.on_for_rotations(SpeedPercent(-35), SpeedPercent(-35), rotations)
def rotateDegrees(degrees):
    tank_drive.turn_left(SpeedPercent(40), degrees)
def drive():
    tank_drive.on(SpeedPercent(30), SpeedPercent(30))
def measurementOn():
    tank_measurement.on_to_position(20, -100)
def measurementOff():
    tank_measurement.on_to_position(20, 100)
def checkColor():
    if cs.color != 1:
        print('gg')
        leds.set_color("RIGHT", "AMBER")
        tank_drive.stop()
        measurementOn()
        measurementOff()
        reverseRotations(1)
        rotateDegrees(180)
        drive()
    else:
        leds.set_color("RIGHT", "GREEN")
def measurement_on():
    tank_measurement.on_to_position(20, -100)
def measurement_off():
    tank_measurement.on_to_position(20, 100)
def check():
    while True:
        checkColor()
bth = BluetoothHelper()
bth.connect_as_server()
bth.send("Hello?")
leds = Leds()
# leds.animate_rainbow()
cs = ColorSensor(INPUT_2)
tank_drive = MoveDifferential(OUTPUT_A, OUTPUT_D, EV3EducationSetTire, 15 * STUD_MM)
tank_measurement = MediumMotor(OUTPUT_B)
drive()
check()
avg_line_length: 23.140845 | max_line_length: 99 | alphanum_fraction: 0.734632
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c437be4b3a4e05e897a8c92ea9e144eef263140 | size: 3,122 | ext: py | lang: Python
max_stars: NiaPy/benchmarks/happyCat.py in lucijabrezocnik/NiaPy @ 1582d1af835c022c77224ea0234178a399efc106, licenses ["MIT"], count null, events null to null
max_issues: same path, repo, and head; licenses ["MIT"], count null, events null to null
max_forks: same path, repo, and head; licenses ["MIT"], count 1, events 2018-06-13T08:10:23.000Z to 2018-06-13T08:10:23.000Z
content:
# encoding=utf8
"""Implementation of Happy Cat function."""
import math
from NiaPy.benchmarks.benchmark import Benchmark
__all__ = ['HappyCat']
class HappyCat(Benchmark):
    r"""Implementation of Happy cat function.
    Date: 2018
    Author: Lucija Brezočnik
    License: MIT
    Function: **Happy cat function**
        :math:`f(\mathbf{x}) = {\left |\sum_{i = 1}^D {x_i}^2 - D \right|}^{1/4} + (0.5 \sum_{i = 1}^D {x_i}^2 + \sum_{i = 1}^D x_i) / D + 0.5`
    **Input domain:**
        The function can be defined on any input domain but it is usually
        evaluated on the hypercube :math:`x_i ∈ [-100, 100]`, for all :math:`i = 1, 2,..., D`.
    **Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (-1,...,-1)`
    LaTeX formats:
        Inline:
            $f(\mathbf{x}) = {\left|\sum_{i = 1}^D {x_i}^2 -
            D \right|}^{1/4} + (0.5 \sum_{i = 1}^D {x_i}^2 +
            \sum_{i = 1}^D x_i) / D + 0.5$
        Equation:
            \begin{equation} f(\mathbf{x}) = {\left| \sum_{i = 1}^D {x_i}^2 -
            D \right|}^{1/4} + (0.5 \sum_{i = 1}^D {x_i}^2 +
            \sum_{i = 1}^D x_i) / D + 0.5 \end{equation}
        Domain:
            $-100 \leq x_i \leq 100$
    Reference: http://bee22.com/manual/tf_images/Liang%20CEC2014.pdf &
        Beyer, H. G., & Finck, S. (2012). HappyCat - A Simple Function Class Where Well-Known Direct Search Algorithms Do Fail.
        In International Conference on Parallel Problem Solving from Nature (pp. 367-376). Springer, Berlin, Heidelberg.
    """
    Name = ['HappyCat']
    def __init__(self, Lower=-100.0, Upper=100.0):
        r"""Initialize of Happy cat benchmark.
        Args:
            Lower (Optional[float]): Lower bound of problem.
            Upper (Optional[float]): Upper bound of problem.
        See Also:
            :func:`NiaPy.benchmarks.Benchmark.__init__`
        """
        Benchmark.__init__(self, Lower, Upper)
    @staticmethod
    def latex_code():
        r"""Return the latex code of the problem.
        Returns:
            str: Latex code
        """
        return r'''$f(\mathbf{x}) = {\left|\sum_{i = 1}^D {x_i}^2 -
                D \right|}^{1/4} + (0.5 \sum_{i = 1}^D {x_i}^2 +
                \sum_{i = 1}^D x_i) / D + 0.5$'''
    def function(self):
        r"""Return benchmark evaluation function.
        Returns:
            Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function
        """
        def evaluate(D, sol):
            r"""Fitness function.
            Args:
                D (int): Dimensionality of the problem
                sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.
            Returns:
                float: Fitness value for the solution.
            """
            val1 = 0.0
            val2 = 0.0
            alpha = 0.125
            for i in range(D):
                val1 += math.pow(abs(math.pow(sol[i], 2) - D), alpha)
                val2 += (0.5 * math.pow(sol[i], 2) + sol[i]) / D
            return val1 + val2 + 0.5
        return evaluate
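Note that the loop applies the exponent per element, while the docstring's formula raises the whole sum to the 1/4 power. A hedged NumPy sketch of the documented formula, independent of NiaPy and written by us for comparison:

# Vectorized Happy Cat per the documented formula; 0.0 at x = (-1, ..., -1).
import numpy as np
def happy_cat(sol, alpha=0.125):
    sol = np.asarray(sol, dtype=float)
    d = sol.size
    norm_sq = float(np.sum(sol ** 2))
    return abs(norm_sq - d) ** alpha + (0.5 * norm_sq + float(np.sum(sol))) / d + 0.5
print(happy_cat([-1.0, -1.0, -1.0]))  # -> 0.0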
avg_line_length: 31.22 | max_line_length: 143 | alphanum_fraction: 0.516976
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c437de91b99c2c41a0ee03cc7cec5424707557b | size: 2,308 | ext: py | lang: Python
max_stars: skiptracer.py in EdwardDantes/skiptracer @ 7ed09b5a72e4a325d8d9fb76dd487ba4cbec00fb, licenses ["Apache-2.0"], count 912, events 2018-06-01T16:11:49.000Z to 2022-03-31T11:16:07.000Z
max_issues: same path, repo, and head; licenses ["Apache-2.0"], count 56, events 2018-06-02T22:35:14.000Z to 2021-12-13T19:48:17.000Z
max_forks: same path, repo, and head; licenses ["Apache-2.0"], count 210, events 2018-05-31T01:59:48.000Z to 2022-03-12T04:54:46.000Z
content:
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from __future__ import print_function
from plugins.menus import menus
from plugins.banner import Logo
import sys
import signal
try:
    import __builtin__ as bi
except BaseException:
    import builtins as bi
import ast
from plugins.colors import BodyColors as bc
def signal_handler(signal, frame):
    print("")
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
bi.search_string = None
bi.lookup = None
bi.output = None
bi.outdata = dict()
bi.webproxy = None
bi.proxy = None
bi.debug = False
Logo().banner()
if __name__ == "__main__":  # If true, run main function of framework
    try:
        if str(bi.output).lower() == "y":
            bi.filename = raw_input(
                "[Please provide the filename for output? (somefile.txt|somefile.json)]: ")
        def writeout():
            import json
            try:
                pg.write_file(json.dumps(bi.outdata), bi.filename)
                print((" [" +
                       bc.CRED +
                       "X" +
                       bc.CEND +
                       "] " +
                       bc.CYLW +
                       " Output written to disk: ./%s\n" +
                       bc.CEND) %
                      bi.filename)
            except Exception as nowriteJSON:
                if bi.debug:
                    print((" [" +
                           bc.CRED +
                           "X" +
                           bc.CEND +
                           "] " +
                           bc.CYLW +
                           "Output failed to write to disk %s\n" +
                           bc.CEND) %
                          nowriteJSON)
                else:
                    print(
                        " [" +
                        bc.CRED +
                        "X" +
                        bc.CEND +
                        "] " +
                        bc.CYLW +
                        "Output failed to write to disk %s\n" +
                        bc.CEND)
        menus().intromenu()
    except Exception as failedmenu:
        print("Failed menu: %s" % (failedmenu))
        pass
avg_line_length: 29.21519 | max_line_length: 91 | alphanum_fraction: 0.403813
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c437eb70c6b093c670682aeef35f268915bba86 | size: 1,093 | ext: py | lang: Python
max_stars: python/cuml/common/timing_utils.py in garanews/cuml @ 318f521a1d2681f4622a44921d27b5f592fe4407, licenses ["Apache-2.0"], count 2,743, events 2018-10-11T17:28:58.000Z to 2022-03-31T19:20:50.000Z
max_issues: same path, repo, and head; licenses ["Apache-2.0"], count 4,280, events 2018-10-11T22:29:57.000Z to 2022-03-31T22:02:44.000Z
max_forks: same path, repo, and head; licenses ["Apache-2.0"], count 454, events 2018-10-11T17:40:56.000Z to 2022-03-25T17:07:09.000Z
content:
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from contextlib import contextmanager
# Helper function for timing blocks of code.
@contextmanager
def timed(name):
    """
    For timing blocks of code.
    Examples
    --------
    .. code-block:: python
        with timed("Print Call"):
            print("Hello, World")
    Output:
    .. code-block:: python
        Hello, World
        ..Print Call : 0.0005
    """
    t0 = time.time()
    yield
    t1 = time.time()
    print("..%-24s: %8.4f" % (name, t1 - t0))
avg_line_length: 24.288889 | max_line_length: 74 | alphanum_fraction: 0.656908
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c437ee24f843523d90bf668b10ab64b5712089c | size: 6,624 | ext: py | lang: Python
max_stars: tools/third_party/h2/h2/config.py in meyerweb/wpt @ f04261533819893c71289614c03434c06856c13e, licenses ["BSD-3-Clause"], count 14,668, events 2015-01-01T01:57:10.000Z to 2022-03-31T23:33:32.000Z
max_issues: same path, repo, and head; licenses ["BSD-3-Clause"], count 7,642, events 2018-05-28T09:38:03.000Z to 2022-03-31T20:55:48.000Z
max_forks: same path, repo, and head; licenses ["BSD-3-Clause"], count 5,941, events 2015-01-02T11:32:21.000Z to 2022-03-31T16:35:46.000Z
content:
# -*- coding: utf-8 -*-
"""
h2/config
~~~~~~~~~
Objects for controlling the configuration of the HTTP/2 stack.
"""
class _BooleanConfigOption(object):
    """
    Descriptor for handling a boolean config option. This will block
    attempts to set boolean config options to non-bools.
    """
    def __init__(self, name):
        self.name = name
        self.attr_name = '_%s' % self.name
    def __get__(self, instance, owner):
        return getattr(instance, self.attr_name)
    def __set__(self, instance, value):
        if not isinstance(value, bool):
            raise ValueError("%s must be a bool" % self.name)
        setattr(instance, self.attr_name, value)
class DummyLogger(object):
    """
    A Logger object that does no actual logging, hence a DummyLogger.
    For this class the log operation is merely a no-op. The intent is to avoid
    conditionals being sprinkled throughout the hyper-h2 code for calls to
    logging functions when no logger is passed into the corresponding object.
    """
    def __init__(self, *vargs):
        pass
    def debug(self, *vargs, **kwargs):
        """
        No-op logging. Only level needed for now.
        """
        pass
    def trace(self, *vargs, **kwargs):
        """
        No-op logging. Only level needed for now.
        """
        pass
class H2Configuration(object):
    """
    An object that controls the way a single HTTP/2 connection behaves.
    This object allows the users to customize behaviour. In particular, it
    allows users to enable or disable optional features, or to otherwise handle
    various unusual behaviours.
    This object has very little behaviour of its own: it mostly just ensures
    that configuration is self-consistent.
    :param client_side: Whether this object is to be used on the client side of
        a connection, or on the server side. Affects the logic used by the
        state machine, the default settings values, the allowable stream IDs,
        and several other properties. Defaults to ``True``.
    :type client_side: ``bool``
    :param header_encoding: Controls whether the headers emitted by this object
        in events are transparently decoded to ``unicode`` strings, and what
        encoding is used to do that decoding. This defaults to ``None``,
        meaning that headers will be returned as bytes. To automatically
        decode headers (that is, to return them as unicode strings), this can
        be set to the string name of any encoding, e.g. ``'utf-8'``.
        .. versionchanged:: 3.0.0
           Changed default value from ``'utf-8'`` to ``None``
    :type header_encoding: ``str``, ``False``, or ``None``
    :param validate_outbound_headers: Controls whether the headers emitted
        by this object are validated against the rules in RFC 7540.
        Disabling this setting will cause outbound header validation to
        be skipped, and allow the object to emit headers that may be illegal
        according to RFC 7540. Defaults to ``True``.
    :type validate_outbound_headers: ``bool``
    :param normalize_outbound_headers: Controls whether the headers emitted
        by this object are normalized before sending. Disabling this setting
        will cause outbound header normalization to be skipped, and allow
        the object to emit headers that may be illegal according to
        RFC 7540. Defaults to ``True``.
    :type normalize_outbound_headers: ``bool``
    :param validate_inbound_headers: Controls whether the headers received
        by this object are validated against the rules in RFC 7540.
        Disabling this setting will cause inbound header validation to
        be skipped, and allow the object to receive headers that may be illegal
        according to RFC 7540. Defaults to ``True``.
    :type validate_inbound_headers: ``bool``
    :param normalize_inbound_headers: Controls whether the headers received by
        this object are normalized according to the rules of RFC 7540.
        Disabling this setting may lead to hyper-h2 emitting header blocks that
        some RFCs forbid, e.g. with multiple cookie fields.
        .. versionadded:: 3.0.0
    :type normalize_inbound_headers: ``bool``
    :param logger: A logger that conforms to the requirements for this module,
        those being no I/O and no context switches, which is needed in order
        to run in asynchronous operation.
        .. versionadded:: 2.6.0
    :type logger: ``logging.Logger``
    """
    client_side = _BooleanConfigOption('client_side')
    validate_outbound_headers = _BooleanConfigOption(
        'validate_outbound_headers'
    )
    normalize_outbound_headers = _BooleanConfigOption(
        'normalize_outbound_headers'
    )
    validate_inbound_headers = _BooleanConfigOption(
        'validate_inbound_headers'
    )
    normalize_inbound_headers = _BooleanConfigOption(
        'normalize_inbound_headers'
    )
    def __init__(self,
                 client_side=True,
                 header_encoding=None,
                 validate_outbound_headers=True,
                 normalize_outbound_headers=True,
                 validate_inbound_headers=True,
                 normalize_inbound_headers=True,
                 logger=None):
        self.client_side = client_side
        self.header_encoding = header_encoding
        self.validate_outbound_headers = validate_outbound_headers
        self.normalize_outbound_headers = normalize_outbound_headers
        self.validate_inbound_headers = validate_inbound_headers
        self.normalize_inbound_headers = normalize_inbound_headers
        self.logger = logger or DummyLogger(__name__)
    @property
    def header_encoding(self):
        """
        Controls whether the headers emitted by this object in events are
        transparently decoded to ``unicode`` strings, and what encoding is used
        to do that decoding. This defaults to ``None``, meaning that headers
        will be returned as bytes. To automatically decode headers (that is, to
        return them as unicode strings), this can be set to the string name of
        any encoding, e.g. ``'utf-8'``.
        """
        return self._header_encoding
    @header_encoding.setter
    def header_encoding(self, value):
        """
        Enforces constraints on the value of header encoding.
        """
        if not isinstance(value, (bool, str, type(None))):
            raise ValueError("header_encoding must be bool, string, or None")
        if value is True:
            raise ValueError("header_encoding cannot be True")
        self._header_encoding = value
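A hedged sketch of how this configuration object is consumed in hyper-h2: H2Connection accepts it via its config argument, and the values shown are one plausible server-side setup, not the only one.

# Server-side configuration with transparent UTF-8 header decoding.
import h2.config
import h2.connection
config = h2.config.H2Configuration(client_side=False, header_encoding="utf-8")
conn = h2.connection.H2Connection(config=config)
conn.initiate_connection()
outbound = conn.data_to_send()  # raw bytes to write to the transport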
avg_line_length: 38.736842 | max_line_length: 79 | alphanum_fraction: 0.677083
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c437fe59fd92943c7f968764ae61bfd0c5db224 | size: 119 | ext: py | lang: Python
max_stars: mybrowser/session/__init__.py in joeledwardson/betfair-browser @ b641f134e60307250a0e51bafa849422ecf5264b, licenses ["MIT"], count 3, events 2021-11-23T19:03:02.000Z to 2021-11-24T08:44:23.000Z
max_issues: same path, repo, and head; licenses ["MIT"], count 2, events 2021-11-23T18:47:31.000Z to 2021-12-08T15:36:11.000Z
max_forks: same path, repo, and head; licenses ["MIT"], count null, events null to null
content:
from .session import Session, LoadedMarket
from .notifications import NotificationType, Notification, post_notification
avg_line_length: 59.5 | max_line_length: 76 | alphanum_fraction: 0.87395
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c4380ab2ee431b04f09eea7954a8d0ac2abc23c | size: 4,264 | ext: py | lang: Python
max_stars: tests/h/nipsa/worker_test.py in project-star/h @ fd1decafdce981b681ef3bd59e001b1284498dae, licenses ["MIT"], count 1, events 2017-06-16T14:01:28.000Z to 2017-06-16T14:01:28.000Z
max_issues: same path, repo, and head; licenses ["MIT"], count null, events null to null
max_forks: same path, repo, and head; licenses ["MIT"], count 1, events 2020-02-16T08:04:56.000Z to 2020-02-16T08:04:56.000Z
content:
# -*- coding: utf-8 -*-
import mock
from h.nipsa import worker
def test_add_nipsa_action():
action = worker.add_nipsa_action("foo", {"_id": "test_id"})
assert action == {
"_op_type": "update",
"_index": "foo",
"_type": "annotation",
"_id": "test_id",
"doc": {"nipsa": True}
}
def test_remove_nipsa_action():
annotation = {"_id": "test_id", "_source": {"nipsa": True, "foo": "bar"}}
action = worker.remove_nipsa_action("bar", annotation)
assert action == {
"_op_type": "index",
"_index": "bar",
"_type": "annotation",
"_id": "test_id",
"_source": {"foo": "bar"},
}
@mock.patch("h.nipsa.worker.helpers", autospec=True)
def test_bulk_update_annotations_scans_with_query(helpers):
client = mock.Mock(spec_set=['conn', 'index'])
worker.bulk_update_annotations(client=client,
query=mock.sentinel.query,
action=mock.sentinel.action)
helpers.scan.assert_called_once_with(client=client.conn,
index=client.index,
query=mock.sentinel.query)
@mock.patch("h.nipsa.worker.helpers", autospec=True)
def test_bulk_update_annotations_generates_actions_for_each_annotation(helpers):
action = mock.Mock(spec_set=[])
client = mock.Mock(spec_set=['conn', 'index'])
helpers.scan.return_value = [mock.sentinel.anno1,
mock.sentinel.anno2,
mock.sentinel.anno3]
worker.bulk_update_annotations(client=client,
query=mock.sentinel.query,
action=action)
assert action.call_args_list == [
mock.call(client.index, mock.sentinel.anno1),
mock.call(client.index, mock.sentinel.anno2),
mock.call(client.index, mock.sentinel.anno3),
]
@mock.patch("h.nipsa.worker.helpers", autospec=True)
def test_bulk_update_annotations_calls_bulk_with_actions(helpers):
action = mock.Mock(spec_set=[], side_effect=[
mock.sentinel.action1,
mock.sentinel.action2,
mock.sentinel.action3,
])
client = mock.Mock(spec_set=['conn', 'index'])
helpers.scan.return_value = [mock.sentinel.anno1,
mock.sentinel.anno2,
mock.sentinel.anno3]
worker.bulk_update_annotations(client=client,
query=mock.sentinel.query,
action=action)
helpers.bulk.assert_called_once_with(client=client.conn,
actions=[mock.sentinel.action1,
mock.sentinel.action2,
mock.sentinel.action3])
@mock.patch("h.nipsa.worker.bulk_update_annotations", autospec=True)
@mock.patch("h.nipsa.worker.celery", autospec=True)
@mock.patch("h.nipsa.worker.search", autospec=True)
class TestAddNipsa(object):
def test_calls_bulk_update_annotations(self, search, celery, bulk):
celery.request = mock.Mock(spec_set=['feature', 'es'])
celery.request.feature.return_value = True
expected_query = search.not_nipsad_annotations('acct:jeannie@example.com')
worker.add_nipsa('acct:jeannie@example.com')
bulk.assert_any_call(celery.request.es,
expected_query,
worker.add_nipsa_action)
@mock.patch("h.nipsa.worker.bulk_update_annotations", autospec=True)
@mock.patch("h.nipsa.worker.celery", autospec=True)
@mock.patch("h.nipsa.worker.search", autospec=True)
class TestRemoveNipsa(object):
def test_remove_nipsa_calls_bulk_update_annotations(self, search, celery, bulk):
celery.request = mock.Mock(spec_set=['feature', 'es'])
celery.request.feature.return_value = True
expected_query = search.nipsad_annotations('acct:jeannie@example.com')
worker.remove_nipsa('acct:jeannie@example.com')
bulk.assert_any_call(celery.request.es,
expected_query,
worker.remove_nipsa_action)
| 36.758621
| 84
| 0.594512
|
import mock
from h.nipsa import worker
def test_add_nipsa_action():
action = worker.add_nipsa_action("foo", {"_id": "test_id"})
assert action == {
"_op_type": "update",
"_index": "foo",
"_type": "annotation",
"_id": "test_id",
"doc": {"nipsa": True}
}
def test_remove_nipsa_action():
annotation = {"_id": "test_id", "_source": {"nipsa": True, "foo": "bar"}}
action = worker.remove_nipsa_action("bar", annotation)
assert action == {
"_op_type": "index",
"_index": "bar",
"_type": "annotation",
"_id": "test_id",
"_source": {"foo": "bar"},
}
@mock.patch("h.nipsa.worker.helpers", autospec=True)
def test_bulk_update_annotations_scans_with_query(helpers):
client = mock.Mock(spec_set=['conn', 'index'])
worker.bulk_update_annotations(client=client,
query=mock.sentinel.query,
action=mock.sentinel.action)
helpers.scan.assert_called_once_with(client=client.conn,
index=client.index,
query=mock.sentinel.query)
@mock.patch("h.nipsa.worker.helpers", autospec=True)
def test_bulk_update_annotations_generates_actions_for_each_annotation(helpers):
action = mock.Mock(spec_set=[])
client = mock.Mock(spec_set=['conn', 'index'])
helpers.scan.return_value = [mock.sentinel.anno1,
mock.sentinel.anno2,
mock.sentinel.anno3]
worker.bulk_update_annotations(client=client,
query=mock.sentinel.query,
action=action)
assert action.call_args_list == [
mock.call(client.index, mock.sentinel.anno1),
mock.call(client.index, mock.sentinel.anno2),
mock.call(client.index, mock.sentinel.anno3),
]
@mock.patch("h.nipsa.worker.helpers", autospec=True)
def test_bulk_update_annotations_calls_bulk_with_actions(helpers):
action = mock.Mock(spec_set=[], side_effect=[
mock.sentinel.action1,
mock.sentinel.action2,
mock.sentinel.action3,
])
client = mock.Mock(spec_set=['conn', 'index'])
helpers.scan.return_value = [mock.sentinel.anno1,
mock.sentinel.anno2,
mock.sentinel.anno3]
worker.bulk_update_annotations(client=client,
query=mock.sentinel.query,
action=action)
helpers.bulk.assert_called_once_with(client=client.conn,
actions=[mock.sentinel.action1,
mock.sentinel.action2,
mock.sentinel.action3])
@mock.patch("h.nipsa.worker.bulk_update_annotations", autospec=True)
@mock.patch("h.nipsa.worker.celery", autospec=True)
@mock.patch("h.nipsa.worker.search", autospec=True)
class TestAddNipsa(object):
def test_calls_bulk_update_annotations(self, search, celery, bulk):
celery.request = mock.Mock(spec_set=['feature', 'es'])
celery.request.feature.return_value = True
expected_query = search.not_nipsad_annotations('acct:jeannie@example.com')
worker.add_nipsa('acct:jeannie@example.com')
bulk.assert_any_call(celery.request.es,
expected_query,
worker.add_nipsa_action)
@mock.patch("h.nipsa.worker.bulk_update_annotations", autospec=True)
@mock.patch("h.nipsa.worker.celery", autospec=True)
@mock.patch("h.nipsa.worker.search", autospec=True)
class TestRemoveNipsa(object):
def test_remove_nipsa_calls_bulk_update_annotations(self, search, celery, bulk):
celery.request = mock.Mock(spec_set=['feature', 'es'])
celery.request.feature.return_value = True
expected_query = search.nipsad_annotations('acct:jeannie@example.com')
worker.remove_nipsa('acct:jeannie@example.com')
bulk.assert_any_call(celery.request.es,
expected_query,
worker.remove_nipsa_action)
| true
| true
|
1c4380b7b5eed26f0ed5dae4702e6fb59ca6da6e
| 27,145
|
py
|
Python
|
general_util/utils.py
|
alibaba/Retrieval-based-Pre-training-for-Machine-Reading-Comprehension
|
b27dc55446a29a53af7fffdad8628ccb545420da
|
[
"Apache-2.0"
] | 7
|
2021-06-16T01:40:23.000Z
|
2021-12-04T02:40:35.000Z
|
general_util/utils.py
|
SparkJiao/Retrieval-based-Pre-training-for-Machine-Reading-Comprehension
|
9ccad31bd0bf2216004cf729d1d511fc3e0b77c9
|
[
"Apache-2.0"
] | 1
|
2021-08-16T09:10:05.000Z
|
2021-08-25T08:44:44.000Z
|
general_util/utils.py
|
SparkJiao/Retrieval-based-Pre-training-for-Machine-Reading-Comprehension
|
9ccad31bd0bf2216004cf729d1d511fc3e0b77c9
|
[
"Apache-2.0"
] | 3
|
2021-09-13T02:03:37.000Z
|
2021-10-11T18:48:21.000Z
|
import collections
import json
import math
import random
import re
import string
import torch
from collections import Counter
from torch.nn.functional import softmax
from typing import List, Callable, Tuple, Any, Optional
try:
from pytorch_pretrained_bert.tokenization import BasicTokenizer
except ImportError:
from transformers import BasicTokenizer
# Named tuple list
DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
def add_sentence_separator(doc_tokens: List[str], sentence_span_list: List[Tuple[int, int]], separator: str = '[SEP]'):
new_doc_tokens = []
separator_positions = []
new_sentence_span_list = []
for sen_idx, (span_start, span_end) in enumerate(sentence_span_list):
new_doc_tokens.extend(doc_tokens[span_start: span_end + 1])
if sen_idx != 0:
span_start = span_start - 1
new_sentence_span_list.append((span_start, span_end))
separator_positions.append(len(new_doc_tokens))
new_doc_tokens.append(separator)
return new_doc_tokens, separator_positions[:-1], new_sentence_span_list
# def set_random_seed(seed: int = None):
# random.seed(seed)
def remove_all_evidence(sentence_span_list, doc_tokens, evidences):
evidences.sort(reverse=False)
for index, evidence in enumerate(evidences):
evi_token_s, evi_token_e = sentence_span_list[evidence]
doc_tokens = doc_tokens[:evi_token_s] + doc_tokens[(evi_token_e + 1):]
reduce_offset = evi_token_e - evi_token_s + 1
sentence_span_list = sentence_span_list[:evidence] + [(s - reduce_offset, e - reduce_offset)
for s, e in sentence_span_list[(evidence + 1):]]
for pointer in range(index + 1, len(evidences)):
evidences[pointer] -= 1
return doc_tokens, sentence_span_list
def generate_random_seq(seq_len_a: int, seq_len_b: int):
seq_a = [0] * seq_len_a
seq_b = [1] * seq_len_b
seq = seq_a + seq_b
# _set_random_seed(seed)
random.shuffle(seq)
return seq
def random_sample(seq, sample_length: int):
# _set_random_seed(seed)
return random.sample(seq, sample_length)
def generate_seq_with_negative_sample(initial_seq: List[Any], negative_seq: List[Any], sample_ratio: float,
target_index: int = -1):
sampling_length = int(len(initial_seq) * sample_ratio)
negative_samples = random_sample(negative_seq, sampling_length)
random_new_seq_label = generate_random_seq(len(initial_seq), sampling_length)
random_new_seq = []
new_target_index = -1
positive_pointer = 0
negative_pointer = 0
orig_token_map = []
orig_total_tokens = 0
new_total_tokens = 0
for idx, num in enumerate(random_new_seq_label):
if num == 0:
for i in range(len(initial_seq[positive_pointer])):
orig_token_map.append(new_total_tokens + i)
orig_total_tokens += len(initial_seq[positive_pointer])
new_total_tokens += len(initial_seq[positive_pointer])
random_new_seq.append(initial_seq[positive_pointer])
if new_target_index == -1 and positive_pointer == target_index:
new_target_index = len(random_new_seq) - 1
positive_pointer += 1
else:
new_total_tokens += len(negative_samples[negative_pointer])
random_new_seq.append(negative_samples[negative_pointer])
negative_pointer += 1
random_new_tokens = []
sentence_span_list = []
for sentence in random_new_seq:
start = len(random_new_tokens)
end = start + len(sentence) - 1
sentence_span_list.append((start, end))
random_new_tokens.extend(sentence)
assert len(sentence_span_list) == len(random_new_seq)
assert len(sentence_span_list) == len(random_new_seq_label)
return random_new_tokens, random_new_seq_label, new_target_index, sentence_span_list, orig_token_map
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
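# --- Hedged usage sketch (added for illustration; not part of the original
# file): normalize_answer lowercases, strips punctuation, drops the articles
# "a"/"an"/"the", and collapses whitespace.
def _normalize_answer_demo():
    assert normalize_answer("The  Quick, Brown Fox!") == "quick brown fox"
    assert normalize_answer("an answer.") == "answer"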
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def is_punctuation(c):
# Don't contains '-' compared with string.punctuation
punc = ['!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/',
':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{',
'|', '}', '~']
if c in punc:
return True
return False
def split_sentence(context, sen_tokenizer):
sentences = sen_tokenizer.tokenize(context)
sen_start_list = []
sen_end_list = []
for sen in sentences:
s = context.find(sen)
assert s != -1
e = s + len(sen) - 1
sen_start_list.append(s)
sen_end_list.append(e)
return sen_start_list, sen_end_list
def improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end
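# --- Hedged usage sketch (added for illustration; not part of the original
# file). It reproduces the docstring example above with a toy whitespace
# tokenizer (`_WsTok` is a hypothetical stand-in for a WordPiece tokenizer):
class _WsTok(object):
    def tokenize(self, text):
        return text.split()
def _improve_answer_span_demo():
    doc = ['The', 'leader', 'was', 'John', 'Smith',
           '(', '1895', '-', '1943', ')', '.']
    # The loose span covers "( 1895 - 1943 ) ."; the improved span is "1895".
    assert improve_answer_span(doc, 5, 10, _WsTok(), '1895') == (6, 6)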
def check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
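# --- Hedged usage sketch (added for illustration; not part of the original
# file). It reproduces the comment's example: the token 'bought'
# (position 7) has maximum context in span C, not in span B:
def _check_is_max_context_demo():
    spans = [DocSpan(start=0, length=5),   # span A: the man went to the
             DocSpan(start=3, length=5),   # span B: to the store and bought
             DocSpan(start=6, length=5)]   # span C: and bought a gallon of
    assert not check_is_max_context(spans, 1, 7)
    assert check_is_max_context(spans, 2, 7)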
def get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
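# --- Hedged usage sketch (added for illustration; not part of the original
# file): indexes are returned in descending order of their logits.
def _get_best_indexes_demo():
    assert get_best_indexes([0.1, 0.7, 0.4], n_best_size=2) == [1, 2]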
def get_final_text(pred_text, orig_text, do_lower_case, logger, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return ns_text, ns_to_s_map
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
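# --- Hedged usage sketch (added for illustration; not part of the original
# file). It reproduces the docstring example, assuming BasicTokenizer splits
# "Steve Smith's" into ['steve', 'smith', "'", 's'] as standard BERT code does:
def _get_final_text_demo():
    logger = logging.getLogger(__name__)
    final = get_final_text('steve smith', "Steve Smith's",
                           do_lower_case=True, logger=logger)
    assert final == 'Steve Smith'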
def compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
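# --- Hedged usage sketch (added for illustration; not part of the original
# file): equal logits yield equal probabilities that sum to one.
def _compute_softmax_demo():
    probs = compute_softmax([1.0, 1.0])
    assert probs == [0.5, 0.5]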
def find_evidence_sentence(sentence_span_list: List[Tuple], rationale_start_position: int, rationale_end_position: int):
sentence_id = -1
over_size = 0
for sen_idx, (t_start, t_end) in enumerate(sentence_span_list):
if t_end < rationale_start_position:
continue
if t_start > rationale_end_position:
break
if rationale_start_position <= t_end <= rationale_end_position:
cur_size = t_end - max(rationale_start_position, t_start) + 1
if cur_size > over_size:
over_size = cur_size
sentence_id = sen_idx
elif rationale_start_position <= t_start <= rationale_end_position:
cur_size = rationale_end_position - max(rationale_start_position, t_start) + 1
if cur_size > over_size:
over_size = cur_size
sentence_id = sen_idx
return sentence_id
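# --- Hedged usage sketch (added for illustration; not part of the original
# file): the sentence with the largest token overlap with the rationale wins.
def _find_evidence_sentence_demo():
    # Rationale tokens 3..7 overlap (0, 4) by 2 tokens and (5, 9) by 3 tokens.
    assert find_evidence_sentence([(0, 4), (5, 9)], 3, 7) == 1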
def truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
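# --- Hedged usage sketch (added for illustration; not part of the original
# file): tokens are popped from the longer sequence until the pair fits.
def _truncate_seq_pair_demo():
    tokens_a, tokens_b = ['q1', 'q2'], ['d1', 'd2', 'd3', 'd4']
    truncate_seq_pair(tokens_a, tokens_b, max_length=4)
    assert tokens_a == ['q1', 'q2'] and tokens_b == ['d1', 'd2']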
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.item()
if isinstance(n, torch.Tensor):
n = n.item()
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save(self):
return {
'val': self.val,
'avg': self.avg,
'sum': self.sum,
'count': self.count
}
def load(self, value: dict):
if value is None:
self.reset()
self.val = value['val'] if 'val' in value else 0
self.avg = value['avg'] if 'avg' in value else 0
self.sum = value['sum'] if 'sum' in value else 0
self.count = value['count'] if 'count' in value else 0
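# --- Hedged usage sketch (added for illustration; not part of the original
# file): `n` weights each update, so the running average is per sample.
def _average_meter_demo():
    meter = AverageMeter()
    meter.update(1.0, n=2)  # two samples with value 1.0
    meter.update(4.0)       # one sample with value 4.0
    assert meter.count == 3 and meter.avg == 2.0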
class LogMetric(object):
"""
Record all metrics for logging.
"""
def __init__(self, *metric_names):
self.metrics = {
key: AverageMeter() for key in metric_names
}
def update(self, metric_name, val, n=1):
self.metrics[metric_name].update(val, n)
def reset(self, metric_name=None):
if metric_name is None:
for key in self.metrics.keys():
self.metrics[key].reset()
return
self.metrics[metric_name].reset()
def get_log(self):
log = {
key: self.metrics[key].avg for key in self.metrics
}
return log
class CategoricalAccuracy(object):
def __init__(self, label_list: List[str]):
self.predictions = Counter()
self.label_list = [label.lower() for label in label_list]
self.reset()
def reset(self):
self.predictions.clear()
@staticmethod
def _get_key(gold, pred) -> str:
return '{} - {}'.format(str(gold).lower(), str(pred).lower())
@staticmethod
    def _split_key(key: str) -> Tuple[str, str]:
strs = key.split(' - ')
return strs[0], strs[1]
def update(self, gold, pred):
self.predictions[self._get_key(gold, pred)] += 1
def __repr__(self):
return json.dumps(self.predictions, indent=2)
def f1_measure(self, positive_label, negative_label):
true_positive = self.predictions[self._get_key(positive_label, positive_label)]
false_positive = self.predictions[self._get_key(negative_label, positive_label)]
true_negative = self.predictions[self._get_key(negative_label, negative_label)]
false_negative = self.predictions[self._get_key(positive_label, negative_label)]
precision = float(true_positive) / float(true_positive + false_positive + 1e-13)
recall = float(true_positive) / float(true_positive + false_negative + 1e-13)
f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
accuracy = 1.0 * (true_positive + true_negative) / (
true_positive + true_negative + false_positive + false_negative)
result = {'precision': precision, 'recall': recall, 'f1': f1_measure, 'accuracy': accuracy}
return result
def read_predictions(self, ground_truths, predictions):
"""
:param ground_truths: ground_truths[(story_id, qid)]=List[List[answer_text]]
:param predictions: official format predictions
:return:
"""
for pred in predictions:
story_id = pred['id']
turn_id = pred['turn_id']
pred_text = CoQAEvaluator.normalize_answer(pred['answer'])
gold_text = CoQAEvaluator.normalize_answer(ground_truths[(story_id, turn_id)][0])
label_list = self.label_list
if pred_text not in label_list:
pred_label = 'not'
else:
pred_label = pred_text
if gold_text not in label_list:
gold_label = 'not'
else:
gold_label = gold_text
self.update(gold_label, pred_label)
class AttentionWeightWriter(object):
def __init__(self, log_file):
self.log_file = open(log_file, 'w')
def write_weights(self, attention_matrix: torch.Tensor, col_ids: torch.Tensor = None, row_ids: torch.Tensor = None,
col_mask: torch.Tensor = None, row_mask: torch.Tensor = None, id_to_str: Callable = None,
do_softmax: bool = False):
attn_matrix = attention_matrix.detach().cpu()
if do_softmax:
attn_matrix = softmax(attn_matrix, dim=-1)
else:
attn_matrix.exp_()
batch, len1, len2 = attn_matrix.size()
if col_ids is not None:
col_ids = col_ids.detach().cpu()
if row_ids is not None:
row_ids = row_ids.detach().cpu()
if col_mask is None:
col_mask = torch.zeros(batch, len1)
else:
col_mask = col_mask.detach().cpu()
if row_mask is None:
row_mask = torch.zeros(batch, len2)
else:
row_mask = row_mask.detach().cpu()
for batch_id in range(batch):
print('batch_id = {}\t'.format(batch_id), file=self.log_file)
row_is_null = []
for j in range(len2):
t_str = self.index_to_token(index=(batch_id, j), ids=row_ids, mask=row_mask, id_to_str=id_to_str)
if t_str is None:
row_is_null.append(True)
continue
else:
row_is_null.append(False)
print(t_str, end='\t', file=self.log_file)
print(file=self.log_file)
for i in range(len1):
col_t_str = self.index_to_token(index=(batch_id, i), ids=col_ids, mask=col_mask, id_to_str=id_to_str)
if col_t_str is None:
continue
else:
print(col_t_str, end='\t', file=self.log_file)
for j in range(len2):
if row_is_null[j]:
continue
else:
print(attn_matrix[batch_id, i, j].item(), end='\t', file=self.log_file)
print(file=self.log_file)
print('======================', file=self.log_file)
@staticmethod
def index_to_token(index, ids: torch.Tensor, mask: torch.Tensor, id_to_str: Callable = None):
if mask[index] == 1:
return None
else:
if ids is None:
token_id = index[-1]
return token_id
token_id = ids[index].item()
if id_to_str is not None:
return id_to_str(token_id)
else:
return token_id
class CategoricalAccuracyAllen(object):
"""
Categorical Top-K accuracy. Assumes integer labels, with
each item to be classified having a single correct class.
Tie break enables equal distribution of scores among the
    classes with the same maximum predicted score.
"""
def __init__(self, top_k: int = 1, tie_break: bool = False) -> None:
if top_k > 1 and tie_break:
raise RuntimeError("Tie break in Categorical Accuracy "
"can be done only for maximum (top_k = 1)")
if top_k <= 0:
raise RuntimeError("top_k passed to Categorical Accuracy must be > 0")
self._top_k = top_k
self._tie_break = tie_break
self.correct_count = 0.
self.total_count = 0.
def __call__(self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.Tensor] = None):
"""
Parameters
----------
predictions : ``torch.Tensor``, required.
A tensor of predictions of shape (batch_size, ..., num_classes).
gold_labels : ``torch.Tensor``, required.
A tensor of integer class label of shape (batch_size, ...). It must be the same
shape as the ``predictions`` tensor without the ``num_classes`` dimension.
mask: ``torch.Tensor``, optional (default = None).
A masking tensor the same size as ``gold_labels``.
"""
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
# Some sanity checks.
num_classes = predictions.size(-1)
if gold_labels.dim() != predictions.dim() - 1:
raise RuntimeError("gold_labels must have dimension == predictions.size() - 1 but "
"found tensor of shape: {}".format(predictions.size()))
if (gold_labels >= num_classes).any():
raise RuntimeError("A gold label passed to Categorical Accuracy contains an id >= {}, "
"the number of classes.".format(num_classes))
predictions = predictions.view((-1, num_classes))
gold_labels = gold_labels.view(-1).long()
if not self._tie_break:
# Top K indexes of the predictions (or fewer, if there aren't K of them).
# Special case topk == 1, because it's common and .max() is much faster than .topk().
if self._top_k == 1:
top_k = predictions.max(-1)[1].unsqueeze(-1)
else:
top_k = predictions.topk(min(self._top_k, predictions.shape[-1]), -1)[1]
# This is of shape (batch_size, ..., top_k).
correct = top_k.eq(gold_labels.unsqueeze(-1)).float()
else:
# prediction is correct if gold label falls on any of the max scores. distribute score by tie_counts
max_predictions = predictions.max(-1)[0]
max_predictions_mask = predictions.eq(max_predictions.unsqueeze(-1))
# max_predictions_mask is (rows X num_classes) and gold_labels is (batch_size)
# ith entry in gold_labels points to index (0-num_classes) for ith row in max_predictions
            # For each row, check whether the index pointed to by gold_label was 1 (among the max-scored classes)
correct = max_predictions_mask[torch.arange(gold_labels.numel()).long(), gold_labels].float()
tie_counts = max_predictions_mask.sum(-1)
correct /= tie_counts.float()
correct.unsqueeze_(-1)
if mask is not None:
correct *= mask.view(-1, 1).float()
self.total_count += mask.sum()
else:
self.total_count += gold_labels.numel()
self.correct_count += correct.sum()
def get_metric(self, reset: bool = False):
"""
Returns
-------
The accumulated accuracy.
"""
if self.total_count > 1e-12:
accuracy = float(self.correct_count) / float(self.total_count)
else:
accuracy = 0.0
if reset:
self.reset()
return accuracy
def reset(self):
self.correct_count = 0.0
self.total_count = 0.0
@staticmethod
def unwrap_to_tensors(*tensors: torch.Tensor):
"""
If you actually passed gradient-tracking Tensors to a Metric, there will be
a huge memory leak, because it will prevent garbage collection for the computation
graph. This method ensures that you're using tensors directly and that they are on
the CPU.
"""
return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
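# --- Hedged usage sketch (added for illustration; not part of the original
# file): with top_k=1, one of the two predictions below matches its gold
# label, so the accumulated accuracy is 0.5.
def _categorical_accuracy_demo():
    metric = CategoricalAccuracyAllen(top_k=1)
    predictions = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    gold_labels = torch.tensor([1, 1])  # the second prediction is wrong
    metric(predictions, gold_labels)
    assert abs(metric.get_metric() - 0.5) < 1e-9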
| 38.178622
| 121
| 0.605121
|
import collections
import json
import math
import random
import re
import string
import torch
from collections import Counter
from torch.nn.functional import softmax
from typing import List, Callable, Tuple, Any, Optional
try:
from pytorch_pretrained_bert.tokenization import BasicTokenizer
except ImportError:
from transformers import BasicTokenizer
DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
def add_sentence_separator(doc_tokens: List[str], sentence_span_list: List[Tuple[int, int]], separator: str = '[SEP]'):
new_doc_tokens = []
separator_positions = []
new_sentence_span_list = []
for sen_idx, (span_start, span_end) in enumerate(sentence_span_list):
new_doc_tokens.extend(doc_tokens[span_start: span_end + 1])
if sen_idx != 0:
span_start = span_start - 1
new_sentence_span_list.append((span_start, span_end))
separator_positions.append(len(new_doc_tokens))
new_doc_tokens.append(separator)
return new_doc_tokens, separator_positions[:-1], new_sentence_span_list
def remove_all_evidence(sentence_span_list, doc_tokens, evidences):
evidences.sort(reverse=False)
for index, evidence in enumerate(evidences):
evi_token_s, evi_token_e = sentence_span_list[evidence]
doc_tokens = doc_tokens[:evi_token_s] + doc_tokens[(evi_token_e + 1):]
reduce_offset = evi_token_e - evi_token_s + 1
sentence_span_list = sentence_span_list[:evidence] + [(s - reduce_offset, e - reduce_offset)
for s, e in sentence_span_list[(evidence + 1):]]
for pointer in range(index + 1, len(evidences)):
evidences[pointer] -= 1
return doc_tokens, sentence_span_list
def generate_random_seq(seq_len_a: int, seq_len_b: int):
seq_a = [0] * seq_len_a
seq_b = [1] * seq_len_b
seq = seq_a + seq_b
random.shuffle(seq)
return seq
def random_sample(seq, sample_length: int):
return random.sample(seq, sample_length)
def generate_seq_with_negative_sample(initial_seq: List[Any], negative_seq: List[Any], sample_ratio: float,
target_index: int = -1):
sampling_length = int(len(initial_seq) * sample_ratio)
negative_samples = random_sample(negative_seq, sampling_length)
random_new_seq_label = generate_random_seq(len(initial_seq), sampling_length)
random_new_seq = []
new_target_index = -1
positive_pointer = 0
negative_pointer = 0
orig_token_map = []
orig_total_tokens = 0
new_total_tokens = 0
for idx, num in enumerate(random_new_seq_label):
if num == 0:
for i in range(len(initial_seq[positive_pointer])):
orig_token_map.append(new_total_tokens + i)
orig_total_tokens += len(initial_seq[positive_pointer])
new_total_tokens += len(initial_seq[positive_pointer])
random_new_seq.append(initial_seq[positive_pointer])
if new_target_index == -1 and positive_pointer == target_index:
new_target_index = len(random_new_seq) - 1
positive_pointer += 1
else:
new_total_tokens += len(negative_samples[negative_pointer])
random_new_seq.append(negative_samples[negative_pointer])
negative_pointer += 1
random_new_tokens = []
sentence_span_list = []
for sentence in random_new_seq:
start = len(random_new_tokens)
end = start + len(sentence) - 1
sentence_span_list.append((start, end))
random_new_tokens.extend(sentence)
assert len(sentence_span_list) == len(random_new_seq)
assert len(sentence_span_list) == len(random_new_seq_label)
return random_new_tokens, random_new_seq_label, new_target_index, sentence_span_list, orig_token_map
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def is_punctuation(c):
punc = ['!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/',
':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{',
'|', '}', '~']
if c in punc:
return True
return False
def split_sentence(context, sen_tokenizer):
sentences = sen_tokenizer.tokenize(context)
sen_start_list = []
sen_end_list = []
for sen in sentences:
s = context.find(sen)
assert s != -1
e = s + len(sen) - 1
sen_start_list.append(s)
sen_end_list.append(e)
return sen_start_list, sen_end_list
def improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end
def check_is_max_context(doc_spans, cur_span_index, position):
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def get_best_indexes(logits, n_best_size):
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def get_final_text(pred_text, orig_text, do_lower_case, logger, verbose_logging=False):
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return ns_text, ns_to_s_map
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def compute_softmax(scores):
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def find_evidence_sentence(sentence_span_list: List[Tuple], rationale_start_position: int, rationale_end_position: int):
sentence_id = -1
over_size = 0
for sen_idx, (t_start, t_end) in enumerate(sentence_span_list):
if t_end < rationale_start_position:
continue
if t_start > rationale_end_position:
break
if rationale_start_position <= t_end <= rationale_end_position:
cur_size = t_end - max(rationale_start_position, t_start) + 1
if cur_size > over_size:
over_size = cur_size
sentence_id = sen_idx
elif rationale_start_position <= t_start <= rationale_end_position:
cur_size = rationale_end_position - max(rationale_start_position, t_start) + 1
if cur_size > over_size:
over_size = cur_size
sentence_id = sen_idx
return sentence_id
def truncate_seq_pair(tokens_a, tokens_b, max_length):
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class AverageMeter(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.item()
if isinstance(n, torch.Tensor):
n = n.item()
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save(self):
return {
'val': self.val,
'avg': self.avg,
'sum': self.sum,
'count': self.count
}
def load(self, value: dict):
if value is None:
self.reset()
self.val = value['val'] if 'val' in value else 0
self.avg = value['avg'] if 'avg' in value else 0
self.sum = value['sum'] if 'sum' in value else 0
self.count = value['count'] if 'count' in value else 0
class LogMetric(object):
def __init__(self, *metric_names):
self.metrics = {
key: AverageMeter() for key in metric_names
}
def update(self, metric_name, val, n=1):
self.metrics[metric_name].update(val, n)
def reset(self, metric_name=None):
if metric_name is None:
for key in self.metrics.keys():
self.metrics[key].reset()
return
self.metrics[metric_name].reset()
def get_log(self):
log = {
key: self.metrics[key].avg for key in self.metrics
}
return log
class CategoricalAccuracy(object):
def __init__(self, label_list: List[str]):
self.predictions = Counter()
self.label_list = [label.lower() for label in label_list]
self.reset()
def reset(self):
self.predictions.clear()
@staticmethod
def _get_key(gold, pred) -> str:
return '{} - {}'.format(str(gold).lower(), str(pred).lower())
@staticmethod
    def _split_key(key: str) -> Tuple[str, str]:
strs = key.split(' - ')
return strs[0], strs[1]
def update(self, gold, pred):
self.predictions[self._get_key(gold, pred)] += 1
def __repr__(self):
return json.dumps(self.predictions, indent=2)
def f1_measure(self, positive_label, negative_label):
true_positive = self.predictions[self._get_key(positive_label, positive_label)]
false_positive = self.predictions[self._get_key(negative_label, positive_label)]
true_negative = self.predictions[self._get_key(negative_label, negative_label)]
false_negative = self.predictions[self._get_key(positive_label, negative_label)]
precision = float(true_positive) / float(true_positive + false_positive + 1e-13)
recall = float(true_positive) / float(true_positive + false_negative + 1e-13)
f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
accuracy = 1.0 * (true_positive + true_negative) / (
true_positive + true_negative + false_positive + false_negative)
result = {'precision': precision, 'recall': recall, 'f1': f1_measure, 'accuracy': accuracy}
return result
def read_predictions(self, ground_truths, predictions):
for pred in predictions:
story_id = pred['id']
turn_id = pred['turn_id']
pred_text = CoQAEvaluator.normalize_answer(pred['answer'])
gold_text = CoQAEvaluator.normalize_answer(ground_truths[(story_id, turn_id)][0])
label_list = self.label_list
if pred_text not in label_list:
pred_label = 'not'
else:
pred_label = pred_text
if gold_text not in label_list:
gold_label = 'not'
else:
gold_label = gold_text
self.update(gold_label, pred_label)
class AttentionWeightWriter(object):
def __init__(self, log_file):
self.log_file = open(log_file, 'w')
def write_weights(self, attention_matrix: torch.Tensor, col_ids: torch.Tensor = None, row_ids: torch.Tensor = None,
col_mask: torch.Tensor = None, row_mask: torch.Tensor = None, id_to_str: Callable = None,
do_softmax: bool = False):
attn_matrix = attention_matrix.detach().cpu()
if do_softmax:
attn_matrix = softmax(attn_matrix, dim=-1)
else:
attn_matrix.exp_()
batch, len1, len2 = attn_matrix.size()
if col_ids is not None:
col_ids = col_ids.detach().cpu()
if row_ids is not None:
row_ids = row_ids.detach().cpu()
if col_mask is None:
col_mask = torch.zeros(batch, len1)
else:
col_mask = col_mask.detach().cpu()
if row_mask is None:
row_mask = torch.zeros(batch, len2)
else:
row_mask = row_mask.detach().cpu()
for batch_id in range(batch):
print('batch_id = {}\t'.format(batch_id), file=self.log_file)
row_is_null = []
for j in range(len2):
t_str = self.index_to_token(index=(batch_id, j), ids=row_ids, mask=row_mask, id_to_str=id_to_str)
if t_str is None:
row_is_null.append(True)
continue
else:
row_is_null.append(False)
print(t_str, end='\t', file=self.log_file)
print(file=self.log_file)
for i in range(len1):
col_t_str = self.index_to_token(index=(batch_id, i), ids=col_ids, mask=col_mask, id_to_str=id_to_str)
if col_t_str is None:
continue
else:
print(col_t_str, end='\t', file=self.log_file)
for j in range(len2):
if row_is_null[j]:
continue
else:
print(attn_matrix[batch_id, i, j].item(), end='\t', file=self.log_file)
print(file=self.log_file)
print('======================', file=self.log_file)
@staticmethod
def index_to_token(index, ids: torch.Tensor, mask: torch.Tensor, id_to_str: Callable = None):
if mask[index] == 1:
return None
else:
if ids is None:
token_id = index[-1]
return token_id
token_id = ids[index].item()
if id_to_str is not None:
return id_to_str(token_id)
else:
return token_id
class CategoricalAccuracyAllen(object):
def __init__(self, top_k: int = 1, tie_break: bool = False) -> None:
if top_k > 1 and tie_break:
raise RuntimeError("Tie break in Categorical Accuracy "
"can be done only for maximum (top_k = 1)")
if top_k <= 0:
raise RuntimeError("top_k passed to Categorical Accuracy must be > 0")
self._top_k = top_k
self._tie_break = tie_break
self.correct_count = 0.
self.total_count = 0.
def __call__(self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.Tensor] = None):
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
# Some sanity checks.
num_classes = predictions.size(-1)
if gold_labels.dim() != predictions.dim() - 1:
raise RuntimeError("gold_labels must have dimension == predictions.size() - 1 but "
"found tensor of shape: {}".format(predictions.size()))
if (gold_labels >= num_classes).any():
raise RuntimeError("A gold label passed to Categorical Accuracy contains an id >= {}, "
"the number of classes.".format(num_classes))
predictions = predictions.view((-1, num_classes))
gold_labels = gold_labels.view(-1).long()
if not self._tie_break:
# Top K indexes of the predictions (or fewer, if there aren't K of them).
# Special case topk == 1, because it's common and .max() is much faster than .topk().
if self._top_k == 1:
top_k = predictions.max(-1)[1].unsqueeze(-1)
else:
top_k = predictions.topk(min(self._top_k, predictions.shape[-1]), -1)[1]
# This is of shape (batch_size, ..., top_k).
correct = top_k.eq(gold_labels.unsqueeze(-1)).float()
else:
# prediction is correct if gold label falls on any of the max scores. distribute score by tie_counts
max_predictions = predictions.max(-1)[0]
max_predictions_mask = predictions.eq(max_predictions.unsqueeze(-1))
# max_predictions_mask is (rows X num_classes) and gold_labels is (batch_size)
# ith entry in gold_labels points to index (0-num_classes) for ith row in max_predictions
            # For each row, check whether the index pointed to by gold_label was 1 (among the max-scored classes)
correct = max_predictions_mask[torch.arange(gold_labels.numel()).long(), gold_labels].float()
tie_counts = max_predictions_mask.sum(-1)
correct /= tie_counts.float()
correct.unsqueeze_(-1)
if mask is not None:
correct *= mask.view(-1, 1).float()
self.total_count += mask.sum()
else:
self.total_count += gold_labels.numel()
self.correct_count += correct.sum()
def get_metric(self, reset: bool = False):
if self.total_count > 1e-12:
accuracy = float(self.correct_count) / float(self.total_count)
else:
accuracy = 0.0
if reset:
self.reset()
return accuracy
def reset(self):
self.correct_count = 0.0
self.total_count = 0.0
@staticmethod
def unwrap_to_tensors(*tensors: torch.Tensor):
return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
| true
| true
|
1c4382d435c7991b40ab8ee733f83b4502d0fea7
| 826
|
py
|
Python
|
src/pyams_zmi/zmi/breadcrumb.py
|
Py-AMS/pyams-zmi
|
0073d12062728efad3dc2b5cb40b0f75eacaaa1d
|
[
"ZPL-2.1"
] | null | null | null |
src/pyams_zmi/zmi/breadcrumb.py
|
Py-AMS/pyams-zmi
|
0073d12062728efad3dc2b5cb40b0f75eacaaa1d
|
[
"ZPL-2.1"
] | null | null | null |
src/pyams_zmi/zmi/breadcrumb.py
|
Py-AMS/pyams-zmi
|
0073d12062728efad3dc2b5cb40b0f75eacaaa1d
|
[
"ZPL-2.1"
] | null | null | null |
#
# Copyright (c) 2015-2021 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_*** module
"""
from pyams_skin.viewlet.breadcrumb import BreadcrumbsAdapter
from pyams_template.template import override_template
from pyams_zmi.interfaces import IAdminLayer
__docformat__ = 'restructuredtext'
override_template(BreadcrumbsAdapter,
template='templates/breadcrumbs.pt', layer=IAdminLayer)
| 30.592593
| 75
| 0.773608
|
from pyams_skin.viewlet.breadcrumb import BreadcrumbsAdapter
from pyams_template.template import override_template
from pyams_zmi.interfaces import IAdminLayer
__docformat__ = 'restructuredtext'
override_template(BreadcrumbsAdapter,
template='templates/breadcrumbs.pt', layer=IAdminLayer)
| true
| true
|
1c4382e3404c092cad820cea3891d6a1328a1167
| 7,167
|
py
|
Python
|
py/test/pytests/retrieve_config.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 3
|
2022-01-06T16:52:52.000Z
|
2022-03-07T11:30:47.000Z
|
py/test/pytests/retrieve_config.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | null | null | null |
py/test/pytests/retrieve_config.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 1
|
2021-10-24T01:47:22.000Z
|
2021-10-24T01:47:22.000Z
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Retrieve JSON config file from either an USB stick or a factory server.
Description
-----------
This pytest retrieves the config file from a specified source, so a following
pytest can use config_utils to load specific config data.
The config file can be fetched from two types of sources:
1. Factory server
2. USB stick
To fetch a config file from a factory server, you should put the config
file under the 'parameters' folder, and set `data_method` to
FACTORY_SERVER.
To fetch a config file from a USB stick, you can put the file on any partition
you want, as long as the partition and file system on the USB stick can be
recognized by the operating system. The `data_method` should be set to 'USB',
and if there are several partitions on the USB stick, the argument
`usb_dev_partition` should be set to specify the partition where you placed the
file.
Test Procedure
--------------
If `data_method` is set to 'FACTORY_SERVER', no action needs to be done.
If `data_method` is set to 'USB', then:
1. Insert the USB stick
2. Wait for completion
Dependency
----------
Depends on 'udev' and 'pyudev' python module to monitor USB insertion.
Examples
--------
Assume the config file is located at 'foo/bar.json' under the remote source
(i.e., a factory server, or a USB stick).
The JSON config can be loaded from the factory server by::
{
"pytest_name": "retrieve_config",
"args": {
"config_retrieve_path": "foo/bar.json"
}
}
To load the JSON config from a USB stick, add this in test list::
{
"pytest_name": "retrieve_config",
"args": {
"data_method": "USB",
"config_retrieve_path": "foo/bar.json"
}
}
"""
import logging
import os
import threading
import time
import unittest
from cros.factory.test import session
from cros.factory.test import server_proxy
from cros.factory.test.utils import media_utils
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import config_utils
from cros.factory.utils import file_utils
from cros.factory.utils import type_utils
DATA_METHOD = type_utils.Enum(['USB', 'FACTORY_SERVER'])
class RetrieveConfigException(Exception):
pass
class RetrieveConfig(unittest.TestCase):
"""RetrieveConfig main class.
  The USB data method is unstable and does not really fit our factory flow. We
  still keep this method for some specific use cases.
  If Arg config_save_name=None, RetrieveConfig will save the config file
  directly to config_save_dir, without keeping the relative directory
  structure it was retrieved from. For example,
Remote file structure:
...
|__ als
|__ als_fixture.schema.json
Then it will save the config to (assume config_save_dir=None),
var
|__ factory
|__ config
|__ als_fixture.schema.json
"""
ARGS = [
Arg('data_method',
DATA_METHOD,
'The method to retrieve config.',
default=DATA_METHOD.FACTORY_SERVER),
Arg('config_retrieve_path',
str,
'The path to the config file to retrieve from.'),
Arg('config_save_dir',
str,
        'The directory path in which to place the config file; '
        'defaults to RuntimeConfigDirectory in config_utils.json.',
default=None),
Arg('config_save_name',
str,
        'The config name saved in config_save_dir; the name should '
        'end with ".json". If None, defaults to its original name.',
default=None),
Arg('local_ip',
str,
'Local IP address for connecting to the factory server '
'when data_method = FACTORY_SERVER. Set as None to use DHCP.',
default=None),
Arg('usb_dev_partition',
int,
'The partition of the usb_dev_path to be mounted. If None, will try '
'to mount the usb_dev_path without partition number.',
default=None),
]
def setUp(self):
self.args.config_save_dir = (self.args.config_save_dir or
config_utils.GetRuntimeConfigDirectory())
self.args.config_save_name = self.args.config_save_name or os.path.basename(
self.args.config_retrieve_path)
if not self.args.config_save_name.endswith('.json'):
      raise RetrieveConfigException('Config name should end with ".json".')
self.config_save_path = os.path.join(self.args.config_save_dir,
self.args.config_save_name)
self.usb_dev_path = None
self.usb_ready_event = None
def runTest(self):
file_utils.TryMakeDirs(os.path.dirname(self.config_save_path))
if self.args.data_method == DATA_METHOD.USB:
self._RetrieveConfigFromUSB()
elif self.args.data_method == DATA_METHOD.FACTORY_SERVER:
self._RetrieveConfigFromFactoryServer()
else:
raise ValueError('Unknown data_method.')
def _RetrieveConfigFromFactoryServer(self):
"""Loads parameters from a factory server."""
try:
session.console.info('Retrieving %s from factory server.',
self.args.config_retrieve_path)
proxy = server_proxy.GetServerProxy()
content = proxy.GetParameter(
self.args.config_retrieve_path).data
with open(self.config_save_path, 'w') as f:
f.write(content)
logging.info('Saved config to %s.', self.config_save_path)
except Exception as e:
logging.exception('Failed to retrieve config from factory server.')
raise RetrieveConfigException(str(e))
def _RetrieveConfigFromUSB(self):
"""Loads json config from USB drive."""
self.usb_ready_event = threading.Event()
monitor = media_utils.RemovableDiskMonitor()
monitor.Start(on_insert=self._OnUSBInsertion, on_remove=self._OnUSBRemoval)
try:
self.usb_ready_event.wait()
self._MountUSBAndCopyFile()
finally:
monitor.Stop()
logging.info('Saved config to %s.', self.config_save_path)
def _MountUSBAndCopyFile(self):
session.console.info('Mounting USB (%s, %s).', self.usb_dev_path,
self.args.usb_dev_partition)
with media_utils.MountedMedia(self.usb_dev_path,
self.args.usb_dev_partition) as mount_point:
time.sleep(0.5)
pathname = os.path.join(mount_point, self.args.config_retrieve_path)
session.console.info('Retrieving %s from USB.', pathname)
if not os.path.exists(pathname):
raise ValueError(
'File %r does not exist or it is not a file.' % pathname)
try:
file_utils.CopyFileSkipBytes(pathname, self.config_save_path, 0)
except IOError as e:
logging.error('Failed to copy file %s to %s, %r', pathname,
self.config_save_path, e)
raise RetrieveConfigException(str(e))
def _OnUSBInsertion(self, device):
self.usb_dev_path = device.device_node
self.usb_ready_event.set()
def _OnUSBRemoval(self, device):
del device # unused
self.usb_ready_event.clear()
self.usb_dev_path = None
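# --- A minimal, self-contained sketch (not part of the original test) of the
# wait-for-insertion pattern _RetrieveConfigFromUSB relies on: the monitor's
# callback sets a threading.Event and the main flow blocks on it before
# mounting. All names below are illustrative.
#
# import threading
# usb_ready = threading.Event()
# state = {'dev': None}
# def OnInsert(device_node):  # invoked from the monitor's worker thread
#   state['dev'] = device_node
#   usb_ready.set()
# threading.Thread(target=OnInsert, args=('/dev/sdb1',)).start()  # simulated insertion
# usb_ready.wait()  # returns once OnInsert has fired
# print('Ready to mount %s' % state['dev'])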
| 32.876147
| 80
| 0.686759
|
import logging
import os
import threading
import time
import unittest
from cros.factory.test import session
from cros.factory.test import server_proxy
from cros.factory.test.utils import media_utils
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import config_utils
from cros.factory.utils import file_utils
from cros.factory.utils import type_utils
DATA_METHOD = type_utils.Enum(['USB', 'FACTORY_SERVER'])
class RetrieveConfigException(Exception):
pass
class RetrieveConfig(unittest.TestCase):
ARGS = [
Arg('data_method',
DATA_METHOD,
'The method to retrieve config.',
default=DATA_METHOD.FACTORY_SERVER),
Arg('config_retrieve_path',
str,
'The path to the config file to retrieve from.'),
Arg('config_save_dir',
str,
          'The directory in which to place the config file; '
          'defaults to RuntimeConfigDirectory in config_utils.json.',
default=None),
Arg('config_save_name',
str,
          'The config name to save under config_save_dir; the name must '
          'end with ".json". If None, defaults to its original name.',
default=None),
Arg('local_ip',
str,
'Local IP address for connecting to the factory server '
'when data_method = FACTORY_SERVER. Set as None to use DHCP.',
default=None),
Arg('usb_dev_partition',
int,
'The partition of the usb_dev_path to be mounted. If None, will try '
'to mount the usb_dev_path without partition number.',
default=None),
]
def setUp(self):
self.args.config_save_dir = (self.args.config_save_dir or
config_utils.GetRuntimeConfigDirectory())
self.args.config_save_name = self.args.config_save_name or os.path.basename(
self.args.config_retrieve_path)
if not self.args.config_save_name.endswith('.json'):
      raise RetrieveConfigException('Config name must end with ".json".')
self.config_save_path = os.path.join(self.args.config_save_dir,
self.args.config_save_name)
self.usb_dev_path = None
self.usb_ready_event = None
def runTest(self):
file_utils.TryMakeDirs(os.path.dirname(self.config_save_path))
if self.args.data_method == DATA_METHOD.USB:
self._RetrieveConfigFromUSB()
elif self.args.data_method == DATA_METHOD.FACTORY_SERVER:
self._RetrieveConfigFromFactoryServer()
else:
raise ValueError('Unknown data_method.')
def _RetrieveConfigFromFactoryServer(self):
try:
session.console.info('Retrieving %s from factory server.',
self.args.config_retrieve_path)
proxy = server_proxy.GetServerProxy()
content = proxy.GetParameter(
self.args.config_retrieve_path).data
with open(self.config_save_path, 'w') as f:
f.write(content)
logging.info('Saved config to %s.', self.config_save_path)
except Exception as e:
logging.exception('Failed to retrieve config from factory server.')
raise RetrieveConfigException(str(e))
def _RetrieveConfigFromUSB(self):
self.usb_ready_event = threading.Event()
monitor = media_utils.RemovableDiskMonitor()
monitor.Start(on_insert=self._OnUSBInsertion, on_remove=self._OnUSBRemoval)
try:
self.usb_ready_event.wait()
self._MountUSBAndCopyFile()
finally:
monitor.Stop()
logging.info('Saved config to %s.', self.config_save_path)
def _MountUSBAndCopyFile(self):
session.console.info('Mounting USB (%s, %s).', self.usb_dev_path,
self.args.usb_dev_partition)
with media_utils.MountedMedia(self.usb_dev_path,
self.args.usb_dev_partition) as mount_point:
time.sleep(0.5)
pathname = os.path.join(mount_point, self.args.config_retrieve_path)
session.console.info('Retrieving %s from USB.', pathname)
if not os.path.exists(pathname):
raise ValueError(
'File %r does not exist or it is not a file.' % pathname)
try:
file_utils.CopyFileSkipBytes(pathname, self.config_save_path, 0)
except IOError as e:
logging.error('Failed to copy file %s to %s, %r', pathname,
self.config_save_path, e)
raise RetrieveConfigException(str(e))
def _OnUSBInsertion(self, device):
self.usb_dev_path = device.device_node
self.usb_ready_event.set()
def _OnUSBRemoval(self, device):
del device
self.usb_ready_event.clear()
self.usb_dev_path = None
| true
| true
|
1c4382f85f3e76b494e4c372dd40b1ac3840e519
| 6,850
|
py
|
Python
|
runner_predict_proba.py
|
dmitryrubtsov/Predictions-of-calls-in-Moscow-Megafon
|
260bb49e859694d6a7c0dfb8cb13cd39d05ed597
|
[
"MIT"
] | null | null | null |
runner_predict_proba.py
|
dmitryrubtsov/Predictions-of-calls-in-Moscow-Megafon
|
260bb49e859694d6a7c0dfb8cb13cd39d05ed597
|
[
"MIT"
] | null | null | null |
runner_predict_proba.py
|
dmitryrubtsov/Predictions-of-calls-in-Moscow-Megafon
|
260bb49e859694d6a7c0dfb8cb13cd39d05ed597
|
[
"MIT"
] | null | null | null |
import os
import pickle
import time
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
def time_format(seconds):
    """Format elapsed seconds as HH:MM:SS (referenced below but missing from
    the original script; minimal assumed implementation)."""
    return time.strftime('%H:%M:%S', time.gmtime(seconds))
class ColumnSelector(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
assert isinstance(X, pd.DataFrame)
try:
return X[self.columns]
except KeyError:
cols_error = list(set(self.columns) - set(X.columns))
raise KeyError(
f'DataFrame does not contain the following columns: {cols_error}')
class AddFeatures(BaseEstimator, TransformerMixin):
def __init__(self, features, silent=True):
self.features = features
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.silent:
start_t = time.time()
print('Start adding features'.center(100, '*'))
assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'
X_features = self.features.loc[self.features.index.isin(
X.index.unique())]
X_features = X_features.sort_values('buy_time') \
.groupby('id').last()
X_merge = X.reset_index() \
.merge(X_features.reset_index(), on=X.index.name, how='left', suffixes=('_train', '_features')) \
.set_index(X.index.name)
assert X_merge.shape[0] == X.shape[
0], f'Shapes of dataframe don\'t match: {X_merge.shape[0]} and {X.shape[0]}'
assert (X_merge.index == X.index).all(), 'Index Sort Error'
if not self.silent:
print(
f'End adding features, run time: {time_format(time.time()-start_t)}'.center(100, '*'))
print()
return X_merge
class MemUseOptimizing(BaseEstimator, TransformerMixin):
def __init__(self, silent=True):
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
start_t = time.time()
assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'
if not self.silent:
print('Start of dataframe memory use optimizing'.center(100, '*'))
start_memory_usage = X.memory_usage(deep=True).sum() / 1024**2
X_dtype = pd.DataFrame(
X.dtypes, columns=['dtype'], index=X.columns)
X_dtype['min'] = X.select_dtypes(['int', 'float']).min()
X_dtype['max'] = X.select_dtypes(['int', 'float']).max()
X_dtype['is_int'] = ~(X.select_dtypes(['int', 'float']).astype(
int).sum() - X.select_dtypes(['int', 'float']).sum()).astype('bool_')
X_dtype.loc[(X_dtype['is_int'] == True), 'dtype'] = 'int64'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int32').min) & (X_dtype['max'] <= np.iinfo('int32').max), 'dtype'] = 'int32'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int16').min) & (X_dtype['max'] <= np.iinfo('int16').max), 'dtype'] = 'int16'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int8').min) & (X_dtype['max'] <= np.iinfo('int8').max), 'dtype'] = 'int8'
X_dtype.loc[(X_dtype['is_int'] == True) & (
X_dtype['min'] >= np.iinfo('uint64').min), 'dtype'] = 'uint64'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint32').min) & (X_dtype['max'] <= np.iinfo('uint32').max), 'dtype'] = 'uint32'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint16').min) & (X_dtype['max'] <= np.iinfo('uint16').max), 'dtype'] = 'uint16'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint8').min) & (X_dtype['max'] <= np.iinfo('uint8').max), 'dtype'] = 'uint8'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] == 0) & (
X_dtype['max'] == 1), 'dtype'] = 'bool_'
X_dtype.loc[(X_dtype['is_int'] == False), 'dtype'] = 'float64'
X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
'float32').min) & (X_dtype['max'] <= np.finfo('float32').max), 'dtype'] = 'float32'
X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
'float16').min) & (X_dtype['max'] <= np.finfo('float16').max), 'dtype'] = 'float16'
for col in X.select_dtypes('object').columns:
num_unique_values = len(X[col].unique())
num_total_values = len(X[col])
if num_unique_values / num_total_values < 0.5:
X_dtype.loc[col, 'dtype'] = 'category'
dtype = X_dtype['dtype'].to_dict()
X = X.astype(dtype)
if not self.silent:
memory_usage = X.memory_usage(deep=True).sum() / 1024**2
print('Memory use optimizing'.center(100, '*'))
print(
f'Memory usage of properties dataframe before optimizing: {start_memory_usage:.02f} MB')
print(
f'Memory usage of properties dataframe after optimizing: {memory_usage:.02f} MB')
print(
f'This is {100*memory_usage/start_memory_usage:.02f} % of the initial size')
print(
f'End of dataframe memory use optimizing, run time: {time_format(time.time()-start_t)}'.center(64, '*'))
print()
return X
class GetDate(BaseEstimator, TransformerMixin):
def __init__(self, silent=True):
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.silent:
start_t = time.time()
print('Start geting date from timestamp'.center(100, '*'))
if isinstance(X, pd.Series):
X = pd.DataFrame(X)
assert isinstance(
X, pd.DataFrame), 'This is not a pandas dataframe or series'
df = pd.DataFrame()
for col in X.columns:
df[f'{col}_day'] = pd.to_datetime(X[col], unit='s').dt.day
df[f'{col}_month'] = pd.to_datetime(X[col], unit='s').dt.month
df[f'{col}_week'] = pd.to_datetime(X[col], unit='s').dt.week
if not self.silent:
print(
f'End geting date from timestamp, run time: {time_format(time.time()-start_t)}'.center(100, '*'))
print()
return df
TARGET = 'target'
df = pd.read_csv('data_test.csv', index_col=[1]) \
.drop('Unnamed: 0', axis=1)
with open('model.pkl', 'rb') as f:
model = pickle.load(f)
df[TARGET] = model.predict_proba(df)[:, 1]
df.to_csv('answers_test.csv')
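# --- A hedged sketch (not part of the original script) of how the
# transformers above could be chained with scikit-learn's Pipeline;
# 'features_df' is a hypothetical side table of per-id features:
#
# from sklearn.pipeline import Pipeline
# demo = Pipeline([
#     ('add_features', AddFeatures(features_df, silent=False)),
#     ('downcast', MemUseOptimizing(silent=False)),
# ])
# df_prepared = demo.fit_transform(df)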
| 37.845304
| 120
| 0.554745
|
import os
import pickle
import time
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
def time_format(seconds):
    return time.strftime('%H:%M:%S', time.gmtime(seconds))
class ColumnSelector(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
assert isinstance(X, pd.DataFrame)
try:
return X[self.columns]
except KeyError:
cols_error = list(set(self.columns) - set(X.columns))
raise KeyError(
f'DataFrame does not contain the following columns: {cols_error}')
class AddFeatures(BaseEstimator, TransformerMixin):
def __init__(self, features, silent=True):
self.features = features
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.silent:
start_t = time.time()
print('Start adding features'.center(100, '*'))
assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'
X_features = self.features.loc[self.features.index.isin(
X.index.unique())]
X_features = X_features.sort_values('buy_time') \
.groupby('id').last()
X_merge = X.reset_index() \
.merge(X_features.reset_index(), on=X.index.name, how='left', suffixes=('_train', '_features')) \
.set_index(X.index.name)
assert X_merge.shape[0] == X.shape[
0], f'Shapes of dataframe don\'t match: {X_merge.shape[0]} and {X.shape[0]}'
assert (X_merge.index == X.index).all(), 'Index Sort Error'
if not self.silent:
print(
f'End adding features, run time: {time_format(time.time()-start_t)}'.center(100, '*'))
print()
return X_merge
class MemUseOptimizing(BaseEstimator, TransformerMixin):
def __init__(self, silent=True):
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
start_t = time.time()
assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'
if not self.silent:
print('Start of dataframe memory use optimizing'.center(100, '*'))
start_memory_usage = X.memory_usage(deep=True).sum() / 1024**2
X_dtype = pd.DataFrame(
X.dtypes, columns=['dtype'], index=X.columns)
X_dtype['min'] = X.select_dtypes(['int', 'float']).min()
X_dtype['max'] = X.select_dtypes(['int', 'float']).max()
X_dtype['is_int'] = ~(X.select_dtypes(['int', 'float']).astype(
int).sum() - X.select_dtypes(['int', 'float']).sum()).astype('bool_')
X_dtype.loc[(X_dtype['is_int'] == True), 'dtype'] = 'int64'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int32').min) & (X_dtype['max'] <= np.iinfo('int32').max), 'dtype'] = 'int32'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int16').min) & (X_dtype['max'] <= np.iinfo('int16').max), 'dtype'] = 'int16'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int8').min) & (X_dtype['max'] <= np.iinfo('int8').max), 'dtype'] = 'int8'
X_dtype.loc[(X_dtype['is_int'] == True) & (
X_dtype['min'] >= np.iinfo('uint64').min), 'dtype'] = 'uint64'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint32').min) & (X_dtype['max'] <= np.iinfo('uint32').max), 'dtype'] = 'uint32'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint16').min) & (X_dtype['max'] <= np.iinfo('uint16').max), 'dtype'] = 'uint16'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint8').min) & (X_dtype['max'] <= np.iinfo('uint8').max), 'dtype'] = 'uint8'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] == 0) & (
X_dtype['max'] == 1), 'dtype'] = 'bool_'
X_dtype.loc[(X_dtype['is_int'] == False), 'dtype'] = 'float64'
X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
'float32').min) & (X_dtype['max'] <= np.finfo('float32').max), 'dtype'] = 'float32'
X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
'float16').min) & (X_dtype['max'] <= np.finfo('float16').max), 'dtype'] = 'float16'
for col in X.select_dtypes('object').columns:
num_unique_values = len(X[col].unique())
num_total_values = len(X[col])
if num_unique_values / num_total_values < 0.5:
X_dtype.loc[col, 'dtype'] = 'category'
dtype = X_dtype['dtype'].to_dict()
X = X.astype(dtype)
if not self.silent:
memory_usage = X.memory_usage(deep=True).sum() / 1024**2
print('Memory use optimizing'.center(100, '*'))
print(
f'Memory usage of properties dataframe before optimizing: {start_memory_usage:.02f} MB')
print(
f'Memory usage of properties dataframe after optimizing: {memory_usage:.02f} MB')
print(
f'This is {100*memory_usage/start_memory_usage:.02f} % of the initial size')
print(
f'End of dataframe memory use optimizing, run time: {time_format(time.time()-start_t)}'.center(64, '*'))
print()
return X
class GetDate(BaseEstimator, TransformerMixin):
def __init__(self, silent=True):
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.silent:
start_t = time.time()
print('Start geting date from timestamp'.center(100, '*'))
if isinstance(X, pd.Series):
X = pd.DataFrame(X)
assert isinstance(
X, pd.DataFrame), 'This is not a pandas dataframe or series'
df = pd.DataFrame()
for col in X.columns:
df[f'{col}_day'] = pd.to_datetime(X[col], unit='s').dt.day
df[f'{col}_month'] = pd.to_datetime(X[col], unit='s').dt.month
df[f'{col}_week'] = pd.to_datetime(X[col], unit='s').dt.week
if not self.silent:
print(
f'End geting date from timestamp, run time: {time_format(time.time()-start_t)}'.center(100, '*'))
print()
return df
TARGET = 'target'
df = pd.read_csv('data_test.csv', index_col=[1]) \
.drop('Unnamed: 0', axis=1)
with open('model.pkl', 'rb') as f:
model = pickle.load(f)
df[TARGET] = model.predict_proba(df)[:, 1]
df.to_csv('answers_test.csv')
| true
| true
|
1c438431f1b367fc3e91a7be45c76305d48a8771
| 12,873
|
py
|
Python
|
userbot/modules/pmpermit.py
|
matesa/userbot
|
a071f58245ebdfbeec3ce93c53e75ca4d1f64b53
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2020-01-18T18:55:28.000Z
|
2021-05-08T19:15:32.000Z
|
userbot/modules/pmpermit.py
|
matesa/userbot
|
a071f58245ebdfbeec3ce93c53e75ca4d1f64b53
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/pmpermit.py
|
matesa/userbot
|
a071f58245ebdfbeec3ce93c53e75ca4d1f64b53
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 19
|
2019-06-07T07:13:41.000Z
|
2021-07-09T17:34:57.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for keeping control who PM you. """
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.functions.messages import ReportSpamRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import User
from sqlalchemy.exc import IntegrityError
from userbot import (COUNT_PM, CMD_HELP, BOTLOG, BOTLOG_CHATID,
PM_AUTO_BAN, LASTMSG, LOGS)
from userbot.events import register
# ========================= CONSTANTS ============================
UNAPPROVED_MSG = ("`Welcome, i'am anonyCrew Personal assistant .`\n\n"
"`i Got This Job after alot of struggle So Don't Rune My JoB.`"
"`i have 2 kids called java or android i have to feed them😭 ?`\n\n"
"`So Tell Me What You Want My Sir is Offline So Now Goto @ZonersChat For Help XD‚`"
"`Don't Spam My Sir ib Els I will Block oR Report You .`\n\n"
"`Or You Can Also Go to @Zonerschat and tag @baby1473 or @hamza_alive.`")
# =================================================================
@register(incoming=True, disable_edited=True)
async def permitpm(event):
""" Prohibits people from PMing you without approval. \
Will block retarded nibbas automatically. """
if PM_AUTO_BAN:
self_user = await event.client.get_me()
if event.is_private and event.chat_id != 777000 and event.chat_id != self_user.id and not (await event.get_sender()).bot:
try:
from userbot.modules.sql_helper.pm_permit_sql import is_approved
from userbot.modules.sql_helper.globals import gvarstatus
except AttributeError:
return
apprv = is_approved(event.chat_id)
notifsoff = gvarstatus("NOTIF_OFF")
            # Sanity check: if the previously sent message is already the
            # unapproved notice, don't send it again, to avoid hitting
            # Telegram's flood limit.
if not apprv and event.text != UNAPPROVED_MSG:
if event.chat_id in LASTMSG:
prevmsg = LASTMSG[event.chat_id]
                    # If the message differs from the previous one,
                    # send the unapproved notice again.
if event.text != prevmsg:
async for message in event.client.iter_messages(event.chat_id,
from_user='me',
search=UNAPPROVED_MSG,
limit=1):
await message.delete()
await event.reply(UNAPPROVED_MSG)
LASTMSG.update({event.chat_id: event.text})
else:
await event.reply(UNAPPROVED_MSG)
LASTMSG.update({event.chat_id: event.text})
if notifsoff:
await event.client.send_read_acknowledge(event.chat_id)
if event.chat_id not in COUNT_PM:
COUNT_PM.update({event.chat_id: 1})
else:
COUNT_PM[event.chat_id] = COUNT_PM[event.chat_id] + 1
if COUNT_PM[event.chat_id] > 4:
await event.respond(
"`You were spamming my peru master's Inox.`\n"
"`You chuu nubfuk been BLOCKED and reported as SPAM, now GTFO.`"
)
try:
del COUNT_PM[event.chat_id]
del LASTMSG[event.chat_id]
except KeyError:
if BOTLOG:
await event.client.send_message(
BOTLOG_CHATID,
"Count PM is seemingly going retard, plis restart bot!",
)
LOGS.info("CountPM wen't rarted boi")
return
await event.client(BlockRequest(event.chat_id))
await event.client(ReportSpamRequest(peer=event.chat_id))
if BOTLOG:
name = await event.client.get_entity(event.chat_id)
name0 = str(name.first_name)
await event.client.send_message(
BOTLOG_CHATID,
"["
+ name0
+ "](tg://user?id="
+ str(event.chat_id)
+ ")"
+ " was just another retarded nibba",
)
@register(disable_edited=True, outgoing=True)
async def auto_accept(event):
""" Will approve automatically if you texted them first. """
self_user = await event.client.get_me()
if event.is_private and event.chat_id != 777000 and event.chat_id != self_user.id and not (await event.get_sender()).bot:
try:
from userbot.modules.sql_helper.pm_permit_sql import is_approved
from userbot.modules.sql_helper.pm_permit_sql import approve
except AttributeError:
return
chat = await event.get_chat()
if isinstance(chat, User):
if is_approved(event.chat_id) or chat.bot:
return
async for message in event.client.iter_messages(
event.chat_id, reverse=True, limit=1
):
                if message.message != UNAPPROVED_MSG and message.from_id == (await event.client.get_me()).id:
try:
approve(event.chat_id)
except IntegrityError:
return
if is_approved(event.chat_id) and BOTLOG:
await event.client.send_message(
BOTLOG_CHATID,
"#AUTO-APPROVED\n"
+ "User: " +
f"[{chat.first_name}](tg://user?id={chat.id})",
)
@register(outgoing=True, pattern="^.notifoff$")
async def notifoff(noff_event):
""" For .notifoff command, stop getting notifications from unapproved PMs. """
if not noff_event.text[0].isalpha() and noff_event.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.globals import addgvar
except AttributeError:
return
addgvar("NOTIF_OFF", True)
await noff_event.edit("`Notifications from unapproved PM's are silenced!`")
@register(outgoing=True, pattern="^.notifon$")
async def notifon(non_event):
""" For .notifoff command, get notifications from unapproved PMs. """
if not non_event.text[0].isalpha() and non_event.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.globals import delgvar
except AttributeError:
return
delgvar("NOTIF_OFF")
await non_event.edit("`Notifications from unapproved PM's unmuted!`")
@register(outgoing=True, pattern="^.pm$")
async def approvepm(apprvpm):
""" For .pm command, give someone the permissions to PM you. """
if not apprvpm.text[0].isalpha() and apprvpm.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.pm_permit_sql import approve
except AttributeError:
await apprvpm.edit("`Running on Non-SQL mode!`")
return
if apprvpm.reply_to_msg_id:
reply = await apprvpm.get_reply_message()
replied_user = await apprvpm.client(GetFullUserRequest(reply.from_id))
aname = replied_user.user.id
name0 = str(replied_user.user.first_name)
uid = replied_user.user.id
else:
aname = await apprvpm.client.get_entity(apprvpm.chat_id)
name0 = str(aname.first_name)
uid = apprvpm.chat_id
try:
approve(uid)
except IntegrityError:
await apprvpm.edit("`This boi may already be approved.`")
return
await apprvpm.edit(
f"[{name0}](tg://user?id={uid}) ` #Maderbsdk Approved to PM uh Sir Kek!`"
)
async for message in apprvpm.client.iter_messages(apprvpm.chat_id,
from_user='me',
search=UNAPPROVED_MSG,
limit=1):
await message.delete()
if BOTLOG:
await apprvpm.client.send_message(
BOTLOG_CHATID,
"#APPROVED\n"
+ "User: " + f"[{name0}](tg://user?id={uid})",
)
@register(outgoing=True, pattern="^.dis$")
async def disapprovepm(disapprvpm):
if not disapprvpm.text[0].isalpha() and disapprvpm.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.pm_permit_sql import dissprove
        except AttributeError:
await disapprvpm.edit("`Running on Non-SQL mode!`")
return
if disapprvpm.reply_to_msg_id:
reply = await disapprvpm.get_reply_message()
replied_user = await disapprvpm.client(GetFullUserRequest(reply.from_id))
aname = replied_user.user.id
name0 = str(replied_user.user.first_name)
dissprove(replied_user.user.id)
else:
dissprove(disapprvpm.chat_id)
aname = await disapprvpm.client.get_entity(disapprvpm.chat_id)
name0 = str(aname.first_name)
await disapprvpm.edit(
f"[{name0}](tg://user?id={disapprvpm.chat_id}) `Nub Nimba disapproved to PM KEK!`"
)
if BOTLOG:
await disapprvpm.client.send_message(
BOTLOG_CHATID,
f"[{name0}](tg://user?id={disapprvpm.chat_id})"
" was disapproved to PM you.",
)
@register(outgoing=True, pattern="^.block$")
async def blockpm(block):
""" For .block command, block people from PMing you! """
if not block.text[0].isalpha() and block.text[0] not in ("/", "#", "@", "!"):
await block.edit("`You've been blocked Boi!. Now cry in corner`")
if block.reply_to_msg_id:
reply = await block.get_reply_message()
replied_user = await block.client(GetFullUserRequest(reply.from_id))
aname = replied_user.user.id
name0 = str(replied_user.user.first_name)
await block.client(BlockRequest(replied_user.user.id))
uid = replied_user.user.id
else:
await block.client(BlockRequest(block.chat_id))
aname = await block.client.get_entity(block.chat_id)
name0 = str(aname.first_name)
uid = block.chat_id
try:
from userbot.modules.sql_helper.pm_permit_sql import dissprove
dissprove(uid)
except AttributeError: # Non-SQL mode.
pass
if BOTLOG:
await block.client.send_message(
BOTLOG_CHATID,
"#BLOCKED\n"
+ "User: " + f"[{name0}](tg://user?id={uid})",
)
@register(outgoing=True, pattern="^.unblock$")
async def unblockpm(unblock):
""" For .unblock command, let people PMing you again! """
if not unblock.text[0].isalpha() and unblock.text[0] \
not in ("/", "#", "@", "!") and unblock.reply_to_msg_id:
await unblock.edit("`You have been unblocked.OK NOW GIB MONI`")
if unblock.reply_to_msg_id:
reply = await unblock.get_reply_message()
replied_user = await unblock.client(GetFullUserRequest(reply.from_id))
name0 = str(replied_user.user.first_name)
await unblock.client(UnblockRequest(replied_user.user.id))
if BOTLOG:
await unblock.client.send_message(
BOTLOG_CHATID,
f"[{name0}](tg://user?id={replied_user.user.id})"
" was unblocc'd!.",
)
CMD_HELP.update({
"pmpermit": "\
.pm\
\nUsage: Approves the mentioned/replied person to PM.\
\n\n.dis\
\nUsage: Disapproves the mentioned/replied person to PM.\
\n\n.block\
\nUsage: Blocks the person.\
\n\n.unblock\
\nUsage: Unblocks the person so they can PM you.\
\n\n.notifoff\
\nUsage: Clears/Disables any notifications of unapproved PMs.\
\n\n.notifon\
\nUsage: Allows notifications for unapproved PMs."
})
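# --- A hedged sketch (not part of this module) of the @register pattern used
# by every handler above; the command name is hypothetical:
#
# @register(outgoing=True, pattern="^.ping$")
# async def ping(event):
#     await event.edit("`pong`")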
| 41.392283
| 129
| 0.550843
|
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.functions.messages import ReportSpamRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import User
from sqlalchemy.exc import IntegrityError
from userbot import (COUNT_PM, CMD_HELP, BOTLOG, BOTLOG_CHATID,
PM_AUTO_BAN, LASTMSG, LOGS)
from userbot.events import register
UNAPPROVED_MSG = ("`Welcome, i'am anonyCrew Personal assistant .`\n\n"
"`i Got This Job after alot of struggle So Don't Rune My JoB.`"
"`i have 2 kids called java or android i have to feed them😭 ?`\n\n"
"`So Tell Me What You Want My Sir is Offline So Now Goto @ZonersChat For Help XD‚`"
"`Don't Spam My Sir ib Els I will Block oR Report You .`\n\n"
"`Or You Can Also Go to @Zonerschat and tag @baby1473 or @hamza_alive.`")
# =================================================================
@register(incoming=True, disable_edited=True)
async def permitpm(event):
if PM_AUTO_BAN:
self_user = await event.client.get_me()
if event.is_private and event.chat_id != 777000 and event.chat_id != self_user.id and not (await event.get_sender()).bot:
try:
from userbot.modules.sql_helper.pm_permit_sql import is_approved
from userbot.modules.sql_helper.globals import gvarstatus
except AttributeError:
return
apprv = is_approved(event.chat_id)
notifsoff = gvarstatus("NOTIF_OFF")
            # Sanity check: if the previously sent message is already the
            # unapproved notice, don't send it again, to avoid hitting
            # Telegram's flood limit.
if not apprv and event.text != UNAPPROVED_MSG:
if event.chat_id in LASTMSG:
prevmsg = LASTMSG[event.chat_id]
                    # If the message differs from the previous one
if event.text != prevmsg:
async for message in event.client.iter_messages(event.chat_id,
from_user='me',
search=UNAPPROVED_MSG,
limit=1):
await message.delete()
await event.reply(UNAPPROVED_MSG)
LASTMSG.update({event.chat_id: event.text})
else:
await event.reply(UNAPPROVED_MSG)
LASTMSG.update({event.chat_id: event.text})
if notifsoff:
await event.client.send_read_acknowledge(event.chat_id)
if event.chat_id not in COUNT_PM:
COUNT_PM.update({event.chat_id: 1})
else:
COUNT_PM[event.chat_id] = COUNT_PM[event.chat_id] + 1
if COUNT_PM[event.chat_id] > 4:
await event.respond(
"`You were spamming my peru master's Inox.`\n"
"`You chuu nubfuk been BLOCKED and reported as SPAM, now GTFO.`"
)
try:
del COUNT_PM[event.chat_id]
del LASTMSG[event.chat_id]
except KeyError:
if BOTLOG:
await event.client.send_message(
BOTLOG_CHATID,
"Count PM is seemingly going retard, plis restart bot!",
)
LOGS.info("CountPM wen't rarted boi")
return
await event.client(BlockRequest(event.chat_id))
await event.client(ReportSpamRequest(peer=event.chat_id))
if BOTLOG:
name = await event.client.get_entity(event.chat_id)
name0 = str(name.first_name)
await event.client.send_message(
BOTLOG_CHATID,
"["
+ name0
+ "](tg://user?id="
+ str(event.chat_id)
+ ")"
+ " was just another retarded nibba",
)
@register(disable_edited=True, outgoing=True)
async def auto_accept(event):
self_user = await event.client.get_me()
if event.is_private and event.chat_id != 777000 and event.chat_id != self_user.id and not (await event.get_sender()).bot:
try:
from userbot.modules.sql_helper.pm_permit_sql import is_approved
from userbot.modules.sql_helper.pm_permit_sql import approve
except AttributeError:
return
chat = await event.get_chat()
if isinstance(chat, User):
if is_approved(event.chat_id) or chat.bot:
return
async for message in event.client.iter_messages(
event.chat_id, reverse=True, limit=1
):
                if message.message != UNAPPROVED_MSG and message.from_id == (await event.client.get_me()).id:
try:
approve(event.chat_id)
except IntegrityError:
return
if is_approved(event.chat_id) and BOTLOG:
await event.client.send_message(
BOTLOG_CHATID,
"#AUTO-APPROVED\n"
+ "User: " +
f"[{chat.first_name}](tg://user?id={chat.id})",
)
@register(outgoing=True, pattern="^.notifoff$")
async def notifoff(noff_event):
if not noff_event.text[0].isalpha() and noff_event.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.globals import addgvar
except AttributeError:
return
addgvar("NOTIF_OFF", True)
await noff_event.edit("`Notifications from unapproved PM's are silenced!`")
@register(outgoing=True, pattern="^.notifon$")
async def notifon(non_event):
if not non_event.text[0].isalpha() and non_event.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.globals import delgvar
except AttributeError:
return
delgvar("NOTIF_OFF")
await non_event.edit("`Notifications from unapproved PM's unmuted!`")
@register(outgoing=True, pattern="^.pm$")
async def approvepm(apprvpm):
if not apprvpm.text[0].isalpha() and apprvpm.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.pm_permit_sql import approve
except AttributeError:
await apprvpm.edit("`Running on Non-SQL mode!`")
return
if apprvpm.reply_to_msg_id:
reply = await apprvpm.get_reply_message()
replied_user = await apprvpm.client(GetFullUserRequest(reply.from_id))
aname = replied_user.user.id
name0 = str(replied_user.user.first_name)
uid = replied_user.user.id
else:
aname = await apprvpm.client.get_entity(apprvpm.chat_id)
name0 = str(aname.first_name)
uid = apprvpm.chat_id
try:
approve(uid)
except IntegrityError:
await apprvpm.edit("`This boi may already be approved.`")
return
await apprvpm.edit(
f"[{name0}](tg://user?id={uid}) ` #Maderbsdk Approved to PM uh Sir Kek!`"
)
async for message in apprvpm.client.iter_messages(apprvpm.chat_id,
from_user='me',
search=UNAPPROVED_MSG,
limit=1):
await message.delete()
if BOTLOG:
await apprvpm.client.send_message(
BOTLOG_CHATID,
"#APPROVED\n"
+ "User: " + f"[{name0}](tg://user?id={uid})",
)
@register(outgoing=True, pattern="^.dis$")
async def disapprovepm(disapprvpm):
if not disapprvpm.text[0].isalpha() and disapprvpm.text[0] not in ("/", "#", "@", "!"):
try:
from userbot.modules.sql_helper.pm_permit_sql import dissprove
        except AttributeError:
await disapprvpm.edit("`Running on Non-SQL mode!`")
return
if disapprvpm.reply_to_msg_id:
reply = await disapprvpm.get_reply_message()
replied_user = await disapprvpm.client(GetFullUserRequest(reply.from_id))
aname = replied_user.user.id
name0 = str(replied_user.user.first_name)
dissprove(replied_user.user.id)
else:
dissprove(disapprvpm.chat_id)
aname = await disapprvpm.client.get_entity(disapprvpm.chat_id)
name0 = str(aname.first_name)
await disapprvpm.edit(
f"[{name0}](tg://user?id={disapprvpm.chat_id}) `Nub Nimba disapproved to PM KEK!`"
)
if BOTLOG:
await disapprvpm.client.send_message(
BOTLOG_CHATID,
f"[{name0}](tg://user?id={disapprvpm.chat_id})"
" was disapproved to PM you.",
)
@register(outgoing=True, pattern="^.block$")
async def blockpm(block):
if not block.text[0].isalpha() and block.text[0] not in ("/", "#", "@", "!"):
await block.edit("`You've been blocked Boi!. Now cry in corner`")
if block.reply_to_msg_id:
reply = await block.get_reply_message()
replied_user = await block.client(GetFullUserRequest(reply.from_id))
aname = replied_user.user.id
name0 = str(replied_user.user.first_name)
await block.client(BlockRequest(replied_user.user.id))
uid = replied_user.user.id
else:
await block.client(BlockRequest(block.chat_id))
aname = await block.client.get_entity(block.chat_id)
name0 = str(aname.first_name)
uid = block.chat_id
try:
from userbot.modules.sql_helper.pm_permit_sql import dissprove
dissprove(uid)
except AttributeError: # Non-SQL mode.
pass
if BOTLOG:
await block.client.send_message(
BOTLOG_CHATID,
"#BLOCKED\n"
+ "User: " + f"[{name0}](tg://user?id={uid})",
)
@register(outgoing=True, pattern="^.unblock$")
async def unblockpm(unblock):
if not unblock.text[0].isalpha() and unblock.text[0] \
not in ("/", "#", "@", "!") and unblock.reply_to_msg_id:
await unblock.edit("`You have been unblocked.OK NOW GIB MONI`")
if unblock.reply_to_msg_id:
reply = await unblock.get_reply_message()
replied_user = await unblock.client(GetFullUserRequest(reply.from_id))
name0 = str(replied_user.user.first_name)
await unblock.client(UnblockRequest(replied_user.user.id))
if BOTLOG:
await unblock.client.send_message(
BOTLOG_CHATID,
f"[{name0}](tg://user?id={replied_user.user.id})"
" was unblocc'd!.",
)
CMD_HELP.update({
"pmpermit": "\
.pm\
\nUsage: Approves the mentioned/replied person to PM.\
\n\n.dis\
\nUsage: Disapproves the mentioned/replied person to PM.\
\n\n.block\
\nUsage: Blocks the person.\
\n\n.unblock\
\nUsage: Unblocks the person so they can PM you.\
\n\n.notifoff\
\nUsage: Clears/Disables any notifications of unapproved PMs.\
\n\n.notifon\
\nUsage: Allows notifications for unapproved PMs."
})
| true
| true
|
1c4384413ad1249c86287e57bc04b52f1efeeba4
| 716
|
py
|
Python
|
apps/healthcare/covid/onprem/pipelines/covid-visualization/visualization.py
|
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
|
49013953c0cf0de508bb05f1837809d84e6ea2d2
|
[
"Apache-2.0"
] | 60
|
2020-03-20T08:05:32.000Z
|
2021-12-17T14:07:53.000Z
|
apps/healthcare/covid/onprem/pipelines/covid-visualization/visualization.py
|
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
|
49013953c0cf0de508bb05f1837809d84e6ea2d2
|
[
"Apache-2.0"
] | 84
|
2020-03-18T07:06:20.000Z
|
2021-03-02T13:29:20.000Z
|
apps/healthcare/covid/onprem/pipelines/covid-visualization/visualization.py
|
Karthik-Git-Sudo786/cisco-kubeflow-starter-pack
|
49013953c0cf0de508bb05f1837809d84e6ea2d2
|
[
"Apache-2.0"
] | 90
|
2020-03-17T11:54:05.000Z
|
2021-06-03T09:18:58.000Z
|
# Python script to visualise the predicted number of cases in India
import matplotlib.pyplot as plt
from pandas import read_csv
# NOTE: 'source' is undefined in the original snippet; it is presumably
# injected by the surrounding Kubeflow pipeline component. A hypothetical
# default so the script runs standalone:
source = '.'
train_df = read_csv(source + '/train_df.csv')
predict_df = read_csv(source + '/predict_df.csv')
country = "India"
target = "ConfirmedCases"
region_train_df = train_df[(train_df["Country_Region"]==country)]
region_predict_df = predict_df[(predict_df["Country_Region"]==country)]
fig = plt.figure(figsize=(10,6))
ax1 = fig.add_axes([0,0,1,1])
ax1.plot(region_train_df["Date"],
region_train_df[target],
color="green")
ax1.plot(region_predict_df["Date"],
region_predict_df[target],
color="red")
plt.xticks(rotation=90)
plt.show()
| 25.571429
| 71
| 0.698324
|
import matplotlib.pyplot as plt
from pandas import read_csv
source = '.'
train_df = read_csv(source + '/train_df.csv')
predict_df = read_csv(source + '/predict_df.csv')
country = "India"
target = "ConfirmedCases"
region_train_df = train_df[(train_df["Country_Region"]==country)]
region_predict_df = predict_df[(predict_df["Country_Region"]==country)]
fig = plt.figure(figsize=(10,6))
ax1 = fig.add_axes([0,0,1,1])
ax1.plot(region_train_df["Date"],
region_train_df[target],
color="green")
ax1.plot(region_predict_df["Date"],
region_predict_df[target],
color="red")
plt.xticks(rotation=90)
plt.show()
| true
| true
|
1c4385826a2413c60ee4f12a8e6fc18bc2a4bfb0
| 5,308
|
py
|
Python
|
scripts/MakeSingleHeader.py
|
carand/CLI11
|
438eabe5f8d91a3f12ce6c53afd1236b990798ae
|
[
"BSD-3-Clause"
] | 1
|
2020-09-18T21:24:26.000Z
|
2020-09-18T21:24:26.000Z
|
scripts/MakeSingleHeader.py
|
carand/CLI11
|
438eabe5f8d91a3f12ce6c53afd1236b990798ae
|
[
"BSD-3-Clause"
] | 2
|
2020-10-31T02:25:09.000Z
|
2021-03-07T01:48:46.000Z
|
scripts/MakeSingleHeader.py
|
carand/CLI11
|
438eabe5f8d91a3f12ce6c53afd1236b990798ae
|
[
"BSD-3-Clause"
] | 1
|
2021-10-21T16:58:01.000Z
|
2021-10-21T16:58:01.000Z
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import os
import re
from argparse import ArgumentParser
from operator import add
from copy import copy
from functools import reduce
from subprocess import Popen, PIPE
includes_local = re.compile(r"""^#include "(.*)"$""", re.MULTILINE)
includes_system = re.compile(r"""^#include \<(.*)\>$""", re.MULTILINE)
version_finder = re.compile(r"""^#define CLI11_VERSION \"(.*)\"$""", re.MULTILINE)
verbatim_tag_str = r"""
^ # Begin of line
[^\n^\[]+ # Some characters, not including [ or the end of a line
\[ # A literal [
[^\]^\n]* # Anything except a closing ]
CLI11:verbatim # The tag
[^\]^\n]* # Anything except a closing ]
\] # A literal ]
[^\n]* # Up to end of line
$ # End of a line
"""
verbatim_all = re.compile(
verbatim_tag_str + "(.*)" + verbatim_tag_str, re.MULTILINE | re.DOTALL | re.VERBOSE
)
DIR = os.path.dirname(os.path.abspath(__file__))
class HeaderFile(object):
TAG = "Unknown git revision"
LICENSE = "// BSD 3 clause"
VERSION = "Unknown"
def __init__(self, base, inc):
with open(os.path.join(base, inc)) as f:
inner = f.read()
version = version_finder.search(inner)
if version:
self.__class__.VERSION = version.groups()[0]
        # extract any CLI11:verbatim blocks so they pass through untouched
if "CLI11:verbatim" in inner:
self.verbatim = ["\n\n// Verbatim copy from {0}:".format(inc)]
self.verbatim += verbatim_all.findall(inner)
inner = verbatim_all.sub("", inner)
else:
self.verbatim = []
self.headers = set(includes_system.findall(inner))
self.body = "\n// From {0}:\n\n".format(inc) + inner[inner.find("namespace") :]
self.namespace = None
def __add__(self, other):
out = copy(self)
out.headers |= other.headers
out.body += other.body
out.verbatim += other.verbatim
return out
@property
def header_str(self):
return "\n".join("#include <" + h + ">" for h in sorted(self.headers))
@property
def verbatim_str(self):
return "\n".join(self.verbatim)
def insert_namespace(self, namespace):
self.namespace = namespace
def macro_replacement(self, before, after):
self.verbatim = [x.replace(before, after) for x in self.verbatim]
self.body = self.body.replace(before, after)
def __str__(self):
result = """\
#pragma once
// CLI11: Version {self.VERSION}
// Originally designed by Henry Schreiner
// https://github.com/CLIUtils/CLI11
//
// This is a standalone header file generated by MakeSingleHeader.py in CLI11/scripts
// from: {self.TAG}
//
// From LICENSE:
//
{self.LICENSE}
// Standard combined includes:
{self.header_str}
""".format(
self=self
)
if self.namespace:
result += "\nnamespace " + self.namespace + " {\n\n"
result += "{self.verbatim_str}\n{self.body}\n".format(self=self)
if self.namespace:
result += "} // namespace " + self.namespace + "\n\n"
return result
def MakeHeader(
output, main_header, include_dir="../include", namespace=None, macro=None
):
    # Set the TAG class variable from git describe, if available
try:
proc = Popen(
["git", "describe", "--tags", "--always"], cwd=str(DIR), stdout=PIPE
)
out, _ = proc.communicate()
except OSError:
pass
else:
if proc.returncode == 0:
HeaderFile.TAG = out.decode("utf-8").strip()
base_dir = os.path.abspath(os.path.join(DIR, include_dir))
main_header = os.path.join(base_dir, main_header)
header_dir = os.path.dirname(main_header)
licence_file = os.path.abspath(os.path.join(DIR, "../LICENSE"))
with open(licence_file) as f:
HeaderFile.LICENSE = "".join("// " + line for line in f)
with open(main_header) as f:
header = f.read()
include_files = includes_local.findall(header)
headers = [HeaderFile(header_dir, inc) for inc in include_files]
single_header = reduce(add, headers)
if macro is not None:
before = "CLI11_"
print("Converting macros", before, "->", macro)
single_header.macro_replacement(before, macro)
if namespace:
print("Adding namespace", namespace)
single_header.insert_namespace(namespace)
with open(output, "w") as f:
f.write(str(single_header))
print("Created", output)
if __name__ == "__main__":
parser = ArgumentParser(
usage="Convert source to single header include. Can optionally add namespace and search-replace replacements (for macros)."
)
parser.add_argument("output", help="Single header file output")
parser.add_argument(
"--main",
default="CLI/CLI.hpp",
help="The main include file that defines the other files",
)
parser.add_argument("--include", default="../include", help="The include directory")
parser.add_argument("--namespace", help="Add an optional namespace")
parser.add_argument("--macro", help="Replaces CLI11_ with NEW_PREFIX_")
args = parser.parse_args()
MakeHeader(args.output, args.main, args.include, args.namespace, args.macro)
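# --- A hedged aside (not part of this script): HeaderFile.__add__ is what
# lets reduce(add, headers) fold many headers into one, unioning the system
# includes and concatenating the bodies. With hypothetical paths:
#
# a = HeaderFile(base_dir, "CLI/App.hpp")
# b = HeaderFile(base_dir, "CLI/Option.hpp")
# combined = a + b  # combined.headers == a.headers | b.headers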
| 30.159091
| 131
| 0.620196
|
from __future__ import print_function, unicode_literals
import os
import re
from argparse import ArgumentParser
from operator import add
from copy import copy
from functools import reduce
from subprocess import Popen, PIPE
includes_local = re.compile(r"""^#include "(.*)"$""", re.MULTILINE)
includes_system = re.compile(r"""^#include \<(.*)\>$""", re.MULTILINE)
version_finder = re.compile(r"""^#define CLI11_VERSION \"(.*)\"$""", re.MULTILINE)
verbatim_tag_str = r"""
^ # Begin of line
[^\n^\[]+ # Some characters, not including [ or the end of a line
\[ # A literal [
[^\]^\n]* # Anything except a closing ]
CLI11:verbatim # The tag
[^\]^\n]* # Anything except a closing ]
\] # A literal ]
[^\n]* # Up to end of line
$ # End of a line
"""
verbatim_all = re.compile(
verbatim_tag_str + "(.*)" + verbatim_tag_str, re.MULTILINE | re.DOTALL | re.VERBOSE
)
DIR = os.path.dirname(os.path.abspath(__file__))
class HeaderFile(object):
TAG = "Unknown git revision"
LICENSE = "// BSD 3 clause"
VERSION = "Unknown"
def __init__(self, base, inc):
with open(os.path.join(base, inc)) as f:
inner = f.read()
version = version_finder.search(inner)
if version:
self.__class__.VERSION = version.groups()[0]
if "CLI11:verbatim" in inner:
self.verbatim = ["\n\n// Verbatim copy from {0}:".format(inc)]
self.verbatim += verbatim_all.findall(inner)
inner = verbatim_all.sub("", inner)
else:
self.verbatim = []
self.headers = set(includes_system.findall(inner))
self.body = "\n// From {0}:\n\n".format(inc) + inner[inner.find("namespace") :]
self.namespace = None
def __add__(self, other):
out = copy(self)
out.headers |= other.headers
out.body += other.body
out.verbatim += other.verbatim
return out
@property
def header_str(self):
return "\n".join("#include <" + h + ">" for h in sorted(self.headers))
@property
def verbatim_str(self):
return "\n".join(self.verbatim)
def insert_namespace(self, namespace):
self.namespace = namespace
def macro_replacement(self, before, after):
self.verbatim = [x.replace(before, after) for x in self.verbatim]
self.body = self.body.replace(before, after)
def __str__(self):
result = """\
#pragma once
// CLI11: Version {self.VERSION}
// Originally designed by Henry Schreiner
// https://github.com/CLIUtils/CLI11
//
// This is a standalone header file generated by MakeSingleHeader.py in CLI11/scripts
// from: {self.TAG}
//
// From LICENSE:
//
{self.LICENSE}
// Standard combined includes:
{self.header_str}
""".format(
self=self
)
if self.namespace:
result += "\nnamespace " + self.namespace + " {\n\n"
result += "{self.verbatim_str}\n{self.body}\n".format(self=self)
if self.namespace:
result += "} // namespace " + self.namespace + "\n\n"
return result
def MakeHeader(
output, main_header, include_dir="../include", namespace=None, macro=None
):
try:
proc = Popen(
["git", "describe", "--tags", "--always"], cwd=str(DIR), stdout=PIPE
)
out, _ = proc.communicate()
except OSError:
pass
else:
if proc.returncode == 0:
HeaderFile.TAG = out.decode("utf-8").strip()
base_dir = os.path.abspath(os.path.join(DIR, include_dir))
main_header = os.path.join(base_dir, main_header)
header_dir = os.path.dirname(main_header)
licence_file = os.path.abspath(os.path.join(DIR, "../LICENSE"))
with open(licence_file) as f:
HeaderFile.LICENSE = "".join("// " + line for line in f)
with open(main_header) as f:
header = f.read()
include_files = includes_local.findall(header)
headers = [HeaderFile(header_dir, inc) for inc in include_files]
single_header = reduce(add, headers)
if macro is not None:
before = "CLI11_"
print("Converting macros", before, "->", macro)
single_header.macro_replacement(before, macro)
if namespace:
print("Adding namespace", namespace)
single_header.insert_namespace(namespace)
with open(output, "w") as f:
f.write(str(single_header))
print("Created", output)
if __name__ == "__main__":
parser = ArgumentParser(
usage="Convert source to single header include. Can optionally add namespace and search-replace replacements (for macros)."
)
parser.add_argument("output", help="Single header file output")
parser.add_argument(
"--main",
default="CLI/CLI.hpp",
help="The main include file that defines the other files",
)
parser.add_argument("--include", default="../include", help="The include directory")
parser.add_argument("--namespace", help="Add an optional namespace")
parser.add_argument("--macro", help="Replaces CLI11_ with NEW_PREFIX_")
args = parser.parse_args()
MakeHeader(args.output, args.main, args.include, args.namespace, args.macro)
| true
| true
|
1c4385b696021fae2a33d7691ec3ff865c96ff2a
| 3,156
|
py
|
Python
|
apps/habitss/tests.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/habitss/tests.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/habitss/tests.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from account.models import Account
from dateutil.parser import parse
from django.test import TestCase
from django_common.auth_backends import User
from apps.base.models import Profile
from apps.habitss.models import HabitAction, Habit
from apps.habitss.views import HabitStruct
class TestHabitStruct(TestCase):
def _create_profile(self):
user = User(email='kolevdarko.work@gmail.com', username='kolevdarko')
user.set_password("testpass")
user.pk = 1
user.save()
account = Account(user_id=1)
account.pk = 1
account.save()
self.profile = Profile(name='Dare', account_id=1)
self.profile.save()
def _create_habit(self):
self._create_profile()
habit = Habit(name="TEst ht", user=self.profile)
habit.save()
return habit
def _create_habit_actions(self, habit, action_dates):
results = []
for a_date in action_dates:
ha = HabitAction(habit=habit, action_date=parse(a_date).date())
results.append(ha)
return results
def test_fill_actions(self):
habit = self._create_habit()
actions = self._create_habit_actions(habit, ['2019-05-29', '2019-05-31'])
week_actions = HabitStruct.fill_actions(action_list=actions)
self.validate_wed_fri(week_actions)
new_actions = self._create_habit_actions(habit, ['2019-05-27', '2019-06-02'])
week_actions = HabitStruct.fill_actions(new_actions)
self.validate_mon_sun(week_actions)
new_actions = self._create_habit_actions(habit, ['2019-05-30'])
week_actions = HabitStruct.fill_actions(new_actions)
self.validate_thu(week_actions)
def validate_wed_fri(self, week_actions):
self.assertEqual(7, len(week_actions))
self.assertFalse(week_actions[0])
self.assertFalse(week_actions[1])
self.assertTrue(week_actions[2])
self.assertFalse(week_actions[3])
self.assertTrue(week_actions[4])
self.assertFalse(week_actions[5])
self.assertFalse(week_actions[6])
def validate_mon_sun(self, week_actions):
self.assertEqual(7, len(week_actions))
self.assertTrue(week_actions[0])
self.assertEqual(week_actions[0].action_date, datetime(2019, 5, 27).date())
self.assertFalse(week_actions[1])
self.assertFalse(week_actions[2])
self.assertFalse(week_actions[3])
self.assertFalse(week_actions[4])
self.assertFalse(week_actions[5])
self.assertTrue(week_actions[6])
self.assertEqual(week_actions[6].action_date, datetime(2019, 6, 2).date())
def validate_thu(self, week_actions):
self.assertEqual(7, len(week_actions))
self.assertFalse(week_actions[0])
self.assertFalse(week_actions[1])
self.assertFalse(week_actions[2])
self.assertTrue(week_actions[3])
self.assertEqual(week_actions[3].action_date, datetime(2019, 5, 30).date())
self.assertFalse(week_actions[4])
self.assertFalse(week_actions[5])
self.assertFalse(week_actions[6])
| 36.275862
| 85
| 0.679658
|
from datetime import datetime
from account.models import Account
from dateutil.parser import parse
from django.test import TestCase
from django_common.auth_backends import User
from apps.base.models import Profile
from apps.habitss.models import HabitAction, Habit
from apps.habitss.views import HabitStruct
class TestHabitStruct(TestCase):
def _create_profile(self):
user = User(email='kolevdarko.work@gmail.com', username='kolevdarko')
user.set_password("testpass")
user.pk = 1
user.save()
account = Account(user_id=1)
account.pk = 1
account.save()
self.profile = Profile(name='Dare', account_id=1)
self.profile.save()
def _create_habit(self):
self._create_profile()
habit = Habit(name="TEst ht", user=self.profile)
habit.save()
return habit
def _create_habit_actions(self, habit, action_dates):
results = []
for a_date in action_dates:
ha = HabitAction(habit=habit, action_date=parse(a_date).date())
results.append(ha)
return results
def test_fill_actions(self):
habit = self._create_habit()
actions = self._create_habit_actions(habit, ['2019-05-29', '2019-05-31'])
week_actions = HabitStruct.fill_actions(action_list=actions)
self.validate_wed_fri(week_actions)
new_actions = self._create_habit_actions(habit, ['2019-05-27', '2019-06-02'])
week_actions = HabitStruct.fill_actions(new_actions)
self.validate_mon_sun(week_actions)
new_actions = self._create_habit_actions(habit, ['2019-05-30'])
week_actions = HabitStruct.fill_actions(new_actions)
self.validate_thu(week_actions)
def validate_wed_fri(self, week_actions):
self.assertEqual(7, len(week_actions))
self.assertFalse(week_actions[0])
self.assertFalse(week_actions[1])
self.assertTrue(week_actions[2])
self.assertFalse(week_actions[3])
self.assertTrue(week_actions[4])
self.assertFalse(week_actions[5])
self.assertFalse(week_actions[6])
def validate_mon_sun(self, week_actions):
self.assertEqual(7, len(week_actions))
self.assertTrue(week_actions[0])
self.assertEqual(week_actions[0].action_date, datetime(2019, 5, 27).date())
self.assertFalse(week_actions[1])
self.assertFalse(week_actions[2])
self.assertFalse(week_actions[3])
self.assertFalse(week_actions[4])
self.assertFalse(week_actions[5])
self.assertTrue(week_actions[6])
self.assertEqual(week_actions[6].action_date, datetime(2019, 6, 2).date())
def validate_thu(self, week_actions):
self.assertEqual(7, len(week_actions))
self.assertFalse(week_actions[0])
self.assertFalse(week_actions[1])
self.assertFalse(week_actions[2])
self.assertTrue(week_actions[3])
self.assertEqual(week_actions[3].action_date, datetime(2019, 5, 30).date())
self.assertFalse(week_actions[4])
self.assertFalse(week_actions[5])
self.assertFalse(week_actions[6])
| true
| true
|
1c43867a03288dc0c1f3a51949b75193edc66308
| 3,558
|
py
|
Python
|
thorpy/elements/pressable.py
|
YannThorimbert/ThorPy-1.0
|
2855491e7d5016e9cbefb71784d169bb57cf8c73
|
[
"MIT"
] | null | null | null |
thorpy/elements/pressable.py
|
YannThorimbert/ThorPy-1.0
|
2855491e7d5016e9cbefb71784d169bb57cf8c73
|
[
"MIT"
] | null | null | null |
thorpy/elements/pressable.py
|
YannThorimbert/ThorPy-1.0
|
2855491e7d5016e9cbefb71784d169bb57cf8c73
|
[
"MIT"
] | null | null | null |
from copy import copy
from pygame.event import Event, post
from thorpy.elements.element import Element
from thorpy.miscgui.state import State
from thorpy.miscgui.reaction import Reaction
from thorpy.miscgui.initializer import init_params
from thorpy.miscgui import constants, parameters, style, painterstyle
class Pressable(Element):
"""Pressable Element"""
def __init__(self, text="", elements=None, normal_params=None,
press_params=None):
"""To modify _press and _unpress reaction, assign new self._reaction_press
and self._reaction_unpress functions !"""
self.press_params = init_params(press_params)
super(Pressable, self).__init__(text, elements, normal_params)
        # painter setup (the pressed-state fusionner is built in finish())
self.set_painter(painterstyle.DEF_PAINTER(size=style.SIZE))
# reactions
self._set_press_reaction(parameters.BUTTON_PRESS_EVENT,
{"button": parameters.LEFT_CLICK_BUTTON})
self._set_unpress_reaction(parameters.BUTTON_UNPRESS_EVENT,
{"button": parameters.LEFT_CLICK_BUTTON})
def finish(self):
Element.finish(self)
self.press_params._normalize(self)
fusionner_press = self.press_params.get_fusionner()
state_pressed = State(fusionner_press)
self._states[constants.STATE_PRESSED] = state_pressed
def set_style(self, new_style):
Element.set_style(self, new_style)
self.press_params.params["style"] = new_style
def set_painter(self, painter, autopress=True):
"""To use before finish"""
Element.set_painter(self, painter)
if autopress:
painter = copy(painter)
painter.pressed = True
self.press_params.params["painter"] = painter
def _set_press_reaction(self, typ, args=None):
"""Set event which permit to toggle the element"""
if not args:
args = {}
reac_pressed = Reaction(typ, self._reaction_press, args,
name=constants.REAC_PRESSED)
self.add_reaction(reac_pressed)
def _set_unpress_reaction(self, typ, args=None):
if not args:
args = {}
reac_unpress = Reaction(typ, self._reaction_unpress, args,
name=constants.REAC_UNPRESS)
self.add_reaction(reac_unpress)
def _reaction_press(self, pygame_event):
"""Specific for pygame.MOUSEBUTTONDOWN. Needs pygame_event to treat
arguments of the event"""
state_ok = self.current_state == self._states[constants.STATE_NORMAL]
if state_ok:
if self.collide(pygame_event.pos, constants.STATE_NORMAL):
self._press()
def _reaction_unpress(self, pygame_event):
"""Specific for pygame.MOUSEBUTTONUP. Needs pygame_event to treat
arguments of the event"""
state_ok = (self.current_state_key == constants.STATE_PRESSED)
if state_ok:
self._unpress()
if self.collide(pygame_event.pos, constants.STATE_PRESSED):
self.run_user_func()
def _press(self):
self.unblit()
self.change_state(constants.STATE_PRESSED)
self.blit()
self.update()
ev_press = Event(constants.THORPY_EVENT,
name=constants.EVENT_PRESS,
el=self)
post(ev_press)
def _unpress(self):
self.unblit()
self.change_state(constants.STATE_NORMAL)
self.blit()
self.update()
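# --- A hedged usage sketch (not part of this module), using only the methods
# defined above:
#
# button = Pressable("OK")
# button.finish()  # builds the STATE_PRESSED state from press_params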
| 37.851064
| 82
| 0.642496
|
from copy import copy
from pygame.event import Event, post
from thorpy.elements.element import Element
from thorpy.miscgui.state import State
from thorpy.miscgui.reaction import Reaction
from thorpy.miscgui.initializer import init_params
from thorpy.miscgui import constants, parameters, style, painterstyle
class Pressable(Element):
def __init__(self, text="", elements=None, normal_params=None,
press_params=None):
self.press_params = init_params(press_params)
super(Pressable, self).__init__(text, elements, normal_params)
self.set_painter(painterstyle.DEF_PAINTER(size=style.SIZE))
self._set_press_reaction(parameters.BUTTON_PRESS_EVENT,
{"button": parameters.LEFT_CLICK_BUTTON})
self._set_unpress_reaction(parameters.BUTTON_UNPRESS_EVENT,
{"button": parameters.LEFT_CLICK_BUTTON})
def finish(self):
Element.finish(self)
self.press_params._normalize(self)
fusionner_press = self.press_params.get_fusionner()
state_pressed = State(fusionner_press)
self._states[constants.STATE_PRESSED] = state_pressed
def set_style(self, new_style):
Element.set_style(self, new_style)
self.press_params.params["style"] = new_style
def set_painter(self, painter, autopress=True):
Element.set_painter(self, painter)
if autopress:
painter = copy(painter)
painter.pressed = True
self.press_params.params["painter"] = painter
def _set_press_reaction(self, typ, args=None):
if not args:
args = {}
reac_pressed = Reaction(typ, self._reaction_press, args,
name=constants.REAC_PRESSED)
self.add_reaction(reac_pressed)
def _set_unpress_reaction(self, typ, args=None):
if not args:
args = {}
reac_unpress = Reaction(typ, self._reaction_unpress, args,
name=constants.REAC_UNPRESS)
self.add_reaction(reac_unpress)
def _reaction_press(self, pygame_event):
state_ok = self.current_state == self._states[constants.STATE_NORMAL]
if state_ok:
if self.collide(pygame_event.pos, constants.STATE_NORMAL):
self._press()
def _reaction_unpress(self, pygame_event):
state_ok = (self.current_state_key == constants.STATE_PRESSED)
if state_ok:
self._unpress()
if self.collide(pygame_event.pos, constants.STATE_PRESSED):
self.run_user_func()
def _press(self):
self.unblit()
self.change_state(constants.STATE_PRESSED)
self.blit()
self.update()
ev_press = Event(constants.THORPY_EVENT,
name=constants.EVENT_PRESS,
el=self)
post(ev_press)
def _unpress(self):
self.unblit()
self.change_state(constants.STATE_NORMAL)
self.blit()
self.update()
| true
| true
|
1c43882de9d66e798e357004b8fcaf92f64aca83
| 1,357
|
py
|
Python
|
psych_metric/datasets/crowd_layer/convert_txt_to_csv_ner.py
|
prijatelj/bayesian_eval_ground_truth-free
|
c0e569c78d63beb79f5e1e727c322293c3584323
|
[
"MIT"
] | 1
|
2021-12-26T05:55:46.000Z
|
2021-12-26T05:55:46.000Z
|
psych_metric/datasets/crowd_layer/convert_txt_to_csv_ner.py
|
prijatelj/bayesian_eval_ground_truth-free
|
c0e569c78d63beb79f5e1e727c322293c3584323
|
[
"MIT"
] | null | null | null |
psych_metric/datasets/crowd_layer/convert_txt_to_csv_ner.py
|
prijatelj/bayesian_eval_ground_truth-free
|
c0e569c78d63beb79f5e1e727c322293c3584323
|
[
"MIT"
] | null | null | null |
"""Converts given data txt file into a more parsable formatted csv.
"""
import argparse
import numpy as np
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser(description='Convert the given ner-mturk txt file into an easier to parse csv file.')
    parser.add_argument('input_file', help='File path to the input ner-mturk txt file')
    parser.add_argument('output_file', help='File path for the output csv file')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
txt = pd.read_csv(args.input_file, header=None, sep=' ', na_values='?', dtype=str, skip_blank_lines=False)
# rename columns
txt.columns = ['token'] + list(range(len(txt.columns) - 1))
# add sequence column
count = 0
seq = np.empty(len(txt))
for i in range(len(txt)):
if txt.iloc[i].isna().all():
seq[i] = np.nan
count += 1
else:
seq[i] = count
txt.insert(0, 'sequence', seq)
# Remove all rows with only nas
    txt.dropna(axis='index', how='all', inplace=True)
# make sequence of dtype int
txt['sequence'] = txt['sequence'].astype(int)
# revert nas in token column to '?'
txt['token'] = txt['token'].fillna('?')
print(args.output_file)
txt.to_csv(args.output_file, sep=' ', index=False, na_rep='NA')
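A self-contained toy run of the sequence-numbering loop above (tokens and tags are made up) shows how blank separator rows become sequence boundaries:
import numpy as np
import pandas as pd

toy = pd.DataFrame({'token': ['EU', 'rejects', np.nan, 'Peter', 'Blackburn'],
                    0: ['ORG', 'O', np.nan, 'PER', 'PER']})
count, seq = 0, np.empty(len(toy))
for i in range(len(toy)):
    if toy.iloc[i].isna().all():   # blank separator row between sentences
        seq[i] = np.nan
        count += 1
    else:
        seq[i] = count
toy.insert(0, 'sequence', seq)
toy = toy.dropna(axis='index', how='all')
print(toy)   # 'EU'/'rejects' get sequence 0, 'Peter'/'Blackburn' get sequence 1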
| 29.5
| 122
| 0.642594
|
import argparse
import numpy as np
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser(description='Convert the given ner-mturk txt file into an easier to parse csv file.')
    parser.add_argument('input_file', help='File path to the input ner-mturk txt file')
    parser.add_argument('output_file', help='File path for the output csv file')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
txt = pd.read_csv(args.input_file, header=None, sep=' ', na_values='?', dtype=str, skip_blank_lines=False)
txt.columns = ['token'] + list(range(len(txt.columns) - 1))
count = 0
seq = np.empty(len(txt))
for i in range(len(txt)):
if txt.iloc[i].isna().all():
seq[i] = np.nan
count += 1
else:
seq[i] = count
txt.insert(0, 'sequence', seq)
    txt.dropna(axis='index', how='all', inplace=True)
txt['sequence'] = txt['sequence'].astype(int)
txt['token'] = txt['token'].fillna('?')
print(args.output_file)
txt.to_csv(args.output_file, sep=' ', index=False, na_rep='NA')
| true
| true
|
1c438865c8ad14c6278e6e11a08d3a7128f77ab8
| 3,441
|
py
|
Python
|
sample.py
|
tsudalab/rxngenerator
|
6f459828c03485926adb390e5bfbd4a6d91de30b
|
[
"MIT"
] | 8
|
2022-01-04T09:36:32.000Z
|
2022-03-03T22:35:53.000Z
|
sample.py
|
tsudalab/rxngenerator
|
6f459828c03485926adb390e5bfbd4a6d91de30b
|
[
"MIT"
] | null | null | null |
sample.py
|
tsudalab/rxngenerator
|
6f459828c03485926adb390e5bfbd4a6d91de30b
|
[
"MIT"
] | 1
|
2021-12-17T19:17:44.000Z
|
2021-12-17T19:17:44.000Z
|
import sys
sys.path.append('./rxnft_vae')
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque
from reaction_utils import read_multistep_rxns
from reaction import ReactionTree, extract_starting_reactants, StartingReactants, Templates, extract_templates,stats
from fragment import FragmentVocab, FragmentTree, FragmentNode, can_be_decomposed
from vae import FTRXNVAE, set_batch_nodeID
from mpn import MPN,PP,Discriminator
from evaluate import Evaluator
import random
parser = OptionParser()
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=50)
parser.add_option("-d", "--depth", dest="depth", default=2)
parser.add_option("-b", "--batch", dest="batch_size", default = 32)
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-t", "--data_path", dest="data_path")
parser.add_option("-v", "--vocab_path", dest="vocab_path")
parser.add_option("-o", "--output_file", dest="output_file", default = "Results/sampled_rxns.txt")
opts, _ = parser.parse_args()
# get parameters
batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
vocab_path = opts.vocab_path
data_filename = opts.data_path
w_save_path = opts.save_path
output_file = opts.output_file
if torch.cuda.is_available():
#device = torch.device("cuda:1")
device = torch.device("cuda")
torch.cuda.set_device(1)
else:
device = torch.device("cpu")
print("hidden size:", hidden_size, "latent_size:", latent_size, "depth:", depth)
print("loading data.....")
data_filename = opts.data_path
routes, scores = read_multistep_rxns(data_filename)
rxn_trees = [ReactionTree(route) for route in routes]
molecules = [rxn_tree.molecule_nodes[0].smiles for rxn_tree in rxn_trees]
reactants = extract_starting_reactants(rxn_trees)
templates, n_reacts = extract_templates(rxn_trees)
reactantDic = StartingReactants(reactants)
templateDic = Templates(templates, n_reacts)
print("size of reactant dic:", reactantDic.size())
print("size of template dic:", templateDic.size())
#print(templateDic.template_list)
n_pairs = len(routes)
ind_list = [i for i in range(n_pairs)]
fgm_trees = [FragmentTree(rxn_trees[i].molecule_nodes[0].smiles) for i in ind_list]
rxn_trees = [rxn_trees[i] for i in ind_list]
data_pairs=[]
for fgm_tree, rxn_tree in zip(fgm_trees, rxn_trees):
data_pairs.append((fgm_tree, rxn_tree))
cset=set()
for fgm_tree in fgm_trees:
for node in fgm_tree.nodes:
cset.add(node.smiles)
cset = list(cset)
if vocab_path is None:
fragmentDic = FragmentVocab(cset)
else:
fragmentDic = FragmentVocab(cset, filename =vocab_path)
print("size of fragment dic:", fragmentDic.size())
# loading model
mpn = MPN(hidden_size, depth)
model = FTRXNVAE(fragmentDic, reactantDic, templateDic, hidden_size, latent_size, depth, fragment_embedding=None, reactant_embedding=None, template_embedding=None)
checkpoint = torch.load(w_save_path, map_location=device)
model.load_state_dict(checkpoint)
print("loaded model....")
evaluator = Evaluator(latent_size, model)
evaluator.validate_and_save(rxn_trees, output_file=output_file)
#evaluator.novelty_and_uniqueness([output_file], rxn_trees)
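A hypothetical invocation, for orientation only (every path below is made up):
# python sample.py --hidden 200 --latent 50 --depth 2 \
#     --data_path data/multistep_rxns.txt \
#     --save_dir weights/ft_rxn_vae.pt \
#     --output_file Results/sampled_rxns.txt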
| 33.086538
| 163
| 0.779134
|
import sys
sys.path.append('./rxnft_vae')
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch.autograd import Variable
import math, random, sys
from optparse import OptionParser
from collections import deque
from reaction_utils import read_multistep_rxns
from reaction import ReactionTree, extract_starting_reactants, StartingReactants, Templates, extract_templates,stats
from fragment import FragmentVocab, FragmentTree, FragmentNode, can_be_decomposed
from vae import FTRXNVAE, set_batch_nodeID
from mpn import MPN,PP,Discriminator
from evaluate import Evaluator
import random
parser = OptionParser()
parser.add_option("-w", "--hidden", dest="hidden_size", default=200)
parser.add_option("-l", "--latent", dest="latent_size", default=50)
parser.add_option("-d", "--depth", dest="depth", default=2)
parser.add_option("-b", "--batch", dest="batch_size", default = 32)
parser.add_option("-s", "--save_dir", dest="save_path")
parser.add_option("-t", "--data_path", dest="data_path")
parser.add_option("-v", "--vocab_path", dest="vocab_path")
parser.add_option("-o", "--output_file", dest="output_file", default = "Results/sampled_rxns.txt")
opts, _ = parser.parse_args()
batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
latent_size = int(opts.latent_size)
depth = int(opts.depth)
vocab_path = opts.vocab_path
data_filename = opts.data_path
w_save_path = opts.save_path
output_file = opts.output_file
if torch.cuda.is_available():
device = torch.device("cuda")
torch.cuda.set_device(1)
else:
device = torch.device("cpu")
print("hidden size:", hidden_size, "latent_size:", latent_size, "depth:", depth)
print("loading data.....")
data_filename = opts.data_path
routes, scores = read_multistep_rxns(data_filename)
rxn_trees = [ReactionTree(route) for route in routes]
molecules = [rxn_tree.molecule_nodes[0].smiles for rxn_tree in rxn_trees]
reactants = extract_starting_reactants(rxn_trees)
templates, n_reacts = extract_templates(rxn_trees)
reactantDic = StartingReactants(reactants)
templateDic = Templates(templates, n_reacts)
print("size of reactant dic:", reactantDic.size())
print("size of template dic:", templateDic.size())
n_pairs = len(routes)
ind_list = [i for i in range(n_pairs)]
fgm_trees = [FragmentTree(rxn_trees[i].molecule_nodes[0].smiles) for i in ind_list]
rxn_trees = [rxn_trees[i] for i in ind_list]
data_pairs=[]
for fgm_tree, rxn_tree in zip(fgm_trees, rxn_trees):
data_pairs.append((fgm_tree, rxn_tree))
cset=set()
for fgm_tree in fgm_trees:
for node in fgm_tree.nodes:
cset.add(node.smiles)
cset = list(cset)
if vocab_path is None:
fragmentDic = FragmentVocab(cset)
else:
fragmentDic = FragmentVocab(cset, filename =vocab_path)
print("size of fragment dic:", fragmentDic.size())
mpn = MPN(hidden_size, depth)
model = FTRXNVAE(fragmentDic, reactantDic, templateDic, hidden_size, latent_size, depth, fragment_embedding=None, reactant_embedding=None, template_embedding=None)
checkpoint = torch.load(w_save_path, map_location=device)
model.load_state_dict(checkpoint)
print("loaded model....")
evaluator = Evaluator(latent_size, model)
evaluator.validate_and_save(rxn_trees, output_file=output_file)
| true
| true
|
1c43887a8bfb3ccc2eae3cbec1b04d408745a2bf
| 4,850
|
py
|
Python
|
cloudmersive_validate_api_client/models/validate_identifier_response.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | 3
|
2018-06-23T21:37:21.000Z
|
2020-04-20T23:07:36.000Z
|
cloudmersive_validate_api_client/models/validate_identifier_response.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | 1
|
2019-02-04T17:03:35.000Z
|
2019-03-02T20:16:52.000Z
|
cloudmersive_validate_api_client/models/validate_identifier_response.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | 2
|
2019-03-21T15:54:15.000Z
|
2020-05-27T17:30:43.000Z
|
# coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ValidateIdentifierResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'valid_identifier': 'bool',
'error': 'str'
}
attribute_map = {
'valid_identifier': 'ValidIdentifier',
'error': 'Error'
}
def __init__(self, valid_identifier=None, error=None): # noqa: E501
"""ValidateIdentifierResponse - a model defined in Swagger""" # noqa: E501
self._valid_identifier = None
self._error = None
self.discriminator = None
if valid_identifier is not None:
self.valid_identifier = valid_identifier
if error is not None:
self.error = error
@property
def valid_identifier(self):
"""Gets the valid_identifier of this ValidateIdentifierResponse. # noqa: E501
True if the input identifier is valid, false otherwise # noqa: E501
:return: The valid_identifier of this ValidateIdentifierResponse. # noqa: E501
:rtype: bool
"""
return self._valid_identifier
@valid_identifier.setter
def valid_identifier(self, valid_identifier):
"""Sets the valid_identifier of this ValidateIdentifierResponse.
True if the input identifier is valid, false otherwise # noqa: E501
:param valid_identifier: The valid_identifier of this ValidateIdentifierResponse. # noqa: E501
:type: bool
"""
self._valid_identifier = valid_identifier
@property
def error(self):
"""Gets the error of this ValidateIdentifierResponse. # noqa: E501
Resulting error from the identifier validation; possible errors are: \"InputIsEmpty\", \"ContainsWhitespace\", \"ContainsNumbers\", \"ContainsHyphen\", \"ContainsUnderscore\", \"ContainsPeriod\", \"TooShort\", \"TooLong\", \"ContainsSpecialCharacters\" # noqa: E501
:return: The error of this ValidateIdentifierResponse. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this ValidateIdentifierResponse.
Resulting error from the identifier validation; possible errors are: \"InputIsEmpty\", \"ContainsWhitespace\", \"ContainsNumbers\", \"ContainsHyphen\", \"ContainsUnderscore\", \"ContainsPeriod\", \"TooShort\", \"TooLong\", \"ContainsSpecialCharacters\" # noqa: E501
:param error: The error of this ValidateIdentifierResponse. # noqa: E501
:type: str
"""
self._error = error
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ValidateIdentifierResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ValidateIdentifierResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
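A short round-trip sketch of the generated model (values are illustrative):
resp = ValidateIdentifierResponse(valid_identifier=False, error="ContainsWhitespace")
print(resp.to_dict())   # {'valid_identifier': False, 'error': 'ContainsWhitespace'}
print(resp == ValidateIdentifierResponse(valid_identifier=False,
                                         error="ContainsWhitespace"))   # True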
| 33.219178
| 274
| 0.610722
|
import pprint
import re
import six
class ValidateIdentifierResponse(object):
swagger_types = {
'valid_identifier': 'bool',
'error': 'str'
}
attribute_map = {
'valid_identifier': 'ValidIdentifier',
'error': 'Error'
}
def __init__(self, valid_identifier=None, error=None):
self._valid_identifier = None
self._error = None
self.discriminator = None
if valid_identifier is not None:
self.valid_identifier = valid_identifier
if error is not None:
self.error = error
@property
def valid_identifier(self):
return self._valid_identifier
@valid_identifier.setter
def valid_identifier(self, valid_identifier):
self._valid_identifier = valid_identifier
@property
def error(self):
return self._error
@error.setter
def error(self, error):
self._error = error
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ValidateIdentifierResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ValidateIdentifierResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c4388d2d89af63f50ad952bd629f48f9511c161
| 8,306
|
py
|
Python
|
beacon_controller/controllers/concepts_controller.py
|
NCATS-Tangerine/rhea-beacon
|
ccf6e790dc4c26eb4853b1bcb78382b84fbfe238
|
[
"MIT"
] | null | null | null |
beacon_controller/controllers/concepts_controller.py
|
NCATS-Tangerine/rhea-beacon
|
ccf6e790dc4c26eb4853b1bcb78382b84fbfe238
|
[
"MIT"
] | 10
|
2018-08-18T03:13:08.000Z
|
2019-02-05T20:04:15.000Z
|
beacon_controller/controllers/concepts_controller.py
|
NCATS-Tangerine/rhea-beacon
|
ccf6e790dc4c26eb4853b1bcb78382b84fbfe238
|
[
"MIT"
] | null | null | null |
from swagger_server.models.beacon_concept import BeaconConcept # noqa: E501
from swagger_server.models.beacon_concept_with_details import BeaconConceptWithDetails # noqa: E501
from swagger_server.models.beacon_concept_detail import BeaconConceptDetail
from swagger_server.models.exact_match_response import ExactMatchResponse # noqa: E501
from beacon_controller import biolink_model as blm
from beacon_controller.providers import rhea
from beacon_controller.providers.xrefs import get_xrefs
from beacon_controller.const import Category, Predicate
def get_concept_details(concept_id): # noqa: E501
"""get_concept_details
    Retrieves details for a specified concept in the system, as specified by a (url-encoded) CURIE identifier of a concept known to the given knowledge source. # noqa: E501
:param concept_id: (url-encoded) CURIE identifier of concept of interest
:type concept_id: str
:rtype: BeaconConceptWithDetails
"""
concept_id = concept_id.upper()
if concept_id.startswith('EC:'):
concept = rhea.get_enzyme(concept_id)
if concept is None:
return None
_, ec_number = concept_id.split(':', 1)
synonyms = concept.get('Synonyms')
if isinstance(synonyms, str):
synonyms = synonyms.split(';')
else:
synonyms = []
return BeaconConceptWithDetails(
id=concept_id,
uri=f'https://enzyme.expasy.org/EC/{ec_number}',
name=concept.get('Name'),
symbol=None,
categories=[Category.protein.name],
description=None,
synonyms=synonyms,
exact_matches=[],
details=[]
)
elif concept_id.startswith('RHEA:'):
records = rhea.get_records(f"""
PREFIX rh:<http://rdf.rhea-db.org/>
SELECT
?equation
?reaction
WHERE {{
?reaction rh:accession "{concept_id}" .
?reaction rh:equation ?equation .
}}
LIMIT 1
""")
for record in records:
return BeaconConceptWithDetails(
id=concept_id,
uri=record['reaction']['value'],
name=record['equation']['value'],
symbol=None,
categories=[Category.molecular_activity.name],
description=None,
synonyms=[],
exact_matches=[],
details=[]
)
else:
records = rhea.get_records(f"""
PREFIX rh:<http://rdf.rhea-db.org/>
SELECT ?compoundAc ?chebi
(count(distinct ?reaction) as ?reactionCount)
?compoundName
WHERE {{
?reaction rdfs:subClassOf rh:Reaction .
?reaction rh:status rh:Approved .
?reaction rh:side ?reactionSide .
?reactionSide rh:contains ?participant .
?participant rh:compound ?compound .
OPTIONAL {{ ?compound rh:chebi ?chebi . }}
?compound rh:name ?compoundName .
?compound rh:accession "{concept_id}" .
}}
LIMIT 1
""")
        for record in records:
            # ?chebi is OPTIONAL in the SPARQL query, so the binding may be absent
            try:
                uri = record['chebi']['value']
            except KeyError:
                uri = None
return BeaconConceptWithDetails(
id=concept_id,
uri=uri,
name=record['compoundName']['value'],
symbol=None,
categories=[Category.chemical_substance.name],
description=None,
synonyms=[],
exact_matches=[],
details=[BeaconConceptDetail(tag='reactionCount', value=record['reactionCount']['value'])]
)
def get_concepts(keywords=None, categories=None, offset=None, size=None): # noqa: E501
"""get_concepts
    Retrieves a list of concepts in the beacon knowledge base whose names and/or synonyms match a set of keywords or substrings. The results should generally be returned in order of match quality, that is, the highest ranked concepts should exactly match the most keywords, in the same order as the keywords were given. Lower quality hits with fewer keyword matches or out-of-order keyword matches should be returned lower in the list. # noqa: E501
:param keywords: (Optional) array of keywords or substrings against which to match concept names and synonyms
:type keywords: List[str]
:param categories: (Optional) array set of concept categories - specified as Biolink name labels codes gene, pathway, etc. - to which to constrain concepts matched by the main keyword search (see [Biolink Model](https://biolink.github.io/biolink-model) for the full list of terms)
:type categories: List[str]
:param offset: offset (cursor position) to next batch of statements of amount 'size' to return.
:type offset: int
:param size: maximum number of concept entries requested by the client; if this argument is omitted, then the query is expected to returned all the available data for the query
:type size: int
:rtype: List[BeaconConcept]
"""
if size is None:
size = 10
concepts = []
if categories is None or any(a in categories for a in blm.ancestors(Category.protein.name)):
enzymes, total_num_rows = rhea.find_enzymes(keywords, offset, size, metadata=True)
for enzyme in enzymes:
concepts.append(BeaconConcept(
id=f'EC:{enzyme.get("ID")}',
name=enzyme.get('Name'),
categories=[Category.protein.name],
description=None
))
if size is not None and len(concepts) < size:
offset = max(0, offset - total_num_rows) if offset is not None else None
size = size - len(concepts) if size is not None else None
elif size is not None and len(concepts) >= size:
return concepts
if categories is None or any(a in categories for a in blm.ancestors(Category.chemical_substance.name)):
compounds = rhea.find_compounds(keywords, offset=offset, limit=size)
for compound in compounds:
concepts.append(BeaconConcept(
id=compound.get('compoundAc').get('value'),
name=compound.get('compoundName').get('value'),
categories=[Category.chemical_substance.name],
description=None
))
return concepts
def get_exact_matches_to_concept_list(c): # noqa: E501
"""get_exact_matches_to_concept_list
    Given an input array of [CURIE](https://www.w3.org/TR/curie/) identifiers of known exactly matched concepts [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch), retrieves the list of [CURIE](https://www.w3.org/TR/curie/) identifiers of additional concepts that are deemed by the given knowledge source to be exact matches to one or more of the input concepts **plus** whichever concept identifiers from the input list were specifically matched to these additional concepts, thus giving the whole known set of equivalent concepts known to this particular knowledge source. If an empty set is returned, it can be assumed that the given knowledge source does not know of any new equivalent concepts matching the input set. The caller of this endpoint can then decide whether or not to treat its input identifiers as its own equivalent set. # noqa: E501
:param c: an array set of [CURIE-encoded](https://www.w3.org/TR/curie/) identifiers of concepts thought to be exactly matching concepts, to be used in a search for additional exactly matching concepts [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch).
:type c: List[str]
:rtype: List[ExactMatchResponse]
"""
results = []
for conceptId in c:
if ':' not in conceptId:
continue
xrefs = get_xrefs(conceptId)
if xrefs != []:
results.append(ExactMatchResponse(
id=conceptId,
within_domain=True,
has_exact_matches=xrefs
))
else:
results.append(ExactMatchResponse(
id=conceptId,
within_domain=False,
has_exact_matches=[]
))
return results
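The offset/size bookkeeping in get_concepts is easy to misread; a worked trace with hypothetical numbers:
offset, size = 15, 10            # caller asks for concepts 15..24
total_num_rows = 12              # the enzyme search holds only 12 matches
concepts_found = 0               # offset 15 lies past all enzyme rows
size = size - concepts_found               # still need 10 concepts
offset = max(0, offset - total_num_rows)   # 3: skip 3 rows of the compound search
print(offset, size)                        # 3 10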
| 43.260417
| 871
| 0.639658
|
from swagger_server.models.beacon_concept import BeaconConcept
from swagger_server.models.beacon_concept_with_details import BeaconConceptWithDetails
from swagger_server.models.beacon_concept_detail import BeaconConceptDetail
from swagger_server.models.exact_match_response import ExactMatchResponse
from beacon_controller import biolink_model as blm
from beacon_controller.providers import rhea
from beacon_controller.providers.xrefs import get_xrefs
from beacon_controller.const import Category, Predicate
def get_concept_details(concept_id):
concept_id = concept_id.upper()
if concept_id.startswith('EC:'):
concept = rhea.get_enzyme(concept_id)
if concept is None:
return None
_, ec_number = concept_id.split(':', 1)
synonyms = concept.get('Synonyms')
if isinstance(synonyms, str):
synonyms = synonyms.split(';')
else:
synonyms = []
return BeaconConceptWithDetails(
id=concept_id,
uri=f'https://enzyme.expasy.org/EC/{ec_number}',
name=concept.get('Name'),
symbol=None,
categories=[Category.protein.name],
description=None,
synonyms=synonyms,
exact_matches=[],
details=[]
)
elif concept_id.startswith('RHEA:'):
records = rhea.get_records(f"""
PREFIX rh:<http://rdf.rhea-db.org/>
SELECT
?equation
?reaction
WHERE {{
?reaction rh:accession "{concept_id}" .
?reaction rh:equation ?equation .
}}
LIMIT 1
""")
for record in records:
return BeaconConceptWithDetails(
id=concept_id,
uri=record['reaction']['value'],
name=record['equation']['value'],
symbol=None,
categories=[Category.molecular_activity.name],
description=None,
synonyms=[],
exact_matches=[],
details=[]
)
else:
records = rhea.get_records(f"""
PREFIX rh:<http://rdf.rhea-db.org/>
SELECT ?compoundAc ?chebi
(count(distinct ?reaction) as ?reactionCount)
?compoundName
WHERE {{
?reaction rdfs:subClassOf rh:Reaction .
?reaction rh:status rh:Approved .
?reaction rh:side ?reactionSide .
?reactionSide rh:contains ?participant .
?participant rh:compound ?compound .
OPTIONAL {{ ?compound rh:chebi ?chebi . }}
?compound rh:name ?compoundName .
?compound rh:accession "{concept_id}" .
}}
LIMIT 1
""")
        for record in records:
            try:
                uri = record['chebi']['value']
            except KeyError:
                uri = None
return BeaconConceptWithDetails(
id=concept_id,
uri=uri,
name=record['compoundName']['value'],
symbol=None,
categories=[Category.chemical_substance.name],
description=None,
synonyms=[],
exact_matches=[],
details=[BeaconConceptDetail(tag='reactionCount', value=record['reactionCount']['value'])]
)
def get_concepts(keywords=None, categories=None, offset=None, size=None):
if size is None:
size = 10
concepts = []
if categories is None or any(a in categories for a in blm.ancestors(Category.protein.name)):
enzymes, total_num_rows = rhea.find_enzymes(keywords, offset, size, metadata=True)
for enzyme in enzymes:
concepts.append(BeaconConcept(
id=f'EC:{enzyme.get("ID")}',
name=enzyme.get('Name'),
categories=[Category.protein.name],
description=None
))
if size is not None and len(concepts) < size:
offset = max(0, offset - total_num_rows) if offset is not None else None
size = size - len(concepts) if size is not None else None
elif size is not None and len(concepts) >= size:
return concepts
if categories is None or any(a in categories for a in blm.ancestors(Category.chemical_substance.name)):
compounds = rhea.find_compounds(keywords, offset=offset, limit=size)
for compound in compounds:
concepts.append(BeaconConcept(
id=compound.get('compoundAc').get('value'),
name=compound.get('compoundName').get('value'),
categories=[Category.chemical_substance.name],
description=None
))
return concepts
def get_exact_matches_to_concept_list(c):
results = []
for conceptId in c:
if ':' not in conceptId:
continue
xrefs = get_xrefs(conceptId)
if xrefs != []:
results.append(ExactMatchResponse(
id=conceptId,
within_domain=True,
has_exact_matches=xrefs
))
else:
results.append(ExactMatchResponse(
id=conceptId,
within_domain=False,
has_exact_matches=[]
))
return results
| true
| true
|
1c4389e92f7dcdefa6efc1d49c0fa7f9f02146b9
| 1,542
|
py
|
Python
|
team_9/cocos/test/test_shattered_tiles_3d.py
|
Donnyvdm/dojo19
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
[
"BSD-3-Clause"
] | 1
|
2019-09-15T18:59:49.000Z
|
2019-09-15T18:59:49.000Z
|
team_9/cocos/test/test_shattered_tiles_3d.py
|
Donnyvdm/dojo19
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
[
"BSD-3-Clause"
] | null | null | null |
team_9/cocos/test/test_shattered_tiles_3d.py
|
Donnyvdm/dojo19
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 2, s, t 5.1, s, q"
tags = "ShatteredTiles3D"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from pyglet import gl
class BackgroundLayer(cocos.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
gl.glColor4ub(255, 255, 255, 255)
gl.glPushMatrix()
self.transform()
self.img.blit(0,0)
gl.glPopMatrix()
description = """
Shows the ShatteredTiles3D effect applied over the scene.
This effect produces a render change at start time, and no more
until the action duration is reached.
Since in this sample StopGrid() is not used after the grid action,
no change would be seen at end of action.
"""
def main():
print(description)
director.init( resizable=True )
main_scene = cocos.scene.Scene()
main_scene.add( BackgroundLayer(), z=0 )
# In real code after a sequence of grid actions the StopGrid() action
    # should be called. Omitted here so the last grid action's render persists
a = ShatteredTiles3D( randrange=6, duration=10, grid=(8,6) )
main_scene.do( a )
director.run (main_scene)
if __name__ == '__main__':
main()
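As the comment above says, real code would stop the grid; a hedged variant using cocos' `+` action sequencing would be:
# a = ShatteredTiles3D(randrange=6, duration=10, grid=(8,6)) + StopGrid()
# main_scene.do(a)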
| 28.036364
| 74
| 0.702983
|
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
testinfo = "t 0.1, s, t 2, s, t 5.1, s, q"
tags = "ShatteredTiles3D"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from pyglet import gl
class BackgroundLayer(cocos.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
gl.glColor4ub(255, 255, 255, 255)
gl.glPushMatrix()
self.transform()
self.img.blit(0,0)
gl.glPopMatrix()
description = """
Shows the ShatteredTiles3D effect applied over the scene.
This effect produces a render change at start time, and no more
until the action duration is reached.
Since in this sample StopGrid() is not used after the grid action,
no change would be seen at end of action.
"""
def main():
print(description)
director.init( resizable=True )
main_scene = cocos.scene.Scene()
main_scene.add( BackgroundLayer(), z=0 )
a = ShatteredTiles3D( randrange=6, duration=10, grid=(8,6) )
main_scene.do( a )
director.run (main_scene)
if __name__ == '__main__':
main()
| true
| true
|
1c438aa0f087f05df0285155d7d372207cc59f58
| 913
|
py
|
Python
|
src/python/miplearn/__init__.py
|
bknueven/MIPLearn
|
43225681cb87ba5bcd699f409aa9b10225f9da2d
|
[
"BSD-3-Clause"
] | 1
|
2020-06-16T15:38:48.000Z
|
2020-06-16T15:38:48.000Z
|
src/python/miplearn/__init__.py
|
bknueven/MIPLearn
|
43225681cb87ba5bcd699f409aa9b10225f9da2d
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/miplearn/__init__.py
|
bknueven/MIPLearn
|
43225681cb87ba5bcd699f409aa9b10225f9da2d
|
[
"BSD-3-Clause"
] | null | null | null |
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from .extractors import (SolutionExtractor,
InstanceFeaturesExtractor,
ObjectiveValueExtractor,
VariableFeaturesExtractor,
)
from .components.component import Component
from .components.objective import ObjectiveValueComponent
from .components.lazy import LazyConstraintsComponent
from .components.primal import (PrimalSolutionComponent,
AdaptivePredictor,
)
from .components.branching import BranchPriorityComponent
from .benchmark import BenchmarkRunner
from .instance import Instance
from .solvers import LearningSolver
| 45.65
| 82
| 0.696605
|
from .extractors import (SolutionExtractor,
InstanceFeaturesExtractor,
ObjectiveValueExtractor,
VariableFeaturesExtractor,
)
from .components.component import Component
from .components.objective import ObjectiveValueComponent
from .components.lazy import LazyConstraintsComponent
from .components.primal import (PrimalSolutionComponent,
AdaptivePredictor,
)
from .components.branching import BranchPriorityComponent
from .benchmark import BenchmarkRunner
from .instance import Instance
from .solvers import LearningSolver
| true
| true
|
1c438ab0698c65a5de0b059062da1bf1ff546e58
| 7,391
|
py
|
Python
|
lib/ls_dataset/d3m_dataset.py
|
stevencdang/AutoML-DS-Components
|
b0490262d3db5307c37f82c92e25cd938dd3a242
|
[
"Apache-2.0"
] | null | null | null |
lib/ls_dataset/d3m_dataset.py
|
stevencdang/AutoML-DS-Components
|
b0490262d3db5307c37f82c92e25cd938dd3a242
|
[
"Apache-2.0"
] | 8
|
2020-09-04T17:39:26.000Z
|
2022-03-02T03:28:38.000Z
|
lib/ls_dataset/d3m_dataset.py
|
stevencdang/AutoML-DS-Components
|
b0490262d3db5307c37f82c92e25cd938dd3a242
|
[
"Apache-2.0"
] | null | null | null |
# Author: Steven C. Dang
# Class encapsulating operations on a remote d3m dataset
import logging
import os.path as path
import os
from io import IOBase
import json
import csv
import pandas as pd
from ls_dataset.ls_dataset import LSDataset
from ls_dataset.dsr_table import DSRTable
from ls_dataset.dsr_factory import DatasetResourceFactory
logger = logging.getLogger(__name__)
class D3MDataset(LSDataset):
"""
Class representing a remote dataset with prediction results
"""
def __init__(self, dspath, dsdata):
"""
inputs:
dspath - the path to the dataset root
dsdata - a dictionary containing the dataset metadata
"""
LSDataset.__init__(self, dspath)
logger.debug("Initializing D3M dataset")
# Parse dataset metadata
self.about = dsdata['about']
self.id = dsdata['about']['datasetID']
self.name = dsdata['about']['datasetName']
# Parse data resources in the dataset
self.dataResources = [DatasetResourceFactory.get_resource(dsr) for dsr in dsdata['dataResources']]
        # Store qualities field (currently not used)
if 'qualities' in dsdata:
self.qualities = dsdata['qualities']
else:
self.qualities = None
@staticmethod
def from_json(d):
"""
A static constructor of this class given a jsonified file
"""
if isinstance(d, str):
logger.debug("Loading json string")
ds_json = json.loads(d)
else:
logger.debug("Handling input with type: %s" % type(d))
ds_json = d
# logger.debug("got dataset json: %s" % str(ds_json))
# logger.debug("json about: %s" % ds_json['about'])
# logger.debug("json data resources: %s" % ds_json['dataResources'])
# json_doc = {'about': ds_json['about'],
# 'dataResources': ds_json['dataResources']
# }
# return D3MDataset(ds_json['dataset_info']['root_path'],
# json_doc)
return D3MDataset(ds_json['dataset_info']['root_path'],
ds_json)
@staticmethod
def from_dataset_json(fpath):
"""
A static constructor of this class given a dataset json
"""
if isinstance(fpath, str):
if path.exists(fpath):
#Get dataset path from json path
dpath = path.dirname(fpath)
            # dpath = path.split(path.split(fpath)[0])[0] # Assumes root
try:
with open(fpath, 'r') as f:
ds_json = json.load(f)
return D3MDataset(dpath,
ds_json)
except:
logger.error("Error while decoding dataset json: %s" % fpath)
else:
logger.error("Found no dataset json at path: %s" % str(fpath))
raise Exception("Found no dataset json at path: %s" % str(fpath))
elif isinstance(fpath, IOBase):
logger.debug("Loading dataset json from open file")
logger.debug("dataset path: %s" % str(fpath))
        dpath = path.dirname(fpath.name)  # fpath is an open file; use its backing path
# dpath = path.split(path.split(fpath)[0])[0]
# ds_json = json.load(fpath)
        ds_json = json.load(fpath)  # json.load dropped the encoding kwarg in Python 3.9
return D3MDataset(dpath,
ds_json)
else:
logger.error("Found no dataset json at path: %s" % str(fpath))
raise Exception("Found no dataset json at path: %s" % str(fpath))
@staticmethod
def get_schema_path(dpath):
name = path.split(dpath)[-1]
fpath = path.join(dpath, name + '_dataset', LSDataset.__default_schema__)
if path.exists(fpath):
return fpath
else:
raise Exception("No schema doc found in dataset directory: %s" % dpath)
def to_component_out_file(self, fpath):
"""
Write the dataset to file for passing between components.
Writes the first row of a tab separated file as the list of column names.
        The first cell of the second row is the JSON representation of the dataset.
"""
for resource in self.dataResources:
if resource.resType == 'table':
logger.debug("Resource type: %s\t %s" % (str(type(resource.columns)), str(resource.columns)))
for col in resource.columns:
logger.debug("Type: %s\t col: %s" % (str(type(col)), str(col)))
names = [col.colName for col in resource.columns]
js = self.to_json()
with open(fpath, 'w') as out_file:
logger.debug("Writing dataset json to component out file: %s" % fpath)
writer = csv.writer(out_file, delimiter='\t')
writer.writerow(names)
writer.writerow([js])
@staticmethod
def from_component_out_file(fpath):
"""
Load the dataset from an out file written to pass between workflow components
"""
if isinstance(fpath, str):
in_file = open(fpath, 'r')
reader = csv.reader(in_file, delimiter='\t')
rows = [row for row in reader]
in_file.close()
elif isinstance(fpath, IOBase):
reader = csv.reader(fpath, delimiter='\t')
rows = [row for row in reader]
fpath.close()
col_names = rows[0]
logger.debug("Got columns names: %s" % str(col_names))
# logger.debug("Got dataset row with type %s:\t %s" % (str(type(rows[1][0])), str(rows[1][0])))
# logger.debug(len(rows[1]))
# logger.debug(rows[1][0])
# logger.debug(type(rows[1][0]))
return D3MDataset.from_json(rows[1][0])
def to_json(self, fpath=None):
"""
Write the dataset to info to file and return a string with the json. If no path is given,
then just returns a string with the json representation of the dataset json
"""
# logger.debug("D3MDataset to json")
out = json.loads(super().to_json())
out['about'] = self.about
out['dataResources'] = [json.loads(rc.to_json()) for rc in self.dataResources]
out['qualities'] = self.qualities
if fpath is not None:
logger.debug("Writing dataset json to: %s" % fpath)
out_file = open(fpath, 'w')
json.dump(out, out_file)
out_file.close()
return json.dumps(out)
def __str__(self):
return self.to_json()
def load_dataset(self):
"""
Load the dataset table
"""
data = None
for dr in [dr for dr in self.dataResources if type(dr) is DSRTable]:
logger.debug("Found data resource table with ID: %s\tpath: %s" % (dr.resID, dr.resPath))
if data is None:
dpath = path.join(self.dpath, dr.resPath)
                data = pd.read_csv(dpath, sep=',')
return data
def get_data_columns(self):
for dr in [dr for dr in self.dataResources if type(dr) is DSRTable]:
logger.debug("Found data resource table with ID: %s\tpath: %s" % (dr.resID, dr.resPath))
return [col for col in dr.columns if col.colName != 'd3mIndex']
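For reference, the component out-file written by to_component_out_file has this two-row shape (column names and json below are invented):
# row 1, tab-separated column names:      d3mIndex <TAB> sepalLength <TAB> species
# row 2, one cell holding the dataset json:   {"about": {...}, "dataResources": [...], ...}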
| 34.699531
| 109
| 0.56677
|
import logging
import os.path as path
import os
from io import IOBase
import json
import csv
import pandas as pd
from ls_dataset.ls_dataset import LSDataset
from ls_dataset.dsr_table import DSRTable
from ls_dataset.dsr_factory import DatasetResourceFactory
logger = logging.getLogger(__name__)
class D3MDataset(LSDataset):
def __init__(self, dspath, dsdata):
LSDataset.__init__(self, dspath)
logger.debug("Initializing D3M dataset")
self.about = dsdata['about']
self.id = dsdata['about']['datasetID']
self.name = dsdata['about']['datasetName']
self.dataResources = [DatasetResourceFactory.get_resource(dsr) for dsr in dsdata['dataResources']]
if 'qualities' in dsdata:
self.qualities = dsdata['qualities']
else:
self.qualities = None
@staticmethod
def from_json(d):
if isinstance(d, str):
logger.debug("Loading json string")
ds_json = json.loads(d)
else:
logger.debug("Handling input with type: %s" % type(d))
ds_json = d
return D3MDataset(ds_json['dataset_info']['root_path'],
ds_json)
@staticmethod
def from_dataset_json(fpath):
if isinstance(fpath, str):
if path.exists(fpath):
dpath = path.dirname(fpath)
try:
with open(fpath, 'r') as f:
ds_json = json.load(f)
return D3MDataset(dpath,
ds_json)
except:
logger.error("Error while decoding dataset json: %s" % fpath)
else:
logger.error("Found no dataset json at path: %s" % str(fpath))
raise Exception("Found no dataset json at path: %s" % str(fpath))
elif isinstance(fpath, IOBase):
logger.debug("Loading dataset json from open file")
logger.debug("dataset path: %s" % str(fpath))
        dpath = path.dirname(fpath.name)
        ds_json = json.load(fpath)
return D3MDataset(dpath,
ds_json)
else:
logger.error("Found no dataset json at path: %s" % str(fpath))
raise Exception("Found no dataset json at path: %s" % str(fpath))
@staticmethod
def get_schema_path(dpath):
name = path.split(dpath)[-1]
fpath = path.join(dpath, name + '_dataset', LSDataset.__default_schema__)
if path.exists(fpath):
return fpath
else:
raise Exception("No schema doc found in dataset directory: %s" % dpath)
def to_component_out_file(self, fpath):
for resource in self.dataResources:
if resource.resType == 'table':
logger.debug("Resource type: %s\t %s" % (str(type(resource.columns)), str(resource.columns)))
for col in resource.columns:
logger.debug("Type: %s\t col: %s" % (str(type(col)), str(col)))
names = [col.colName for col in resource.columns]
js = self.to_json()
with open(fpath, 'w') as out_file:
logger.debug("Writing dataset json to component out file: %s" % fpath)
writer = csv.writer(out_file, delimiter='\t')
writer.writerow(names)
writer.writerow([js])
@staticmethod
def from_component_out_file(fpath):
if isinstance(fpath, str):
in_file = open(fpath, 'r')
reader = csv.reader(in_file, delimiter='\t')
rows = [row for row in reader]
in_file.close()
elif isinstance(fpath, IOBase):
reader = csv.reader(fpath, delimiter='\t')
rows = [row for row in reader]
fpath.close()
col_names = rows[0]
logger.debug("Got columns names: %s" % str(col_names))
return D3MDataset.from_json(rows[1][0])
def to_json(self, fpath=None):
out = json.loads(super().to_json())
out['about'] = self.about
out['dataResources'] = [json.loads(rc.to_json()) for rc in self.dataResources]
out['qualities'] = self.qualities
if fpath is not None:
logger.debug("Writing dataset json to: %s" % fpath)
out_file = open(fpath, 'w')
json.dump(out, out_file)
out_file.close()
return json.dumps(out)
def __str__(self):
return self.to_json()
def load_dataset(self):
data = None
for dr in [dr for dr in self.dataResources if type(dr) is DSRTable]:
logger.debug("Found data resource table with ID: %s\tpath: %s" % (dr.resID, dr.resPath))
if data is None:
dpath = path.join(self.dpath, dr.resPath)
                data = pd.read_csv(dpath, sep=',')
return data
def get_data_columns(self):
for dr in [dr for dr in self.dataResources if type(dr) is DSRTable]:
logger.debug("Found data resource table with ID: %s\tpath: %s" % (dr.resID, dr.resPath))
return [col for col in dr.columns if col.colName != 'd3mIndex']
| true
| true
|
1c438abc7906ec59abae2c735dd5b01a8b172294
| 2,358
|
py
|
Python
|
effects/game-of-life/rle2png.py
|
Kristopher38/demoscene
|
a9909597ed142ad92e0fd2968f8976c8f5db1657
|
[
"Artistic-2.0"
] | null | null | null |
effects/game-of-life/rle2png.py
|
Kristopher38/demoscene
|
a9909597ed142ad92e0fd2968f8976c8f5db1657
|
[
"Artistic-2.0"
] | null | null | null |
effects/game-of-life/rle2png.py
|
Kristopher38/demoscene
|
a9909597ed142ad92e0fd2968f8976c8f5db1657
|
[
"Artistic-2.0"
] | null | null | null |
#!/usr/bin/env python3
from sys import argv
from PIL import Image
import re
import argparse
header_pattern = re.compile(
r'x\s*=\s*(\d+)\s*,\s*y\s*=\s*(\d+)(?:\s*,\s*rule\s*=\s*(\w+\/\w+)\s*)*'
)
rle_pattern = re.compile(r'(\d*)(o|b|\$)')
rotate_transforms = {
90: Image.ROTATE_90,
180: Image.ROTATE_180,
270: Image.ROTATE_270,
}
def convert_rle(infile):
with open(infile, 'r') as f:
# skip comments
line = f.readline().strip()
while line.startswith("#"):
line = f.readline().strip()
if line == '':
line = f.readline().strip()
header = line
# read header
captures = header_pattern.match(header)
x = int(captures.group(1))
y = int(captures.group(2))
rule = captures.group(3)
# decode RLE into the image
        # palette mode (only this can produce a 1-bit image,
# 1 and L seem to always output 8-bit)
im = Image.new("P", (x, y))
cur_x = 0
cur_y = 0
for line in f:
captures = [(int(count) if count != '' else 1, tag)
for count, tag in rle_pattern.findall(line)]
for count, tag in captures:
for i in range(0, count):
if tag == 'b':
im.putpixel((cur_x, cur_y), 0)
cur_x += 1
elif tag == 'o':
im.putpixel((cur_x, cur_y), 1)
cur_x += 1
elif tag == '$':
cur_y += 1
cur_x = 0
else:
im.putpixel((cur_x, cur_y), 1)
cur_x += 1
im.putpalette([0, 0, 0, 255, 255, 255])
return im
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Converts RLE game of life description to 1-bit PNG.')
parser.add_argument('infile', help='Input RLE file')
parser.add_argument('outfile', help='Output PNG file')
parser.add_argument('--rotate', help='Rotate output image', type=int,
choices=[90, 180, 270])
args = parser.parse_args()
im = convert_rle(args.infile)
if args.rotate:
im = im.transpose(rotate_transforms[args.rotate])
im.save(args.outfile, bits=1)
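A worked decode of one small pattern, the standard glider, shows how the tags map to pixels:
# RLE body "bob$2bo$3o!" with header "x = 3, y = 3" decodes row by row:
#   b o b  ->  . O .    (row 0)
#   2b o   ->  . . O    (row 1)
#   3o     ->  O O O    (row 2)
# The terminating '!' is never matched by rle_pattern, so it is silently ignored.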
| 29.848101
| 76
| 0.502969
|
from sys import argv
from PIL import Image
import re
import argparse
header_pattern = re.compile(
r'x\s*=\s*(\d+)\s*,\s*y\s*=\s*(\d+)(?:\s*,\s*rule\s*=\s*(\w+\/\w+)\s*)*'
)
rle_pattern = re.compile(r'(\d*)(o|b|\$)')
rotate_transforms = {
90: Image.ROTATE_90,
180: Image.ROTATE_180,
270: Image.ROTATE_270,
}
def convert_rle(infile):
with open(infile, 'r') as f:
line = f.readline().strip()
while line.startswith("#"):
line = f.readline().strip()
if line == '':
line = f.readline().strip()
header = line
captures = header_pattern.match(header)
x = int(captures.group(1))
y = int(captures.group(2))
rule = captures.group(3)
im = Image.new("P", (x, y))
cur_x = 0
cur_y = 0
for line in f:
captures = [(int(count) if count != '' else 1, tag)
for count, tag in rle_pattern.findall(line)]
for count, tag in captures:
for i in range(0, count):
if tag == 'b':
im.putpixel((cur_x, cur_y), 0)
cur_x += 1
elif tag == 'o':
im.putpixel((cur_x, cur_y), 1)
cur_x += 1
elif tag == '$':
cur_y += 1
cur_x = 0
else:
im.putpixel((cur_x, cur_y), 1)
cur_x += 1
im.putpalette([0, 0, 0, 255, 255, 255])
return im
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Converts RLE game of life description to 1-bit PNG.')
parser.add_argument('infile', help='Input RLE file')
parser.add_argument('outfile', help='Output PNG file')
parser.add_argument('--rotate', help='Rotate output image', type=int,
choices=[90, 180, 270])
args = parser.parse_args()
im = convert_rle(args.infile)
if args.rotate:
im = im.transpose(rotate_transforms[args.rotate])
im.save(args.outfile, bits=1)
| true
| true
|
1c438ba5c7df53e8035695eedf765a3fdd0e338e
| 2,243
|
py
|
Python
|
rapm_prior/download_stats_nba_data.py
|
airalcorn2/NBA_Tutorials
|
85179300864d2c97a727a7b2a5ee46d250bdac20
|
[
"MIT"
] | 123
|
2019-01-02T20:51:19.000Z
|
2022-03-16T23:37:17.000Z
|
rapm_prior/download_stats_nba_data.py
|
nicholasrios/NBA_Tutorials
|
85179300864d2c97a727a7b2a5ee46d250bdac20
|
[
"MIT"
] | 8
|
2019-07-31T14:32:46.000Z
|
2022-02-14T00:16:15.000Z
|
rapm_prior/download_stats_nba_data.py
|
nicholasrios/NBA_Tutorials
|
85179300864d2c97a727a7b2a5ee46d250bdac20
|
[
"MIT"
] | 38
|
2019-05-15T23:26:16.000Z
|
2022-01-03T05:35:13.000Z
|
import json
import pandas as pd
import urllib3
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
header_data = {
'Host': 'stats.nba.com',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
'Referer': 'stats.nba.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
# endpoints
def player_stats_url(season):
return "https://stats.nba.com/stats/leaguedashplayerstats?College=&Conference=&Country=&DateFrom=&DateTo=&Division=&DraftPick=&DraftYear=&GameScope=&GameSegment=&Height=&LastNGames=0&LeagueID=00&Location=&MeasureType=Base&Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&PerMode=Totals&Period=0&PlayerExperience=&PlayerPosition=&PlusMinus=N&Rank=N&Season={0}&SeasonSegment=&SeasonType=Regular+Season&ShotClockRange=&StarterBench=&TeamID=0&TwoWay=0&VsConference=&VsDivision=&Weight=".format(
season)
# Extract json
def extract_data(http_client, url):
r = http_client.request('GET', url, headers=header_data) # Call the GET endpoint
resp = json.loads(r.data) # Convert the response to a json object
    results = resp['resultSets'][0] # take the first item in resultSets (this can be determined by inspecting the json response)
headers = results['headers'] # take the headers of the response (our column names)
rows = results['rowSet'] # take the rows of our response
frame = pd.DataFrame(rows) # convert the rows to a dataframe
frame.columns = headers # set our column names using the extracted headers
return frame
client = urllib3.PoolManager()
season = "2019-20"
frame = extract_data(client, player_stats_url(season))
frame.to_csv("stats_nba_player_data_{0}.csv".format(season), index=False)
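The same helper loops cleanly over several seasons (season strings follow stats.nba.com's "YYYY-YY" convention):
for season in ["2017-18", "2018-19", "2019-20"]:
    frame = extract_data(client, player_stats_url(season))
    frame.to_csv("stats_nba_player_data_{0}.csv".format(season), index=False)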
| 48.76087
| 502
| 0.664735
|
import json
import pandas as pd
import urllib3
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
header_data = {
'Host': 'stats.nba.com',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
'Referer': 'stats.nba.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
def player_stats_url(season):
return "https://stats.nba.com/stats/leaguedashplayerstats?College=&Conference=&Country=&DateFrom=&DateTo=&Division=&DraftPick=&DraftYear=&GameScope=&GameSegment=&Height=&LastNGames=0&LeagueID=00&Location=&MeasureType=Base&Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&PerMode=Totals&Period=0&PlayerExperience=&PlayerPosition=&PlusMinus=N&Rank=N&Season={0}&SeasonSegment=&SeasonType=Regular+Season&ShotClockRange=&StarterBench=&TeamID=0&TwoWay=0&VsConference=&VsDivision=&Weight=".format(
season)
def extract_data(http_client, url):
r = http_client.request('GET', url, headers=header_data)
resp = json.loads(r.data)
results = resp['resultSets'][0]
headers = results['headers']
rows = results['rowSet']
frame = pd.DataFrame(rows)
frame.columns = headers
return frame
client = urllib3.PoolManager()
season = "2019-20"
frame = extract_data(client, player_stats_url(season))
frame.to_csv("stats_nba_player_data_{0}.csv".format(season), index=False)
| true
| true
|
1c438c1bf0fc289020d2aef076f6dd54765a738f
| 1,016
|
py
|
Python
|
apprest/management/commands/insert_user.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 4
|
2018-12-04T15:08:27.000Z
|
2019-04-11T09:49:41.000Z
|
apprest/management/commands/insert_user.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 63
|
2018-11-22T13:07:56.000Z
|
2021-06-10T20:55:58.000Z
|
apprest/management/commands/insert_user.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 10
|
2018-11-23T08:17:28.000Z
|
2022-01-15T23:41:59.000Z
|
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from apprest.services.experiment import CalipsoExperimentsServices
class Command(BaseCommand):
help = 'Insert new user'
experiments_services = CalipsoExperimentsServices()
def add_arguments(self, parser):
parser.add_argument('--userlogin', dest='userlogin', default='', help='The username', type=str)
def handle(self, *args, **options):
username = options['userlogin']
if not username:
raise CommandError(
'python manage.py insert_user --userlogin username')
try:
User.objects.get(username=username)
self.stdout.write(self.style.ERROR('User %s already exists' % username))
except User.DoesNotExist as udne:
new_user = User.objects.create_user(username, '')
new_user.save()
self.stdout.write(self.style.SUCCESS('Successfully inserted user %s' % username))
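A hypothetical invocation (the username is illustrative):
# python manage.py insert_user --userlogin jdoe
# -> Successfully inserted user jdoe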
| 36.285714
| 103
| 0.675197
|
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from apprest.services.experiment import CalipsoExperimentsServices
class Command(BaseCommand):
help = 'Insert new user'
experiments_services = CalipsoExperimentsServices()
def add_arguments(self, parser):
parser.add_argument('--userlogin', dest='userlogin', default='', help='The username', type=str)
def handle(self, *args, **options):
username = options['userlogin']
if not username:
raise CommandError(
'python manage.py insert_user --userlogin username')
try:
User.objects.get(username=username)
self.stdout.write(self.style.ERROR('User %s already exists' % username))
except User.DoesNotExist as udne:
new_user = User.objects.create_user(username, '')
new_user.save()
self.stdout.write(self.style.SUCCESS('Successfully inserted user %s' % username))
| true
| true
|
1c438c84d51ba5ee38e7953290216d463f2c40f8
| 744
|
py
|
Python
|
ginpar/utils/strings.py
|
davidomarf/ginpar
|
cee1676ba2280d09f43a52f5145b28388b8e1152
|
[
"MIT"
] | 12
|
2019-10-19T15:35:54.000Z
|
2021-01-18T00:15:10.000Z
|
ginpar/utils/strings.py
|
davidomarf/ginpar
|
cee1676ba2280d09f43a52f5145b28388b8e1152
|
[
"MIT"
] | 19
|
2019-10-06T22:10:23.000Z
|
2019-11-08T17:53:16.000Z
|
ginpar/utils/strings.py
|
davidomarf/ginpar
|
cee1676ba2280d09f43a52f5145b28388b8e1152
|
[
"MIT"
] | null | null | null |
"""String filters to convert between cases.
The list of filters in here are added to the Jinja2 environment, so they may
come handy when designing a custom theme.
"""
def unkebab(s):
"""Replace dashes with spaces.
Parameters
----------
s : str
String that may contain "-" characters.
"""
return " ".join(s.split("-"))
def space_to_kebab(s):
"""Replace spaces with dashes.
Parameters
----------
s : str
String that may contain " " characters.
"""
return "-".join(s.split(" "))
def camel_to_space(s):
"""Replace low dashes with spaces.
Parameters
----------
s : str
String that may contain "_" characters.
"""
return " ".join(s.split("_"))
| 19.076923
| 76
| 0.577957
|
def unkebab(s):
return " ".join(s.split("-"))
def space_to_kebab(s):
return "-".join(s.split(" "))
def camel_to_space(s):
return " ".join(s.split("_"))
| true
| true
|
1c438e8445a5ef1fa4671cd7454ca72b222337f0
| 7,551
|
bzl
|
Python
|
haskell/private/actions/package.bzl
|
ghuntley/rules_haskell
|
adc3503387fbb54173dc4b4f21ae0aefe33759a4
|
[
"Apache-2.0"
] | null | null | null |
haskell/private/actions/package.bzl
|
ghuntley/rules_haskell
|
adc3503387fbb54173dc4b4f21ae0aefe33759a4
|
[
"Apache-2.0"
] | null | null | null |
haskell/private/actions/package.bzl
|
ghuntley/rules_haskell
|
adc3503387fbb54173dc4b4f21ae0aefe33759a4
|
[
"Apache-2.0"
] | null | null | null |
"""Action for creating packages and registering them with ghc-pkg"""
load("@bazel_skylib//lib:paths.bzl", "paths")
load(":private/path_utils.bzl", "target_unique_name")
load(":private/pkg_id.bzl", "pkg_id")
load(":private/set.bzl", "set")
load(":private/path_utils.bzl", "get_lib_name")
def _get_extra_libraries(dep_info):
"""Get directories and library names for extra library dependencies.
Args:
dep_info: HaskellInfo provider of the package.
Returns:
(dirs, libs):
dirs: list: Library search directories for extra library dependencies.
libs: list: Extra library dependencies.
"""
cc_libs = dep_info.cc_dependencies.dynamic_linking.libraries_to_link.to_list()
# The order in which library dependencies are listed is relevant when
# linking static archives. To maintain the order defined by the input
# depset we collect the library dependencies in a list, and use a separate
# set to deduplicate entries.
seen_libs = set.empty()
extra_libs = []
extra_lib_dirs = set.empty()
for lib in cc_libs:
lib_name = get_lib_name(lib)
if not set.is_member(seen_libs, lib_name):
set.mutable_insert(seen_libs, lib_name)
extra_libs.append(lib_name)
set.mutable_insert(extra_lib_dirs, lib.dirname)
return (set.to_list(extra_lib_dirs), extra_libs)
def package(
hs,
dep_info,
interfaces_dir,
interfaces_dir_prof,
static_library,
dynamic_library,
exposed_modules_file,
other_modules,
my_pkg_id,
static_library_prof):
"""Create GHC package using ghc-pkg.
Args:
hs: Haskell context.
interfaces_dir: Directory containing interface files.
static_library: Static library of the package.
dynamic_library: Dynamic library of the package.
static_library_prof: Static library compiled with profiling or None.
Returns:
(File, File): GHC package conf file, GHC package cache file
"""
pkg_db_dir = pkg_id.to_string(my_pkg_id)
conf_file = hs.actions.declare_file(
paths.join(pkg_db_dir, "{0}.conf".format(pkg_db_dir)),
)
cache_file = hs.actions.declare_file("package.cache", sibling = conf_file)
import_dir = paths.join(
"${pkgroot}",
paths.join(pkg_db_dir, "_iface"),
)
interfaces_dirs = [interfaces_dir]
if interfaces_dir_prof != None:
import_dir_prof = paths.join(
"${pkgroot}",
paths.join(pkg_db_dir, "_iface_prof"),
)
interfaces_dirs.append(interfaces_dir_prof)
else:
import_dir_prof = ""
(extra_lib_dirs, extra_libs) = _get_extra_libraries(dep_info)
metadata_entries = {
"name": my_pkg_id.name,
"version": my_pkg_id.version,
"id": pkg_id.to_string(my_pkg_id),
"key": pkg_id.to_string(my_pkg_id),
"exposed": "True",
"hidden-modules": " ".join(other_modules),
"import-dirs": " ".join([import_dir, import_dir_prof]),
"library-dirs": " ".join(["${pkgroot}"] + extra_lib_dirs),
"dynamic-library-dirs": " ".join(["${pkgroot}"] + extra_lib_dirs),
"hs-libraries": pkg_id.library_name(hs, my_pkg_id),
"extra-libraries": " ".join(extra_libs),
"depends": ", ".join(
# Prebuilt dependencies are added further down, since their
# package-ids are not available as strings but in build outputs.
set.to_list(dep_info.package_ids),
),
}
    # Create the file from which ghc-pkg will build the actual package.
    # The list of exposed modules is generated below.
metadata_file = hs.actions.declare_file(target_unique_name(hs, "metadata"))
hs.actions.write(
output = metadata_file,
content = "\n".join([
"{0}: {1}".format(k, v)
for k, v in metadata_entries.items()
if v
]) + "\n",
)
# Collect the package id files of all prebuilt dependencies.
prebuilt_deps_id_files = [
dep.id_file
for dep in set.to_list(dep_info.prebuilt_dependencies)
]
# Combine exposed modules and other metadata to form the package
# configuration file.
prebuilt_deps_args = hs.actions.args()
prebuilt_deps_args.add_all([f.path for f in prebuilt_deps_id_files])
prebuilt_deps_args.use_param_file("%s", use_always = True)
prebuilt_deps_args.set_param_file_format("multiline")
hs.actions.run_shell(
inputs = [metadata_file, exposed_modules_file] + prebuilt_deps_id_files,
outputs = [conf_file],
command = """
cat $1 > $4
echo "exposed-modules: `cat $2`" >> $4
    # This is equivalent to 'readarray'. We avoid 'readarray' itself in order
    # to support older bash versions that lack it.
while IFS= read -r line; do deps_id_files+=("$line"); done < $3
if [ ${#deps_id_files[@]} -eq 0 ]; then
deps=""
else
deps=$(cat "${deps_id_files[@]}" | tr '\n' " ")
fi
echo "depends: $deps" >> $4
""",
arguments = [
metadata_file.path,
exposed_modules_file.path,
prebuilt_deps_args,
conf_file.path,
],
use_default_shell_env = True,
)
# Make the call to ghc-pkg and use the package configuration file
package_path = ":".join([c.dirname for c in set.to_list(dep_info.package_databases)]) + ":"
hs.actions.run(
inputs = depset(transitive = [
set.to_depset(dep_info.package_databases),
depset(interfaces_dirs),
depset([
input
for input in [
static_library,
conf_file,
dynamic_library,
static_library_prof,
]
if input
]),
]),
outputs = [cache_file],
env = {
"GHC_PACKAGE_PATH": package_path,
},
mnemonic = "HaskellRegisterPackage",
progress_message = "HaskellRegisterPackage {}".format(hs.label),
executable = hs.tools.ghc_pkg,
# Registration of a new package consists in,
#
# 1. copying the registration file into the package db,
# 2. performing some validation on the registration file content,
# 3. recaching, i.e. regenerating the package db cache file.
#
# Normally, this is all done by `ghc-pkg register`. But in our
# case, `ghc-pkg register` is painful, because the validation
# it performs is slow, somewhat redundant but especially, too
# strict (see e.g.
# https://ghc.haskell.org/trac/ghc/ticket/15478). So we do (1)
# and (3) manually, by copying then calling `ghc-pkg recache`
# directly.
#
# The downside is that we do lose the few validations that
# `ghc-pkg register` was doing that was useful. e.g. when
# reexporting modules, validation checks that the source
# module does exist.
#
# TODO Go back to using `ghc-pkg register`. Blocked by
# https://ghc.haskell.org/trac/ghc/ticket/15478
arguments = [
"recache",
"--package-db={0}".format(conf_file.dirname),
"-v0",
"--no-expand-pkgroot",
],
# XXX: Seems required for this to work on Windows
use_default_shell_env = True,
)
return conf_file, cache_file
| 35.78673
| 95
| 0.610118
|
load("@bazel_skylib//lib:paths.bzl", "paths")
load(":private/path_utils.bzl", "target_unique_name")
load(":private/pkg_id.bzl", "pkg_id")
load(":private/set.bzl", "set")
load(":private/path_utils.bzl", "get_lib_name")
def _get_extra_libraries(dep_info):
cc_libs = dep_info.cc_dependencies.dynamic_linking.libraries_to_link.to_list()
seen_libs = set.empty()
extra_libs = []
extra_lib_dirs = set.empty()
for lib in cc_libs:
lib_name = get_lib_name(lib)
if not set.is_member(seen_libs, lib_name):
set.mutable_insert(seen_libs, lib_name)
extra_libs.append(lib_name)
set.mutable_insert(extra_lib_dirs, lib.dirname)
return (set.to_list(extra_lib_dirs), extra_libs)
def package(
hs,
dep_info,
interfaces_dir,
interfaces_dir_prof,
static_library,
dynamic_library,
exposed_modules_file,
other_modules,
my_pkg_id,
static_library_prof):
pkg_db_dir = pkg_id.to_string(my_pkg_id)
conf_file = hs.actions.declare_file(
paths.join(pkg_db_dir, "{0}.conf".format(pkg_db_dir)),
)
cache_file = hs.actions.declare_file("package.cache", sibling = conf_file)
import_dir = paths.join(
"${pkgroot}",
paths.join(pkg_db_dir, "_iface"),
)
interfaces_dirs = [interfaces_dir]
if interfaces_dir_prof != None:
import_dir_prof = paths.join(
"${pkgroot}",
paths.join(pkg_db_dir, "_iface_prof"),
)
interfaces_dirs.append(interfaces_dir_prof)
else:
import_dir_prof = ""
(extra_lib_dirs, extra_libs) = _get_extra_libraries(dep_info)
metadata_entries = {
"name": my_pkg_id.name,
"version": my_pkg_id.version,
"id": pkg_id.to_string(my_pkg_id),
"key": pkg_id.to_string(my_pkg_id),
"exposed": "True",
"hidden-modules": " ".join(other_modules),
"import-dirs": " ".join([import_dir, import_dir_prof]),
"library-dirs": " ".join(["${pkgroot}"] + extra_lib_dirs),
"dynamic-library-dirs": " ".join(["${pkgroot}"] + extra_lib_dirs),
"hs-libraries": pkg_id.library_name(hs, my_pkg_id),
"extra-libraries": " ".join(extra_libs),
"depends": ", ".join(
set.to_list(dep_info.package_ids),
),
}
metadata_file = hs.actions.declare_file(target_unique_name(hs, "metadata"))
hs.actions.write(
output = metadata_file,
content = "\n".join([
"{0}: {1}".format(k, v)
for k, v in metadata_entries.items()
if v
]) + "\n",
)
prebuilt_deps_id_files = [
dep.id_file
for dep in set.to_list(dep_info.prebuilt_dependencies)
]
prebuilt_deps_args = hs.actions.args()
prebuilt_deps_args.add_all([f.path for f in prebuilt_deps_id_files])
prebuilt_deps_args.use_param_file("%s", use_always = True)
prebuilt_deps_args.set_param_file_format("multiline")
hs.actions.run_shell(
inputs = [metadata_file, exposed_modules_file] + prebuilt_deps_id_files,
outputs = [conf_file],
command = """
cat $1 > $4
echo "exposed-modules: `cat $2`" >> $4
    # This is equivalent to 'readarray'. We avoid 'readarray' itself in order
    # to support older bash versions that lack it.
while IFS= read -r line; do deps_id_files+=("$line"); done < $3
if [ ${#deps_id_files[@]} -eq 0 ]; then
deps=""
else
deps=$(cat "${deps_id_files[@]}" | tr '\n' " ")
fi
echo "depends: $deps" >> $4
""",
arguments = [
metadata_file.path,
exposed_modules_file.path,
prebuilt_deps_args,
conf_file.path,
],
use_default_shell_env = True,
)
package_path = ":".join([c.dirname for c in set.to_list(dep_info.package_databases)]) + ":"
hs.actions.run(
inputs = depset(transitive = [
set.to_depset(dep_info.package_databases),
depset(interfaces_dirs),
depset([
input
for input in [
static_library,
conf_file,
dynamic_library,
static_library_prof,
]
if input
]),
]),
outputs = [cache_file],
env = {
"GHC_PACKAGE_PATH": package_path,
},
mnemonic = "HaskellRegisterPackage",
progress_message = "HaskellRegisterPackage {}".format(hs.label),
executable = hs.tools.ghc_pkg,
arguments = [
"recache",
"--package-db={0}".format(conf_file.dirname),
"-v0",
"--no-expand-pkgroot",
],
use_default_shell_env = True,
)
return conf_file, cache_file
| true
| true
|
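_get_extra_libraries above uses a pattern worth calling out: link order matters when linking static archives, so it keeps library names in a list to preserve order and uses a set only to screen duplicates. The plain-Python sketch below shows the same first-seen-order deduplication; names are illustrative, not from the Starlark file.

def dedupe_preserving_order(items):
    """Drop duplicates while keeping first-seen order, as the Starlark helper does."""
    seen = set()
    ordered = []
    for item in items:
        if item not in seen:
            seen.add(item)
            ordered.append(item)
    return ordered

assert dedupe_preserving_order(["z", "a", "z", "b"]) == ["z", "a", "b"]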
1c438f9a008ee3db85c82e845472932de6e679c6
| 2,660
|
py
|
Python
|
bin/distribution_test.py
|
astrolabsoftware/fink-source
|
9003e933e1d8c3f03b70c2b277638de97be102ec
|
[
"Apache-2.0"
] | 17
|
2019-03-08T12:37:06.000Z
|
2022-02-01T18:02:07.000Z
|
bin/distribution_test.py
|
astrolabsoftware/fink-source
|
9003e933e1d8c3f03b70c2b277638de97be102ec
|
[
"Apache-2.0"
] | 559
|
2019-03-07T14:55:27.000Z
|
2022-03-11T20:13:12.000Z
|
bin/distribution_test.py
|
tallamjr/fink-broker
|
97753ff695b78ea52d084cac787dec6c52c4e4cc
|
[
"Apache-2.0"
] | 12
|
2019-03-08T13:04:38.000Z
|
2022-01-23T22:22:50.000Z
|
#!/usr/bin/env python
# Copyright 2019-2021 AstroLab Software
# Author: Abhishek Chauhan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For verifying the working of the distribution pipeline.
Consume the distributed alerts from the Kafka Server.
1. Read from Kafka topic(s)
2. Deserialize the avro data using the pre-defined schema
3. Carry out operations on the obtained DataFrame
"""
import argparse
import time
from fink_broker.parser import getargs
from fink_broker.sparkUtils import init_sparksession
from fink_broker.distributionUtils import decode_kafka_df
from fink_broker.loggingUtils import get_fink_logger, inspect_application
def main():
parser = argparse.ArgumentParser(description=__doc__)
args = getargs(parser)
# Initialise Spark session
spark = init_sparksession(name="distribution_test", shuffle_partitions=2)
# The level here should be controlled by an argument.
logger = get_fink_logger(spark.sparkContext.appName, args.log_level)
# debug statements
inspect_application(logger)
# Topic to read from
topic = args.distribution_topic
broker_list = args.distribution_servers
# Read from the Kafka topic
df_kafka = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", broker_list) \
.option("kafka.security.protocol", "SASL_PLAINTEXT")\
.option("kafka.sasl.mechanism", "SCRAM-SHA-512")\
.option("subscribe", topic) \
.load()
# Decode df_kafka into a Spark DataFrame with StructType column
df = decode_kafka_df(df_kafka, args.distribution_schema)
# Print received stream to the console
df = df.select("struct.*")
print("\nReading Fink OutStream\n")
debug_query = df.writeStream\
.format("console")\
.trigger(processingTime='2 seconds')\
.start()
# Keep the Streaming running for some time
if args.exit_after is not None:
time.sleep(args.exit_after)
debug_query.stop()
logger.info("Exiting distribution_test service normally...")
else:
debug_query.awaitTermination()
if __name__ == "__main__":
main()
| 31.666667
| 77
| 0.721805
|
import argparse
import time
from fink_broker.parser import getargs
from fink_broker.sparkUtils import init_sparksession
from fink_broker.distributionUtils import decode_kafka_df
from fink_broker.loggingUtils import get_fink_logger, inspect_application
def main():
parser = argparse.ArgumentParser(description=__doc__)
args = getargs(parser)
spark = init_sparksession(name="distribution_test", shuffle_partitions=2)
logger = get_fink_logger(spark.sparkContext.appName, args.log_level)
inspect_application(logger)
topic = args.distribution_topic
broker_list = args.distribution_servers
df_kafka = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", broker_list) \
.option("kafka.security.protocol", "SASL_PLAINTEXT")\
.option("kafka.sasl.mechanism", "SCRAM-SHA-512")\
.option("subscribe", topic) \
.load()
df = decode_kafka_df(df_kafka, args.distribution_schema)
df = df.select("struct.*")
print("\nReading Fink OutStream\n")
debug_query = df.writeStream\
.format("console")\
.trigger(processingTime='2 seconds')\
.start()
if args.exit_after is not None:
time.sleep(args.exit_after)
debug_query.stop()
logger.info("Exiting distribution_test service normally...")
else:
debug_query.awaitTermination()
if __name__ == "__main__":
main()
| true
| true
|
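The script above wires a Kafka source into a console sink with Spark Structured Streaming. The sketch below shows the same source-to-console pattern without the Fink helpers, assuming a plain string payload and that the spark-sql-kafka package is on the classpath; the broker address and topic name are placeholders.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("console_tap").getOrCreate()

df = (spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")  # placeholder broker
      .option("subscribe", "fink_outstream")                # placeholder topic
      .load())

# Cast the raw Kafka value to a string and stream it to the console.
query = (df.selectExpr("CAST(value AS STRING)")
         .writeStream
         .format("console")
         .trigger(processingTime="2 seconds")
         .start())
query.awaitTermination()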
1c4391152fe7b0959e118d2e075b3b8ec564cb67
| 808
|
py
|
Python
|
examples/tellurium-files/phrasedml/case_06.py
|
kirichoi/tellurium
|
77cf6e794600587741ebe209644a78051e0db1d5
|
[
"Apache-2.0"
] | 73
|
2016-06-13T12:44:28.000Z
|
2021-12-31T14:44:39.000Z
|
examples/tellurium-files/phrasedml/case_06.py
|
kirichoi/tellurium
|
77cf6e794600587741ebe209644a78051e0db1d5
|
[
"Apache-2.0"
] | 461
|
2015-03-26T00:05:16.000Z
|
2022-03-16T17:24:35.000Z
|
examples/tellurium-files/phrasedml/case_06.py
|
kirichoi/tellurium
|
77cf6e794600587741ebe209644a78051e0db1d5
|
[
"Apache-2.0"
] | 30
|
2016-01-18T16:50:54.000Z
|
2021-07-06T09:29:53.000Z
|
"""
Coupled ranges.
Two ranges where one acts as the master range and the other follows it in lockstep.
"""
import os
from tellurium.sedml.utils import run_case
a_str = '''
model case_06()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_06"
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
repeat1 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=True
repeat2 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=False
plot "Example plot" repeat1.time vs repeat1.S1, repeat1.S2
report repeat1.time vs repeat1.S1, repeat1.S2
plot "Example plot" repeat2.time vs repeat2.S1, repeat2.S2
report repeat2.time vs repeat2.S1, repeat2.S2
'''
run_case(os.path.realpath(__file__), a_str, p_str)
| 28.857143
| 82
| 0.680693
|
import os
from tellurium.sedml.utils import run_case
a_str = '''
model case_06()
J0: S1 -> S2; k1*S1-k2*S2
S1 = 10.0; S2 = 0.0;
k1 = 0.5; k2=0.4
end
'''
p_str = '''
mod1 = model "case_06"
sim1 = simulate uniform(0, 10, 100)
task1 = run sim1 on mod1
repeat1 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=True
repeat2 = repeat task1 for S1 in [1, 3, 5], S2 in uniform(0, 10, 2), reset=False
plot "Example plot" repeat1.time vs repeat1.S1, repeat1.S2
report repeat1.time vs repeat1.S1, repeat1.S2
plot "Example plot" repeat2.time vs repeat2.S1, repeat2.S2
report repeat2.time vs repeat2.S1, repeat2.S2
'''
run_case(os.path.realpath(__file__), a_str, p_str)
| true
| true
|
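The two phrasedml repeats in the record above couple S1 and S2 so that S1 acts as the master range and S2 is consumed in lockstep, which requires both ranges to have the same length. A plain-Python sketch of that iteration pattern follows; it assumes uniform(0, 10, 2) expands to the three points 0, 5, 10 so the lengths match, which is an assumption about phrasedml's range semantics, and it is not tellurium's implementation.

import numpy as np

s1_values = [1, 3, 5]                # master range
s2_values = np.linspace(0, 10, 3)    # follower range, assumed expansion of uniform(0, 10, 2)
for s1, s2 in zip(s1_values, s2_values):
    print("run task1 with S1=%s, S2=%s" % (s1, s2))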
1c43915857abc3fd17112f0c842caca40adcc78b
| 4,345
|
py
|
Python
|
truvo/test/test_nuvo.py
|
mjr9804/truvo
|
e3f52833e534e8ace7eb82f2091045df269eeb72
|
[
"MIT"
] | null | null | null |
truvo/test/test_nuvo.py
|
mjr9804/truvo
|
e3f52833e534e8ace7eb82f2091045df269eeb72
|
[
"MIT"
] | null | null | null |
truvo/test/test_nuvo.py
|
mjr9804/truvo
|
e3f52833e534e8ace7eb82f2091045df269eeb72
|
[
"MIT"
] | null | null | null |
"""
Tests for nuvo.py
"""
#pylint: disable=arguments-differ,missing-class-docstring,missing-function-docstring
#pylint: disable=protected-access
import json
from unittest import TestCase
from unittest.mock import patch
from truvo import nuvo
class TestNuvo(TestCase):
@patch('socket.create_connection')
@patch('socket.gethostbyname')
def setUp(self, mock_gethost, mock_create):
self.mock_gethost = mock_gethost
self.mock_gethost.return_value = '10.1.1.1'
self.mock_create = mock_create
self.mock_conn = self.mock_create.return_value
self.global_source = 'S3'
self.adm = nuvo.AudioDistributionModule(self.global_source)
def test_init(self):
self.mock_gethost.assert_called_with(nuvo.AudioDistributionModule.NUVO_AUDIO_DIST_FQDN)
self.assertEqual(self.adm.server_ip, self.mock_gethost.return_value)
self.mock_create.assert_called_with((self.mock_gethost.return_value,
nuvo.AudioDistributionModule.NUVO_AUDIO_DIST_PORT))
self.assertEqual(self.adm.conn, self.mock_conn)
self.assertEqual(self.adm.global_source, self.global_source)
self.assertEqual(self.adm.payload_id, 1)
@patch('truvo.nuvo.AudioDistributionModule._response')
def test_request(self, mock_res):
payload = {'a': 1, 'ID': 1}
res = self.adm._request(a=1)
self.mock_conn.send.assert_called_with(
json.dumps(payload).encode('utf-8') + nuvo.AudioDistributionModule.NUVO_API_TERMINATOR
)
        self.assertEqual(self.adm.payload_id, 2)
self.assertEqual(res, mock_res.return_value)
def test_response(self):
self.mock_conn.recv.side_effect = [b'{', b'"', b'a', b'"', b':', b'1', b'}',
nuvo.AudioDistributionModule.NUVO_API_TERMINATOR]
res = self.adm._response()
self.assertEqual(len(self.mock_conn.recv.mock_calls), 8)
self.assertEqual(res, {'a': 1})
def test_response_ping(self):
resp = {'Service': 'ping'}
side_effect = [char.encode('utf-8') for char in json.dumps(resp)]
side_effect += [nuvo.AudioDistributionModule.NUVO_API_TERMINATOR,
b'{', b'"', b'a', b'"', b':', b'1', b'}',
nuvo.AudioDistributionModule.NUVO_API_TERMINATOR]
self.mock_conn.recv.side_effect = side_effect
res = self.adm._response()
self.assertEqual(res, {'a': 1})
def test_response_greeting(self):
resp = {'Service': 'Greeting'}
side_effect = [char.encode('utf-8') for char in json.dumps(resp)]
side_effect += [nuvo.AudioDistributionModule.NUVO_API_TERMINATOR,
b'{', b'"', b'a', b'"', b':', b'1', b'}',
nuvo.AudioDistributionModule.NUVO_API_TERMINATOR]
self.mock_conn.recv.side_effect = side_effect
res = self.adm._response()
self.assertEqual(res, {'a': 1})
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_set_power(self, mock_req):
self.adm._set_power('Z1', False)
mock_req.assert_called_with(Service='SetZoneProperty', ZID='Z1',
PropertyList={'Power': False})
@patch('truvo.nuvo.AudioDistributionModule._set_power')
def test_power_off(self, mock_set):
self.adm.power_off('Z2')
mock_set.assert_called_with('Z2', False)
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_list_zones(self, mock_req):
mock_req.return_value = {'ZoneList': 'a'}
res = self.adm.list_zones()
mock_req.assert_called_with(Service='ListZones')
self.assertEqual(res, 'a')
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_list_zones_fail(self, mock_req):
mock_req.return_value = {'a': 1}
res = self.adm.list_zones()
mock_req.assert_called_with(Service='ListZones')
self.assertEqual(res, [])
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_play(self, mock_req):
self.adm.play('Z1')
mock_req.assert_called_with(Service='SetZoneProperty', ZID='Z1',
PropertyList={'Power': True, 'Source': self.adm.global_source,
'Volume': 50})
| 43.019802
| 99
| 0.636364
|
import json
from unittest import TestCase
from unittest.mock import patch
from truvo import nuvo
class TestNuvo(TestCase):
@patch('socket.create_connection')
@patch('socket.gethostbyname')
def setUp(self, mock_gethost, mock_create):
self.mock_gethost = mock_gethost
self.mock_gethost.return_value = '10.1.1.1'
self.mock_create = mock_create
self.mock_conn = self.mock_create.return_value
self.global_source = 'S3'
self.adm = nuvo.AudioDistributionModule(self.global_source)
def test_init(self):
self.mock_gethost.assert_called_with(nuvo.AudioDistributionModule.NUVO_AUDIO_DIST_FQDN)
self.assertEqual(self.adm.server_ip, self.mock_gethost.return_value)
self.mock_create.assert_called_with((self.mock_gethost.return_value,
nuvo.AudioDistributionModule.NUVO_AUDIO_DIST_PORT))
self.assertEqual(self.adm.conn, self.mock_conn)
self.assertEqual(self.adm.global_source, self.global_source)
self.assertEqual(self.adm.payload_id, 1)
@patch('truvo.nuvo.AudioDistributionModule._response')
def test_request(self, mock_res):
payload = {'a': 1, 'ID': 1}
res = self.adm._request(a=1)
self.mock_conn.send.assert_called_with(
json.dumps(payload).encode('utf-8') + nuvo.AudioDistributionModule.NUVO_API_TERMINATOR
)
        self.assertEqual(self.adm.payload_id, 2)
self.assertEqual(res, mock_res.return_value)
def test_response(self):
self.mock_conn.recv.side_effect = [b'{', b'"', b'a', b'"', b':', b'1', b'}',
nuvo.AudioDistributionModule.NUVO_API_TERMINATOR]
res = self.adm._response()
self.assertEqual(len(self.mock_conn.recv.mock_calls), 8)
self.assertEqual(res, {'a': 1})
def test_response_ping(self):
resp = {'Service': 'ping'}
side_effect = [char.encode('utf-8') for char in json.dumps(resp)]
side_effect += [nuvo.AudioDistributionModule.NUVO_API_TERMINATOR,
b'{', b'"', b'a', b'"', b':', b'1', b'}',
nuvo.AudioDistributionModule.NUVO_API_TERMINATOR]
self.mock_conn.recv.side_effect = side_effect
res = self.adm._response()
self.assertEqual(res, {'a': 1})
def test_response_greeting(self):
resp = {'Service': 'Greeting'}
side_effect = [char.encode('utf-8') for char in json.dumps(resp)]
side_effect += [nuvo.AudioDistributionModule.NUVO_API_TERMINATOR,
b'{', b'"', b'a', b'"', b':', b'1', b'}',
nuvo.AudioDistributionModule.NUVO_API_TERMINATOR]
self.mock_conn.recv.side_effect = side_effect
res = self.adm._response()
self.assertEqual(res, {'a': 1})
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_set_power(self, mock_req):
self.adm._set_power('Z1', False)
mock_req.assert_called_with(Service='SetZoneProperty', ZID='Z1',
PropertyList={'Power': False})
@patch('truvo.nuvo.AudioDistributionModule._set_power')
def test_power_off(self, mock_set):
self.adm.power_off('Z2')
mock_set.assert_called_with('Z2', False)
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_list_zones(self, mock_req):
mock_req.return_value = {'ZoneList': 'a'}
res = self.adm.list_zones()
mock_req.assert_called_with(Service='ListZones')
self.assertEqual(res, 'a')
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_list_zones_fail(self, mock_req):
mock_req.return_value = {'a': 1}
res = self.adm.list_zones()
mock_req.assert_called_with(Service='ListZones')
self.assertEqual(res, [])
@patch('truvo.nuvo.AudioDistributionModule._request')
def test_play(self, mock_req):
self.adm.play('Z1')
mock_req.assert_called_with(Service='SetZoneProperty', ZID='Z1',
PropertyList={'Power': True, 'Source': self.adm.global_source,
'Volume': 50})
| true
| true
|
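The _response tests above simulate a byte-at-a-time socket read framed by a terminator byte sequence. The sketch below shows the parsing loop those mocks imply; the terminator value and helper name are assumptions, since the real constant lives in nuvo.AudioDistributionModule.NUVO_API_TERMINATOR.

import json

TERMINATOR = b"\r\n"  # assumed value; the real one is NUVO_API_TERMINATOR

def read_json_message(recv, terminator=TERMINATOR):
    # Accumulate bytes from recv() until the terminator arrives,
    # then parse the JSON body that precedes it.
    buf = b""
    while not buf.endswith(terminator):
        buf += recv(4096)
    return json.loads(buf[:-len(terminator)])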
1c4392f528ec2231722f92fe93550d8115cbc3e8
| 9,158
|
py
|
Python
|
test/data_processing/test_nodes.py
|
coruscating/qiskit-experiments
|
dac1febf13be870d3bac16af22aa341a088e0766
|
[
"Apache-2.0"
] | null | null | null |
test/data_processing/test_nodes.py
|
coruscating/qiskit-experiments
|
dac1febf13be870d3bac16af22aa341a088e0766
|
[
"Apache-2.0"
] | 1
|
2021-06-01T01:43:52.000Z
|
2021-06-01T01:43:52.000Z
|
test/data_processing/test_nodes.py
|
coruscating/qiskit-experiments
|
dac1febf13be870d3bac16af22aa341a088e0766
|
[
"Apache-2.0"
] | 2
|
2021-05-17T10:13:20.000Z
|
2021-06-01T01:34:34.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Data processor tests."""
# pylint: disable=unbalanced-tuple-unpacking
import numpy as np
from qiskit.test import QiskitTestCase
from qiskit_experiments.data_processing.nodes import (
SVD,
AverageData,
MinMaxNormalize,
Probability,
)
from qiskit_experiments.data_processing.data_processor import DataProcessor
from . import BaseDataProcessorTest
class TestAveraging(BaseDataProcessorTest):
"""Test the averaging nodes."""
def test_simple(self):
"""Simple test of averaging."""
datum = np.array([[1, 2], [3, 4], [5, 6]])
node = AverageData(axis=1)
self.assertTrue(np.allclose(node(datum)[0], np.array([1.5, 3.5, 5.5])))
self.assertTrue(np.allclose(node(datum)[1], np.array([0.5, 0.5, 0.5]) / np.sqrt(2)))
node = AverageData(axis=0)
self.assertTrue(np.allclose(node(datum)[0], np.array([3.0, 4.0])))
std = np.std([1, 3, 5])
self.assertTrue(np.allclose(node(datum)[1], np.array([std, std]) / np.sqrt(3)))
def test_iq_averaging(self):
"""Test averaging of IQ-data."""
iq_data = [
[[-6.20601501e14, -1.33257051e15], [-1.70921324e15, -4.05881657e15]],
[[-5.80546502e14, -1.33492509e15], [-1.65094637e15, -4.05926942e15]],
[[-4.04649069e14, -1.33191056e15], [-1.29680377e15, -4.03604815e15]],
[[-2.22203874e14, -1.30291309e15], [-8.57663429e14, -3.97784973e15]],
[[-2.92074029e13, -1.28578530e15], [-9.78824053e13, -3.92071056e15]],
[[1.98056981e14, -1.26883024e15], [3.77157017e14, -3.87460328e15]],
[[4.29955888e14, -1.25022995e15], [1.02340118e15, -3.79508679e15]],
[[6.38981344e14, -1.25084614e15], [1.68918514e15, -3.78961044e15]],
[[7.09988897e14, -1.21906634e15], [1.91914171e15, -3.73670664e15]],
[[7.63169115e14, -1.20797552e15], [2.03772603e15, -3.74653863e15]],
]
self.create_experiment(iq_data, single_shot=True)
avg_iq = AverageData(axis=0)
avg_datum, error = avg_iq(self.iq_experiment.data(0)["memory"])
expected_avg = np.array([[8.82943876e13, -1.27850527e15], [1.43410186e14, -3.89952402e15]])
expected_std = np.array(
[[5.07650185e14, 4.44664719e13], [1.40522641e15, 1.22326831e14]]
) / np.sqrt(10)
self.assertTrue(np.allclose(avg_datum, expected_avg))
self.assertTrue(np.allclose(error, expected_std))
class TestNormalize(QiskitTestCase):
"""Test the normalization node."""
def test_simple(self):
"""Simple test of normalization node."""
data = np.array([1.0, 2.0, 3.0, 3.0])
error = np.array([0.1, 0.2, 0.3, 0.3])
expected_data = np.array([0.0, 0.5, 1.0, 1.0])
expected_error = np.array([0.05, 0.1, 0.15, 0.15])
node = MinMaxNormalize()
self.assertTrue(np.allclose(node(data)[0], expected_data))
self.assertTrue(np.allclose(node(data, error)[0], expected_data))
self.assertTrue(np.allclose(node(data, error)[1], expected_error))
class TestSVD(BaseDataProcessorTest):
"""Test the SVD nodes."""
def test_simple_data(self):
"""
A simple setting where the IQ data of qubit 0 is oriented along (1,1) and
the IQ data of qubit 1 is oriented along (1,-1).
"""
iq_data = [[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [-1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]
self.create_experiment(iq_data)
iq_svd = SVD()
iq_svd.train([datum["memory"] for datum in self.iq_experiment.data()])
# qubit 0 IQ data is oriented along (1,1)
self.assertTrue(np.allclose(iq_svd._main_axes[0], np.array([-1, -1]) / np.sqrt(2)))
# qubit 1 IQ data is oriented along (1, -1)
self.assertTrue(np.allclose(iq_svd._main_axes[1], np.array([-1, 1]) / np.sqrt(2)))
# Note: input data shape [n_circs, n_slots, n_iq] for avg mode simulation
processed, _ = iq_svd(np.array([[[1, 1], [1, -1]]]))
expected = np.array([-1, -1]) / np.sqrt(2)
self.assertTrue(np.allclose(processed, expected))
processed, _ = iq_svd(np.array([[[2, 2], [2, -2]]]))
self.assertTrue(np.allclose(processed, expected * 2))
# Check that orthogonal data gives 0.
processed, _ = iq_svd(np.array([[[1, -1], [1, 1]]]))
expected = np.array([0, 0])
self.assertTrue(np.allclose(processed, expected))
def test_svd(self):
"""Use IQ data gathered from the hardware."""
# This data is primarily oriented along the real axis with a slight tilt.
# There is a large offset in the imaginary dimension when comparing qubits
# 0 and 1.
iq_data = [
[[-6.20601501e14, -1.33257051e15], [-1.70921324e15, -4.05881657e15]],
[[-5.80546502e14, -1.33492509e15], [-1.65094637e15, -4.05926942e15]],
[[-4.04649069e14, -1.33191056e15], [-1.29680377e15, -4.03604815e15]],
[[-2.22203874e14, -1.30291309e15], [-8.57663429e14, -3.97784973e15]],
[[-2.92074029e13, -1.28578530e15], [-9.78824053e13, -3.92071056e15]],
[[1.98056981e14, -1.26883024e15], [3.77157017e14, -3.87460328e15]],
[[4.29955888e14, -1.25022995e15], [1.02340118e15, -3.79508679e15]],
[[6.38981344e14, -1.25084614e15], [1.68918514e15, -3.78961044e15]],
[[7.09988897e14, -1.21906634e15], [1.91914171e15, -3.73670664e15]],
[[7.63169115e14, -1.20797552e15], [2.03772603e15, -3.74653863e15]],
]
self.create_experiment(iq_data)
iq_svd = SVD()
iq_svd.train([datum["memory"] for datum in self.iq_experiment.data()])
self.assertTrue(np.allclose(iq_svd._main_axes[0], np.array([-0.99633018, -0.08559302])))
self.assertTrue(np.allclose(iq_svd._main_axes[1], np.array([-0.99627747, -0.0862044])))
def test_svd_error(self):
"""Test the error formula of the SVD."""
iq_svd = SVD()
iq_svd._main_axes = np.array([[1.0, 0.0]])
iq_svd._scales = [1.0]
iq_svd._means = [[0.0, 0.0]]
# Since the axis is along the real part the imaginary error is irrelevant.
processed, error = iq_svd([[[1.0, 0.2]]], [[[0.2, 0.1]]])
self.assertEqual(processed, np.array([1.0]))
self.assertEqual(error, np.array([0.2]))
# Since the axis is along the real part the imaginary error is irrelevant.
processed, error = iq_svd([[[1.0, 0.2]]], [[[0.2, 0.3]]])
self.assertEqual(processed, np.array([1.0]))
self.assertEqual(error, np.array([0.2]))
# Tilt the axis to an angle of 36.9... degrees
iq_svd._main_axes = np.array([[0.8, 0.6]])
processed, error = iq_svd([[[1.0, 0.0]]], [[[0.2, 0.3]]])
cos_ = np.cos(np.arctan(0.6 / 0.8))
sin_ = np.sin(np.arctan(0.6 / 0.8))
self.assertEqual(processed, np.array([cos_]))
expected_error = np.sqrt((0.2 * cos_) ** 2 + (0.3 * sin_) ** 2)
self.assertEqual(error, np.array([expected_error]))
def test_train_svd_processor(self):
"""Test that we can train a DataProcessor with an SVD."""
processor = DataProcessor("memory", [SVD()])
self.assertFalse(processor.is_trained)
iq_data = [[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [-1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]
self.create_experiment(iq_data)
processor.train(self.iq_experiment.data())
self.assertTrue(processor.is_trained)
# Check that we can use the SVD
iq_data = [[[2, 2], [2, -2]]]
self.create_experiment(iq_data)
processed, _ = processor(self.iq_experiment.data(0))
expected = np.array([-2, -2]) / np.sqrt(2)
self.assertTrue(np.allclose(processed, expected))
class TestProbability(QiskitTestCase):
"""Test probability computation."""
def test_variance_not_zero(self):
"""Test if finite variance is computed at max or min probability."""
node = Probability(outcome="1")
data = {"1": 1024, "0": 0}
mode, stderr = node(data)
self.assertGreater(stderr, 0.0)
self.assertLessEqual(mode, 1.0)
data = {"1": 0, "0": 1024}
mode, stderr = node(data)
self.assertGreater(stderr, 0.0)
self.assertGreaterEqual(mode, 0.0)
def test_probability_balanced(self):
"""Test if p=0.5 is returned when counts are balanced and prior is flat."""
node = Probability(outcome="1")
# balanced counts with a flat prior will yield p = 0.5
data = {"1": 512, "0": 512}
mode, _ = node(data)
self.assertAlmostEqual(mode, 0.5)
| 38.64135
| 100
| 0.604826
|
import numpy as np
from qiskit.test import QiskitTestCase
from qiskit_experiments.data_processing.nodes import (
SVD,
AverageData,
MinMaxNormalize,
Probability,
)
from qiskit_experiments.data_processing.data_processor import DataProcessor
from . import BaseDataProcessorTest
class TestAveraging(BaseDataProcessorTest):
def test_simple(self):
datum = np.array([[1, 2], [3, 4], [5, 6]])
node = AverageData(axis=1)
self.assertTrue(np.allclose(node(datum)[0], np.array([1.5, 3.5, 5.5])))
self.assertTrue(np.allclose(node(datum)[1], np.array([0.5, 0.5, 0.5]) / np.sqrt(2)))
node = AverageData(axis=0)
self.assertTrue(np.allclose(node(datum)[0], np.array([3.0, 4.0])))
std = np.std([1, 3, 5])
self.assertTrue(np.allclose(node(datum)[1], np.array([std, std]) / np.sqrt(3)))
def test_iq_averaging(self):
iq_data = [
[[-6.20601501e14, -1.33257051e15], [-1.70921324e15, -4.05881657e15]],
[[-5.80546502e14, -1.33492509e15], [-1.65094637e15, -4.05926942e15]],
[[-4.04649069e14, -1.33191056e15], [-1.29680377e15, -4.03604815e15]],
[[-2.22203874e14, -1.30291309e15], [-8.57663429e14, -3.97784973e15]],
[[-2.92074029e13, -1.28578530e15], [-9.78824053e13, -3.92071056e15]],
[[1.98056981e14, -1.26883024e15], [3.77157017e14, -3.87460328e15]],
[[4.29955888e14, -1.25022995e15], [1.02340118e15, -3.79508679e15]],
[[6.38981344e14, -1.25084614e15], [1.68918514e15, -3.78961044e15]],
[[7.09988897e14, -1.21906634e15], [1.91914171e15, -3.73670664e15]],
[[7.63169115e14, -1.20797552e15], [2.03772603e15, -3.74653863e15]],
]
self.create_experiment(iq_data, single_shot=True)
avg_iq = AverageData(axis=0)
avg_datum, error = avg_iq(self.iq_experiment.data(0)["memory"])
expected_avg = np.array([[8.82943876e13, -1.27850527e15], [1.43410186e14, -3.89952402e15]])
expected_std = np.array(
[[5.07650185e14, 4.44664719e13], [1.40522641e15, 1.22326831e14]]
) / np.sqrt(10)
self.assertTrue(np.allclose(avg_datum, expected_avg))
self.assertTrue(np.allclose(error, expected_std))
class TestNormalize(QiskitTestCase):
def test_simple(self):
data = np.array([1.0, 2.0, 3.0, 3.0])
error = np.array([0.1, 0.2, 0.3, 0.3])
expected_data = np.array([0.0, 0.5, 1.0, 1.0])
expected_error = np.array([0.05, 0.1, 0.15, 0.15])
node = MinMaxNormalize()
self.assertTrue(np.allclose(node(data)[0], expected_data))
self.assertTrue(np.allclose(node(data, error)[0], expected_data))
self.assertTrue(np.allclose(node(data, error)[1], expected_error))
class TestSVD(BaseDataProcessorTest):
def test_simple_data(self):
iq_data = [[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [-1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]
self.create_experiment(iq_data)
iq_svd = SVD()
iq_svd.train([datum["memory"] for datum in self.iq_experiment.data()])
self.assertTrue(np.allclose(iq_svd._main_axes[0], np.array([-1, -1]) / np.sqrt(2)))
self.assertTrue(np.allclose(iq_svd._main_axes[1], np.array([-1, 1]) / np.sqrt(2)))
processed, _ = iq_svd(np.array([[[1, 1], [1, -1]]]))
expected = np.array([-1, -1]) / np.sqrt(2)
self.assertTrue(np.allclose(processed, expected))
processed, _ = iq_svd(np.array([[[2, 2], [2, -2]]]))
self.assertTrue(np.allclose(processed, expected * 2))
processed, _ = iq_svd(np.array([[[1, -1], [1, 1]]]))
expected = np.array([0, 0])
self.assertTrue(np.allclose(processed, expected))
def test_svd(self):
iq_data = [
[[-6.20601501e14, -1.33257051e15], [-1.70921324e15, -4.05881657e15]],
[[-5.80546502e14, -1.33492509e15], [-1.65094637e15, -4.05926942e15]],
[[-4.04649069e14, -1.33191056e15], [-1.29680377e15, -4.03604815e15]],
[[-2.22203874e14, -1.30291309e15], [-8.57663429e14, -3.97784973e15]],
[[-2.92074029e13, -1.28578530e15], [-9.78824053e13, -3.92071056e15]],
[[1.98056981e14, -1.26883024e15], [3.77157017e14, -3.87460328e15]],
[[4.29955888e14, -1.25022995e15], [1.02340118e15, -3.79508679e15]],
[[6.38981344e14, -1.25084614e15], [1.68918514e15, -3.78961044e15]],
[[7.09988897e14, -1.21906634e15], [1.91914171e15, -3.73670664e15]],
[[7.63169115e14, -1.20797552e15], [2.03772603e15, -3.74653863e15]],
]
self.create_experiment(iq_data)
iq_svd = SVD()
iq_svd.train([datum["memory"] for datum in self.iq_experiment.data()])
self.assertTrue(np.allclose(iq_svd._main_axes[0], np.array([-0.99633018, -0.08559302])))
self.assertTrue(np.allclose(iq_svd._main_axes[1], np.array([-0.99627747, -0.0862044])))
def test_svd_error(self):
iq_svd = SVD()
iq_svd._main_axes = np.array([[1.0, 0.0]])
iq_svd._scales = [1.0]
iq_svd._means = [[0.0, 0.0]]
processed, error = iq_svd([[[1.0, 0.2]]], [[[0.2, 0.1]]])
self.assertEqual(processed, np.array([1.0]))
self.assertEqual(error, np.array([0.2]))
processed, error = iq_svd([[[1.0, 0.2]]], [[[0.2, 0.3]]])
self.assertEqual(processed, np.array([1.0]))
self.assertEqual(error, np.array([0.2]))
iq_svd._main_axes = np.array([[0.8, 0.6]])
processed, error = iq_svd([[[1.0, 0.0]]], [[[0.2, 0.3]]])
cos_ = np.cos(np.arctan(0.6 / 0.8))
sin_ = np.sin(np.arctan(0.6 / 0.8))
self.assertEqual(processed, np.array([cos_]))
expected_error = np.sqrt((0.2 * cos_) ** 2 + (0.3 * sin_) ** 2)
self.assertEqual(error, np.array([expected_error]))
def test_train_svd_processor(self):
processor = DataProcessor("memory", [SVD()])
self.assertFalse(processor.is_trained)
iq_data = [[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [-1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]
self.create_experiment(iq_data)
processor.train(self.iq_experiment.data())
self.assertTrue(processor.is_trained)
iq_data = [[[2, 2], [2, -2]]]
self.create_experiment(iq_data)
processed, _ = processor(self.iq_experiment.data(0))
expected = np.array([-2, -2]) / np.sqrt(2)
self.assertTrue(np.allclose(processed, expected))
class TestProbability(QiskitTestCase):
def test_variance_not_zero(self):
node = Probability(outcome="1")
data = {"1": 1024, "0": 0}
mode, stderr = node(data)
self.assertGreater(stderr, 0.0)
self.assertLessEqual(mode, 1.0)
data = {"1": 0, "0": 1024}
mode, stderr = node(data)
self.assertGreater(stderr, 0.0)
self.assertGreaterEqual(mode, 0.0)
def test_probability_balanced(self):
node = Probability(outcome="1")
data = {"1": 512, "0": 512}
mode, _ = node(data)
self.assertAlmostEqual(mode, 0.5)
| true
| true
|
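TestNormalize above pins down the contract of MinMaxNormalize: data is mapped onto [0, 1] and errors are scaled by the same factor. The NumPy sketch below reproduces those expected values; it is a hedged re-derivation, not the library's implementation.

import numpy as np

def min_max_normalize(data, error=None):
    # Shift by the minimum and divide by the range; errors scale by 1/range.
    lo, hi = np.min(data), np.max(data)
    scale = hi - lo
    normalized = (data - lo) / scale
    if error is None:
        return normalized
    return normalized, error / scale

data = np.array([1.0, 2.0, 3.0, 3.0])
error = np.array([0.1, 0.2, 0.3, 0.3])
norm, norm_err = min_max_normalize(data, error)
assert np.allclose(norm, [0.0, 0.5, 1.0, 1.0])
assert np.allclose(norm_err, [0.05, 0.1, 0.15, 0.15])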
1c43934f21526c9f0103a9f4b781ad01d3e6c720
| 5,364
|
py
|
Python
|
AVL_tree_PROG.py
|
arafayr/AVL_tree_PROG
|
9528aa61fab3e10d6b12579cf438811c93e99ace
|
[
"MIT"
] | null | null | null |
AVL_tree_PROG.py
|
arafayr/AVL_tree_PROG
|
9528aa61fab3e10d6b12579cf438811c93e99ace
|
[
"MIT"
] | null | null | null |
AVL_tree_PROG.py
|
arafayr/AVL_tree_PROG
|
9528aa61fab3e10d6b12579cf438811c93e99ace
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
class Node:
def __init__(self,data,parent):
self.data = data
self.rightchild = None
self.leftchild = None
self.parent = parent
self.height = 0
class AVLTree:
def __init__(self):
self.root = None
def insert(self,data):
if self.root is None:
self.root = Node(data,None)
else:
self.insert_node(data,self.root)
    def insert_node(self,data,node):
        if data < node.data:
            if node.leftchild is not None:
                self.insert_node(data,node.leftchild)
            else:
                node.leftchild = Node(data,node)
        else:
            if node.rightchild is not None:
                self.insert_node(data,node.rightchild)
            else:
                node.rightchild = Node(data,node)
        # refresh the height on the way back up so ancestor heights stay correct
        node.height = max(self.calheight(node.leftchild),self.calheight(node.rightchild))+1
        self.violation_helper(node)
def remove(self,data):
if self.root:
self.remove_node(data,self.root)
def remove_node(self,data,node):
if node is None:
return
if data < node.data:
self.remove_node(data,node.leftchild)
elif data > node.data:
self.remove_node(data,node.rightchild)
else:
if node.leftchild is None and node.rightchild is None:
parent = node.parent
if parent and parent.rightchild == node:
parent.rightchild = None
if parent and parent.leftchild == node:
                parent.leftchild = None
if parent is None:
self.root = None
                del node
                # "node" is deleted above, so rebalance from its parent instead
                self.violation_helper(parent)
elif node.leftchild is None and node.rightchild is not None:
parent = node.parent
if parent:
if parent.rightchild == node:
parent.rightchild = node.rightchild
if parent.leftchild == node:
parent.leftchild = node.rightchild
else:
self.root = node.rightchild
node.rightchild.parent = parent
del node
                self.violation_helper(parent)
elif node.rightchild is None and node.leftchild is not None:
parent = node.parent
if parent:
if parent.rightchild == node:
parent.rightchild = node.leftchild
if parent.leftchild == node:
parent.leftchild = node.leftchild
else:
self.root = node.leftchild
node.leftchild.parent = parent
del node
                self.violation_helper(parent)
else:
predecessor = self.getpredecessor(node.leftchild)
temp = predecessor.data
predecessor.data = node.data
node.data = temp
self.remove_node(data,predecessor)
def getpredecessor(self,node):
if node.rightchild:
return self.getpredecessor(node.rightchild)
return node
def traverse(self):
if self.root is not None:
self.traverse_lrR(self.root)
def traverse_lrR(self,node):
if node.leftchild is not None:
self.traverse_lrR(node.leftchild)
print(node.data)
if node.rightchild is not None:
self.traverse_lrR(node.rightchild)
    def violation_helper(self,node):
        balance = self.calculate_balance(node)
        # the tree is left heavy
        if balance > 1:
            # left-right heavy: first a left rotation on the left child
            if self.calculate_balance(node.leftchild) < 0:
                self.rotate_left(node.leftchild)
            # then a right rotation on the grandparent
            self.rotate_right(node)
        if balance < -1:
            # right-left heavy: first a right rotation on the right child
            if self.calculate_balance(node.rightchild) > 0:
                self.rotate_right(node.rightchild)
            # then a left rotation on the grandparent
            self.rotate_left(node)
def calheight(self,node):
if node is None:
return -1
else:
return node.height
    def calculate_balance(self,node):
        if node is None:
            return 0
        # compare subtree heights, not balances, so each check stays O(1)
        return self.calheight(node.leftchild) - self.calheight(node.rightchild)
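    # NOTE (editorial sketch): the original file calls rotate_left/rotate_right
    # but never defines them, so any rebalancing would raise AttributeError.
    # The standard AVL rotations below are added for completeness; the height
    # bookkeeping mirrors calheight() above.
    def rotate_right(self,node):
        temp_left = node.leftchild
        t = temp_left.rightchild
        # temp_left becomes the new subtree root; node moves to its right
        temp_left.rightchild = node
        node.leftchild = t
        if t is not None:
            t.parent = node
        temp_parent = node.parent
        node.parent = temp_left
        temp_left.parent = temp_parent
        # re-point the old parent (or the tree root) at the new subtree root
        if temp_parent is not None:
            if temp_parent.leftchild == node:
                temp_parent.leftchild = temp_left
            if temp_parent.rightchild == node:
                temp_parent.rightchild = temp_left
        else:
            self.root = temp_left
        node.height = max(self.calheight(node.leftchild),self.calheight(node.rightchild))+1
        temp_left.height = max(self.calheight(temp_left.leftchild),self.calheight(temp_left.rightchild))+1
    def rotate_left(self,node):
        temp_right = node.rightchild
        t = temp_right.leftchild
        temp_right.leftchild = node
        node.rightchild = t
        if t is not None:
            t.parent = node
        temp_parent = node.parent
        node.parent = temp_right
        temp_right.parent = temp_parent
        if temp_parent is not None:
            if temp_parent.leftchild == node:
                temp_parent.leftchild = temp_right
            if temp_parent.rightchild == node:
                temp_parent.rightchild = temp_right
        else:
            self.root = temp_right
        node.height = max(self.calheight(node.leftchild),self.calheight(node.rightchild))+1
        temp_right.height = max(self.calheight(temp_right.leftchild),self.calheight(temp_right.rightchild))+1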
obj = AVLTree()
obj.insert(1)
obj.insert(5)
obj.insert(0)
obj.insert(3)
obj.insert(6)
obj.insert(2)
obj.insert(7)
obj.insert(4)
obj.traverse()
| 29.635359
| 99
| 0.496644
|
class Node:
def __init__(self,data,parent):
self.data = data
self.rightchild = None
self.leftchild = None
self.parent = parent
self.height = 0
class AVLTree:
def __init__(self):
self.root = None
def insert(self,data):
if self.root is None:
self.root = Node(data,None)
else:
self.insert_node(data,self.root)
    def insert_node(self,data,node):
        if data < node.data:
            if node.leftchild is not None:
                self.insert_node(data,node.leftchild)
            else:
                node.leftchild = Node(data,node)
        else:
            if node.rightchild is not None:
                self.insert_node(data,node.rightchild)
            else:
                node.rightchild = Node(data,node)
        node.height = max(self.calheight(node.leftchild),self.calheight(node.rightchild))+1
        self.violation_helper(node)
def remove(self,data):
if self.root:
self.remove_node(data,self.root)
def remove_node(self,data,node):
if node is None:
return
if data < node.data:
self.remove_node(data,node.leftchild)
elif data > node.data:
self.remove_node(data,node.rightchild)
else:
if node.leftchild is None and node.rightchild is None:
parent = node.parent
if parent and parent.rightchild == node:
parent.rightchild = None
if parent and parent.leftchild == node:
                parent.leftchild = None
if parent is None:
self.root = None
del node
                self.violation_helper(parent)
elif node.leftchild is None and node.rightchild is not None:
parent = node.parent
if parent:
if parent.rightchild == node:
parent.rightchild = node.rightchild
if parent.leftchild == node:
parent.leftchild = node.rightchild
else:
self.root = node.rightchild
node.rightchild.parent = parent
del node
                self.violation_helper(parent)
elif node.rightchild is None and node.leftchild is not None:
parent = node.parent
if parent:
if parent.rightchild == node:
parent.rightchild = node.leftchild
if parent.leftchild == node:
parent.leftchild = node.leftchild
else:
self.root = node.leftchild
node.leftchild.parent = parent
del node
                self.violation_helper(parent)
else:
predecessor = self.getpredecessor(node.leftchild)
temp = predecessor.data
predecessor.data = node.data
node.data = temp
self.remove_node(data,predecessor)
def getpredecessor(self,node):
if node.rightchild:
return self.getpredecessor(node.rightchild)
return node
def traverse(self):
if self.root is not None:
self.traverse_lrR(self.root)
def traverse_lrR(self,node):
if node.leftchild is not None:
self.traverse_lrR(node.leftchild)
print(node.data)
if node.rightchild is not None:
self.traverse_lrR(node.rightchild)
    def violation_helper(self,node):
        balance = self.calculate_balance(node)
        if balance > 1:
            if self.calculate_balance(node.leftchild) < 0:
                self.rotate_left(node.leftchild)
            self.rotate_right(node)
        if balance < -1:
            if self.calculate_balance(node.rightchild) > 0:
                self.rotate_right(node.rightchild)
            self.rotate_left(node)
def calheight(self,node):
if node is None:
return -1
else:
return node.height
    def calculate_balance(self,node):
        if node is None:
            return 0
        return self.calheight(node.leftchild) - self.calheight(node.rightchild)
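    # editorial sketch: rotations referenced above but missing from the original
    def rotate_right(self,node):
        temp_left = node.leftchild
        t = temp_left.rightchild
        temp_left.rightchild = node
        node.leftchild = t
        if t is not None:
            t.parent = node
        temp_parent = node.parent
        node.parent = temp_left
        temp_left.parent = temp_parent
        if temp_parent is not None:
            if temp_parent.leftchild == node:
                temp_parent.leftchild = temp_left
            if temp_parent.rightchild == node:
                temp_parent.rightchild = temp_left
        else:
            self.root = temp_left
        node.height = max(self.calheight(node.leftchild),self.calheight(node.rightchild))+1
        temp_left.height = max(self.calheight(temp_left.leftchild),self.calheight(temp_left.rightchild))+1
    def rotate_left(self,node):
        temp_right = node.rightchild
        t = temp_right.leftchild
        temp_right.leftchild = node
        node.rightchild = t
        if t is not None:
            t.parent = node
        temp_parent = node.parent
        node.parent = temp_right
        temp_right.parent = temp_parent
        if temp_parent is not None:
            if temp_parent.leftchild == node:
                temp_parent.leftchild = temp_right
            if temp_parent.rightchild == node:
                temp_parent.rightchild = temp_right
        else:
            self.root = temp_right
        node.height = max(self.calheight(node.leftchild),self.calheight(node.rightchild))+1
        temp_right.height = max(self.calheight(temp_right.leftchild),self.calheight(temp_right.rightchild))+1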
obj = AVLTree()
obj.insert(1)
obj.insert(5)
obj.insert(0)
obj.insert(3)
obj.insert(6)
obj.insert(2)
obj.insert(7)
obj.insert(4)
obj.traverse()
| true
| true
|
1c4394dcae4611c54410c55300ddc06a3894fabe
| 16,003
|
py
|
Python
|
tests/scanner/test_win32_scanner.py
|
zea2/DeviceManager
|
5f224c7680692cd9329b5b1421b00a814e3c2bfe
|
[
"MIT"
] | null | null | null |
tests/scanner/test_win32_scanner.py
|
zea2/DeviceManager
|
5f224c7680692cd9329b5b1421b00a814e3c2bfe
|
[
"MIT"
] | 15
|
2020-03-06T13:53:56.000Z
|
2020-04-01T09:14:34.000Z
|
tests/scanner/test_win32_scanner.py
|
zea2/DeviceManager
|
5f224c7680692cd9329b5b1421b00a814e3c2bfe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script for testing the module device_manager.scanner._win32.
This script tests the following entities:
- class Win32USBDeviceScanner
- class Win32LANDeviceScanner
Authors:
Lukas Lankes, Forschungszentrum Jülich GmbH - ZEA-2, l.lankes@fz-juelich.de
"""
import os
import subprocess
import sys
import unittest
import unittest.mock
from device_manager.device import USBDevice, LANDevice
from device_manager.scanner import USBDeviceScanner, LANDeviceScanner
@unittest.skipUnless(sys.platform == "win32", "Requires Windows")
class TestWin32ScannerImport(unittest.TestCase):
def test_import(self):
from device_manager.scanner._win32 import Win32USBDeviceScanner, Win32LANDeviceScanner
self.assertIs(USBDeviceScanner, Win32USBDeviceScanner)
self.assertIs(LANDeviceScanner, Win32LANDeviceScanner)
def test_import_error(self):
with self.assertRaises(ImportError, msg="Importing linux-specific device scanners should "
"fail on windows"):
import device_manager.scanner._linux
@unittest.skipUnless(sys.platform == "win32", "Requires Windows")
class TestWin32USBDeviceScanner(unittest.TestCase):
class MockWin32Entity:
def __init__(self, class_name, pnp_class, vendor_id, product_id, revision_id, instance_id):
device_id = "{pnp}\\VID_{vid:04X}&PID_{pid:04X}\\{inst}".format(
pnp=pnp_class, vid=vendor_id, pid=product_id, inst=instance_id)
hardware_ids = ["{pnp}\\VID_{vid:04X}&PID_{pid:04X}".format(
pnp=pnp_class, vid=vendor_id, pid=product_id)]
if revision_id is not None:
hardware_ids.append("{pnp}\\VID_{vid:04X}&PID_{pid:04X}&REV_{rev:04X}".format(
pnp=pnp_class, vid=vendor_id, pid=product_id, rev=revision_id))
compatible_ids = ["{pnp}\\{pnp}00_HUB".format(pnp=pnp_class)]
self.CreationClassName = class_name
self.PNPClass = pnp_class
self.DeviceID = device_id
self.HardwareID = hardware_ids
self.CompatibleID = compatible_ids
self.device = USBDevice()
self.device.address = device_id
self.device.address_aliases = [*hardware_ids, *compatible_ids]
self.device.vendor_id = vendor_id
self.device.product_id = product_id
self.device.revision_id = revision_id
instance_id = instance_id.split("&")[0]
if len(instance_id) > 1:
self.device.serial = instance_id
class MockWMIConnectServer:
def ExecQuery(self, *args, **kwargs):
raise NotImplementedError()
def setUp(self) -> None:
self.scanner = USBDeviceScanner()
self.valid_devices = [
self.MockWin32Entity("Win32_PnPEntity", "USB", 0x12AB, 0x0123, 0x0100, "01234ABCDEF"),
self.MockWin32Entity("Win32_PnPEntity", "USB", 0x5A67, 0xAB98, None, "012345678AB"),
self.MockWin32Entity("Win32_PnPEntity", "USB", 0x1234, 0x9871, 0x0010, "123&125&123"),
self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "")
]
self.invalid_devices = [
self.MockWin32Entity("Win32_PnPEntity", "PCI", 0x12AB, 0x0123, 0x0100, "INVALID1"),
self.MockWin32Entity("Win32_PnPEntity", "usb", 0x1234, 0xFEDC, 0x0000, "INVALID2"),
self.MockWin32Entity("invalid", "USB", 0x12AB, 0x0123, 0x0100, "INVALID3")
]
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID4")
del tmp_dev.DeviceID # Missing device id
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID5")
tmp_dev.DeviceID = "USB" # Device id too short
tmp_dev.CompatibleID = []
tmp_dev.HardwareID = []
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID5")
tmp_dev.DeviceID = "USB\\VID_0000&PID_0000_0000" # Invalid device id format
tmp_dev.CompatibleID = []
tmp_dev.HardwareID = []
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID6")
tmp_dev.CompatibleID = ["USBXYZ\\abcdef1234"] # Invalid PNP class in device id
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID6")
tmp_dev.CompatibleID = ["USB\\0123456\\INVALIDXXXX"] # Not matching id
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "PCI", 0, 0, 0, "INVALID7")
tmp_dev.PNPClass = "USB"
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID8")
tmp_dev.HardwareID.append(self.valid_devices[0].DeviceID) # Not matching device id
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID9")
tmp_dev.HardwareID.append(self.valid_devices[0].HardwareID[0]) # Not matching hardware id
self.invalid_devices.append(tmp_dev)
self.all_devices = [*self.valid_devices, *self.invalid_devices]
self.wmi_mock = unittest.mock.MagicMock(return_value=self.all_devices)
self.wmi_mock_expected_argument = "SELECT * FROM Win32_PnPEntity"
self.scanner._wbem = self.MockWMIConnectServer() # COMObjects cannot be mocked
self.scanner._wbem.ExecQuery = self.wmi_mock
def test_scan(self):
self.wmi_mock.reset_mock()
devices = self.scanner.list_devices(rescan=True)
self.wmi_mock.assert_called_once_with(self.wmi_mock_expected_argument)
for dev in self.valid_devices:
self.assertIn(dev.device, devices)
for dev in self.invalid_devices:
self.assertNotIn(dev.device, devices)
for search_device in self.valid_devices:
found_devices = self.scanner.find_devices(**search_device.device.unique_identifier)
self.assertSequenceEqual((search_device.device,), found_devices,
msg="The device which was searched by its unique identifiers "
"was not found")
found_devices = self.scanner.find_devices(address=search_device.device.address)
self.assertSequenceEqual((search_device.device,), found_devices,
msg="The device which was searched by address was not found")
found_devices = self.scanner.find_devices(invalid_param="test")
self.assertSequenceEqual(tuple(), found_devices,
msg="Searching for an invalid parameter should not return any "
"results")
def test_scan_errors(self):
devices = self.scanner.list_devices()
self.wmi_mock.reset_mock()
rescan_devices = self.scanner.list_devices()
self.wmi_mock.assert_not_called()
self.assertSequenceEqual(devices, rescan_devices,
msg="The second scan with Win32USBDeviceScanner should return the "
"same values without forcing a rescan")
self.wmi_mock.reset_mock()
old_return_value = self.wmi_mock.return_value
try:
self.wmi_mock.return_value = [self.valid_devices[0]]
rescan_devices = self.scanner.list_devices(rescan=True)
finally:
# Reset return value
self.wmi_mock.return_value = old_return_value
self.wmi_mock.assert_called_once_with(self.wmi_mock_expected_argument)
self.assertSequenceEqual((self.valid_devices[0].device,), rescan_devices,
msg="A forced rescan with Win32USBDeviceScanner should return "
"other values than before")
@unittest.skipUnless(sys.platform == "win32", "Requires Windows")
class TestWin32LANDeviceScanner(unittest.TestCase):
class MockPopen:
def communicate(self, input=None):
raise NotImplementedError()
@property
def returncode(self):
raise NotImplementedError()
@staticmethod
def make_lan_device(address, mac_address, aliases=None):
device = LANDevice()
device.address = address
device.address_aliases = aliases
device.mac_address = mac_address
return device
def setUp(self) -> None:
self.scanner = LANDeviceScanner()
self.arp_output = os.linesep.encode().join([
b"Interface: 192.168.1.98 --- 0x10",
b" Internet Address Physical Address Type",
b" 192.168.1.1 01-f0-60-b4-da-10 dynamic",
b" 192.168.1.3\td3-1e-52-7f-2d-81 dynamic", # Tabs are also accepted
b" 192.168.1.7 03-83-65-cf-c4-cd", # Last column(s) are ignored
b" 192.168.1.14 f0:1a:bd:f0:2f:a2 dynamic", # Colons are allowed in MAC
b" 192.168.1.15 50.9a.71.5d.24.e2 dynamic", # Dots are allowed in MAC
b" 192.168.1.23 01-41-56-0C-EB-01 dynamic",
b" 192.168.1.165 f5-6D:03-5e.45-1F dynamic",
b" 192.168.1.255 ff-ff-ff-ff-ff-ff static",
b" 224.0.0.22 01-00-5e-00-00-16 static",
b"",
b"Interface: 192.168.20.27 --- 0x17",
b" Internet Address Physical Address Type",
b"192.168.20.143 01-f0-61-b4-d9-10 dynamic", # Leading spaces not required
b" 192.168.20.251 d3-1a-52-7f-2f-82 dynamic",
b" 192.168.20.255 ff-ff-ff-ff-ff-ff static", # Broadcast (duplicate)
b" 224.0.0.22 01-00-5e-00-00-16 static", # Multicast (duplicate)
b"",
# Invalid lines ...
b"Interface: 192.168.24.75 --- 0x19",
b" 192.168.24.10 dynamic", # Invalid: no MAC
b" 192.168.24.11", # Invalid: no MAC
b" 192.168.24 01-f0-61-b4-d9-10 dynamic", # Invalid IP
b" 192.168.24.251 d3-1G-53-7f-2f-82 dynamic", # Invalid MAC
b" 192.168.24.252 abc 00-1a-53-7f-2f-82 dynamic", # Invalid line format
b""
])
self.expected_result = [
self.make_lan_device("192.168.1.1", "01-f0-60-b4-da-10"),
self.make_lan_device("192.168.1.3", "d3-1e-52-7f-2d-81"),
self.make_lan_device("192.168.1.7", "03-83-65-cf-c4-cd"),
self.make_lan_device("192.168.1.14", "f0:1a:bd:f0:2f:a2"),
self.make_lan_device("192.168.1.15", "50.9a.71.5d.24.e2"),
self.make_lan_device("192.168.1.23", "01-41-56-0C-EB-01"),
self.make_lan_device("192.168.1.165", "f5-6D:03-5e.45-1F"),
self.make_lan_device("192.168.1.255", "ff-ff-ff-ff-ff-ff",
aliases=["192.168.20.255"]), # Same mac-address -> same LANDevice
self.make_lan_device("224.0.0.22", "01-00-5e-00-00-16"),
self.make_lan_device("192.168.20.143", "01-f0-61-b4-d9-10"),
self.make_lan_device("192.168.20.251", "d3-1a-52-7f-2f-82")
]
self.popen_mock = self.MockPopen()
self.popen_communicate_mock = unittest.mock.MagicMock(return_value=(self.arp_output, b""))
self.popen_mock.communicate = self.popen_communicate_mock
self.popen_returncode_mock = unittest.mock.PropertyMock(return_value=0)
type(self.popen_mock).returncode = self.popen_returncode_mock
self.popen_init_mock = unittest.mock.MagicMock(return_value=self.popen_mock)
setattr(subprocess, "Popen", self.popen_init_mock)
self.popen_init_args = (["arp", "-a"],)
self.popen_init_kwargs = {"bufsize": 100000,
"stdin": subprocess.PIPE,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE}
def test_scan(self):
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
# Test list devices
devices = self.scanner.list_devices()
self.popen_init_mock.assert_called_once_with(*self.popen_init_args,
**self.popen_init_kwargs)
self.popen_returncode_mock.assert_called_once_with()
self.popen_communicate_mock.assert_called_once_with()
self.assertEqual(tuple(self.expected_result), devices,
msg="LANDeviceScanner.list_devices did not return the expected values")
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
# Scanning after a scan that already got results should do nothing
devices = self.scanner.list_devices()
self.popen_init_mock.assert_not_called()
self.popen_returncode_mock.assert_not_called()
self.popen_communicate_mock.assert_not_called()
# Test find device
test_device = self.expected_result[0]
devices = self.scanner.find_devices(mac_address=test_device.mac_address)
self.assertSequenceEqual((test_device,), devices,
msg="LANDeviceScanner.find_devices did not return the expected "
"result")
def test_scan_errors(self):
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
# ARP command returns 1
old_return_value = self.popen_returncode_mock.return_value
self.popen_returncode_mock.return_value = 1
devices = self.scanner.list_devices(rescan=True)
self.popen_returncode_mock.return_value = old_return_value
self.popen_init_mock.assert_called_once_with(*self.popen_init_args,
**self.popen_init_kwargs)
self.popen_returncode_mock.assert_called_once_with()
self.assertSequenceEqual(tuple(), devices,
msg="When \"arp\" fails, the scanner should not find any devices")
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
# ARP command prints to stderr
old_return_value = self.popen_communicate_mock.return_value
self.popen_communicate_mock.return_value = (self.arp_output, b"Some error occurred!")
devices = self.scanner.list_devices(rescan=True)
self.popen_communicate_mock.return_value = old_return_value
self.popen_init_mock.assert_called_once_with(*self.popen_init_args,
**self.popen_init_kwargs)
self.popen_returncode_mock.assert_called_once_with()
self.assertSequenceEqual(tuple(), devices,
msg="When \"arp\" prints to stderr, the scanner should not find "
"any devices")
# ARP command not found
old_side_effect = self.popen_init_mock.side_effect
self.popen_init_mock.side_effect = FileNotFoundError("Command \"arp\" not found")
with self.assertRaises(FileNotFoundError,
msg="If the arp-command could not be found, searching for devices "
"should raise an exception"):
devices = self.scanner.list_devices(rescan=True)
self.popen_init_mock.side_effect = old_side_effect
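The valid and invalid rows in the mocked "arp -a" output above pin down the parsing contract the scanner is expected to honor. As a rough illustration of that contract (a sketch only; the regex and the parse_arp_line helper are assumptions, not actual device_manager code):

import re

# Accepts MACs with "-", ":" or "." separators, matching the test data above.
_ARP_LINE = re.compile(
    r"^\s*(?P<ip>(?:\d{1,3}\.){3}\d{1,3})\s+"
    r"(?P<mac>(?:[0-9A-Fa-f]{2}[-:.]){5}[0-9A-Fa-f]{2})"
)

def parse_arp_line(line):
    """Return (ip, mac) for a valid arp table row, or None otherwise."""
    match = _ARP_LINE.match(line)
    return (match.group("ip"), match.group("mac")) if match else None

assert parse_arp_line("  192.168.1.1           01-f0-60-b4-da-10     dynamic")
assert parse_arp_line("  192.168.24.10                               dynamic") is None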
| 49.544892
| 100
| 0.620509
|
import os
import subprocess
import sys
import unittest
import unittest.mock
from device_manager.device import USBDevice, LANDevice
from device_manager.scanner import USBDeviceScanner, LANDeviceScanner
@unittest.skipUnless(sys.platform == "win32", "Requires Windows")
class TestWin32ScannerImport(unittest.TestCase):
def test_import(self):
from device_manager.scanner._win32 import Win32USBDeviceScanner, Win32LANDeviceScanner
self.assertIs(USBDeviceScanner, Win32USBDeviceScanner)
self.assertIs(LANDeviceScanner, Win32LANDeviceScanner)
def test_import_error(self):
        with self.assertRaises(ImportError, msg="Importing Linux-specific device scanners should "
                                                "fail on Windows"):
import device_manager.scanner._linux
@unittest.skipUnless(sys.platform == "win32", "Requires Windows")
class TestWin32USBDeviceScanner(unittest.TestCase):
class MockWin32Entity:
def __init__(self, class_name, pnp_class, vendor_id, product_id, revision_id, instance_id):
device_id = "{pnp}\\VID_{vid:04X}&PID_{pid:04X}\\{inst}".format(
pnp=pnp_class, vid=vendor_id, pid=product_id, inst=instance_id)
hardware_ids = ["{pnp}\\VID_{vid:04X}&PID_{pid:04X}".format(
pnp=pnp_class, vid=vendor_id, pid=product_id)]
if revision_id is not None:
hardware_ids.append("{pnp}\\VID_{vid:04X}&PID_{pid:04X}&REV_{rev:04X}".format(
pnp=pnp_class, vid=vendor_id, pid=product_id, rev=revision_id))
compatible_ids = ["{pnp}\\{pnp}00_HUB".format(pnp=pnp_class)]
self.CreationClassName = class_name
self.PNPClass = pnp_class
self.DeviceID = device_id
self.HardwareID = hardware_ids
self.CompatibleID = compatible_ids
self.device = USBDevice()
self.device.address = device_id
self.device.address_aliases = [*hardware_ids, *compatible_ids]
self.device.vendor_id = vendor_id
self.device.product_id = product_id
self.device.revision_id = revision_id
instance_id = instance_id.split("&")[0]
if len(instance_id) > 1:
self.device.serial = instance_id
class MockWMIConnectServer:
def ExecQuery(self, *args, **kwargs):
raise NotImplementedError()
def setUp(self) -> None:
self.scanner = USBDeviceScanner()
self.valid_devices = [
self.MockWin32Entity("Win32_PnPEntity", "USB", 0x12AB, 0x0123, 0x0100, "01234ABCDEF"),
self.MockWin32Entity("Win32_PnPEntity", "USB", 0x5A67, 0xAB98, None, "012345678AB"),
self.MockWin32Entity("Win32_PnPEntity", "USB", 0x1234, 0x9871, 0x0010, "123&125&123"),
self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "")
]
self.invalid_devices = [
self.MockWin32Entity("Win32_PnPEntity", "PCI", 0x12AB, 0x0123, 0x0100, "INVALID1"),
self.MockWin32Entity("Win32_PnPEntity", "usb", 0x1234, 0xFEDC, 0x0000, "INVALID2"),
self.MockWin32Entity("invalid", "USB", 0x12AB, 0x0123, 0x0100, "INVALID3")
]
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID4")
del tmp_dev.DeviceID
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID5")
tmp_dev.DeviceID = "USB"
tmp_dev.CompatibleID = []
tmp_dev.HardwareID = []
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID5")
tmp_dev.DeviceID = "USB\\VID_0000&PID_0000_0000"
tmp_dev.CompatibleID = []
tmp_dev.HardwareID = []
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID6")
tmp_dev.CompatibleID = ["USBXYZ\\abcdef1234"]
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID6")
tmp_dev.CompatibleID = ["USB\\0123456\\INVALIDXXXX"]
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "PCI", 0, 0, 0, "INVALID7")
tmp_dev.PNPClass = "USB"
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID8")
tmp_dev.HardwareID.append(self.valid_devices[0].DeviceID)
self.invalid_devices.append(tmp_dev)
tmp_dev = self.MockWin32Entity("Win32_PnPEntity", "USB", 0, 0, 0, "INVALID9")
tmp_dev.HardwareID.append(self.valid_devices[0].HardwareID[0])
self.invalid_devices.append(tmp_dev)
self.all_devices = [*self.valid_devices, *self.invalid_devices]
self.wmi_mock = unittest.mock.MagicMock(return_value=self.all_devices)
self.wmi_mock_expected_argument = "SELECT * FROM Win32_PnPEntity"
self.scanner._wbem = self.MockWMIConnectServer()
self.scanner._wbem.ExecQuery = self.wmi_mock
def test_scan(self):
self.wmi_mock.reset_mock()
devices = self.scanner.list_devices(rescan=True)
self.wmi_mock.assert_called_once_with(self.wmi_mock_expected_argument)
for dev in self.valid_devices:
self.assertIn(dev.device, devices)
for dev in self.invalid_devices:
self.assertNotIn(dev.device, devices)
for search_device in self.valid_devices:
found_devices = self.scanner.find_devices(**search_device.device.unique_identifier)
self.assertSequenceEqual((search_device.device,), found_devices,
msg="The device which was searched by its unique identifiers "
"was not found")
found_devices = self.scanner.find_devices(address=search_device.device.address)
self.assertSequenceEqual((search_device.device,), found_devices,
msg="The device which was searched by address was not found")
found_devices = self.scanner.find_devices(invalid_param="test")
self.assertSequenceEqual(tuple(), found_devices,
msg="Searching for an invalid parameter should not return any "
"results")
def test_scan_errors(self):
devices = self.scanner.list_devices()
self.wmi_mock.reset_mock()
rescan_devices = self.scanner.list_devices()
self.wmi_mock.assert_not_called()
self.assertSequenceEqual(devices, rescan_devices,
msg="The second scan with Win32USBDeviceScanner should return the "
"same values without forcing a rescan")
self.wmi_mock.reset_mock()
old_return_value = self.wmi_mock.return_value
try:
self.wmi_mock.return_value = [self.valid_devices[0]]
rescan_devices = self.scanner.list_devices(rescan=True)
finally:
self.wmi_mock.return_value = old_return_value
self.wmi_mock.assert_called_once_with(self.wmi_mock_expected_argument)
self.assertSequenceEqual((self.valid_devices[0].device,), rescan_devices,
msg="A forced rescan with Win32USBDeviceScanner should return "
"other values than before")
@unittest.skipUnless(sys.platform == "win32", "Requires Windows")
class TestWin32LANDeviceScanner(unittest.TestCase):
class MockPopen:
def communicate(self, input=None):
raise NotImplementedError()
@property
def returncode(self):
raise NotImplementedError()
@staticmethod
def make_lan_device(address, mac_address, aliases=None):
device = LANDevice()
device.address = address
device.address_aliases = aliases
device.mac_address = mac_address
return device
def setUp(self) -> None:
self.scanner = LANDeviceScanner()
self.arp_output = os.linesep.encode().join([
b"Interface: 192.168.1.98 --- 0x10",
b" Internet Address Physical Address Type",
b" 192.168.1.1 01-f0-60-b4-da-10 dynamic",
b" 192.168.1.3\td3-1e-52-7f-2d-81 dynamic",
b" 192.168.1.7 03-83-65-cf-c4-cd",
b" 192.168.1.14 f0:1a:bd:f0:2f:a2 dynamic",
b" 192.168.1.15 50.9a.71.5d.24.e2 dynamic",
b" 192.168.1.23 01-41-56-0C-EB-01 dynamic",
b" 192.168.1.165 f5-6D:03-5e.45-1F dynamic",
b" 192.168.1.255 ff-ff-ff-ff-ff-ff static",
b" 224.0.0.22 01-00-5e-00-00-16 static",
b"",
b"Interface: 192.168.20.27 --- 0x17",
b" Internet Address Physical Address Type",
b"192.168.20.143 01-f0-61-b4-d9-10 dynamic",
b" 192.168.20.251 d3-1a-52-7f-2f-82 dynamic",
b" 192.168.20.255 ff-ff-ff-ff-ff-ff static",
b" 224.0.0.22 01-00-5e-00-00-16 static",
b"",
b"Interface: 192.168.24.75 --- 0x19",
b" 192.168.24.10 dynamic",
b" 192.168.24.11",
b" 192.168.24 01-f0-61-b4-d9-10 dynamic",
b" 192.168.24.251 d3-1G-53-7f-2f-82 dynamic",
b" 192.168.24.252 abc 00-1a-53-7f-2f-82 dynamic",
b""
])
self.expected_result = [
self.make_lan_device("192.168.1.1", "01-f0-60-b4-da-10"),
self.make_lan_device("192.168.1.3", "d3-1e-52-7f-2d-81"),
self.make_lan_device("192.168.1.7", "03-83-65-cf-c4-cd"),
self.make_lan_device("192.168.1.14", "f0:1a:bd:f0:2f:a2"),
self.make_lan_device("192.168.1.15", "50.9a.71.5d.24.e2"),
self.make_lan_device("192.168.1.23", "01-41-56-0C-EB-01"),
self.make_lan_device("192.168.1.165", "f5-6D:03-5e.45-1F"),
self.make_lan_device("192.168.1.255", "ff-ff-ff-ff-ff-ff",
aliases=["192.168.20.255"]),
self.make_lan_device("224.0.0.22", "01-00-5e-00-00-16"),
self.make_lan_device("192.168.20.143", "01-f0-61-b4-d9-10"),
self.make_lan_device("192.168.20.251", "d3-1a-52-7f-2f-82")
]
self.popen_mock = self.MockPopen()
self.popen_communicate_mock = unittest.mock.MagicMock(return_value=(self.arp_output, b""))
self.popen_mock.communicate = self.popen_communicate_mock
self.popen_returncode_mock = unittest.mock.PropertyMock(return_value=0)
type(self.popen_mock).returncode = self.popen_returncode_mock
self.popen_init_mock = unittest.mock.MagicMock(return_value=self.popen_mock)
setattr(subprocess, "Popen", self.popen_init_mock)
self.popen_init_args = (["arp", "-a"],)
self.popen_init_kwargs = {"bufsize": 100000,
"stdin": subprocess.PIPE,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE}
def test_scan(self):
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
devices = self.scanner.list_devices()
self.popen_init_mock.assert_called_once_with(*self.popen_init_args,
**self.popen_init_kwargs)
self.popen_returncode_mock.assert_called_once_with()
self.popen_communicate_mock.assert_called_once_with()
self.assertEqual(tuple(self.expected_result), devices,
msg="LANDeviceScanner.list_devices did not return the expected values")
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
devices = self.scanner.list_devices()
self.popen_init_mock.assert_not_called()
self.popen_returncode_mock.assert_not_called()
self.popen_communicate_mock.assert_not_called()
test_device = self.expected_result[0]
devices = self.scanner.find_devices(mac_address=test_device.mac_address)
self.assertSequenceEqual((test_device,), devices,
msg="LANDeviceScanner.find_devices did not return the expected "
"result")
def test_scan_errors(self):
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
old_return_value = self.popen_returncode_mock.return_value
self.popen_returncode_mock.return_value = 1
devices = self.scanner.list_devices(rescan=True)
self.popen_returncode_mock.return_value = old_return_value
self.popen_init_mock.assert_called_once_with(*self.popen_init_args,
**self.popen_init_kwargs)
self.popen_returncode_mock.assert_called_once_with()
self.assertSequenceEqual(tuple(), devices,
msg="When \"arp\" fails, the scanner should not find any devices")
self.popen_init_mock.reset_mock()
self.popen_returncode_mock.reset_mock()
self.popen_communicate_mock.reset_mock()
old_return_value = self.popen_communicate_mock.return_value
self.popen_communicate_mock.return_value = (self.arp_output, b"Some error occurred!")
devices = self.scanner.list_devices(rescan=True)
self.popen_communicate_mock.return_value = old_return_value
self.popen_init_mock.assert_called_once_with(*self.popen_init_args,
**self.popen_init_kwargs)
self.popen_returncode_mock.assert_called_once_with()
self.assertSequenceEqual(tuple(), devices,
msg="When \"arp\" prints to stderr, the scanner should not find "
"any devices")
old_side_effect = self.popen_init_mock.side_effect
self.popen_init_mock.side_effect = FileNotFoundError("Command \"arp\" not found")
with self.assertRaises(FileNotFoundError,
msg="If the arp-command could not be found, searching for devices "
"should raise an exception"):
devices = self.scanner.list_devices(rescan=True)
self.popen_init_mock.side_effect = old_side_effect
| true
| true
|
1c4394fff12d771e0ea171ef042d49040a6866df
| 2,724
|
py
|
Python
|
setup.py
|
fangd123/TextBrewer
|
866f4363d9bd964f00aa60b0db5e9252a7905448
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
fangd123/TextBrewer
|
866f4363d9bd964f00aa60b0db5e9252a7905448
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
fangd123/TextBrewer
|
866f4363d9bd964f00aa60b0db5e9252a7905448
|
[
"Apache-2.0"
] | null | null | null |
"""
Simple checklist from the AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for PyPI:
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the PyPI test server:
twine upload dist/* -r pypitest
(PyPI suggests using twine, as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi textbrewer
6. Upload the final version to the actual PyPI:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
import shutil
from pathlib import Path
from setuptools import find_packages, setup
setup(
name="textbrewer",
version="0.2.1",
author="ziqingyang",
author_email="zqyang5@iflytek.com",
description="PyTorch-based knowledge distillation toolkit for natural language processing",
long_description="PyTorch-based knowledge distillation toolkit for natural language processing.",
#long_description=open("READMEshort.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning knowledge distillation pytorch",
#license="",
url="http://textbrewer.hfl-rc.com",
#package_dir={"": "src"},
packages=['textbrewer'],
package_dir={'':'src'},
install_requires=[
"numpy",
"torch >= 1.1",
"tensorboard",
"tqdm"
],
python_requires=">=3.6",
classifiers=[
#"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| 41.907692
| 101
| 0.698972
|
import shutil
from pathlib import Path
from setuptools import find_packages, setup
setup(
name="textbrewer",
version="0.2.1",
author="ziqingyang",
author_email="zqyang5@iflytek.com",
description="PyTorch-based knowledge distillation toolkit for natural language processing",
long_description="PyTorch-based knowledge distillation toolkit for natural language processing.",
long_description_content_type="text/markdown",
keywords="NLP deep learning knowledge distillation pytorch",
url="http://textbrewer.hfl-rc.com",
packages=['textbrewer'],
package_dir={'':'src'},
install_requires=[
"numpy",
"torch >= 1.1",
"tensorboard",
"tqdm"
],
python_requires=">=3.6",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| true
| true
|
1c43953717c27a54f36e47fe96bee4424e86d55d
| 7,825
|
py
|
Python
|
django/core/handlers/wsgi.py
|
raydeal/django
|
e96320c91724830034033a9cb8afd9cf8c11e2fd
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
django/core/handlers/wsgi.py
|
raydeal/django
|
e96320c91724830034033a9cb8afd9cf8c11e2fd
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
django/core/handlers/wsgi.py
|
raydeal/django
|
e96320c91724830034033a9cb8afd9cf8c11e2fd
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
from io import BytesIO
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.http import HttpRequest, QueryDict, parse_cookie
from django.urls import set_script_prefix
from django.utils.encoding import repercent_broken_unicode
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
_slashes_re = _lazy_re_compile(rb"/+")
class LimitedStream:
"""Wrap another stream to disallow reading it past a number of bytes."""
def __init__(self, stream, limit):
self.stream = stream
self.remaining = limit
self.buffer = b""
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b""
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b""
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b""
return result
def readline(self, size=None):
while b"\n" not in self.buffer and (size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
def close(self):
pass
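# Illustration (a sketch, not part of Django itself): LimitedStream caps reads
# at the declared limit, which is how WSGIRequest below bounds request-body
# reads to CONTENT_LENGTH.
#
#     from io import BytesIO
#     body = LimitedStream(BytesIO(b"name=value&extra-bytes"), limit=10)
#     body.read()   # b'name=value'  -- only the first 10 bytes
#     body.read()   # b''            -- the limit is exhausted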
class WSGIRequest(HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
# If PATH_INFO is empty (e.g. accessing the SCRIPT_NAME URL without a
# trailing slash), operate as if '/' was requested.
path_info = get_path_info(environ) or "/"
self.environ = environ
self.path_info = path_info
# be careful to only replace the first slash in the path because of
# http://test/something and http://test//something being different as
# stated in https://www.ietf.org/rfc/rfc2396.txt
self.path = "%s/%s" % (script_name.rstrip("/"), path_info.replace("/", "", 1))
self.META = environ
self.META["PATH_INFO"] = path_info
self.META["SCRIPT_NAME"] = script_name
self.method = environ["REQUEST_METHOD"].upper()
# Set content_type, content_params, and encoding.
self._set_content_type_params(environ)
try:
content_length = int(environ.get("CONTENT_LENGTH"))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ["wsgi.input"], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get("wsgi.url_scheme")
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, "QUERY_STRING", "")
return QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, "_post"):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, "HTTP_COOKIE", "")
return parse_cookie(raw_cookie)
@property
def FILES(self):
if not hasattr(self, "_files"):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
class WSGIHandler(base.BaseHandler):
request_class = WSGIRequest
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.load_middleware()
def __call__(self, environ, start_response):
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
request = self.request_class(environ)
response = self.get_response(request)
response._handler_class = self.__class__
status = "%d %s" % (response.status_code, response.reason_phrase)
response_headers = [
*response.items(),
*(("Set-Cookie", c.output(header="")) for c in response.cookies.values()),
]
start_response(status, response_headers)
if getattr(response, "file_to_stream", None) is not None and environ.get(
"wsgi.file_wrapper"
):
# If `wsgi.file_wrapper` is used the WSGI server does not call
# .close on the response, but on the file wrapper. Patch it to use
# response.close instead which takes care of closing all files.
response.file_to_stream.close = response.close
response = environ["wsgi.file_wrapper"](
response.file_to_stream, response.block_size
)
return response
def get_path_info(environ):
"""Return the HTTP request's PATH_INFO as a string."""
path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "/")
return repercent_broken_unicode(path_info).decode()
def get_script_name(environ):
"""
Return the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite is used, return what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return settings.FORCE_SCRIPT_NAME
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, "SCRIPT_URL", "") or get_bytes_from_wsgi(
environ, "REDIRECT_URL", ""
)
if script_url:
if b"//" in script_url:
# mod_wsgi squashes multiple successive slashes in PATH_INFO,
# do the same with script_url before manipulating paths (#17133).
script_url = _slashes_re.sub(b"/", script_url)
path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "")
script_name = script_url[: -len(path_info)] if path_info else script_url
else:
script_name = get_bytes_from_wsgi(environ, "SCRIPT_NAME", "")
return script_name.decode()
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be strings.
"""
value = environ.get(key, default)
# Non-ASCII values in the WSGI environ are arbitrarily decoded with
# ISO-8859-1. This is wrong for Django websites where UTF-8 is the default.
# Re-encode to recover the original bytestring.
return value.encode("iso-8859-1")
def get_str_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as str.
key and default should be str objects.
"""
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(errors="replace")
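The latin-1 round trip behind get_bytes_from_wsgi can be seen in isolation (a standalone illustration, independent of Django): WSGI servers hand over header bytes decoded with ISO-8859-1, so re-encoding the environ string recovers the original bytestring losslessly.

raw_in_environ = "caf\xc3\xa9"                    # UTF-8 bytes as a latin-1-decoded str
recovered = raw_in_environ.encode("iso-8859-1")   # b'caf\xc3\xa9' -- the original bytes
assert recovered.decode("utf-8") == "café"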
| 35.894495
| 87
| 0.648051
|
from io import BytesIO
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.http import HttpRequest, QueryDict, parse_cookie
from django.urls import set_script_prefix
from django.utils.encoding import repercent_broken_unicode
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
_slashes_re = _lazy_re_compile(rb"/+")
class LimitedStream:
def __init__(self, stream, limit):
self.stream = stream
self.remaining = limit
self.buffer = b""
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b""
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b""
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else:
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b""
return result
def readline(self, size=None):
while b"\n" not in self.buffer and (size is None or len(self.buffer) < size):
if size:
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
def close(self):
pass
class WSGIRequest(HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ) or "/"
self.environ = environ
self.path_info = path_info
self.path = "%s/%s" % (script_name.rstrip("/"), path_info.replace("/", "", 1))
self.META = environ
self.META["PATH_INFO"] = path_info
self.META["SCRIPT_NAME"] = script_name
self.method = environ["REQUEST_METHOD"].upper()
self._set_content_type_params(environ)
try:
content_length = int(environ.get("CONTENT_LENGTH"))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ["wsgi.input"], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get("wsgi.url_scheme")
@cached_property
def GET(self):
raw_query_string = get_bytes_from_wsgi(self.environ, "QUERY_STRING", "")
return QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, "_post"):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, "HTTP_COOKIE", "")
return parse_cookie(raw_cookie)
@property
def FILES(self):
if not hasattr(self, "_files"):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
class WSGIHandler(base.BaseHandler):
request_class = WSGIRequest
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.load_middleware()
def __call__(self, environ, start_response):
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
request = self.request_class(environ)
response = self.get_response(request)
response._handler_class = self.__class__
status = "%d %s" % (response.status_code, response.reason_phrase)
response_headers = [
*response.items(),
*(("Set-Cookie", c.output(header="")) for c in response.cookies.values()),
]
start_response(status, response_headers)
if getattr(response, "file_to_stream", None) is not None and environ.get(
"wsgi.file_wrapper"
):
response.file_to_stream.close = response.close
response = environ["wsgi.file_wrapper"](
response.file_to_stream, response.block_size
)
return response
def get_path_info(environ):
path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "/")
return repercent_broken_unicode(path_info).decode()
def get_script_name(environ):
if settings.FORCE_SCRIPT_NAME is not None:
return settings.FORCE_SCRIPT_NAME
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, "SCRIPT_URL", "") or get_bytes_from_wsgi(
environ, "REDIRECT_URL", ""
)
if script_url:
if b"//" in script_url:
# mod_wsgi squashes multiple successive slashes in PATH_INFO,
# do the same with script_url before manipulating paths (#17133).
script_url = _slashes_re.sub(b"/", script_url)
path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "")
script_name = script_url[: -len(path_info)] if path_info else script_url
else:
script_name = get_bytes_from_wsgi(environ, "SCRIPT_NAME", "")
return script_name.decode()
def get_bytes_from_wsgi(environ, key, default):
value = environ.get(key, default)
# Non-ASCII values in the WSGI environ are arbitrarily decoded with
# ISO-8859-1. This is wrong for Django websites where UTF-8 is the default.
# Re-encode to recover the original bytestring.
return value.encode("iso-8859-1")
def get_str_from_wsgi(environ, key, default):
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(errors="replace")
| true
| true
|
1c4395fdce03fca9baeba81e5f548c41f4601762
| 446
|
py
|
Python
|
lib/download.py
|
colajam93/aurpackager
|
624b9be3dcb0d1ef388a0bd2c3019390881d51cd
|
[
"MIT"
] | null | null | null |
lib/download.py
|
colajam93/aurpackager
|
624b9be3dcb0d1ef388a0bd2c3019390881d51cd
|
[
"MIT"
] | null | null | null |
lib/download.py
|
colajam93/aurpackager
|
624b9be3dcb0d1ef388a0bd2c3019390881d51cd
|
[
"MIT"
] | null | null | null |
from contextlib import closing
from urllib.request import urlopen, OpenerDirector
def save_to_file(url: str, file_path: str, opener: OpenerDirector = None):
def process(request_):
with open(file_path, 'wb') as f:
f.write(request_.read())
if opener:
with closing(opener.open(url)) as request:
process(request)
else:
with closing(urlopen(url)) as request:
process(request)
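Typical usage (a sketch; the URL and target path are placeholders, not values from this project). An OpenerDirector built with urllib.request.build_opener can carry custom handlers such as proxies or cookie processors:

from urllib.request import build_opener

save_to_file("https://example.com/PKGBUILD", "/tmp/PKGBUILD")

opener = build_opener()  # add custom handlers here as needed
save_to_file("https://example.com/PKGBUILD", "/tmp/PKGBUILD", opener=opener)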
| 27.875
| 74
| 0.647982
|
from contextlib import closing
from urllib.request import urlopen, OpenerDirector
def save_to_file(url: str, file_path: str, opener: OpenerDirector = None):
def process(request_):
with open(file_path, 'wb') as f:
f.write(request_.read())
if opener:
with closing(opener.open(url)) as request:
process(request)
else:
with closing(urlopen(url)) as request:
process(request)
| true
| true
|
1c4396f29e3704807e1fce11fdaed48c810a35a5
| 7,393
|
py
|
Python
|
okta/resource_clients/domain_client.py
|
ander501/okta-sdk-python
|
0927dc6a2f6d5ebf7cd1ea806d81065094c92471
|
[
"Apache-2.0"
] | null | null | null |
okta/resource_clients/domain_client.py
|
ander501/okta-sdk-python
|
0927dc6a2f6d5ebf7cd1ea806d81065094c92471
|
[
"Apache-2.0"
] | null | null | null |
okta/resource_clients/domain_client.py
|
ander501/okta-sdk-python
|
0927dc6a2f6d5ebf7cd1ea806d81065094c92471
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.models.domain_list_response\
import DomainListResponse
from okta.models.domain\
import Domain
from okta.utils import format_url
from okta.api_client import APIClient
class DomainClient(APIClient):
"""
A Client object for the Domain resource.
"""
def __init__(self):
self._base_url = ""
async def list_domains(
self,
keep_empty_params=False
):
"""
List all verified custom Domains for the org.
Args:
Returns:
DomainListResponse
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, DomainListResponse)
if error:
return (None, response, error)
try:
result = DomainListResponse(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def create_domain(
self, domain,
keep_empty_params=False
):
"""
Creates your domain.
Args:
{domain}
Returns:
Domain
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains
""")
if isinstance(domain, dict):
body = domain
else:
body = domain.as_dict()
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, Domain)
if error:
return (None, response, error)
try:
result = Domain(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def delete_domain(
self, domainId,
keep_empty_params=False
):
"""
Deletes a Domain by `id`.
Args:
domain_id {str}
"""
http_method = "delete".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, error)
response, error = await self._request_executor\
.execute(request)
if error:
return (response, error)
return (response, None)
async def get_domain(
self, domainId,
keep_empty_params=False
):
"""
Fetches a Domain by `id`.
Args:
domain_id {str}
Returns:
Domain
"""
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, Domain)
if error:
return (None, response, error)
try:
result = Domain(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def create_certificate(
self, domainId, domain_certificate,
keep_empty_params=False
):
"""
Creates the Certificate for the Domain.
Args:
domain_id {str}
{domain_certificate}
"""
http_method = "put".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}/certificate
""")
if isinstance(domain_certificate, dict):
body = domain_certificate
else:
body = domain_certificate.as_dict()
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, error)
response, error = await self._request_executor\
.execute(request)
if error:
return (response, error)
return (response, None)
async def verify_domain(
self, domainId,
keep_empty_params=False
):
"""
Verifies the Domain by `id`.
Args:
domain_id {str}
Returns:
Domain
"""
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}/verify
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, Domain)
if error:
return (None, response, error)
try:
result = Domain(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
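A usage sketch of the (result, response, error) convention these methods share. It assumes the SDK's documented top-level Client, which wires a request executor into resource clients like this one; the org URL and token are placeholders:

import asyncio
from okta.client import Client

async def main():
    client = Client({"orgUrl": "https://example.okta.com", "token": "API_TOKEN"})
    domains, response, error = await client.list_domains()
    if error:
        print("request failed:", error)
    else:
        print(domains)

# asyncio.run(main())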
| 26.123675
| 90
| 0.548357
|
from okta.models.domain_list_response\
import DomainListResponse
from okta.models.domain\
import Domain
from okta.utils import format_url
from okta.api_client import APIClient
class DomainClient(APIClient):
def __init__(self):
self._base_url = ""
async def list_domains(
self,
keep_empty_params=False
):
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, DomainListResponse)
if error:
return (None, response, error)
try:
result = DomainListResponse(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def create_domain(
self, domain,
keep_empty_params=False
):
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains
""")
if isinstance(domain, dict):
body = domain
else:
body = domain.as_dict()
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, Domain)
if error:
return (None, response, error)
try:
result = Domain(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def delete_domain(
self, domainId,
keep_empty_params=False
):
http_method = "delete".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, error)
response, error = await self._request_executor\
.execute(request)
if error:
return (response, error)
return (response, None)
async def get_domain(
self, domainId,
keep_empty_params=False
):
http_method = "get".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, Domain)
if error:
return (None, response, error)
try:
result = Domain(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
async def create_certificate(
self, domainId, domain_certificate,
keep_empty_params=False
):
http_method = "put".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}/certificate
""")
if isinstance(domain_certificate, dict):
body = domain_certificate
else:
body = domain_certificate.as_dict()
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, error)
response, error = await self._request_executor\
.execute(request)
if error:
return (response, error)
return (response, None)
async def verify_domain(
self, domainId,
keep_empty_params=False
):
http_method = "post".upper()
api_url = format_url(f"""
{self._base_url}
/api/v1/domains/{domainId}/verify
""")
body = {}
headers = {}
form = {}
request, error = await self._request_executor.create_request(
http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params
)
if error:
return (None, None, error)
response, error = await self._request_executor\
.execute(request, Domain)
if error:
return (None, response, error)
try:
result = Domain(
self.form_response_body(response.get_body())
)
except Exception as error:
return (None, response, error)
return (result, response, None)
| true
| true
|
1c43975c6be9b0a91b85f63283c7f97b975a1cb8
| 10,146
|
py
|
Python
|
threedi_api_client/openapi/models/potential_breach.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | null | null | null |
threedi_api_client/openapi/models/potential_breach.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | 16
|
2021-05-31T09:52:04.000Z
|
2022-03-14T16:07:19.000Z
|
threedi_api_client/openapi/models/potential_breach.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
3Di API
3Di simulation API (latest stable version: v3) Framework release: 2.9.0 3Di core release: 2.2.2 deployed on: 11:01AM (UTC) on January 11, 2022 # noqa: E501
The version of the OpenAPI document: v3
Contact: info@nelen-schuurmans.nl
Generated by: https://openapi-generator.tech
"""
import logging
import pprint
import re # noqa: F401
import six
from threedi_api_client.openapi.configuration import Configuration
logger = logging.getLogger(__name__)
class PotentialBreach(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'url': 'str',
'connected_pnt_id': 'int',
'levee_material': 'str',
'line_id': 'int',
'maximum_breach_depth': 'float',
'threedimodel': 'str',
'id': 'int'
}
attribute_map = {
'url': 'url',
'connected_pnt_id': 'connected_pnt_id',
'levee_material': 'levee_material',
'line_id': 'line_id',
'maximum_breach_depth': 'maximum_breach_depth',
'threedimodel': 'threedimodel',
'id': 'id'
}
def __init__(self, url=None, connected_pnt_id=None, levee_material=None, line_id=None, maximum_breach_depth=None, threedimodel=None, id=None, local_vars_configuration=None): # noqa: E501
"""PotentialBreach - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._url = None
self._connected_pnt_id = None
self._levee_material = None
self._line_id = None
self._maximum_breach_depth = None
self._threedimodel = None
self._id = None
self.discriminator = None
if url is not None:
self.url = url
self.connected_pnt_id = connected_pnt_id
self.levee_material = levee_material
self.line_id = line_id
self.maximum_breach_depth = maximum_breach_depth
if threedimodel is not None:
self.threedimodel = threedimodel
if id is not None:
self.id = id
@property
def url(self):
"""Gets the url of this PotentialBreach. # noqa: E501
:return: The url of this PotentialBreach. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this PotentialBreach.
:param url: The url of this PotentialBreach. # noqa: E501
:type: str
"""
self._url = url
@property
def connected_pnt_id(self):
"""Gets the connected_pnt_id of this PotentialBreach. # noqa: E501
:return: The connected_pnt_id of this PotentialBreach. # noqa: E501
:rtype: int
"""
return self._connected_pnt_id
@connected_pnt_id.setter
def connected_pnt_id(self, connected_pnt_id):
"""Sets the connected_pnt_id of this PotentialBreach.
:param connected_pnt_id: The connected_pnt_id of this PotentialBreach. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and connected_pnt_id is None: # noqa: E501
raise ValueError("Invalid value for `connected_pnt_id`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
connected_pnt_id is not None and connected_pnt_id > 2147483647): # noqa: E501
raise ValueError("Invalid value for `connected_pnt_id`, must be a value less than or equal to `2147483647`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
connected_pnt_id is not None and connected_pnt_id < -2147483648): # noqa: E501
raise ValueError("Invalid value for `connected_pnt_id`, must be a value greater than or equal to `-2147483648`") # noqa: E501
self._connected_pnt_id = connected_pnt_id
@property
def levee_material(self):
"""Gets the levee_material of this PotentialBreach. # noqa: E501
:return: The levee_material of this PotentialBreach. # noqa: E501
:rtype: str
"""
return self._levee_material
@levee_material.setter
def levee_material(self, levee_material):
"""Sets the levee_material of this PotentialBreach.
:param levee_material: The levee_material of this PotentialBreach. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and levee_material is None: # noqa: E501
raise ValueError("Invalid value for `levee_material`, must not be `None`") # noqa: E501
allowed_values = ["sand", "clay"] # noqa: E501
if self.local_vars_configuration.client_side_validation and levee_material not in allowed_values: # noqa: E501
logger.warning(
"Warning: Unknown value for `levee_material` ({0}), must be one of {1}. Either your threedi-api-client version is out of date or this value is invalid." # noqa: E501
.format(levee_material, allowed_values)
)
self._levee_material = levee_material
@property
def line_id(self):
"""Gets the line_id of this PotentialBreach. # noqa: E501
:return: The line_id of this PotentialBreach. # noqa: E501
:rtype: int
"""
return self._line_id
@line_id.setter
def line_id(self, line_id):
"""Sets the line_id of this PotentialBreach.
:param line_id: The line_id of this PotentialBreach. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and line_id is None: # noqa: E501
raise ValueError("Invalid value for `line_id`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
line_id is not None and line_id > 2147483647): # noqa: E501
raise ValueError("Invalid value for `line_id`, must be a value less than or equal to `2147483647`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
line_id is not None and line_id < -2147483648): # noqa: E501
raise ValueError("Invalid value for `line_id`, must be a value greater than or equal to `-2147483648`") # noqa: E501
self._line_id = line_id
@property
def maximum_breach_depth(self):
"""Gets the maximum_breach_depth of this PotentialBreach. # noqa: E501
:return: The maximum_breach_depth of this PotentialBreach. # noqa: E501
:rtype: float
"""
return self._maximum_breach_depth
@maximum_breach_depth.setter
def maximum_breach_depth(self, maximum_breach_depth):
"""Sets the maximum_breach_depth of this PotentialBreach.
:param maximum_breach_depth: The maximum_breach_depth of this PotentialBreach. # noqa: E501
:type: float
"""
if self.local_vars_configuration.client_side_validation and maximum_breach_depth is None: # noqa: E501
raise ValueError("Invalid value for `maximum_breach_depth`, must not be `None`") # noqa: E501
self._maximum_breach_depth = maximum_breach_depth
@property
def threedimodel(self):
"""Gets the threedimodel of this PotentialBreach. # noqa: E501
:return: The threedimodel of this PotentialBreach. # noqa: E501
:rtype: str
"""
return self._threedimodel
@threedimodel.setter
def threedimodel(self, threedimodel):
"""Sets the threedimodel of this PotentialBreach.
:param threedimodel: The threedimodel of this PotentialBreach. # noqa: E501
:type: str
"""
self._threedimodel = threedimodel
@property
def id(self):
"""Gets the id of this PotentialBreach. # noqa: E501
:return: The id of this PotentialBreach. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PotentialBreach.
:param id: The id of this PotentialBreach. # noqa: E501
:type: int
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PotentialBreach):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PotentialBreach):
return True
return self.to_dict() != other.to_dict()
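A brief sketch of the generated model in use: the setters run during construction, so with the default Configuration (client-side validation enabled) an out-of-range value raises immediately:

breach = PotentialBreach(connected_pnt_id=12, levee_material="clay",
                         line_id=77, maximum_breach_depth=1.5)
print(breach.to_dict()["levee_material"])  # 'clay'

try:
    PotentialBreach(connected_pnt_id=2 ** 31, levee_material="sand",
                    line_id=1, maximum_breach_depth=0.5)
except ValueError as exc:
    print(exc)  # connected_pnt_id must be <= 2147483647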
| 33.596026
| 191
| 0.623398
|
import logging
import pprint
import re
import six
from threedi_api_client.openapi.configuration import Configuration
logger = logging.getLogger(__name__)
class PotentialBreach(object):
openapi_types = {
'url': 'str',
'connected_pnt_id': 'int',
'levee_material': 'str',
'line_id': 'int',
'maximum_breach_depth': 'float',
'threedimodel': 'str',
'id': 'int'
}
attribute_map = {
'url': 'url',
'connected_pnt_id': 'connected_pnt_id',
'levee_material': 'levee_material',
'line_id': 'line_id',
'maximum_breach_depth': 'maximum_breach_depth',
'threedimodel': 'threedimodel',
'id': 'id'
}
def __init__(self, url=None, connected_pnt_id=None, levee_material=None, line_id=None, maximum_breach_depth=None, threedimodel=None, id=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._url = None
self._connected_pnt_id = None
self._levee_material = None
self._line_id = None
self._maximum_breach_depth = None
self._threedimodel = None
self._id = None
self.discriminator = None
if url is not None:
self.url = url
self.connected_pnt_id = connected_pnt_id
self.levee_material = levee_material
self.line_id = line_id
self.maximum_breach_depth = maximum_breach_depth
if threedimodel is not None:
self.threedimodel = threedimodel
if id is not None:
self.id = id
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = url
@property
def connected_pnt_id(self):
return self._connected_pnt_id
@connected_pnt_id.setter
def connected_pnt_id(self, connected_pnt_id):
if self.local_vars_configuration.client_side_validation and connected_pnt_id is None:
raise ValueError("Invalid value for `connected_pnt_id`, must not be `None`")
if (self.local_vars_configuration.client_side_validation and
connected_pnt_id is not None and connected_pnt_id > 2147483647):
raise ValueError("Invalid value for `connected_pnt_id`, must be a value less than or equal to `2147483647`")
if (self.local_vars_configuration.client_side_validation and
connected_pnt_id is not None and connected_pnt_id < -2147483648):
raise ValueError("Invalid value for `connected_pnt_id`, must be a value greater than or equal to `-2147483648`")
self._connected_pnt_id = connected_pnt_id
@property
def levee_material(self):
return self._levee_material
@levee_material.setter
def levee_material(self, levee_material):
if self.local_vars_configuration.client_side_validation and levee_material is None:
raise ValueError("Invalid value for `levee_material`, must not be `None`")
allowed_values = ["sand", "clay"]
if self.local_vars_configuration.client_side_validation and levee_material not in allowed_values:
logger.warning(
"Warning: Unknown value for `levee_material` ({0}), must be one of {1}. Either your threedi-api-client version is out of date or this value is invalid."
.format(levee_material, allowed_values)
)
self._levee_material = levee_material
@property
def line_id(self):
return self._line_id
@line_id.setter
def line_id(self, line_id):
if self.local_vars_configuration.client_side_validation and line_id is None:
raise ValueError("Invalid value for `line_id`, must not be `None`")
if (self.local_vars_configuration.client_side_validation and
line_id is not None and line_id > 2147483647):
raise ValueError("Invalid value for `line_id`, must be a value less than or equal to `2147483647`")
if (self.local_vars_configuration.client_side_validation and
line_id is not None and line_id < -2147483648):
raise ValueError("Invalid value for `line_id`, must be a value greater than or equal to `-2147483648`")
self._line_id = line_id
@property
def maximum_breach_depth(self):
return self._maximum_breach_depth
@maximum_breach_depth.setter
def maximum_breach_depth(self, maximum_breach_depth):
if self.local_vars_configuration.client_side_validation and maximum_breach_depth is None:
raise ValueError("Invalid value for `maximum_breach_depth`, must not be `None`")
self._maximum_breach_depth = maximum_breach_depth
@property
def threedimodel(self):
return self._threedimodel
@threedimodel.setter
def threedimodel(self, threedimodel):
self._threedimodel = threedimodel
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PotentialBreach):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, PotentialBreach):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
1c439907cfb244998cd93f3c730564f431b4e5ee
| 600
|
py
|
Python
|
coffeenijuan/management/migrations/0003_supply_addeded_by.py
|
Cyvid7-Darus10/CMSC-128-Group-Repo
|
cdbbe84f4af398a6c1d5c2749173d4cf585487a9
|
[
"MIT"
] | null | null | null |
coffeenijuan/management/migrations/0003_supply_addeded_by.py
|
Cyvid7-Darus10/CMSC-128-Group-Repo
|
cdbbe84f4af398a6c1d5c2749173d4cf585487a9
|
[
"MIT"
] | null | null | null |
coffeenijuan/management/migrations/0003_supply_addeded_by.py
|
Cyvid7-Darus10/CMSC-128-Group-Repo
|
cdbbe84f4af398a6c1d5c2749173d4cf585487a9
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-06-07 15:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('management', '0002_transaction'),
]
operations = [
migrations.AddField(
model_name='supply',
name='addeded_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
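For reference, this AddField corresponds to a model change along these lines (a sketch; the Supply model's other fields are omitted, and the misspelled field name is kept exactly as the migration defines it):

from django.conf import settings
from django.db import models

class Supply(models.Model):
    # ... existing fields ...
    addeded_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                   on_delete=models.CASCADE,
                                   null=True, blank=True)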
| 27.272727
| 133
| 0.678333
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('management', '0002_transaction'),
]
operations = [
migrations.AddField(
model_name='supply',
name='addeded_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true
| true
|
1c439b1e301860046f645abdbc1f4668b65d7558
| 1,419
|
py
|
Python
|
dataproc/google/cloud/dataproc_v1beta2/proto/shared_pb2.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2019-01-23T21:54:51.000Z
|
2019-01-23T21:54:51.000Z
|
dataproc/google/cloud/dataproc_v1beta2/proto/shared_pb2.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2018-04-06T19:51:23.000Z
|
2018-04-06T19:51:23.000Z
|
dataproc/google/cloud/dataproc_v1beta2/proto/shared_pb2.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2020-11-15T11:44:36.000Z
|
2020-11-15T11:44:36.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/dataproc_v1beta2/proto/shared.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/dataproc_v1beta2/proto/shared.proto",
package="google.cloud.dataproc.v1beta2",
syntax="proto3",
serialized_options=_b(
"\n!com.google.cloud.dataproc.v1beta2B\013SharedProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc"
),
serialized_pb=_b(
"\n0google/cloud/dataproc_v1beta2/proto/shared.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.protoBy\n!com.google.cloud.dataproc.v1beta2B\x0bSharedProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3"
),
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR],
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.384615
| 275
| 0.799859
|
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/dataproc_v1beta2/proto/shared.proto",
package="google.cloud.dataproc.v1beta2",
syntax="proto3",
serialized_options=_b(
"\n!com.google.cloud.dataproc.v1beta2B\013SharedProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc"
),
serialized_pb=_b(
"\n0google/cloud/dataproc_v1beta2/proto/shared.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.protoBy\n!com.google.cloud.dataproc.v1beta2B\x0bSharedProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3"
),
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR],
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
| true
| true
|
1c439bd57caf9056e4da24f63fd5fa1fd020399d
| 258
|
py
|
Python
|
core/urls.py
|
kamyabdesign/DRF_Django
|
7930ef9ad259c092e16e7ae1885054cef1608415
|
[
"Apache-2.0"
] | null | null | null |
core/urls.py
|
kamyabdesign/DRF_Django
|
7930ef9ad259c092e16e7ae1885054cef1608415
|
[
"Apache-2.0"
] | null | null | null |
core/urls.py
|
kamyabdesign/DRF_Django
|
7930ef9ad259c092e16e7ae1885054cef1608415
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('', include('blog.urls')),
path('api/', include('api.urls')),
]
| 23.454545
| 54
| 0.651163
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('', include('blog.urls')),
path('api/', include('api.urls')),
]
| true
| true
|
1c439c449f1626aa0ccf923efed17e25fa0b2efb
| 2,913
|
py
|
Python
|
partner/partner.py
|
aparkerlue/ledger-partner
|
b7d59f23ed5853312cdf9aa97f8941f00478d41b
|
[
"Apache-2.0"
] | null | null | null |
partner/partner.py
|
aparkerlue/ledger-partner
|
b7d59f23ed5853312cdf9aa97f8941f00478d41b
|
[
"Apache-2.0"
] | null | null | null |
partner/partner.py
|
aparkerlue/ledger-partner
|
b7d59f23ed5853312cdf9aa97f8941f00478d41b
|
[
"Apache-2.0"
] | null | null | null |
# -*- mode: python; coding: utf-8; -*-
"""Functions for partnership accounting with Ledger."""
from math import floor
from ledger import Amount
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
"""Return True if the values a and b are close to each other."""
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def parse_partner_string(s):
"""Return a dict of partner labels to percentages summing to one."""
L = [x.strip().split(" ") for x in s.split(",")]
# Numberless spec indicates equal weighting.
if all(len(x) <= 1 for x in L):
return {t[0]: 100 / len(L) for t in L}
# There should be no more than a single elided amount.
n_elisions = len([None for t in L if len(t) <= 1])
if n_elisions == 0:
spec = {t[0]: float(t[1]) for t in L}
n = sum(spec.values())
if n != 100:
raise ValueError("spec values do not total 100: %s" % n)
elif n_elisions == 1:
spec = {
t[0]: float(t[1])
if len(t) > 1
else 100 - sum(float(u[1]) for u in L if len(u) > 1)
for t in L
}
else:
assert n_elisions > 1
raise ValueError("number of elided values exceeds 1: %s" % n_elisions)
assert sum(spec.values()) == 100
return spec
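# Illustrative example (not in the original module): a single elided share
# is filled with the remainder, so parse_partner_string("a 50, b 30, c")
# returns {'a': 50.0, 'b': 30.0, 'c': 20.0}.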
def attribute_to_partner_strict(partner, partner_string_or_spec, amount):
"""Return the amount attributable to the given partner."""
spec = (
partner_string_or_spec
if isinstance(partner_string_or_spec, dict)
else parse_partner_string(partner_string_or_spec)
)
if partner not in spec:
raise ValueError("Partner not found in partner string: %s" % partner)
v100 = spec[partner] * float(amount.abs())
f_floor = round if isclose(v100, round(v100)) else floor
v = amount.sign() * 0.01 * f_floor(v100)
return Amount(str(v)).with_commodity(amount.commodity)
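# Illustrative example (not in the original module): with spec {'a': 33.33}
# and an amount of 10.00, v100 is 333.3; that is not within tolerance of an
# integer, so it is floored to 333 and the attributed amount is 3.33.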
def attribute_to_partner(partner, partner_string_or_spec, amount):
"""Return the amount attributable to the given partner."""
try:
a = attribute_to_partner_strict(partner, partner_string_or_spec, amount)
except ValueError:
a = Amount(0)
return a
def attribute_to_residual(partner_string, amount, digits_of_precision=2):
"""Return the residual amount not attributable to any partners."""
spec = parse_partner_string(partner_string)
v = round(
float(amount)
- sum(float(attribute_to_partner(partner, spec, amount)) for partner in spec),
digits_of_precision,
)
return Amount(str(v)).with_commodity(amount.commodity)
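# Illustrative example (not in the original module): with partner string
# "a 33.33, b 33.33, c 33.34" and an amount of 10.00, each partner is
# attributed 3.33 (333.4 is floored), leaving a residual of 0.01.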
def signed_unit_amount(amount):
"""Return amount with its sign and unit magnitude."""
return amount / amount.abs()
def partner_in_partner_string(partner, partner_string):
"""Return true if partner is present in partner string."""
return partner in parse_partner_string(partner_string)
| 33.872093
| 86
| 0.650532
|
from math import floor
from ledger import Amount
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def parse_partner_string(s):
L = [x.strip().split(" ") for x in s.split(",")]
if all(len(x) <= 1 for x in L):
return {t[0]: 100 / len(L) for t in L}
n_elisions = len([None for t in L if len(t) <= 1])
if n_elisions == 0:
spec = {t[0]: float(t[1]) for t in L}
n = sum(spec.values())
if n != 100:
raise ValueError("spec values do not total 100: %s" % n)
elif n_elisions == 1:
spec = {
t[0]: float(t[1])
if len(t) > 1
else 100 - sum(float(u[1]) for u in L if len(u) > 1)
for t in L
}
else:
assert n_elisions > 1
raise ValueError("number of elided values exceeds 1: %s" % n_elisions)
assert sum(spec.values()) == 100
return spec
def attribute_to_partner_strict(partner, partner_string_or_spec, amount):
spec = (
partner_string_or_spec
if isinstance(partner_string_or_spec, dict)
else parse_partner_string(partner_string_or_spec)
)
if partner not in spec:
raise ValueError("Partner not found in partner string: %s" % partner)
v100 = spec[partner] * float(amount.abs())
f_floor = round if isclose(v100, round(v100)) else floor
v = amount.sign() * 0.01 * f_floor(v100)
return Amount(str(v)).with_commodity(amount.commodity)
def attribute_to_partner(partner, partner_string_or_spec, amount):
try:
a = attribute_to_partner_strict(partner, partner_string_or_spec, amount)
except ValueError:
a = Amount(0)
return a
def attribute_to_residual(partner_string, amount, digits_of_precision=2):
spec = parse_partner_string(partner_string)
v = round(
float(amount)
- sum(float(attribute_to_partner(partner, spec, amount)) for partner in spec),
digits_of_precision,
)
return Amount(str(v)).with_commodity(amount.commodity)
def signed_unit_amount(amount):
return amount / amount.abs()
def partner_in_partner_string(partner, partner_string):
return partner in parse_partner_string(partner_string)
| true
| true
|
1c439c92d19627a8c9292e2c3337445b1ea5786f
| 91
|
py
|
Python
|
enthought/traits/traits_listener.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/traits/traits_listener.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/traits/traits_listener.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from traits.traits_listener import *
| 22.75
| 38
| 0.846154
|
from __future__ import absolute_import
from traits.traits_listener import *
| true
| true
|
1c439cda4f6402826d3d0ec46d23201b633cd875
| 649
|
py
|
Python
|
setup.py
|
caracalai/caracal
|
c93373fb88091175c32dc0e4a91eb7a78b693367
|
[
"MIT"
] | 25
|
2021-12-10T14:07:04.000Z
|
2022-02-21T14:45:31.000Z
|
setup.py
|
caracalai/caracal
|
c93373fb88091175c32dc0e4a91eb7a78b693367
|
[
"MIT"
] | null | null | null |
setup.py
|
caracalai/caracal
|
c93373fb88091175c32dc0e4a91eb7a78b693367
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="caracal",
version="0.0.1",
long_description=long_description,
author="BroutonLab team",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
author_email="hello@caracal.ai",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
install_requires=[
"pyzmq",
"protobuf",
"antlr4-python3-runtime",
"numpy",
],
python_requires=">=3.6",
)
| 24.037037
| 51
| 0.597843
|
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="caracal",
version="0.0.1",
long_description=long_description,
author="BroutonLab team",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
author_email="hello@caracal.ai",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
install_requires=[
"pyzmq",
"protobuf",
"antlr4-python3-runtime",
"numpy",
],
python_requires=">=3.6",
)
| true
| true
|
1c439db077a19cdd96ad6a2a146970e85ccbd10b
| 1,189
|
py
|
Python
|
custom_components/tesla_custom/binary_sensor.py
|
carleeno/tesla
|
81c342d5c2564eed3659cc5a0711b4c209dd6773
|
[
"Apache-2.0"
] | 108
|
2021-04-29T11:38:05.000Z
|
2022-03-25T10:35:28.000Z
|
custom_components/tesla_custom/binary_sensor.py
|
carleeno/tesla
|
81c342d5c2564eed3659cc5a0711b4c209dd6773
|
[
"Apache-2.0"
] | 120
|
2021-04-29T07:49:59.000Z
|
2022-03-31T04:45:15.000Z
|
custom_components/tesla_custom/binary_sensor.py
|
custom-components/tesla
|
004f265ec0c6e25ce2d04cdeea9964cf1d1cac4e
|
[
"Apache-2.0"
] | 33
|
2021-05-01T16:03:07.000Z
|
2022-03-12T21:54:40.000Z
|
"""Support for Tesla binary sensor."""
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from . import DOMAIN as TESLA_DOMAIN
from .tesla_device import TeslaDevice
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Tesla binary_sensors by config_entry."""
async_add_entities(
[
TeslaBinarySensor(
device,
hass.data[TESLA_DOMAIN][config_entry.entry_id]["coordinator"],
)
for device in hass.data[TESLA_DOMAIN][config_entry.entry_id]["devices"][
"binary_sensor"
]
],
True,
)
class TeslaBinarySensor(TeslaDevice, BinarySensorEntity):
"""Implement an Tesla binary sensor for parking and charger."""
@property
def device_class(self):
"""Return the class of this binary sensor."""
return (
self.tesla_device.sensor_type
if self.tesla_device.sensor_type in DEVICE_CLASSES
else None
)
@property
def is_on(self):
"""Return the state of the binary sensor."""
return self.tesla_device.get_value()
| 29
| 85
| 0.640034
|
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from . import DOMAIN as TESLA_DOMAIN
from .tesla_device import TeslaDevice
async def async_setup_entry(hass, config_entry, async_add_entities):
async_add_entities(
[
TeslaBinarySensor(
device,
hass.data[TESLA_DOMAIN][config_entry.entry_id]["coordinator"],
)
for device in hass.data[TESLA_DOMAIN][config_entry.entry_id]["devices"][
"binary_sensor"
]
],
True,
)
class TeslaBinarySensor(TeslaDevice, BinarySensorEntity):
@property
def device_class(self):
return (
self.tesla_device.sensor_type
if self.tesla_device.sensor_type in DEVICE_CLASSES
else None
)
@property
def is_on(self):
return self.tesla_device.get_value()
| true
| true
|
1c439e6349e5d084a088b8833c4b88c1aeebd6da
| 189
|
py
|
Python
|
scanpy/tests/test_sim.py
|
mkmkryu/scanpy2
|
f3db32a142dc31c1b628380db1c969a6d0b9dc3a
|
[
"BSD-3-Clause"
] | 1,171
|
2017-01-17T14:01:02.000Z
|
2022-03-31T23:02:57.000Z
|
scanpy/tests/test_sim.py
|
mkmkryu/scanpy2
|
f3db32a142dc31c1b628380db1c969a6d0b9dc3a
|
[
"BSD-3-Clause"
] | 1,946
|
2017-01-22T10:19:04.000Z
|
2022-03-31T17:13:03.000Z
|
scanpy/tests/test_sim.py
|
mkmkryu/scanpy2
|
f3db32a142dc31c1b628380db1c969a6d0b9dc3a
|
[
"BSD-3-Clause"
] | 499
|
2017-01-21T11:39:29.000Z
|
2022-03-23T13:57:35.000Z
|
import scanpy as sc
import numpy as np
def test_sim_toggleswitch():
adata = sc.tl.sim('toggleswitch')
    assert np.allclose(adata.X, sc.datasets.toggleswitch().X, np.finfo(np.float32).eps)
| 23.625
| 80
| 0.724868
|
import scanpy as sc
import numpy as np
def test_sim_toggleswitch():
adata = sc.tl.sim('toggleswitch')
    assert np.allclose(adata.X, sc.datasets.toggleswitch().X, np.finfo(np.float32).eps)
| true
| true
|
1c439ef1c9899ada0ed1adae7eefbd496b13ddb8
| 334
|
py
|
Python
|
cart/urls.py
|
Code-Institute-Submissions/kordianbird-GamerGarageMS4
|
5c771674e76c6b81c761a2282df62bff61667def
|
[
"OML"
] | null | null | null |
cart/urls.py
|
Code-Institute-Submissions/kordianbird-GamerGarageMS4
|
5c771674e76c6b81c761a2282df62bff61667def
|
[
"OML"
] | null | null | null |
cart/urls.py
|
Code-Institute-Submissions/kordianbird-GamerGarageMS4
|
5c771674e76c6b81c761a2282df62bff61667def
|
[
"OML"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.view_cart, name='view_cart'),
path('add/<item_id>/', views.add_to_cart, name='add_to_cart'),
path('remove/<item_id>/', views.remove_from_cart, name='remove_from_cart'),
path('adjust/<item_id>/', views.adjust_cart, name='adjust_cart'),
]
| 33.4
| 79
| 0.694611
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.view_cart, name='view_cart'),
path('add/<item_id>/', views.add_to_cart, name='add_to_cart'),
path('remove/<item_id>/', views.remove_from_cart, name='remove_from_cart'),
path('adjust/<item_id>/', views.adjust_cart, name='adjust_cart'),
]
| true
| true
|
1c43a009e316cddd46cc3993314f9f414db25862
| 2,094
|
py
|
Python
|
modules/settingsConf.py
|
mattiasoldani/anaKrys
|
9f6e4bcb72463c68fdccd6b65ec6bcf539346774
|
[
"MIT"
] | null | null | null |
modules/settingsConf.py
|
mattiasoldani/anaKrys
|
9f6e4bcb72463c68fdccd6b65ec6bcf539346774
|
[
"MIT"
] | 7
|
2021-01-12T15:19:36.000Z
|
2022-03-26T10:44:25.000Z
|
modules/settingsConf.py
|
mattiasoldani/anaKrys
|
9f6e4bcb72463c68fdccd6b65ec6bcf539346774
|
[
"MIT"
] | null | null | null |
from settings import *
###############################################################################
###############################################################################
def settingsSelect(boolTest, whichInput):
if not boolTest: # physics files -- either ROOT or ASCII or NPZ
print("looking for files with label %s in ./settings/" % whichInput)
return "settings.%s_runList" % whichInput, "settings.%s_settings" % whichInput
else: # test files -- either ROOT or ASCII
print("test mode: will operate with test settings & %s files" % whichInput)
return "settings.test.y20Test_runList", "settings.test.y20Test_settings"
###############################################################################
###############################################################################
def boolControlPrint(boolLoad, boolTest, fileType):
print("execution control booleans:")
print("data reload controller: %s" % str(boolLoad))
whichInput = (" (%s)" % fileType) if boolTest else ""
print("test mode controller: %s%s" % (str(boolTest), whichInput))
###############################################################################
###############################################################################
def settingsPrint(filePath, fileNameFormat, nRunToOpen, nRun0):
#####
# I/O-related
print("will work with run numbers(s)/type(s) in %s with format %s" % (filePath, fileNameFormat))
iRun = []
iType = []
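    # entries of nRunToOpen may be run numbers (keys of nRun0) or run
    # types (values of nRun0); resolve both into parallel run/type lists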
for i, iKey in enumerate(nRunToOpen):
if iKey in nRun0:
iRun.append(iKey)
iType.append(nRun0[iKey])
elif iKey in nRun0.values():
lsRuns = [s for s in nRun0 if iKey==nRun0[s]]
for j in range(len(lsRuns)):
iRun.append(lsRuns[j])
iType.append(iKey)
if len(iRun)>0:
for i in range(len(iRun)):
print("(%d/%d) %s %s" % (i+1, len(iRun), iRun[i], iType[i]))
else:
print("no runs selected for opening -- execution will only work if test mode is selected")
| 42.734694
| 100
| 0.475645
|
from settings import *
| true
| true
|
1c43a0552c4e97cf972971d47db51a0b2becb608
| 439
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/streamtube/hoverlabel/_namelengthsrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/streamtube/hoverlabel/_namelengthsrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/streamtube/hoverlabel/_namelengthsrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="namelengthsrc", parent_name="streamtube.hoverlabel", **kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| 31.357143
| 88
| 0.671982
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="namelengthsrc", parent_name="streamtube.hoverlabel", **kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| true
| true
|
1c43a0a7b2ff45d86431f6a45ca3ab07b55cbfdb
| 310
|
py
|
Python
|
src/image_downloader.py
|
aditipolkam/spotify-downloader
|
2b6f8027170771dda7713bf034f5c099187a9bed
|
[
"MIT"
] | null | null | null |
src/image_downloader.py
|
aditipolkam/spotify-downloader
|
2b6f8027170771dda7713bf034f5c099187a9bed
|
[
"MIT"
] | null | null | null |
src/image_downloader.py
|
aditipolkam/spotify-downloader
|
2b6f8027170771dda7713bf034f5c099187a9bed
|
[
"MIT"
] | 1
|
2022-01-23T10:40:17.000Z
|
2022-01-23T10:40:17.000Z
|
import urllib.request
import pathlib
def download_image(url, filepath, name):
    filename = name + ".jpg"
    img_path = pathlib.Path(filepath, filename)
    urllib.request.urlretrieve(url, img_path)
    return img_path
| 20.666667
| 53
| 0.732258
|
import urllib.request
import pathlib
def download_image(url, filepath, name):
    filename = name + ".jpg"
    img_path = pathlib.Path(filepath, filename)
    urllib.request.urlretrieve(url, img_path)
    return img_path
| true
|
1c43a2957e0ba3c3212221040eb1df78fccdd245
| 32,336
|
py
|
Python
|
tests/test_client/tests.py
|
victorliun/django
|
7def55c3f6716fcfa40a3bd5d0fbb2090588d81e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/test_client/tests.py
|
victorliun/django
|
7def55c3f6716fcfa40a3bd5d0fbb2090588d81e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/test_client/tests.py
|
victorliun/django
|
7def55c3f6716fcfa40a3bd5d0fbb2090588d81e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse
from django.test import (
Client, RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.urls import reverse_lazy
from .views import get_view, post_view, trace_view
@override_settings(ROOT_URLCONF='test_client.urls')
class ClientTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {'var': '\xf2'}
response = self.client.get('/get_view/', data)
# Check some response details
self.assertContains(response, 'This is a test')
self.assertEqual(response.context['var'], '\xf2')
self.assertEqual(response.templates[0].name, 'GET Template')
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty GET Template')
self.assertTemplateNotUsed(response, 'Empty POST Template')
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty POST Template')
self.assertTemplateNotUsed(response, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty POST Template')
def test_post(self):
"POST some data to a view"
post_data = {
'value': 37
}
response = self.client.post('/post_view/', post_data)
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['data'], '37')
self.assertEqual(response.templates[0].name, 'POST Template')
self.assertContains(response, 'Data received')
def test_trace(self):
"""TRACE a view"""
response = self.client.trace('/trace_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['method'], 'TRACE')
self.assertEqual(response.templates[0].name, 'TRACE Template')
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/header_view/")
self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
def test_response_attached_request(self):
"""
Check that the returned response has a ``request`` attribute with the
originating environ dict and a ``wsgi_request`` with the originating
``WSGIRequest`` instance.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, 'request'))
self.assertTrue(hasattr(response, 'wsgi_request'))
for key, value in response.request.items():
self.assertIn(key, response.wsgi_request.environ)
self.assertEqual(response.wsgi_request.environ[key], value)
def test_response_resolver_match(self):
"""
The response contains a ResolverMatch instance.
"""
response = self.client.get('/header_view/')
self.assertTrue(hasattr(response, 'resolver_match'))
def test_response_resolver_match_redirect_follow(self):
"""
The response ResolverMatch instance contains the correct
information when following redirects.
"""
response = self.client.get('/redirect_view/', follow=True)
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_response_resolver_match_regular_view(self):
"""
The response ResolverMatch instance contains the correct
information when accessing a regular view.
"""
response = self.client.get('/get_view/')
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?>
<library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>
"""
response = self.client.post("/raw_post_view/", test_doc,
content_type="text/xml")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
"GET a URL through http"
response = self.client.get('/secure_view/', secure=False)
self.assertFalse(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '80')
def test_secure(self):
"GET a URL through https"
response = self.client.get('/secure_view/', secure=True)
self.assertTrue(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '443')
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get('/redirect_view/')
# Check that the response was a 302 (redirect)
self.assertRedirects(response, '/get_view/')
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get('/redirect_view/', {'var': 'value'})
# Check if parameters are intact
self.assertRedirects(response, '/get_view/?var=value')
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get('/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect)
self.assertRedirects(response, '/get_view/', status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get('/temporary_redirect_view/')
# Check that the response was a 302 (non-permanent redirect)
self.assertRedirects(response, '/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get('/double_redirect_view/')
# Check that the response was a 302, and that
# the attempt to get the redirection location returned 301 when retrieved
self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get('/double_redirect_view/', follow=True)
self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 2)
def test_follow_relative_redirect(self):
"A URL with a relative redirect can be followed."
response = self.client.get('/accounts/', follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request['PATH_INFO'], '/accounts/login/')
def test_follow_relative_redirect_no_trailing_slash(self):
"A URL with a relative redirect with no trailing slash can be followed."
response = self.client.get('/accounts/no_trailing_slash', follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request['PATH_INFO'], '/accounts/login/')
def test_redirect_http(self):
"GET a URL that redirects to an http URI"
response = self.client.get('/http_redirect_view/', follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"GET a URL that redirects to an https URI"
response = self.client.get('/https_redirect_view/', follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get('/bad_view/')
# Check that the response was a 404, and that the content contains MAGIC
self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {
'text': 'Hello World',
'multi': ('b', 'c', 'e')
}
response = self.client.get('/form_view/', data=hints)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Form GET Template")
# Check that the multi-value data has been rolled out ok
self.assertContains(response, 'Select a valid choice.', 0)
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view/', post_data)
self.assertContains(response, 'This field is required.', 3)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get('/unknown_view/')
# Check that the response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get('/unknown_view/;some-parameter')
# Check that the path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(
INSTALLED_APPS=['django.contrib.auth'],
SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_view_with_login_when_sessions_app_is_not_installed(self):
self.test_view_with_login()
def test_view_with_force_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_force_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
"Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_force_login_and_custom_redirect(self):
"""
Request a page that is protected with
@login_required(redirect_field_name='redirect_to')
"""
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='nopassword')
self.assertFalse(login)
def test_view_with_inactive_login(self):
"""
        An inactive user may log in if the authentication backend allows it.
"""
credentials = {'username': 'inactive', 'password': 'password'}
self.assertFalse(self.client.login(**credentials))
with self.settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']):
self.assertTrue(self.client.login(**credentials))
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.AllowAllUsersModelBackend',
]
)
def test_view_with_inactive_force_login(self):
"Request a page that is protected with @login, but use an inactive login"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u2, backend='django.contrib.auth.backends.AllowAllUsersModelBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'inactive')
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
def test_logout_with_force_login(self):
"Request a logout after logging in"
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'test_client.auth_backends.TestClientBackend',
],
)
def test_force_login_with_backend(self):
"""
Request a page that is protected with @login_required when using
force_login() and passing a backend.
"""
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"Request a page that is protected with @permission_required but raises an exception"
# Get the page without logging in. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get('/django_project_redirect/')
self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
def test_external_redirect_with_fetch_error_msg(self):
"""
Check that assertRedirects without fetch_redirect_response=False raises
        a relevant ValueError rather than a nondescript AssertionError.
"""
response = self.client.get('/django_project_redirect/')
with self.assertRaisesMessage(ValueError, 'unable to fetch'):
self.assertRedirects(response, 'https://www.djangoproject.com/')
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
try:
self.client.session['tobacconist']
self.fail("Shouldn't have a session value")
except KeyError:
pass
self.client.post('/session_view/')
# Check that the session was modified
self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_sessions_app_is_not_installed(self):
self.test_session_modifying_view()
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE='django.contrib.sessions.backends.nonexistent',
)
def test_session_engine_is_invalid(self):
with self.assertRaisesMessage(ImportError, 'nonexistent'):
self.test_session_modifying_view()
def test_view_with_exception(self):
"Request a page that is known to throw an error"
with self.assertRaises(KeyError):
self.client.get("/broken_view/")
# Try the same assertion, a different way
try:
self.client.get('/broken_view/')
self.fail('Should raise an error')
except KeyError:
pass
def test_mail_sending(self):
"Test that mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
def test_reverse_lazy_decodes(self):
"Ensure reverse_lazy works in the test client"
data = {'var': 'data'}
response = self.client.get(reverse_lazy('get_view'), data)
# Check some response details
self.assertContains(response, 'This is a test')
def test_relative_redirect(self):
response = self.client.get('/accounts/')
self.assertRedirects(response, '/accounts/login/')
def test_relative_redirect_no_trailing_slash(self):
response = self.client.get('/accounts/no_trailing_slash')
self.assertRedirects(response, '/accounts/login/')
def test_mass_mail_sending(self):
"Test that mass mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mass_mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, 'First Test message')
self.assertEqual(mail.outbox[0].body, 'This is the first test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
self.assertEqual(mail.outbox[1].subject, 'Second Test message')
self.assertEqual(mail.outbox[1].body, 'This is the second test email')
self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
def test_exception_following_nested_client_request(self):
"""
A nested test client request shouldn't clobber exception signals from
the outer client request.
"""
with self.assertRaisesMessage(Exception, 'exception message'):
self.client.get('/nesting_exception_view/')
@override_settings(
MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'],
ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(SimpleTestCase):
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post('/post_view/', {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post('/post_view/', {})
self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertIs(hasattr(self.client, "i_am_customized"), True)
def _generic_view(request):
return HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(SimpleTestCase):
"""Tests for the request factory."""
# A mapping between names of HTTP/1.1 methods and their test views.
http_methods_and_views = (
('get', get_view),
('post', post_view),
('put', _generic_view),
('patch', _generic_view),
('delete', _generic_view),
('head', _generic_view),
('options', _generic_view),
('trace', trace_view),
)
def setUp(self):
self.request_factory = RequestFactory()
def test_request_factory(self):
"""The request factory implements all the HTTP/1.1 methods."""
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method('/somewhere/')
response = view(request)
self.assertEqual(response.status_code, 200)
def test_get_request_from_factory(self):
"""
The request factory returns a templated response for a GET request.
"""
request = self.request_factory.get('/somewhere/')
response = get_view(request)
self.assertContains(response, 'This is a test')
def test_trace_request_from_factory(self):
"""The request factory returns an echo response for a TRACE request."""
url_path = '/somewhere/'
request = self.request_factory.trace(url_path)
response = trace_view(request)
protocol = request.META["SERVER_PROTOCOL"]
echoed_request_line = "TRACE {} {}".format(url_path, protocol)
self.assertContains(response, echoed_request_line)
| 41.139949
| 111
| 0.670708
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse
from django.test import (
Client, RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.urls import reverse_lazy
from .views import get_view, post_view, trace_view
@override_settings(ROOT_URLCONF='test_client.urls')
class ClientTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
def test_get_view(self):
data = {'var': '\xf2'}
response = self.client.get('/get_view/', data)
self.assertContains(response, 'This is a test')
self.assertEqual(response.context['var'], '\xf2')
self.assertEqual(response.templates[0].name, 'GET Template')
def test_get_post_view(self):
response = self.client.get('/post_view/', {})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty GET Template')
self.assertTemplateNotUsed(response, 'Empty POST Template')
def test_empty_post(self):
response = self.client.post('/post_view/', {})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty POST Template')
self.assertTemplateNotUsed(response, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty POST Template')
def test_post(self):
post_data = {
'value': 37
}
response = self.client.post('/post_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['data'], '37')
self.assertEqual(response.templates[0].name, 'POST Template')
self.assertContains(response, 'Data received')
def test_trace(self):
response = self.client.trace('/trace_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['method'], 'TRACE')
self.assertEqual(response.templates[0].name, 'TRACE Template')
def test_response_headers(self):
response = self.client.get("/header_view/")
self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
def test_response_attached_request(self):
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, 'request'))
self.assertTrue(hasattr(response, 'wsgi_request'))
for key, value in response.request.items():
self.assertIn(key, response.wsgi_request.environ)
self.assertEqual(response.wsgi_request.environ[key], value)
def test_response_resolver_match(self):
response = self.client.get('/header_view/')
self.assertTrue(hasattr(response, 'resolver_match'))
def test_response_resolver_match_redirect_follow(self):
response = self.client.get('/redirect_view/', follow=True)
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_response_resolver_match_regular_view(self):
response = self.client.get('/get_view/')
self.assertEqual(response.resolver_match.url_name, 'get_view')
def test_raw_post(self):
test_doc = """<?xml version="1.0" encoding="utf-8"?>
<library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>
"""
response = self.client.post("/raw_post_view/", test_doc,
content_type="text/xml")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
response = self.client.get('/secure_view/', secure=False)
self.assertFalse(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '80')
def test_secure(self):
response = self.client.get('/secure_view/', secure=True)
self.assertTrue(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '443')
def test_redirect(self):
response = self.client.get('/redirect_view/')
self.assertRedirects(response, '/get_view/')
def test_redirect_with_query(self):
response = self.client.get('/redirect_view/', {'var': 'value'})
self.assertRedirects(response, '/get_view/?var=value')
def test_permanent_redirect(self):
response = self.client.get('/permanent_redirect_view/')
self.assertRedirects(response, '/get_view/', status_code=301)
def test_temporary_redirect(self):
response = self.client.get('/temporary_redirect_view/')
self.assertRedirects(response, '/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
response = self.client.get('/double_redirect_view/')
self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
response = self.client.get('/double_redirect_view/', follow=True)
self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 2)
def test_follow_relative_redirect(self):
response = self.client.get('/accounts/', follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request['PATH_INFO'], '/accounts/login/')
def test_follow_relative_redirect_no_trailing_slash(self):
response = self.client.get('/accounts/no_trailing_slash', follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request['PATH_INFO'], '/accounts/login/')
def test_redirect_http(self):
response = self.client.get('/http_redirect_view/', follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
response = self.client.get('/https_redirect_view/', follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
response = self.client.get('/bad_view/')
self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
hints = {
'text': 'Hello World',
'multi': ('b', 'c', 'e')
}
response = self.client.get('/form_view/', data=hints)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Form GET Template")
self.assertContains(response, 'Select a valid choice.', 0)
def test_incomplete_data_form(self):
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view/', post_data)
self.assertContains(response, 'This field is required.', 3)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_valid_form_with_template(self):
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
response = self.client.get('/unknown_view/')
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
response = self.client.get('/unknown_view/;some-parameter')
self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
def test_view_with_login(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(
INSTALLED_APPS=['django.contrib.auth'],
SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_view_with_login_when_sessions_app_is_not_installed(self):
self.test_view_with_login()
def test_view_with_force_login(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_force_login(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_force_login_and_custom_redirect(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
login = self.client.login(username='otheruser', password='nopassword')
self.assertFalse(login)
def test_view_with_inactive_login(self):
credentials = {'username': 'inactive', 'password': 'password'}
self.assertFalse(self.client.login(**credentials))
with self.settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']):
self.assertTrue(self.client.login(**credentials))
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.AllowAllUsersModelBackend',
]
)
def test_view_with_inactive_force_login(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u2, backend='django.contrib.auth.backends.AllowAllUsersModelBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'inactive')
def test_logout(self):
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
def test_logout_with_force_login(self):
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'test_client.auth_backends.TestClientBackend',
],
)
def test_force_login_with_backend(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/')
# Log in
self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
# Get the page without logging in. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get('/django_project_redirect/')
self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
def test_external_redirect_with_fetch_error_msg(self):
response = self.client.get('/django_project_redirect/')
with self.assertRaisesMessage(ValueError, 'unable to fetch'):
self.assertRedirects(response, 'https://www.djangoproject.com/')
def test_session_modifying_view(self):
# Session value isn't set initially
try:
self.client.session['tobacconist']
self.fail("Shouldn't have a session value")
except KeyError:
pass
self.client.post('/session_view/')
# Check that the session was modified
self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE='django.contrib.sessions.backends.file',
)
def test_sessions_app_is_not_installed(self):
self.test_session_modifying_view()
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE='django.contrib.sessions.backends.nonexistent',
)
def test_session_engine_is_invalid(self):
with self.assertRaisesMessage(ImportError, 'nonexistent'):
self.test_session_modifying_view()
def test_view_with_exception(self):
with self.assertRaises(KeyError):
self.client.get("/broken_view/")
# Try the same assertion, a different way
try:
self.client.get('/broken_view/')
self.fail('Should raise an error')
except KeyError:
pass
def test_mail_sending(self):
response = self.client.get('/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
def test_reverse_lazy_decodes(self):
data = {'var': 'data'}
response = self.client.get(reverse_lazy('get_view'), data)
# Check some response details
self.assertContains(response, 'This is a test')
def test_relative_redirect(self):
response = self.client.get('/accounts/')
self.assertRedirects(response, '/accounts/login/')
def test_relative_redirect_no_trailing_slash(self):
response = self.client.get('/accounts/no_trailing_slash')
self.assertRedirects(response, '/accounts/login/')
def test_mass_mail_sending(self):
response = self.client.get('/mass_mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, 'First Test message')
self.assertEqual(mail.outbox[0].body, 'This is the first test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
self.assertEqual(mail.outbox[1].subject, 'Second Test message')
self.assertEqual(mail.outbox[1].body, 'This is the second test email')
self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
def test_exception_following_nested_client_request(self):
with self.assertRaisesMessage(Exception, 'exception message'):
self.client.get('/nesting_exception_view/')
@override_settings(
MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'],
ROOT_URLCONF='test_client.urls',
)
class CSRFEnabledClientTests(SimpleTestCase):
def test_csrf_enabled_client(self):
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post('/post_view/', {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post('/post_view/', {})
self.assertEqual(response.status_code, 403)
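        # A minimal sketch of making a CSRF-checked POST succeed with such a
        # client (assumed token plumbing; the tested views live elsewhere):
        #   csrf_client.get('/form_view/')   # a form page sets the csrftoken cookie
        #   token = csrf_client.cookies['csrftoken'].value
        #   csrf_client.post('/post_view/', {}, HTTP_X_CSRFTOKEN=token)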
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
self.assertIs(hasattr(self.client, "i_am_customized"), True)
def _generic_view(request):
return HttpResponse(status=200)
@override_settings(ROOT_URLCONF='test_client.urls')
class RequestFactoryTest(SimpleTestCase):
# A mapping between names of HTTP/1.1 methods and their test views.
http_methods_and_views = (
('get', get_view),
('post', post_view),
('put', _generic_view),
('patch', _generic_view),
('delete', _generic_view),
('head', _generic_view),
('options', _generic_view),
('trace', trace_view),
)
def setUp(self):
self.request_factory = RequestFactory()
def test_request_factory(self):
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method('/somewhere/')
response = view(request)
self.assertEqual(response.status_code, 200)
def test_get_request_from_factory(self):
request = self.request_factory.get('/somewhere/')
response = get_view(request)
self.assertContains(response, 'This is a test')
def test_trace_request_from_factory(self):
url_path = '/somewhere/'
request = self.request_factory.trace(url_path)
response = trace_view(request)
protocol = request.META["SERVER_PROTOCOL"]
echoed_request_line = "TRACE {} {}".format(url_path, protocol)
self.assertContains(response, echoed_request_line)
| true
| true
|
1c43a3e89071329a74cdf1180bec926813985501
| 14,688
|
py
|
Python
|
Scripts/rum.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
Scripts/rum.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
Scripts/rum.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[14]:
import numpy as np
import matplotlib.pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
import psyneulink as pnl
# In[15]:
import psyneulink.core.components.functions.transferfunctions
nouns=['oak','pine','rose','daisy','canary','robin','salmon','sunfish']
relations=['is','has','can']
is_list=['living','living thing','plant','animal','tree','flower','bird','fish','big','green','red','yellow']
has_list=['roots','leaves','bark','branches','skin','feathers','wings','gills','scales']
can_list=['grow','move','swim','fly','breathe','breathe underwater','breathe air','walk','photosynthesize']
descriptors=[nouns,is_list,has_list,can_list]
truth_nouns=np.identity(len(nouns))
truth_is=np.zeros((len(nouns),len(is_list)))
truth_is[0,:]=[1,1,1,0,1,0,0,0,1,0,0,0]
truth_is[1,:]=[1,1,1,0,1,0,0,0,1,0,0,0]
truth_is[2,:]=[1,1,1,0,0,1,0,0,0,0,0,0]
truth_is[3,:]=[1,1,1,0,0,1,0,0,0,0,0,0]
truth_is[4,:]=[1,1,0,1,0,0,1,0,0,0,0,1]
truth_is[5,:]=[1,1,0,1,0,0,1,0,0,0,0,1]
truth_is[6,:]= [1,1,0,1,0,0,0,1,1,0,1,0]
truth_is[7,:]= [1,1,0,1,0,0,0,1,1,0,0,0]
truth_has=np.zeros((len(nouns),len(has_list)))
truth_has[0,:]= [1,1,1,1,0,0,0,0,0]
truth_has[1,:]= [1,1,1,1,0,0,0,0,0]
truth_has[2,:]= [1,1,0,0,0,0,0,0,0]
truth_has[3,:]= [1,1,0,0,0,0,0,0,0]
truth_has[4,:]= [0,0,0,0,1,1,1,0,0]
truth_has[5,:]= [0,0,0,0,1,1,1,0,0]
truth_has[6,:]= [0,0,0,0,0,0,0,1,1]
truth_has[7,:]= [0,0,0,0,0,0,0,1,1]
truth_can=np.zeros((len(nouns),len(can_list)))
truth_can[0,:]= [1,0,0,0,0,0,0,0,1]
truth_can[1,:]= [1,0,0,0,0,0,0,0,1]
truth_can[2,:]= [1,0,0,0,0,0,0,0,1]
truth_can[3,:]= [1,0,0,0,0,0,0,0,1]
truth_can[4,:]= [1,1,0,1,1,0,1,1,0]
truth_can[5,:]= [1,1,0,1,1,0,1,1,0]
truth_can[6,:]= [1,1,1,0,1,1,0,0,0]
truth_can[7,:]= [1,1,1,0,1,1,0,0,0]
truths=[[truth_nouns],[truth_is],[truth_has],[truth_can]]
#dict_is={'oak':truth_is[0,:],'pine':truth_is[1,:],'rose':truth_is[2,:],'daisy':truth_is[3,:],'canary':truth_is[4,:],'robin':truth_is[5,:],'salmon':truth_is[6,:],'sunfish':truth_is[7,:]}
# In[16]:
def gen_input_vals(nouns, relations):
X_1=np.identity(len(nouns))
X_2=np.identity(len(relations))
return(X_1,X_2)
# In[17]:
nouns_onehot,rels_onehot=gen_input_vals(nouns,relations)
r_1=np.shape(nouns_onehot)[0]
c_1=np.shape(nouns_onehot)[1]
r_2=np.shape(rels_onehot)[0]
c_2=np.shape(rels_onehot)[1]
# In[18]:
#TODO: make this threshold function PNL-friendly (done just below via a UserDefinedFunction).
#later, we want to be able to change our bias/threshold, but for now we stick with a hard-coded one.
def step(variable,params,context):
if np.sum(variable)<.5:
out=0
else:
out=1
return(out)
# In[19]:
Step=pnl.UserDefinedFunction(custom_function=step,
default_variable=np.zeros(4))
# In[20]:
#next we generalize this and apply it as the function for all the bins.
#we'd like to parameterize the one UDF by size, so a single definition serves every
#mechanism and removes redundant clutter; see the sketch after step_mech below.
step_mech=pnl.ProcessingMechanism(function=pnl.UserDefinedFunction(custom_function=step, default_variable=np.zeros(4)),
size=4,
name='step_mech')
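# A minimal sketch of that size generalization (hypothetical helper, defined
# here for illustration and not used by the rest of the script):
def make_step_mech(size, name):
    # one factory call per mechanism, instead of repeating the UDF boilerplate
    return pnl.ProcessingMechanism(
        function=pnl.UserDefinedFunction(custom_function=step,
                                         default_variable=np.zeros(size)),
        size=size,
        name=name)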
# In[21]:
nouns_in = pnl.TransferMechanism(name="nouns_input",default_variable=np.zeros(r_1))
rels_in = pnl.TransferMechanism(name="rels_input",default_variable=np.zeros(r_2))
h1 = pnl.TransferMechanism(name="hidden_nouns",
size=8,
function=psyneulink.core.components.functions.transferfunctions.Logistic)
h2 = pnl.TransferMechanism(name="hidden_mixed",
size=15,
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_I = pnl.TransferMechanism(name="sig_outs_I",
size=len(nouns),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_is = pnl.TransferMechanism(name="sig_outs_is",
size=len(is_list),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_has = pnl.TransferMechanism(name="sig_outs_has",
size=len(has_list),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_can = pnl.TransferMechanism(name="sig_outs_can",
size=len(can_list),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
#biases
bh1 = pnl.TransferMechanism(name="bias_hidden_nouns",
default_variable=np.zeros(8))
bh2 = pnl.TransferMechanism(name="bias_hidden_mixed",
default_variable=np.zeros(15))
bosI = pnl.TransferMechanism(name="bias_osI",
default_variable=np.zeros(len(nouns)))
bosi = pnl.TransferMechanism(name="bias_osi",
default_variable=np.zeros(len(is_list)))
bosh = pnl.TransferMechanism(name="bias_osh",
default_variable=np.zeros(len(has_list)))
bosc = pnl.TransferMechanism(name="bias_osc",
default_variable=np.zeros(len(can_list)))
#later, we'll change the out_bin_x functions to a UDF that does a step function.
# out_bin_I = pnl.TransferMechanism(name="binary_outs_I",
# size=len(nouns),
# function=pnl.Linear)
#
# out_bin_is = pnl.TransferMechanism(name="binary_outs_is",
# size=len(is_list),
# function=pnl.Linear)
#
# out_bin_has = pnl.TransferMechanism(name="binary_outs_has",
# size=len(has_list),
# function=pnl.Linear)
#
# out_bin_can = pnl.TransferMechanism(name="binary_outs_can",
# size=len(can_list),
# function=pnl.Linear)
#we'll need to add in biases too. That will come later.
# In[22]:
#I want to put in a mapping projection that just ensures all our weight matrices between sigs and bins is I.
mapII=pnl.MappingProjection(matrix=np.eye(len(nouns)),
name="mapII"
)
mapIi=pnl.MappingProjection(matrix=np.eye(len(is_list)),
name="mapIi"
)
mapIh=pnl.MappingProjection(matrix=np.eye(len(has_list)),
name="mapIh"
)
mapIc=pnl.MappingProjection(matrix=np.eye(len(can_list)),
name="mapIc"
)
# In[23]:
#This is where we build the processes.
p11=pnl.Process(pathway=[nouns_in,h1,h2],
learning=pnl.LEARNING)
p12=pnl.Process(pathway=[rels_in,h2],
learning=pnl.LEARNING)
p21=pnl.Process(pathway=[h2,out_sig_I],
learning=pnl.LEARNING)
p22=pnl.Process(pathway=[h2,out_sig_is],
learning=pnl.LEARNING)
p23=pnl.Process(pathway=[h2,out_sig_has],
learning=pnl.LEARNING)
p24=pnl.Process(pathway=[h2,out_sig_can],
learning=pnl.LEARNING)
# In[24]:
#These are the processes that transform sigs to bins
#
# p31=pnl.Process(pathway=[out_sig_I,
# mapII,
# out_bin_I],
# learning=pnl.LEARNING
# )
#
# p32=pnl.Process(pathway=[out_sig_is,
# mapIi,
# out_bin_is],
# learning=pnl.LEARNING
# )
#
# p33=pnl.Process(pathway=[out_sig_has,
# mapIh,
# out_bin_has],
# learning=pnl.LEARNING
# )
#
# p34=pnl.Process(pathway=[out_sig_can,
# mapIc,
# out_bin_can],
# learning=pnl.LEARNING
# )
# In[25]:
#Bias processes go here
bp1=pnl.Process(pathway=[bh1,h1],
learning=pnl.LEARNING
)
bp2=pnl.Process(pathway=[bh2,h2],
learning=pnl.LEARNING
)
bposI=pnl.Process(pathway=[bosI,out_sig_I],
learning=pnl.LEARNING
)
bposi=pnl.Process(pathway=[bosi,out_sig_is],
learning=pnl.LEARNING
)
bposh=pnl.Process(pathway=[bosh,out_sig_has],
learning=pnl.LEARNING
)
bposc=pnl.Process(pathway=[bosc,out_sig_can],
learning=pnl.LEARNING
)
# In[117]:
#This is where we put them all into a system
rumel_sys=pnl.System(processes=[p11,
bp1,
p12,
bp2,
p21,
bposI,
p22,
bposi,
p23,
bposh,
p24,
bposc,
# p31,
# p32,
# p33,
# p34
])
rumel_sys.show_graph(show_learning=True)
# In[26]:
#This is where we build multiple systems that separate the learning components from the non-learning components.
#This only compiles when the one above it does not.
#This might be a good bug to report...
#Additionally, for some reason this system is a clone of the one above, regardless of whether or not we include
#the one below. If the p3x processes are defined at all, they are automatically included in the system.
#What is going on here?
# rumel_sys2a=pnl.System(processes=[p11,
# bp1,
# p12,
# bp2,
# p21,
# bposI,
# p22,
# bposi,
# p23,
# bposh,
# p24,
# bposc])
# # In[147]:
#
#
# rumel_sys2b=pnl.System(processes=[
# p31,
# p32,
# p33,
# p34])
#
#
# # In[27]:
#
#
# rumel_sys2a.show_graph(output_fmt='jupyter')
#
#
# # In[97]:
#
#
# #so far, so hoopy. What we want is to not enable learning on our binaries. Just on our sigs.
#
#
# # In[100]:
#
#
# for noun in range(len(nouns)):
# for rel_out in range (3):
# rumel_sys.run(inputs={nouns_in: nouns_onehot[noun],
# rels_in: rels_onehot[rel_out],
# bh1: np.zeros(8),
# bh2: np.zeros(15),
# bosI: np.zeros(len(nouns)),
# bosi: np.zeros(len(is_list)),
# bosh: np.zeros(len(has_list)),
# bosc: np.zeros(len(can_list)),
# },
# targets={out_bin_I: nouns_onehot[noun],
# out_bin_is: truth_is[noun],
# out_bin_has: truth_has[noun],
# out_bin_can: truth_can[noun]
# }
# )
# #What we can do here, is build our inputs into a nested for loop
#
#
# # In[103]:
#
#
for noun in range(len(nouns)):
    for rel_out in range(len(relations)):
        rumel_sys.run(inputs={nouns_in: nouns_onehot[noun],
                              rels_in: rels_onehot[rel_out],
                              bh1: np.zeros(8),
                              bh2: np.zeros(15),
                              bosI: np.zeros(len(nouns)),
                              bosi: np.zeros(len(is_list)),
                              bosh: np.zeros(len(has_list)),
                              bosc: np.zeros(len(can_list)),
                              },
                      targets={out_sig_I: nouns_onehot[noun],
                               out_sig_is: truth_is[noun],
                               out_sig_has: truth_has[noun],
                               out_sig_can: truth_can[noun]
                               }
                      )
# #What we do here is build our inputs via a nested for loop
#
#
# # So far, what I have left to do includes:
# #
# # getting a threshold function up and running.
# # See "step", defined above. All we need to do is make it PNL friendly. :D
# #
# # # This is done
# #
# # also want to make sure that the weight matrices from sigs to bins are I and cannot learn
# #
# # # Setting them to I is done. But, if we turn off learning on them, we can't run the system at all, because nothing that can learn projects to a target mechanism. It doesn't matter where we set the targets. If we set targets at sigs, it says sigs don't project to a target mechanism (target mechs are getting attached to bins). If we set targets for bins, it says targets don't project to a target mechanism (target mechs are attached to bins, but bins can't learn).
# #
# # # I think I know a work-around that doesn't require a whole new system: reuse the duck-rabbit model's setup, mapping the sig outputs to 0/1 labels with the previously defined step function and taking the MSE of that (see the sketch below).
# #
# # # I think it's okay for us to still try to set up multiple systems with overlapping mechanisms...
# # # Information on our capacity to do this should be available in "compositions", but GitHub Pages is down right now. :/
# #
# # figure out how to turn on learning for some mechs and not for others without losing previously learned weights, either by avoiding reinitializing the system or by saving previously learned weights. :)
# # this might be something to talk to Jon about...
# #
# # To do this, we will almost certainly need to figure out how to put them into different systems and run the whole thing together.
# #
# # Actually seeing how it performs?
# #
# # Is that it?
# #
# # I also need to get my GitHub setup working so I can work in devel and the other branches. :| Still, good progress for today, I think. :)
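# # A minimal sketch of that MSE work-around (assumed element-wise use of the
# # threshold idea; note the script's own `step` sums its whole input):
# #
# # def binarize(sig_out):
# #     return (np.asarray(sig_out) >= 0.5).astype(int)
# #
# # def mse(pred, target):
# #     return float(np.mean((np.asarray(pred) - np.asarray(target)) ** 2))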
| 33.688073
| 470
| 0.540782
|
import numpy as np
import matplotlib.pyplot as plt
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
nouns=['oak','pine','rose','daisy','canary','robin','salmon','sunfish']
relations=['is','has','can']
is_list=['living','living thing','plant','animal','tree','flower','bird','fish','big','green','red','yellow']
has_list=['roots','leaves','bark','branches','skin','feathers','wings','gills','scales']
can_list=['grow','move','swim','fly','breathe','breathe underwater','breathe air','walk','photosynthesize']
descriptors=[nouns,is_list,has_list,can_list]
truth_nouns=np.identity(len(nouns))
truth_is=np.zeros((len(nouns),len(is_list)))
truth_is[0,:]=[1,1,1,0,1,0,0,0,1,0,0,0]
truth_is[1,:]=[1,1,1,0,1,0,0,0,1,0,0,0]
truth_is[2,:]=[1,1,1,0,0,1,0,0,0,0,0,0]
truth_is[3,:]=[1,1,1,0,0,1,0,0,0,0,0,0]
truth_is[4,:]=[1,1,0,1,0,0,1,0,0,0,0,1]
truth_is[5,:]=[1,1,0,1,0,0,1,0,0,0,0,1]
truth_is[6,:]= [1,1,0,1,0,0,0,1,1,0,1,0]
truth_is[7,:]= [1,1,0,1,0,0,0,1,1,0,0,0]
truth_has=np.zeros((len(nouns),len(has_list)))
truth_has[0,:]= [1,1,1,1,0,0,0,0,0]
truth_has[1,:]= [1,1,1,1,0,0,0,0,0]
truth_has[2,:]= [1,1,0,0,0,0,0,0,0]
truth_has[3,:]= [1,1,0,0,0,0,0,0,0]
truth_has[4,:]= [0,0,0,0,1,1,1,0,0]
truth_has[5,:]= [0,0,0,0,1,1,1,0,0]
truth_has[6,:]= [0,0,0,0,0,0,0,1,1]
truth_has[7,:]= [0,0,0,0,0,0,0,1,1]
truth_can=np.zeros((len(nouns),len(can_list)))
truth_can[0,:]= [1,0,0,0,0,0,0,0,1]
truth_can[1,:]= [1,0,0,0,0,0,0,0,1]
truth_can[2,:]= [1,0,0,0,0,0,0,0,1]
truth_can[3,:]= [1,0,0,0,0,0,0,0,1]
truth_can[4,:]= [1,1,0,1,1,0,1,1,0]
truth_can[5,:]= [1,1,0,1,1,0,1,1,0]
truth_can[6,:]= [1,1,1,0,1,1,0,0,0]
truth_can[7,:]= [1,1,1,0,1,1,0,0,0]
truths=[[truth_nouns],[truth_is],[truth_has],[truth_can]]
def gen_input_vals (nouns,relations):
X_1=np.identity(len(nouns))
X_2=np.identity(len(relations))
return(X_1,X_2)
nouns_onehot,rels_onehot=gen_input_vals(nouns,relations)
r_1=np.shape(nouns_onehot)[0]
c_1=np.shape(nouns_onehot)[1]
r_2=np.shape(rels_onehot)[0]
c_2=np.shape(rels_onehot)[1]
def step(variable,params,context):
if np.sum(variable)<.5:
out=0
else:
out=1
return(out)
# In[19]:
Step=pnl.UserDefinedFunction(custom_function=step,
default_variable=np.zeros(4))
# In[20]:
#we're on the part where we generalize this and apply it as the function for all the bins...
#redundancies and general clutter. lol
step_mech=pnl.ProcessingMechanism(function=pnl.UserDefinedFunction(custom_function=step, default_variable=np.zeros(4)),
size=4,
name='step_mech')
# In[21]:
nouns_in = pnl.TransferMechanism(name="nouns_input",default_variable=np.zeros(r_1))
rels_in = pnl.TransferMechanism(name="rels_input",default_variable=np.zeros(r_2))
h1 = pnl.TransferMechanism(name="hidden_nouns",
size=8,
function=psyneulink.core.components.functions.transferfunctions.Logistic)
h2 = pnl.TransferMechanism(name="hidden_mixed",
size=15,
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_I = pnl.TransferMechanism(name="sig_outs_I",
size=len(nouns),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_is = pnl.TransferMechanism(name="sig_outs_is",
size=len(is_list),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_has = pnl.TransferMechanism(name="sig_outs_has",
size=len(has_list),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
out_sig_can = pnl.TransferMechanism(name="sig_outs_can",
size=len(can_list),
function=psyneulink.core.components.functions.transferfunctions.Logistic)
#biases
bh1 = pnl.TransferMechanism(name="bias_hidden_nouns",
default_variable=np.zeros(8))
bh2 = pnl.TransferMechanism(name="bias_hidden_mixed",
default_variable=np.zeros(15))
bosI = pnl.TransferMechanism(name="bias_osI",
default_variable=np.zeros(len(nouns)))
bosi = pnl.TransferMechanism(name="bias_osi",
default_variable=np.zeros(len(is_list)))
bosh = pnl.TransferMechanism(name="bias_osh",
default_variable=np.zeros(len(has_list)))
bosc = pnl.TransferMechanism(name="bias_osc",
default_variable=np.zeros(len(can_list)))
#later, we'll change the out_bin_x functions to a UDF that does a step function.
# In[22]:
#I want to put in a mapping projection that just ensures all our weight matrices between sigs and bins is I.
mapII=pnl.MappingProjection(matrix=np.eye(len(nouns)),
name="mapII"
)
mapIi=pnl.MappingProjection(matrix=np.eye(len(is_list)),
name="mapIi"
)
mapIh=pnl.MappingProjection(matrix=np.eye(len(has_list)),
name="mapIh"
)
mapIc=pnl.MappingProjection(matrix=np.eye(len(can_list)),
name="mapIc"
)
# In[23]:
#This is where we build the processes.
p11=pnl.Process(pathway=[nouns_in,h1,h2],
learning=pnl.LEARNING)
p12=pnl.Process(pathway=[rels_in,h2],
learning=pnl.LEARNING)
p21=pnl.Process(pathway=[h2,out_sig_I],
learning=pnl.LEARNING)
p22=pnl.Process(pathway=[h2,out_sig_is],
learning=pnl.LEARNING)
p23=pnl.Process(pathway=[h2,out_sig_has],
learning=pnl.LEARNING)
p24=pnl.Process(pathway=[h2,out_sig_can],
learning=pnl.LEARNING)
# In[24]:
#These are the processes that transform sigs to bins
#
# p31=pnl.Process(pathway=[out_sig_I,
# mapII,
# out_bin_I],
# learning=pnl.LEARNING
# )
#
# p32=pnl.Process(pathway=[out_sig_is,
# mapIi,
# out_bin_is],
# learning=pnl.LEARNING
# )
#
# p33=pnl.Process(pathway=[out_sig_has,
# mapIh,
# out_bin_has],
# learning=pnl.LEARNING
# )
#
# p34=pnl.Process(pathway=[out_sig_can,
# mapIc,
# out_bin_can],
# learning=pnl.LEARNING
# )
# In[25]:
#Bias processes go here
bp1=pnl.Process(pathway=[bh1,h1],
learning=pnl.LEARNING
)
bp2=pnl.Process(pathway=[bh2,h2],
learning=pnl.LEARNING
)
bposI=pnl.Process(pathway=[bosI,out_sig_I],
learning=pnl.LEARNING
)
bposi=pnl.Process(pathway=[bosi,out_sig_is],
learning=pnl.LEARNING
)
bposh=pnl.Process(pathway=[bosh,out_sig_has],
learning=pnl.LEARNING
)
bposc=pnl.Process(pathway=[bosc,out_sig_can],
learning=pnl.LEARNING
)
# In[117]:
#This is where we put them all into a system
rumel_sys=pnl.System(processes=[p11,
bp1,
p12,
bp2,
p21,
bposI,
p22,
bposi,
p23,
bposh,
p24,
bposc,
# p31,
# p32,
# p33,
# p34
])
rumel_sys.show_graph(show_learning=True)
# In[26]:
#This is where we build multiple systems that separate the learning components from the non-learning components.
#This only compiles when the one above it does not.
#This might be a good bug to report...
#Additionally, for some reason this system is a clone of the one above, regardless of whether or not we include
#the one below. If the p3x processes are defined at all, they are automatically included in the system.
#What is going on here?
# rumel_sys2a=pnl.System(processes=[p11,
# bp1,
# p12,
# bp2,
# p21,
# bposI,
# p22,
# bposi,
# p23,
# bposh,
# p24,
# bposc])
# # In[147]:
#
#
# rumel_sys2b=pnl.System(processes=[
# p31,
# p32,
# p33,
# p34])
#
#
# # In[27]:
#
#
# rumel_sys2a.show_graph(output_fmt='jupyter')
#
#
# # In[97]:
#
#
# #so far, so hoopy. What we want is to not enable learning on our binaries. Just on our sigs.
#
#
# # In[100]:
#
#
# for noun in range(len(nouns)):
# for rel_out in range (3):
# rumel_sys.run(inputs={nouns_in: nouns_onehot[noun],
# rels_in: rels_onehot[rel_out],
# bh1: np.zeros(8),
# bh2: np.zeros(15),
# bosI: np.zeros(len(nouns)),
# bosi: np.zeros(len(is_list)),
# bosh: np.zeros(len(has_list)),
# bosc: np.zeros(len(can_list)),
# },
# targets={out_bin_I: nouns_onehot[noun],
# out_bin_is: truth_is[noun],
# out_bin_has: truth_has[noun],
# out_bin_can: truth_can[noun]
# }
# )
# #What we can do here, is build our inputs into a nested for loop
#
#
# # In[103]:
#
#
for noun in range(len(nouns)):
    for rel_out in range(len(relations)):
        rumel_sys.run(inputs={nouns_in: nouns_onehot[noun],
                              rels_in: rels_onehot[rel_out],
                              bh1: np.zeros(8),
                              bh2: np.zeros(15),
                              bosI: np.zeros(len(nouns)),
                              bosi: np.zeros(len(is_list)),
                              bosh: np.zeros(len(has_list)),
                              bosc: np.zeros(len(can_list)),
                              },
                      targets={out_sig_I: nouns_onehot[noun],
                               out_sig_is: truth_is[noun],
                               out_sig_has: truth_has[noun],
                               out_sig_can: truth_can[noun]
                               }
                      )
# #What we do here is build our inputs via a nested for loop
#
#
# # So far, what I have left to do includes:
# #
# # getting a threshold function up and running.
# # See "step", defined above. All we need to do is make it PNL friendly. :D
# #
# # # This is done
# #
# # also want to make sure that the weight matrices from sigs to bins are I and cannot learn
# #
# # # Setting them to I is done. But, if we turn off learning on them, we can't run the system at all, because nothing that can learn projects to a target mechanism. It doesn't matter where we set the targets. If we set targets at sigs, it says sigs don't project to a target mechanism (target mechs are getting attached to bins). If we set targets for bins, it says targets don't project to a target mechanism (target mechs are attached to bins, but bins can't learn).
| true
| true
|
1c43a4beccd3e3b8b93f9f00909ebd701fcc7ae1
| 1,898
|
py
|
Python
|
PiBlynk-py/14-terminals.py
|
BLavery/PyBlynk
|
96e0d5c42fa1f5eed91d41890856f065ed8b607a
|
[
"MIT"
] | 12
|
2017-09-08T12:26:48.000Z
|
2020-09-18T03:30:36.000Z
|
PiBlynk-py/14-terminals.py
|
BLavery/PyBlynk
|
96e0d5c42fa1f5eed91d41890856f065ed8b607a
|
[
"MIT"
] | 1
|
2018-01-21T19:07:49.000Z
|
2021-03-04T17:28:57.000Z
|
PiBlynk-py/14-terminals.py
|
BLavery/PyBlynk
|
96e0d5c42fa1f5eed91d41890856f065ed8b607a
|
[
"MIT"
] | 7
|
2018-02-13T21:54:46.000Z
|
2020-09-18T03:30:39.000Z
|
import os
from PiBlynk import Blynk
from mytoken import *
blynk = Blynk(token)
#------------------------------------------
import time
def timeNow():
return time.asctime()[11:19]
#------------------------------------
# terminal from APP into python interpreter
_last_cmd = ""
def pyterminal_h(value, pin, st):
global _last_cmd
cmd = value[0]
if cmd == ".":
cmd = _last_cmd
blynk.virtual_write(pin, "> "+cmd+"\n")
else:
_last_cmd = cmd
try:
        out = eval(cmd)
        if out is not None:
            outstr = "= "+repr(out)
        else:
            # eval succeeded but returned None (e.g. a print call);
            # set outstr so the virtual_write below never sees an unbound name.
            outstr = "= (OK)"
except:
try:
exec(cmd)
outstr = "= (OK)"
except Exception as e:
outstr = repr(e)
blynk.virtual_write(pin, outstr+"\n")
blynk.add_virtual_pin(14, None, pyterminal_h)
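# In the app terminal, sending "." re-runs the last python command (handled by
# the cmd == "." branch above); e.g. send "1+1", then "." to evaluate it again.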
#--------------------------------
# Terminal from APP into OS shell
def osterminal_h(value, pin, st):
try:
outstr = os.popen(value[0]).read()
except Exception as e:
outstr = repr(e)
blynk.virtual_write(pin, outstr)
blynk.add_virtual_pin(13, None, osterminal_h)
# These "terminals" esp the OS one could be dangerous vulnerability.
# Be sure you really, really want to keep this in regular installation !!!!
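# A minimal hardening sketch (hypothetical, not wired in here): whitelist the
# first word of each command before it ever reaches os.popen(), e.g.
# ALLOWED = {'uptime', 'date', 'df', 'ls'}
# def osterminal_safe_h(value, pin, st):
#     words = value[0].split()
#     if not words or words[0] not in ALLOWED:
#         blynk.virtual_write(pin, "blocked\n")
#         return
#     osterminal_h(value, pin, st)
# blynk.add_virtual_pin(13, None, osterminal_safe_h)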
#--------------------------------
def cnct_cb():
print ("Connected: "+ timeNow())
blynk.on_connect(cnct_cb)
blynk.run()
# AT APP:
# terminal widget Vpin 14 for python interactive terminal
# Note: you cannot create new variables in RPI's python,
# but you can interrogate blynk variables/functions from user or blynk script
# (eg gps.age() or blynk._token )
# terminal widget Vpin 13 for OS (shell) interactive terminal
# Note: the shell terminal has no memory between calls,
# so a "cd" to change directory immediately lapses again.
| 25.648649
| 82
| 0.557956
|
import os
from PiBlynk import Blynk
from mytoken import *
blynk = Blynk(token)
import time
def timeNow():
return time.asctime()[11:19]
_last_cmd = ""
def pyterminal_h(value, pin, st):
global _last_cmd
cmd = value[0]
if cmd == ".":
cmd = _last_cmd
blynk.virtual_write(pin, "> "+cmd+"\n")
else:
_last_cmd = cmd
try:
        out = eval(cmd)
        if out is not None:
            outstr = "= "+repr(out)
        else:
            outstr = "= (OK)"
except:
try:
exec(cmd)
outstr = "= (OK)"
except Exception as e:
outstr = repr(e)
blynk.virtual_write(pin, outstr+"\n")
blynk.add_virtual_pin(14, None, pyterminal_h)
def osterminal_h(value, pin, st):
try:
outstr = os.popen(value[0]).read()
except Exception as e:
outstr = repr(e)
blynk.virtual_write(pin, outstr)
blynk.add_virtual_pin(13, None, osterminal_h)
def cnct_cb():
print ("Connected: "+ timeNow())
blynk.on_connect(cnct_cb)
blynk.run()
# But you can interrogate blynk variables/functions from user or blynk script
# (eg gps.age() or blynk._token )
# terminal Vpin 13 for OS (shell) interactive terminal
# Note shell terminal has no memory between calls.
# So a "cd" to change directory, immediately lapses again.
| true
| true
|
1c43a4ff50f9df182226287026667f6a1751ef12
| 6,391
|
py
|
Python
|
codes/data/__init__.py
|
DamianoGiani/EGVSRprova
|
2cae74436f2bf864f061d63eadae079a328ed9ed
|
[
"MIT"
] | null | null | null |
codes/data/__init__.py
|
DamianoGiani/EGVSRprova
|
2cae74436f2bf864f061d63eadae079a328ed9ed
|
[
"MIT"
] | null | null | null |
codes/data/__init__.py
|
DamianoGiani/EGVSRprova
|
2cae74436f2bf864f061d63eadae079a328ed9ed
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from .paired_lmdb_dataset import PairedLMDBDataset
from .unpaired_lmdb_dataset import UnpairedLMDBDataset
from .paired_folder_dataset import PairedFolderDataset
from .mypaired_folder_dataset import MyPairedFolderDataset
def create_dataloader(opt, dataset_idx='train'):
# setup params
data_opt = opt['dataset'].get(dataset_idx)
degradation_type = opt['dataset']['degradation']['type']
# -------------- loader for training -------------- #
if dataset_idx == 'train':
# check dataset
assert data_opt['name'] in ('VimeoTecoGAN', 'VimeoTecoGAN-sub'), \
'Unknown Dataset: {}'.format(data_opt['name'])
if degradation_type == 'BI':
# create dataset
dataset = PairedLMDBDataset(
data_opt,
scale=opt['scale'],
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
elif degradation_type == 'BD':
# enlarge crop size to incorporate border size
sigma = opt['dataset']['degradation']['sigma']
enlarged_crop_size = data_opt['crop_size'] + 2 * int(sigma * 3.0)
# create dataset
dataset = UnpairedLMDBDataset(
data_opt,
crop_size=enlarged_crop_size, # override
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
else:
raise ValueError('Unrecognized degradation type: {}'.format(
degradation_type))
# create data loader
loader = DataLoader(
dataset=dataset,
batch_size=data_opt['batch_size'],
shuffle=True,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
# -------------- loader for testing -------------- #
elif dataset_idx.startswith('test'):
# create data loader
dataset = PairedFolderDataset(data_opt, scale=opt['scale'])
loader = DataLoader(
dataset=dataset,
batch_size=1,
shuffle=False,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
else:
raise ValueError('Unrecognized dataset index: {}'.format(dataset_idx))
return loader
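# A minimal usage sketch for the factory above (hypothetical option values; in
# this project `opt` normally comes from a parsed YAML config, and
# mycreate_dataloader below differs only in using MyPairedFolderDataset for tests):
# opt = {
#     'scale': 4,
#     'dataset': {
#         'degradation': {'type': 'BI'},
#         'train': {'name': 'VimeoTecoGAN', 'batch_size': 8,
#                   'num_workers': 4, 'pin_memory': True},
#     },
#     'train': {'tempo_extent': 10},
# }
# train_loader = create_dataloader(opt, dataset_idx='train')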
def mycreate_dataloader(opt, dataset_idx='train'):
# setup params
data_opt = opt['dataset'].get(dataset_idx)
degradation_type = opt['dataset']['degradation']['type']
# -------------- loader for training -------------- #
if dataset_idx == 'train':
# check dataset
assert data_opt['name'] in ('VimeoTecoGAN', 'VimeoTecoGAN-sub'), \
'Unknown Dataset: {}'.format(data_opt['name'])
if degradation_type == 'BI':
# create dataset
dataset = PairedLMDBDataset(
data_opt,
scale=opt['scale'],
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
elif degradation_type == 'BD':
# enlarge crop size to incorporate border size
sigma = opt['dataset']['degradation']['sigma']
enlarged_crop_size = data_opt['crop_size'] + 2 * int(sigma * 3.0)
# create dataset
dataset = UnpairedLMDBDataset(
data_opt,
crop_size=enlarged_crop_size, # override
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
else:
raise ValueError('Unrecognized degradation type: {}'.format(
degradation_type))
# create data loader
loader = DataLoader(
dataset=dataset,
batch_size=data_opt['batch_size'],
shuffle=True,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
# -------------- loader for testing -------------- #
elif dataset_idx.startswith('test'):
# create data loader
dataset = MyPairedFolderDataset(data_opt, scale=opt['scale'])
loader = DataLoader(
dataset=dataset,
batch_size=1,
shuffle=False,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
else:
raise ValueError('Unrecognized dataset index: {}'.format(dataset_idx))
return loader
def prepare_data(opt, data, kernel):
""" prepare gt, lr data for training
for BD degradation, generate lr data and remove border of gt data
for BI degradation, return data directly
"""
device = torch.device(opt['device'])
degradation_type = opt['dataset']['degradation']['type']
if degradation_type == 'BI':
gt_data, lr_data = data['gt'].to(device), data['lr'].to(device)
elif degradation_type == 'BD':
# setup params
scale = opt['scale']
sigma = opt['dataset']['degradation'].get('sigma', 1.5)
border_size = int(sigma * 3.0)
gt_with_border = data['gt'].to(device)
n, t, c, gt_h, gt_w = gt_with_border.size()
lr_h = (gt_h - 2 * border_size) // scale
lr_w = (gt_w - 2 * border_size) // scale
# generate lr data
gt_with_border = gt_with_border.view(n * t, c, gt_h, gt_w)
lr_data = F.conv2d(
gt_with_border, kernel, stride=scale, bias=None, padding=0)
lr_data = lr_data.view(n, t, c, lr_h, lr_w)
# remove gt border
gt_data = gt_with_border[
...,
border_size: border_size + scale * lr_h,
border_size: border_size + scale * lr_w
]
gt_data = gt_data.view(n, t, c, scale * lr_h, scale * lr_w)
else:
raise ValueError('Unrecognized degradation type: {}'.format(
degradation_type))
return { 'gt': gt_data, 'lr': lr_data }
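# Worked shape example for the 'BD' branch (illustrative numbers only): with
# scale=4 and sigma=1.5 the border is int(1.5*3.0) = 4 px, so a stored GT crop
# of 136x136 gives lr_h = lr_w = (136 - 2*4)//4 = 32 and a trimmed GT of
# 128x128 = scale*32; this is exactly why the training crop above is enlarged
# by 2*int(sigma*3.0) before cropping.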
| 35.904494
| 81
| 0.581756
|
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from .paired_lmdb_dataset import PairedLMDBDataset
from .unpaired_lmdb_dataset import UnpairedLMDBDataset
from .paired_folder_dataset import PairedFolderDataset
from .mypaired_folder_dataset import MyPairedFolderDataset
def create_dataloader(opt, dataset_idx='train'):
data_opt = opt['dataset'].get(dataset_idx)
degradation_type = opt['dataset']['degradation']['type']
if dataset_idx == 'train':
assert data_opt['name'] in ('VimeoTecoGAN', 'VimeoTecoGAN-sub'), \
'Unknown Dataset: {}'.format(data_opt['name'])
if degradation_type == 'BI':
dataset = PairedLMDBDataset(
data_opt,
scale=opt['scale'],
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
elif degradation_type == 'BD':
sigma = opt['dataset']['degradation']['sigma']
enlarged_crop_size = data_opt['crop_size'] + 2 * int(sigma * 3.0)
dataset = UnpairedLMDBDataset(
data_opt,
crop_size=enlarged_crop_size,
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
else:
raise ValueError('Unrecognized degradation type: {}'.format(
degradation_type))
loader = DataLoader(
dataset=dataset,
batch_size=data_opt['batch_size'],
shuffle=True,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
elif dataset_idx.startswith('test'):
dataset = PairedFolderDataset(data_opt, scale=opt['scale'])
loader = DataLoader(
dataset=dataset,
batch_size=1,
shuffle=False,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
else:
raise ValueError('Unrecognized dataset index: {}'.format(dataset_idx))
return loader
def mycreate_dataloader(opt, dataset_idx='train'):
data_opt = opt['dataset'].get(dataset_idx)
degradation_type = opt['dataset']['degradation']['type']
if dataset_idx == 'train':
assert data_opt['name'] in ('VimeoTecoGAN', 'VimeoTecoGAN-sub'), \
'Unknown Dataset: {}'.format(data_opt['name'])
if degradation_type == 'BI':
dataset = PairedLMDBDataset(
data_opt,
scale=opt['scale'],
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
elif degradation_type == 'BD':
sigma = opt['dataset']['degradation']['sigma']
enlarged_crop_size = data_opt['crop_size'] + 2 * int(sigma * 3.0)
dataset = UnpairedLMDBDataset(
data_opt,
crop_size=enlarged_crop_size,
tempo_extent=opt['train']['tempo_extent'],
moving_first_frame=opt['train'].get('moving_first_frame', False),
moving_factor=opt['train'].get('moving_factor', 1.0))
else:
raise ValueError('Unrecognized degradation type: {}'.format(
degradation_type))
loader = DataLoader(
dataset=dataset,
batch_size=data_opt['batch_size'],
shuffle=True,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
elif dataset_idx.startswith('test'):
dataset = MyPairedFolderDataset(data_opt, scale=opt['scale'])
loader = DataLoader(
dataset=dataset,
batch_size=1,
shuffle=False,
num_workers=data_opt['num_workers'],
pin_memory=data_opt['pin_memory'])
else:
raise ValueError('Unrecognized dataset index: {}'.format(dataset_idx))
return loader
def prepare_data(opt, data, kernel):
device = torch.device(opt['device'])
degradation_type = opt['dataset']['degradation']['type']
if degradation_type == 'BI':
gt_data, lr_data = data['gt'].to(device), data['lr'].to(device)
elif degradation_type == 'BD':
scale = opt['scale']
sigma = opt['dataset']['degradation'].get('sigma', 1.5)
border_size = int(sigma * 3.0)
gt_with_border = data['gt'].to(device)
n, t, c, gt_h, gt_w = gt_with_border.size()
lr_h = (gt_h - 2 * border_size) // scale
lr_w = (gt_w - 2 * border_size) // scale
gt_with_border = gt_with_border.view(n * t, c, gt_h, gt_w)
lr_data = F.conv2d(
gt_with_border, kernel, stride=scale, bias=None, padding=0)
lr_data = lr_data.view(n, t, c, lr_h, lr_w)
gt_data = gt_with_border[
...,
border_size: border_size + scale * lr_h,
border_size: border_size + scale * lr_w
]
gt_data = gt_data.view(n, t, c, scale * lr_h, scale * lr_w)
else:
raise ValueError('Unrecognized degradation type: {}'.format(
degradation_type))
return { 'gt': gt_data, 'lr': lr_data }
| true
| true
|
1c43a5d43acef9b7daa27745b07cce0f336cf9ba
| 200
|
py
|
Python
|
src/server/app/endpoints/thankyou/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | null | null | null |
src/server/app/endpoints/thankyou/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | null | null | null |
src/server/app/endpoints/thankyou/controllers.py
|
MatthiasRiener/DigiPen
|
9b4aff4a1c431e06d73733dc3dd3f3f3d4631704
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, Blueprint
thankyou = Blueprint("thankyou", __name__)
@thankyou.route('/', methods=["GET"])
def index():
return render_template('/thankyou/index.html')
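# A minimal registration sketch (hypothetical app factory; the real wiring
# lives elsewhere in this project):
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(thankyou, url_prefix='/thankyou')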
| 22.222222
| 51
| 0.73
|
from flask import Flask, render_template, Blueprint
thankyou = Blueprint("thankyou", __name__)
@thankyou.route('/', methods=["GET"])
def index():
return render_template('/thankyou/index.html')
| true
| true
|
1c43a6a7f69248cf21e59f6dbe57ecc16dc04e09
| 1,335
|
py
|
Python
|
WebBrickLibs/MiscLib/tests/TestCombinators.py
|
AndyThirtover/wb_gateway
|
69f9c870369085f4440033201e2fb263a463a523
|
[
"BSD-3-Clause"
] | null | null | null |
WebBrickLibs/MiscLib/tests/TestCombinators.py
|
AndyThirtover/wb_gateway
|
69f9c870369085f4440033201e2fb263a463a523
|
[
"BSD-3-Clause"
] | null | null | null |
WebBrickLibs/MiscLib/tests/TestCombinators.py
|
AndyThirtover/wb_gateway
|
69f9c870369085f4440033201e2fb263a463a523
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: TestCombinators.py 2612 2008-08-11 20:08:49Z graham.klyne $
#
# Unit testing for WebBrick library combinators
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys
import unittest
sys.path.append("../..")
from MiscLib.Combinators import *
class TestCombinators(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testApply(self):
# Is function application like BCPL? (fn can be a variable)
def ap(f,v): return f(v)
def inc(n): return n+1
assert ap(inc,2)==3
def testCurry(self):
def f(a,b,c): return a+b+c
g = curry(f,1,2)
assert g(3) == 6
def testCompose(self):
def f(a,b,c): return a+b+c
def g(a,b): return a*b
h = compose(f,g,1000,200)
assert h(3,4) == 1212, "h(3,4) is "+str(h(3,4))
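    # From the assertions above one can infer the combinators' (assumed) semantics:
    #   curry(f, *pre)(*rest)       == f(*pre, *rest)      e.g. g(3) == f(1,2,3) == 6
    #   compose(f, g, *post)(*args) == f(g(*args), *post)  e.g. h(3,4) == f(g(3,4),1000,200) == 1212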
# Code to run unit tests directly from command line.
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCombinators("testApply"))
suite.addTest(TestCombinators("testCurry"))
suite.addTest(TestCombinators("testCompose"))
return suite
if __name__ == "__main__":
# unittest.main()
runner = unittest.TextTestRunner()
runner.run(getTestSuite())
| 24.272727
| 68
| 0.632959
|
import sys
import unittest
sys.path.append("../..")
from MiscLib.Combinators import *
class TestCombinators(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def testApply(self):
def ap(f,v): return f(v)
def inc(n): return n+1
assert ap(inc,2)==3
def testCurry(self):
def f(a,b,c): return a+b+c
g = curry(f,1,2)
assert g(3) == 6
def testCompose(self):
def f(a,b,c): return a+b+c
def g(a,b): return a*b
h = compose(f,g,1000,200)
assert h(3,4) == 1212, "h(3,4) is "+str(h(3,4))
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCombinators("testApply"))
suite.addTest(TestCombinators("testCurry"))
suite.addTest(TestCombinators("testCompose"))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(getTestSuite())
| true
| true
|
1c43a6c922911852492067e97959f186a004ec8b
| 641
|
py
|
Python
|
setup.py
|
Deekshith1994/Recipes
|
5dfc98b249f6caf90571c037eb35560417b6818e
|
[
"Apache-2.0"
] | 591
|
2016-04-27T15:34:28.000Z
|
2019-02-28T21:28:15.000Z
|
setup.py
|
Deekshith1994/Recipes
|
5dfc98b249f6caf90571c037eb35560417b6818e
|
[
"Apache-2.0"
] | 11
|
2016-05-24T17:09:16.000Z
|
2018-10-17T10:02:10.000Z
|
setup.py
|
Deekshith1994/Recipes
|
5dfc98b249f6caf90571c037eb35560417b6818e
|
[
"Apache-2.0"
] | 120
|
2016-04-27T20:50:42.000Z
|
2019-02-25T03:48:31.000Z
|
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
import ingredient_phrase_tagger
requires, extra = ['Unidecode==0.04.14', 'pandas==0.17.1'], {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(
name='ingredient_phrase_tagger',
version='0.0.0.dev0',
description='Extract structured data from ingredient phrases using conditional random fields',
author='The New York Times Company',
author_email='',
license='Apache 2.0',
install_requires=requires,
packages=find_packages(),
package_dir={'ingredient_phrase_tagger': 'ingredient_phrase_tagger'},
**extra
)
| 25.64
| 98
| 0.711388
|
import sys
from setuptools import setup, find_packages
import ingredient_phrase_tagger
requires, extra = ['Unidecode==0.04.14', 'pandas==0.17.1'], {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(
name='ingredient_phrase_tagger',
version='0.0.0.dev0',
description='Extract structured data from ingredient phrases using conditional random fields',
author='The New York Times Company',
author_email='',
license='Apache 2.0',
install_requires=requires,
packages=find_packages(),
package_dir={'ingredient_phrase_tagger': 'ingredient_phrase_tagger'},
**extra
)
| true
| true
|
1c43a9a5a12f07ef89a7cb5e84279cee5f3d98be
| 8,831
|
py
|
Python
|
pyscf/grad/casscf.py
|
highlight0112/pyscf
|
4afbd42bad3e72db5bb94d8cacf1d5de76537bdd
|
[
"Apache-2.0"
] | null | null | null |
pyscf/grad/casscf.py
|
highlight0112/pyscf
|
4afbd42bad3e72db5bb94d8cacf1d5de76537bdd
|
[
"Apache-2.0"
] | null | null | null |
pyscf/grad/casscf.py
|
highlight0112/pyscf
|
4afbd42bad3e72db5bb94d8cacf1d5de76537bdd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
CASSCF analytical nuclear gradients
Ref.
J. Comput. Chem., 5, 589
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad.mp2 import _shell_prange
def kernel(mc, mo_coeff=None, ci=None, atmlst=None, mf_grad=None,
verbose=None):
if mo_coeff is None: mo_coeff = mc.mo_coeff
if ci is None: ci = mc.ci
if mf_grad is None: mf_grad = mc._scf.nuc_grad_method()
if mc.frozen is not None:
raise NotImplementedError
mol = mc.mol
ncore = mc.ncore
ncas = mc.ncas
nocc = ncore + ncas
nelecas = mc.nelecas
nao, nmo = mo_coeff.shape
nao_pair = nao * (nao+1) // 2
mo_occ = mo_coeff[:,:nocc]
mo_core = mo_coeff[:,:ncore]
mo_cas = mo_coeff[:,ncore:nocc]
casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)
# gfock = Generalized Fock, Adv. Chem. Phys., 69, 63
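    # Up to index conventions, this generalized Fock matrix reads
    #   F_pq = sum_r gamma_qr h_pr + sum_{rst} Gamma_qrst (pr|st),
    # with gamma/Gamma the one-/two-particle reduced density matrices; the
    # core (doubly occupied) and active contributions are assembled below.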
dm_core = numpy.dot(mo_core, mo_core.T) * 2
dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))
aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_occ, mo_cas), compact=False)
aapa = aapa.reshape(ncas,ncas,nocc,ncas)
vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))
h1 = mc.get_hcore()
vhf_c = vj[0] - vk[0] * .5
vhf_a = vj[1] - vk[1] * .5
gfock = reduce(numpy.dot, (mo_occ.T, h1 + vhf_c + vhf_a, mo_occ)) * 2
gfock[:,ncore:nocc] = reduce(numpy.dot, (mo_occ.T, h1 + vhf_c, mo_cas, casdm1))
gfock[:,ncore:nocc] += numpy.einsum('uviw,vuwt->it', aapa, casdm2)
dme0 = reduce(numpy.dot, (mo_occ, (gfock+gfock.T)*.5, mo_occ.T))
aapa = vj = vk = vhf_c = vhf_a = h1 = gfock = None
dm1 = dm_core + dm_cas
vhf1c, vhf1a = mf_grad.get_veff(mol, (dm_core, dm_cas))
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
diag_idx = numpy.arange(nao)
diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx
casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)
dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,
(0, nao, 0, nao)).reshape(ncas**2,nao,nao)
dm2buf = lib.pack_tril(dm2buf)
dm2buf[:,diag_idx] *= .5
dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)
casdm2 = casdm2_cc = None
if atmlst is None:
atmlst = range(mol.natm)
aoslices = mol.aoslice_by_atom()
de = numpy.zeros((len(atmlst),3))
max_memory = mc.max_memory - lib.current_memory()[0]
blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))
blksize = min(nao, max(2, blksize))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
h1ao = hcore_deriv(ia)
de[k] += numpy.einsum('xij,ij->x', h1ao, dm1)
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2
q1 = 0
for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):
q0, q1 = q1, q1 + nf
dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])
shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)
eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',
shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)
de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2
eri1 = None
de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], dm1[p0:p1]) * 2
de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2
de += mf_grad.grad_nuc(mol, atmlst)
return de
def as_scanner(mcscf_grad):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function requires one argument
"mol" as input and returns energy and first order nuclear derivatives.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples:
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1.1', verbose=0)
>>> mc_grad_scanner = mcscf.CASSCF(scf.RHF(mol), 4, 4).nuc_grad_method().as_scanner()
>>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))
>>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))
'''
from pyscf import gto
if isinstance(mcscf_grad, lib.GradScanner):
return mcscf_grad
logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)
class CASSCF_GradScanner(mcscf_grad.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
mc_scanner = self.base
e_tot = mc_scanner(mol)
self.mol = mol
de = self.kernel(**kwargs)
return e_tot, de
return CASSCF_GradScanner(mcscf_grad)
class Gradients(lib.StreamObject):
    '''Non-relativistic CASSCF analytical nuclear gradients'''
def __init__(self, mc):
self.base = mc
self.mol = mc.mol
self.stdout = mc.stdout
self.verbose = mc.verbose
self.max_memory = mc.max_memory
self.atmlst = None
self.de = None
self._keys = set(self.__dict__.keys())
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('\n')
if not self.base.converged:
log.warn('Ground state CASSCF not converged')
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
def kernel(self, mo_coeff=None, ci=None, atmlst=None, mf_grad=None,
verbose=None):
cput0 = (time.clock(), time.time())
log = logger.new_logger(self, verbose)
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
self.de = kernel(self.base, mo_coeff, ci, atmlst, mf_grad, log)
log.timer('CASSCF gradients', *cput0)
self._finalize()
return self.de
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s gradients ---------------',
self.base.__class__.__name__)
rhf_grad._write(self, self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
as_scanner = as_scanner
Grad = Gradients
from pyscf import mcscf
mcscf.mc1step.CASSCF.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
mol = gto.Mole()
mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol).run()
mc = mcscf.CASSCF(mf, 4, 4).run()
de = mc.Gradients().kernel()
print(lib.finger(de) - 0.019602220578635747)
mol = gto.Mole()
mol.verbose = 0
mol.atom = 'N 0 0 0; N 0 0 1.2'
mol.basis = 'sto3g'
mol.build()
mf = scf.RHF(mol).run()
mc = mcscf.CASSCF(mf, 4, 4).run()
de = kernel(mc)
mcs = mc.as_scanner()
mol.set_geom_('N 0 0 0; N 0 0 1.201')
e1 = mcs(mol)
mol.set_geom_('N 0 0 0; N 0 0 1.199')
e2 = mcs(mol)
print(de[1,2], (e1-e2)/0.002*lib.param.BOHR)
| 34.767717
| 89
| 0.611143
|
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad.mp2 import _shell_prange
def kernel(mc, mo_coeff=None, ci=None, atmlst=None, mf_grad=None,
verbose=None):
if mo_coeff is None: mo_coeff = mc.mo_coeff
if ci is None: ci = mc.ci
if mf_grad is None: mf_grad = mc._scf.nuc_grad_method()
if mc.frozen is not None:
raise NotImplementedError
mol = mc.mol
ncore = mc.ncore
ncas = mc.ncas
nocc = ncore + ncas
nelecas = mc.nelecas
nao, nmo = mo_coeff.shape
nao_pair = nao * (nao+1) // 2
mo_occ = mo_coeff[:,:nocc]
mo_core = mo_coeff[:,:ncore]
mo_cas = mo_coeff[:,ncore:nocc]
casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)
dm_core = numpy.dot(mo_core, mo_core.T) * 2
dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))
aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_occ, mo_cas), compact=False)
aapa = aapa.reshape(ncas,ncas,nocc,ncas)
vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))
h1 = mc.get_hcore()
vhf_c = vj[0] - vk[0] * .5
vhf_a = vj[1] - vk[1] * .5
gfock = reduce(numpy.dot, (mo_occ.T, h1 + vhf_c + vhf_a, mo_occ)) * 2
gfock[:,ncore:nocc] = reduce(numpy.dot, (mo_occ.T, h1 + vhf_c, mo_cas, casdm1))
gfock[:,ncore:nocc] += numpy.einsum('uviw,vuwt->it', aapa, casdm2)
dme0 = reduce(numpy.dot, (mo_occ, (gfock+gfock.T)*.5, mo_occ.T))
aapa = vj = vk = vhf_c = vhf_a = h1 = gfock = None
dm1 = dm_core + dm_cas
vhf1c, vhf1a = mf_grad.get_veff(mol, (dm_core, dm_cas))
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
diag_idx = numpy.arange(nao)
diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx
casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)
dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,
(0, nao, 0, nao)).reshape(ncas**2,nao,nao)
dm2buf = lib.pack_tril(dm2buf)
dm2buf[:,diag_idx] *= .5
dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)
casdm2 = casdm2_cc = None
if atmlst is None:
atmlst = range(mol.natm)
aoslices = mol.aoslice_by_atom()
de = numpy.zeros((len(atmlst),3))
max_memory = mc.max_memory - lib.current_memory()[0]
blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))
blksize = min(nao, max(2, blksize))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
h1ao = hcore_deriv(ia)
de[k] += numpy.einsum('xij,ij->x', h1ao, dm1)
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2
q1 = 0
for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):
q0, q1 = q1, q1 + nf
dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])
shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)
eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',
shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)
de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2
eri1 = None
de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], dm1[p0:p1]) * 2
de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2
de += mf_grad.grad_nuc(mol, atmlst)
return de
def as_scanner(mcscf_grad):
from pyscf import gto
if isinstance(mcscf_grad, lib.GradScanner):
return mcscf_grad
logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)
class CASSCF_GradScanner(mcscf_grad.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
mc_scanner = self.base
e_tot = mc_scanner(mol)
self.mol = mol
de = self.kernel(**kwargs)
return e_tot, de
return CASSCF_GradScanner(mcscf_grad)
class Gradients(lib.StreamObject):
def __init__(self, mc):
self.base = mc
self.mol = mc.mol
self.stdout = mc.stdout
self.verbose = mc.verbose
self.max_memory = mc.max_memory
self.atmlst = None
self.de = None
self._keys = set(self.__dict__.keys())
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('\n')
if not self.base.converged:
log.warn('Ground state CASSCF not converged')
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
def kernel(self, mo_coeff=None, ci=None, atmlst=None, mf_grad=None,
verbose=None):
cput0 = (time.clock(), time.time())
log = logger.new_logger(self, verbose)
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
self.de = kernel(self.base, mo_coeff, ci, atmlst, mf_grad, log)
log.timer('CASSCF gradients', *cput0)
self._finalize()
return self.de
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s gradients ---------------',
self.base.__class__.__name__)
rhf_grad._write(self, self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
as_scanner = as_scanner
Grad = Gradients
from pyscf import mcscf
mcscf.mc1step.CASSCF.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
mol = gto.Mole()
mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol).run()
mc = mcscf.CASSCF(mf, 4, 4).run()
de = mc.Gradients().kernel()
print(lib.finger(de) - 0.019602220578635747)
mol = gto.Mole()
mol.verbose = 0
mol.atom = 'N 0 0 0; N 0 0 1.2'
mol.basis = 'sto3g'
mol.build()
mf = scf.RHF(mol).run()
mc = mcscf.CASSCF(mf, 4, 4).run()
de = kernel(mc)
mcs = mc.as_scanner()
mol.set_geom_('N 0 0 0; N 0 0 1.201')
e1 = mcs(mol)
mol.set_geom_('N 0 0 0; N 0 0 1.199')
e2 = mcs(mol)
print(de[1,2], (e1-e2)/0.002*lib.param.BOHR)
| true
| true
|
1c43a9d34a103b5ab69d0cfba4759484f8792ead
| 3,080
|
py
|
Python
|
deprecated/Friedman.py
|
Klettgau/CSC-332
|
cf0563d1230cac124ed2146ab2e211a15f216c23
|
[
"MIT"
] | null | null | null |
deprecated/Friedman.py
|
Klettgau/CSC-332
|
cf0563d1230cac124ed2146ab2e211a15f216c23
|
[
"MIT"
] | null | null | null |
deprecated/Friedman.py
|
Klettgau/CSC-332
|
cf0563d1230cac124ed2146ab2e211a15f216c23
|
[
"MIT"
] | null | null | null |
import argparse
import ciphers.Affine as aff
import deprecated.CustomParser as CustomParser
import ciphers.Engima as Engima
import ciphers.Jefferson as jeff
import ciphers.Julius as jul
import ciphers.Vigenere as vig
# This was the original CLI for the project.
# Deprecated
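# Illustrative invocations (the cipher-specific arguments are defined by
# CustomParser.Parsely and are omitted here):
#   python Friedman.py -m 0 <caesar args>
#   python Friedman.py -m 4 <enigma args>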
def main():
grandfather = CustomParser.Parsely()
    parser = argparse.ArgumentParser(description="Please Enter 0 for Caesar Cipher\n"
                                             "\nPlease Enter 1 for Affine Cipher\n"
                                             "Please Enter 2 for Jefferson Cipher\n"
                                             "Please Enter 3 for Vigenere Cipher\n"
"Please Enter 4 for Enigma")
parser.add_argument("-m", "--mode", help="this picks the type of cipher", type=int, required=True)
args, unknown = parser.parse_known_args()
if args.mode == 0: # julius
custom_parser = grandfather.parser_jules()
custom_parser, unknown = custom_parser.parse_known_args()
jul.display_values(custom_parser.key, custom_parser.text)
encodeResult = jul.encode(custom_parser.text, custom_parser.key)
jul.print_text(encodeResult, 0)
decodedResult = jul.decode(encodeResult, custom_parser.key)
print("decode result")
jul.print_text(decodedResult, 1)
elif args.mode == 1: # affine
        # key = m, intercept = b (slope and intercept of the affine map)
custom_parser = grandfather.parser_jules()
print(".....")
custom_parser, unknown = custom_parser.parse_known_args()
proposed_value, bol = aff.check_coprime(custom_parser.key, 26)
if bol != True:
            print("Please pick one of the following coprime values", proposed_value)
else:
cipherText = aff.encode(custom_parser.text, custom_parser.key, custom_parser.intercept)
print("\nBelow is the Decoded Text \n")
aff.decode(cipherText, proposed_value, custom_parser.intercept)
# need to add the ability to organize all 26
elif args.mode == 2: # jeff
custom_parser = grandfather.parser_jeff()
custom_parser, unknown = custom_parser.parse_known_args()
cipherText = jeff.encode(custom_parser.text, custom_parser.key)
print(cipherText)
print(jeff.decode(cipherText, custom_parser.key))
elif args.mode == 3:
custom_parser = grandfather.parser_vig()
custom_parser, unknown = custom_parser.parse_known_args()
print(custom_parser.text, custom_parser.key)
resy = vig.encode(custom_parser.text, custom_parser.key)
print("Cipher-Text:", resy)
print("Plain-Text:", vig.decode(resy, custom_parser.key))
elif args.mode == 4:
custom_parser = grandfather.parser_enig()
custom_parser, unknown = custom_parser.parse_known_args()
eng = Engima.M3()
eng.vienna(custom_parser.text, custom_parser.stecker, custom_parser.ring, custom_parser.rotor,
custom_parser.startPos)
if __name__ == '__main__':
main()
| 44
| 102
| 0.647727
|
import argparse
import ciphers.Affine as aff
import deprecated.CustomParser as CustomParser
import ciphers.Engima as Engima
import ciphers.Jefferson as jeff
import ciphers.Julius as jul
import ciphers.Vigenere as vig
def main():
grandfather = CustomParser.Parsely()
    parser = argparse.ArgumentParser(description="Please Enter 0 for Caesar Cipher\n"
                                             "\nPlease Enter 1 for Affine Cipher\n"
                                             "Please Enter 2 for Jefferson Cipher\n"
                                             "Please Enter 3 for Vigenere Cipher\n"
"Please Enter 4 for Enigma")
parser.add_argument("-m", "--mode", help="this picks the type of cipher", type=int, required=True)
args, unknown = parser.parse_known_args()
if args.mode == 0:
custom_parser = grandfather.parser_jules()
custom_parser, unknown = custom_parser.parse_known_args()
jul.display_values(custom_parser.key, custom_parser.text)
encodeResult = jul.encode(custom_parser.text, custom_parser.key)
jul.print_text(encodeResult, 0)
decodedResult = jul.decode(encodeResult, custom_parser.key)
print("decode result")
jul.print_text(decodedResult, 1)
elif args.mode == 1:
custom_parser = grandfather.parser_jules()
print(".....")
custom_parser, unknown = custom_parser.parse_known_args()
proposed_value, bol = aff.check_coprime(custom_parser.key, 26)
if bol != True:
            print("Please pick one of the following coprime values", proposed_value)
else:
cipherText = aff.encode(custom_parser.text, custom_parser.key, custom_parser.intercept)
print("\nBelow is the Decoded Text \n")
aff.decode(cipherText, proposed_value, custom_parser.intercept)
elif args.mode == 2:
custom_parser = grandfather.parser_jeff()
custom_parser, unknown = custom_parser.parse_known_args()
cipherText = jeff.encode(custom_parser.text, custom_parser.key)
print(cipherText)
print(jeff.decode(cipherText, custom_parser.key))
elif args.mode == 3:
custom_parser = grandfather.parser_vig()
custom_parser, unknown = custom_parser.parse_known_args()
print(custom_parser.text, custom_parser.key)
resy = vig.encode(custom_parser.text, custom_parser.key)
print("Cipher-Text:", resy)
print("Plain-Text:", vig.decode(resy, custom_parser.key))
elif args.mode == 4:
custom_parser = grandfather.parser_enig()
custom_parser, unknown = custom_parser.parse_known_args()
eng = Engima.M3()
eng.vienna(custom_parser.text, custom_parser.stecker, custom_parser.ring, custom_parser.rotor,
custom_parser.startPos)
if __name__ == '__main__':
main()
| true
| true
|
1c43a9e8e3e044bdac8baa2d76c080754533826a
| 575
|
py
|
Python
|
month1/day12/demo08.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | 1
|
2021-02-02T02:17:37.000Z
|
2021-02-02T02:17:37.000Z
|
month1/day12/demo08.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | null | null | null |
month1/day12/demo08.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | null | null | null |
"""
Requirement: Lao Zhang drives to the Northeast
Variation: by plane / ship / bicycle ...
Ideas:
Encapsulation: decompose the requirement into multiple classes
(Person, Car, Airplane, Ship, Bicycle, ...)
Inheritance:
Polymorphism:
"""
class Person:
def __init__(self, name=""):
self.name = name
def drive(self, pos, vehicle):
print("去", pos)
vehicle.transport()
class Vehicle:
def transport(self):
pass
class Car(Vehicle):
def transport(self):
print("行驶")
class Airplane(Vehicle):
    # IDE shortcut to override methods: Ctrl + O
def transport(self):
print("飞行")
lz = Person("老张")
car = Car()
air = Airplane()
lz.drive("东北", car)
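# Polymorphism: Person.drive accepts any Vehicle subclass, so passing the
# Airplane instance swaps the behaviour without touching Person, e.g.:
#   lz.drive("东北", air)  # prints "飞行" (flying) instead of "行驶" (driving)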
| 16.428571
| 38
| 0.514783
|
class Person:
def __init__(self, name=""):
self.name = name
def drive(self, pos, vehicle):
print("去", pos)
vehicle.transport()
class Vehicle:
def transport(self):
pass
class Car(Vehicle):
def transport(self):
print("行驶")
class Airplane(Vehicle):
def transport(self):
print("飞行")
lz = Person("老张")
car = Car()
air = Airplane()
lz.drive("东北", car)
| true
| true
|
1c43a9f90f338a12da6807ed168ec5a7c59bd252
| 12,522
|
py
|
Python
|
dev_tool/__main__.py
|
ShenTengTu/mpy-dev-tool
|
b70c789ed743eb5089ec5b2b6f90e107bced152d
|
[
"MIT"
] | null | null | null |
dev_tool/__main__.py
|
ShenTengTu/mpy-dev-tool
|
b70c789ed743eb5089ec5b2b6f90e107bced152d
|
[
"MIT"
] | null | null | null |
dev_tool/__main__.py
|
ShenTengTu/mpy-dev-tool
|
b70c789ed743eb5089ec5b2b6f90e107bced152d
|
[
"MIT"
] | null | null | null |
from argparse import _SubParsersAction, Namespace
import shutil
from os import linesep
from . import (
realpath_join,
os_cwd,
os_walk_hash,
path_exists,
path_basename,
HERE,
DIST_DIR,
SUBMODULES_DIR,
EXT_LIB_DIR,
PYPROJECT_TOML,
)
from .toml_op import write_toml, read_toml
from .mpy import mpy_cross_version, mk_mpy, os_walk_mpy
from .github_api import git_source_choices, update_from_github, update_script_from_github
from .pyb_util import PyboardContextbuilder, pyboard_put_files
from .cli import CLI
def init_dev_tool_toml():
if not path_exists(PYPROJECT_TOML):
write_toml(PYPROJECT_TOML, {})
d = read_toml(PYPROJECT_TOML)
# ask user to configure
print("Configure 'pyproject.toml'...\n[dev_tool.*]")
module_name = input("- module name (dev_tool) : ")
if not module_name:
module_name = "dev_tool"
src_dir = input("- source directory (.) : ")
if not src_dir:
src_dir = "."
print("Write 'pyproject.toml'...")
# [dev_tool.*]
d.setdefault("dev_tool", {})
# [dev_tool.module]
d["dev_tool"].setdefault("module", {})
d["dev_tool"]["module"].setdefault("name", module_name)
d["dev_tool"]["module"].setdefault("src_dir", src_dir)
d["dev_tool"]["module"].setdefault("micropython-lib", [])
# [dev_tool.submodule_dependencies]
d["dev_tool"].setdefault("submodule_dependencies", {})
# [dev_tool.script_src.*]
d["dev_tool"].setdefault("script_src", {})
# [[dev_tool.script_src.gists]]
d["dev_tool"]["script_src"].setdefault(
"gists", [{"file": "", "gist_id": "", "sha": ""}]
)
# [[dev_tool.script_src.repo_contents]]
d["dev_tool"]["script_src"].setdefault(
"repo_contents",
[{"file": "", "owner": "", "repo": "", "path": "", "ref": "", "sha": ""}],
)
write_toml(PYPROJECT_TOML, d)
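# With the defaults accepted above, the resulting 'pyproject.toml' skeleton
# looks roughly like this (key order depends on the TOML writer in .toml_op):
#   [dev_tool.module]
#   name = "dev_tool"
#   src_dir = "."
#   micropython-lib = []
#   [dev_tool.submodule_dependencies]
#   [[dev_tool.script_src.gists]]
#   file = ""
#   gist_id = ""
#   sha = ""
#   # ... plus an analogous [[dev_tool.script_src.repo_contents]] entry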
class PyBoardActioin(_SubParsersAction):
    """
    Create a `PyboardContextbuilder` instance and add it to the namespace.
    """
    # Must inherit from "_SubParsersAction" because it is invoked in subparsers
def __init__(self, option_strings, dest, **kwargs):
# Fix TypeError: __init__() got multiple values for argument 'prog'
kwargs.pop("prog", None)
super().__init__(option_strings, dest, **kwargs)
        self.dest = dest  # Let the namespace contain the parser name
def __call__(self, parser, namespace, values, option_string=None):
super().__call__(parser, namespace, values, option_string)
sub_parser_name = str(getattr(namespace, self.dest))
if sub_parser_name.startswith(("pyboard_", "pyb_")):
# Avoid reassigning a new "PyboardContextbuilder" which will cause "PyboardError"
if "_pyb_context_builder_" not in namespace:
setattr(
namespace,
"_pyb_context_builder_",
PyboardContextbuilder(
namespace.port,
namespace.baud,
namespace.user,
namespace.password,
namespace.wait,
),
)
# #
MODULE_NAME = None
SRC_DIR = None
# CLI setting #
parser = CLI(
main_params=dict(prog="dev_tool", description="Dev tool"),
sub_params=dict(
        title="Task Commands",
        metavar="task_command",
description="All dev tasks as below.",
dest="Task",
required=True,
action=PyBoardActioin,
),
)
# PyBoard arguments #
def mk_pyboard_argument_group(parser_):
"""
Add argument group about PyBoard to argparse parser.
"""
pyb_args_g = parser_.add_argument_group("PyBoard arguments")
pyb_args_g.add_argument(
"-p",
"--port",
default="/dev/ttyACM0",
help="the serial device or the IP address of the pyboard",
)
pyb_args_g.add_argument(
"-b", "--baud", default=115200, help="the baud rate of the serial device"
)
pyb_args_g.add_argument(
"-u", "--user", default="micro", help="the telnet login username"
)
pyb_args_g.add_argument(
"-pw", "--password", default="python", help="the telnet login password"
)
pyb_args_g.add_argument(
"-w",
"--wait",
default=0,
type=int,
help="seconds to wait for USB connected board to become available",
)
pyb_args_g.add_argument(
"-dl",
"--delay",
default=3,
type=int,
help="seconds to wait before entering raw REPL",
)
return pyb_args_g
pyboard_args_g = mk_pyboard_argument_group(parser)
# Task commands #
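# Illustrative invocations, assuming the package is run as a module (each
# task below also registers the alias shown in its decorator):
#   python -m dev_tool init
#   python -m dev_tool mk_mpy
#   python -m dev_tool u_sub <submodule_name>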
@parser.sub_command(aliases=["init"], help="initialize `dev_tool`")
def tool_init(args):
if path_exists(PYPROJECT_TOML):
        print("'pyproject.toml' already exists.")
return
init_dev_tool_toml()
@parser.sub_command(
    aliases=["dl_ext"], help="download extra libraries locally from `micropython-lib`"
)
def download_ext_libs(args):
def file_filter(file_info):
file_name = str(file_info["name"])
return (
file_name.endswith(".py")
and not file_name.startswith(("test_", "example_"))
and file_name not in ["setup.py"]
)
d = read_toml(PYPROJECT_TOML)
if "micropython-lib" in d["dev_tool"]["module"]:
ext_lib_list = d["dev_tool"]["module"]["micropython-lib"]
ns = Namespace(
source="repo_contents",
ref="master",
parent_dir=EXT_LIB_DIR,
file_filter=file_filter,
)
meta = {
"owner": "micropython",
"repo": "micropython-lib",
"file": None,
"path": None,
"sha": None,
}
for lib_name in ext_lib_list:
print(lib_name, ":")
meta["file"] = lib_name
meta["path"] = lib_name
update_from_github(ns, meta)
# create __init__.py
init_py_path = realpath_join(EXT_LIB_DIR, lib_name, "__init__.py", normcase=False)
if not path_exists(init_py_path):
with open(init_py_path, "w", newline=linesep) as f:
f.write("")
@parser.sub_command_arg(
"submodule",
help="relative path of the submodule src directory in `submodules` folder.",
type=str,
)
@parser.sub_command(aliases=["a_sub"], help="add a submodule into the main module.")
def add_submodule(args):
submodule = args.submodule
submodule_dir = realpath_join(SUBMODULES_DIR, submodule)
if not path_exists(submodule_dir):
        print("The submodule does not exist: `%s`" % submodule_dir)
return
basename = path_basename(submodule_dir)
dst_dir = realpath_join(SRC_DIR, basename)
try:
shutil.copytree(submodule_dir, dst_dir)
except FileExistsError:
        print("`%s` already exists." % basename)
return
# record sub module dependencies into `pyproject.toml`
d = read_toml(PYPROJECT_TOML)
d["dev_tool"]["submodule_dependencies"][basename] = submodule
write_toml(PYPROJECT_TOML, d)
print("add sub module : `%s`" % basename)
@parser.sub_command_arg(
"submodule_name", help="submodule name in main module folder.", type=str
)
@parser.sub_command(aliases=["u_sub"], help="update a submodule in the main module.")
def update_submodule(args):
# read submodule_dependencies from PYPROJECT_TOML
submodule_name = args.submodule_name
d = read_toml(PYPROJECT_TOML)
submodule = d["dev_tool"]["submodule_dependencies"].get(submodule_name, None)
if submodule is None:
print("Use `dev_tool add_submodule <submodule>` first.")
return
submodule_dir = realpath_join(SUBMODULES_DIR, submodule)
if not path_exists(submodule_dir):
        print("The submodule does not exist: `%s`" % submodule_dir)
return
basename = path_basename(submodule_dir)
dst_dir = realpath_join(SRC_DIR, basename)
# get hash of each files by os_walk()
d_src = os_walk_hash(submodule_dir)
d_dst = os_walk_hash(dst_dir)
# compare hash then update files
for rel_path in d_src:
if d_src[rel_path] == d_dst[rel_path]:
continue
src_f = realpath_join(submodule_dir, rel_path)
dst_f = realpath_join(dst_dir, rel_path)
shutil.copy2(src_f, dst_f)
        print("Update `%s`" % dst_f)
@parser.sub_command(
aliases=["mk_mpy"], help="compile source `*.py` to `*.mpy` (save in `dist/mpy` folder)."
)
def make_mpy(args):
args_list = os_walk_mpy(SRC_DIR, realpath_join(DIST_DIR, "mpy/" + MODULE_NAME))
mpy_cross_version()
while args_list:
args_ = args_list.pop()
mk_mpy(*args_)
print("finished.")
@parser.sub_command_arg("file", help="script file basename.", type=str)
@parser.sub_command_arg("source", help="the script source", choices=git_source_choices)
@parser.sub_command_arg("-r", "--ref", help="The name of the commit/branch/tag", type=str)
@parser.sub_command_arg("-d", "--dev", help="for development", action="store_true")
@parser.sub_command_arg(
    "-t", "--toml", help="specify config toml file (the path is relative to CWD)", type=str
)
@parser.sub_command(
aliases=["u_scpt"], help="update the script file by github API (see pyproject.toml)."
)
def update_script(args):
if args.toml:
config_toml = realpath_join(os_cwd(), args.toml)
update_script_from_github(args, config_toml)
else:
update_script_from_github(args)
@parser.sub_command_arg("src", help="the dir path on the board", nargs="?", default="/")
@parser.sub_command(aliases=["pyb_ls"], help="pyboard: list the dir")
def pyboard_ls(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
pyb_context.pyb.fs_ls(args.src)
@parser.sub_command_arg("src", help="the file path on the board")
@parser.sub_command(aliases=["pyb_rm"], help="pyboard: remove the file")
def pyboard_rm(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
pyb_context.rm(args.src)
@parser.sub_command_arg(
"--timeout", help="timeout waiting for first EOF reception", type=int, default="10"
)
@parser.sub_command_arg("src", help="the file path on local")
@parser.sub_command(aliases=["pyb_exec_file"], help="pyboard: execute the local file")
def pyboard_exec_file(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
pyb_context.exec_file(args.src, args.timeout)
@parser.sub_command_arg(
    "--dest", help="dest dir path (based on pyboard root)", type=str, default="/"
)
@parser.sub_command_arg("src", help="src dir path (based on project root)", type=str)
@parser.sub_command(aliases=["pyb_puts"], help="pyboard: put local files to the board")
def pyboard_puts(args):
src_path = realpath_join(HERE, "../", args.src)
with args._pyb_context_builder_(args.delay) as pyb_context:
pyboard_put_files(pyb_context, [(src_path, args.dest)])
@parser.sub_command_arg(
"-f", "--files", help="specific files to install", nargs="+", type=str
)
@parser.sub_command(
aliases=["pyb_i"], help="pyboard: install mpy distribution to the board"
)
def pyboard_install(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
src_dir = realpath_join(DIST_DIR, "mpy/" + MODULE_NAME)
dest_dir = "lib/" + MODULE_NAME
pyboard_put_files(pyb_context, [(src_dir, dest_dir)], spec=args.files)
def main():
ns = parser.parse_args() # for `--help` can pass
if ns.Task in ["tool_init", "init", "pyboard_ls", "pyb_ls"]:
parser.handle_args(namespace=ns)
return
    # Check pyproject.toml and required settings
    if not path_exists(PYPROJECT_TOML):
        print(
            (
                "'pyproject.toml' does not exist.\n"
                + "Please use the `dev_tool init` command to initialize."
            )
        )
return
pyp_toml = read_toml(PYPROJECT_TOML)
if "dev_tool" not in pyp_toml:
print(
            (
                "'dev_tool' property does not exist.\n"
                + "Please use the `dev_tool init` command to initialize."
)
)
return
global MODULE_NAME, SRC_DIR
MODULE_NAME = pyp_toml["dev_tool"]["module"]["name"]
SRC_DIR = realpath_join(
HERE, "../", pyp_toml["dev_tool"]["module"]["src_dir"], MODULE_NAME
)
if not path_exists(SRC_DIR):
print(('Module source directory does not exist: "%s"') % SRC_DIR)
return
parser.handle_args(namespace=ns)
if __name__ == "__main__":
main()
| 32.866142
| 93
| 0.639435
|
from argparse import _SubParsersAction, Namespace
import shutil
from os import linesep
from . import (
realpath_join,
os_cwd,
os_walk_hash,
path_exists,
path_basename,
HERE,
DIST_DIR,
SUBMODULES_DIR,
EXT_LIB_DIR,
PYPROJECT_TOML,
)
from .toml_op import write_toml, read_toml
from .mpy import mpy_cross_version, mk_mpy, os_walk_mpy
from .github_api import git_source_choices, update_from_github, update_script_from_github
from .pyb_util import PyboardContextbuilder, pyboard_put_files
from .cli import CLI
def init_dev_tool_toml():
if not path_exists(PYPROJECT_TOML):
write_toml(PYPROJECT_TOML, {})
d = read_toml(PYPROJECT_TOML)
print("Configure 'pyproject.toml'...\n[dev_tool.*]")
module_name = input("- module name (dev_tool) : ")
if not module_name:
module_name = "dev_tool"
src_dir = input("- source directory (.) : ")
if not src_dir:
src_dir = "."
print("Write 'pyproject.toml'...")
d.setdefault("dev_tool", {})
d["dev_tool"].setdefault("module", {})
d["dev_tool"]["module"].setdefault("name", module_name)
d["dev_tool"]["module"].setdefault("src_dir", src_dir)
d["dev_tool"]["module"].setdefault("micropython-lib", [])
d["dev_tool"].setdefault("submodule_dependencies", {})
d["dev_tool"].setdefault("script_src", {})
d["dev_tool"]["script_src"].setdefault(
"gists", [{"file": "", "gist_id": "", "sha": ""}]
)
d["dev_tool"]["script_src"].setdefault(
"repo_contents",
[{"file": "", "owner": "", "repo": "", "path": "", "ref": "", "sha": ""}],
)
write_toml(PYPROJECT_TOML, d)
class PyBoardActioin(_SubParsersAction):
def __init__(self, option_strings, dest, **kwargs):
kwargs.pop("prog", None)
super().__init__(option_strings, dest, **kwargs)
self.dest = dest
def __call__(self, parser, namespace, values, option_string=None):
super().__call__(parser, namespace, values, option_string)
sub_parser_name = str(getattr(namespace, self.dest))
if sub_parser_name.startswith(("pyboard_", "pyb_")):
if "_pyb_context_builder_" not in namespace:
setattr(
namespace,
"_pyb_context_builder_",
PyboardContextbuilder(
namespace.port,
namespace.baud,
namespace.user,
namespace.password,
namespace.wait,
),
)
MODULE_NAME = None
SRC_DIR = None
parser = CLI(
main_params=dict(prog="dev_tool", description="Dev tool"),
sub_params=dict(
        title="Task Commands",
        metavar="task_command",
description="All dev tasks as below.",
dest="Task",
required=True,
action=PyBoardActioin,
),
)
def mk_pyboard_argument_group(parser_):
pyb_args_g = parser_.add_argument_group("PyBoard arguments")
pyb_args_g.add_argument(
"-p",
"--port",
default="/dev/ttyACM0",
help="the serial device or the IP address of the pyboard",
)
pyb_args_g.add_argument(
"-b", "--baud", default=115200, help="the baud rate of the serial device"
)
pyb_args_g.add_argument(
"-u", "--user", default="micro", help="the telnet login username"
)
pyb_args_g.add_argument(
"-pw", "--password", default="python", help="the telnet login password"
)
pyb_args_g.add_argument(
"-w",
"--wait",
default=0,
type=int,
help="seconds to wait for USB connected board to become available",
)
pyb_args_g.add_argument(
"-dl",
"--delay",
default=3,
type=int,
help="seconds to wait before entering raw REPL",
)
return pyb_args_g
pyboard_args_g = mk_pyboard_argument_group(parser)
@parser.sub_command(aliases=["init"], help="initialize `dev_tool`")
def tool_init(args):
if path_exists(PYPROJECT_TOML):
        print("'pyproject.toml' already exists.")
return
init_dev_tool_toml()
@parser.sub_command(
    aliases=["dl_ext"], help="download extra libraries locally from `micropython-lib`"
)
def download_ext_libs(args):
def file_filter(file_info):
file_name = str(file_info["name"])
return (
file_name.endswith(".py")
and not file_name.startswith(("test_", "example_"))
and file_name not in ["setup.py"]
)
d = read_toml(PYPROJECT_TOML)
if "micropython-lib" in d["dev_tool"]["module"]:
ext_lib_list = d["dev_tool"]["module"]["micropython-lib"]
ns = Namespace(
source="repo_contents",
ref="master",
parent_dir=EXT_LIB_DIR,
file_filter=file_filter,
)
meta = {
"owner": "micropython",
"repo": "micropython-lib",
"file": None,
"path": None,
"sha": None,
}
for lib_name in ext_lib_list:
print(lib_name, ":")
meta["file"] = lib_name
meta["path"] = lib_name
update_from_github(ns, meta)
init_py_path = realpath_join(EXT_LIB_DIR, lib_name, "__init__.py", normcase=False)
if not path_exists(init_py_path):
with open(init_py_path, "w", newline=linesep) as f:
f.write("")
@parser.sub_command_arg(
"submodule",
help="relative path of the submodule src directory in `submodules` folder.",
type=str,
)
@parser.sub_command(aliases=["a_sub"], help="add a submodule into the main module.")
def add_submodule(args):
submodule = args.submodule
submodule_dir = realpath_join(SUBMODULES_DIR, submodule)
if not path_exists(submodule_dir):
        print("The submodule does not exist: `%s`" % submodule_dir)
return
basename = path_basename(submodule_dir)
dst_dir = realpath_join(SRC_DIR, basename)
try:
shutil.copytree(submodule_dir, dst_dir)
except FileExistsError:
        print("`%s` already exists." % basename)
return
d = read_toml(PYPROJECT_TOML)
d["dev_tool"]["submodule_dependencies"][basename] = submodule
write_toml(PYPROJECT_TOML, d)
print("add sub module : `%s`" % basename)
@parser.sub_command_arg(
"submodule_name", help="submodule name in main module folder.", type=str
)
@parser.sub_command(aliases=["u_sub"], help="update a submodule in the main module.")
def update_submodule(args):
submodule_name = args.submodule_name
d = read_toml(PYPROJECT_TOML)
submodule = d["dev_tool"]["submodule_dependencies"].get(submodule_name, None)
if submodule is None:
print("Use `dev_tool add_submodule <submodule>` first.")
return
submodule_dir = realpath_join(SUBMODULES_DIR, submodule)
if not path_exists(submodule_dir):
        print("The submodule does not exist: `%s`" % submodule_dir)
return
basename = path_basename(submodule_dir)
dst_dir = realpath_join(SRC_DIR, basename)
d_src = os_walk_hash(submodule_dir)
d_dst = os_walk_hash(dst_dir)
for rel_path in d_src:
if d_src[rel_path] == d_dst[rel_path]:
continue
src_f = realpath_join(submodule_dir, rel_path)
dst_f = realpath_join(dst_dir, rel_path)
shutil.copy2(src_f, dst_f)
        print("Update `%s`" % dst_f)
@parser.sub_command(
aliases=["mk_mpy"], help="compile source `*.py` to `*.mpy` (save in `dist/mpy` folder)."
)
def make_mpy(args):
args_list = os_walk_mpy(SRC_DIR, realpath_join(DIST_DIR, "mpy/" + MODULE_NAME))
mpy_cross_version()
while args_list:
args_ = args_list.pop()
mk_mpy(*args_)
print("finished.")
@parser.sub_command_arg("file", help="script file basename.", type=str)
@parser.sub_command_arg("source", help="the script source", choices=git_source_choices)
@parser.sub_command_arg("-r", "--ref", help="The name of the commit/branch/tag", type=str)
@parser.sub_command_arg("-d", "--dev", help="for development", action="store_true")
@parser.sub_command_arg(
    "-t", "--toml", help="specify config toml file (the path is relative to CWD)", type=str
)
@parser.sub_command(
aliases=["u_scpt"], help="update the script file by github API (see pyproject.toml)."
)
def update_script(args):
if args.toml:
config_toml = realpath_join(os_cwd(), args.toml)
update_script_from_github(args, config_toml)
else:
update_script_from_github(args)
@parser.sub_command_arg("src", help="the dir path on the board", nargs="?", default="/")
@parser.sub_command(aliases=["pyb_ls"], help="pyboard: list the dir")
def pyboard_ls(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
pyb_context.pyb.fs_ls(args.src)
@parser.sub_command_arg("src", help="the file path on the board")
@parser.sub_command(aliases=["pyb_rm"], help="pyboard: remove the file")
def pyboard_rm(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
pyb_context.rm(args.src)
@parser.sub_command_arg(
"--timeout", help="timeout waiting for first EOF reception", type=int, default="10"
)
@parser.sub_command_arg("src", help="the file path on local")
@parser.sub_command(aliases=["pyb_exec_file"], help="pyboard: execute the local file")
def pyboard_exec_file(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
pyb_context.exec_file(args.src, args.timeout)
@parser.sub_command_arg(
    "--dest", help="dest dir path (based on pyboard root)", type=str, default="/"
)
@parser.sub_command_arg("src", help="src dir path (based on project root)", type=str)
@parser.sub_command(aliases=["pyb_puts"], help="pyboard: put local files to the board")
def pyboard_puts(args):
src_path = realpath_join(HERE, "../", args.src)
with args._pyb_context_builder_(args.delay) as pyb_context:
pyboard_put_files(pyb_context, [(src_path, args.dest)])
@parser.sub_command_arg(
"-f", "--files", help="specific files to install", nargs="+", type=str
)
@parser.sub_command(
aliases=["pyb_i"], help="pyboard: install mpy distribution to the board"
)
def pyboard_install(args):
with args._pyb_context_builder_(args.delay) as pyb_context:
src_dir = realpath_join(DIST_DIR, "mpy/" + MODULE_NAME)
dest_dir = "lib/" + MODULE_NAME
pyboard_put_files(pyb_context, [(src_dir, dest_dir)], spec=args.files)
def main():
ns = parser.parse_args()
if ns.Task in ["tool_init", "init", "pyboard_ls", "pyb_ls"]:
parser.handle_args(namespace=ns)
return
if not path_exists(PYPROJECT_TOML):
print(
            (
                "'pyproject.toml' does not exist.\n"
                + "Please use the `dev_tool init` command to initialize."
)
)
return
pyp_toml = read_toml(PYPROJECT_TOML)
if "dev_tool" not in pyp_toml:
print(
            (
                "'dev_tool' property does not exist.\n"
                + "Please use the `dev_tool init` command to initialize."
)
)
return
global MODULE_NAME, SRC_DIR
MODULE_NAME = pyp_toml["dev_tool"]["module"]["name"]
SRC_DIR = realpath_join(
HERE, "../", pyp_toml["dev_tool"]["module"]["src_dir"], MODULE_NAME
)
if not path_exists(SRC_DIR):
print(('Module source directory does not exist: "%s"') % SRC_DIR)
return
parser.handle_args(namespace=ns)
if __name__ == "__main__":
main()
| true
| true
|
1c43aa654c855e80acf368da55cae7fcc0829d48
| 4,181
|
py
|
Python
|
Tests/compat/sbs_newtype.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | 2
|
2019-09-21T22:22:30.000Z
|
2020-05-09T12:45:51.000Z
|
Tests/compat/sbs_newtype.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | null | null | null |
Tests/compat/sbs_newtype.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | null | null | null |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from common import *
class o1: pass
class o2: pass
class o3(o2): pass
class n1(object):
pass
class n2(object):
__slots__ = ['a', 'b']
class n3(object):
__slots__ = ['a', 'b', 'c']
class n4(n1): pass
class n5(n1):
__slots__ = ['e', 'f']
class n6(n2): pass
class n7(n2):
__slots__ = ['g', 'h', 'i']
class n8(object):
__slots__ = ['__dict__']
os = [eval("o%s" % i) for i in range(1, 4)]
ns = [eval("n%s" % i) for i in range(1, 9)]
alls = os + ns + [object, float]
def combinators(handle, items, n):
if n == 0:
yield []
return
for i, item in enumerate(items):
this = [item]
for others in combinators(handle, handle(items, i), n-1):
yield this + others
def combinations(items, n):
def skipIthItem(items, i):
return items[:i] + items[i+1:]
return combinators(skipIthItem, items, n)
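# Note: despite its name, combinations() enumerates ordered selections
# (k-permutations), since each recursion only skips the item just chosen:
#   list(combinations(['a', 'b'], 2)) == [['a', 'b'], ['b', 'a']]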
win_exception_map = {
'Cannot create a consistent method resolution' : 'mro order',
    'multiple bases have instance lay-out conflict' : 'lay-out conflict',
}
cli_exception_map = {
'invalid order for base classes' : 'mro order',
    'can only extend one CLI or builtin type' : 'lay-out conflict',
}
def get_exception_summary():
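    # `cond and a or b` below is the pre-"x if cond else y" conditional idiom;
    # it is safe here because both exception maps are non-empty (truthy).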
exception_map = is_cli and cli_exception_map or win_exception_map
for (x, y) in list(exception_map.items()):
if x in sys.exc_value.message:
return y
return sys.exc_value.message
count = 0
class test(object):
def test__pass(self):
global count
for i in range(1, 4):
for ts in combinations(alls, i):
new_class = "g%s" % count
count += 1
base_types = ', '.join([t.__name__ for t in ts])
code = "class %s(%s): pass" % (new_class, base_types)
try:
printwith("case", code)
exec(code, globals())
except:
printwith("same", get_exception_summary())
def test__with_slots(self):
global count
for i in range(1, 4):
for ts in combinations(alls, i):
new_class = "g%s" % count
count += 1
base_types = ', '.join([t.__name__ for t in ts])
code = "class %s(%s): __slots__ = 'abc'" % (new_class, base_types)
try:
printwith("case", code)
exec(code, globals())
except:
printwith("same", get_exception_summary())
# this depends on the first two tests.
# no good to look into the diff if the below tests still fail
def test_derive_from_g(self):
all_g = [x for x in dir(sys.modules[__name__]) if x[0] == 'g' and x[1:].isdigit()]
for y in all_g:
code = "class dg(%s): pass" % y # __slots__ = 'a'
try:
printwith("case", code)
exec(code)
except:
printwith("same", get_exception_summary())
# TODO: reduce the test case number by merging the first two like this:
# if count % 2 == 0:
# code = "class %s(%s): pass" % (new_class, base_types)
# else:
# code = "class %s(%s): __slots__ = 'abc'" % (new_class, base_types)
runtests(test)
| 31.916031
| 97
| 0.526429
| true
| true
|
|
1c43ab87ff3684e20b68bd114c2a295f62a3a3f9
| 17,256
|
py
|
Python
|
scripts/run_e2e_tests.py
|
tjinjoy/oppia
|
ed5ccbd95e42078457d40dde1dda02f1ae6a4354
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_e2e_tests.py
|
tjinjoy/oppia
|
ed5ccbd95e42078457d40dde1dda02f1ae6a4354
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_e2e_tests.py
|
tjinjoy/oppia
|
ed5ccbd95e42078457d40dde1dda02f1ae6a4354
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python execution for running e2e tests."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import atexit
import contextlib
import os
import re
import subprocess
import sys
import time
import python_utils
from scripts import build
from scripts import common
from scripts import install_chrome_on_travis
from scripts import install_third_party_libs
from scripts import setup
from scripts import setup_gae
CHROME_DRIVER_VERSION = '2.41'
WEB_DRIVER_PORT = 4444
GOOGLE_APP_ENGINE_PORT = 9001
OPPIA_SERVER_PORT = 8181
PROTRACTOR_BIN_PATH = os.path.join(
common.NODE_MODULES_PATH, 'protractor', 'bin', 'protractor')
CONSTANT_FILE_PATH = os.path.join(common.CURR_DIR, 'assets', 'constants.ts')
FECONF_FILE_PATH = os.path.join('feconf.py')
MAX_WAIT_TIME_FOR_PORT_TO_OPEN_SECS = 1000
WEBDRIVER_HOME_PATH = os.path.join(
common.NODE_MODULES_PATH, 'webdriver-manager')
WEBDRIVER_MANAGER_BIN_PATH = os.path.join(
WEBDRIVER_HOME_PATH, 'bin', 'webdriver-manager')
WEBDRIVER_PROVIDER_PATH = os.path.join(
WEBDRIVER_HOME_PATH, 'dist', 'lib', 'provider')
GECKO_PROVIDER_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'geckodriver.js')
CHROME_PROVIDER_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'chromedriver.js')
CHROME_PROVIDER_BAK_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'chromedriver.js.bak')
GECKO_PROVIDER_BAK_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'geckodriver.js.bak')
WEBPACK_BIN_PATH = os.path.join(
common.CURR_DIR, 'node_modules', 'webpack', 'bin', 'webpack.js')
PATTERN_FOR_REPLACE_WEBDRIVER_CODE = r'this\.osArch = os\.arch\(\);'
PROTRACTOR_CONFIG_FILE_PATH = os.path.join(
'core', 'tests', 'protractor.conf.js')
BROWSER_STACK_CONFIG_FILE_PATH = os.path.join(
'core', 'tests', 'protractor-browserstack.conf.js')
HASHES_FILE_PATH = os.path.join('assets', 'hashes.json')
_PARSER = argparse.ArgumentParser(description="""
Run this script from the oppia root folder:
bash scripts/run_e2e_tests.sh
The root folder MUST be named 'oppia'.
--suite=suite_name Performs test for different suites, here suites are the
name of the test files present in core/tests/protractor_desktop/ and
core/tests/protractor/ dirs. e.g. for the file
core/tests/protractor/accessibility.js use --suite=accessibility.
For performing a full test, no argument is required.
Note: You can replace 'it' with 'fit' or 'describe' with 'fdescribe' to run a
single test or test suite.
""")
_PARSER.add_argument(
'--skip-install',
help='If true, skips installing dependencies. The default value is false.',
action='store_true')
_PARSER.add_argument(
'--skip-build',
help='If true, skips building files. The default value is false.',
action='store_true')
_PARSER.add_argument(
'--sharding-instances', type=int, default=3,
    help='Sets the number of parallel browsers to open while sharding. '
'Sharding must be disabled (either by passing in false to --sharding'
' or 1 to --sharding-instances) if running any tests in isolation'
' (fit or fdescribe).')
_PARSER.add_argument(
'--prod_env',
help='Run the tests in prod mode. Static resources are served from'
' build directory and use cache slugs.',
action='store_true')
_PARSER.add_argument(
'--community_dashboard_enabled', action='store_true',
help='Run the test after enabling the community dashboard page.')
_PARSER.add_argument(
'--suite', default='full',
    help='Performs test for different suites, here suites are the '
    'name of the test files present in core/tests/protractor_desktop/ and '
    'core/tests/protractor/ dirs. e.g. for the file '
    'core/tests/protractor/accessibility.js use --suite=accessibility. '
    'For performing a full test, no argument is required.')
_PARSER.add_argument(
'--debug_mode',
help='Runs the protractor test in debugging mode. Follow the instruction '
'provided in following URL to run e2e tests in debugging mode: '
'https://www.protractortest.org/#/debugging#disabled-control-flow',
action='store_true')
# This list contains the subprocesses triggered by this script, including
# the oppia web server.
SUBPROCESSES = []
def ensure_screenshots_dir_is_removed():
    """Check if the screenshot directory exists and, if so, delete it."""
screenshots_dir = os.path.join(os.pardir, 'protractor-screenshots')
if not os.path.isdir(screenshots_dir):
return
python_utils.PRINT(
        'Note: If ADD_SCREENSHOT_REPORTER is set to true in '
        'core/tests/protractor.conf.js, you can view screenshots '
        'of the failed tests in ../protractor-screenshots/')
os.rmdir(screenshots_dir)
def cleanup():
"""Kill the running subprocesses and server fired in this program."""
dev_appserver_path = '%s/dev_appserver.py' % common.GOOGLE_APP_ENGINE_HOME
webdriver_download_path = '%s/downloads' % WEBDRIVER_HOME_PATH
if common.is_windows_os():
        # On Windows, the java command line will use an absolute path.
webdriver_download_path = os.path.abspath(webdriver_download_path)
processes_to_kill = [
'.*%s.*' % re.escape(dev_appserver_path),
'.*%s.*' % re.escape(webdriver_download_path)
]
for p in SUBPROCESSES:
p.kill()
for p in processes_to_kill:
common.kill_processes_based_on_regex(p)
def is_oppia_server_already_running():
"""Check if the ports are taken by any other processes. If any one of
them is taken, it may indicate there is already one Oppia instance running.
    Returns:
bool: Whether there is a running Oppia instance.
"""
running = False
for port in [OPPIA_SERVER_PORT, GOOGLE_APP_ENGINE_PORT]:
if common.is_port_open(port):
python_utils.PRINT(
                'There is already a server running on localhost:%s. '
                'Please terminate it before running the end-to-end tests. '
                'Exiting.' % port)
running = True
break
return running
def wait_for_port_to_be_open(port_number):
"""Wait until the port is open.
Args:
        port_number: int. The port number to wait for.
"""
waited_seconds = 0
while (not common.is_port_open(port_number) and
waited_seconds < MAX_WAIT_TIME_FOR_PORT_TO_OPEN_SECS):
time.sleep(1)
waited_seconds += 1
if (waited_seconds ==
MAX_WAIT_TIME_FOR_PORT_TO_OPEN_SECS and
not common.is_port_open(port_number)):
python_utils.PRINT(
'Failed to start server on port %s, exiting ...' % port_number)
sys.exit(1)
def run_webpack_compilation():
"""Runs webpack compilation."""
max_tries = 5
webpack_bundles_dir_name = 'webpack_bundles'
for _ in python_utils.RANGE(max_tries):
try:
subprocess.check_call([
common.NODE_BIN_PATH, WEBPACK_BIN_PATH, '--config',
'webpack.dev.config.ts'])
except subprocess.CalledProcessError as error:
python_utils.PRINT(error.output)
            sys.exit(error.returncode)
if os.path.isdir(webpack_bundles_dir_name):
break
if not os.path.isdir(webpack_bundles_dir_name):
python_utils.PRINT(
'Failed to complete webpack compilation, exiting ...')
sys.exit(1)
def update_dev_mode_in_constants_js(constant_file, dev_mode_setting):
"""Change constant file based on the running mode. Only the `DEV_MODE` line
should be changed.
Args:
constant_file: str. File path to the constant file.
dev_mode_setting: bool. Represents whether the program is running on dev
mode.
"""
pattern = '"DEV_MODE": .*'
replace = '"DEV_MODE": %s' % (
'true' if dev_mode_setting else 'false')
common.inplace_replace_file(constant_file, pattern, replace)
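# For example, update_dev_mode_in_constants_js(CONSTANT_FILE_PATH, False)
# rewrites the '"DEV_MODE": ...' line in assets/constants.ts to
# '"DEV_MODE": false'.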
def update_community_dashboard_status_in_feconf_file(
feconf_file_path, enable_community_dashboard):
"""Change feconf.py file based on whether the community dashboard is
enabled.
Args:
feconf_file_path: str. Path to the feconf.py file.
enable_community_dashboard: bool. Represents whether community
dashboard is enabled.
"""
pattern = 'COMMUNITY_DASHBOARD_ENABLED = .*'
replace = 'COMMUNITY_DASHBOARD_ENABLED = %s' % enable_community_dashboard
common.inplace_replace_file(feconf_file_path, pattern, replace)
def run_webdriver_manager(parameters):
"""Run commands of webdriver manager.
Args:
parameters: list(str). A list of parameters to pass to webdriver
manager.
"""
web_driver_command = [common.NODE_BIN_PATH, WEBDRIVER_MANAGER_BIN_PATH]
web_driver_command.extend(parameters)
python_utils.PRINT(common.run_cmd(web_driver_command))
def setup_and_install_dependencies(skip_install):
"""Run the setup and installation scripts."""
if not skip_install:
install_third_party_libs.main()
setup.main(args=[])
setup_gae.main(args=[])
if os.getenv('TRAVIS'):
install_chrome_on_travis.main(args=[])
def build_js_files(dev_mode_setting):
"""Build the javascript files.
Args:
dev_mode_setting: bool. Represents whether to run the related commands
in dev mode.
"""
update_dev_mode_in_constants_js(CONSTANT_FILE_PATH, dev_mode_setting)
if not dev_mode_setting:
python_utils.PRINT(' Generating files for production mode...')
build.main(args=['--prod_env'])
else:
# The 'hashes.json' file is used by the `url-interpolation` service.
if not os.path.isfile(HASHES_FILE_PATH):
with python_utils.open_file(HASHES_FILE_PATH, 'w') as hash_file:
hash_file.write('{}')
build.main(args=[])
run_webpack_compilation()
@contextlib.contextmanager
def tweak_webdriver_manager():
"""webdriver-manager (version 13.0.0) uses `os.arch()` to determine the
    architecture of the operating system; however, this function can only be
used to determine the architecture of the machine that compiled `node`.
In the case of Windows, we are using the portable version,
which was compiled on `ia32` machine so that is the value returned by this
`os.arch` function. Unfortunately, webdriver-manager seems to assume that
    Windows wouldn't run on the ia32 architecture, so its helper used to
    determine the download link returns null, which means that the
application has no idea about where to download the correct version.
https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L16
https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/geckodriver.ts#L21
https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L167
https://github.com/nodejs/node/issues/17036
"""
try:
if common.is_windows_os():
regex_pattern = PATTERN_FOR_REPLACE_WEBDRIVER_CODE
arch = 'x64' if common.is_x64_architecture() else 'x86'
replace = 'this.osArch = "%s";' % arch
common.inplace_replace_file(
CHROME_PROVIDER_FILE_PATH, regex_pattern, replace)
common.inplace_replace_file(
GECKO_PROVIDER_FILE_PATH, regex_pattern, replace)
yield
finally:
if common.is_windows_os():
undo_webdriver_tweak()
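# Note: the restore below relies on common.inplace_replace_file leaving a
# `.bak` backup next to each file it edits (an assumption suggested by the
# backup paths defined above); undo_webdriver_tweak swaps those backups back.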
def undo_webdriver_tweak():
"""Undo the tweak on webdriver manager's source code."""
if os.path.isfile(CHROME_PROVIDER_BAK_FILE_PATH):
os.remove(CHROME_PROVIDER_FILE_PATH)
os.rename(CHROME_PROVIDER_BAK_FILE_PATH, CHROME_PROVIDER_FILE_PATH)
if os.path.isfile(GECKO_PROVIDER_BAK_FILE_PATH):
os.remove(GECKO_PROVIDER_FILE_PATH)
os.rename(GECKO_PROVIDER_BAK_FILE_PATH, GECKO_PROVIDER_FILE_PATH)
def start_webdriver_manager():
"""Update and start webdriver manager."""
with tweak_webdriver_manager():
run_webdriver_manager(
['update', '--versions.chrome', CHROME_DRIVER_VERSION])
run_webdriver_manager(
['start', '--versions.chrome', CHROME_DRIVER_VERSION,
'--detach', '--quiet'])
def get_parameter_for_sharding(sharding_instances):
"""Return the parameter for sharding, based on the given number of
sharding instances.
Args:
sharding_instances: int. How many sharding instances to be running.
Returns:
list(str): A list of parameters to represent the sharding configuration.
"""
if sharding_instances <= 0:
raise ValueError('Sharding instance should be larger than 0')
if sharding_instances == 1:
return []
else:
return ['--capabilities.shardTestFiles=True',
'--capabilities.maxInstances=%s' % sharding_instances]
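# For example, get_parameter_for_sharding(3) returns
#   ['--capabilities.shardTestFiles=True', '--capabilities.maxInstances=3']
# while get_parameter_for_sharding(1) returns [] (sharding effectively off).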
def get_parameter_for_dev_mode(dev_mode_setting):
"""Return parameter for whether the test should be running on dev_mode.
Args:
dev_mode_setting: bool. Whether the test is running on dev_mode.
Returns:
str: A string for the testing mode command line parameter.
"""
return '--params.devMode=%s' % dev_mode_setting
def get_parameter_for_suite(suite_name):
"""Return a parameter for which suite to run the tests for.
Args:
suite_name: str. The suite name whose tests should be run. If the value
is `full`, all tests will run.
Returns:
list(str): A list of command line parameters for the suite.
"""
return ['--suite', suite_name]
def get_e2e_test_parameters(
        sharding_instances, suite_name, dev_mode_setting):
    """Return parameters for the end-to-end tests.
    Args:
        sharding_instances: int. Sets the number of parallel browsers to open
            while sharding.
        suite_name: str. Performs test for different suites.
        dev_mode_setting: bool. Represents whether to run the related
            commands in dev mode.
Returns:
list(str): Parameters for running the tests.
"""
sharding_parameters = get_parameter_for_sharding(sharding_instances)
dev_mode_parameters = get_parameter_for_dev_mode(dev_mode_setting)
suite_parameter = get_parameter_for_suite(suite_name)
commands = [PROTRACTOR_CONFIG_FILE_PATH]
commands.extend(sharding_parameters)
commands.extend(suite_parameter)
commands.append(dev_mode_parameters)
return commands
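# For example, get_e2e_test_parameters(3, 'accessibility', True) yields
#   ['core/tests/protractor.conf.js', '--capabilities.shardTestFiles=True',
#    '--capabilities.maxInstances=3', '--suite', 'accessibility',
#    '--params.devMode=True']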
def start_google_app_engine_server(dev_mode_setting):
"""Start the Google App Engine server.
Args:
dev_mode_setting: bool. Represents whether to run the related commands
in dev mode.
"""
app_yaml_filepath = 'app%s.yaml' % ('_dev' if dev_mode_setting else '')
p = subprocess.Popen(
'%s %s/dev_appserver.py --host 0.0.0.0 --port %s '
'--clear_datastore=yes --dev_appserver_log_level=critical '
'--log_level=critical --skip_sdk_update_check=true %s' % (
common.CURRENT_PYTHON_BIN, common.GOOGLE_APP_ENGINE_HOME,
GOOGLE_APP_ENGINE_PORT, app_yaml_filepath), shell=True)
SUBPROCESSES.append(p)
def main(args=None):
"""Run the scripts to start end-to-end tests."""
parsed_args = _PARSER.parse_args(args=args)
oppia_instance_is_already_running = is_oppia_server_already_running()
if oppia_instance_is_already_running:
sys.exit(1)
setup_and_install_dependencies(parsed_args.skip_install)
atexit.register(cleanup)
dev_mode = not parsed_args.prod_env
update_community_dashboard_status_in_feconf_file(
FECONF_FILE_PATH, parsed_args.community_dashboard_enabled)
if not parsed_args.skip_build:
build_js_files(dev_mode)
start_webdriver_manager()
start_google_app_engine_server(dev_mode)
wait_for_port_to_be_open(WEB_DRIVER_PORT)
wait_for_port_to_be_open(GOOGLE_APP_ENGINE_PORT)
ensure_screenshots_dir_is_removed()
commands = [common.NODE_BIN_PATH]
if parsed_args.debug_mode:
commands.append('--inspect-brk')
commands.append(PROTRACTOR_BIN_PATH)
commands.extend(get_e2e_test_parameters(
parsed_args.sharding_instances, parsed_args.suite, dev_mode))
p = subprocess.Popen(commands)
p.communicate()
sys.exit(p.returncode)
if __name__ == '__main__': # pragma: no cover
main()
| 36.252101
| 128
| 0.708391
|
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import atexit
import contextlib
import os
import re
import subprocess
import sys
import time
import python_utils
from scripts import build
from scripts import common
from scripts import install_chrome_on_travis
from scripts import install_third_party_libs
from scripts import setup
from scripts import setup_gae
CHROME_DRIVER_VERSION = '2.41'
WEB_DRIVER_PORT = 4444
GOOGLE_APP_ENGINE_PORT = 9001
OPPIA_SERVER_PORT = 8181
PROTRACTOR_BIN_PATH = os.path.join(
common.NODE_MODULES_PATH, 'protractor', 'bin', 'protractor')
CONSTANT_FILE_PATH = os.path.join(common.CURR_DIR, 'assets', 'constants.ts')
FECONF_FILE_PATH = os.path.join('feconf.py')
MAX_WAIT_TIME_FOR_PORT_TO_OPEN_SECS = 1000
WEBDRIVER_HOME_PATH = os.path.join(
common.NODE_MODULES_PATH, 'webdriver-manager')
WEBDRIVER_MANAGER_BIN_PATH = os.path.join(
WEBDRIVER_HOME_PATH, 'bin', 'webdriver-manager')
WEBDRIVER_PROVIDER_PATH = os.path.join(
WEBDRIVER_HOME_PATH, 'dist', 'lib', 'provider')
GECKO_PROVIDER_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'geckodriver.js')
CHROME_PROVIDER_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'chromedriver.js')
CHROME_PROVIDER_BAK_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'chromedriver.js.bak')
GECKO_PROVIDER_BAK_FILE_PATH = os.path.join(
WEBDRIVER_PROVIDER_PATH, 'geckodriver.js.bak')
WEBPACK_BIN_PATH = os.path.join(
common.CURR_DIR, 'node_modules', 'webpack', 'bin', 'webpack.js')
PATTERN_FOR_REPLACE_WEBDRIVER_CODE = r'this\.osArch = os\.arch\(\);'
PROTRACTOR_CONFIG_FILE_PATH = os.path.join(
'core', 'tests', 'protractor.conf.js')
BROWSER_STACK_CONFIG_FILE_PATH = os.path.join(
'core', 'tests', 'protractor-browserstack.conf.js')
HASHES_FILE_PATH = os.path.join('assets', 'hashes.json')
_PARSER = argparse.ArgumentParser(description="""
Run this script from the oppia root folder:
bash scripts/run_e2e_tests.sh
The root folder MUST be named 'oppia'.
--suite=suite_name Performs tests for different suites; here, suites are the
    names of the test files present in core/tests/protractor_desktop/ and
    core/tests/protractor/ dirs, e.g. for the file
core/tests/protractor/accessibility.js use --suite=accessibility.
For performing a full test, no argument is required.
Note: You can replace 'it' with 'fit' or 'describe' with 'fdescribe' to run a
single test or test suite.
""")
_PARSER.add_argument(
'--skip-install',
help='If true, skips installing dependencies. The default value is false.',
action='store_true')
_PARSER.add_argument(
'--skip-build',
help='If true, skips building files. The default value is false.',
action='store_true')
_PARSER.add_argument(
'--sharding-instances', type=int, default=3,
    help='Sets the number of parallel browsers to open while sharding. '
    'Sharding must be disabled (either by passing in false to --sharding'
' or 1 to --sharding-instances) if running any tests in isolation'
' (fit or fdescribe).')
_PARSER.add_argument(
'--prod_env',
help='Run the tests in prod mode. Static resources are served from'
' build directory and use cache slugs.',
action='store_true')
_PARSER.add_argument(
'--community_dashboard_enabled', action='store_true',
help='Run the test after enabling the community dashboard page.')
_PARSER.add_argument(
'--suite', default='full',
    help='Performs tests for different suites. Here, suites are the '
    'names of the test files present in the core/tests/protractor_desktop/ '
    'and core/tests/protractor/ dirs, e.g. for the file '
    'core/tests/protractor/accessibility.js use --suite=accessibility. '
    'For performing a full test, no argument is required.')
_PARSER.add_argument(
'--debug_mode',
help='Runs the protractor test in debugging mode. Follow the instruction '
'provided in following URL to run e2e tests in debugging mode: '
'https://www.protractortest.org/#/debugging#disabled-control-flow',
action='store_true')
SUBPROCESSES = []
def ensure_screenshots_dir_is_removed():
screenshots_dir = os.path.join(os.pardir, 'protractor-screenshots')
if not os.path.isdir(screenshots_dir):
return
python_utils.PRINT(
        'Note: If ADD_SCREENSHOT_REPORTER is set to true in '
        'core/tests/protractor.conf.js, you can view screenshots '
        'of the failed tests in ../protractor-screenshots/')
os.rmdir(screenshots_dir)
def cleanup():
dev_appserver_path = '%s/dev_appserver.py' % common.GOOGLE_APP_ENGINE_HOME
webdriver_download_path = '%s/downloads' % WEBDRIVER_HOME_PATH
if common.is_windows_os():
webdriver_download_path = os.path.abspath(webdriver_download_path)
processes_to_kill = [
'.*%s.*' % re.escape(dev_appserver_path),
'.*%s.*' % re.escape(webdriver_download_path)
]
for p in SUBPROCESSES:
p.kill()
for p in processes_to_kill:
common.kill_processes_based_on_regex(p)
def is_oppia_server_already_running():
running = False
for port in [OPPIA_SERVER_PORT, GOOGLE_APP_ENGINE_PORT]:
if common.is_port_open(port):
python_utils.PRINT(
            'There is already a server running on localhost:%s. '
            'Please terminate it before running the end-to-end tests. '
            'Exiting.' % port)
running = True
break
return running
def wait_for_port_to_be_open(port_number):
waited_seconds = 0
while (not common.is_port_open(port_number) and
waited_seconds < MAX_WAIT_TIME_FOR_PORT_TO_OPEN_SECS):
time.sleep(1)
waited_seconds += 1
if (waited_seconds ==
MAX_WAIT_TIME_FOR_PORT_TO_OPEN_SECS and
not common.is_port_open(port_number)):
python_utils.PRINT(
'Failed to start server on port %s, exiting ...' % port_number)
sys.exit(1)
def run_webpack_compilation():
max_tries = 5
webpack_bundles_dir_name = 'webpack_bundles'
for _ in python_utils.RANGE(max_tries):
try:
subprocess.check_call([
common.NODE_BIN_PATH, WEBPACK_BIN_PATH, '--config',
'webpack.dev.config.ts'])
except subprocess.CalledProcessError as error:
python_utils.PRINT(error.output)
sys.exit(error.returncode)
if os.path.isdir(webpack_bundles_dir_name):
break
if not os.path.isdir(webpack_bundles_dir_name):
python_utils.PRINT(
'Failed to complete webpack compilation, exiting ...')
sys.exit(1)
def update_dev_mode_in_constants_js(constant_file, dev_mode_setting):
pattern = '"DEV_MODE": .*'
replace = '"DEV_MODE": %s' % (
'true' if dev_mode_setting else 'false')
common.inplace_replace_file(constant_file, pattern, replace)
def update_community_dashboard_status_in_feconf_file(
feconf_file_path, enable_community_dashboard):
pattern = 'COMMUNITY_DASHBOARD_ENABLED = .*'
replace = 'COMMUNITY_DASHBOARD_ENABLED = %s' % enable_community_dashboard
common.inplace_replace_file(feconf_file_path, pattern, replace)
def run_webdriver_manager(parameters):
web_driver_command = [common.NODE_BIN_PATH, WEBDRIVER_MANAGER_BIN_PATH]
web_driver_command.extend(parameters)
python_utils.PRINT(common.run_cmd(web_driver_command))
def setup_and_install_dependencies(skip_install):
if not skip_install:
install_third_party_libs.main()
setup.main(args=[])
setup_gae.main(args=[])
if os.getenv('TRAVIS'):
install_chrome_on_travis.main(args=[])
def build_js_files(dev_mode_setting):
update_dev_mode_in_constants_js(CONSTANT_FILE_PATH, dev_mode_setting)
if not dev_mode_setting:
python_utils.PRINT(' Generating files for production mode...')
build.main(args=['--prod_env'])
else:
if not os.path.isfile(HASHES_FILE_PATH):
with python_utils.open_file(HASHES_FILE_PATH, 'w') as hash_file:
hash_file.write('{}')
build.main(args=[])
run_webpack_compilation()
@contextlib.contextmanager
def tweak_webdriver_manager():
try:
if common.is_windows_os():
regex_pattern = PATTERN_FOR_REPLACE_WEBDRIVER_CODE
arch = 'x64' if common.is_x64_architecture() else 'x86'
replace = 'this.osArch = "%s";' % arch
common.inplace_replace_file(
CHROME_PROVIDER_FILE_PATH, regex_pattern, replace)
common.inplace_replace_file(
GECKO_PROVIDER_FILE_PATH, regex_pattern, replace)
yield
finally:
if common.is_windows_os():
undo_webdriver_tweak()
def undo_webdriver_tweak():
if os.path.isfile(CHROME_PROVIDER_BAK_FILE_PATH):
os.remove(CHROME_PROVIDER_FILE_PATH)
os.rename(CHROME_PROVIDER_BAK_FILE_PATH, CHROME_PROVIDER_FILE_PATH)
if os.path.isfile(GECKO_PROVIDER_BAK_FILE_PATH):
os.remove(GECKO_PROVIDER_FILE_PATH)
os.rename(GECKO_PROVIDER_BAK_FILE_PATH, GECKO_PROVIDER_FILE_PATH)
def start_webdriver_manager():
with tweak_webdriver_manager():
run_webdriver_manager(
['update', '--versions.chrome', CHROME_DRIVER_VERSION])
run_webdriver_manager(
['start', '--versions.chrome', CHROME_DRIVER_VERSION,
'--detach', '--quiet'])
def get_parameter_for_sharding(sharding_instances):
if sharding_instances <= 0:
        raise ValueError('Sharding instances should be larger than 0')
if sharding_instances == 1:
return []
else:
return ['--capabilities.shardTestFiles=True',
'--capabilities.maxInstances=%s' % sharding_instances]
def get_parameter_for_dev_mode(dev_mode_setting):
return '--params.devMode=%s' % dev_mode_setting
def get_parameter_for_suite(suite_name):
return ['--suite', suite_name]
def get_e2e_test_parameters(
sharding_instances, suite_name, dev_mode_setting):
sharding_parameters = get_parameter_for_sharding(sharding_instances)
dev_mode_parameters = get_parameter_for_dev_mode(dev_mode_setting)
suite_parameter = get_parameter_for_suite(suite_name)
commands = [PROTRACTOR_CONFIG_FILE_PATH]
commands.extend(sharding_parameters)
commands.extend(suite_parameter)
commands.append(dev_mode_parameters)
return commands
def start_google_app_engine_server(dev_mode_setting):
app_yaml_filepath = 'app%s.yaml' % ('_dev' if dev_mode_setting else '')
p = subprocess.Popen(
'%s %s/dev_appserver.py --host 0.0.0.0 --port %s '
'--clear_datastore=yes --dev_appserver_log_level=critical '
'--log_level=critical --skip_sdk_update_check=true %s' % (
common.CURRENT_PYTHON_BIN, common.GOOGLE_APP_ENGINE_HOME,
GOOGLE_APP_ENGINE_PORT, app_yaml_filepath), shell=True)
SUBPROCESSES.append(p)
def main(args=None):
parsed_args = _PARSER.parse_args(args=args)
oppia_instance_is_already_running = is_oppia_server_already_running()
if oppia_instance_is_already_running:
sys.exit(1)
setup_and_install_dependencies(parsed_args.skip_install)
atexit.register(cleanup)
dev_mode = not parsed_args.prod_env
update_community_dashboard_status_in_feconf_file(
FECONF_FILE_PATH, parsed_args.community_dashboard_enabled)
if not parsed_args.skip_build:
build_js_files(dev_mode)
start_webdriver_manager()
start_google_app_engine_server(dev_mode)
wait_for_port_to_be_open(WEB_DRIVER_PORT)
wait_for_port_to_be_open(GOOGLE_APP_ENGINE_PORT)
ensure_screenshots_dir_is_removed()
commands = [common.NODE_BIN_PATH]
if parsed_args.debug_mode:
commands.append('--inspect-brk')
commands.append(PROTRACTOR_BIN_PATH)
commands.extend(get_e2e_test_parameters(
parsed_args.sharding_instances, parsed_args.suite, dev_mode))
p = subprocess.Popen(commands)
p.communicate()
sys.exit(p.returncode)
if __name__ == '__main__':
main()
| true
| true
|
1c43ac745ef32675e0e5e8a38ff7ea9b049dc90b
| 1,380
|
py
|
Python
|
Leetcode/133.clone-graph.py
|
EdwaRen/Competitve-Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | 1
|
2021-05-03T21:48:25.000Z
|
2021-05-03T21:48:25.000Z
|
Leetcode/133.clone-graph.py
|
EdwaRen/Competitve_Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | null | null | null |
Leetcode/133.clone-graph.py
|
EdwaRen/Competitve_Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | null | null | null |
"""
# Definition for a Node.
"""
class Node(object):
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
class Solution(object):
def cloneGraph(self, node):
"""
:type node: Node
:rtype: Node
"""
        # Dict mapping original node -> cloned node
self.dp = {}
stack = [node]
def recurse(orig_node):
if orig_node in self.dp:
return self.dp[orig_node]
else:
created_node = Node(orig_node.val, [])
self.dp[orig_node] = created_node
for n in orig_node.neighbors:
created_node.neighbors.append(recurse(n))
return created_node
recurse(node)
return self.dp[node]
z = Solution()
a = Node(1, [])
b = Node(2, [])
c = Node(3, [])
d = Node(4, [])
a.neighbors = [b, d]
b.neighbors = [a, c]
c.neighbors = [b, d]
d.neighbors = [a, c]
res = z.cloneGraph(a)
seen = []
boomer_nodes = [a, b, c, d]
stack = [res]
while stack:
cur_node = stack.pop()
print(cur_node.val, cur_node in boomer_nodes, end='')
for i in cur_node.neighbors:
        print(i.val, end='')
if i not in seen:
stack.append(i)
seen.append(i)
print()
# print(res.val, res.neighbors)
# print(res == a)
| 19.714286
| 61
| 0.51087
|
class Node(object):
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
class Solution(object):
def cloneGraph(self, node):
self.dp = {}
stack = [node]
def recurse(orig_node):
if orig_node in self.dp:
return self.dp[orig_node]
else:
created_node = Node(orig_node.val, [])
self.dp[orig_node] = created_node
for n in orig_node.neighbors:
created_node.neighbors.append(recurse(n))
return created_node
recurse(node)
return self.dp[node]
z = Solution()
a = Node(1, [])
b = Node(2, [])
c = Node(3, [])
d = Node(4, [])
a.neighbors = [b, d]
b.neighbors = [a, c]
c.neighbors = [b, d]
d.neighbors = [a, c]
res = z.cloneGraph(a)
seen = []
boomer_nodes = [a, b, c, d]
stack = [res]
while stack:
cur_node = stack.pop()
print(cur_node.val, cur_node in boomer_nodes, end='')
for i in cur_node.neighbors:
        print(i.val, end='')
if i not in seen:
stack.append(i)
seen.append(i)
print()
| true
| true
|
1c43aca44aa17aca4a5165de11dd1d2b40cbee2e
| 199
|
py
|
Python
|
concorde/shaman/__main__.py
|
frutiger/concorde
|
9f5a763bdaf2b8e48636193db39b7fde8209156c
|
[
"Unlicense"
] | 2
|
2016-03-09T03:54:19.000Z
|
2016-04-14T09:37:01.000Z
|
concorde/shaman/__main__.py
|
frutiger/concorde
|
9f5a763bdaf2b8e48636193db39b7fde8209156c
|
[
"Unlicense"
] | 1
|
2016-02-28T23:43:14.000Z
|
2016-02-28T23:43:14.000Z
|
concorde/shaman/__main__.py
|
frutiger/concorde
|
9f5a763bdaf2b8e48636193db39b7fde8209156c
|
[
"Unlicense"
] | 1
|
2016-02-28T23:25:19.000Z
|
2016-02-28T23:25:19.000Z
|
# shaman.__main__
import os
import sys
from .profile import Profile
def main():
if len(sys.argv) > 1:
os.chdir(sys.argv[1])
Profile().run()
if __name__ == '__main__':
main()
| 12.4375
| 29
| 0.613065
|
import os
import sys
from .profile import Profile
def main():
if len(sys.argv) > 1:
os.chdir(sys.argv[1])
Profile().run()
if __name__ == '__main__':
main()
| true
| true
|
1c43ad00ec84d35d26a4cd91af1b711ebf5f6585
| 379
|
py
|
Python
|
graphene_django_cud/tests/migrations/0006_dog_bark_count.py
|
rymanso/graphene-django-cud
|
43bdc972700012304ddc0c3fb022a1ec7fbb7c31
|
[
"MIT"
] | 66
|
2019-09-10T09:35:15.000Z
|
2022-03-09T15:29:49.000Z
|
graphene_django_cud/tests/migrations/0006_dog_bark_count.py
|
rymanso/graphene-django-cud
|
43bdc972700012304ddc0c3fb022a1ec7fbb7c31
|
[
"MIT"
] | 61
|
2019-09-24T08:43:18.000Z
|
2022-03-31T05:51:41.000Z
|
graphene_django_cud/tests/migrations/0006_dog_bark_count.py
|
rymanso/graphene-django-cud
|
43bdc972700012304ddc0c3fb022a1ec7fbb7c31
|
[
"MIT"
] | 27
|
2019-11-13T12:18:56.000Z
|
2022-02-16T20:49:12.000Z
|
# Generated by Django 2.2.6 on 2021-01-02 02:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0005_dogregistration'),
]
operations = [
migrations.AddField(
model_name='dog',
name='bark_count',
field=models.IntegerField(default=0),
),
]
| 19.947368
| 49
| 0.591029
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0005_dogregistration'),
]
operations = [
migrations.AddField(
model_name='dog',
name='bark_count',
field=models.IntegerField(default=0),
),
]
| true
| true
|
1c43adc8107f6755957a1f0868d44f529924a4cc
| 1,202
|
py
|
Python
|
recurring_content_detector/video_functions.py
|
lcd1232/recurring-content-detector
|
31e502e16b0d9870a6e3e4eb3bfe93d058cd01b0
|
[
"MIT"
] | 46
|
2019-06-12T10:43:47.000Z
|
2022-03-15T12:17:13.000Z
|
recurring_content_detector/video_functions.py
|
lcd1232/recurring-content-detector
|
31e502e16b0d9870a6e3e4eb3bfe93d058cd01b0
|
[
"MIT"
] | 19
|
2020-01-20T09:26:39.000Z
|
2021-12-10T13:16:08.000Z
|
recurring_content_detector/video_functions.py
|
lcd1232/recurring-content-detector
|
31e502e16b0d9870a6e3e4eb3bfe93d058cd01b0
|
[
"MIT"
] | 14
|
2020-01-05T21:04:28.000Z
|
2022-02-21T08:51:04.000Z
|
import cv2
import ffmpeg
import mimetypes
def file_is_video(video_fn):
"""
Checks if the given file path actually is a video file
"""
file_type = mimetypes.guess_type(video_fn)[0]
return file_type is not None and file_type.startswith("video")
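# Illustrative: mimetypes.guess_type('clip.mp4')[0] == 'video/mp4', so
# file_is_video('clip.mp4') is True, while file_is_video('notes.txt') is False.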
def get_framerate(video_fn):
"""
Return the video framerate given a video filename
"""
video = cv2.VideoCapture(video_fn)
return video.get(cv2.CAP_PROP_FPS)
def resize(input, output, resize_width):
"""
Resizes a video with ffmpeg
"""
video2 = cv2.VideoCapture(input)
framecount = int(video2.get(cv2.CAP_PROP_FRAME_COUNT))
if framecount > 0:
stream = ffmpeg.input(input)
if resize_width == 224:
stream = ffmpeg.filter(stream, 'scale', w=224, h=224)
else:
stream = ffmpeg.filter(stream, 'scale', w=resize_width, h="trunc(ow/a/2)*2")
stream = ffmpeg.output(stream, output)
try:
ffmpeg.run(stream)
except FileNotFoundError:
raise Exception("ffmpeg not found, make sure ffmpeg is in the PATH")
else:
raise Exception("Something is wrong with the video file: {}".format(input))
| 30.820513
| 92
| 0.642263
|
import cv2
import ffmpeg
import mimetypes
def file_is_video(video_fn):
file_type = mimetypes.guess_type(video_fn)[0]
return file_type is not None and file_type.startswith("video")
def get_framerate(video_fn):
video = cv2.VideoCapture(video_fn)
return video.get(cv2.CAP_PROP_FPS)
def resize(input, output, resize_width):
video2 = cv2.VideoCapture(input)
framecount = int(video2.get(cv2.CAP_PROP_FRAME_COUNT))
if framecount > 0:
stream = ffmpeg.input(input)
if resize_width == 224:
stream = ffmpeg.filter(stream, 'scale', w=224, h=224)
else:
stream = ffmpeg.filter(stream, 'scale', w=resize_width, h="trunc(ow/a/2)*2")
stream = ffmpeg.output(stream, output)
try:
ffmpeg.run(stream)
except FileNotFoundError:
raise Exception("ffmpeg not found, make sure ffmpeg is in the PATH")
else:
raise Exception("Something is wrong with the video file: {}".format(input))
| true
| true
|
1c43adc9316e975e0e744796c75493658c598e86
| 2,021
|
py
|
Python
|
tests/example_data/data_loading/pandas/pands_data_loading_conf.py
|
delorenzosoftware/superset
|
5403f1ec163a52623f34f459d89f20e4e190371d
|
[
"Apache-2.0"
] | 2
|
2021-12-21T15:57:16.000Z
|
2022-01-31T02:22:02.000Z
|
tests/example_data/data_loading/pandas/pands_data_loading_conf.py
|
changeiot/superset
|
299b5dc64448d04abe6b35ee85fbd2b938c781bc
|
[
"Apache-2.0"
] | 19
|
2022-01-29T03:16:22.000Z
|
2022-03-25T23:50:16.000Z
|
tests/example_data/data_loading/pandas/pands_data_loading_conf.py
|
changeiot/superset
|
299b5dc64448d04abe6b35ee85fbd2b938c781bc
|
[
"Apache-2.0"
] | 2
|
2021-12-21T13:41:18.000Z
|
2021-12-26T22:16:43.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Dict
default_pandas_data_loader_config = {
"if_exists": "replace",
"chunksize": 500,
"index": False,
"method": "multi",
"strftime": "%Y-%m-%d %H:%M:%S",
"support_datetime_type": False,
}
class PandasLoaderConfigurations:
if_exists: str
chunksize: int
index: bool
method: str
strftime: str
support_datetime_type: bool
def __init__(
self,
*,
if_exists: str,
chunksize: int,
index: bool,
method: str,
strftime: str,
support_datetime_type: bool,
):
self.if_exists = if_exists
self.chunksize = chunksize
self.index = index
self.method = method
self.strftime = strftime
self.support_datetime_type = support_datetime_type
@classmethod
def make_from_dict(cls, _dict: Dict[str, Any]) -> PandasLoaderConfigurations:
copy_dict = default_pandas_data_loader_config.copy()
copy_dict.update(_dict)
return PandasLoaderConfigurations(**copy_dict) # type: ignore
@classmethod
def make_default(cls) -> PandasLoaderConfigurations:
return cls.make_from_dict({})
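# Illustrative: PandasLoaderConfigurations.make_from_dict({"chunksize": 100})
# keeps every default above but overrides chunksize, so the result has
# chunksize == 100 and if_exists == "replace".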
| 31.092308
| 81
| 0.688768
|
from __future__ import annotations
from typing import Any, Dict
default_pandas_data_loader_config = {
"if_exists": "replace",
"chunksize": 500,
"index": False,
"method": "multi",
"strftime": "%Y-%m-%d %H:%M:%S",
"support_datetime_type": False,
}
class PandasLoaderConfigurations:
if_exists: str
chunksize: int
index: bool
method: str
strftime: str
support_datetime_type: bool
def __init__(
self,
*,
if_exists: str,
chunksize: int,
index: bool,
method: str,
strftime: str,
support_datetime_type: bool,
):
self.if_exists = if_exists
self.chunksize = chunksize
self.index = index
self.method = method
self.strftime = strftime
self.support_datetime_type = support_datetime_type
@classmethod
def make_from_dict(cls, _dict: Dict[str, Any]) -> PandasLoaderConfigurations:
copy_dict = default_pandas_data_loader_config.copy()
copy_dict.update(_dict)
return PandasLoaderConfigurations(**copy_dict)
@classmethod
def make_default(cls) -> PandasLoaderConfigurations:
return cls.make_from_dict({})
| true
| true
|
1c43aee86a6f74f2a929505d9eafea8c529c9914
| 14,477
|
py
|
Python
|
monty/functools.py
|
JosephMontoya-TRI/monty
|
facef1776c7d05c941191a32a0b93f986a9761dd
|
[
"MIT"
] | null | null | null |
monty/functools.py
|
JosephMontoya-TRI/monty
|
facef1776c7d05c941191a32a0b93f986a9761dd
|
[
"MIT"
] | null | null | null |
monty/functools.py
|
JosephMontoya-TRI/monty
|
facef1776c7d05c941191a32a0b93f986a9761dd
|
[
"MIT"
] | null | null | null |
"""
functools, especially backported from Python 3.
"""
from __future__ import absolute_import
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/29/14'
from collections import namedtuple
from functools import update_wrapper, wraps, partial
try:
from threading import RLock
except ImportError:
class RLock:
"""Dummy reentrant lock for builds without threads"""
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
pass
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
"""
This class guarantees that hash() will be called no more than once
per element. This is important because the lru_cache() will hash
the key multiple times on a cache miss.
"""
__slots__ = 'hashvalue'
def __init__(self, tup, hashfunc=hash):
self[:] = tup
self.hashvalue = hashfunc(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark=(object(),),
fasttypes={int, str, frozenset, type(None)}):
"""
    Make a cache key from optionally typed positional and keyword arguments.
    The key is constructed to be as flat as possible rather than as a nested
    structure that would take more memory.
If there is only a single argument and its data type is known to cache
its hash value, then that argument is returned without a wrapper. This
saves space and improves lookup speed.
"""
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
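# Illustrative: _make_key((1, 'a'), {'b': 2}, typed=False) yields a flat
# _HashedSeq of (1, 'a', <kwd_mark>, ('b', 2)); a lone int/str/frozenset/None
# argument is returned bare, since those types are cheap to hash.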
def lru_cache(maxsize=128, typed=False):
"""
Least-recently-used cache decorator, which is a backport of the same
function in Python >= 3.2.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
# Early detection of an erroneous call to @lru_cache without any arguments
# resulting in the inner function being passed to maxsize instead of an
# integer or None.
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
def decorating_function(user_function):
cache = {}
hits = [0]
misses = [0]
full = [False]
cache_get = cache.get # bound method to lookup a key or return None
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
r = [root]
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
result = user_function(*args, **kwds)
misses[0] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits[0] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = r[0][PREV]
last[NEXT] = r[0][PREV] = link
link[PREV] = last
link[NEXT] = r[0]
hits[0] += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full[0]:
# Use the old root to store the new key and result.
oldroot = r[0]
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
r[0] = oldroot[NEXT]
oldkey = r[0][KEY]
oldresult = r[0][RESULT]
r[0][KEY] = r[0][RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = r[0][PREV]
link = [last, r[0], key, result]
last[NEXT] = r[0][PREV] = cache[key] = link
full[0] = (len(cache) >= maxsize)
misses[0] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root[:] = [root, root, None, None]
r[0] = root
hits[0] = 0
misses[0] = 0
full[0] = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
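# Illustrative usage (not part of the original module):
#
#     @lru_cache(maxsize=32)
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(20)
#     fib.cache_info()  # CacheInfo(hits=18, misses=21, maxsize=32, currsize=21)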
class lazy_property(object):
"""
lazy_property descriptor
Used as a decorator to create lazy attributes. Lazy attributes
are evaluated on first use.
"""
def __init__(self, func):
self.__func = func
wraps(self.__func)(self)
def __get__(self, inst, inst_cls):
if inst is None:
return self
if not hasattr(inst, '__dict__'):
raise AttributeError("'%s' object has no attribute '__dict__'"
% (inst_cls.__name__,))
name = self.__name__
if name.startswith('__') and not name.endswith('__'):
name = '_%s%s' % (inst_cls.__name__, name)
value = self.__func(inst)
inst.__dict__[name] = value
return value
@classmethod
def invalidate(cls, inst, name):
"""Invalidate a lazy attribute.
This obviously violates the lazy contract. A subclass of lazy
may however have a contract where invalidation is appropriate.
"""
inst_cls = inst.__class__
if not hasattr(inst, '__dict__'):
raise AttributeError("'%s' object has no attribute '__dict__'"
% (inst_cls.__name__,))
if name.startswith('__') and not name.endswith('__'):
name = '_%s%s' % (inst_cls.__name__, name)
if not isinstance(getattr(inst_cls, name), cls):
raise AttributeError("'%s.%s' is not a %s attribute"
% (inst_cls.__name__, name, cls.__name__))
if name in inst.__dict__:
del inst.__dict__[name]
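# Illustrative usage (not part of the original module):
#
#     class Circle(object):
#         def __init__(self, radius):
#             self.radius = radius
#
#         @lazy_property
#         def area(self):
#             return 3.14159 * self.radius ** 2  # computed once, then cached
#
#     c = Circle(2)
#     c.area                                 # stored in c.__dict__['area']
#     lazy_property.invalidate(c, 'area')    # forces recomputation on next access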
def return_if_raise(exception_tuple, retval_if_exc, disabled=False):
"""
Decorator for functions, methods or properties. Execute the callable in a
try block, and return retval_if_exc if one of the exceptions listed in
    exception_tuple is raised (see also ``return_none_if_raise``).
Setting disabled to True disables the try except block (useful for
debugging purposes). One can use this decorator to define properties.
Example::
@return_if_raise(ValueError, None)
def return_none_if_value_error(self):
pass
@return_if_raise((ValueError, KeyError), "hello")
def another_method(self):
pass
@property
@return_if_raise(AttributeError, None)
def name(self):
"Name of the object, None if not set."
return self._name
"""
# we need a tuple of exceptions.
if isinstance(exception_tuple, list):
exception_tuple = tuple(exception_tuple)
elif not isinstance(exception_tuple, tuple):
exception_tuple = (exception_tuple,)
    # any remaining case is already a tuple, which is what we need
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if disabled:
return func(*args, **kwargs)
try:
return func(*args, **kwargs)
except exception_tuple:
return retval_if_exc
else:
raise RuntimeError()
return wrapper
return decorator
# One could use None as default value in return_if_raise but this one is
# explicit and more readable
return_none_if_raise = partial(return_if_raise, retval_if_exc=None)
"""
This decorator returns None if one of the exceptions is raised.
@return_none_if_raise(ValueError)
def method(self):
"""
class timeout(object):
"""
Timeout function. Use to limit matching to a certain time limit. Note that
this works only on Unix-based systems as it uses signal. Usage:
try:
with timeout(3):
do_stuff()
except TimeoutError:
do_something_else()
"""
def __init__(self, seconds=1, error_message='Timeout'):
"""
Args:
seconds (int): Allowed time for function in seconds.
error_message (str): An error message.
"""
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
import signal
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
import signal
signal.alarm(0)
class TimeoutError(Exception):
def __init__(self, message):
self.message = message
def prof_main(main):
"""
Decorator for profiling main programs.
Profiling is activated by prepending the command line options
supported by the original main program with the keyword `prof`.
Example:
$ script.py arg --foo=1
becomes
$ script.py prof arg --foo=1
The decorated main accepts two new arguments:
prof_file: Name of the output file with profiling data
If not given, a temporary file is created.
sortby: Profiling data are sorted according to this value.
default is "time". See sort_stats.
"""
@wraps(main)
def wrapper(*args, **kwargs):
import sys
try:
do_prof = sys.argv[1] == "prof"
if do_prof: sys.argv.pop(1)
except Exception:
do_prof = False
if not do_prof:
sys.exit(main())
else:
print("Entering profiling mode...")
import pstats, cProfile, tempfile
prof_file = kwargs.get("prof_file", None)
if prof_file is None:
_, prof_file = tempfile.mkstemp()
print("Profiling data stored in %s" % prof_file)
sortby = kwargs.get("sortby", "time")
cProfile.runctx("main()", globals(), locals(), prof_file)
s = pstats.Stats(prof_file)
s.strip_dirs().sort_stats(sortby).print_stats()
if "retval" not in kwargs:
sys.exit(0)
else:
return kwargs["retval"]
return wrapper
| 33.745921
| 80
| 0.570629
|
from __future__ import absolute_import
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/29/14'
from collections import namedtuple
from functools import update_wrapper, wraps, partial
try:
from threading import RLock
except ImportError:
class RLock:
"""Dummy reentrant lock for builds without threads"""
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
pass
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hashfunc=hash):
self[:] = tup
self.hashvalue = hashfunc(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark=(object(),),
fasttypes={int, str, frozenset, type(None)}):
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=128, typed=False):
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
sentinel = object()
make_key = _make_key
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
def decorating_function(user_function):
cache = {}
hits = [0]
misses = [0]
full = [False]
cache_get = cache.get
lock = RLock()
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
r = [root]
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
result = user_function(*args, **kwds)
misses[0] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits[0] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = r[0][PREV]
last[NEXT] = r[0][PREV] = link
link[PREV] = last
link[NEXT] = r[0]
hits[0] += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full[0]:
# Use the old root to store the new key and result.
oldroot = r[0]
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
r[0] = oldroot[NEXT]
oldkey = r[0][KEY]
oldresult = r[0][RESULT]
r[0][KEY] = r[0][RESULT] = None
del cache[oldkey]
cache[key] = oldroot
else:
last = r[0][PREV]
link = [last, r[0], key, result]
last[NEXT] = r[0][PREV] = cache[key] = link
full[0] = (len(cache) >= maxsize)
misses[0] += 1
return result
def cache_info():
with lock:
return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
def cache_clear():
with lock:
cache.clear()
root[:] = [root, root, None, None]
r[0] = root
hits[0] = 0
misses[0] = 0
full[0] = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
class lazy_property(object):
def __init__(self, func):
self.__func = func
wraps(self.__func)(self)
def __get__(self, inst, inst_cls):
if inst is None:
return self
if not hasattr(inst, '__dict__'):
raise AttributeError("'%s' object has no attribute '__dict__'"
% (inst_cls.__name__,))
name = self.__name__
if name.startswith('__') and not name.endswith('__'):
name = '_%s%s' % (inst_cls.__name__, name)
value = self.__func(inst)
inst.__dict__[name] = value
return value
@classmethod
def invalidate(cls, inst, name):
inst_cls = inst.__class__
if not hasattr(inst, '__dict__'):
raise AttributeError("'%s' object has no attribute '__dict__'"
% (inst_cls.__name__,))
if name.startswith('__') and not name.endswith('__'):
name = '_%s%s' % (inst_cls.__name__, name)
if not isinstance(getattr(inst_cls, name), cls):
raise AttributeError("'%s.%s' is not a %s attribute"
% (inst_cls.__name__, name, cls.__name__))
if name in inst.__dict__:
del inst.__dict__[name]
def return_if_raise(exception_tuple, retval_if_exc, disabled=False):
if isinstance(exception_tuple, list):
exception_tuple = tuple(exception_tuple)
elif not isinstance(exception_tuple, tuple):
exception_tuple = (exception_tuple,)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if disabled:
return func(*args, **kwargs)
try:
return func(*args, **kwargs)
except exception_tuple:
return retval_if_exc
else:
raise RuntimeError()
return wrapper
return decorator
return_none_if_raise = partial(return_if_raise, retval_if_exc=None)
class timeout(object):
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
import signal
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
import signal
signal.alarm(0)
class TimeoutError(Exception):
def __init__(self, message):
self.message = message
def prof_main(main):
@wraps(main)
def wrapper(*args, **kwargs):
import sys
try:
do_prof = sys.argv[1] == "prof"
if do_prof: sys.argv.pop(1)
except Exception:
do_prof = False
if not do_prof:
sys.exit(main())
else:
print("Entering profiling mode...")
import pstats, cProfile, tempfile
prof_file = kwargs.get("prof_file", None)
if prof_file is None:
_, prof_file = tempfile.mkstemp()
print("Profiling data stored in %s" % prof_file)
sortby = kwargs.get("sortby", "time")
cProfile.runctx("main()", globals(), locals(), prof_file)
s = pstats.Stats(prof_file)
s.strip_dirs().sort_stats(sortby).print_stats()
if "retval" not in kwargs:
sys.exit(0)
else:
return kwargs["retval"]
return wrapper
| true
| true
|
1c43aefb40b6a4733937ffe25a9ac6453faae8b7
| 20,502
|
py
|
Python
|
vnpy/database/mongodb/mongodb_database.py
|
NovelResearchInvestment/vnpy
|
27e1b053a478e26416d7ce2867fa99e8cb7ee659
|
[
"MIT"
] | null | null | null |
vnpy/database/mongodb/mongodb_database.py
|
NovelResearchInvestment/vnpy
|
27e1b053a478e26416d7ce2867fa99e8cb7ee659
|
[
"MIT"
] | null | null | null |
vnpy/database/mongodb/mongodb_database.py
|
NovelResearchInvestment/vnpy
|
27e1b053a478e26416d7ce2867fa99e8cb7ee659
|
[
"MIT"
] | null | null | null |
""""""
from datetime import datetime
from typing import List
from mongoengine import (
Document,
DateTimeField,
FloatField,
StringField,
IntField,
connect,
QuerySet
)
from mongoengine.errors import DoesNotExist
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData, OrderData, TradeData, PositionData, AccountData
from vnpy.trader.database import (
BaseDatabase,
BarOverview,
DB_TZ,
convert_tz
)
from vnpy.trader.setting import SETTINGS
from copy import copy
class DbBarData(Document):
""""""
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
interval: str = StringField()
name: str = StringField()
volume: float = FloatField()
turnover: float = FloatField()
open_interest: float = FloatField()
open_price: float = FloatField()
high_price: float = FloatField()
low_price: float = FloatField()
close_price: float = FloatField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "interval", "datetime"),
"unique": True,
}
]
}
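# The unique compound index doubles as the dedup key: save_bar_data below
# upserts on (symbol, exchange, interval, datetime), so re-saving a bar
# overwrites the existing document instead of inserting a duplicate.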
class DbTickData(Document):
""""""
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
name: str = StringField()
volume: float = FloatField()
turnover: float = FloatField()
open_interest: float = FloatField()
last_price: float = FloatField()
last_volume: float = FloatField()
limit_up: float = FloatField()
limit_down: float = FloatField()
open_price: float = FloatField()
high_price: float = FloatField()
low_price: float = FloatField()
close_price: float = FloatField()
pre_close: float = FloatField()
bid_price_1: float = FloatField()
bid_price_2: float = FloatField()
bid_price_3: float = FloatField()
bid_price_4: float = FloatField()
bid_price_5: float = FloatField()
ask_price_1: float = FloatField()
ask_price_2: float = FloatField()
ask_price_3: float = FloatField()
ask_price_4: float = FloatField()
ask_price_5: float = FloatField()
bid_volume_1: float = FloatField()
bid_volume_2: float = FloatField()
bid_volume_3: float = FloatField()
bid_volume_4: float = FloatField()
bid_volume_5: float = FloatField()
ask_volume_1: float = FloatField()
ask_volume_2: float = FloatField()
ask_volume_3: float = FloatField()
ask_volume_4: float = FloatField()
ask_volume_5: float = FloatField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime"),
"unique": True,
}
],
}
class DbOrderData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
orderid: str = StringField()
ordertype: str = StringField()
direction: str = StringField()
offset: str = StringField()
price: float = FloatField()
volume: float = FloatField()
traded: float = FloatField()
status: str = StringField()
reference: str = StringField()
vt_orderid: str = StringField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime", "localtime"),
"unique": True,
}
],
}
class DbTradeData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
orderid: str = StringField()
tradeid: str = StringField()
direction: str = StringField()
offset: str = StringField()
price: float = FloatField()
volume: float = FloatField()
reference: str = StringField()
vt_orderid: str = StringField()
vt_tradeid: str = StringField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime", "localtime"),
"unique": True,
}
],
}
class DbPositionData(Document):
    symbol: str = StringField()
    exchange: str = StringField()
    gateway_name: str = StringField()
    direction: str = StringField()
    volume: float = FloatField()
    frozen: float = FloatField()
    price: float = FloatField()
    pnl: float = FloatField()
    yd_volume: float = FloatField()
    vt_symbol: str = StringField()
    vt_positionid: str = StringField()
    localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "localtime"),
"unique": True,
}
],
}
class DbAccountData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime"),
"unique": True,
}
],
}
class DbBarOverview(Document):
""""""
symbol: str = StringField()
exchange: str = StringField()
interval: str = StringField()
count: int = IntField()
start: datetime = DateTimeField()
end: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "interval"),
"unique": True,
}
],
}
class MongodbDatabase(BaseDatabase):
""""""
def __init__(self) -> None:
""""""
database = SETTINGS["database.database"]
host = SETTINGS["database.host"]
port = SETTINGS["database.port"]
username = SETTINGS["database.user"]
password = SETTINGS["database.password"]
authentication_source = SETTINGS["database.authentication_source"]
if not username:
username = None
password = None
authentication_source = None
connect(
db=database,
host=host,
port=port,
username=username,
password=password,
authentication_source=authentication_source,
)
def save_bar_data(self, bars: List[BarData]) -> bool:
""""""
# Store key parameters
bar = bars[0]
symbol = bar.symbol
exchange = bar.exchange
interval = bar.interval
# Upsert data into mongodb
for bar in bars:
bar.datetime = convert_tz(bar.datetime)
d = copy(bar.__dict__)
d["exchange"] = d["exchange"].value
d["interval"] = d["interval"].value
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbBarData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
interval=d["interval"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
# Update bar overview
try:
overview: DbBarOverview = DbBarOverview.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).get()
except DoesNotExist:
overview: DbBarOverview = DbBarOverview(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
)
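        # A newly created overview has no start yet; otherwise widen the
        # [start, end] range and recount, since bars may arrive out of order.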
if not overview.start:
overview.start = bars[0].datetime
overview.end = bars[-1].datetime
overview.count = len(bars)
else:
overview.start = min(bars[0].datetime, overview.start)
overview.end = max(bars[-1].datetime, overview.end)
overview.count = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).count()
overview.save()
def save_tick_data(self, ticks: List[TickData]) -> bool:
""""""
for tick in ticks:
tick.datetime = convert_tz(tick.datetime)
d = copy(tick.__dict__)
d["exchange"] = d["exchange"].value
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
            # Filter on the unique index fields so update_one upserts in place.
DbTickData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
def save_order_data(self, orders: List[OrderData]) -> bool:
for order in orders:
order.datetime = convert_tz(order.datetime)
d = copy(order.__dict__)
d["exchange"] = d["exchange"].value
d["ordertype"] = d['type'].value
d["direction"] = d['direction'].value
d["offset"] = d['offset'].value
d["status"] = d['status'].value
d["reference"] = SETTINGS['account_setting']['key']
d.pop("gateway_name")
d.pop("vt_symbol")
d.pop("type")
param = to_update_param(d)
DbOrderData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
def save_trade_data(self, trades: List[TradeData]) -> bool:
for trade in trades:
trade.datetime = convert_tz(trade.datetime)
d = copy(trade.__dict__)
d["exchange"] = d["exchange"].value
d["tradeid"] = str(d["tradeid"])
d["direction"] = d['direction'].value
d["offset"] = d['offset'].value
d["reference"] = SETTINGS['account_setting']['key']
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbTradeData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
def save_position_data(self, positions: List[PositionData]) -> bool:
for position in positions:
position.datetime = convert_tz(position.datetime)
d = copy(position.__dict__)
d["exchange"] = d["exchange"].value
d["direction"] = d['direction'].value
d["reference"] = SETTINGS['account_setting']['key']
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbPositionData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
).update_one(upsert=True, **param)
def save_account_data(self, accounts: List[AccountData]) -> bool:
for account in accounts:
account.datetime = convert_tz(account.datetime)
d = copy(account.__dict__)
param = to_update_param(d)
DbAccountData.objects().update_one(upsert=True, **param)
def load_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
) -> List[BarData]:
""""""
s: QuerySet = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value,
datetime__gte=convert_tz(start),
datetime__lte=convert_tz(end),
)
bars: List[BarData] = []
for db_bar in s:
bar = BarData(
symbol=db_bar.symbol,
exchange=Exchange(db_bar.exchange),
datetime=db_bar.datetime.astimezone(DB_TZ),
interval=Interval(db_bar.interval),
volume=db_bar.volume,
turnover=db_bar.turnover,
open_interest=db_bar.open_interest,
open_price=db_bar.open_price,
high_price=db_bar.high_price,
low_price=db_bar.low_price,
close_price=db_bar.close_price,
gateway_name="DB"
)
bars.append(bar)
return bars
def load_tick_data(
self,
symbol: str,
exchange: Exchange,
start: datetime,
end: datetime
) -> List[TickData]:
""""""
s: QuerySet = DbTickData.objects(
symbol=symbol,
exchange=exchange.value,
datetime__gte=convert_tz(start),
datetime__lte=convert_tz(end),
)
ticks: List[TickData] = []
for db_tick in s:
tick = TickData(
symbol=db_tick.symbol,
exchange=Exchange(db_tick.exchange),
datetime=db_tick.datetime.astimezone(DB_TZ),
name=db_tick.name,
volume=db_tick.volume,
turnover=db_tick.turnover,
open_interest=db_tick.open_interest,
last_price=db_tick.last_price,
last_volume=db_tick.last_volume,
limit_up=db_tick.limit_up,
limit_down=db_tick.limit_down,
open_price=db_tick.open_price,
high_price=db_tick.high_price,
low_price=db_tick.low_price,
pre_close=db_tick.pre_close,
bid_price_1=db_tick.bid_price_1,
bid_price_2=db_tick.bid_price_2,
bid_price_3=db_tick.bid_price_3,
bid_price_4=db_tick.bid_price_4,
bid_price_5=db_tick.bid_price_5,
ask_price_1=db_tick.ask_price_1,
ask_price_2=db_tick.ask_price_2,
ask_price_3=db_tick.ask_price_3,
ask_price_4=db_tick.ask_price_4,
ask_price_5=db_tick.ask_price_5,
bid_volume_1=db_tick.bid_volume_1,
bid_volume_2=db_tick.bid_volume_2,
bid_volume_3=db_tick.bid_volume_3,
bid_volume_4=db_tick.bid_volume_4,
bid_volume_5=db_tick.bid_volume_5,
ask_volume_1=db_tick.ask_volume_1,
ask_volume_2=db_tick.ask_volume_2,
ask_volume_3=db_tick.ask_volume_3,
ask_volume_4=db_tick.ask_volume_4,
ask_volume_5=db_tick.ask_volume_5,
localtime=db_tick.localtime,
gateway_name="DB"
)
ticks.append(tick)
return ticks
def load_order_data(self) -> List[OrderData]:
pass
def load_trade_data(self) -> List[TradeData]:
pass
def load_position_data(self) -> List[PositionData]:
pass
def load_account_data(self) -> List[AccountData]:
pass
def delete_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval
) -> int:
""""""
count = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).delete()
# Delete bar overview
DbBarOverview.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).delete()
return count
def delete_tick_data(
self,
symbol: str,
exchange: Exchange
) -> int:
""""""
count = DbTickData.objects(
symbol=symbol,
exchange=exchange.value
).delete()
return count
def get_bar_overview(self) -> List[BarOverview]:
"""
        Return bar data available in the database.
"""
# Init bar overview for old version database
data_count = DbBarData.objects.count()
overview_count = DbBarOverview.objects.count()
if data_count and not overview_count:
self.init_bar_overview()
s: QuerySet = DbBarOverview.objects()
overviews = []
for overview in s:
overview.exchange = Exchange(overview.exchange)
overview.interval = Interval(overview.interval)
overviews.append(overview)
return overviews
def init_bar_overview(self) -> None:
"""
        Initialize the overview table if it does not exist.
"""
s: QuerySet = (
DbBarData.objects.aggregate({
"$group": {
"_id": {
"symbol": "$symbol",
"exchange": "$exchange",
"interval": "$interval",
},
"count": {"$sum": 1}
}
})
)
for d in s:
id_data = d["_id"]
overview = DbBarOverview()
overview.symbol = id_data["symbol"]
overview.exchange = id_data["exchange"]
overview.interval = id_data["interval"]
overview.count = d["count"]
start_bar: DbBarData = (
DbBarData.objects(
symbol=id_data["symbol"],
exchange=id_data["exchange"],
interval=id_data["interval"],
)
.order_by("+datetime")
.first()
)
overview.start = start_bar.datetime
end_bar: DbBarData = (
DbBarData.objects(
symbol=id_data["symbol"],
exchange=id_data["exchange"],
interval=id_data["interval"],
)
.order_by("-datetime")
.first()
)
overview.end = end_bar.datetime
overview.save()
def to_update_param(d: dict) -> dict:
"""
Convert data dict to update parameters.
"""
param = {f"set__{k}": v for k, v in d.items()}
return param
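# Illustrative: to_update_param({"open_price": 10.0, "volume": 5.0}) returns
# {"set__open_price": 10.0, "set__volume": 5.0}, i.e. mongoengine "set__"
# keyword arguments consumed by update_one(upsert=True, **param).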
database_manager = MongodbDatabase()
| 28.835443
| 97
| 0.544776
|
from datetime import datetime
from typing import List
from mongoengine import (
Document,
DateTimeField,
FloatField,
StringField,
IntField,
connect,
QuerySet
)
from mongoengine.errors import DoesNotExist
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData, OrderData, TradeData, PositionData, AccountData
from vnpy.trader.database import (
BaseDatabase,
BarOverview,
DB_TZ,
convert_tz
)
from vnpy.trader.setting import SETTINGS
from copy import copy
class DbBarData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
interval: str = StringField()
name: str = StringField()
volume: float = FloatField()
turnover: float = FloatField()
open_interest: float = FloatField()
open_price: float = FloatField()
high_price: float = FloatField()
low_price: float = FloatField()
close_price: float = FloatField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "interval", "datetime"),
"unique": True,
}
]
}
class DbTickData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
name: str = StringField()
volume: float = FloatField()
turnover: float = FloatField()
open_interest: float = FloatField()
last_price: float = FloatField()
last_volume: float = FloatField()
limit_up: float = FloatField()
limit_down: float = FloatField()
open_price: float = FloatField()
high_price: float = FloatField()
low_price: float = FloatField()
close_price: float = FloatField()
pre_close: float = FloatField()
bid_price_1: float = FloatField()
bid_price_2: float = FloatField()
bid_price_3: float = FloatField()
bid_price_4: float = FloatField()
bid_price_5: float = FloatField()
ask_price_1: float = FloatField()
ask_price_2: float = FloatField()
ask_price_3: float = FloatField()
ask_price_4: float = FloatField()
ask_price_5: float = FloatField()
bid_volume_1: float = FloatField()
bid_volume_2: float = FloatField()
bid_volume_3: float = FloatField()
bid_volume_4: float = FloatField()
bid_volume_5: float = FloatField()
ask_volume_1: float = FloatField()
ask_volume_2: float = FloatField()
ask_volume_3: float = FloatField()
ask_volume_4: float = FloatField()
ask_volume_5: float = FloatField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime"),
"unique": True,
}
],
}
class DbOrderData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
orderid: str = StringField()
ordertype: str = StringField()
direction: str = StringField()
offset: str = StringField()
price: float = FloatField()
volume: float = FloatField()
traded: float = FloatField()
status: str = StringField()
reference: str = StringField()
vt_orderid: str = StringField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime", "localtime"),
"unique": True,
}
],
}
class DbTradeData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
orderid: str = StringField()
tradeid: str = StringField()
direction: str = StringField()
offset: str = StringField()
price: float = FloatField()
volume: float = FloatField()
reference: str = StringField()
vt_orderid: str = StringField()
vt_tradeid: str = StringField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime", "localtime"),
"unique": True,
}
],
}
class DbPositionData(Document):
symbol: str = StringField()
exchange: str = StringField()
gateway_name: str = StringField()
    direction: str = StringField()
    volume: float = FloatField()
    frozen: float = FloatField()
    price: float = FloatField()
    pnl: float = FloatField()
    yd_volume: float = FloatField()
vt_symbol: str = StringField()
vt_positionid: str = StringField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "localtime"),
"unique": True,
}
],
}
class DbAccountData(Document):
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
localtime: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime"),
"unique": True,
}
],
}
class DbBarOverview(Document):
symbol: str = StringField()
exchange: str = StringField()
interval: str = StringField()
count: int = IntField()
start: datetime = DateTimeField()
end: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "interval"),
"unique": True,
}
],
}
class MongodbDatabase(BaseDatabase):
def __init__(self) -> None:
database = SETTINGS["database.database"]
host = SETTINGS["database.host"]
port = SETTINGS["database.port"]
username = SETTINGS["database.user"]
password = SETTINGS["database.password"]
authentication_source = SETTINGS["database.authentication_source"]
if not username:
username = None
password = None
authentication_source = None
connect(
db=database,
host=host,
port=port,
username=username,
password=password,
authentication_source=authentication_source,
)
def save_bar_data(self, bars: List[BarData]) -> bool:
bar = bars[0]
symbol = bar.symbol
exchange = bar.exchange
interval = bar.interval
for bar in bars:
bar.datetime = convert_tz(bar.datetime)
d = copy(bar.__dict__)
d["exchange"] = d["exchange"].value
d["interval"] = d["interval"].value
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbBarData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
interval=d["interval"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
try:
overview: DbBarOverview = DbBarOverview.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).get()
except DoesNotExist:
overview: DbBarOverview = DbBarOverview(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
)
if not overview.start:
overview.start = bars[0].datetime
overview.end = bars[-1].datetime
overview.count = len(bars)
else:
overview.start = min(bars[0].datetime, overview.start)
overview.end = max(bars[-1].datetime, overview.end)
overview.count = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).count()
overview.save()
def save_tick_data(self, ticks: List[TickData]) -> bool:
for tick in ticks:
tick.datetime = convert_tz(tick.datetime)
d = copy(tick.__dict__)
d["exchange"] = d["exchange"].value
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbTickData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
def save_order_data(self, orders: List[OrderData]) -> bool:
for order in orders:
order.datetime = convert_tz(order.datetime)
d = copy(order.__dict__)
d["exchange"] = d["exchange"].value
d["ordertype"] = d['type'].value
d["direction"] = d['direction'].value
d["offset"] = d['offset'].value
d["status"] = d['status'].value
d["reference"] = SETTINGS['account_setting']['key']
d.pop("gateway_name")
d.pop("vt_symbol")
d.pop("type")
param = to_update_param(d)
DbOrderData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
def save_trade_data(self, trades: List[TradeData]) -> bool:
for trade in trades:
trade.datetime = convert_tz(trade.datetime)
d = copy(trade.__dict__)
d["exchange"] = d["exchange"].value
d["tradeid"] = str(d["tradeid"])
d["direction"] = d['direction'].value
d["offset"] = d['offset'].value
d["reference"] = SETTINGS['account_setting']['key']
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbTradeData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
def save_position_data(self, positions: List[PositionData]) -> bool:
for position in positions:
position.datetime = convert_tz(position.datetime)
d = copy(position.__dict__)
d["exchange"] = d["exchange"].value
d["direction"] = d['direction'].value
d["reference"] = SETTINGS['account_setting']['key']
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbPositionData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
).update_one(upsert=True, **param)
def save_account_data(self, accounts: List[AccountData]) -> bool:
for account in accounts:
account.datetime = convert_tz(account.datetime)
d = copy(account.__dict__)
param = to_update_param(d)
DbAccountData.objects().update_one(upsert=True, **param)
def load_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
) -> List[BarData]:
s: QuerySet = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value,
datetime__gte=convert_tz(start),
datetime__lte=convert_tz(end),
)
bars: List[BarData] = []
for db_bar in s:
bar = BarData(
symbol=db_bar.symbol,
exchange=Exchange(db_bar.exchange),
datetime=db_bar.datetime.astimezone(DB_TZ),
interval=Interval(db_bar.interval),
volume=db_bar.volume,
turnover=db_bar.turnover,
open_interest=db_bar.open_interest,
open_price=db_bar.open_price,
high_price=db_bar.high_price,
low_price=db_bar.low_price,
close_price=db_bar.close_price,
gateway_name="DB"
)
bars.append(bar)
return bars
def load_tick_data(
self,
symbol: str,
exchange: Exchange,
start: datetime,
end: datetime
) -> List[TickData]:
s: QuerySet = DbTickData.objects(
symbol=symbol,
exchange=exchange.value,
datetime__gte=convert_tz(start),
datetime__lte=convert_tz(end),
)
ticks: List[TickData] = []
for db_tick in s:
tick = TickData(
symbol=db_tick.symbol,
exchange=Exchange(db_tick.exchange),
datetime=db_tick.datetime.astimezone(DB_TZ),
name=db_tick.name,
volume=db_tick.volume,
turnover=db_tick.turnover,
open_interest=db_tick.open_interest,
last_price=db_tick.last_price,
last_volume=db_tick.last_volume,
limit_up=db_tick.limit_up,
limit_down=db_tick.limit_down,
open_price=db_tick.open_price,
high_price=db_tick.high_price,
low_price=db_tick.low_price,
pre_close=db_tick.pre_close,
bid_price_1=db_tick.bid_price_1,
bid_price_2=db_tick.bid_price_2,
bid_price_3=db_tick.bid_price_3,
bid_price_4=db_tick.bid_price_4,
bid_price_5=db_tick.bid_price_5,
ask_price_1=db_tick.ask_price_1,
ask_price_2=db_tick.ask_price_2,
ask_price_3=db_tick.ask_price_3,
ask_price_4=db_tick.ask_price_4,
ask_price_5=db_tick.ask_price_5,
bid_volume_1=db_tick.bid_volume_1,
bid_volume_2=db_tick.bid_volume_2,
bid_volume_3=db_tick.bid_volume_3,
bid_volume_4=db_tick.bid_volume_4,
bid_volume_5=db_tick.bid_volume_5,
ask_volume_1=db_tick.ask_volume_1,
ask_volume_2=db_tick.ask_volume_2,
ask_volume_3=db_tick.ask_volume_3,
ask_volume_4=db_tick.ask_volume_4,
ask_volume_5=db_tick.ask_volume_5,
localtime=db_tick.localtime,
gateway_name="DB"
)
ticks.append(tick)
return ticks
def load_order_data(self) -> List[OrderData]:
pass
def load_trade_data(self) -> List[TradeData]:
pass
def load_position_data(self) -> List[PositionData]:
pass
def load_account_data(self) -> List[AccountData]:
pass
def delete_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval
) -> int:
count = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).delete()
DbBarOverview.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).delete()
return count
def delete_tick_data(
self,
symbol: str,
exchange: Exchange
) -> int:
count = DbTickData.objects(
symbol=symbol,
exchange=exchange.value
).delete()
return count
def get_bar_overview(self) -> List[BarOverview]:
data_count = DbBarData.objects.count()
overview_count = DbBarOverview.objects.count()
if data_count and not overview_count:
self.init_bar_overview()
s: QuerySet = DbBarOverview.objects()
overviews = []
for overview in s:
overview.exchange = Exchange(overview.exchange)
overview.interval = Interval(overview.interval)
overviews.append(overview)
return overviews
def init_bar_overview(self) -> None:
s: QuerySet = (
DbBarData.objects.aggregate({
"$group": {
"_id": {
"symbol": "$symbol",
"exchange": "$exchange",
"interval": "$interval",
},
"count": {"$sum": 1}
}
})
)
for d in s:
id_data = d["_id"]
overview = DbBarOverview()
overview.symbol = id_data["symbol"]
overview.exchange = id_data["exchange"]
overview.interval = id_data["interval"]
overview.count = d["count"]
start_bar: DbBarData = (
DbBarData.objects(
symbol=id_data["symbol"],
exchange=id_data["exchange"],
interval=id_data["interval"],
)
.order_by("+datetime")
.first()
)
overview.start = start_bar.datetime
end_bar: DbBarData = (
DbBarData.objects(
symbol=id_data["symbol"],
exchange=id_data["exchange"],
interval=id_data["interval"],
)
.order_by("-datetime")
.first()
)
overview.end = end_bar.datetime
overview.save()
def to_update_param(d: dict) -> dict:
param = {f"set__{k}": v for k, v in d.items()}
return param
database_manager = MongodbDatabase()
| true
| true
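A minimal usage sketch for the upsert helper in the record above: to_update_param rewrites a plain data dict into the set__<field> keyword arguments that mongoengine's update_one expects. The dict contents below are hypothetical stand-ins, not values from this record.

# Sketch: to_update_param feeding a mongoengine upsert (helper copied
# from the record above; the bar fields are made up for illustration).
def to_update_param(d: dict) -> dict:
    return {f"set__{k}": v for k, v in d.items()}

d = {"open_price": 4100.0, "close_price": 4123.4}  # hypothetical bar fields
print(to_update_param(d))
# {'set__open_price': 4100.0, 'set__close_price': 4123.4}
# Expanded as DbBarData.objects(...).update_one(upsert=True, **param),
# this updates the matching bar in place or inserts it if absent.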
|
1c43af03e0a527463944610ebbf54b54b44af576
| 1,101
|
py
|
Python
|
setup.py
|
lrgr/explosig-data
|
9fd11e5252e3fb112dc7a3e55cb7b40d8b8d5efb
|
[
"MIT"
] | 1
|
2020-01-30T17:55:03.000Z
|
2020-01-30T17:55:03.000Z
|
setup.py
|
keller-mark/explosig-data
|
9fd11e5252e3fb112dc7a3e55cb7b40d8b8d5efb
|
[
"MIT"
] | 1
|
2020-02-20T15:03:54.000Z
|
2020-02-20T23:44:14.000Z
|
setup.py
|
keller-mark/explosig-data
|
9fd11e5252e3fb112dc7a3e55cb7b40d8b8d5efb
|
[
"MIT"
] | 1
|
2020-01-12T14:17:20.000Z
|
2020-01-12T14:17:20.000Z
|
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="explosig-data",
version="0.0.5",
author="Leiserson Research Group",
description="Process mutation data into standard formats originally developed for the ExploSig family of tools",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/lrgr/explosig-data",
packages=setuptools.find_packages(),
package_data={
'explosig_data': [
os.path.join('snakefiles', 'genes', 'human.smk'),
os.path.join('snakefiles', 'genomes', 'human.smk')
],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'requests>=2.22.0',
'pandas>=0.25.1',
'numpy>=1.17.0',
'snakemake>=5.3',
'biopython>=1.75',
'twobitreader>=3.1',
'tqdm>=4.39.0'
],
)
| 28.973684
| 116
| 0.60218
|
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="explosig-data",
version="0.0.5",
author="Leiserson Research Group",
description="Process mutation data into standard formats originally developed for the ExploSig family of tools",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/lrgr/explosig-data",
packages=setuptools.find_packages(),
package_data={
'explosig_data': [
os.path.join('snakefiles', 'genes', 'human.smk'),
os.path.join('snakefiles', 'genomes', 'human.smk')
],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'requests>=2.22.0',
'pandas>=0.25.1',
'numpy>=1.17.0',
'snakemake>=5.3',
'biopython>=1.75',
'twobitreader>=3.1',
'tqdm>=4.39.0'
],
)
| true
| true
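A small sketch of how the two snakefiles declared in package_data above can be located once the package is installed. pkg_resources ships with setuptools; the lookup below is an assumed usage pattern, not code from the repository.

# Sketch: resolving a data file bundled via package_data (assumes
# explosig-data is pip-installed in the current environment).
import os
import pkg_resources

smk = pkg_resources.resource_filename(
    'explosig_data', os.path.join('snakefiles', 'genes', 'human.smk'))
print(smk)  # absolute path to the installed snakefile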
|
1c43af043c358960ef291435ccf748377f5039a2
| 914
|
py
|
Python
|
tf_autoaugment/transforms/color.py
|
sebastian-sz/tf-autoaugment
|
6807f5095df1b842a8a17265dc2361165f5d1658
|
[
"MIT"
] | null | null | null |
tf_autoaugment/transforms/color.py
|
sebastian-sz/tf-autoaugment
|
6807f5095df1b842a8a17265dc2361165f5d1658
|
[
"MIT"
] | null | null | null |
tf_autoaugment/transforms/color.py
|
sebastian-sz/tf-autoaugment
|
6807f5095df1b842a8a17265dc2361165f5d1658
|
[
"MIT"
] | null | null | null |
"""Code for Color transform."""
import tensorflow as tf
from tf_autoaugment.image_utils import blend_batch
from tf_autoaugment.transforms.base_transform import BaseTransform
class Color(BaseTransform):
"""Implements Color Transform."""
def __call__(self, images: tf.Tensor, level: tf.Tensor) -> tf.Tensor:
"""Parse level and run color function on image batch."""
factor = self._parse_level(level)
return self.color(images=images, factor=factor)
@staticmethod
def color(images: tf.Tensor, factor: tf.Tensor) -> tf.Tensor:
"""Adjust the color balance of the image."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(images))
return blend_batch(degenerate, images, factor)
def _parse_level(self, level: tf.Tensor) -> tf.Tensor:
result = (level / self._MAX_LEVEL) * 1.8 + 0.1
return tf.cast(result, tf.float32)
| 36.56
| 81
| 0.695842
|
import tensorflow as tf
from tf_autoaugment.image_utils import blend_batch
from tf_autoaugment.transforms.base_transform import BaseTransform
class Color(BaseTransform):
def __call__(self, images: tf.Tensor, level: tf.Tensor) -> tf.Tensor:
factor = self._parse_level(level)
return self.color(images=images, factor=factor)
@staticmethod
def color(images: tf.Tensor, factor: tf.Tensor) -> tf.Tensor:
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(images))
return blend_batch(degenerate, images, factor)
def _parse_level(self, level: tf.Tensor) -> tf.Tensor:
result = (level / self._MAX_LEVEL) * 1.8 + 0.1
return tf.cast(result, tf.float32)
| true
| true
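A worked sketch of the level-to-factor mapping in _parse_level, assuming _MAX_LEVEL = 10 (the usual AutoAugment convention; the real constant lives in BaseTransform, which this record does not show).

# Sketch: Color's magnitude mapping, assuming _MAX_LEVEL = 10.
_MAX_LEVEL = 10

def parse_level(level: float) -> float:
    return (level / _MAX_LEVEL) * 1.8 + 0.1

for level in (0, 5, 10):
    print(level, round(parse_level(level), 2))
# 0 -> 0.1 (nearly grayscale), 5 -> 1.0 (blend leaves the image unchanged),
# 10 -> 1.9 (strongly oversaturated)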
|
1c43af4eb0282ba7f121ee3ea1d70ccdbd2d1c04
| 346
|
py
|
Python
|
Exercise_10_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_10_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_10_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
# Guest Book:
while True:
User_name = input("Please enter your name or enter 'q' to exit:")
if User_name == 'q':
break
else:
with open('guest_book.txt','a') as file_object:
file_object.write(User_name)
file_object.write('\n')
print("You can check your name in file guest_book.txt")
| 28.833333
| 69
| 0.595376
|
while True:
User_name = input("Please enter your name or enter 'q' to exit:")
if User_name == 'q':
break
else:
with open('guest_book.txt','a') as file_object:
file_object.write(User_name)
file_object.write('\n')
print("You can check your name in file guest_book.txt")
| true
| true
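A companion sketch that reads the guest book back, assuming the exercise above has already written some names to guest_book.txt.

# Sketch: printing every name collected by the guest-book loop above.
with open('guest_book.txt') as file_object:
    for line in file_object:
        print(f"Welcome back, {line.strip()}!")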
|
1c43af6b0a0ef55b8648c8fefafcc6067e451792
| 379
|
py
|
Python
|
app/v1/__init__.py
|
FormatMemory/django_api_backend
|
690439ad612598c86c22a837bc0f2e5bea74f2d2
|
[
"MIT"
] | null | null | null |
app/v1/__init__.py
|
FormatMemory/django_api_backend
|
690439ad612598c86c22a837bc0f2e5bea74f2d2
|
[
"MIT"
] | 8
|
2021-03-18T23:26:33.000Z
|
2022-03-11T23:44:22.000Z
|
app/v1/__init__.py
|
FormatMemory/django_api_backend
|
690439ad612598c86c22a837bc0f2e5bea74f2d2
|
[
"MIT"
] | null | null | null |
import os
ENVIRONMENT = os.environ.get('ENVIRONMENT', 'local')
if ENVIRONMENT == 'production':
import pymysql
pymysql.install_as_MySQLdb()
# else:
# import subprocess
# os.chdir("../")
# bashCommand = "bash scripts/load_sample_data.sh"
# process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
# output, error = process.communicate()
| 29.153846
| 77
| 0.691293
|
import os
ENVIRONMENT = os.environ.get('ENVIRONMENT', 'local')
if ENVIRONMENT == 'production':
import pymysql
pymysql.install_as_MySQLdb()
| true
| true
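A short sketch of what the production branch above achieves: pymysql.install_as_MySQLdb() registers PyMySQL under the MySQLdb module name so Django's MySQL backend can import it without the C driver. Shown in isolation, as an assumption about intent.

# Sketch: the PyMySQL shim in isolation (assumes pymysql is installed).
import pymysql

pymysql.install_as_MySQLdb()
import MySQLdb  # now resolves to the pure-Python pymysql package
print(MySQLdb.__name__)  # typically prints 'pymysql'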
|
1c43b2b5f81f62dcbcaa6b27b888a15112bf38b1
| 1,587
|
py
|
Python
|
tessled/effects/animations/exploringbox.py
|
hodgestar/tesseract-control-software
|
41f47a4b901a0069f1745c90abe28f0778704b0e
|
[
"MIT"
] | 2
|
2019-07-13T14:15:30.000Z
|
2020-01-04T10:44:47.000Z
|
tessled/effects/animations/exploringbox.py
|
hodgestar/tesseract-control-software
|
41f47a4b901a0069f1745c90abe28f0778704b0e
|
[
"MIT"
] | null | null | null |
tessled/effects/animations/exploringbox.py
|
hodgestar/tesseract-control-software
|
41f47a4b901a0069f1745c90abe28f0778704b0e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Exploring Box animation.
Inquisitive pair of cubes of LEDs.
"""
import random
from ..engine import Animation
from ..sprites import Cube
class SpiralPath:
def __init__(self, margin, offset=0.0):
steps_x = 8 - margin[0]
steps_y = 8 - margin[1]
self._max_z = 8 - margin[2]
self._xy = []
self._xy += zip([0] * steps_y, range(0, steps_y))
self._xy += zip(range(0, steps_x), [steps_y] * steps_x)
self._xy += zip([steps_x] * steps_y, range(steps_y - 1, -1, -1))
self._xy += zip(range(steps_x - 1, -1, -1), [0] * steps_x)
self._t = int(len(self._xy) * offset)
def next(self):
once_around = len(self._xy)
pz = self._t // once_around
r = self._t % once_around
px, py = self._xy[r]
self._t += 1
self._t %= (self._max_z + 1) * once_around
return (px, py, pz)
class ExploringBox(Animation):
ANIMATION = __name__
ARGS = {
}
def post_init(self):
size1 = random.choice([2, 3, 4, 5])
self._cube1 = Cube(size=size1)
self._spiral_path1 = SpiralPath(
margin=(size1, size1, size1))
size2 = random.choice([2, 3, 4, 5])
self._cube2 = Cube(size=size2)
self._spiral_path2 = SpiralPath(
margin=(size2, size2, size2),
offset=0.5)
def render(self, frame):
self._cube1.pos = self._spiral_path1.next()
self._cube1.render(frame)
self._cube2.pos = self._spiral_path2.next()
self._cube2.render(frame)
| 26.016393
| 72
| 0.562067
|
import random
from ..engine import Animation
from ..sprites import Cube
class SpiralPath:
def __init__(self, margin, offset=0.0):
steps_x = 8 - margin[0]
steps_y = 8 - margin[1]
self._max_z = 8 - margin[2]
self._xy = []
self._xy += zip([0] * steps_y, range(0, steps_y))
self._xy += zip(range(0, steps_x), [steps_y] * steps_x)
self._xy += zip([steps_x] * steps_y, range(steps_y - 1, -1, -1))
self._xy += zip(range(steps_x - 1, -1, -1), [0] * steps_x)
self._t = int(len(self._xy) * offset)
def next(self):
once_around = len(self._xy)
pz = self._t // once_around
r = self._t % once_around
px, py = self._xy[r]
self._t += 1
self._t %= (self._max_z + 1) * once_around
return (px, py, pz)
class ExploringBox(Animation):
ANIMATION = __name__
ARGS = {
}
def post_init(self):
size1 = random.choice([2, 3, 4, 5])
self._cube1 = Cube(size=size1)
self._spiral_path1 = SpiralPath(
margin=(size1, size1, size1))
size2 = random.choice([2, 3, 4, 5])
self._cube2 = Cube(size=size2)
self._spiral_path2 = SpiralPath(
margin=(size2, size2, size2),
offset=0.5)
def render(self, frame):
self._cube1.pos = self._spiral_path1.next()
self._cube1.render(frame)
self._cube2.pos = self._spiral_path2.next()
self._cube2.render(frame)
| true
| true
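A tracing sketch for SpiralPath, assuming the class from the record above is in scope. For margin (2, 2, 2) the path has a 24-step perimeter (steps_x = steps_y = 6) and bumps z after each full lap.

# Sketch: first steps of SpiralPath for a size-2 cube (margin (2, 2, 2)).
path = SpiralPath(margin=(2, 2, 2))
print([path.next() for _ in range(4)])
# [(0, 0, 0), (0, 1, 0), (0, 2, 0), (0, 3, 0)] -- walking up one edge of
# the xy perimeter; z increments once per 24-step lap.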
|
1c43b420d215966a9d094b82adc894d33e04ba72
| 8,997
|
py
|
Python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/aio/_web_site_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/aio/_web_site_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/aio/_web_site_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import WebSiteManagementClientConfiguration
from .operations import AppServiceCertificateOrdersOperations
from .operations import CertificateRegistrationProviderOperations
from .operations import DomainsOperations
from .operations import TopLevelDomainsOperations
from .operations import DomainRegistrationProviderOperations
from .operations import CertificatesOperations
from .operations import DeletedWebAppsOperations
from .operations import DiagnosticsOperations
from .operations import ProviderOperations
from .operations import RecommendationsOperations
from .operations import WebSiteManagementClientOperationsMixin
from .operations import WebAppsOperations
from .operations import StaticSitesOperations
from .operations import AppServiceEnvironmentsOperations
from .operations import AppServicePlansOperations
from .operations import ResourceHealthMetadataOperations
from .. import models
class WebSiteManagementClient(WebSiteManagementClientOperationsMixin):
"""WebSite Management Client.
:ivar app_service_certificate_orders: AppServiceCertificateOrdersOperations operations
:vartype app_service_certificate_orders: azure.mgmt.web.v2020_09_01.aio.operations.AppServiceCertificateOrdersOperations
:ivar certificate_registration_provider: CertificateRegistrationProviderOperations operations
:vartype certificate_registration_provider: azure.mgmt.web.v2020_09_01.aio.operations.CertificateRegistrationProviderOperations
:ivar domains: DomainsOperations operations
:vartype domains: azure.mgmt.web.v2020_09_01.aio.operations.DomainsOperations
:ivar top_level_domains: TopLevelDomainsOperations operations
:vartype top_level_domains: azure.mgmt.web.v2020_09_01.aio.operations.TopLevelDomainsOperations
:ivar domain_registration_provider: DomainRegistrationProviderOperations operations
:vartype domain_registration_provider: azure.mgmt.web.v2020_09_01.aio.operations.DomainRegistrationProviderOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates: azure.mgmt.web.v2020_09_01.aio.operations.CertificatesOperations
:ivar deleted_web_apps: DeletedWebAppsOperations operations
:vartype deleted_web_apps: azure.mgmt.web.v2020_09_01.aio.operations.DeletedWebAppsOperations
:ivar diagnostics: DiagnosticsOperations operations
:vartype diagnostics: azure.mgmt.web.v2020_09_01.aio.operations.DiagnosticsOperations
:ivar provider: ProviderOperations operations
:vartype provider: azure.mgmt.web.v2020_09_01.aio.operations.ProviderOperations
:ivar recommendations: RecommendationsOperations operations
:vartype recommendations: azure.mgmt.web.v2020_09_01.aio.operations.RecommendationsOperations
:ivar web_apps: WebAppsOperations operations
:vartype web_apps: azure.mgmt.web.v2020_09_01.aio.operations.WebAppsOperations
:ivar static_sites: StaticSitesOperations operations
:vartype static_sites: azure.mgmt.web.v2020_09_01.aio.operations.StaticSitesOperations
:ivar app_service_environments: AppServiceEnvironmentsOperations operations
:vartype app_service_environments: azure.mgmt.web.v2020_09_01.aio.operations.AppServiceEnvironmentsOperations
:ivar app_service_plans: AppServicePlansOperations operations
:vartype app_service_plans: azure.mgmt.web.v2020_09_01.aio.operations.AppServicePlansOperations
:ivar resource_health_metadata: ResourceHealthMetadataOperations operations
:vartype resource_health_metadata: azure.mgmt.web.v2020_09_01.aio.operations.ResourceHealthMetadataOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = WebSiteManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.app_service_certificate_orders = AppServiceCertificateOrdersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.certificate_registration_provider = CertificateRegistrationProviderOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domains = DomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.top_level_domains = TopLevelDomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domain_registration_provider = DomainRegistrationProviderOperations(
self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.deleted_web_apps = DeletedWebAppsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.diagnostics = DiagnosticsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.provider = ProviderOperations(
self._client, self._config, self._serialize, self._deserialize)
self.recommendations = RecommendationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.web_apps = WebAppsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.static_sites = StaticSitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.app_service_environments = AppServiceEnvironmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.app_service_plans = AppServicePlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_health_metadata = ResourceHealthMetadataOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "WebSiteManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| 58.422078
| 132
| 0.7667
|
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import WebSiteManagementClientConfiguration
from .operations import AppServiceCertificateOrdersOperations
from .operations import CertificateRegistrationProviderOperations
from .operations import DomainsOperations
from .operations import TopLevelDomainsOperations
from .operations import DomainRegistrationProviderOperations
from .operations import CertificatesOperations
from .operations import DeletedWebAppsOperations
from .operations import DiagnosticsOperations
from .operations import ProviderOperations
from .operations import RecommendationsOperations
from .operations import WebSiteManagementClientOperationsMixin
from .operations import WebAppsOperations
from .operations import StaticSitesOperations
from .operations import AppServiceEnvironmentsOperations
from .operations import AppServicePlansOperations
from .operations import ResourceHealthMetadataOperations
from .. import models
class WebSiteManagementClient(WebSiteManagementClientOperationsMixin):
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = WebSiteManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.app_service_certificate_orders = AppServiceCertificateOrdersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.certificate_registration_provider = CertificateRegistrationProviderOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domains = DomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.top_level_domains = TopLevelDomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domain_registration_provider = DomainRegistrationProviderOperations(
self._client, self._config, self._serialize, self._deserialize)
self.certificates = CertificatesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.deleted_web_apps = DeletedWebAppsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.diagnostics = DiagnosticsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.provider = ProviderOperations(
self._client, self._config, self._serialize, self._deserialize)
self.recommendations = RecommendationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.web_apps = WebAppsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.static_sites = StaticSitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.app_service_environments = AppServiceEnvironmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.app_service_plans = AppServicePlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_health_metadata = ResourceHealthMetadataOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "WebSiteManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| true
| true
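A hedged construction sketch for the async client above. It assumes the azure-identity package, a configured credential environment, and that the client class from the record is importable; none of that is part of this record, and the subscription ID is a placeholder.

# Sketch: constructing and closing the client (assumes azure-identity
# and that WebSiteManagementClient from the record is in scope).
import asyncio
from azure.identity.aio import DefaultAzureCredential

async def main():
    credential = DefaultAzureCredential()
    async with WebSiteManagementClient(credential, "<subscription-id>") as client:
        pass  # e.g. iterate client.web_apps.list() here
    await credential.close()

asyncio.run(main())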
|
1c43b4def4cac1a4ef50874622f19a0f859adacf
| 2,826
|
py
|
Python
|
q2_qemistree/tests/test_process_fingerprint.py
|
fedarko/q2-qemistree
|
611ab6d38fed8f59dda35984e49fa73d7431bc50
|
[
"BSD-2-Clause"
] | 1
|
2020-01-22T02:09:11.000Z
|
2020-01-22T02:09:11.000Z
|
q2_qemistree/tests/test_process_fingerprint.py
|
fedarko/q2-qemistree
|
611ab6d38fed8f59dda35984e49fa73d7431bc50
|
[
"BSD-2-Clause"
] | null | null | null |
q2_qemistree/tests/test_process_fingerprint.py
|
fedarko/q2-qemistree
|
611ab6d38fed8f59dda35984e49fa73d7431bc50
|
[
"BSD-2-Clause"
] | 1
|
2021-11-02T02:53:25.000Z
|
2021-11-02T02:53:25.000Z
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
from biom import load_table
import pandas as pd
import os
import pkg_resources
import qiime2
from q2_qemistree import CSIDirFmt
from q2_qemistree._process_fingerprint import (collate_fingerprint,
get_feature_smiles)
data = pkg_resources.resource_filename('q2_qemistree', 'data')
class FingerprintTests(TestCase):
def setUp(self):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
self.featureTable = os.path.join(THIS_DIR,
'data/features_formated.biom')
self.emptycsi = os.path.join(os.path.join(THIS_DIR,
'data/emptycsi'))
self.goodcsi = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/csiFolder.qza'))
properties_path = os.path.join(data, 'molecular_properties.csv')
self.properties = pd.read_csv(properties_path, dtype=str, sep='\t')
self.properties.set_index('absoluteIndex', inplace=True)
def test_fingerprintOut(self):
msg = "Fingerprint file is empty!"
with self.assertRaisesRegex(ValueError, msg):
collate_fingerprint(self.emptycsi)
def test_featureMatch1(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi)
features = load_table(self.featureTable)
allfeatrs = set(features.ids(axis='observation'))
fpfeatrs = set(tablefp.index)
self.assertEqual(fpfeatrs <= allfeatrs, True)
def test_featureMatch2(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi)
smiles = get_feature_smiles(goodcsi, tablefp)
fpfeatrs = set(tablefp.index)
smlfeatrs = set(smiles.index)
self.assertEqual(fpfeatrs == smlfeatrs, True)
def test_pubchemTrue(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi, qc_properties=True)
indx = self.properties.loc[self.properties.type == 'PUBCHEM'].index
self.assertEqual(set(tablefp.columns) == set(indx), True)
def test_pubchemFalse(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi)
indx = self.properties.index
self.assertEqual(set(tablefp.columns) == set(indx), True)
if __name__ == '__main__':
main()
| 39.25
| 79
| 0.617127
|
from unittest import TestCase, main
from biom import load_table
import pandas as pd
import os
import pkg_resources
import qiime2
from q2_qemistree import CSIDirFmt
from q2_qemistree._process_fingerprint import (collate_fingerprint,
get_feature_smiles)
data = pkg_resources.resource_filename('q2_qemistree', 'data')
class FingerprintTests(TestCase):
def setUp(self):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
self.featureTable = os.path.join(THIS_DIR,
'data/features_formated.biom')
self.emptycsi = os.path.join(os.path.join(THIS_DIR,
'data/emptycsi'))
self.goodcsi = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/csiFolder.qza'))
properties_path = os.path.join(data, 'molecular_properties.csv')
self.properties = pd.read_csv(properties_path, dtype=str, sep='\t')
self.properties.set_index('absoluteIndex', inplace=True)
def test_fingerprintOut(self):
msg = "Fingerprint file is empty!"
with self.assertRaisesRegex(ValueError, msg):
collate_fingerprint(self.emptycsi)
def test_featureMatch1(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi)
features = load_table(self.featureTable)
allfeatrs = set(features.ids(axis='observation'))
fpfeatrs = set(tablefp.index)
self.assertEqual(fpfeatrs <= allfeatrs, True)
def test_featureMatch2(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi)
smiles = get_feature_smiles(goodcsi, tablefp)
fpfeatrs = set(tablefp.index)
smlfeatrs = set(smiles.index)
self.assertEqual(fpfeatrs == smlfeatrs, True)
def test_pubchemTrue(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi, qc_properties=True)
indx = self.properties.loc[self.properties.type == 'PUBCHEM'].index
self.assertEqual(set(tablefp.columns) == set(indx), True)
def test_pubchemFalse(self):
goodcsi = self.goodcsi.view(CSIDirFmt)
tablefp = collate_fingerprint(goodcsi)
indx = self.properties.index
self.assertEqual(set(tablefp.columns) == set(indx), True)
if __name__ == '__main__':
main()
| true
| true
|
1c43b5518f82afd11244e10c06cfe4ffc4dec58b
| 1,730
|
py
|
Python
|
web_server/users/views.py
|
ColeBoytinck/cmput404-group-project
|
133e118fe8453b13f0d5afdf7b8d625eba9e4086
|
[
"MIT"
] | null | null | null |
web_server/users/views.py
|
ColeBoytinck/cmput404-group-project
|
133e118fe8453b13f0d5afdf7b8d625eba9e4086
|
[
"MIT"
] | null | null | null |
web_server/users/views.py
|
ColeBoytinck/cmput404-group-project
|
133e118fe8453b13f0d5afdf7b8d625eba9e4086
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect
from django.contrib.auth import login
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm
from django.contrib.auth import views as auth_views
from django.urls import reverse
class CustomLogin(auth_views.LoginView):
def form_valid(self, form):
login(self.request, form.get_user())
# set expiration of the current login session
# a single login is alive for 10hrs
self.request.session.set_expiry(36000)
return HttpResponseRedirect(self.get_success_url())
@login_required
def profile(request,user_id):
return render(request, 'users/profile.html', {'user_id':user_id})
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
# wait for admin permission to activate account
user.is_active = False
host = request.get_host()
url = host + "/author/" + str(user.id.hex)
# set user url
user.url = url
# set user id
# format: 127.0.0.1:5454/author/de305d54-75b4-431b-adb2-eb6b9e546013
user.uid = url
# set user host
user.host = host
# update database entry of current user
user.save()
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
def mandala(request):
return render(request, 'users/mandala.html')
| 32.641509
| 82
| 0.660694
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect
from django.contrib.auth import login
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm
from django.contrib.auth import views as auth_views
from django.urls import reverse
class CustomLogin(auth_views.LoginView):
def form_valid(self, form):
login(self.request, form.get_user())
self.request.session.set_expiry(36000)
return HttpResponseRedirect(self.get_success_url())
@login_required
def profile(request,user_id):
return render(request, 'users/profile.html', {'user_id':user_id})
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
host = request.get_host()
url = host + "/author/" + str(user.id.hex)
user.url = url
user.uid = url
user.host = host
user.save()
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
def mandala(request):
return render(request, 'users/mandala.html')
| true
| true
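A small sketch of the author-URL assembly in register() above, using a hypothetical host and the UUID from the source comment. Note that .hex drops the dashes the comment shows.

# Sketch: the uid/url format built in register() (host is hypothetical).
import uuid

host = "127.0.0.1:5454"
user_id = uuid.UUID("de305d54-75b4-431b-adb2-eb6b9e546013")
url = host + "/author/" + str(user_id.hex)
print(url)  # 127.0.0.1:5454/author/de305d5475b4431badb2eb6b9e546013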
|
1c43b5a1abad2c7406e4069e04015d92cba013a2
| 1,523
|
py
|
Python
|
flax/linen/__init__.py
|
psc-g/flax
|
3ebc3e9751c3d28a78e715ba8a738e56ec1298fb
|
[
"Apache-2.0"
] | null | null | null |
flax/linen/__init__.py
|
psc-g/flax
|
3ebc3e9751c3d28a78e715ba8a738e56ec1298fb
|
[
"Apache-2.0"
] | null | null | null |
flax/linen/__init__.py
|
psc-g/flax
|
3ebc3e9751c3d28a78e715ba8a738e56ec1298fb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linen Neural Network api."""
# pylint: disable=g-multiple-import
# re-export commonly used modules and functions
from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
log_softmax, relu, sigmoid, soft_sign, softmax,
softplus, swish, silu, tanh)
from .attention import (MultiHeadDotProductAttention, SelfAttention,
dot_product_attention, make_attention_mask,
make_causal_mask, combine_masks)
from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
from .module import Module, compact, enable_named_call, disable_named_call, Variable
from .normalization import BatchNorm, GroupNorm, LayerNorm
from .pooling import avg_pool, max_pool
from .recurrent import GRUCell, LSTMCell, ConvLSTM
from .stochastic import Dropout
from .transforms import jit, named_call, remat, scan, vmap
# pylint: enable=g-multiple-import
| 43.514286
| 84
| 0.73933
|
from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
log_softmax, relu, sigmoid, soft_sign, softmax,
softplus, swish, silu, tanh)
from .attention import (MultiHeadDotProductAttention, SelfAttention,
dot_product_attention, make_attention_mask,
make_causal_mask, combine_masks)
from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
from .module import Module, compact, enable_named_call, disable_named_call, Variable
from .normalization import BatchNorm, GroupNorm, LayerNorm
from .pooling import avg_pool, max_pool
from .recurrent import GRUCell, LSTMCell, ConvLSTM
from .stochastic import Dropout
from .transforms import jit, named_call, remat, scan, vmap
| true
| true
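A minimal model built from the names re-exported above; it assumes jax and flax are installed and uses illustrative shapes.

# Sketch: a tiny compact Module using the re-exported linen API.
import jax
import jax.numpy as jnp
import flax.linen as nn

class MLP(nn.Module):
    @nn.compact
    def __call__(self, x):
        x = nn.relu(nn.Dense(16)(x))
        return nn.Dense(1)(x)

x = jnp.ones((4, 8))
params = MLP().init(jax.random.PRNGKey(0), x)
y = MLP().apply(params, x)  # shape (4, 1)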
|
1c43b639de52cca5596c31b8a080b350134776da
| 1,265
|
py
|
Python
|
pyxt/cmd/tests/test_python.py
|
millerdev/pyxt
|
64034dd17b47a81ebfd0160102919e1f84299a6d
|
[
"MIT"
] | 3
|
2021-01-10T18:32:15.000Z
|
2021-01-12T08:13:29.000Z
|
pyxt/cmd/tests/test_python.py
|
millerdev/pyxt
|
64034dd17b47a81ebfd0160102919e1f84299a6d
|
[
"MIT"
] | null | null | null |
pyxt/cmd/tests/test_python.py
|
millerdev/pyxt
|
64034dd17b47a81ebfd0160102919e1f84299a6d
|
[
"MIT"
] | null | null | null |
from testil import eq, Regex
from ...process import ProcessError
from ...tests.util import async_test, do_command, FakeEditor, gentest
def test_doc():
@gentest
@async_test
async def test(code, output, command="python"):
editor = FakeEditor(text=code)
result = await do_command(command, editor)
eq(result["items"], [{"label": output, "copy": True}])
assert result.get("filter_results"), result
yield test("1 + 1", "2")
yield test("print(1 + 1)", "2")
yield test(" 2 + 2", "4")
yield test(" print('hi')\n 2 + 2\n", "hi\n4")
yield test(
"""
def f(x):
return x
""",
"no output"
)
yield test(
"""
(1
+ 2)
""",
"3"
)
yield test(
"""
(1
+ 2)
# comment
""",
"3"
)
yield test(
"""
x = 4
y = 1;x + y
""",
"5"
)
yield test("1 + 1", "4", "python -c 'print(4)'")
@async_test
async def test_syntax_error():
editor = FakeEditor(text='print "not with python 3"')
try:
await do_command("python", editor)
except ProcessError as err:
eq(str(err), Regex("SyntaxError"))
| 21.440678
| 69
| 0.484585
|
from testil import eq, Regex
from ...process import ProcessError
from ...tests.util import async_test, do_command, FakeEditor, gentest
def test_doc():
@gentest
@async_test
async def test(code, output, command="python"):
editor = FakeEditor(text=code)
result = await do_command(command, editor)
eq(result["items"], [{"label": output, "copy": True}])
assert result.get("filter_results"), result
yield test("1 + 1", "2")
yield test("print(1 + 1)", "2")
yield test(" 2 + 2", "4")
yield test(" print('hi')\n 2 + 2\n", "hi\n4")
yield test(
"""
def f(x):
return x
""",
"no output"
)
yield test(
"""
(1
+ 2)
""",
"3"
)
yield test(
"""
(1
+ 2)
# comment
""",
"3"
)
yield test(
"""
x = 4
y = 1;x + y
""",
"5"
)
yield test("1 + 1", "4", "python -c 'print(4)'")
@async_test
async def test_syntax_error():
editor = FakeEditor(text='print "not with python 3"')
try:
await do_command("python", editor)
except ProcessError as err:
eq(str(err), Regex("SyntaxError"))
| true
| true
|
1c43b66e895ab1fc296ada51275d11e5502c75e8
| 2,859
|
py
|
Python
|
artemis/neural_models/text_emotional_clf.py
|
StanfordGeometryLab/artemis
|
2f540173bfd2da3a505715a260834533c3097ac7
|
[
"MIT"
] | 254
|
2021-01-20T08:24:53.000Z
|
2022-03-30T11:57:23.000Z
|
artemis/neural_models/text_emotional_clf.py
|
StanfordGeometryLab/artemis
|
2f540173bfd2da3a505715a260834533c3097ac7
|
[
"MIT"
] | 11
|
2021-03-26T21:49:22.000Z
|
2022-02-27T10:53:52.000Z
|
artemis/neural_models/text_emotional_clf.py
|
lyrs/artemis
|
e4938ef79b504e810e4d3634eab23781b06a926e
|
[
"MIT"
] | 21
|
2021-01-21T10:31:44.000Z
|
2022-03-24T15:54:05.000Z
|
"""
Given an utterance (and, optionally, an image) guess a distribution over the emotion labels.
The MIT License (MIT)
Originally created in 2020, for Python 3.x
Copyright (c) 2021 Panos Achlioptas (ai.stanford.edu/~optas) & Stanford Geometric Computing Lab
"""
import torch
import torch.nn.functional as F
from torch import nn
from tqdm.notebook import tqdm as tqdm_notebook
from ..utils.stats import AverageMeter
class TextEmotionClassifier(nn.Module):
def __init__(self, text_encoder, clf_head, img_encoder=None):
super(TextEmotionClassifier, self).__init__()
self.text_encoder = text_encoder
self.clf_head = clf_head
self.img_encoder = img_encoder
def __call__(self, text, img=None):
if img is not None:
img_feat = self.img_encoder(img)
feat = self.text_encoder(text, img_feat)
else:
feat = self.text_encoder(text)
logits = self.clf_head(feat)
return logits
def single_epoch_train(model, data_loader, use_vision, criterion, optimizer, device):
epoch_loss = AverageMeter()
epoch_acc = AverageMeter()
model.train()
for batch in tqdm_notebook(data_loader):
labels = batch['emotion'].to(device)
tokens = batch['tokens'].to(device)
if use_vision:
img = batch['image'].to(device)
logits = model(tokens, img)
else:
logits = model(tokens)
# Calculate loss
loss = criterion(logits, labels)
acc = torch.mean((logits.argmax(1) == labels).double())
# Back prop.
optimizer.zero_grad()
loss.backward()
optimizer.step()
b_size = len(labels)
epoch_loss.update(loss.item(), b_size)
epoch_acc.update(acc.item(), b_size)
return epoch_loss.avg, epoch_acc.avg
@torch.no_grad()
def evaluate_on_dataset(model, data_loader, use_vision, criterion, device, detailed=True):
epoch_loss = AverageMeter()
epoch_acc = AverageMeter()
model.eval()
epoch_confidence = []
for batch in tqdm_notebook(data_loader):
labels = batch['emotion'].to(device)
tokens = batch['tokens'].to(device)
if use_vision:
img = batch['image'].to(device)
logits = model(tokens, img)
else:
logits = model(tokens)
# Calculate loss
loss = criterion(logits, labels)
guessed_correct = logits.argmax(1) == labels
acc = torch.mean(guessed_correct.double())
if detailed:
epoch_confidence.append(F.softmax(logits, dim=-1).cpu())
b_size = len(labels)
epoch_loss.update(loss.item(), b_size)
epoch_acc.update(acc.item(), b_size)
if detailed:
epoch_confidence = torch.cat(epoch_confidence).numpy()
return epoch_loss.avg, epoch_acc.avg, epoch_confidence
| 30.414894
| 95
| 0.644281
|
import torch
import torch.nn.functional as F
from torch import nn
from tqdm.notebook import tqdm as tqdm_notebook
from ..utils.stats import AverageMeter
class TextEmotionClassifier(nn.Module):
def __init__(self, text_encoder, clf_head, img_encoder=None):
super(TextEmotionClassifier, self).__init__()
self.text_encoder = text_encoder
self.clf_head = clf_head
self.img_encoder = img_encoder
def __call__(self, text, img=None):
if img is not None:
img_feat = self.img_encoder(img)
feat = self.text_encoder(text, img_feat)
else:
feat = self.text_encoder(text)
logits = self.clf_head(feat)
return logits
def single_epoch_train(model, data_loader, use_vision, criterion, optimizer, device):
epoch_loss = AverageMeter()
epoch_acc = AverageMeter()
model.train()
for batch in tqdm_notebook(data_loader):
labels = batch['emotion'].to(device)
tokens = batch['tokens'].to(device)
if use_vision:
img = batch['image'].to(device)
logits = model(tokens, img)
else:
logits = model(tokens)
loss = criterion(logits, labels)
acc = torch.mean((logits.argmax(1) == labels).double())
optimizer.zero_grad()
loss.backward()
optimizer.step()
b_size = len(labels)
epoch_loss.update(loss.item(), b_size)
epoch_acc.update(acc.item(), b_size)
return epoch_loss.avg, epoch_acc.avg
@torch.no_grad()
def evaluate_on_dataset(model, data_loader, use_vision, criterion, device, detailed=True):
epoch_loss = AverageMeter()
epoch_acc = AverageMeter()
model.eval()
epoch_confidence = []
for batch in tqdm_notebook(data_loader):
labels = batch['emotion'].to(device)
tokens = batch['tokens'].to(device)
if use_vision:
img = batch['image'].to(device)
logits = model(tokens, img)
else:
logits = model(tokens)
loss = criterion(logits, labels)
guessed_correct = logits.argmax(1) == labels
acc = torch.mean(guessed_correct.double())
if detailed:
epoch_confidence.append(F.softmax(logits, dim=-1).cpu())
b_size = len(labels)
epoch_loss.update(loss.item(), b_size)
epoch_acc.update(acc.item(), b_size)
if detailed:
epoch_confidence = torch.cat(epoch_confidence).numpy()
return epoch_loss.avg, epoch_acc.avg, epoch_confidence
| true
| true
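A wiring sketch for one text-only epoch. The toy encoder, head, and single-batch "loader" below are hypothetical stand-ins for the real ArtEmis components, and the sketch assumes TextEmotionClassifier and single_epoch_train from the record are in scope (tqdm.notebook also expects a Jupyter environment).

# Sketch: one text-only epoch with toy components (stand-ins, not the
# real ArtEmis encoder/head).
import torch
from torch import nn

model = TextEmotionClassifier(
    text_encoder=nn.EmbeddingBag(1000, 64),  # toy: token ids -> 64-d
    clf_head=nn.Linear(64, 9),               # 9 ArtEmis emotion labels
)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
batches = [{'tokens': torch.randint(0, 1000, (8, 12)),
            'emotion': torch.randint(0, 9, (8,))}]
loss, acc = single_epoch_train(model, batches, use_vision=False,
                               criterion=criterion, optimizer=optimizer,
                               device='cpu')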
|
1c43b6cc0328e741416a790b6fc9f09366c36e66
| 6,270
|
py
|
Python
|
s64da_benchmark_toolkit/streams.py
|
a-masterov/s64da-benchmark-toolkit
|
b1af60c6a1c7d4c62ae9fb5ce04e2c7b59002b0b
|
[
"MIT"
] | 15
|
2020-03-27T09:40:12.000Z
|
2022-03-25T19:55:53.000Z
|
s64da_benchmark_toolkit/streams.py
|
a-masterov/s64da-benchmark-toolkit
|
b1af60c6a1c7d4c62ae9fb5ce04e2c7b59002b0b
|
[
"MIT"
] | 43
|
2020-03-25T10:03:11.000Z
|
2021-06-02T01:15:17.000Z
|
s64da_benchmark_toolkit/streams.py
|
a-masterov/s64da-benchmark-toolkit
|
b1af60c6a1c7d4c62ae9fb5ce04e2c7b59002b0b
|
[
"MIT"
] | 6
|
2020-10-08T11:32:40.000Z
|
2021-11-17T21:20:44.000Z
|
# -*- coding: utf-8 -*-
import logging
import os
import csv
import re
import time
from collections import namedtuple
from datetime import datetime
from multiprocessing import Manager, Pool
from natsort import natsorted
from pandas.io.formats.style import Styler
import pandas
import yaml
from .db import DB
from .reporting import Reporting, QueryMetric
Benchmark = namedtuple('Benchmark', ['name', 'base_dir'])
LOG = logging.getLogger()
class Streams:
def __init__(self, args, benchmark):
# The Output structure:
#
# s64da-benchmark-toolkit/
# ├── results/
# │ ├── results.csv
# │ ├── report.html
# │ ├── query_results/
# │ │ ├── 0_1.csv
# │ │ ├── 0_2.csv
# │ ├── query_plans/
# │ │ ├── 0_1.txt
# │ │ ├── 0_2.txt
self.config = Streams._make_config(args, benchmark)
self.db = DB(args.dsn)
self.num_streams = args.streams
self.benchmark = benchmark
self.stream_offset = args.stream_offset
self.scale_factor = args.scale_factor
self.query_dir = self._get_query_dir()
self.explain_analyze = args.explain_analyze
self.use_server_side_cursors = args.use_server_side_cursors
self.reporting = Reporting(benchmark, args, self.config)
@staticmethod
def _make_config(args, benchmark):
config = {}
config_file = args.config or f'benchmarks/{benchmark.name}/configs/default.yaml'
with open(config_file, 'r') as conf_file:
config = yaml.load(conf_file, Loader=yaml.Loader)
if args.timeout:
config['timeout'] = args.timeout
return config
def _get_query_dir(self):
_dir = os.path.join(self.benchmark.base_dir, 'queries')
if os.path.isdir(os.path.join(_dir, f'queries_{self.scale_factor}')):
_dir = os.path.join(_dir, f'queries_{self.scale_factor}')
return _dir
def read_sql_file(self, query_id):
query_path = os.path.join(self.query_dir, f'{query_id}.sql')
with open(query_path, 'r') as query_file:
return query_file.read()
@staticmethod
def apply_sql_modifications(sql, modifications):
for modification in modifications:
sql = sql.replace(modification[0], modification[1])
return sql
def run(self):
dbconfig = self.config.get('dbconfig')
try:
mp_manager = Manager()
reporting_queue = mp_manager.Queue()
if dbconfig:
self.db.reset_config()
self.db.apply_config(dbconfig)
self.run_streams(reporting_queue)
except KeyboardInterrupt:
            # Interrupted: cleanup and reporting happen in the finally block.
pass
finally:
self.reporting.run_report(reporting_queue)
if dbconfig:
self.db.reset_config()
def get_stream_sequence(self, stream_id):
streams_path = os.path.join(self.benchmark.base_dir, 'queries', 'streams.yaml')
with open(streams_path, 'r') as streams_file:
try:
return yaml.load(streams_file, Loader=yaml.Loader)[stream_id]
except KeyError:
raise ValueError(f'Stream file {streams_path} does not contain stream id {stream_id}')
def _make_run_args(self, reporting_queue):
if self.num_streams == 0:
return ((reporting_queue, 0),)
else:
return tuple((reporting_queue, stream) for stream in
range(self.stream_offset, self.num_streams + self.stream_offset))
def run_streams(self, reporting_queue):
with Pool(processes=max(self.num_streams, 1)) as pool:
map_args = self._make_run_args(reporting_queue)
pool.starmap(self._run_stream, map_args)
def _run_query(self, stream_id, query_id):
query_sql = self.read_sql_file(query_id)
query_sql = Streams.apply_sql_modifications(query_sql, (
('revenue0', f'revenue{stream_id}'),))
timeout = self.config.get('timeout', 0)
return self.db.run_query(query_sql, timeout, self.explain_analyze, self.use_server_side_cursors)
def _run_stream(self, reporting_queue, stream_id):
sequence = self.get_stream_sequence(stream_id)
num_queries = len(sequence)
timeout = Streams.parse_timeout(self.config.get('timeout', 0))
for idx, query_id in enumerate(sequence):
num_query = idx + 1
pretext = f'{num_query:2}/{num_queries:2}: query {query_id:2} of stream {stream_id:2}'
if query_id in self.config.get('ignore', []):
LOG.info(f'ignoring {pretext}.')
reporting_queue.put(QueryMetric(
stream_id=stream_id,
query_id=query_id,
timestamp_start=time.time(),
timestamp_stop=time.time() + timeout,
status="IGNORED",
result=None,
plan=None
))
else:
LOG.info(f'running {pretext}.')
timing, query_result, plan = self._run_query(stream_id, query_id)
timestamp_stop = timing.stop if timing.status.name == 'OK' else timing.start + timeout
runtime = timing.stop - timing.start
LOG.info(f'finished {pretext}: {runtime:.2f}s {timing.status.name}')
reporting_queue.put(QueryMetric(
stream_id=stream_id,
query_id=query_id,
timestamp_start=timing.start,
timestamp_stop=timestamp_stop,
status=timing.status.name,
result=query_result,
plan=plan
))
@staticmethod
def parse_timeout(timeout):
valid_units = {'ms': 1, 's': 1000, 'min': 60000, 'h': 3.6e+6, 'd': 8.64e+7}
        match = re.match(r"(?P<tm>\d+)\s*(?P<unit>\w*)", str(timeout))
if match:
tm = match.group('tm')
unit = match.group('unit')
if not unit:
unit = 'ms'
return int(tm) * valid_units.get(unit, 1) // 1000
return None
| 33.891892
| 104
| 0.586762
|
import logging
import os
import csv
import re
import time
from collections import namedtuple
from datetime import datetime
from multiprocessing import Manager, Pool
from natsort import natsorted
from pandas.io.formats.style import Styler
import pandas
import yaml
from .db import DB
from .reporting import Reporting, QueryMetric
Benchmark = namedtuple('Benchmark', ['name', 'base_dir'])
LOG = logging.getLogger()
class Streams:
def __init__(self, args, benchmark):
self.config = Streams._make_config(args, benchmark)
self.db = DB(args.dsn)
self.num_streams = args.streams
self.benchmark = benchmark
self.stream_offset = args.stream_offset
self.scale_factor = args.scale_factor
self.query_dir = self._get_query_dir()
self.explain_analyze = args.explain_analyze
self.use_server_side_cursors = args.use_server_side_cursors
self.reporting = Reporting(benchmark, args, self.config)
@staticmethod
def _make_config(args, benchmark):
config = {}
config_file = args.config or f'benchmarks/{benchmark.name}/configs/default.yaml'
with open(config_file, 'r') as conf_file:
config = yaml.load(conf_file, Loader=yaml.Loader)
if args.timeout:
config['timeout'] = args.timeout
return config
def _get_query_dir(self):
_dir = os.path.join(self.benchmark.base_dir, 'queries')
if os.path.isdir(os.path.join(_dir, f'queries_{self.scale_factor}')):
_dir = os.path.join(_dir, f'queries_{self.scale_factor}')
return _dir
def read_sql_file(self, query_id):
query_path = os.path.join(self.query_dir, f'{query_id}.sql')
with open(query_path, 'r') as query_file:
return query_file.read()
@staticmethod
def apply_sql_modifications(sql, modifications):
for modification in modifications:
sql = sql.replace(modification[0], modification[1])
return sql
def run(self):
dbconfig = self.config.get('dbconfig')
try:
mp_manager = Manager()
reporting_queue = mp_manager.Queue()
if dbconfig:
self.db.reset_config()
self.db.apply_config(dbconfig)
self.run_streams(reporting_queue)
except KeyboardInterrupt:
pass
finally:
self.reporting.run_report(reporting_queue)
if dbconfig:
self.db.reset_config()
def get_stream_sequence(self, stream_id):
streams_path = os.path.join(self.benchmark.base_dir, 'queries', 'streams.yaml')
with open(streams_path, 'r') as streams_file:
try:
return yaml.load(streams_file, Loader=yaml.Loader)[stream_id]
except KeyError:
raise ValueError(f'Stream file {streams_path} does not contain stream id {stream_id}')
def _make_run_args(self, reporting_queue):
if self.num_streams == 0:
return ((reporting_queue, 0),)
else:
return tuple((reporting_queue, stream) for stream in
range(self.stream_offset, self.num_streams + self.stream_offset))
def run_streams(self, reporting_queue):
with Pool(processes=max(self.num_streams, 1)) as pool:
map_args = self._make_run_args(reporting_queue)
pool.starmap(self._run_stream, map_args)
def _run_query(self, stream_id, query_id):
query_sql = self.read_sql_file(query_id)
query_sql = Streams.apply_sql_modifications(query_sql, (
('revenue0', f'revenue{stream_id}'),))
timeout = self.config.get('timeout', 0)
return self.db.run_query(query_sql, timeout, self.explain_analyze, self.use_server_side_cursors)
def _run_stream(self, reporting_queue, stream_id):
sequence = self.get_stream_sequence(stream_id)
num_queries = len(sequence)
timeout = Streams.parse_timeout(self.config.get('timeout', 0))
for idx, query_id in enumerate(sequence):
num_query = idx + 1
pretext = f'{num_query:2}/{num_queries:2}: query {query_id:2} of stream {stream_id:2}'
if query_id in self.config.get('ignore', []):
LOG.info(f'ignoring {pretext}.')
reporting_queue.put(QueryMetric(
stream_id=stream_id,
query_id=query_id,
timestamp_start=time.time(),
timestamp_stop=time.time() + timeout,
status="IGNORED",
result=None,
plan=None
))
else:
LOG.info(f'running {pretext}.')
timing, query_result, plan = self._run_query(stream_id, query_id)
timestamp_stop = timing.stop if timing.status.name == 'OK' else timing.start + timeout
runtime = timing.stop - timing.start
LOG.info(f'finished {pretext}: {runtime:.2f}s {timing.status.name}')
reporting_queue.put(QueryMetric(
stream_id=stream_id,
query_id=query_id,
timestamp_start=timing.start,
timestamp_stop=timestamp_stop,
status=timing.status.name,
result=query_result,
plan=plan
))
@staticmethod
def parse_timeout(timeout):
valid_units = {'ms': 1, 's': 1000, 'min': 60000, 'h': 3.6e+6, 'd': 8.64e+7}
        match = re.match(r"(?P<tm>\d+)\s*(?P<unit>\w*)", str(timeout))
if match:
tm = match.group('tm')
unit = match.group('unit')
if not unit:
unit = 'ms'
return int(tm) * valid_units.get(unit, 1) // 1000
return None
| true
| true
|
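As a quick illustration of `parse_timeout` in the record above (a usage sketch, not part of the original file; results are in seconds after the integer division by 1000):

assert Streams.parse_timeout('500ms') == 0    # 500 * 1 // 1000
assert Streams.parse_timeout('30s') == 30     # 30 * 1000 // 1000
assert Streams.parse_timeout('5 min') == 300  # 5 * 60000 // 1000
assert Streams.parse_timeout('junk') is None  # no leading digits, no match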
1c43b72ee207df26d65dcf522df11744b18aa4c7
| 2,513
|
py
|
Python
|
lib/python/treadmill/rest/api/ipa.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | 2
|
2017-10-31T18:48:20.000Z
|
2018-03-04T20:35:20.000Z
|
lib/python/treadmill/rest/api/ipa.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/rest/api/ipa.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
"""Treadmill IPA REST api.
"""
import flask
import flask_restplus as restplus
from flask_restplus import fields
# Disable E0611: No 'name' in module
from treadmill import webutils # pylint: disable=E0611
def handle_api_error(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            return flask.abort(400, {'message': str(e)})
return wrapper
# Old style classes, no init method.
#
# pylint: disable=W0232
def init(api, cors, impl):
"""Configures REST handlers for ipa resource."""
namespace = webutils.namespace(
api, __name__, 'IPA REST operations'
)
service_req_model = {
'hostname': fields.String(description='Hostname'),
'domain': fields.String(description='Domain')
}
service_model = api.model(
'service', service_req_model
)
@namespace.route('/host/<hostname>')
@api.doc(params={'hostname': 'Hostname'})
class _Host(restplus.Resource):
"""Treadmill IPA Host"""
@webutils.post_api(
api,
cors,
)
def post(self, hostname):
"""Adds host to IPA."""
return impl.add_host(hostname)
@webutils.delete_api(
api,
cors,
)
def delete(self, hostname):
"""Deletes host from IPA."""
return impl.delete_host(hostname)
@namespace.route('/user/<username>')
@api.doc(params={'username': 'Username'})
class _User(restplus.Resource):
"""Treadmill IPA User"""
@webutils.post_api(
api,
cors,
)
@handle_api_error
def post(self, username):
"""Adds User to IPA."""
impl.add_user(username)
@webutils.delete_api(
api,
cors,
)
@handle_api_error
def delete(self, username):
"""Deletes User from IPA."""
return impl.delete_user(username)
@namespace.route('/protocol/<protocol>/service/<service>')
@api.doc(params={'service': 'Service',
'protocol': 'Protocol (ldap/zookeeper/etc)'})
class _Service(restplus.Resource):
"""Treadmill IPA Service"""
@webutils.post_api(
api,
cors,
req_model=service_model
)
def post(self, protocol, service):
"""Add Service to IPA"""
return impl.service_add(protocol, service, flask.request.json)
| 25.907216
| 74
| 0.566255
|
import flask
import flask_restplus as restplus
from flask_restplus import fields
from treadmill import webutils
def handle_api_error(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            return flask.abort(400, {'message': str(e)})
return wrapper
def init(api, cors, impl):
namespace = webutils.namespace(
api, __name__, 'IPA REST operations'
)
service_req_model = {
'hostname': fields.String(description='Hostname'),
'domain': fields.String(description='Domain')
}
service_model = api.model(
'service', service_req_model
)
@namespace.route('/host/<hostname>')
@api.doc(params={'hostname': 'Hostname'})
class _Host(restplus.Resource):
@webutils.post_api(
api,
cors,
)
def post(self, hostname):
return impl.add_host(hostname)
@webutils.delete_api(
api,
cors,
)
def delete(self, hostname):
return impl.delete_host(hostname)
@namespace.route('/user/<username>')
@api.doc(params={'username': 'Username'})
class _User(restplus.Resource):
@webutils.post_api(
api,
cors,
)
@handle_api_error
def post(self, username):
impl.add_user(username)
@webutils.delete_api(
api,
cors,
)
@handle_api_error
def delete(self, username):
return impl.delete_user(username)
@namespace.route('/protocol/<protocol>/service/<service>')
@api.doc(params={'service': 'Service',
'protocol': 'Protocol (ldap/zookeeper/etc)'})
class _Service(restplus.Resource):
@webutils.post_api(
api,
cors,
req_model=service_model
)
def post(self, protocol, service):
return impl.service_add(protocol, service, flask.request.json)
| true
| true
|
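For context, the `handle_api_error` decorator in the record above turns any handler exception into an HTTP 400 response; a standalone sketch of the same pattern (the handler name and body here are illustrative, not from the original module):

@handle_api_error
def add_user_handler(username):
    # Hypothetical handler: any raised exception is caught by the wrapper
    # and converted into flask.abort(400, {'message': str(e)}).
    if not username:
        raise ValueError('username required')
    return {'added': username}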
1c43b75a0f460e40be0b6a22f54c96dad9885e8b
| 1,340
|
py
|
Python
|
src/mappening/api/models/user.py
|
ucladevx/Bmaps-Backend
|
8dcbb4ca98d183499e03429b944ec0c7865065a6
|
[
"MIT"
] | 3
|
2019-11-22T15:36:16.000Z
|
2020-03-13T09:52:45.000Z
|
src/mappening/api/models/user.py
|
ucladevx/Mappening-Backend
|
8dcbb4ca98d183499e03429b944ec0c7865065a6
|
[
"MIT"
] | 59
|
2017-11-03T00:55:20.000Z
|
2019-05-09T02:07:50.000Z
|
src/mappening/api/models/user.py
|
ucladevx/Mappening-Backend
|
8dcbb4ca98d183499e03429b944ec0c7865065a6
|
[
"MIT"
] | 2
|
2018-09-15T08:51:12.000Z
|
2019-02-03T04:32:09.000Z
|
from mappening.auth.facebook import facebook_oauth
from mappening.auth.google import google_oauth
from flask import session
from flask_login import UserMixin
# UserMixin provides default implementations for methods user objs should have
class User(UserMixin):
def __init__(self, user_id, active=True, admin=False):
self.user_id = user_id
self.active = active
self.admin = admin
# Get unicode id to uniquely identify user
# Can be used to load user from user_loader callback
def get_id(self):
return unicode(self.user_id)
# True if user has an activated account that they can log in to
# Otherwise account will be rejected/suspended from use
def is_active(self):
return self.active
# Determine whether user is anonymous
def is_anonymous(self):
return False
    # True if user is authenticated with proper credentials
# Must be true for users to fulfill criteria of login_required
def is_authenticated(self):
return True
# True if user has admin privileges (can interact with user info)
def is_admin(self):
return self.admin
@facebook_oauth.tokengetter
def get_facebook_oauth_token():
return session.get("oauth_token")
@google_oauth.tokengetter
def get_google_oauth_token():
return session.get("oauth_token")
| 30.454545
| 78
| 0.730597
|
from mappening.auth.facebook import facebook_oauth
from mappening.auth.google import google_oauth
from flask import session
from flask_login import UserMixin
class User(UserMixin):
def __init__(self, user_id, active=True, admin=False):
self.user_id = user_id
self.active = active
self.admin = admin
def get_id(self):
return unicode(self.user_id)
def is_active(self):
return self.active
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def is_admin(self):
return self.admin
@facebook_oauth.tokengetter
def get_facebook_oauth_token():
return session.get("oauth_token")
@google_oauth.tokengetter
def get_google_oauth_token():
return session.get("oauth_token")
| true
| true
|
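For context on the record above: Flask-Login stores the value returned by `get_id` in the session and hands it back to a `user_loader` callback; a minimal sketch of that wiring, with the `login_manager` instance and the loader body assumed rather than taken from this repo:

from flask_login import LoginManager

login_manager = LoginManager()  # hypothetical app-level instance

@login_manager.user_loader
def load_user(user_id):
    # Flask-Login passes back the string User.get_id() returned,
    # so it must be enough to reconstruct the user.
    return User(user_id)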
1c43b7619606c51986086aabf83c8f3c2b0ac47e
| 1,583
|
py
|
Python
|
kolla/tests/base.py
|
XieXianbin/kolla
|
2548c67ad906ad74edd0517a14cda5ddba0fe2a3
|
[
"Apache-2.0"
] | 1
|
2017-03-30T08:09:22.000Z
|
2017-03-30T08:09:22.000Z
|
kolla/tests/base.py
|
XieXianbin/kolla
|
2548c67ad906ad74edd0517a14cda5ddba0fe2a3
|
[
"Apache-2.0"
] | 9
|
2019-02-28T09:15:43.000Z
|
2021-03-16T14:34:31.000Z
|
kolla/tests/base.py
|
XieXianbin/kolla
|
2548c67ad906ad74edd0517a14cda5ddba0fe2a3
|
[
"Apache-2.0"
] | 3
|
2016-06-16T17:11:06.000Z
|
2021-01-24T11:51:21.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fixtures
import mock
from oslo_config import cfg
from oslotest import base as oslotest_base
from kolla.common import config as common_config
TESTS_ROOT = os.path.dirname(os.path.abspath(__file__))
class TestCase(oslotest_base.BaseTestCase):
'''All unit test should inherit from this class'''
config_file = None
def setUp(self):
super(TestCase, self).setUp()
self.conf = cfg.ConfigOpts()
default_config_files = self.get_default_config_files()
common_config.parse(self.conf, [],
default_config_files=default_config_files)
        # NOTE(jeffrey4l): mock the _get_images_dir method to return a fake
        # docker images dir
self.useFixture(fixtures.MockPatch(
'kolla.image.build.KollaWorker._get_images_dir',
mock.Mock(return_value=os.path.join(TESTS_ROOT, 'docker'))))
def get_default_config_files(self):
if self.config_file:
return [os.path.join(TESTS_ROOT, 'etc', self.config_file)]
| 35.177778
| 74
| 0.715098
|
import os
import fixtures
import mock
from oslo_config import cfg
from oslotest import base as oslotest_base
from kolla.common import config as common_config
TESTS_ROOT = os.path.dirname(os.path.abspath(__file__))
class TestCase(oslotest_base.BaseTestCase):
config_file = None
def setUp(self):
super(TestCase, self).setUp()
self.conf = cfg.ConfigOpts()
default_config_files = self.get_default_config_files()
common_config.parse(self.conf, [],
default_config_files=default_config_files)
self.useFixture(fixtures.MockPatch(
'kolla.image.build.KollaWorker._get_images_dir',
mock.Mock(return_value=os.path.join(TESTS_ROOT, 'docker'))))
def get_default_config_files(self):
if self.config_file:
return [os.path.join(TESTS_ROOT, 'etc', self.config_file)]
| true
| true
|
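A sketch of how a concrete suite would build on the kolla `TestCase` above, assuming a config file under tests/etc (the file name and test body are illustrative):

class BuildTestCase(TestCase):
    # Hypothetical subclass: setUp() will load tests/etc/default.conf
    # via get_default_config_files() before each test.
    config_file = 'default.conf'

    def test_conf_is_parsed(self):
        # self.conf was populated by common_config.parse in setUp().
        self.assertIsNotNone(self.conf)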
1c43b77505fa88b9dd35639fc897dd305727a598
| 439
|
py
|
Python
|
tests/view/glprograms/test_highlightprogram.py
|
gsanhueza/BlastSight
|
4b5c48e7ea5f67b737429f05d5213e9ff1fd399d
|
[
"MIT"
] | null | null | null |
tests/view/glprograms/test_highlightprogram.py
|
gsanhueza/BlastSight
|
4b5c48e7ea5f67b737429f05d5213e9ff1fd399d
|
[
"MIT"
] | 1
|
2022-03-13T17:35:35.000Z
|
2022-03-13T17:35:35.000Z
|
tests/view/glprograms/test_highlightprogram.py
|
gsanhueza/BlastSight
|
4b5c48e7ea5f67b737429f05d5213e9ff1fd399d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from blastsight.view.drawables.meshgl import MeshGL
from blastsight.view.glprograms.highlightprogram import HighlightProgram
from tests.view.glprograms.test_meshprogram import TestMeshProgram
class TestHighlightProgram(TestMeshProgram):
@property
def base_program(self):
return HighlightProgram()
@property
def base_drawable(self):
return MeshGL(self.base_element, highlight=True)
| 27.4375
| 72
| 0.783599
|
from blastsight.view.drawables.meshgl import MeshGL
from blastsight.view.glprograms.highlightprogram import HighlightProgram
from tests.view.glprograms.test_meshprogram import TestMeshProgram
class TestHighlightProgram(TestMeshProgram):
@property
def base_program(self):
return HighlightProgram()
@property
def base_drawable(self):
return MeshGL(self.base_element, highlight=True)
| true
| true
|
1c43b863155dae4837b09f4ac2b454f75e20c5ae
| 393
|
py
|
Python
|
environment/lib/python3.7/site-packages/visions/application/summaries/series/numerical_basic_summary.py
|
sid-the-coder/Easy-Data-Analysis-With-Pandas
|
5ae2a867e5a548e34f28aec89e49c361071b872c
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
environment/lib/python3.7/site-packages/visions/application/summaries/series/numerical_basic_summary.py
|
sid-the-coder/Easy-Data-Analysis-With-Pandas
|
5ae2a867e5a548e34f28aec89e49c361071b872c
|
[
"MIT"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
environment/lib/python3.7/site-packages/visions/application/summaries/series/numerical_basic_summary.py
|
sid-the-coder/Easy-Data-Analysis-With-Pandas
|
5ae2a867e5a548e34f28aec89e49c361071b872c
|
[
"MIT"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import pandas as pd
def numerical_basic_summary(series: pd.Series) -> dict:
"""Summary with basic aggregates
Args:
series: series to summarize
Returns:
A summary of aggregates of `mean`, `std`, `var`, `min`, `max` and `sum`.
"""
aggregates = ["mean", "std", "var", "min", "max", "sum"]
summary = series.agg(aggregates).to_dict()
return summary
| 23.117647
| 80
| 0.608142
|
import pandas as pd
def numerical_basic_summary(series: pd.Series) -> dict:
aggregates = ["mean", "std", "var", "min", "max", "sum"]
summary = series.agg(aggregates).to_dict()
return summary
| true
| true
|
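A usage sketch for `numerical_basic_summary` above (the data values are illustrative):

import pandas as pd

series = pd.Series([1.0, 2.0, 3.0, 4.0])
summary = numerical_basic_summary(series)
# {'mean': 2.5, 'std': 1.29099..., 'var': 1.66666..., 'min': 1.0,
#  'max': 4.0, 'sum': 10.0}  (std/var use pandas' default ddof=1)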
1c43b88f742b955290b3e10e86719a3de9715e92
| 3,878
|
py
|
Python
|
setup.py
|
titu1994/lambda_networks_pt
|
326006a5ad3dcadc6bfb12da10b5f9530f2305c5
|
[
"MIT"
] | 14
|
2021-02-22T00:37:48.000Z
|
2022-03-03T02:19:45.000Z
|
setup.py
|
titu1994/lambda_networks_pt
|
326006a5ad3dcadc6bfb12da10b5f9530f2305c5
|
[
"MIT"
] | null | null | null |
setup.py
|
titu1994/lambda_networks_pt
|
326006a5ad3dcadc6bfb12da10b5f9530f2305c5
|
[
"MIT"
] | 2
|
2021-08-05T01:56:02.000Z
|
2022-01-04T10:49:15.000Z
|
import io
import os
import re
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package arguments
PACKAGE_NAME = "lambda_networks_pt"
DIRECTORY_NAME = "lambda_networks"
SHORT_DESCRIPION = "Pytorch implementation of Lambda Networks (https://arxiv.org/abs/2102.08602)"
URL = "https://github.com/titu1994/lambda_networks_pt"
LICENCE = 'MIT'
# Extra requirements and configs
EXTRA_REQUIREMENTS = {
'cpu': ['pytorch'],
'gpu': ['pytorch'],
}
# Test requirements and configs
TEST_REQUIRES = ['pytest']
REQUIRED_PYTHON = ">=3.0.0" # Can be None, or a string value
# Signature arguments
AUTHOR = "Somshubra Majumdar"
EMAIL = "titu1994@gmail.com"
###############################################################
# Attach test requirements to `tests`
EXTRA_REQUIREMENTS['tests'] = TEST_REQUIRES
base_path = os.path.abspath(os.path.dirname(__file__))
if LICENCE is None or LICENCE == '':
raise RuntimeError("Licence must be provided !")
if not os.path.exists(os.path.join(base_path, 'LICENCE')):
    raise RuntimeError("Licence must be provided !")
def get_version():
"""Return package version as listed in `__version__` in `init.py`."""
if DIRECTORY_NAME is None:
package_path = PACKAGE_NAME
else:
package_path = DIRECTORY_NAME
init_py = open(os.path.join(package_path, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
try:
with open(os.path.join(base_path, 'requirements.txt'), encoding='utf-8') as f:
REQUIREMENTS = f.read().split('\n')
except Exception:
REQUIREMENTS = []
try:
with io.open(os.path.join(base_path, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = '\n' + f.read()
except FileNotFoundError:
LONG_DESCRIPTION = SHORT_DESCRIPION
class UploadCommand(Command):
description = 'Build, install and upload tag to git with cleanup.'
user_options = []
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(base_path, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel'.format(sys.executable))
self.status('Pushing git tags...')
os.system('git tag v{0}'.format(get_version()))
os.system('git push --tags')
try:
self.status('Removing build artifacts...')
rmtree(os.path.join(base_path, 'build'))
rmtree(os.path.join(base_path, '{}.egg-info'.format(PACKAGE_NAME)))
except OSError:
pass
sys.exit()
def initialize_options(self):
pass
def finalize_options(self):
pass
@staticmethod
def status(s):
print(s)
setup(
name=PACKAGE_NAME,
version=get_version(),
packages=find_packages(exclude=['tests', 'images', 'examples']),
url=URL,
download_url=URL,
python_requires=REQUIRED_PYTHON,
license=LICENCE,
author=AUTHOR,
author_email=EMAIL,
description=SHORT_DESCRIPION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
install_requires=REQUIREMENTS,
extras_require=EXTRA_REQUIREMENTS,
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
test_suite="tests",
tests_require=TEST_REQUIRES,
# python setup.py upload
cmdclass={
'upload': UploadCommand,
},
)
| 27.503546
| 97
| 0.643373
|
import io
import os
import re
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
PACKAGE_NAME = "lambda_networks_pt"
DIRECTORY_NAME = "lambda_networks"
SHORT_DESCRIPION = "Pytorch implementation of Lambda Networks (https://arxiv.org/abs/2102.08602)"
URL = "https://github.com/titu1994/lambda_networks_pt"
LICENCE = 'MIT'
EXTRA_REQUIREMENTS = {
'cpu': ['pytorch'],
'gpu': ['pytorch'],
}
TEST_REQUIRES = ['pytest']
REQUIRED_PYTHON = ">=3.0.0"
AUTHOR = "Somshubra Majumdar"
EMAIL = "titu1994@gmail.com"
EXTRA_REQUIREMENTS['tests'] = TEST_REQUIRES
base_path = os.path.abspath(os.path.dirname(__file__))
if LICENCE is None or LICENCE == '':
    raise RuntimeError("Licence must be provided !")
if not os.path.exists(os.path.join(base_path, 'LICENCE')):
    raise RuntimeError("Licence must be provided !")
def get_version():
    if DIRECTORY_NAME is None:
        package_path = PACKAGE_NAME
    else:
        package_path = DIRECTORY_NAME
    init_py = open(os.path.join(package_path, '__init__.py')).read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
try:
    with open(os.path.join(base_path, 'requirements.txt'), encoding='utf-8') as f:
        REQUIREMENTS = f.read().split('\n')
except Exception:
    REQUIREMENTS = []
try:
    with io.open(os.path.join(base_path, 'README.md'), encoding='utf-8') as f:
        LONG_DESCRIPTION = '\n' + f.read()
except FileNotFoundError:
    LONG_DESCRIPTION = SHORT_DESCRIPION
class UploadCommand(Command):
    description = 'Build, install and upload tag to git with cleanup.'
    user_options = []
    def run(self):
        try:
            self.status('Removing previous builds...')
            rmtree(os.path.join(base_path, 'dist'))
        except OSError:
            pass
        self.status('Building Source and Wheel (universal) distribution...')
        os.system('{0} setup.py sdist bdist_wheel'.format(sys.executable))
        self.status('Pushing git tags...')
        os.system('git tag v{0}'.format(get_version()))
        os.system('git push --tags')
        try:
            self.status('Removing build artifacts...')
            rmtree(os.path.join(base_path, 'build'))
            rmtree(os.path.join(base_path, '{}.egg-info'.format(PACKAGE_NAME)))
        except OSError:
            pass
        sys.exit()
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    @staticmethod
    def status(s):
        print(s)
setup(
name=PACKAGE_NAME,
version=get_version(),
packages=find_packages(exclude=['tests', 'images', 'examples']),
url=URL,
download_url=URL,
python_requires=REQUIRED_PYTHON,
license=LICENCE,
author=AUTHOR,
author_email=EMAIL,
description=SHORT_DESCRIPION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
install_requires=REQUIREMENTS,
extras_require=EXTRA_REQUIREMENTS,
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
test_suite="tests",
tests_require=TEST_REQUIRES,
# python setup.py upload
cmdclass={
'upload': UploadCommand,
},
)
| true
| true
|
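To illustrate the version regex that `get_version` in the record above applies to the package's `__init__.py` (the file content here is a made-up example):

import re

init_py = "__version__ = '0.1.3'\n"  # hypothetical __init__.py content
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
assert version == '0.1.3'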
1c43b8cab6d9a2416dab97205ef74dfc1b20fc61
| 975
|
py
|
Python
|
test/Backend/Iree/Sample/simple_invoke.py
|
raikonenfnu/mlir-npcomp
|
29e1b2fe89848d58c9bc07e7df7ce651850a5244
|
[
"Apache-2.0"
] | null | null | null |
test/Backend/Iree/Sample/simple_invoke.py
|
raikonenfnu/mlir-npcomp
|
29e1b2fe89848d58c9bc07e7df7ce651850a5244
|
[
"Apache-2.0"
] | null | null | null |
test/Backend/Iree/Sample/simple_invoke.py
|
raikonenfnu/mlir-npcomp
|
29e1b2fe89848d58c9bc07e7df7ce651850a5244
|
[
"Apache-2.0"
] | null | null | null |
# RUN: %PYTHON %s
# TODO: Numpy compiler has bitrotted.
# XFAIL: *
from npcomp.compiler.numpy.backend import iree
from npcomp.compiler.numpy.frontend import *
from npcomp.compiler.numpy import test_config
from npcomp.compiler.numpy.target import *
from npcomp.compiler.utils import logging
# TODO: This should all exist in a high level API somewhere.
from _npcomp import mlir
logging.enable()
def compile_function(f):
fe = ImportFrontend(config=test_config.create_test_config(
target_factory=GenericTarget32))
fe.import_global_function(f)
compiler = iree.CompilerBackend()
vm_blob = compiler.compile(fe.ir_module)
loaded_m = compiler.load(vm_blob)
return loaded_m[f.__name__]
@compile_function
def int_add(a: int, b: int):
return a + b
result = int_add(5, 6)
assert result == 11
@compile_function
def simple_control_flow(a: int, b: int):
return (a * b) and (a - b)
assert simple_control_flow(5, 6) == -1
assert simple_control_flow(-1, 0) == 0
| 23.214286
| 60
| 0.751795
|
from npcomp.compiler.numpy.backend import iree
from npcomp.compiler.numpy.frontend import *
from npcomp.compiler.numpy import test_config
from npcomp.compiler.numpy.target import *
from npcomp.compiler.utils import logging
from _npcomp import mlir
logging.enable()
def compile_function(f):
fe = ImportFrontend(config=test_config.create_test_config(
target_factory=GenericTarget32))
fe.import_global_function(f)
compiler = iree.CompilerBackend()
vm_blob = compiler.compile(fe.ir_module)
loaded_m = compiler.load(vm_blob)
return loaded_m[f.__name__]
@compile_function
def int_add(a: int, b: int):
return a + b
result = int_add(5, 6)
assert result == 11
@compile_function
def simple_control_flow(a: int, b: int):
return (a * b) and (a - b)
assert simple_control_flow(5, 6) == -1
assert simple_control_flow(-1, 0) == 0
| true
| true
|
1c43baab07473eaec142ec9e6c4ab210843578ac
| 654
|
py
|
Python
|
tests/opcodes/cases/test_assert_cmpge_27.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2021-05-20T16:52:08.000Z
|
2021-05-20T16:52:08.000Z
|
tests/opcodes/cases/test_assert_cmpge_27.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/opcodes/cases/test_assert_cmpge_27.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestassert_cmpge_27(TestCase):
def setUp(self):
self.maxDiff = None
self.i = Interpreter(debug=False) # disable exceptions
def test_opcode_assert_cmpge_27(self):
res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/assert_cmpge.tz")}"')
self.assertTrue(res['success'])
res = self.i.execute('RUN (Pair -1 0) Unit')
self.assertEqual(False, res['success'])
| 29.727273
| 89
| 0.7263
|
from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestassert_cmpge_27(TestCase):
def setUp(self):
self.maxDiff = None
self.i = Interpreter(debug=False)
def test_opcode_assert_cmpge_27(self):
res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/assert_cmpge.tz")}"')
self.assertTrue(res['success'])
res = self.i.execute('RUN (Pair -1 0) Unit')
self.assertEqual(False, res['success'])
| true
| true
|
1c43baccd072765a676117f3125d7e2c4ceb4fb3
| 3,361
|
py
|
Python
|
entsoe_client/Queries/Congestion/Congestion.py
|
DarioHett/entsoe-client
|
bb424fa54966d3be49daa1edb9e0fd40ed00ac15
|
[
"MIT"
] | 1
|
2021-10-03T18:11:57.000Z
|
2021-10-03T18:11:57.000Z
|
entsoe_client/Queries/Congestion/Congestion.py
|
DarioHett/entsoe-client
|
bb424fa54966d3be49daa1edb9e0fd40ed00ac15
|
[
"MIT"
] | 1
|
2021-11-08T16:54:10.000Z
|
2021-11-08T16:54:10.000Z
|
entsoe_client/Queries/Congestion/Congestion.py
|
DarioHett/entsoe-client
|
bb424fa54966d3be49daa1edb9e0fd40ed00ac15
|
[
"MIT"
] | 1
|
2021-12-04T21:24:53.000Z
|
2021-12-04T21:24:53.000Z
|
from typing import Union
import pandas as pd
from entsoe_client.ParameterTypes import *
from entsoe_client.Queries import Query
class Congestion(Query):
"""4.3 Congestion domain."""
def __init__(
self,
documentType: DocumentType = None,
businessType: BusinessType = None,
in_Domain: Area = None,
out_Domain: Area = None,
periodStart: Union[str, int, pd.Timestamp] = None,
periodEnd: Union[str, int, pd.Timestamp] = None,
):
super(Congestion, self).__init__(
documentType=documentType,
businessType=businessType,
in_Domain=in_Domain,
out_Domain=out_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
class Redispatching(Congestion):
"""
4.3.1. Redispatching [13.1.A]
100 documents limit applies
Time interval in query response depends on duration of matching redispatches
Mandatory parameters
DocumentType
In_Domain
Out_Domain
TimeInterval or combination of PeriodStart and PeriodEnd
Optional parameters
BusinessType
"""
def __init__(
self,
in_Domain: Area,
out_Domain: Area,
periodStart: Union[int, str, pd.Timestamp],
periodEnd: Union[int, str, pd.Timestamp],
):
super(Redispatching, self).__init__(
documentType=DocumentType.A63,
businessType=BusinessType.A46,
in_Domain=in_Domain,
out_Domain=out_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
class Countertrading(Congestion):
"""
4.3.2. Countertrading [13.1.B]
100 documents limit applies
Time interval in query response depends on duration of matching counter trades
Mandatory parameters
DocumentType
In_Domain
Out_Domain
TimeInterval or combination of PeriodStart and PeriodEnd
"""
def __init__(
self,
in_Domain: Area,
out_Domain: Area,
periodStart: Union[int, str, pd.Timestamp],
periodEnd: Union[int, str, pd.Timestamp],
):
super(Countertrading, self).__init__(
documentType=DocumentType("Counter trade notice"),
in_Domain=in_Domain,
out_Domain=out_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
class CostsOfCongestionManagement(Congestion):
"""
4.3.3. Costs of Congestion Management [13.1.C]
100 documents limit applies
Minimum time interval in query response is one month
Mandatory parameters
DocumentType
In_Domain
Out_Domain
TimeInterval or combination of PeriodStart and PeriodEnd
Optional parameters
BusinessType
In_Domain and Out_Domain must be populated with the same area EIC code.
"""
def __init__(
self,
in_Domain: Area,
periodStart: Union[int, str, pd.Timestamp],
periodEnd: Union[int, str, pd.Timestamp],
):
super(CostsOfCongestionManagement, self).__init__(
documentType=DocumentType.A92,
businessType=BusinessType.B03,
in_Domain=in_Domain,
out_Domain=in_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
| 28.243697
| 82
| 0.628384
|
from typing import Union
import pandas as pd
from entsoe_client.ParameterTypes import *
from entsoe_client.Queries import Query
class Congestion(Query):
def __init__(
self,
documentType: DocumentType = None,
businessType: BusinessType = None,
in_Domain: Area = None,
out_Domain: Area = None,
periodStart: Union[str, int, pd.Timestamp] = None,
periodEnd: Union[str, int, pd.Timestamp] = None,
):
super(Congestion, self).__init__(
documentType=documentType,
businessType=businessType,
in_Domain=in_Domain,
out_Domain=out_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
class Redispatching(Congestion):
def __init__(
self,
in_Domain: Area,
out_Domain: Area,
periodStart: Union[int, str, pd.Timestamp],
periodEnd: Union[int, str, pd.Timestamp],
):
super(Redispatching, self).__init__(
documentType=DocumentType.A63,
businessType=BusinessType.A46,
in_Domain=in_Domain,
out_Domain=out_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
class Countertrading(Congestion):
def __init__(
self,
in_Domain: Area,
out_Domain: Area,
periodStart: Union[int, str, pd.Timestamp],
periodEnd: Union[int, str, pd.Timestamp],
):
super(Countertrading, self).__init__(
documentType=DocumentType("Counter trade notice"),
in_Domain=in_Domain,
out_Domain=out_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
class CostsOfCongestionManagement(Congestion):
def __init__(
self,
in_Domain: Area,
periodStart: Union[int, str, pd.Timestamp],
periodEnd: Union[int, str, pd.Timestamp],
):
super(CostsOfCongestionManagement, self).__init__(
documentType=DocumentType.A92,
businessType=BusinessType.B03,
in_Domain=in_Domain,
out_Domain=in_Domain,
periodStart=periodStart,
periodEnd=periodEnd,
)
| true
| true
|
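A usage sketch for the query classes in the record above; the `Area` member names and the timestamps are assumptions, not values from the original module:

# Hypothetical one-week redispatching query:
query = Redispatching(
    in_Domain=Area.DE_LU,      # assumed Area enum member
    out_Domain=Area.FR,        # assumed Area enum member
    periodStart=202101010000,  # per the Union[int, str, pd.Timestamp] hint
    periodEnd=202101080000,
)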
1c43bb10e64934bf4ab7ac1a74d3b67d14c2f676
| 1,060
|
py
|
Python
|
recon/linker.py
|
kabirkhan/recon
|
19a7c3c644c4e5833fb326958ee090343a45352a
|
[
"MIT"
] | 4
|
2020-11-27T10:31:14.000Z
|
2021-05-21T11:03:08.000Z
|
recon/linker.py
|
kabirkhan/recon
|
19a7c3c644c4e5833fb326958ee090343a45352a
|
[
"MIT"
] | null | null | null |
recon/linker.py
|
kabirkhan/recon
|
19a7c3c644c4e5833fb326958ee090343a45352a
|
[
"MIT"
] | 1
|
2020-11-27T10:29:14.000Z
|
2020-11-27T10:29:14.000Z
|
from abc import ABC, abstractmethod
from typing import Iterable, Iterator
from recon.types import Example
from spacy.kb import KnowledgeBase
class BaseEntityLinker(ABC):
@abstractmethod
def __call__(self, examples: Iterable[Example]) -> Iterator[Example]:
raise NotImplementedError
class EntityLinker(BaseEntityLinker):
def __call__(self, examples: Iterable[Example]) -> Iterator[Example]:
for example in examples:
for span in example.spans:
span.kb_id = span.text
yield example
class SpacyEntityLinker(BaseEntityLinker):
def __init__(self, kb: KnowledgeBase):
self.kb = kb
def __call__(self, examples: Iterable[Example]) -> Iterator[Example]:
for example in examples:
for span in example.spans:
cands = self.kb.get_candidates(span.text)
ents = [c.entity_ for c in cands if c.entity_]
if ents:
top_ent = ents[0]
span.kb_id = top_ent
yield example
| 29.444444
| 73
| 0.633962
|
from abc import ABC, abstractmethod
from typing import Iterable, Iterator
from recon.types import Example
from spacy.kb import KnowledgeBase
class BaseEntityLinker(ABC):
@abstractmethod
def __call__(self, examples: Iterable[Example]) -> Iterator[Example]:
raise NotImplementedError
class EntityLinker(BaseEntityLinker):
def __call__(self, examples: Iterable[Example]) -> Iterator[Example]:
for example in examples:
for span in example.spans:
span.kb_id = span.text
yield example
class SpacyEntityLinker(BaseEntityLinker):
def __init__(self, kb: KnowledgeBase):
self.kb = kb
def __call__(self, examples: Iterable[Example]) -> Iterator[Example]:
for example in examples:
for span in example.spans:
cands = self.kb.get_candidates(span.text)
ents = [c.entity_ for c in cands if c.entity_]
if ents:
top_ent = ents[0]
span.kb_id = top_ent
yield example
| true
| true
|
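A sketch of driving the naive `EntityLinker` from the record above; only attributes visible in that code (`spans`, `span.text`, `span.kb_id`) are used, and `examples` is assumed to be an Iterable[Example] prepared elsewhere:

linker = EntityLinker()
for example in linker(examples):
    for span in example.spans:
        # The naive linker set kb_id to the span's surface text.
        print(span.text, span.kb_id)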
1c43bc71e3358de509f11b212de4a7edb29ba5c0
| 1,032
|
py
|
Python
|
sdk/python/pulumi_azure_native/healthcareapis/v20190916/__init__.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/healthcareapis/v20190916/__init__.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/healthcareapis/v20190916/__init__.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_service import *
from .service import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:healthcareapis/v20190916:Service":
return Service(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "healthcareapis/v20190916", _module_instance)
_register_module()
| 30.352941
| 105
| 0.679264
|
# Export this package's modules as members:
from ._enums import *
from .get_service import *
from .service import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:healthcareapis/v20190916:Service":
return Service(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "healthcareapis/v20190916", _module_instance)
_register_module()
| true
| true
|
1c43bc9888ddbc540bdce99066fbea77b993c8a9
| 436
|
py
|
Python
|
ProjectEuler/p036.py
|
TISparta/competitive-programming-solutions
|
31987d4e67bb874bf15653565c6418b5605a20a8
|
[
"MIT"
] | 1
|
2018-01-30T13:21:30.000Z
|
2018-01-30T13:21:30.000Z
|
ProjectEuler/p036.py
|
TISparta/competitive-programming-solutions
|
31987d4e67bb874bf15653565c6418b5605a20a8
|
[
"MIT"
] | null | null | null |
ProjectEuler/p036.py
|
TISparta/competitive-programming-solutions
|
31987d4e67bb874bf15653565c6418b5605a20a8
|
[
"MIT"
] | 1
|
2018-08-29T13:26:50.000Z
|
2018-08-29T13:26:50.000Z
|
# Execution time : 0.768209 seconds
# Solution Explanation
# A naive brute-force approach is enough.
import time
width = 40
def solution():
return sum(it for it in range(1000000) if str(it)[:]==str(it)[::-1] and bin(it)[2:]==bin(it)[:1:-1])
if __name__=="__main__":
start_ = time.time()
print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_))
| 24.222222
| 104
| 0.614679
|
import time
width = 40
def solution():
return sum(it for it in range(1000000) if str(it)[:]==str(it)[::-1] and bin(it)[2:]==bin(it)[:1:-1])
if __name__=="__main__":
start_ = time.time()
print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_))
| true
| true
|
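To make the double-palindrome check in the record above concrete, using 585 (the example from the Project Euler problem statement, palindromic in base 10 and base 2):

it = 585
str(it)[:]       # '585'          decimal digits
str(it)[::-1]    # '585'          reversed; equal, so a decimal palindrome
bin(it)[2:]      # '1001001001'   binary digits without the '0b' prefix
bin(it)[:1:-1]   # '1001001001'   the same digits reversed, stopping before
                 #                index 1, which also skips the '0b' prefix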