content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from django.urls import reverse
from rest_framework import status
from cornershop.apps.weather.tests import WeatherAPTestCase
class WeatherPostTestCase(WeatherAPTestCase):
    """Tests for the weather list endpoint (GET /weather/)."""

    def test_with_existing_record(self):
        # The list view should answer 200 and echo the fixture payload
        # prepared by WeatherAPTestCase in ``self.response`` —
        # NOTE(review): assumes the base class seeds records; confirm there.
        url = reverse('weather-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('results'), self.response['results'])
|
nilq/baby-python
|
python
|
"""Utilities for algebraic number theory. """
from sympy.core.sympify import sympify
from sympy.ntheory.factor_ import factorint
from sympy.polys.domains.rationalfield import QQ
from sympy.polys.domains.integerring import ZZ
from sympy.polys.matrices.exceptions import DMRankError
from sympy.polys.numberfields.minpoly import minpoly
from sympy.printing.lambdarepr import IntervalPrinter
from sympy.utilities.decorator import public
from sympy.utilities.lambdify import lambdify
from mpmath import mp
def is_rat(c):
    r"""
    Decide whether *c* has an acceptable type for use as a rational number.

    Explanation
    ===========
    Returns ``True`` on any argument of type ``int``, :ref:`ZZ`, or :ref:`QQ`.

    See Also
    ========
    is_int
    """
    # Plain ``int`` needs an explicit check: with gmpy2 installed,
    # ``ZZ.of_type()`` accepts only ``mpz``.  Conversely, ``c in QQ`` would
    # be too permissive (e.g. ``3.14 in QQ`` is ``True``), so membership is
    # decided with ``of_type`` checks only.
    if isinstance(c, int):
        return True
    return ZZ.of_type(c) or QQ.of_type(c)
def is_int(c):
    r"""
    Decide whether *c* has an acceptable type for use as an integer.

    Explanation
    ===========
    Returns ``True`` on any argument of type ``int`` or :ref:`ZZ`.

    See Also
    ========
    is_rat
    """
    # With gmpy2 installed ``ZZ.of_type()`` accepts only ``mpz``, never a
    # plain ``int``, hence the separate isinstance check.
    if isinstance(c, int):
        return True
    return ZZ.of_type(c)
def get_num_denom(c):
    r"""
    Given any argument on which :py:func:`~.is_rat` is ``True``, return the
    numerator and denominator of this number.

    See Also
    ========
    is_rat
    """
    # Convert through QQ so every accepted type (int, ZZ, QQ) exposes
    # numerator/denominator uniformly.
    r = QQ(c)
    return r.numerator, r.denominator
@public
def extract_fundamental_discriminant(a):
    r"""
    Extract a fundamental discriminant from an integer *a*.

    Explanation
    ===========
    Given any rational integer *a* that is 0 or 1 mod 4, write $a = d f^2$,
    where $d$ is either 1 or a fundamental discriminant, and return a pair
    of dictionaries ``(D, F)`` giving the prime factorizations of $d$ and $f$
    respectively, in the same format returned by :py:func:`~.factorint`.

    A fundamental discriminant $d$ is different from unity, and is either
    1 mod 4 and squarefree, or is 0 mod 4 and such that $d/4$ is squarefree
    and 2 or 3 mod 4. This is the same as being the discriminant of some
    quadratic field.

    Examples
    ========

    >>> from sympy.polys.numberfields.utilities import extract_fundamental_discriminant
    >>> print(extract_fundamental_discriminant(-432))
    ({3: 1, -1: 1}, {2: 2, 3: 1})

    For comparison:

    >>> from sympy import factorint
    >>> print(factorint(-432))
    {2: 4, 3: 3, -1: 1}

    Parameters
    ==========

    a: int, must be 0 or 1 mod 4

    Returns
    =======

    Pair ``(D, F)`` of dictionaries.

    Raises
    ======

    ValueError
        If *a* is not 0 or 1 mod 4.

    References
    ==========

    .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory.*
       (See Prop. 5.1.3)

    """
    if a % 4 not in (0, 1):
        raise ValueError('To extract fundamental discriminant, number must be 0 or 1 mod 4.')
    if a == 0:
        return {}, {0: 1}
    if a == 1:
        return {}, {}
    D, F = {}, {}
    # Pass 1: put one copy of every odd-exponent prime (and the unit -1)
    # into d, and the remaining copies, in pairs, into f.  Afterwards d is
    # squarefree and a/d is a perfect square.  Along the way, count the
    # factors of d that are 3 mod 4.
    num_3_mod_4 = 0
    for p, e in factorint(a).items():
        if e % 2 == 1:
            D[p] = 1
            if p % 4 == 3:
                num_3_mod_4 += 1
            if e >= 3:
                F[p] = (e - 1) // 2
        else:
            F[p] = e // 2
    # Pass 2: if d came out 2 or 3 mod 4, steal one factor of 4 from f**2
    # and give it to d, making d either 8 or 12 mod 16 (i.e. 0 mod 4 with
    # d/4 being 2 or 3 mod 4).
    even = 2 in D
    if even or num_3_mod_4 % 2 == 1:
        e2 = F[2]
        assert e2 > 0
        if e2 == 1:
            del F[2]
        else:
            F[2] = e2 - 1
        D[2] = 3 if even else 2
    return D, F
@public
class AlgIntPowers:
    r"""
    Compute the powers of an algebraic integer.

    Explanation
    ===========
    Given an algebraic integer $\theta$ by its monic irreducible polynomial
    ``T`` over :ref:`ZZ`, this class computes representations of arbitrarily
    high powers of $\theta$, as :ref:`ZZ`-linear combinations over
    $\{1, \theta, \ldots, \theta^{n-1}\}$, where $n = \deg(T)$.

    The representations are computed using the linear recurrence relations for
    powers of $\theta$, derived from the polynomial ``T``. See [1], Sec. 4.2.2.

    Optionally, the representations may be reduced with respect to a modulus.

    Examples
    ========

    >>> from sympy import Poly, cyclotomic_poly
    >>> from sympy.polys.numberfields.utilities import AlgIntPowers
    >>> T = Poly(cyclotomic_poly(5))
    >>> zeta_pow = AlgIntPowers(T)
    >>> print(zeta_pow[0])
    [1, 0, 0, 0]
    >>> print(zeta_pow[1])
    [0, 1, 0, 0]
    >>> print(zeta_pow[4])  # doctest: +SKIP
    [-1, -1, -1, -1]
    >>> print(zeta_pow[24])  # doctest: +SKIP
    [-1, -1, -1, -1]

    References
    ==========

    .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory.*

    """
    def __init__(self, T, modulus=None):
        """
        Parameters
        ==========

        T : :py:class:`~.Poly`
            The monic irreducible polynomial over :ref:`ZZ` defining the
            algebraic integer.
        modulus : int, None, optional
            If not ``None``, all representations will be reduced w.r.t. this.
        """
        self.T = T
        self.modulus = modulus
        self.n = T.degree()
        # powers_n_and_up[k] holds the coefficient list of theta**(n + k)
        # over {1, theta, ..., theta**(n-1)}.  The seed entry is theta**n,
        # read off from T: theta**n = -(c_0 + c_1 theta + ...).  The
        # ``x % self`` idiom routes through __rmod__ for optional modular
        # reduction of every coefficient.
        self.powers_n_and_up = [[-c % self for c in reversed(T.rep.rep)][:-1]]
        self.max_so_far = self.n

    def red(self, exp):
        """Reduce *exp* mod ``self.modulus``, or return it unchanged if no
        modulus was given."""
        return exp if self.modulus is None else exp % self.modulus

    def __rmod__(self, other):
        # Enables the internal shorthand ``value % self`` == ``self.red(value)``.
        return self.red(other)

    def compute_up_through(self, e):
        """Extend the cached table so it covers theta**e (no-op if already
        covered)."""
        m = self.max_so_far
        if e <= m: return
        n = self.n
        r = self.powers_n_and_up
        c = r[0]
        # Recurrence: theta**k = theta * theta**(k-1); multiplying by theta
        # shifts coefficients up one slot and folds the overflow b (the old
        # top coefficient) back in via the representation of theta**n.
        for k in range(m+1, e+1):
            b = r[k-1-n][n-1]
            r.append(
                [c[0]*b % self] + [
                    (r[k-1-n][i-1] + c[i]*b) % self for i in range(1, n)
                ]
            )
        self.max_so_far = e

    def get(self, e):
        """Return theta**e as a list of n coefficients over the power basis.

        Raises ValueError for negative *e*.
        """
        n = self.n
        if e < 0:
            raise ValueError('Exponent must be non-negative.')
        elif e < n:
            # Below degree n the power basis represents theta**e directly.
            return [1 if i == e else 0 for i in range(n)]
        else:
            self.compute_up_through(e)
            return self.powers_n_and_up[e - n]

    def __getitem__(self, item):
        return self.get(item)
@public
def coeff_search(m, R):
    r"""
    Generate coefficient lists for searching through polynomials.

    Explanation
    ===========
    The leading coefficient is always non-negative. All combinations with
    coefficients bounded in absolute value are explored before the bound is
    increased. The all-zero list is skipped, as are any lists already seen
    at a smaller bound. See examples.

    Examples
    ========

    >>> from sympy.polys.numberfields.utilities import coeff_search
    >>> cs = coeff_search(2, 1)
    >>> C = [next(cs) for i in range(13)]
    >>> print(C)
    [[1, 1], [1, 0], [1, -1], [0, 1], [2, 2], [2, 1], [2, 0], [2, -1], [2, -2],
    [1, 2], [1, -2], [0, 2], [3, 3]]

    Parameters
    ==========

    m : int
        Length of coeff list.
    R : int
        Initial max abs val for coeffs (will increase as search proceeds).

    Returns
    =======

    generator
        Infinite generator of lists of coefficients.

    """
    initial_bound = R
    coeffs = [R] * m
    while True:
        # Yield only combinations not already produced at a smaller bound:
        # at the initial bound everything is new; afterwards, a combination
        # is new exactly when it touches the current bound somewhere.
        if R == initial_bound or R in coeffs or -R in coeffs:
            yield list(coeffs)
        # Step to the next combination, odometer-style: decrement the
        # rightmost position not yet at -R and reset everything after it.
        j = m - 1
        while coeffs[j] == -R:
            j -= 1
        coeffs[j] -= 1
        coeffs[j + 1:] = [R] * (m - 1 - j)
        # The all-zero list marks exhaustion of this bound: enlarge and
        # restart (the all-zero list itself is never yielded).
        if not any(coeffs):
            R += 1
            coeffs = [R] * m
def supplement_a_subspace(M):
    r"""
    Extend a basis for a subspace to a basis for the whole space.

    Explanation
    ===========
    Given an $n \times r$ matrix *M* of rank $r$ (so $r \leq n$), this function
    computes an invertible $n \times n$ matrix $B$ such that the first $r$
    columns of $B$ equal *M*.

    This operation can be interpreted as a way of extending a basis for a
    subspace, to give a basis for the whole space.

    To be precise, suppose you have an $n$-dimensional vector space $V$, with
    basis $\{v_1, v_2, \ldots, v_n\}$, and an $r$-dimensional subspace $W$ of
    $V$, spanned by a basis $\{w_1, w_2, \ldots, w_r\}$, where the $w_j$ are
    given as linear combinations of the $v_i$. If the columns of *M* represent
    the $w_j$ as such linear combinations, then the columns of the matrix $B$
    computed by this function give a new basis $\{u_1, u_2, \ldots, u_n\}$ for
    $V$, again relative to the $\{v_i\}$ basis, and such that $u_j = w_j$
    for $1 \leq j \leq r$.

    Examples
    ========

    Note: The function works in terms of columns, so in these examples we
    print matrix transposes in order to make the columns easier to inspect.

    >>> from sympy.polys.matrices import DM
    >>> from sympy import QQ, FF
    >>> from sympy.polys.numberfields.utilities import supplement_a_subspace
    >>> M = DM([[1, 7, 0], [2, 3, 4]], QQ).transpose()
    >>> print(supplement_a_subspace(M).to_Matrix().transpose())
    Matrix([[1, 7, 0], [2, 3, 4], [1, 0, 0]])
    >>> M2 = M.convert_to(FF(7))
    >>> print(M2.to_Matrix().transpose())
    Matrix([[1, 0, 0], [2, 3, -3]])
    >>> print(supplement_a_subspace(M2).to_Matrix().transpose())
    Matrix([[1, 0, 0], [2, 3, -3], [0, 1, 0]])

    Parameters
    ==========

    M : :py:class:`~.DomainMatrix`
        The columns give the basis for the subspace.

    Returns
    =======

    :py:class:`~.DomainMatrix`
        This matrix is invertible and its first $r$ columns equal *M*.

    Raises
    ======

    DMRankError
        If *M* was not of maximal rank.

    References
    ==========

    .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*
       (See Sec. 2.3.2.)

    """
    n, r = M.shape
    # Row-reduce the augmented matrix [M | I_n].  Since M has rank r, this
    # yields [J | A], where J is the first r columns of I_n and A is the
    # product of the elementary row-op matrices applied by RREF.
    augmented = M.hstack(M.eye(n, M.domain))
    reduced, pivots = augmented.rref()
    if pivots[:r] != tuple(range(r)):
        raise DMRankError('M was not of maximal rank')
    # A is invertible (product of elementary matrices), and A*M == J, so
    # for B = A^(-1) we get M == B*J == the first r columns of B.
    row_op_product = reduced[:, r:]
    return row_op_product.inv()
@public
def isolate(alg, eps=None, fast=False):
    """
    Find a rational isolating interval for a real algebraic number.

    Examples
    ========

    >>> from sympy import isolate, sqrt, Rational
    >>> print(isolate(sqrt(2)))  # doctest: +SKIP
    (1, 2)
    >>> print(isolate(sqrt(2), eps=Rational(1, 100)))
    (24/17, 17/12)

    Parameters
    ==========

    alg : str, int, :py:class:`~.Expr`
        The algebraic number to be isolated. Must be a real number, to use this
        particular function. However, see also :py:meth:`.Poly.intervals`,
        which isolates complex roots when you pass ``all=True``.
    eps : positive element of :ref:`QQ`, None, optional (default=None)
        Precision to be passed to :py:meth:`.Poly.refine_root`
    fast : boolean, optional (default=False)
        Say whether fast refinement procedure should be used.
        (Will be passed to :py:meth:`.Poly.refine_root`.)

    Returns
    =======

    Pair of rational numbers defining an isolating interval for the given
    algebraic number.

    See Also
    ========

    .Poly.intervals

    """
    alg = sympify(alg)
    # A rational is its own (degenerate) isolating interval.
    if alg.is_Rational:
        return (alg, alg)
    elif not alg.is_real:
        raise NotImplementedError(
            "complex algebraic numbers are not supported")
    # Zero-argument function that evaluates alg as an mpmath interval
    # (IntervalPrinter makes lambdify emit mpi arithmetic).
    func = lambdify((), alg, modules="mpmath", printer=IntervalPrinter())
    poly = minpoly(alg, polys=True)
    intervals = poly.intervals(sqf=True)
    dps, done = mp.dps, False
    try:
        # Raise mpmath precision until the numeric enclosure of alg fits
        # inside exactly one of the root-isolating intervals of its minimal
        # polynomial; that interval then isolates alg itself.
        while not done:
            alg = func()
            for a, b in intervals:
                if a <= alg.a and alg.b <= b:
                    done = True
                    break
            else:
                mp.dps *= 2
    finally:
        # Always restore the caller's mpmath precision.
        mp.dps = dps
    # ``a``/``b`` are bound here: the loop only terminates once some
    # interval contained the enclosure (minpoly of a real alg always has
    # at least one real root interval).
    if eps is not None:
        a, b = poly.refine_root(a, b, eps=eps, fast=fast)
    return (a, b)
|
nilq/baby-python
|
python
|
import enum
import os
from argparse import ArgumentParser
import tensorflow as tf
import create_mask_image
tf.logging.set_verbosity(tf.logging.INFO)
logger = tf.logging
home = os.path.expanduser("~")
class TrainingPaths(enum.Enum):
    """Keys identifying the three artefact directories used for training.

    Bug fix: the first two members previously had stray trailing commas,
    which made their values the tuples ``(0,)`` and ``(1,)`` while the last
    one was the plain int ``2``. Plain ints keep all members consistent.
    """
    MASK = 0
    ORIGINAL_IMAGE = 1
    MASKED_IMAGE = 2
# Default directory for each artefact type, rooted in the user's home.
PATHS = {
    TrainingPaths.MASK: os.path.join(home, "inpainting/masks/"),
    TrainingPaths.ORIGINAL_IMAGE: os.path.join(home, "inpainting/original-images/"),
    TrainingPaths.MASKED_IMAGE: os.path.join(home, "inpainting/masked-images/")
}
def maybe_create_paths(paths):
    """Ensure every directory in *paths* exists, creating parents as needed."""
    for path in paths:
        tf.gfile.MakeDirs(path)
        logger.info("Created {} path".format(path))
def build_parser():
    """Build the command-line parser for the mask-generation script.

    Counts (--num_mask, --min_units, --max_units) are required; the three
    output paths fall back to the defaults in ``PATHS``.
    """
    parser = ArgumentParser()
    parser.add_argument('--num_mask', type=int, dest='num_mask',
                        required=True, metavar='Number of mask',
                        help='how many mask to generate')
    parser.add_argument('--min_units', type=int, dest='min_units',
                        required=True, metavar='Min units to generate',
                        help='min units to generate')
    parser.add_argument('--max_units', type=int, dest='max_units',
                        required=True, metavar='Max units to generate',
                        help='max units to generate')
    parser.add_argument('--masks_path', type=str, dest='masks_path',
                        default=PATHS[TrainingPaths.MASK],
                        metavar='Path to save masks',
                        help='path to save masks')
    parser.add_argument('--original_images_path', type=str,
                        dest='original_images_path',
                        default=PATHS[TrainingPaths.ORIGINAL_IMAGE],
                        metavar='Path to raw image',
                        help='path to raw image')
    parser.add_argument('--masked_images_path', type=str,
                        dest='masked_images_path',
                        default=PATHS[TrainingPaths.MASKED_IMAGE],
                        metavar='Train',
                        help='image to train')
    return parser
def main():
    """Parse CLI arguments, ensure output directories exist, then generate
    the masks and masked images."""
    parser = build_parser()
    arguments = parser.parse_args()
    paths = [arguments.masks_path, arguments.original_images_path, arguments.masked_images_path]
    maybe_create_paths(paths)
    create_mask_image.save_mask(arguments.num_mask, arguments.min_units, arguments.max_units,
                                arguments.masks_path, arguments.original_images_path, arguments.masked_images_path)

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, unicode_literals
from .extras.clients import WebApplicationPushClient
from .extras.grant_types import AuthorizationCodePushGrant
from .extras.endpoints import Server
from .extras.errors import MalformedResponsePushCodeError
|
nilq/baby-python
|
python
|
from functools import wraps
import logging
import math
import time
from typing import Callable
logger = logging.getLogger()
def format_seconds(seconds: int):
    """Format a duration in seconds as ``M:SS``, or ``H:MM:SS`` for an hour
    or more.

    Falsy input (``None``, ``0``) is treated as zero seconds.

    Bug fix: the hour branch previously returned only ``"{hours}:{minutes}"``,
    silently dropping the seconds and leaving minutes unpadded
    (e.g. 3661 -> "1:1" instead of "1:01:01").
    """
    seconds = int(seconds or 0)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if hours:
        return "{}:{}:{}".format(hours, str(minutes).zfill(2), str(seconds).zfill(2))
    return "{}:{}".format(minutes, str(seconds).zfill(2))
def format_bytes(bytes_count: int):
    """Return *bytes_count* as a human-readable size string.

    Values below 1 KiB are shown in bytes; larger values are scaled to
    KB/MB/GB/TB with two decimals.

    Bug fix: the plural test was ``0 == B > 1``, which Python chains to
    ``(0 == B) and (B > 1)`` — always False — so every byte count was
    labelled "Byte". Now only exactly 1 is singular.
    """
    B = float(bytes_count)
    KB = float(1024)
    MB = float(KB ** 2)
    GB = float(KB ** 3)
    TB = float(KB ** 4)
    if B < KB:
        return "{0} {1}".format(B, "Byte" if B == 1 else "Bytes")
    elif KB <= B < MB:
        return "{0:.2f} KB".format(B / KB)
    elif MB <= B < GB:
        return "{0:.2f} MB".format(B / MB)
    elif GB <= B < TB:
        return "{0:.2f} GB".format(B / GB)
    # TB <= B: largest supported unit.
    return "{0:.2f} TB".format(B / TB)
def async_log_time(coroutine: Callable):
    """Decorator for async callables: log wall-clock runtime at INFO level.

    Bug fix: the wrapper now returns the wrapped coroutine's result, which
    was previously discarded (every decorated call returned ``None``).
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        t0 = time.time()
        result = await coroutine(*args, **kwargs)
        t1 = time.time()
        logger.info('{} took {:.3f}s'.format(coroutine.__name__, t1 - t0))
        return result
    return wrapper
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import subprocess
import time
SENSOR_PIN = 14
TIME_ON = 20
def main():
    """Blank the display, then wake it for TIME_ON seconds whenever the
    sensor on SENSOR_PIN reports a rising edge."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(SENSOR_PIN, GPIO.IN)
    # Start with the screen forced off.
    subprocess.run(['xset', 'dpms', 'force', 'off'])

    def callback(_):
        # Motion detected: wake the screen, hold, then blank again.
        # NOTE(review): the sleep runs inside the GPIO callback, so edges
        # arriving during TIME_ON are effectively coalesced — confirm that
        # this is the intended behavior.
        subprocess.run(['xset', 'dpms', 'force', 'on'])
        time.sleep(TIME_ON)
        subprocess.run(['xset', 'dpms', 'force', 'off'])

    try:
        GPIO.add_event_detect(SENSOR_PIN, GPIO.RISING, callback=callback)
        while True:
            # Idle loop; all work happens in the event callback.
            time.sleep(100)
    except KeyboardInterrupt:
        pass
    GPIO.cleanup()

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
"""Testing v0x04 FlowRemoved message."""
from pyof.v0x04.asynchronous.flow_removed import FlowRemoved
from pyof.v0x04.common.flow_match import Match
from tests.test_struct import TestStruct
class TestFlowRemovedMsg(TestStruct):
    """FlowRemoved message tests (also those in :class:`.TestDump`)."""

    @classmethod
    def setUpClass(cls):
        """Configure raw file and its object in parent class (TestDump)."""
        super().setUpClass()
        super().set_raw_dump_file('v0x04', 'ofpt_flow_removed')
        # Arbitrary-but-fixed field values; they must match the recorded
        # raw dump byte-for-byte.
        super().set_raw_dump_object(FlowRemoved, xid=1, cookie=1, priority=1,
                                    reason=1, table_id=1, duration_sec=1,
                                    duration_nsec=2, idle_timeout=3,
                                    hard_timeout=4, packet_count=1,
                                    byte_count=1, match=Match())
        # 56 bytes is the expected minimum size for this message type.
        super().set_minimum_size(56)
|
nilq/baby-python
|
python
|
import enum
from ..time import Resolution, UTC
class Curve:
    """
    The curve identifies any type of time series data and OHLC data.

    The ``curve.name`` is used in the API when loading data for a curve.
    """

    def __init__(self, name, curve_type=None, instance_issued_timezone=None,
                 area=None, area_sink=None, place=None,
                 resolution=None, frequency=None, timezone=None,
                 categories=None, unit=None, denominator=None, data_type=None,
                 source=None, commodity=None):
        #: The curve name is the identifier.
        self.name = name
        #: Curve type (the type of data this curve refers to).
        self.curve_type = curve_type
        # Robustness fix: ``curve_type`` defaults to None, but was
        # dereferenced unconditionally (AttributeError). Guard it.
        if self.curve_type is not None and self.curve_type.has_instances:
            #: For instance-based curves: The time-zone of the issue date
            #: in the instance, see :py:attr:`Instance.issued`.
            self.instance_issued_timezone = instance_issued_timezone or UTC
        else:
            self.instance_issued_timezone = None
        # The areas and place (if any)
        #: The area
        self.area = area
        #: The importing area for exchange curves
        self.area_sink = area_sink
        #: The exporting area for exchange curves. Robustness fix: always
        #: set (None for non-exchange curves); previously the attribute
        #: simply did not exist when ``area_sink`` was falsy.
        self.area_source = area if area_sink else None
        self.place = place
        # Resolution: either derived from the resolution object, or taken
        # from the individual frequency/timezone arguments.
        if resolution:
            #: The frequency of data in this curve
            self.frequency = resolution.frequency
            #: The time-zone of date-times in this curve
            self.timezone = resolution.timezone
        else:
            self.frequency = frequency
            self.timezone = timezone
        # Other metadata
        #: List of categories for this curve.
        self.categories = categories
        #: The unit (MW, EUR, etc.). See also :py:attr:`Curve.denominator`.
        self.unit = unit
        #: The denominator (for EUR/MWh: unit=EUR and denominator=MWh). See
        #: also :py:attr:`Curve.unit`.
        self.denominator = denominator
        #: The data type, :py:class:`DataType`.
        self.data_type = data_type
        #: The source of the data.
        self.source = source
        #: The curve commodity (Power, Gas, etc.)
        self.commodity = commodity

    @property
    def resolution(self):
        """
        The resolution (combination of frequency and timezone) for this curve.
        """
        return Resolution(self.frequency, self.timezone)

    def __str__(self):
        return self.name

    def __repr__(self):
        return f"<Curve: \"{self.name}\", curve_type={self.curve_type}>"
_datatype_lookup = {}
class DataType(enum.Enum):
    """
    Data types describe the type of data (i.e. actuals, forecast). This is
    the attribute that is always set as the last word in the curve name.

    Each member's value is a ``(tag, label)`` pair; members are registered
    in the module-level ``_datatype_lookup`` for case-insensitive lookup
    via :py:meth:`DataType.by_tag`.
    """
    #: Third-party actuals collected by Energy Quantified, but not modified.
    ACTUAL = ("ACTUAL", "Actual")
    #: Scenario data generated by Energy Quantified, which is based on climate
    #: data sets (synthetic weather years).
    CLIMATE = ("CLIMATE", "Climate")
    #: Scenario data generated by Energy Quantified. If you are looking for
    #: weather-based scenarios, look at ``DataType.CLIMATE``.
    SCENARIO = ("SCENARIO", "Scenario")
    #: A combination of third-party actuals and numbers generated by Energy
    #: Quantified, where we have filled missing with our best calculations.
    SYNTHETIC = ("SYNTHETIC", "Synthetic")
    #: The forecast models run backwards.
    BACKCAST = ("BACKCAST", "Backcast")
    #: The seasonal normals using 40 weather years.
    NORMAL = ("NORMAL", "Normal")
    #: Some model value (such as a factor).
    VALUE = ("VALUE", "Value")
    #: Forecasts generated by Energy Quantified unless another source is
    #: explicitly stated in the curve name.
    FORECAST = ("FORECAST", "Forecast")
    #: Currency rates.
    FOREX = ("FOREX", "Forex")
    #: Closing data from the market.
    OHLC = ("OHLC", "OHLC")
    #: Capacity data generated from REMIT outage messages.
    REMIT = ("REMIT", "REMIT")
    #: Total installed capacity.
    CAPACITY = ("CAPACITY", "Capacity")

    def __init__(self, tag=None, label=None):
        # Runs once per member at class-creation time; registers the member
        # for tag-based lookup.
        self.tag = tag
        self.label = label
        _datatype_lookup[tag.lower()] = self

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return self.name

    @staticmethod
    def is_valid_tag(tag):
        """
        Check whether a data type tag exists or not.

        :param tag: A data type tag
        :type tag: str
        :return: True if it exists, otherwise False
        :rtype: bool
        """
        return tag.lower() in _datatype_lookup

    @staticmethod
    def by_tag(tag):
        """
        Look up data type by tag (case-insensitive).

        :param tag: A data type tag
        :type tag: str
        :return: The data type for the given tag
        :rtype: DataType
        """
        return _datatype_lookup[tag.lower()]
_curvetype_lookup = {}
class CurveType(enum.Enum):
    """
    Curve type is not a part of the curve name.

    Curve type describes the storage format of the underlying data and which
    operations must be used to fetch data for these curves.

     * Load time series and scenario-based time series using the
       ``EnergyQuantified.timeseries.*`` operations.
     * To load instances (i.e. forecasts), use the
       ``EnergyQuantified.timeseries.*`` operations.
     * Periods and period-instances can be loaded by using each of
       their respective operations located under
       ``EnergyQuantified.periods.*`` and
       ``EnergyQuantified.instance_periods.*``.
     * OHLC means "open, high, low and close" data. To load data from
       these curves, use the OHLC operations.

    Each member's value is a ``(tag, has_instances)`` pair; members are
    registered in the module-level ``_curvetype_lookup`` for
    case-insensitive lookup via :py:meth:`CurveType.by_tag`.
    """
    #: Plain, fixed-interval time series data
    TIMESERIES = ("TIMESERIES", False)
    #: Plain, fixed-interval scenarios of time series data
    SCENARIO_TIMESERIES = ("SCENARIO_TIMESERIES", False)
    #: Instances (forecasts)
    INSTANCE = ("INSTANCE", True)
    #: Period-based data
    PERIOD = ("PERIOD", False)
    #: Instances of period-based data
    INSTANCE_PERIOD = ("INSTANCE_PERIOD", True)
    #: Closing prices for market data
    OHLC = ("OHLC", False)

    def __init__(self, tag=None, has_instances=False):
        # Runs once per member at class-creation time; registers the member
        # for tag-based lookup.
        self.tag = tag
        self.has_instances = has_instances
        _curvetype_lookup[tag.lower()] = self

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    @staticmethod
    def is_valid_tag(tag):
        """
        Check whether a curve type tag exists or not.

        :param tag: A curve type tag
        :type tag: str
        :return: True if it exists, otherwise False
        :rtype: bool
        """
        return tag.lower() in _curvetype_lookup

    @staticmethod
    def by_tag(tag):
        """
        Look up curve type by tag (case-insensitive).

        :param tag: A curve type tag
        :type tag: str
        :return: The curve type for the given tag
        :rtype: CurveType
        """
        return _curvetype_lookup[tag.lower()]
|
nilq/baby-python
|
python
|
#! python3
import sys, PyQt5
from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QApplication
class Example(QWidget):
    """Demo window: a QLineEdit whose text is mirrored into a QLabel."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Create the label and line edit, wire them up, and show the window."""
        self.label = QLabel(self)
        self.label.move(60, 40)
        line_edit = QLineEdit(self)
        line_edit.move(60, 100)
        line_edit.textChanged[str].connect(self.onChanged)
        self.setGeometry(300, 300, 350, 250)
        self.setWindowTitle('QLineEdit')
        self.show()

    def onChanged(self, text):
        """Slot: copy the edited text into the label and resize it to fit."""
        self.label.setText(text)
        self.label.adjustSize()
def main():
    """Create the demo window and run the Qt event loop."""
    app = QApplication(sys.argv)
    ex = Example()  # keep a reference so the window is not garbage-collected
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
# FIpurE: This is odd...
import sys
import os
from grpc._cython.cygrpc import StatusCode
from pur.core.purnode import purNode
from pur.generated.purbase_pb2 import GetNodeInfoReq, GetNodeInfoResp
from pur.generated.purbase_pb2_grpc import BaseServicer
class BaseService(BaseServicer):
    """gRPC servicer exposing basic information about the running node."""

    def __init__(self, purnode: purNode):
        self.purnode = purnode

    def GetNodeInfo(self, request: GetNodeInfoReq, context) -> GetNodeInfoResp:
        """Return the node version and the bundled gRPC .proto definition.

        On any failure, reports StatusCode.unknown with the error text and
        returns an empty response.
        """
        try:
            resp = GetNodeInfoResp()
            resp.version = self.purnode.version
            # Locate pur.proto inside the installed package so clients can
            # retrieve the exact schema this node was built with.
            pkgdir = os.path.dirname(sys.modules['pur'].__file__)
            grpcprotopath = os.path.join(pkgdir, "protos", "pur.proto")
            with open(grpcprotopath, 'r') as infile:
                resp.grpcProto = infile.read()
            return resp
        except Exception as e:
            context.set_code(StatusCode.unknown)
            # Bug fix: set_details() expects a string; passing the exception
            # object itself fails inside the gRPC layer.
            context.set_details(str(e))
            return GetNodeInfoResp()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import sys
import time
# Bug fix: time.clock was removed in Python 3.8, so the old Windows branch
# (time.clock on 'win', time.time elsewhere) crashes at import time on
# modern Pythons. time.perf_counter is the portable high-resolution timer
# on every platform; note it returns a monotonic reference point, not the
# epoch, which is fine since callers only use differences.
timer = time.perf_counter
def total(reps, func, *args, **kwargs):
    """Total time to run func() reps times.

    Returns (total time, last result). Robustness fix: with ``reps == 0``
    the result is ``None`` instead of raising UnboundLocalError; also
    iterates a range directly rather than materializing a throwaway list.
    """
    ret = None
    start = timer()
    for _ in range(reps):
        ret = func(*args, **kwargs)
    elapsed = timer() - start
    return (elapsed, ret)
def bestof(reps, func, *args, **kwargs):
    """Quickest func() among reps runs.

    Returns (best time, last result).
    """
    # Sentinel larger than any plausible elapsed time.
    best = 2 ** 32
    for _ in range(reps):
        begin = timer()
        ret = func(*args, **kwargs)
        best = min(best, timer() - begin)
    return (best, ret)
def bestoftotal(reps1, reps2, func, *args, **kwargs):
    """Best of totals:
    (best of reps1 runs of (total of reps2 runs of func))

    Returns (best total time, (inner total time, last result)).
    """
    return bestof(reps1, total, reps2, func, *args, **kwargs)
|
nilq/baby-python
|
python
|
"""Platform to present any Tuya DP as a binary sensor."""
import logging
from functools import partial
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
DOMAIN,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS
from .common import LocalTuyaEntity, async_setup_entry
_LOGGER = logging.getLogger(__name__)
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
def flow_schema(dps):
    """Return schema used in config flow.

    ``dps`` (the device's available datapoints) is accepted for API
    symmetry with the other platform modules but is not used here.
    """
    return {
        vol.Required(CONF_STATE_ON, default="True"): str,
        vol.Required(CONF_STATE_OFF, default="False"): str,
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
    }
class LocaltuyaBinarySensor(LocalTuyaEntity, BinarySensorEntity):
    """Representation of a Tuya binary sensor."""

    def __init__(
        self,
        device,
        config_entry,
        sensorid,
        **kwargs,
    ):
        """Initialize the Tuya binary sensor."""
        super().__init__(device, config_entry, sensorid, _LOGGER, **kwargs)
        # Current on/off state; updated in status_updated().
        self._is_on = False

    @property
    def is_on(self):
        """Return sensor state."""
        return self._is_on

    @property
    def device_class(self):
        """Return the class of this device."""
        return self._config.get(CONF_DEVICE_CLASS)

    def status_updated(self):
        """Device status was updated.

        Compares the datapoint value (stringified, case-insensitive)
        against the configured on/off patterns; an unrecognized value
        leaves the previous state unchanged and logs a warning.
        """
        state = str(self.dps(self._dp_id)).lower()
        if state == self._config[CONF_STATE_ON].lower():
            self._is_on = True
        elif state == self._config[CONF_STATE_OFF].lower():
            self._is_on = False
        else:
            self.warning(
                "State for entity %s did not match state patterns", self.entity_id
            )
# Wire this platform into the shared localtuya setup helper, binding the
# domain, entity class and config-flow schema.
async_setup_entry = partial(
    async_setup_entry, DOMAIN, LocaltuyaBinarySensor, flow_schema
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from utils import mathfont
import fontforge
# Create a WOFF font with glyphs for all the operator strings.
font = mathfont.create("stretchy", "Copyright (c) 2021 Igalia S.L.")

# Set parameters for stretchy tests.
font.math.MinConnectorOverlap = mathfont.em // 2

# Make sure that underover parameters don't add extra spacing.
font.math.LowerLimitBaselineDropMin = 0
font.math.LowerLimitGapMin = 0
font.math.StretchStackBottomShiftDown = 0
font.math.StretchStackGapAboveMin = 0
font.math.UnderbarVerticalGap = 0
font.math.UnderbarExtraDescender = 0
font.math.UpperLimitBaselineRiseMin = 0
font.math.UpperLimitGapMin = 0
font.math.StretchStackTopShiftUp = 0
font.math.StretchStackGapBelowMin = 0
font.math.OverbarVerticalGap = 0
font.math.AccentBaseHeight = 0
font.math.OverbarExtraAscender = 0

# These two characters will be stretchable in both directions.
horizontalArrow = 0x295A  # LEFTWARDS HARPOON WITH BARB UP FROM BAR
verticalArrow = 0x295C  # UPWARDS HARPOON WITH BARB RIGHT FROM BAR

mathfont.createSizeVariants(font)

# Add stretchy vertical and horizontal constructions for the horizontal arrow.
mathfont.createSquareGlyph(font, horizontalArrow)
mathfont.createStretchy(font, horizontalArrow, True)
mathfont.createStretchy(font, horizontalArrow, False)

# Add stretchy vertical and horizontal constructions for the vertical arrow.
mathfont.createSquareGlyph(font, verticalArrow)
mathfont.createStretchy(font, verticalArrow, True)
mathfont.createStretchy(font, verticalArrow, False)

# Write the finished font to disk.
mathfont.save(font)
|
nilq/baby-python
|
python
|
# Copyright (C) 2018 The python-bitcoin-utils developers
#
# This file is part of python-bitcoin-utils
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoin-utils, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
from bitcoinutils.setup import setup
from bitcoinutils.transactions import Transaction, TxInput, TxOutput, Sequence
from bitcoinutils.keys import P2pkhAddress, P2shAddress, PrivateKey, P2wshAddress, P2wpkhAddress
from bitcoinutils.script import Script
from bitcoinutils.constants import TYPE_RELATIVE_TIMELOCK
def main():
    """Spend a 1-of-2 multisig P2WSH UTXO to a P2WPKH address on testnet."""
    # always remember to setup the network
    setup('testnet')

    # The two possible signers of the 1-of-2 multisig.
    priv1 = PrivateKey("cN1XE3ESGgdvr4fWsB7L3BcqXncUauF8Fo8zzv4Sm6WrkiGrsxrG")
    priv2 = PrivateKey("cR8AkcbL2pgBswrHp28AftEznHPPLA86HiTog8MpNCibxwrsUcZ4")

    # Witness script: 1-of-2 CHECKMULTISIG over the two public keys.
    p2sh_redeem_script = Script(
        ['OP_1', priv1.get_public_key().to_hex(), priv2.get_public_key().to_hex(),'OP_2', 'OP_CHECKMULTISIG'])

    fromAddress = P2wshAddress.from_script(p2sh_redeem_script)
    toAddress = P2wpkhAddress.from_address("tb1qtstf97nhk2gycz7vl37esddjpxwt3ut30qp5pn")

    # set values
    txid = '2042195c40a92353f2ffe30cd0df8d177698560e81807e8bf9174a9c0e98e6c2'
    vout = 0
    amount = 0.01  # value of the UTXO being spent (required for segwit signing)

    # create transaction input from tx id of UTXO
    txin = TxInput(txid, vout)

    # 0.0001 to the destination, 0.0098 change back to the multisig;
    # the remaining 0.0001 of the 0.01 input is left as the miner fee.
    txOut1 = TxOutput(0.0001, toAddress.to_script_pub_key())
    txOut2 = TxOutput(0.0098, fromAddress.to_script_pub_key())

    tx = Transaction([txin], [txOut1, txOut2], has_segwit=True)

    sig1 = priv1.sign_segwit_input(tx, 0, p2sh_redeem_script, amount)
    # Witness stack: OP_0 (the CHECKMULTISIG dummy element), one signature,
    # and the serialized witness script.
    tx.witnesses.append(Script(['OP_0', sig1, p2sh_redeem_script.to_hex()]))

    # print raw signed transaction ready to be broadcasted
    print("\nRaw signed transaction:\n" + tx.serialize())
    print("\nTxId:", tx.get_txid())

if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# Country labels considered valid for classification.
legal_labels = ["west-germany", "usa", "france", "canada", "uk", "japan"]
# Field in the dataset that carries the label — presumably the Reuters
# "places" attribute; TODO confirm against the loading code.
label_name = "places"
# Tokenizer vocabulary cap, padded sequence length, and embedding size.
MAX_NUM_WORDS = 10000
MAX_SEQ_LENGTH = 100
EMBEDDING_DIM = 50
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from . import views
from django.contrib.auth.views import LoginView, LogoutView, PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView
app_name = 'account'
urlpatterns = [
    # Authentication
    url(r'^$', LoginView.as_view(template_name='account/welcome.html'), name='welcome_page'),
    url(r'^logout/$', LogoutView.as_view(template_name='account/auth/logout.html'), name='logout'),
    # Home pages
    url(r'^student/$', views.student_home, name='student_home'),
    url(r'^teacher/$', views.teacher_home, name='teacher_home'),
    # Class membership (join requests and teacher handling)
    url(r'^student/join_class/$', views.student_join_request, name='student_join_request'),
    url(r'^teacher/notifications/$', views.teacher_notifications, name='teacher_notifications'),
    url(r'^teacher/notifications/(?P<request_id>[0-9]+)/(?P<handle>[0-9]+)/$', views.teacher_handle_request, name='teacher_handle_request'),
    # Main class management (teacher)
    url(r'^teacher/add_mainclass/$', views.add_mainclass, name='add_mainclass'),
    url(r'^teacher/edit_mainclass/$', views.edit_mainclass, name='edit_mainclass'),
    url(r'^teacher/main_class/$', views.mainclass_home, name='mainclass_home'),
    url(r'^teacher/main_class/students/$', views.mainclass_students, name='mainclass_students'),
    url(r'^teacher/main_class/kick/(?P<student_id>[0-9]+)/$', views.kick_student, name='kick_student'),
    url(r'^teacher/main_class/add_subclass/$', views.add_subclass_request, name='add_subclass_request'),
    url(r'^teacher/main_class/(?P<subclass_id>[0-9]+)/edit/$', views.edit_subclass, name='edit_subclass'),
    # Subclass pages and lessons
    url(r'^student/(?P<subclass_id>[0-9]+)/$', views.student_subclass_home, name='student_subclass_home'),
    url(r'^student/lessons/(?P<subclass_id>[0-9]+)/$', views.student_lessons, name='student_lessons'),
    url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/$', views.teacher_lessons, name='teacher_lessons'),
    url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/add_lesson/$', views.add_lesson, name='add_lesson'),
    url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/confirm_delete/$', views.remove_confirm_lesson, name='remove_confirm_lesson'),
    url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/delete/$', views.remove_lesson, name='remove_lesson'),
    # Grades, calendar and per-lesson views
    url(r'^student/grades/(?P<subclass_id>[0-9]+)/$', views.student_grades, name='student_grades'),
    url(r'^student/calendar/(?P<subclass_id>[0-9]+)/(?P<week>[0-9]+)/$', views.student_calendar, name='student_calendar'),
    url(r'^student/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/$', views.student_lesson, name='student_lesson'),
    url(r'^teacher/lessons/(?P<subclass_id>[0-9]+)/(?P<lesson_id>[0-9]+)/$', views.teacher_lesson, name='teacher_lesson'),
    # Registration and profile editing
    url(r'^student_register/$', views.student_register, name='student_register'),
    url(r'^student/edit/$', views.edit_student_profile, name='edit_student_profile'),
    url(r'^teacher_register/$', views.teacher_register, name='teacher_register'),
    url(r'^teacher/edit/$', views.edit_teacher_profile, name='edit_teacher_profile'),
    # Archives
    url(r'^student/archives/(?P<subclass_id>[0-9]+)/(?P<my_filter>[0-9]+)/$', views.student_archives, name='student_archives'),
]
|
nilq/baby-python
|
python
|
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt

# Convert the broad-mask moment-0 FITS maps (and their error maps) for
# NGC 3351 into .npy arrays, applying a per-line significance cutoff first.
source = 'NGC3351'
line = np.array(('CO10','CO21','13CO21','13CO32','C18O21','C18O32'))
num = line.shape[0]
for i in range(num):
    fits_map = fits.open('data_image/'+source+'_'+line[i]+'_mom0_broad_nyq.fits')[0].data
    fits_err = fits.open('data_image/errors/'+source+'_'+line[i]+'_emom0_broad_nyq.fits')[0].data
    fits_map[fits_map < 0] = 0
    # BUG FIX: compute the cutoff mask ONCE before mutating either array.
    # The original code re-evaluated `fits_map < fits_err` after zeroing
    # fits_map, so the error map was blanked with a different mask than
    # the data map.
    if i > 3:  # 1 sigma cutoff for C18O lines
        mask = fits_map < fits_err
    else:      # 3 sigma cutoff for the brighter lines
        mask = fits_map < 3 * fits_err
    fits_map[mask] = 0
    fits_err[mask] = 0
    np.save('data_image/'+source+'_'+line[i]+'_mom0.npy', fits_map)
    np.save('data_image/errors/'+source+'_'+line[i]+'_emom0_broad_nyq.npy', fits_err)
    '''
    plt.imshow(fits_map, origin='lower', cmap='hot')
    plt.colorbar()
    plt.show()
    '''
|
nilq/baby-python
|
python
|
import pytest
import grblas as gb
import dask_grblas as dgb
from grblas import dtypes
from pytest import raises
from .utils import compare
def test_new():
    """Scalar.new should behave identically in grblas and dask_grblas."""
    for dtype in (int, float):
        s = gb.Scalar.new(dtype)
        ds = dgb.Scalar.new(dtype)
        compare(lambda x: x, s, ds)
    # Creating a Scalar from a non-dtype object must fail on both sides.
    bad = object()
    compare(lambda x, y: type(x).new(y), (s, bad), (ds, bad), errors=True)
def test_dup():
    """dup() must match between grblas and dask_grblas Scalars."""
    s = gb.Scalar.from_value(5)
    ds = dgb.Scalar.from_value(5)
    ds2 = dgb.Scalar.from_value(s)
    for dask_scalar in (ds, ds2):
        compare(lambda x: x, s, dask_scalar)
    for dask_scalar in (ds, ds2):
        compare(lambda x: x.dup(), s, dask_scalar)
    for dask_scalar in (ds, ds2):
        compare(lambda x: x.dup(dtype=dtypes.FP64), s, dask_scalar)
    # dup with a bad dtype argument must raise on both sides.
    bad = object()
    compare(lambda x, y: x.dup(y), (s, bad), (ds, bad), errors=True)
    # Sanity-check `compare` itself: mismatched value or dtype is detected.
    with raises(AssertionError):
        compare(lambda x: x, s, dgb.Scalar.from_value(6))
    with raises(AssertionError):
        compare(lambda x: x, s, dgb.Scalar.from_value(5, dtype=dtypes.FP64))
@pytest.mark.slow
def test_isequal_isclose():
    """isequal/isclose must agree with grblas over value, dtype and emptiness
    combinations, including empty (None-valued) scalars."""
    # Pairs covering: equal, unequal, equal-across-dtypes, one empty,
    # both empty same dtype, both empty different dtypes.
    values = [
        (gb.Scalar.from_value(5), gb.Scalar.from_value(5)),
        (gb.Scalar.from_value(5), gb.Scalar.from_value(6)),
        (gb.Scalar.from_value(5), gb.Scalar.from_value(5.0)),
        (gb.Scalar.from_value(None, dtype=int), gb.Scalar.from_value(5)),
        (gb.Scalar.from_value(None, dtype=int), gb.Scalar.from_value(None, dtype=int)),
        (gb.Scalar.from_value(None, dtype=int), gb.Scalar.from_value(None, dtype=float)),
    ]
    o = object()
    for s, t in values:
        for method_name in ['isequal', 'isclose']:
            ds = dgb.Scalar.from_value(s)
            dt = dgb.Scalar.from_value(t)
            # The lambda closes over method_name, but is invoked inside
            # `compare` before the loop advances, so late binding is safe.
            compare(
                lambda x, y: getattr(x, method_name)(y),
                (s, t),
                (ds, dt),
            )
            compare(
                lambda x, y: getattr(x, method_name)(y, check_dtype=True),
                (s, t),
                (ds, dt),
            )
            compare(lambda x, y: x == y, (s, t), (ds, dt), compute=False)
            # Comparing against a non-scalar object must raise on both sides.
            compare(lambda x: getattr(x, method_name)(o), s, ds, errors=True)
    # Tolerance behaviour: nearly-equal floats.
    s = gb.Scalar.from_value(5.0)
    t = gb.Scalar.from_value(5.000000001)
    ds = dgb.Scalar.from_value(s)
    dt = dgb.Scalar.from_value(t)
    assert s.isclose(t)
    compare(lambda x, y: x.isclose(y), (s, t), (ds, dt))
    # None is never equal/close to a held value.
    assert not s.isclose(None)
    compare(lambda x, y: x.isclose(y), (s, None), (ds, None))
    assert not s.isequal(None)
    compare(lambda x, y: x.isequal(y), (s, None), (ds, None))
    # Tight rel_tol alone fails; adding abs_tol succeeds.
    assert not s.isclose(t, rel_tol=1e-10)
    compare(lambda x, y: x.isclose(y, rel_tol=1e-10), (s, t), (ds, dt))
    assert s.isclose(t, rel_tol=1e-10, abs_tol=1e-8)
    compare(lambda x, y: x.isclose(y, rel_tol=1e-10, abs_tol=1e-8), (s, t), (ds, dt))
    # check_dtype with a Python int against a float scalar.
    compare(lambda x, y: x.isequal(y, check_dtype=True), (s, 5), (ds, 5))
    compare(lambda x, y: x.isclose(y, check_dtype=True), (s, 5), (ds, 5))
def test_nvals():
    """nvals is 1 for a held value and 0 for an empty scalar."""
    full = gb.Scalar.from_value(1)
    dfull = dgb.Scalar.from_value(full)
    compare(lambda x: x.nvals, full, dfull)
    empty = gb.Scalar.from_value(None, dtype=int)
    dempty = dgb.Scalar.from_value(empty)
    compare(lambda x: x.nvals, empty, dempty)
    # nvals behaves like a plain Python int, so it can seed a new Scalar.
    compare(lambda x: type(x).from_value(x.nvals), empty, dempty)
def test_value():
    """Reading and assigning .value must match between grblas and dask_grblas."""
    s = gb.Scalar.from_value(3)
    ds = dgb.Scalar.from_value(s)
    compare(lambda x: x.value, s, ds)

    def f(x, y):
        # Assign y as the scalar's value and return the mutated scalar.
        x.value = y
        return x

    compare(f, (s, 4), (ds, 4))
    s2 = gb.Scalar.from_value(5)
    # BUG FIX: the dask-side twin must wrap s2 (value 5), not s; otherwise the
    # comparison below would assign different values to the two sides.
    ds2 = dgb.Scalar.from_value(s2)
    # compare(f, (s, s2), (ds, ds2))  # not yet supported in grblas
    compare(f, (s, s2.value), (ds, ds2.value))
    # Assigning from nvals (a PythonScalar) and from None (clears the value).
    compare(f, (s, s.nvals), (ds, ds.nvals))
    compare(f, (s, None), (ds, None))
    # Assigning an arbitrary object must raise on both sides.
    o = object()
    compare(f, (s, o), (ds, o), errors=True)
def test_bool():
    """bool() of a scalar matches grblas for truthy, falsy and empty values."""
    plain_values = [0, 10.1, True, False]
    scalars = [gb.Scalar.from_value(v) for v in plain_values]
    # An empty scalar is also a valid input for bool().
    scalars.append(gb.Scalar.from_value(None, dtype=int))
    for s in scalars:
        ds = dgb.Scalar.from_value(s)
        compare(lambda x: bool(x), s, ds, compute=False)
def test_clear():
    """clear() empties the scalar identically in both libraries."""
    s = gb.Scalar.from_value(4)
    ds = dgb.Scalar.from_value(s)

    def cleared(x):
        # Mutate in place, then return the scalar for comparison.
        x.clear()
        return x

    compare(cleared, s, ds)
def test_is_empty():
    """is_empty tracks whether the scalar currently holds a value."""
    get_is_empty = lambda x: x.is_empty
    full = gb.Scalar.from_value(4)
    dfull = dgb.Scalar.from_value(full)
    compare(get_is_empty, full, dfull)
    # Clearing both sides must flip is_empty identically.
    full.clear()
    dfull.clear()
    compare(get_is_empty, full, dfull)
    # A scalar constructed from None is empty from the start.
    empty = gb.Scalar.from_value(None, dtype=float)
    dempty = dgb.Scalar.from_value(empty)
    compare(get_is_empty, empty, dempty)
def test_update():
    """update() and the << operator must behave identically in both libraries."""
    def by_method(x, y):
        x.update(y)
        return x

    def by_operator(x, y):
        x << y
        return x

    for updater in [by_method, by_operator]:
        s = gb.Scalar.from_value(6)
        ds = dgb.Scalar.from_value(s)
        s2 = gb.Scalar.from_value(7)
        ds2 = dgb.Scalar.from_value(s2)
        # Update from another scalar, a plain int, and None (clears).
        compare(updater, (s, s2), (ds, ds2))
        compare(updater, (s, 1), (ds, 1))
        compare(updater, (s, None), (ds, None))
        # Update from a vector element (an extract expression).
        v = gb.Vector.from_values([0, 2], [0, 2])
        dv = dgb.Vector.from_vector(v)
        compare(updater, (s, v[0]), (ds, dv[0]))
@pytest.mark.xfail
def test_attrs():
    """The dask wrapper should mirror grblas.Scalar's public surface; only a
    known set of implementation attributes may differ on each side."""
    s = gb.Scalar.from_value(3)
    ds = dgb.Scalar.from_value(s)
    # Attributes present only on the grblas object.
    assert set(dir(s)) - set(dir(ds)) == {
        '_is_empty', '_assign_element', '_extract_element', '_is_scalar', '_prep_for_assign',
        '_prep_for_extract', 'gb_obj', 'show',
    }
    # Attributes present only on the dask wrapper (dask collection protocol).
    assert set(dir(ds)) - set(dir(s)) == {
        '_delayed', '_meta', '_optional_dup',
        'compute', 'from_delayed', 'persist', 'visualize',
    }
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""This file contains the wifi.log (Mac OS X) parser."""
import logging
import re
import pyparsing
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
__author__ = 'Joaquin Moreno Garijo (bastionado@gmail.com)'
class MacWifiLogEvent(time_events.TimestampEvent):
  """Convenience class for a Mac Wifi log line event."""

  DATA_TYPE = u'mac:wifilog:line'

  def __init__(self, timestamp, agent, function, text, action):
    """Initializes the event object.

    Args:
      timestamp: the timestamp, contains the number of microseconds from
                 January 1, 1970 00:00:00 UTC.
      agent: the process that wrote the line, parsed from between the angle
             brackets of the log line (e.g. airportd) -- TODO confirm format.
      function: the logging function name, parsed from before the colon in
                the log line -- TODO confirm format.
      text: The log message
      action: A string containing known WiFI actions, eg: connected to
              an AP, configured, etc. If the action is not known,
              the value is the message of the log (text variable).
    """
    super(MacWifiLogEvent, self).__init__(
        timestamp, eventdata.EventTimestamp.ADDED_TIME)
    self.agent = agent
    self.function = function
    self.text = text
    self.action = action
class MacWifiLogParser(text_parser.PyparsingSingleLineTextParser):
  """Parse text based on wifi.log file."""

  NAME = u'macwifi'
  DESCRIPTION = u'Parser for Mac OS X wifi.log files.'

  _ENCODING = u'utf-8'

  # Regular expressions for known actions.
  # Matches the "Already associated to <SSID>. Bailing" message emitted when
  # a join is requested for a network that is already connected.
  RE_CONNECTED = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')
  # Captures ssid, bssid and security from a PSK association message.
  RE_WIFI_PARAMETERS = re.compile(
      r'\[ssid=(.*?), bssid=(.*?), security=(.*?), rssi=')

  # Define how a log line should look like.
  # NOTE(review): the first token (e.g. "Thu") is matched with the MONTH
  # constant; it appears to accept any three-letter abbreviation, which also
  # covers day-of-week names -- confirm before tightening.
  WIFI_LINE = (
      text_parser.PyparsingConstants.MONTH.setResultsName(u'day_of_week') +
      text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
      text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
      text_parser.PyparsingConstants.TIME_MSEC.setResultsName(u'time') +
      pyparsing.Literal(u'<') +
      pyparsing.CharsNotIn(u'>').setResultsName(u'agent') +
      pyparsing.Literal(u'>') +
      pyparsing.CharsNotIn(u':').setResultsName(u'function') +
      pyparsing.Literal(u':') +
      pyparsing.SkipTo(pyparsing.lineEnd).setResultsName(u'text'))

  # Header line written when the wifi daemon (re)starts.
  WIFI_HEADER = (
      text_parser.PyparsingConstants.MONTH.setResultsName(u'day_of_week') +
      text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
      text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
      text_parser.PyparsingConstants.TIME_MSEC.setResultsName(u'time') +
      pyparsing.Literal(u'***Starting Up***'))

  # Define the available log line structures.
  LINE_STRUCTURES = [
      (u'logline', WIFI_LINE),
      (u'header', WIFI_HEADER)]

  def __init__(self):
    """Initializes a parser object."""
    super(MacWifiLogParser, self).__init__()
    # The log lines carry no year; track an estimated year and the last seen
    # month so a month rollover can be detected (see _ParseLogLine).
    self._year_use = 0
    self._last_month = None

  def _GetAction(self, agent, function, text):
    """Parse the well know actions for easy reading.

    Args:
      agent: The device that generate the entry.
      function: The function or action called by the agent.
      text: Mac Wifi log text.

    Returns:
      know_action: A formatted string representing the known (or common) action.
    """
    # Only airportd messages have recognized actions; anything else is
    # returned verbatim.
    if not agent.startswith(u'airportd'):
      return text
    # TODO: replace "x in y" checks by startswith if possible.
    if u'airportdProcessDLILEvent' in function:
      interface = text.split()[0]
      return u'Interface {0:s} turn up.'.format(interface)
    if u'doAutoJoin' in function:
      match = re.match(self.RE_CONNECTED, text)
      if match:
        # Strip the quotes that surround the captured SSID.
        ssid = match.group(1)[1:-1]
      else:
        ssid = u'Unknown'
      return u'Wifi connected to SSID {0:s}'.format(ssid)
    if u'processSystemPSKAssoc' in function:
      wifi_parameters = self.RE_WIFI_PARAMETERS.search(text)
      if wifi_parameters:
        ssid = wifi_parameters.group(1)
        bssid = wifi_parameters.group(2)
        security = wifi_parameters.group(3)
        if not ssid:
          ssid = u'Unknown'
        if not bssid:
          bssid = u'Unknown'
        if not security:
          security = u'Unknown'
        return (
            u'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
            u'Security: {2:s}.').format(bssid, ssid, security)
    return text

  def _ConvertToTimestamp(self, day, month, year, time):
    """Converts date and time values into a timestamp.

    This is a timestamp_string as returned by using
    text_parser.PyparsingConstants structures:
    08, Nov, [20, 36, 37], 222]

    Args:
      day: an integer representing the day.
      month: an integer representing the month.
      year: an integer representing the year.
      time: a list containing integers with the number of
            hours, minutes and seconds.

    Returns:
      The timestamp which is an integer containing the number of micro seconds
      since January 1, 1970, 00:00:00 UTC.

    Raises:
      TimestampError: if the timestamp cannot be created from the date and
                      time values.
    """
    # TIME_MSEC yields ([hours, minutes, seconds], milliseconds).
    time_values, milliseconds = time
    hours, minutes, seconds = time_values
    microseconds = milliseconds * 1000
    return timelib.Timestamp.FromTimeParts(
        year, month, day, hours, minutes, seconds, microseconds=microseconds)

  def _ParseLogLine(self, parser_mediator, structure):
    """Parse a single log line and produce an event object.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      structure: A pyparsing.ParseResults object from a line in the
                 log file.
    """
    if not self._year_use:
      self._year_use = parser_mediator.GetEstimatedYear()

    # Gap detected between years.
    # A month smaller than the previously seen month means the log rolled
    # over into a new year (the lines themselves carry no year).
    month = timelib.MONTH_DICT.get(structure.month.lower())
    if not self._last_month:
      self._last_month = month
    if month < self._last_month:
      self._year_use += 1

    try:
      timestamp = self._ConvertToTimestamp(
          structure.day, month, self._year_use, structure.time)
    except errors.TimestampError as exception:
      parser_mediator.ProduceParseError(
          u'unable to determine timestamp with error: {0:s}'.format(
              exception))
      return

    self._last_month = month

    text = structure.text
    # Due to the use of CharsNotIn pyparsing structure contains whitespaces
    # that need to be removed.
    function = structure.function.strip()
    action = self._GetAction(structure.agent, function, text)
    event_object = MacWifiLogEvent(
        timestamp, structure.agent, function, text, action)
    parser_mediator.ProduceEvent(event_object)

  def ParseRecord(self, parser_mediator, key, structure):
    """Parses a log record structure and produces events.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      key: An identification string indicating the name of the parsed
           structure.
      structure: A pyparsing.ParseResults object from a line in the
                 log file.
    """
    # Header lines ("***Starting Up***") carry no event data and are ignored.
    if key == u'logline':
      self._ParseLogLine(parser_mediator, structure)
    elif key != u'header':
      logging.warning(
          u'Unable to parse record, unknown structure: {0:s}'.format(key))

  def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is a Mac Wifi log file.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      line: A single line from the text file.

    Returns:
      True if this is the correct parser, False otherwise.
    """
    # A wifi.log file is expected to begin with the startup header line.
    try:
      _ = self.WIFI_HEADER.parseString(line)
    except pyparsing.ParseException:
      logging.debug(u'Not a Mac Wifi log file')
      return False
    return True
# Register the parser with the plaso parser manager so it is discoverable.
manager.ParsersManager.RegisterParser(MacWifiLogParser)
|
nilq/baby-python
|
python
|
# chebyfit/__init__.py
from .chebyfit import __doc__, __all__, __version__
from .chebyfit import *
|
nilq/baby-python
|
python
|
from typing import Generator, Mapping, Union
from flask_babel import lazy_gettext
from app.questionnaire.location import Location
from .context import Context
from .section_summary_context import SectionSummaryContext
class SubmitQuestionnaireContext(Context):
    """Builds the template context for the final submit-questionnaire page."""

    def __call__(
        self, answers_are_editable: bool = True
    ) -> dict[str, Union[str, dict]]:
        """Return the page context; includes an answer summary when the schema
        defines summary options."""
        summary_options = self._schema.get_summary_options()
        collapsible = summary_options.get("collapsible", False)
        submission_schema: Mapping = self._schema.get_submission() or {}

        # Schema-provided copy, falling back to translated defaults.
        title = submission_schema.get("title") or lazy_gettext(
            "Check your answers and submit"
        )
        submit_button = submission_schema.get("button") or lazy_gettext(
            "Submit answers"
        )
        guidance = submission_schema.get("guidance") or lazy_gettext(
            "Please submit this survey to complete it"
        )
        warning = submission_schema.get("warning") or None
        context = {
            "title": title,
            "guidance": guidance,
            "warning": warning,
            "submit_button": submit_button,
        }
        if summary_options:
            context["summary"] = self._get_summary_context(
                collapsible, answers_are_editable
            )
        return context

    def _get_summary_context(
        self, collapsible: bool, answers_are_editable: bool
    ) -> dict[str, Union[list, bool, str]]:
        """Assemble the answer-summary sub-context over all enabled sections."""
        groups = list(self._build_all_groups())
        return {
            "groups": groups,
            "answers_are_editable": answers_are_editable,
            "collapsible": collapsible,
            "summary_type": "Summary",
        }

    def _build_all_groups(self) -> Generator[dict, None, None]:
        """ NB: Does not support repeating sections """
        for section_id in self._router.enabled_section_ids:
            location = Location(section_id=section_id)
            section_summary_context = SectionSummaryContext(
                language=self._language,
                schema=self._schema,
                answer_store=self._answer_store,
                list_store=self._list_store,
                progress_store=self._progress_store,
                metadata=self._metadata,
                current_location=location,
                return_to="final-summary",
                routing_path=self._router.routing_path(section_id),
            )
            section: Mapping = self._schema.get_section(section_id) or {}
            # NOTE(review): `break` aborts the whole loop, so no groups are
            # yielded for this or any LATER section once a section with custom
            # summary items is encountered. If the intent is to skip only that
            # section, this should be `continue` -- confirm with callers.
            if section.get("summary", {}).get("items"):
                break
            for group in section_summary_context()["summary"]["groups"]:
                yield group
|
nilq/baby-python
|
python
|
def get_answer():
    """Return the canonical affirmative answer (always ``True``)."""
    answer = True
    return answer
|
nilq/baby-python
|
python
|
# Copyright (c) 2015, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
from __future__ import print_function, division, absolute_import
import pytest
import numpy as np
from numpy import *
import quaternion
import spherical_functions as sf
import scri
from conftest import linear_waveform, constant_waveform, random_waveform, delta_waveform
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_identity_rotation(w):
    """Rotating the decomposition basis by the unit quaternion must leave
    every field unchanged except the waveform's unique `num` identifier."""
    # Rotation by 1 should be identity operation
    W_in = w()
    W_out = w()
    assert W_in.ensure_validity(alter=False)
    assert W_out.ensure_validity(alter=False)
    W_out.rotate_decomposition_basis(quaternion.one)
    assert W_out.ensure_validity(alter=False)
    assert np.array_equal(W_out.t, W_in.t)
    assert np.array_equal(W_out.frame, W_in.frame)
    assert np.array_equal(W_out.data, W_in.data)
    assert np.array_equal(W_out.LM, W_in.LM)
    assert W_out.ell_min == W_in.ell_min
    assert W_out.ell_max == W_in.ell_max
    # History strings should agree once the per-object id is substituted;
    # the rotation appends one extra history entry, hence the [:-1].
    for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
        assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
                                      type(W_in).__name__ + '_' + str(W_in.num))
                or (h_in.startswith('# ') and h_out.startswith('# ')))
    assert W_out.frameType == W_in.frameType
    assert W_out.dataType == W_in.dataType
    assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
    assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
    assert isinstance(W_out.num, int)
    assert W_out.num != W_in.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_rotation_invariants(w):
    """A generic rotation must change only `data` and `frame` (and `num`)."""
    # A random rotation should leave everything but data and frame the
    # same (except num, of course)
    W_in = w()
    W_out = w()
    np.random.seed(hash('test_rotation_invariants') % 4294967294)  # Use mod to get in an acceptable range
    W_out.rotate_decomposition_basis(np.quaternion(*np.random.uniform(-1, 1, 4)).normalized())
    assert W_in.ensure_validity(alter=False)
    assert W_out.ensure_validity(alter=False)
    assert np.array_equal(W_out.t, W_in.t)
    assert not np.array_equal(W_out.frame, W_in.frame)  # This SHOULD change
    assert not np.array_equal(W_out.data, W_in.data)  # This SHOULD change
    assert W_out.ell_min == W_in.ell_min
    assert W_out.ell_max == W_in.ell_max
    assert np.array_equal(W_out.LM, W_in.LM)
    # The rotation appends history entries, hence the differing slice bounds.
    for h_in, h_out in zip(W_in.history[:-3], W_out.history[:-5]):
        assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
                                      type(W_in).__name__ + '_' + str(W_in.num))
                or (h_in.startswith('# ') and h_out.startswith('# ')))
    assert W_out.frameType == W_in.frameType
    assert W_out.dataType == W_in.dataType
    assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
    assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
    assert W_out.num != W_in.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_constant_versus_series(w):
    """A constant rotor and a time series of rotors must both rotate the
    waveform, leaving everything except `data` and `frame` invariant."""
    # A random rotation should leave everything but data and frame the
    # same (except num, of course)
    W_const = w()
    W_series = w()
    np.random.seed(hash('test_constant_versus_series') % 4294967294)  # Use mod to get in an acceptable range
    W_const.rotate_decomposition_basis(np.quaternion(*np.random.uniform(-1, 1, 4)).normalized())
    # The series path repeats one (different) random rotor at every time step.
    W_series.rotate_decomposition_basis(
        np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized()] * W_series.n_times))
    assert W_const.ensure_validity(alter=False)
    assert W_series.ensure_validity(alter=False)
    assert np.array_equal(W_series.t, W_const.t)
    assert not np.array_equal(W_series.frame, W_const.frame)  # This SHOULD change
    assert not np.array_equal(W_series.data, W_const.data)  # This SHOULD change
    assert W_series.ell_min == W_const.ell_min
    assert W_series.ell_max == W_const.ell_max
    assert np.array_equal(W_series.LM, W_const.LM)
    # The series rotation logs more history entries than the constant one.
    for h_const, h_series in zip(W_const.history[:-5], W_series.history[:-11]):
        assert (h_const == h_series.replace(type(W_series).__name__ + '_' + str(W_series.num),
                                            type(W_const).__name__ + '_' + str(W_const.num))
                or (h_const.startswith('# ') and h_series.startswith('# ')))
    assert W_series.frameType == W_const.frameType
    assert W_series.dataType == W_const.dataType
    assert W_series.r_is_scaled_out == W_const.r_is_scaled_out
    assert W_series.m_is_scaled_out == W_const.m_is_scaled_out
    assert W_series.num != W_const.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_rotation_inversion(w):
    """Rotating the basis and then rotating by the inverse rotor must restore
    the waveform (up to roundoff), for both constant and time-series rotors."""
    # Rotation followed by the inverse rotation should leave
    # everything the same (except that the frame data will be either a
    # 1 or a series of 1s)
    np.random.seed(hash('test_rotation_inversion') % 4294967294)  # Use mod to get in an acceptable range
    W_in = w()
    assert W_in.ensure_validity(alter=False)
    # We loop over (1) a single constant rotation, and (2) an array of random rotations
    for R_basis in [np.quaternion(*np.random.uniform(-1, 1, 4)).normalized(),
                    np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized()] * W_in.n_times)]:
        W_out = w()
        W_out.rotate_decomposition_basis(R_basis)
        W_out.rotate_decomposition_basis(~R_basis)
        assert W_out.ensure_validity(alter=False)
        assert np.array_equal(W_out.t, W_in.t)
        assert np.max(np.abs(W_out.frame - W_in.frame)) < 1e-15
        # BUG FIX: the tolerance was written `ell_max ** 4 ** 4e-14`, which
        # Python parses right-associatively as ell_max ** (4 ** 4e-14), i.e.
        # roughly ell_max itself -- an absurdly loose absolute tolerance.
        # It should scale the same way as rtol: ell_max ** 4 * 4e-14.
        assert np.allclose(W_out.data, W_in.data, atol=W_in.ell_max ** 4 * 4e-14, rtol=W_in.ell_max ** 4 * 4e-14)
        assert W_out.ell_min == W_in.ell_min
        assert W_out.ell_max == W_in.ell_max
        assert np.array_equal(W_out.LM, W_in.LM)
        # The two rotations append history entries, hence the slice bounds.
        for h_in, h_out in zip(W_in.history[:-3], W_out.history[:-5]):
            assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
                                          type(W_in).__name__ + '_' + str(W_in.num))
                    or (h_in.startswith('# datetime') and h_out.startswith('# datetime')))
        assert W_out.frameType == W_in.frameType
        assert W_out.dataType == W_in.dataType
        assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
        assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
        assert W_out.num != W_in.num
def test_rotations_of_0_0_mode(Rs):
    """The (ell, m) = (0, 0) mode is a scalar, so any basis rotation must
    leave the data unchanged while the frame records the applied rotors."""
    # The (ell,m)=(0,0) mode should be rotationally invariant
    n_copies = 10
    W_in = delta_waveform(0, 0, begin=-10., end=100., n_times=n_copies * len(Rs), ell_min=0, ell_max=8)
    assert W_in.ensure_validity(alter=False)
    W_out = scri.WaveformModes(W_in)
    # Repeat each test rotor n_copies times so it covers every time step.
    R_basis = np.array([R for R in Rs for i in range(n_copies)])
    W_out.rotate_decomposition_basis(R_basis)
    assert W_out.ensure_validity(alter=False)
    assert np.array_equal(W_out.t, W_in.t)
    assert np.max(np.abs(W_out.frame - R_basis)) == 0.0
    assert np.array_equal(W_out.data, W_in.data)
    assert W_out.ell_min == W_in.ell_min
    assert W_out.ell_max == W_in.ell_max
    assert np.array_equal(W_out.LM, W_in.LM)
    # History strings should agree once per-object ids are substituted.
    for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
        assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
                                      type(W_in).__name__ + '_' + str(W_in.num))
                or (h_in.startswith('# ') and h_out.startswith('# ')))
    assert W_out.frameType == W_in.frameType
    assert W_out.dataType == W_in.dataType
    assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
    assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
    assert W_out.num != W_in.num
def test_rotations_of_each_mode_individually(Rs):
    """Rotating a delta waveform in mode (ell, Mp) must reproduce the
    corresponding row of the Wigner D matrix in that ell block, with zeros in
    all other ell blocks."""
    ell_min = 0
    ell_max = 8  # sf.ell_max is just too much; this test is too slow, and ell=8 should be fine
    R_basis = Rs
    # Precompute the Wigner D matrices for every test rotor.
    Ds = np.empty((len(Rs), sf.LMpM_total_size(ell_min, ell_max)), dtype=complex)
    for i, R in enumerate(Rs):
        Ds[i, :] = sf.Wigner_D_matrices(R, ell_min, ell_max)
    for ell in range(ell_max + 1):
        # Expected zero blocks below and above the active ell.
        first_zeros = np.zeros((len(Rs), sf.LM_total_size(ell_min, ell - 1)), dtype=complex)
        later_zeros = np.zeros((len(Rs), sf.LM_total_size(ell + 1, ell_max)), dtype=complex)
        for Mp in range(-ell, ell):
            W_in = delta_waveform(ell, Mp, begin=-10., end=100., n_times=len(Rs), ell_min=ell_min, ell_max=ell_max)
            # Now, the modes are f^{\ell,m} = \delta^{\ell,m}_{L,Mp}
            assert W_in.ensure_validity(alter=False)
            W_out = scri.WaveformModes(W_in)
            W_out.rotate_decomposition_basis(Rs)
            assert W_out.ensure_validity(alter=False)
            assert np.array_equal(W_out.t, W_in.t)
            assert np.max(np.abs(W_out.frame - R_basis)) == 0.0
            # Index of D^{ell}_{Mp, -ell}; the active block is the following
            # 2*ell + 1 columns.
            i_D0 = sf.LMpM_index(ell, Mp, -ell, ell_min)
            assert np.array_equal(W_out.data[:, :sf.LM_total_size(ell_min, ell - 1)], first_zeros)
            if ell < ell_max:
                assert np.array_equal(
                    W_out.data[:, sf.LM_total_size(ell_min, ell - 1):-sf.LM_total_size(ell + 1, ell_max)],
                    Ds[:, i_D0:i_D0 + (2 * ell + 1)])
                assert np.array_equal(W_out.data[:, -sf.LM_total_size(ell + 1, ell_max):], later_zeros)
            else:
                assert np.array_equal(W_out.data[:, sf.LM_total_size(ell_min, ell - 1):],
                                      Ds[:, i_D0:i_D0 + (2 * ell + 1)])
            assert W_out.ell_min == W_in.ell_min
            assert W_out.ell_max == W_in.ell_max
            assert np.array_equal(W_out.LM, W_in.LM)
            for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
                assert h_in == h_out.replace(type(W_out).__name__ + str(W_out.num), type(W_in).__name__ + str(W_in.num))
            assert W_out.frameType == W_in.frameType
            assert W_out.dataType == W_in.dataType
            assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
            assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
            assert W_out.num != W_in.num
|
nilq/baby-python
|
python
|
from apiaudio.api_request import APIRequest
class Connector(APIRequest):
    """API resource for retrieving connectors and their connections."""

    OBJECT_NAME = "connector"
    resource_path = "/connector/"
    connection_path = "/connection/"

    @classmethod
    def retrieve(cls, name):
        """Fetch a single connector by its name."""
        if not name:
            raise Exception("Name must be set")
        path = cls.resource_path + name
        return cls._get_request(path_param=path)

    @classmethod
    def connection(cls, connection_id):
        """Fetch a single connection by its identifier."""
        if not connection_id:
            raise Exception("Connection id must be set")
        path = cls.connection_path + connection_id
        return cls._get_request(path_param=path)
|
nilq/baby-python
|
python
|
import unittest
from rime.util import struct
class TestStruct(unittest.TestCase):
    """Unit tests for rime.util.struct.Struct, a dict with attribute access."""

    def test_dict_attr(self):
        # Struct must inherit dict's methods unchanged.
        self.assertEqual(struct.Struct.items, dict.items)

    def test_constructor(self):
        # Keyword arguments become both attributes and keys.
        obj = struct.Struct(test_attr='test_obj')
        self.assertEqual(obj.test_attr, 'test_obj')
        self.assertEqual(obj['test_attr'], 'test_obj')

    def test_add_attr(self):
        obj = struct.Struct()
        obj.test_attr = 'test_obj'
        self.assertEqual(obj.test_attr, 'test_obj')

    def test_add_key(self):
        # Setting a key must also expose it as an attribute.
        obj = struct.Struct()
        obj['test_attr'] = 'test_obj'
        self.assertEqual(obj.test_attr, 'test_obj')
        self.assertEqual(obj['test_attr'], 'test_obj')

    def test_attribute_error(self):
        # A missing key surfaces as AttributeError, not KeyError.
        obj = struct.Struct()
        with self.assertRaises(AttributeError):
            obj.test_attr
|
nilq/baby-python
|
python
|
import pandas as pd
from calendar import isleap
def get_date_range_hours_from_year(year):
    """
    creates date range in hours for the year excluding leap day

    :param year: year of date range
    :type year: int
    :return: pd.date_range with 8760 values
    :rtype: pandas.data_range
    """
    # BUG FIX: build the half-open range with `periods` instead of the
    # `closed='left'` keyword, which was deprecated and then removed in
    # pandas 2.0; this form works on every pandas version.
    periods = 8784 if isleap(year) else 8760
    date_range = pd.date_range(start=str(year), periods=periods, freq='h')
    # Check if leap year and remove extra day so the result is always 8760 values
    if isleap(year):
        date_range = date_range[~((date_range.month == 2) & (date_range.day == 29))]
    return date_range
|
nilq/baby-python
|
python
|
from collections import defaultdict
import nltk
import random
import string
import torch
from nltk.corpus import stopwords
from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM
from tqdm import tqdm
# Select the GPU when available; all BERT tensors and the model live on this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Initialize BERT vocabulary...')
# Tokenizer built from the Reddit-finetuned BERT vocabulary shipped with the repo.
bert_tokenizer = BertTokenizer(vocab_file='data/BERT_model_reddit/vocab.txt')
print('Initialize BERT model...')
# Masked-LM head used to propose replacement tokens for [MASK] positions.
bert_model = BertForMaskedLM.from_pretrained('data/BERT_model_reddit').to(device)
bert_model.eval()
''' Printing functions '''
class print_color:
    # ANSI terminal escape sequences used to highlight candidate words.
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # Resets all attributes; must terminate every styled span.
    END = '\033[0m'
def color_print_top_words(top_words, gt_euphemism):
    """Print up to 100 candidates, highlighting ground-truth euphemisms:
    bold purple for exact matches, underlined purple for tokens that occur
    inside a multi-word ground-truth phrase."""
    print('[Euphemism Candidates]: ')
    # Individual tokens contained in any (possibly multi-word) gt phrase.
    gt_tokens = set(word for phrase in gt_euphemism for word in phrase.split())
    for candidate in top_words[:100]:
        if candidate in gt_euphemism:
            styled = print_color.BOLD + print_color.PURPLE + candidate + print_color.END
        elif candidate in gt_tokens:
            styled = print_color.UNDERLINE + print_color.PURPLE + candidate + print_color.END
        else:
            styled = candidate
        print(styled, end=', ')
    print()
''' Evaluation '''
def evaluate_detection(top_words, gt_euphemism):
    """Print precision-at-k of the ranked candidates against the ground truth.

    Two precision numbers are reported: exact matches, and "loose" matches
    (tokens that appear inside a multi-word ground-truth phrase).
    """
    color_print_top_words(top_words, gt_euphemism)
    gt_tokens = set(word for phrase in gt_euphemism for word in phrase.split())
    # 1/0 hit flags per rank for the two match criteria.
    exact_flags = [1 if word in gt_euphemism else 0 for word in top_words]
    loose_flags = [1 if word in gt_tokens else 0 for word in top_words]
    precision_exact = []
    precision_loose = []
    running_exact = 0
    running_loose = 0
    for rank, (hit_exact, hit_loose) in enumerate(zip(exact_flags, loose_flags), start=1):
        running_exact += hit_exact
        running_loose += hit_loose
        precision_exact.append(running_exact / rank)
        precision_loose.append(running_loose / rank)
    for topk in [10, 20, 30, 40, 50, 60, 80, 100]:
        if topk < len(precision_exact):
            print('Top-{:d} precision is ({:.2f}, {:.2f})'.format(topk, precision_exact[topk-1], precision_loose[topk-1]))
    return 0
''' Main Function '''
def MLM(sgs, input_keywords, thres=1, filter_uninformative=1):
    """Rank euphemism candidates by accumulated masked-LM probability.

    Args:
        sgs: list of masked sentences, each expected to contain '[MASK]'.
        input_keywords: seed keywords; used to judge sentence informativeness.
        thres: number of top predictions inspected when counting how many
               seed keywords a sentence's mask predicts.
        filter_uninformative: if 1, drop sentences whose mask predicts fewer
               than 2 seed keywords among its top-`thres` tokens.

    Returns:
        (ranked candidate words, [word, score] pairs, retained sentences).
    """
    def to_bert_input(tokens, bert_tokenizer):
        # Build (token ids, segment ids, attention mask) batch-of-1 tensors.
        token_idx = torch.tensor(bert_tokenizer.convert_tokens_to_ids(tokens))
        sep_idx = tokens.index('[SEP]')
        segment_idx = token_idx * 0
        segment_idx[(sep_idx + 1):] = 1
        mask = (token_idx != 0)
        return token_idx.unsqueeze(0).to(device), segment_idx.unsqueeze(0).to(device), mask.unsqueeze(0).to(device)

    def single_MLM(message):
        # Return the top-MLM_k (token, probability) pairs predicted for the
        # first '[MASK]' token in `message`.
        # NOTE(review): if `message` contains no '[MASK]' this implicitly
        # returns None, which would break the caller's slicing; callers only
        # pass masked sentences, so this path is assumed unreachable.
        MLM_k = 50
        tokens = bert_tokenizer.tokenize(message)
        if len(tokens) == 0:
            return []
        # Ensure the sequence is wrapped in [CLS] ... [SEP] as BERT expects.
        if tokens[0] != CLS:
            tokens = [CLS] + tokens
        if tokens[-1] != SEP:
            tokens.append(SEP)
        token_idx, segment_idx, mask = to_bert_input(tokens, bert_tokenizer)
        with torch.no_grad():
            logits = bert_model(token_idx, segment_idx, mask, masked_lm_labels=None)
        logits = logits.squeeze(0)
        probs = torch.softmax(logits, dim=-1)
        for idx, token in enumerate(tokens):
            if token == MASK:
                topk_prob, topk_indices = torch.topk(probs[idx, :], MLM_k)
                topk_tokens = bert_tokenizer.convert_ids_to_tokens(topk_indices.cpu().numpy())
                out = [[topk_tokens[i], float(topk_prob[i])] for i in range(MLM_k)]
                return out

    PAD, MASK, CLS, SEP = '[PAD]', '[MASK]', '[CLS]', '[SEP]'
    MLM_score = defaultdict(float)
    # Only show a progress bar for non-trivial workloads.
    temp = sgs if len(sgs) < 10 else tqdm(sgs)
    skip_ms_num = 0
    good_sgs = []
    for sgs_i in temp:
        top_words = single_MLM(sgs_i)
        # Count how many seed keywords rank among the top-`thres` predictions.
        seen_input = 0
        for input_i in input_keywords:
            if input_i in [x[0] for x in top_words[:thres]]:
                seen_input += 1
        # A sentence is uninformative when fewer than 2 seed keywords rank highly.
        if filter_uninformative == 1 and seen_input < 2:
            skip_ms_num += 1
            continue
        good_sgs.append(sgs_i)
        # Accumulate probability mass per candidate, skipping noise tokens.
        for j in top_words:
            if j[0] in string.punctuation:
                continue
            if j[0] in stopwords.words('english'):
                continue
            if j[0] in input_keywords:
                continue
            if j[0] in ['drug', 'drugs']:  # exclude these two for the drug dataset.
                continue
            if j[0][:2] == '##':  # the '##' by BERT indicates that is not a word.
                continue
            MLM_score[j[0]] += j[1]
        # print(sgs_i)
        # print([x[0] for x in top_words[:20]])
    out = sorted(MLM_score, key=lambda x: MLM_score[x], reverse=True)
    out_tuple = [[x, MLM_score[x]] for x in out]
    if len(sgs) >= 10:
        print('The percentage of uninformative masked sentences is {:d}/{:d} = {:.2f}%'.format(skip_ms_num, len(sgs), float(skip_ms_num)/len(sgs)*100))
    return out, out_tuple, good_sgs
def euphemism_detection(input_keywords, all_text, ms_limit, filter_uninformative):
    """Build [MASK]ed sentences around each seed keyword and rank candidate
    euphemisms via the masked language model."""
    print('\n' + '*' * 40 + ' [Euphemism Detection] ' + '*' * 40)
    print('[util.py] Input Keyword: ', end='')
    print(input_keywords)
    print('[util.py] Extracting masked sentences for input keywords...')
    masked_sentence = []
    for sentence in tqdm(all_text):
        tokens = nltk.word_tokenize(sentence)
        for keyword in input_keywords:
            if keyword not in tokens:
                continue
            # Replace the first occurrence of the keyword with a [MASK] slot.
            position = tokens.index(keyword)
            left = ' '.join(tokens[:position])
            right = ' '.join(tokens[position + 1:])
            masked_sentence.append(left + ' [MASK] ' + right)
    # Subsample to keep the BERT pass tractable.
    random.shuffle(masked_sentence)
    masked_sentence = masked_sentence[:ms_limit]
    print('[util.py] Generating top candidates...')
    top_words, _, _ = MLM(masked_sentence, input_keywords, thres=5, filter_uninformative=filter_uninformative)
    return top_words
|
nilq/baby-python
|
python
|
from .FeatureSet import FeatureSet
class Version(FeatureSet):
    """A numbered API version (e.g. "3.2") within the feature-set hierarchy."""

    def __init__(self, api, internalIdentifier, identifier, versionString, apiString):
        super(Version, self).__init__(api, internalIdentifier)

        self.nativeIdentifier = identifier
        self.apiString = apiString

        # Keep only the first two dot-separated components of the version.
        major, minor = versionString.split(".")[0:2]
        self.majorVersion = int(major)
        self.minorVersion = int(minor)

        self.isCore = False
        self.isExt = False

        # API elements deprecated or removed as of this version.
        self.deprecatedFunctions = []
        self.deprecatedConstants = []
        self.deprecatedTypes = []
        self.removedFunctions = []
        self.removedConstants = []
        self.removedTypes = []

    def __lt__(self, other):
        # Lexicographic ordering on (major, minor).
        return (self.majorVersion, self.minorVersion) < (other.majorVersion, other.minorVersion)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the push-notification app: a Device holds a unique
    # push token (optionally tied to a user); a Message records a payload
    # fanned out to many devices.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Device',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.CharField(unique=True, max_length=127)),
                # Nullable: a device may exist before a user signs in on it.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('message_id', models.CharField(max_length=63, null=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('body', models.TextField()),
                ('type', models.TextField(null=True, blank=True)),
                ('devices', models.ManyToManyField(to='pesteh.Device')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import dnaseq
import bm_preproc
import kmer_index
# Load the GRCh38 chr1 excerpt once at import time; all questions reuse it.
human_chromosome = dnaseq.read_genome("chr1.GRCh38.excerpt.fasta")
def approximate_matches(p, t, index):
    """Find offsets where p (24 bp) occurs in t with at most 2 mismatches.

    Pigeonhole principle: p is split into three non-overlapping 8-mers; any
    occurrence with <= 2 mismatches must contain at least one exact 8-mer,
    which the k-mer index locates.  Returns (sorted offsets, total index hits).
    """
    max_mm = 2
    matches = set()
    total_hits = 0
    for seg_start in range(0, 24, 8):
        segment = p[seg_start:seg_start + 8]
        hits = index.query(segment)
        total_hits += len(hits)
        for hit in hits:
            begin = hit - seg_start
            # Discard candidates that would run off either end of t.
            if begin < 0 or begin + len(p) > len(t):
                continue
            mismatches = 0
            for j in range(len(p)):
                # The matched segment itself is exact by construction.
                if seg_start <= j < seg_start + len(segment):
                    continue
                if p[j] != t[begin + j]:
                    mismatches += 1
                    if mismatches > max_mm:
                        break
            if mismatches <= max_mm:
                matches.add(begin)
    return sorted(matches), total_hits
def approximate_matches_seq(p, t, index):
    """Variant of approximate_matches for a subsequence index.

    Queries the index with p shifted by 0, 1 and 2, then verifies only the
    skipped prefix characters (the queried part covers the rest of p), with
    at most 2 mismatches allowed.  Returns (sorted offsets, total index hits).
    """
    max_mm = 2
    matches = set()
    total_hits = 0
    for shift in range(0, 3):
        hits = index.query(p[shift:])
        total_hits += len(hits)
        for hit in hits:
            begin = hit - shift
            # Discard candidates that would run off either end of t.
            if begin < 0 or begin + len(p) > len(t):
                continue
            mismatches = 0
            # Only the `shift` characters before the queried part need checking.
            for j in range(shift):
                if p[j] != t[begin + j]:
                    mismatches += 1
                    if mismatches > max_mm:
                        break
            if mismatches <= max_mm:
                matches.add(begin)
    return sorted(matches), total_hits
def question_01():
    """Q1: alignments tried by naive exact matching of the 47-mer."""
    occurrences, comparisons, alignments = \
        dnaseq.naive_instrumented(
            human_chromosome,
            "GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG")
    # Parenthesized print works under both Python 2 and Python 3.
    print("question_01: %i" % alignments)
def question_02():
    """Q2: character comparisons made by the same naive matching run.

    NOTE(review): recomputes the identical search as question_01; could share
    one call if both answers are needed together.
    """
    occurrences, comparisons, alignments = \
        dnaseq.naive_instrumented(
            human_chromosome,
            "GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG")
    # Parenthesized print works under both Python 2 and Python 3.
    print("question_02: %i" % comparisons)
def question_03():
    """Q3: alignments tried by Boyer-Moore for the same 47-mer."""
    p = "GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG"
    p_bm = bm_preproc.BoyerMoore(p)
    occurrences, comparisons, alignments = \
        dnaseq.boyer_moore_instrumented(p, p_bm, human_chromosome)
    # Parenthesized print works under both Python 2 and Python 3.
    print("question_03: %i" % alignments)
def question_04():
    """Q4: number of <=2-mismatch occurrences of the 24-mer (8-mer index)."""
    p = "GGCGCGGTGGCTCACGCCTGTAAT"
    index = kmer_index.Index(human_chromosome, 8)
    matches, hits = approximate_matches(p, human_chromosome, index)
    # Parenthesized print works under both Python 2 and Python 3.
    print("question_04: %i" % len(matches))
def question_05():
    """Q5: total index hits during the Q4 search.

    NOTE(review): rebuilds the same 8-mer index as question_04; could reuse it.
    """
    p = "GGCGCGGTGGCTCACGCCTGTAAT"
    index = kmer_index.Index(human_chromosome, 8)
    matches, hits = approximate_matches(p, human_chromosome, index)
    # Parenthesized print works under both Python 2 and Python 3.
    print("question_05: %i" % hits)
def question_06():
    """Q6: total hits using a subsequence index (k=8, interval=3)."""
    p = "GGCGCGGTGGCTCACGCCTGTAAT"
    t = human_chromosome
    index = kmer_index.SubseqIndex(t, 8, 3)
    matches, hits = approximate_matches_seq(p, t, index)
    # Parenthesized print works under both Python 2 and Python 3.
    print("question_06: %i" % hits)
if __name__ == '__main__':
    # Print the answer to each quiz question in order.
    question_01()
    question_02()
    question_03()
    question_04()
    question_05()
    question_06()
|
nilq/baby-python
|
python
|
from .baselines import *
from .cocostuff import *
from .potsdam import *
from .duckietown import *
|
nilq/baby-python
|
python
|
import os
import sys
import tempfile
from unittest import mock
from hashlib import sha1
from random import random
from io import StringIO
import argparse
from .base import BaseTest
from .. import cloudssh
class Test(BaseTest):
    """Unit tests for the cloudssh CLI helpers.

    Covers config parsing, region selection, AWS lookups, the local instance
    index, autocompletion and the interactive prompts.
    """
    # Canned EC2 describe_instances()-style payload: two running named
    # instances, one stopped, one without tags, one tagged but unnamed.
    fake_reservations = [
        {
            'Groups': [],
            'Instances': [
                {
                    'InstanceId': 'i-b929323f777f4c016d',
                    'PrivateIpAddress': '10.0.0.60',
                    'PublicIpAddress': '123.456.7.89',
                    'State': {
                        'Code': 16,
                        'Name': 'running'
                    },
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': 'test_instance'
                        }
                    ]
                },
                {
                    'InstanceId': 'i-2959b4a6e3cdd13a2f',
                    'PrivateIpAddress': '10.0.0.61',
                    'PublicIpAddress': '123.456.7.90',
                    'State': {
                        'Code': 16,
                        'Name': 'running'
                    },
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': 'test_instance_2'
                        }
                    ]
                },
                {
                    'InstanceId': 'i-' + sha1(str(random()).encode('utf-8')).hexdigest()[:18],
                    'PrivateIpAddress': '10.0.0.62',
                    'PublicIpAddress': '123.456.7.91',
                    'State': {
                        'Code': 80,
                        'Name': 'stopped'
                    },
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': 'test_instance_stopped'
                        }
                    ]
                },
                {
                    'InstanceId': 'i-' + sha1(str(random()).encode('utf-8')).hexdigest()[:18],
                    'PrivateIpAddress': '10.0.0.63',
                    'PublicIpAddress': '123.456.7.94',
                    'State': {
                        'Code': 16,
                        'Name': 'running'
                    }
                },
                {
                    'InstanceId': 'i-' + sha1(str(random()).encode('utf-8')).hexdigest()[:18],
                    'PrivateIpAddress': '10.0.0.64',
                    'PublicIpAddress': '123.456.7.95',
                    'State': {
                        'Code': 16,
                        'Name': 'running'
                    },
                    'Tags': [
                        {
                            'Key': 'env',
                            'Value': 'prod'
                        }
                    ]
                }
            ]
        }
    ]
    # Minimal INI config written to a temp dir for every test.
    test_config = """
[MAIN]
region = us-east-1
aws_profile_name = cloud_ssh_unittest
ssh_user = paul
"""
    def setUp(self):
        # Set unit tests config dir
        self.tmp_config_dir = tempfile.TemporaryDirectory()
        cloudssh.config_dir = self.tmp_config_dir.name + '/'
        # Write default config
        with open(cloudssh.config_dir + 'cloudssh.cfg', 'w') as f:
            f.write(self.test_config)
        # Parse config
        cloudssh.parse_user_config()
        # Set region
        cloudssh.set_region()
    def tearDown(self):
        # Cleanup temp dir
        self.tmp_config_dir.cleanup()
    @mock.patch('argparse.ArgumentParser.parse_args',
                return_value=argparse.Namespace(region=None, build_index=None, instance='my_server', search=None, info=None))
    def test_parse_cli_args(self, mock_args):
        args = cloudssh.parse_cli_args()
        assert type(args) is dict
        assert args['region'] is None  # defaulted to None
        assert args['build_index'] is False  # defaulted to False
        assert args['info'] is None  # defaulted to None
    def test_parse_user_config(self):
        # Config file exists
        assert isinstance(cloudssh.parse_user_config(), object)
        # Config file does not exists
        assert cloudssh.parse_user_config(filename='invalid.cfg') is None
    def test_get_value_from_user_config(self):
        # Get a valid config
        assert cloudssh.get_value_from_user_config(
            'aws_profile_name') == 'cloud_ssh_unittest'
        # We should get None with an invalid config
        assert cloudssh.get_value_from_user_config('invalid') is None
        # We should get None if we don't have a loaded config
        cloudssh.user_config = None
        assert cloudssh.get_value_from_user_config('aws_profile_name') is None
    def test_set_region(self):
        # From config file
        assert cloudssh.set_region() == 'us-east-1'
        # Region sent from CLI
        assert cloudssh.set_region(from_args='us-west-1') == 'us-west-1'
        # Invalid region name
        self.assertRaises(RuntimeError, cloudssh.set_region, 'us-invalid-1')
    @mock.patch.object(cloudssh, 'get_value_from_user_config', return_value=None)
    def test_set_region_2(self, mock_args):
        # Test default without CLI input or config file
        assert cloudssh.set_region() == 'us-east-1'
    def test_get_aws_client(self):
        client = cloudssh.get_aws_client()
        # assert isinstance(client, botocore.client.EC2)
        assert isinstance(client, object)
    def test_is_instance_id(self):
        assert cloudssh.is_instance_id('i-68602df5') is True
        assert cloudssh.is_instance_id('i-015baacc848a0brfg') is True
        assert cloudssh.is_instance_id('this_is_a_name') is False
    # NOTE(review): the following test hits a live AWS profile; it will fail
    # without the 'cloud_ssh_unittest' credentials configured.
    def test_aws_lookup(self):
        client = cloudssh.get_aws_client()
        # Lookup an instance name
        response = cloudssh.aws_lookup(
            instance='cloudssh_test_instance', client=client)
        assert isinstance(response, dict)
        assert isinstance(response['Reservations'], list)
        # lookup an instance ID
        response = cloudssh.aws_lookup(
            instance='i-06bb6dbab77bfcf3f', client=client)
        assert isinstance(response, dict)
        assert isinstance(response['Reservations'], list)
    def test_get_instance_infos(self):
        assert cloudssh.get_instance_infos(
            reservations=self.fake_reservations) == {
            'id': 'i-b929323f777f4c016d',
            'launch_date': None,
            'private_ip': '10.0.0.60',
            'public_ip': '123.456.7.89',
            'subnet': None,
            'tags': [{'Key': 'Name', 'Value': 'test_instance'}],
            'type': None,
            'vpc': None
        }
        # No reservations
        self.assertRaises(
            SystemExit, cloudssh.get_instance_infos, reservations=[])
        # Reservations but no public IP
        # NOTE(review): this mutates the class-level fake_reservations fixture
        # in place, so tests running after this one see the altered data.
        altered = self.fake_reservations
        altered[0]['Instances'][0].pop('PublicIpAddress')
        self.assertRaises(SystemExit, cloudssh.get_instance_infos,
                          reservations=altered)
    def test_get_ssh_command(self):
        assert cloudssh.get_ssh_command(public_ip='123.456.7.89') == [
            'ssh', '123.456.7.89']
        assert cloudssh.get_ssh_command(
            public_ip='123.456.7.89',
            user='paul'
        ) == ['ssh', 'paul@123.456.7.89']
        assert cloudssh.get_ssh_command(
            public_ip='123.456.7.89',
            proxyjump='1.2.3.4'
        ) == ['ssh', '-J 1.2.3.4', '123.456.7.89']
        # NOTE(review): flag '-v' is expected to yield 'v' — looks like
        # cloudssh strips the dash; confirm this is intended behavior.
        assert cloudssh.get_ssh_command(
            public_ip='123.456.7.89',
            flag='-v'
        ) == ['ssh', 'v', '123.456.7.89']
        assert cloudssh.get_ssh_command(
            public_ip='123.456.7.89',
            user='paul',
            proxyjump='1.2.3.4',
            flag='-v'
        ) == ['ssh', '-J 1.2.3.4', 'v', 'paul@123.456.7.89']
    def test_resolve_home(self):
        assert cloudssh.resolve_home('/tmp/full/path') == '/tmp/full/path'
        assert cloudssh.resolve_home(
            '~/in_home').startswith(('/home/', '/Users'))
    def test_is_dir(self):
        assert cloudssh.is_dir('/tmp/nonexistent') is False
        assert cloudssh.is_dir('/tmp/') is True
    def test_mkdir(self):
        test_dir = '/tmp/test_mkdir'
        assert cloudssh.mkdir(test_dir) is True
        os.rmdir(test_dir)
    def test_get_instances_list(self):
        assert cloudssh.get_instances_list(
            reservations=self.fake_reservations) == [
            {
                'name': 'test_instance',
                'detail': {
                    'id': 'i-b929323f777f4c016d',
                    'public_ip': None,
                    'private_ip': '10.0.0.60',
                    'type': None,
                    'vpc': None,
                    'subnet': None,
                    'launch_date': None,
                    'tags': [{'Key': 'Name', 'Value': 'test_instance'}]
                }
            }, {
                'name': 'test_instance_2',
                'detail': {
                    'id': 'i-2959b4a6e3cdd13a2f',
                    'public_ip': '123.456.7.90',
                    'private_ip': '10.0.0.61',
                    'type': None,
                    'vpc': None,
                    'subnet': None,
                    'launch_date': None,
                    'tags': [{'Key': 'Name', 'Value': 'test_instance_2'}]
                }
            }
        ]
        # No reservations
        self.assertRaises(
            SystemExit, cloudssh.get_instances_list, reservations=[])
    def test_read_index(self):
        filename = 'test_read_file'
        cloudssh.write_index(
            filename=filename,
            content={'a': True}
        )
        # Read file
        assert cloudssh.read_index(filename=filename) == {'a': True}
        # Read invalid file
        assert cloudssh.read_index(filename='/tmp/nonexistent') == {}
    def test_write_index(self):
        filename = 'test_write_index'
        assert cloudssh.write_index(
            filename=filename,
            content={}
        ) is True
    @mock.patch.object(cloudssh, 'get_value_from_user_config', return_value='my_profile')
    def test_append_to_index(self, mock_args):
        cloudssh.region = 'us-east-1'
        # With an existing index
        assert cloudssh.append_to_index(
            existing_index={
                'my_profile': {
                    'us-west-1': ['name_123']
                }
            },
            new=['name_1', 'name_2']
        ) == {
            'my_profile': {
                'us-west-1': ['name_123'],
                'us-east-1': ['name_1', 'name_2'],
            }
        }
        # Without an existing index
        assert cloudssh.append_to_index(
            existing_index={},
            new=['name_1', 'name_2']
        ) == {
            'my_profile': {
                'us-east-1': ['name_1', 'name_2'],
            }
        }
    def test_build_index(self):
        filename = 'test_index'
        assert cloudssh.build_index(filename=filename) is True
        # Build index with config dir creation
        with tempfile.TemporaryDirectory() as test_dir:
            cloudssh.config_dir = test_dir + '/new_path/'
            assert cloudssh.build_index(filename=filename) is True
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'publicIp': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'publicIp': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'publicIp': '123.456.789.2'}}])
    @mock.patch('src.cloudssh.confirm', return_value=True)
    def test_search_one_result(self, mock_args, mock_args_2):
        saved_stdout = sys.stdout
        try:
            out = StringIO()
            sys.stdout = out
            # Render file content to stdout
            cloudssh.search(query='other_thing')
            output = out.getvalue().strip()
            assert output == ''  # Because it was intercepted and never printed
        finally:
            sys.stdout = saved_stdout
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'publicIp': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'publicIp': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'publicIp': '123.456.789.2'}}])
    def test_search_multiple_results(self, mock_args):
        saved_stdout = sys.stdout
        try:
            out = StringIO()
            sys.stdout = out
            # Catch `exit()` and render content to stdout
            self.assertRaises(
                SystemExit, cloudssh.search, query='thing')
            output = out.getvalue().strip()
            assert output == 'Results:\n* one_thing\n* one_other_thing\n* third_thing'
        finally:
            sys.stdout = saved_stdout
    def test_search_no_result(self):
        saved_stdout = sys.stdout
        try:
            out = StringIO()
            sys.stdout = out
            # Catch `exit()` and render content to stdout
            self.assertRaises(
                SystemExit, cloudssh.search, query='invalid_name')
            output = out.getvalue().strip()
            assert output == 'No result!'
        finally:
            sys.stdout = saved_stdout
    def test_confirm(self):
        with mock.patch('builtins.input', return_value='y'):
            self.assertTrue(cloudssh.confirm())
            self.assertTrue(cloudssh.confirm(resp=True))
    def test_confirm_2(self):
        with mock.patch('builtins.input', return_value='n'):
            self.assertFalse(cloudssh.confirm())
            self.assertFalse(cloudssh.confirm(resp=True))
    def test_confirm_3(self):
        # Test empty return
        with mock.patch('builtins.input', return_value=''):
            self.assertTrue(cloudssh.confirm(resp=True))
    def test_get_instances_list_from_index(self):
        filename = 'test_get_instances_list_from_index'
        cloudssh.region = 'us-east-1'
        # Write test index
        cloudssh.write_index(
            filename=filename,
            content={
                'cloud_ssh_unittest': {
                    'us-west-1': [{'name': 'name_123'}],
                    'us-east-1': [{'name': 'name_1'}, {'name': 'name_2'}],
                }
            }
        )
        assert cloudssh.get_instances_list_from_index(filename=filename) == [
            {'name': 'name_1'}, {'name': 'name_2'}]
    @mock.patch.object(cloudssh, 'get_value_from_user_config', return_value='nonexistent_profile')
    def test_get_instances_list_from_index_2(self, mock_args):
        filename = 'test_get_instances_list_from_index'
        assert cloudssh.get_instances_list_from_index(filename=filename) == []
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}, {'name': 'with space'}])
    @mock.patch('readline.get_line_buffer', return_value='one')
    def test_autocomplete(self, mock_args, mock_args_2):
        assert cloudssh.autocomplete('on', state=0) == 'one_thing'
        assert cloudssh.autocomplete(
            'on', state=1) == 'one_other_thing'
        assert cloudssh.autocomplete('on', state=2) is None
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}, {'name': 'with space'}])
    @mock.patch('readline.get_line_buffer', return_value='with ')
    def test_autocomplete_2(self, mock_args, mock_args_2):
        assert cloudssh.autocomplete('on', state=0) == 'space'
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}])
    @mock.patch('readline.get_line_buffer', return_value='ONE')
    def test_autocomplete_3(self, mock_args, mock_args_2):
        assert cloudssh.autocomplete(
            'on', state=0, is_case_sensitive=True) is None
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}])
    @mock.patch('readline.get_line_buffer', return_value='ONE')
    def test_autocomplete_4(self, mock_args, mock_args_2):
        assert cloudssh.autocomplete('on', state=0) == 'one_thing'
        assert cloudssh.autocomplete(
            'on', state=1) == 'one_other_thing'
        assert cloudssh.autocomplete('on', state=2) is None
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing'}, {'name': 'one_other_thing'}, {'name': 'third_thing'}])
    @mock.patch('builtins.input', return_value='some_value')
    def test_get_input_autocomplete(self, mock_args, mock_args_2):
        assert cloudssh.get_input_autocomplete() == 'some_value'
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'public_ip': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'public_ip': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'public_ip': '123.456.789.2'}}])
    def test_instance_lookup_index(self, mock_args):
        assert cloudssh.instance_lookup(
            'one_thing') == ('index', {'public_ip': '123.456.789.0'})
    # NOTE(review): falls through to a live AWS lookup; requires credentials.
    @mock.patch.object(cloudssh, 'get_instances_list_from_index', return_value=[{'name': 'one_thing', 'detail': {'public_ip': '123.456.789.0'}}, {'name': 'one_other_thing', 'detail': {'public_ip': '123.456.789.1'}}, {'name': 'third_thing', 'detail': {'public_ip': '123.456.789.2'}}])
    def test_instance_lookup_aws(self, mock_args):
        assert cloudssh.instance_lookup(
            'cloudssh_test_instance') == ('aws', {
                'id': 'i-06bb6dbab77bfcf3f',
                'public_ip': '52.6.180.201',
                'private_ip': '172.31.91.210',
                'type': 't2.micro',
                'vpc': 'vpc-37911a4d',
                'subnet': 'subnet-e4f389ca',
                'launch_date': '2019-04-05 19:15:28+00:00',
                'tags': [{'Key': 'Name', 'Value': 'cloudssh_test_instance'}]
            })
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from glob import glob
import re
from collections import Counter
import subprocess32 as sp
import string
from itertools import product
from sys import stderr
from time import time
def split_regions_file(boot_contigs_dict, fnames, size):
    """
    takes Counter dictionary of bootstrapped contigs
    and an iterator over filenames to choose
    writes out split regions files with repetitions of contigs
    NOT spread over different split regions files
    """
    c = 0  # contigs written to the current split file
    # get next file name from iterator; next() works on Python 2 and 3
    # (the original used fnames.next(), which is Python-2-only)
    fn = next(fnames)
    # open new file for writing and get filehandle
    out = open("split_rf/" + fn[0] + fn[1], "w")
    # iterate over Counter dict of bootstrapped contigs, key=contig name, value=count (rep),
    # in numerical contig order
    for contig, rep in sorted(boot_contigs_dict.items(), key=lambda x: int(x[0].replace("Contig_", ""))):
        c += rep
        if c > size:  # write up to 'size' contigs to each split rf file
            out.close()  # close current rf file
            fn = next(fnames)  # get next file name from iterator
            out = open("split_rf/" + fn[0] + fn[1], "w")  # open new rf file for writing
            c = rep
        for _ in range(rep):  # write contig name to rf file as often as it occurs in the bootstrap resample
            out.write(contig + "\n")
    # fix: close the last split file so its buffered lines are flushed
    out.close()
# Top-level driver (Python 2): for every bootstrapped regions file, split it
# into chunks, run angsd SAF calculation in parallel for the PAR and ERY
# populations, concatenate the per-chunk outputs, and clean up.
index = ''  # index of bootstrap replicate
for rf in sorted(glob("including_non-overlapping/BOOT_RF/000*")):
    start = time()
    # the replicate number is the trailing digits of the filename
    index = re.findall(r'\d+', rf)[-1]
    # reset array for bootstrapped contigs
    boot_contigs = []
    with open(rf, "r") as boot_rf:
        for contig in boot_rf:
            boot_contigs.append(contig.rstrip())
    # create dictionary of counts of contigs
    boot_contigs_dict = Counter(boot_contigs)
    # clear directory
    sp.call("rm -f split_rf/*", shell=True)
    # get filename iterator
    fnames = product(string.lowercase, repeat=2)
    # split bootstrapped regions file, 400 contigs per file
    split_regions_file(boot_contigs_dict, fnames, 400)
    # remove previous split SAF files for PAR
    cmd = "rm -f including_non-overlapping/SAF/bootstrap/PAR/[a-z]*"
    sp.call(cmd, shell=True)
    # remove previous split SAF files for ERY
    cmd = cmd.replace("PAR", "ERY")
    sp.call(cmd, shell=True)
    # run SAF calculation in parallel for PAR
    cmd = 'ls split_rf/* | parallel -j 24 "angsd -bam PAR.slim.bamfile.list -ref Big_Data_ref.fa \
-anc Big_Data_ref.fa -out including_non-overlapping/SAF/bootstrap/PAR/{/}.unfolded -fold 0 \
-sites all.sites -rf {} -only_proper_pairs 0 -baq 1 -minMapQ 5 -minInd 9 -GL 1 -doSaf 1 -nThreads 1 2>/dev/null"'
    sp.call(cmd, shell=True)
    # run SAF calculation in parallel for ERY
    cmd = cmd.replace("PAR", "ERY")
    sp.call(cmd, shell=True)
    # concatenate split SAF files for PAR
    cmd = "realSFS cat -outnames including_non-overlapping/SAF/bootstrap/PAR/{}.unfolded including_non-overlapping/SAF/bootstrap/PAR/[a-z]*saf.idx 2>/dev/null".format(index)
    sp.call(cmd, shell=True)
    # concatenate split SAF files for ERY
    cmd = cmd.replace("PAR", "ERY")
    sp.call(cmd, shell=True)
    end = time()
    run_time = end - start
    print >> stderr, "Finished SAF calculation for bootstrap {0}. It took {1} sec to complete.".format(index, int(run_time))
    # remove split SAF files for PAR
    cmd = "rm -f including_non-overlapping/SAF/bootstrap/PAR/[a-z]*"
    sp.call(cmd, shell=True)
    # remove split SAF files for ERY
    cmd = cmd.replace("PAR", "ERY")
    sp.call(cmd, shell=True)
|
nilq/baby-python
|
python
|
import tempfile
from django.urls import reverse
from PIL import Image
from rest_framework import status
from rest_framework.test import APITestCase
from brouwers.users.tests.factories import UserFactory
from ..factories import AlbumFactory, PhotoFactory
class PhotoViewsetTests(APITestCase):
    """API tests for the photo endpoints: upload, listing, next/previous
    navigation and rotation."""
    def setUp(self):
        super().setUp()
        self.user = UserFactory.create()
        self.album = AlbumFactory.create(user=self.user)
        self.list_url = reverse("api:photo-list")
    def test_upload(self):
        """
        Test that API uploads are possible.
        """
        data = {"album": self.album.pk}
        # anonymous
        response = self.client.post(self.list_url, data, format="multipart")
        self.assertEqual(
            response.data, {"detail": "Authentication credentials were not provided."}
        )
        # authenticated
        self.client.login(username=self.user.username, password="password")
        # create an image
        image = Image.new("RGB", (192, 108), "green")
        tmp_file = tempfile.NamedTemporaryFile(suffix=".jpg")
        image.save(tmp_file, format="JPEG")
        with open(tmp_file.name, "rb") as image:
            data.update(
                {
                    "image": image,
                    "description": "dummy description",
                }
            )
            response = self.client.post(self.list_url, data, format="multipart")
            # NOTE(review): endpoint responds 200 with a success flag rather
            # than the DRF-default 201 — confirm against the viewset.
            self.assertEqual(status.HTTP_200_OK, response.status_code)
            self.assertEqual(response.data["success"], True)
    def test_failing_upload(self):
        # Posting without an image must yield a validation error.
        self.client.login(username=self.user.username, password="password")
        response = self.client.post(
            self.list_url, {"album": self.album.pk}, format="multipart"
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn("image", response.data)  # there must be an error
    def test_list_photos(self):
        photos = PhotoFactory.create_batch(10, album=self.album)
        self.client.login(username=self.user.username, password="password")
        response = self.client.get(self.list_url, {"album": self.album.pk})
        self.assertEqual(response.data["count"], 10)
        for photo, result in zip(photos, response.data["results"]):
            self.assertEqual(photo.id, result["id"])
            self.assertEqual(set(result["image"].keys()), set(["large", "thumb"]))
    def test_detail_next_previous(self):
        # Navigation relative to the middle photo of five.
        photos = PhotoFactory.create_batch(5, album=self.album)
        next_url = reverse("api:photo-next", kwargs={"pk": photos[2].pk})
        previous_url = reverse("api:photo-previous", kwargs={"pk": photos[2].pk})
        response = self.client.get(next_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["id"], photos[3].id)
        response = self.client.get(previous_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["id"], photos[1].id)
    def test_unauthenticated_rotate(self):
        photo = PhotoFactory.create(
            album=self.album, image__width=100, image__height=50
        )
        detail_url = reverse("api:photo-rotate", kwargs={"pk": photo.pk})
        response = self.client.patch(detail_url, data={"direction": "cw"})
        self.assertEqual(response.status_code, 403)
    def test_rotate(self):
        # Rotating swaps width/height both in the response and on disk.
        photo = PhotoFactory.create(
            album=self.album, image__width=100, image__height=50
        )
        self.client.login(username=self.user.username, password="password")
        detail_url = reverse("api:photo-rotate", kwargs={"pk": photo.pk})
        response = self.client.patch(detail_url, data={"direction": "cw"})  # clockwise
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["width"], 50)
        self.assertEqual(response.data["height"], 100)
        img = Image.open(photo.image.path)
        self.assertEqual(img.size, (50, 100))
        response = self.client.patch(
            detail_url, data={"direction": "ccw"}
        )  # counter-clockwise
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["width"], 100)
        self.assertEqual(response.data["height"], 50)
        img = Image.open(photo.image.path)
        self.assertEqual(img.size, (100, 50))
    def test_invalid_rotate(self):
        photo = PhotoFactory.create(
            album=self.album, image__width=100, image__height=50
        )
        self.client.login(username=self.user.username, password="password")
        detail_url = reverse("api:photo-rotate", kwargs={"pk": photo.pk})
        response = self.client.patch(
            detail_url, data={"direction": "fl;asjdf"}
        )  # clockwise
        self.assertEqual(response.status_code, 400)
|
nilq/baby-python
|
python
|
"""
MIT License
Copyright (c) 2020 Shahibur Rahaman
"""
import Operations
import time
def main():
    """Run the interactive four-function calculator until Ctrl+C is pressed."""
    print(
        """
    Calculator version 2.9.10.20
    Copyright (c) Shahibur Rahaman
    Licensed under the MIT License.
    |> Press (Ctrl + C) to exit the program.
    |> Choose your operation:
    1. Addition
    2. Subtraction
    3. Multiplication
    4. Division
    """
    )
    while True:
        try:
            # --- read a valid operation choice (1-4) ---
            while True:
                try:
                    choice = int(input("Enter your choice: [1, 2, 3, 4] "))
                    if choice > 4 or choice < 1:
                        print("\nPlease enter your choice according to the given operation options only!")
                        continue
                except ValueError:
                    print("\nPlease enter a numerical value only!")
                    continue
                else:
                    break
            # --- read the two operands ---
            while True:
                try:
                    x = float(input("\nEnter the first number: "))
                    y = float(input("Enter the second number: "))
                except ValueError:
                    print("\nPlease enter numerical values only!\n")
                else:
                    break
            # Fix: compute only the requested operation.  The original
            # evaluated all four eagerly, so division ran even when the user
            # chose addition (and could fail, e.g. on y == 0, depending on
            # Operations.Operation.division — TODO confirm).
            op = Operations.Operation(x, y)
            print("\n--------------------------")
            if choice == 1:
                print(f"{x} + {y} = {op.addition()}")
            elif choice == 2:
                print(f"{x} - {y} = {op.subtraction()}")
            elif choice == 3:
                print(f"{x} X {y} = {op.multiplication()}")
            elif choice == 4:
                print(f"{x} / {y} = {op.division()}")
            print("--------------------------\n")
        except KeyboardInterrupt:
            print("\nExiting...")
            time.sleep(1)
            break


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import sys
import json
from data_grab.run_scraper import Scraper
# CLI entry: python <script> <TopicName> [-y]
# Looks up <TopicName> in the examvida topic index and launches the scraper.
if(len(sys.argv)<2):
    print('Please Give topic name. e.g. "Clock"')
    sys.exit()
topic = sys.argv[1]
data_obj = False  # sentinel: stays False when the topic is not found
j_data = json.loads(open('data_grab/resources/topic_examvida.json').read())
for c in j_data:
    if topic == c["topic_name"]:
        topic_name = topic
        data_obj = c
        break
if not data_obj:
    print("<<Error>> [ Topic Not Found ] - " + topic)
    sys.exit()
print("Topic Found - Please Wait")
scraper = Scraper()
if(len(sys.argv)>2):
    if(sys.argv[2]=="-y"):
        # '-y' passes False as the second argument — presumably skips an
        # interactive confirmation; confirm against Scraper.run_spiders.
        scraper.run_spiders(data_obj , False)
    else:
        scraper.run_spiders(data_obj)
else:
    scraper.run_spiders(data_obj)
|
nilq/baby-python
|
python
|
from views.main_view import prompt
# Launch the interactive prompt from the views layer (script entry point).
prompt()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of 'pmap' utility on Linux, 'vmmap' on OSX and 'procstat -v' on BSD.
Report memory map of a process.
$ python scripts/pmap.py 32402
pid=32402, name=hg
Address RSS Mode Mapping
0000000000400000 1200K r-xp /usr/bin/python2.7
0000000000838000 4K r--p /usr/bin/python2.7
0000000000839000 304K rw-p /usr/bin/python2.7
00000000008ae000 68K rw-p [anon]
000000000275e000 5396K rw-p [heap]
00002b29bb1e0000 124K r-xp /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb203000 8K rw-p [anon]
00002b29bb220000 528K rw-p [anon]
00002b29bb2d8000 768K rw-p [anon]
00002b29bb402000 4K r--p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb403000 8K rw-p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb405000 60K r-xp /lib/x86_64-linux-gnu/libpthread-2.17.so
00002b29bb41d000 0K ---p /lib/x86_64-linux-gnu/libpthread-2.17.so
00007fff94be6000 48K rw-p [stack]
00007fff94dd1000 4K r-xp [vdso]
ffffffffff600000 0K r-xp [vsyscall]
...
"""
import sys
import psutil
def main():
    """Print a pmap-style memory map (address, RSS, mode, mapping) for the
    PID given as the single command-line argument."""
    if len(sys.argv) != 2:
        sys.exit('usage: pmap <pid>')
    p = psutil.Process(int(sys.argv[1]))
    print("pid=%s, name=%s" % (p.pid, p.name()))
    templ = "%-16s %10s %-7s %s"
    print(templ % ("Address", "RSS", "Mode", "Mapping"))
    total_rss = 0
    for m in p.memory_maps(grouped=False):
        total_rss += m.rss
        print(templ % (
            m.addr.split('-')[0].zfill(16),
            # Fix: integer division — on Python 3, '/' printed '1200.0K'
            # instead of the '1200K' shown in the module docstring.
            str(m.rss // 1024) + 'K',
            m.perms,
            m.path))
    print("-" * 33)
    print(templ % ("Total", str(total_rss // 1024) + 'K', '', ''))


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict, List, Optional, Union
from pydantic import BaseModel, Field
from rubrix.client.models import Text2TextRecord as ClientText2TextRecord
from rubrix.client.sdk.commons.models import (
MACHINE_NAME,
BaseAnnotation,
BaseRecord,
PredictionStatus,
ScoreRange,
TaskStatus,
UpdateDatasetRequest,
)
class Text2TextPrediction(BaseModel):
    # One predicted output text with its confidence, clamped to [0, 1].
    text: str
    score: float = Field(default=1.0, ge=0.0, le=1.0)
class Text2TextAnnotation(BaseAnnotation):
    # Agent-attributed list of generated sentences (predictions or gold).
    sentences: List[Text2TextPrediction]
class CreationText2TextRecord(BaseRecord[Text2TextAnnotation]):
    # Input text of the text-to-text record.
    text: str
    @classmethod
    def from_client(cls, record: ClientText2TextRecord):
        """Build an SDK creation record from a client Text2TextRecord.

        Client predictions may be plain strings or (text, score) tuples;
        the client annotation is a single string wrapped as one sentence.
        """
        prediction = None
        if record.prediction is not None:
            prediction = Text2TextAnnotation(
                sentences=[
                    Text2TextPrediction(text=pred[0], score=pred[1])
                    if isinstance(pred, tuple)
                    else Text2TextPrediction(text=pred)
                    for pred in record.prediction
                ],
                # Fall back to the machine name when no agent was recorded.
                agent=record.prediction_agent or MACHINE_NAME,
            )
        annotation = None
        if record.annotation is not None:
            annotation = Text2TextAnnotation(
                sentences=[Text2TextPrediction(text=record.annotation)],
                agent=record.annotation_agent or MACHINE_NAME,
            )
        return cls(
            text=record.text,
            prediction=prediction,
            annotation=annotation,
            status=record.status,
            metadata=record.metadata,
            id=record.id,
            event_timestamp=record.event_timestamp,
        )
class Text2TextRecord(CreationText2TextRecord):
    # Server-side timestamp of the last modification.
    # NOTE(review): annotated as datetime but defaulted to None — presumably
    # populated by the server; confirm before tightening the annotation.
    last_updated: datetime = None
    _predicted: Optional[PredictionStatus] = Field(alias="predicted")
    def to_client(self) -> ClientText2TextRecord:
        """Convert this SDK record back into a client Text2TextRecord,
        flattening predictions to (text, score) tuples and the annotation
        to its first sentence's text."""
        return ClientText2TextRecord(
            text=self.text,
            prediction=[
                (sentence.text, sentence.score)
                for sentence in self.prediction.sentences
            ]
            if self.prediction
            else None,
            prediction_agent=self.prediction.agent if self.prediction else None,
            annotation=self.annotation.sentences[0].text if self.annotation else None,
            annotation_agent=self.annotation.agent if self.annotation else None,
            status=self.status,
            metadata=self.metadata or {},
            id=self.id,
            event_timestamp=self.event_timestamp,
            metrics=self.metrics or None,
            search_keywords=self.search_keywords or None,
        )
class Text2TextBulkData(UpdateDatasetRequest):
    # Payload for bulk-uploading text-to-text records to a dataset.
    records: List[CreationText2TextRecord]
class Text2TextQuery(BaseModel):
    # Search filters for text-to-text records; empty lists/None mean "no filter".
    ids: Optional[List[Union[str, int]]]
    query_text: str = Field(default=None)
    advanced_query_dsl: bool = False
    annotated_by: List[str] = Field(default_factory=list)
    predicted_by: List[str] = Field(default_factory=list)
    score: Optional[ScoreRange] = Field(default=None)
    status: List[TaskStatus] = Field(default_factory=list)
    predicted: Optional[PredictionStatus] = Field(default=None, nullable=True)
    metadata: Optional[Dict[str, Union[str, List[str]]]] = None
|
nilq/baby-python
|
python
|
# Copyright (c) 2018, Manfred Moitzi
# License: MIT License
import pytest
import os
import ezdxf
# Resolve the test-data location whether pytest runs from the repo root or
# from inside the integration_tests directory.
BASEDIR = 'integration_tests' if os.path.exists('integration_tests') else '.'
DATADIR = 'data'
# Large local-only sample; the dependent test skips when it is absent.
COLDFIRE = r"D:\Source\dxftest\CADKitSamples\kit-dev-coldfire-xilinx_5213.dxf"
@pytest.mark.skipif(not os.path.exists(COLDFIRE), reason='test data not present')
def test_kit_dev_coldfire():
    # A clean audit (zero findings) on a large real-world DXF file.
    doc = ezdxf.readfile(COLDFIRE)
    auditor = doc.audit()
    assert len(auditor) == 0
@pytest.fixture(params=['Leica_Disto_S910.dxf'])
def filename(request):
    """Resolve the parametrized DXF sample path under BASEDIR/DATADIR.

    Skips the dependent test when the sample file is not present locally.
    """
    filename = os.path.join(BASEDIR, DATADIR, request.param)
    if not os.path.exists(filename):
        # Bug fix: the skip message was an f-string with no placeholder, so
        # the missing file's name was never reported.
        pytest.skip(f'File {filename} not found.')
    return filename
def test_leica_disto_r12(filename):
    """Read a Leica Disto DXF in legacy mode and expect no audit errors."""
    doc = ezdxf.readfile(filename, legacy_mode=True)
    auditor = doc.audit()
    assert len(auditor) == 0
|
nilq/baby-python
|
python
|
def run():
    """Demonstrate range(): value vs. identity comparison, then even/odd loops."""
    my_range = range(0, 7, 2)
    print(my_range)
    other_range = range(0, 8, 2)
    print(other_range)
    # Two distinct objects, even though they yield the same values.
    for rng in (my_range, other_range):
        print(id(rng))
    print(my_range == other_range)  # Validate (value equality)
    print(my_range is other_range)  # Validate (object equality)
    # Par
    for even in range(0, 101, 2):
        print(even)
    # None
    for candidate in range(1, 99):
        if candidate % 2 != 0:
            print(f'{candidate} is none')
# Script entry point.
if __name__ == '__main__':
    run()
|
nilq/baby-python
|
python
|
from fastapi import APIRouter, Depends
from typing import List
from src.utils.crud_router import include_generic_collection_document_router
from src.dependencies import current_active_user
from src.services.courses import CourseService, CourseSectionService
# All course endpoints require an authenticated user.
dependencies: List[Depends] = [Depends(current_active_user)]

course_service: CourseService = CourseService()
course_router: APIRouter = APIRouter(dependencies=dependencies,
                                     prefix="/api/courses", tags=["Course"])
# Attach the generic CRUD collection/document routes for courses.
include_generic_collection_document_router(course_router, course_service)

course_section_service: CourseSectionService = CourseSectionService()
course_section_router: APIRouter = APIRouter(dependencies=dependencies,
                                             prefix="/api/course-section", tags=["CourseSection"])
# Same generic CRUD wiring for course sections.
include_generic_collection_document_router(course_section_router, course_section_service)
|
nilq/baby-python
|
python
|
from typing import Dict, Text, Any, List
import tensorflow_transform as tft
def preprocessing_fn(inputs: Dict[Text, Any], custom_config) -> Dict[Text, Any]:
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
custom_config:
timesteps: The number of timesteps in the look back window
features: Which of the features from the TF.Example to use in the model.
Returns:
Map from string feature key to transformed feature operations.
"""
feature_columns = sorted(custom_config["feature_columns"])
features = {}
for feature in feature_columns:
if feature not in inputs.keys():
raise ValueError(
f"Input is missing required feature {feature}. Input has: {inputs.keys()}"
)
features[f"{feature}"] = tft.scale_to_z_score(inputs[feature])
return features
|
nilq/baby-python
|
python
|
import numpy as np
import tensorflow as tf
from datasets import audio
from infolog import log
from wavenet_vocoder import util
from wavenet_vocoder.util import *
from .gaussian import sample_from_gaussian
from .mixture import sample_from_discretized_mix_logistic
from .modules import (Conv1D1x1, ConvTranspose2D, ConvTranspose1D, DiscretizedMixtureLogisticLoss, Embedding, GaussianMaximumLikelihoodEstimation,
LeakyReluActivation, MaskedCrossEntropyLoss, ReluActivation, ResidualConv1DGLU, WeightNorm)
import pdb
def _expand_global_features(batch_size, time_length, global_features, data_format='BCT'):
    """Broadcast per-utterance global conditioning features to every time step.

    Args:
        batch_size: int
        time_length: int
        global_features: Tensor of shape [batch_size, channels] or
            [batch_size, channels, 1], or None.
        data_format: 'BCT' for output [batch_size, channels, time_length],
            'BTC' for output [batch_size, time_length, channels].

    Returns:
        None when no global features are given, otherwise the tiled tensor in
        the requested layout.
    """
    if data_format not in ('BCT', 'BTC'):
        raise ValueError('{} is an unknow data format, accepted formats are "BCT" and "BTC"'.format(data_format))

    if global_features is None:
        return None

    # [batch_size, channels] (or [batch_size, channels, 1]) -> [batch_size, channels, 1]
    expanded = tf.reshape(
        global_features,
        [tf.shape(global_features)[0], tf.shape(global_features)[1], 1])
    g_shape = tf.shape(expanded)  # NOTE(review): unused, kept from original; candidate for removal.

    # Repeat the single conditioning frame across the whole time axis.
    tiled = tf.tile(expanded, [1, 1, time_length])

    if data_format == 'BTC':
        # [batch_size, channels, time_length] -> [batch_size, time_length, channels]
        return tf.transpose(tiled, [0, 2, 1])
    return tiled
def receptive_field_size(total_layers, num_cycles, kernel_size, dilation=lambda x: 2**x):
    """Compute the receptive field (in samples) of the stacked dilated convolutions.

    Args:
        total_layers: int, total number of convolution layers.
        num_cycles: int, number of dilation cycles (stacks); must divide total_layers.
        kernel_size: int, convolution kernel width.
        dilation: callable mapping the layer index within a cycle to its
            dilation factor. Use ``lambda x: 1`` to disable dilated convolutions.

    Returns:
        int: receptive field size in samples.
    """
    assert total_layers % num_cycles == 0
    layers_per_cycle = total_layers // num_cycles
    # The dilation pattern repeats every cycle; sum it over all layers.
    dilation_sum = sum(dilation(layer % layers_per_cycle) for layer in range(total_layers))
    return (kernel_size - 1) * dilation_sum + 1
def maybe_Normalize_weights(layer, weight_normalization=True, init=False, init_scale=1.):
    """Wrap ``layer`` in a WeightNorm wrapper when weight normalization is on.

    Args:
        layer: tf layers instance, the candidate for normalization.
        weight_normalization: Boolean, whether to wrap the layer at all.
        init: Boolean, True during the data-dependent initialization run.
        init_scale: Float, scale of the data-dependent initialization (usually 1).

    Returns:
        The WeightNorm-wrapped layer, or the layer unchanged when disabled.
    """
    if not weight_normalization:
        return layer
    return WeightNorm(layer, init, init_scale)
class WaveNet():
    """Tacotron-2 Wavenet Vocoder model.
    """
    def __init__(self, hparams, init):
        # hparams: hyper-parameter container for the whole vocoder.
        # init: True during the data-dependent weight-normalization init run.
        #Get hparams
        self._hparams = hparams
        if self.local_conditioning_enabled():
            assert hparams.num_mels == hparams.cin_channels

        #Initialize model architecture
        assert hparams.layers % hparams.stacks == 0
        layers_per_stack = hparams.layers // hparams.stacks

        self.scalar_input = is_scalar_input(hparams.input_type)

        #first (embedding) convolution
        # NOTE(review): both branches below are identical; the scalar/one-hot
        # distinction presumably mattered once — confirm before simplifying.
        with tf.variable_scope('input_convolution'):
            if self.scalar_input:
                self.first_conv = Conv1D1x1(hparams.residual_channels,
                    weight_normalization=hparams.wavenet_weight_normalization,
                    weight_normalization_init=init,
                    weight_normalization_init_scale=hparams.wavenet_init_scale,
                    name='input_convolution')
            else:
                self.first_conv = Conv1D1x1(hparams.residual_channels,
                    weight_normalization=hparams.wavenet_weight_normalization,
                    weight_normalization_init=init,
                    weight_normalization_init_scale=hparams.wavenet_init_scale,
                    name='input_convolution')

        #Residual Blocks
        # Dilation doubles per layer and resets every stack (2**(layer % layers_per_stack)).
        self.residual_layers = []
        for layer in range(hparams.layers):
            self.residual_layers.append(ResidualConv1DGLU(
                hparams.residual_channels, hparams.gate_channels,
                kernel_size=hparams.kernel_size,
                skip_out_channels=hparams.skip_out_channels,
                use_bias=hparams.use_bias,
                dilation_rate=2**(layer % layers_per_stack),
                dropout=hparams.wavenet_dropout,
                cin_channels=hparams.cin_channels,
                gin_channels=hparams.gin_channels,
                weight_normalization=hparams.wavenet_weight_normalization,
                init=init,
                init_scale=hparams.wavenet_init_scale,
                name='ResidualConv1DGLU_{}'.format(layer)))

        #Final (skip) convolutions
        # Post-network applied to the summed skip connections: relu -> 1x1 -> relu -> 1x1.
        with tf.variable_scope('skip_convolutions'):
            self.last_conv_layers = [
            ReluActivation(name='final_conv_relu1'),
            Conv1D1x1(hparams.skip_out_channels,
                weight_normalization=hparams.wavenet_weight_normalization,
                weight_normalization_init=init,
                weight_normalization_init_scale=hparams.wavenet_init_scale,
                name='final_convolution_1'),
            ReluActivation(name='final_conv_relu2'),
            Conv1D1x1(hparams.out_channels,
                weight_normalization=hparams.wavenet_weight_normalization,
                weight_normalization_init=init,
                weight_normalization_init_scale=hparams.wavenet_init_scale,
                name='final_convolution_2'),]

        #Global conditionning embedding
        if hparams.gin_channels > 0 and hparams.use_speaker_embedding:
            assert hparams.n_speakers is not None
            self.embed_speakers = Embedding(
                hparams.n_speakers, hparams.gin_channels, std=0.1, name='gc_embedding')
        else:
            self.embed_speakers = None

        # Flat list of every layer, used by set_mode() to flip train/inference.
        self.all_convs = [self.first_conv] + self.residual_layers + self.last_conv_layers

        #Upsample conv net
        # Upsamples local conditioning (mel) frames to sample rate via
        # transposed convolutions, one per scale in hparams.upsample_scales.
        if hparams.upsample_conditional_features:
            self.upsample_conv = []
            for i, s in enumerate(hparams.upsample_scales):
                with tf.variable_scope('local_conditioning_upsampling_{}'.format(i+1)):
                    if hparams.upsample_type == '2D':
                        convt = ConvTranspose2D(1, (hparams.freq_axis_kernel_size, 2*s),
                            padding='same', strides=(1, s))
                    else:
                        assert hparams.upsample_type == '1D'
                        convt = ConvTranspose1D(hparams.cin_channels, (2*s, ),
                            padding='same', strides=(s, ))
                    self.upsample_conv.append(maybe_Normalize_weights(convt,
                        hparams.wavenet_weight_normalization, init, hparams.wavenet_init_scale))

                    if hparams.upsample_activation == 'LeakyRelu':
                        self.upsample_conv.append(LeakyReluActivation(alpha=hparams.leaky_alpha,
                            name='upsample_leaky_relu_{}'.format(i+1)))
                    elif hparams.upsample_activation == 'Relu':
                        self.upsample_conv.append(ReluActivation(name='upsample_relu_{}'.format(i+1)))
                    else:
                        assert hparams.upsample_activation == None

            self.all_convs += self.upsample_conv
        else:
            self.upsample_conv = None

        self.receptive_field = receptive_field_size(hparams.layers,
            hparams.stacks, hparams.kernel_size)
def set_mode(self, is_training):
for conv in self.all_convs:
try:
conv.set_mode(is_training)
except AttributeError:
pass
    def initialize(self, y, c, g, input_lengths, x=None, synthesis_length=None):
        '''Initialize wavenet graph for train, eval and test cases.

        Builds one of three graphs depending on the supplied inputs:
        training (x given), evaluation (no x, y given), synthesis (neither).
        y: target waveform; c: local conditioning (mel) features; g: global
        conditioning (speaker) ids; input_lengths: per-example lengths used
        for masking; synthesis_length: generation length when c is None.
        '''
        hparams = self._hparams
        self.is_training = x is not None
        self.is_evaluating = not self.is_training and y is not None
        #Set all convolutions to corresponding mode
        self.set_mode(self.is_training)

        log('Initializing Wavenet model. Dimensions (? = dynamic shape): ')
        log(' Train mode: {}'.format(self.is_training))
        log(' Eval mode: {}'.format(self.is_evaluating))
        log(' Synthesis mode: {}'.format(not (self.is_training or self.is_evaluating)))

        with tf.variable_scope('inference') as scope:
            #Training
            if self.is_training:
                batch_size = tf.shape(x)[0]
                #[batch_size, time_length, 1]
                self.mask = self.get_mask(input_lengths, maxlen=tf.shape(x)[-1]) #To be used in loss computation
                #[batch_size, channels, time_length]
                y_hat = self.step(x, c, g, softmax=False) #softmax is automatically computed inside softmax_cross_entropy if needed

                if is_mulaw_quantize(hparams.input_type):
                    #[batch_size, time_length, channels]
                    self.y_hat_q = tf.transpose(y_hat, [0, 2, 1])

                self.y_hat = y_hat
                self.y = y
                self.input_lengths = input_lengths

                #Add mean and scale stats if using Guassian distribution output (there would be too many logistics if using MoL)
                if self._hparams.out_channels == 2:
                    self.means = self.y_hat[:, 0, :]
                    self.log_scales = self.y_hat[:, 1, :]
                else:
                    self.means = None

                #Graph extension for log saving
                #[batch_size, time_length]
                shape_control = (batch_size, tf.shape(x)[-1], 1)
                with tf.control_dependencies([tf.assert_equal(tf.shape(y), shape_control)]):
                    y_log = tf.squeeze(y, [-1])
                    if is_mulaw_quantize(hparams.input_type):
                        self.y = y_log

                # Collapse an optional trailing singleton axis before reshaping
                # the predictions for logging.
                y_hat_log = tf.cond(tf.equal(tf.rank(y_hat), 4),
                    lambda: tf.squeeze(y_hat, [-1]),
                    lambda: y_hat)
                y_hat_log = tf.reshape(y_hat_log, [batch_size, hparams.out_channels, -1])

                if is_mulaw_quantize(hparams.input_type):
                    #[batch_size, time_length]
                    y_hat_log = tf.argmax(tf.nn.softmax(y_hat_log, axis=1), 1)

                    y_hat_log = util.inv_mulaw_quantize(y_hat_log, hparams.quantize_channels)
                    y_log = util.inv_mulaw_quantize(y_log, hparams.quantize_channels)

                else:
                    #[batch_size, time_length]
                    if hparams.out_channels == 2:
                        y_hat_log = sample_from_gaussian(
                            y_hat_log, log_scale_min_gauss=hparams.log_scale_min_gauss)
                    else:
                        y_hat_log = sample_from_discretized_mix_logistic(
                            y_hat_log, log_scale_min=hparams.log_scale_min)

                    if is_mulaw(hparams.input_type):
                        y_hat_log = util.inv_mulaw(y_hat_log, hparams.quantize_channels)
                        y_log = util.inv_mulaw(y_log, hparams.quantize_channels)

                self.y_hat_log = y_hat_log
                self.y_log = y_log

                log(' inputs: {}'.format(x.shape))
                if self.local_conditioning_enabled():
                    log(' local_condition: {}'.format(c.shape))
                if self.has_speaker_embedding():
                    log(' global_condition: {}'.format(g.shape))
                log(' targets: {}'.format(y_log.shape))
                log(' outputs: {}'.format(y_hat_log.shape))

            #evaluating
            elif self.is_evaluating:
                # Evaluation generates autoregressively for a single example
                # (index 0 of the batch) and compares against the target.
                #[time_length, ]
                idx = 0
                length = input_lengths[idx]
                y_target = tf.reshape(y[idx], [-1])[:length]

                if c is not None:
                    c = tf.expand_dims(c[idx, :, :length], axis=0)
                    with tf.control_dependencies([tf.assert_equal(tf.rank(c), 3)]):
                        c = tf.identity(c, name='eval_assert_c_rank_op')
                if g is not None:
                    g = tf.expand_dims(g[idx], axis=0)

                batch_size = tf.shape(c)[0]

                #Start silence frame
                if is_mulaw_quantize(hparams.input_type):
                    initial_value = mulaw_quantize(0, hparams.quantize_channels)
                elif is_mulaw(hparams.input_type):
                    initial_value = mulaw(0.0, hparams.quantize_channels)
                else:
                    initial_value = 0.0

                #[channels, ]
                if is_mulaw_quantize(hparams.input_type):
                    initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
                    initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
                else:
                    initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value

                #Fast eval
                y_hat = self.incremental(initial_input, c=c, g=g, time_length=length,
                    softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)

                #Save targets and length for eval loss computation
                if is_mulaw_quantize(hparams.input_type):
                    self.y_eval = tf.reshape(y[idx], [1, -1])[:, :length]
                else:
                    self.y_eval = tf.expand_dims(y[idx], axis=0)[:, :length, :]
                self.eval_length = length

                # Decode the generated samples back to the waveform domain.
                if is_mulaw_quantize(hparams.input_type):
                    y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [-1])
                    y_hat = inv_mulaw_quantize(y_hat, hparams.quantize_channels)
                    y_target = inv_mulaw_quantize(y_target, hparams.quantize_channels)
                elif is_mulaw(hparams.input_type):
                    y_hat = inv_mulaw(tf.reshape(y_hat, [-1]), hparams.quantize_channels)
                    y_target = inv_mulaw(y_target, hparams.quantize_channels)
                else:
                    y_hat = tf.reshape(y_hat, [-1])

                self.y_hat = y_hat
                self.y_target = y_target

                if self.local_conditioning_enabled():
                    log(' local_condition: {}'.format(c.shape))
                if self.has_speaker_embedding():
                    log(' global_condition: {}'.format(g.shape))
                log(' targets: {}'.format(y_target.shape))
                log(' outputs: {}'.format(y_hat.shape))

            #synthesizing
            else:
                # NOTE(review): tf.shape(c) is taken before the `c is None`
                # check below — if c is ever None here this line fails first.
                batch_size = tf.shape(c)[0]
                if c is None:
                    assert synthesis_length is not None
                else:
                    #[batch_size, local_condition_time, local_condition_dimension(num_mels)]
                    message = ('Expected 3 dimension shape [batch_size(1), time_length, {}] for local condition features but found {}'.format(
                            hparams.cin_channels, c.shape))
                    with tf.control_dependencies([tf.assert_equal(tf.rank(c), 3, message=message)]):
                        c = tf.identity(c, name='synthesis_assert_c_rank_op')

                    Tc = tf.shape(c)[1]
                    upsample_factor = audio.get_hop_size(self._hparams)

                    #Overwrite length with respect to local condition features
                    synthesis_length = Tc * upsample_factor

                    #[batch_size, local_condition_dimension, local_condition_time]
                    #time_length will be corrected using the upsample network
                    c = tf.transpose(c, [0, 2, 1])

                if g is not None:
                    assert g.shape == (batch_size, 1)

                #Start silence frame
                if is_mulaw_quantize(hparams.input_type):
                    initial_value = mulaw_quantize(0, hparams.quantize_channels)
                elif is_mulaw(hparams.input_type):
                    initial_value = mulaw(0.0, hparams.quantize_channels)
                else:
                    initial_value = 0.0

                if is_mulaw_quantize(hparams.input_type):
                    assert initial_value >= 0 and initial_value < hparams.quantize_channels
                    initial_input = tf.one_hot(indices=initial_value, depth=hparams.quantize_channels, dtype=tf.float32)
                    initial_input = tf.tile(tf.reshape(initial_input, [1, 1, hparams.quantize_channels]), [batch_size, 1, 1])
                else:
                    initial_input = tf.ones([batch_size, 1, 1], tf.float32) * initial_value

                y_hat = self.incremental(initial_input, c=c, g=g, time_length=synthesis_length,
                    softmax=False, quantize=True, log_scale_min=hparams.log_scale_min, log_scale_min_gauss=hparams.log_scale_min_gauss)

                if is_mulaw_quantize(hparams.input_type):
                    y_hat = tf.reshape(tf.argmax(y_hat, axis=1), [batch_size, -1])
                    y_hat = util.inv_mulaw_quantize(y_hat, hparams.quantize_channels)
                elif is_mulaw(hparams.input_type):
                    y_hat = util.inv_mulaw(tf.reshape(y_hat, [batch_size, -1]), hparams.quantize_channels)
                else:
                    y_hat = tf.reshape(y_hat, [batch_size, -1])

                self.y_hat = y_hat

                if self.local_conditioning_enabled():
                    log(' local_condition: {}'.format(c.shape))
                if self.has_speaker_embedding():
                    log(' global_condition: {}'.format(g.shape))
                log(' outputs: {}'.format(y_hat.shape))

        self.variables = tf.trainable_variables()
        # NOTE(review): n_vars is computed but unused; the parameter count is
        # recomputed in the log line below.
        n_vars = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])
        log(' Receptive Field: ({} samples / {:.1f} ms)'.format(self.receptive_field, self.receptive_field / hparams.sample_rate * 1000.))

        #1_000_000 is causing syntax problems for some people?! Python please :)
        log(' WaveNet Parameters: {:.3f} Million.'.format(np.sum([np.prod(v.get_shape().as_list()) for v in self.variables]) / 1000000))

        self.ema = tf.train.ExponentialMovingAverage(decay=hparams.wavenet_ema_decay)
    def add_loss(self):
        '''Adds loss computation to the graph. Supposes that initialize function has already been called.
        '''
        # Targets are shifted one step ahead of predictions (teacher forcing):
        # predictions drop the last frame, targets drop the first.
        with tf.variable_scope('loss') as scope:
            if self.is_training:
                if is_mulaw_quantize(self._hparams.input_type):
                    # Categorical output over quantize_channels classes.
                    self.loss = MaskedCrossEntropyLoss(self.y_hat_q[:, :-1, :], self.y[:, 1:], mask=self.mask)
                else:
                    # out_channels == 2 means single-Gaussian output; otherwise
                    # mixture of discretized logistics.
                    if self._hparams.out_channels == 2:
                        self.loss = GaussianMaximumLikelihoodEstimation(self.y_hat[:, :, :-1], self.y[:, 1:, :], hparams=self._hparams, mask=self.mask)
                    else:
                        self.loss = DiscretizedMixtureLogisticLoss(self.y_hat[:, :, :-1], self.y[:, 1:, :], hparams=self._hparams, mask=self.mask)

            elif self.is_evaluating:
                # Eval loss over the single generated example; y_hat_eval is
                # produced by incremental() during initialize().
                if is_mulaw_quantize(self._hparams.input_type):
                    self.eval_loss = MaskedCrossEntropyLoss(self.y_hat_eval, self.y_eval, lengths=[self.eval_length])
                else:
                    if self._hparams.out_channels == 2:
                        self.eval_loss = GaussianMaximumLikelihoodEstimation(self.y_hat_eval, self.y_eval, hparams=self._hparams, lengths=[self.eval_length])
                    else:
                        self.eval_loss = DiscretizedMixtureLogisticLoss(self.y_hat_eval, self.y_eval, hparams=self._hparams, lengths=[self.eval_length])
    def add_optimizer(self, global_step):
        '''Adds optimizer to the graph. Supposes that initialize function has already been called.
        '''
        with tf.variable_scope('optimizer'):
            hp = self._hparams

            #Create lr schedule
            if hp.wavenet_lr_schedule == 'noam':
                learning_rate = self._noam_learning_rate_decay(hp.wavenet_learning_rate,
                    global_step,
                    warmup_steps=hp.wavenet_warmup)
            else:
                assert hp.wavenet_lr_schedule == 'exponential'
                learning_rate = self._exponential_learning_rate_decay(hp.wavenet_learning_rate,
                    global_step,
                    hp.wavenet_decay_rate,
                    hp.wavenet_decay_steps)

            #Adam optimization
            self.learning_rate = learning_rate
            optimizer = tf.train.AdamOptimizer(learning_rate, hp.wavenet_adam_beta1,
                hp.wavenet_adam_beta2, hp.wavenet_adam_epsilon)
            gradients, variables = zip(*optimizer.compute_gradients(self.loss))
            self.gradients = gradients

            #Gradients clipping
            if hp.wavenet_clip_gradients:
                # Global-norm clipping at norm 1.
                clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.)
            else:
                clipped_gradients = gradients

            # Apply gradients only after any pending UPDATE_OPS (e.g. batch norm).
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                adam_optimize = optimizer.apply_gradients(zip(clipped_gradients, variables),
                    global_step=global_step)

        #Add exponential moving average
        #https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
        #Use adam optimization process as a dependency
        with tf.control_dependencies([adam_optimize]):
            #Create the shadow variables and add ops to maintain moving averages
            #Also updates moving averages after each update step
            #This is the optimize call instead of traditional adam_optimize one.
            assert tuple(self.variables) == variables #Verify all trainable variables are being averaged
            self.optimize = self.ema.apply(variables)
def _noam_learning_rate_decay(self, init_lr, global_step, warmup_steps=4000.0):
# Noam scheme from tensor2tensor:
step = tf.cast(global_step + 1, dtype=tf.float32)
return tf.maximum(init_lr * warmup_steps**0.5 * tf.minimum(step * warmup_steps**-1.5, step**-0.5), 1e-4)
def _exponential_learning_rate_decay(self, init_lr, global_step,
decay_rate=0.5,
decay_steps=300000):
#Compute natural exponential decay
lr = tf.train.exponential_decay(init_lr,
global_step,
decay_steps,
decay_rate,
name='wavenet_lr_exponential_decay')
return lr
def get_mask(self, input_lengths, maxlen=None):
expand = not is_mulaw_quantize(self._hparams.input_type)
mask = sequence_mask(input_lengths, max_len=maxlen, expand=expand)
if is_mulaw_quantize(self._hparams.input_type):
return mask[:, 1:]
return mask[:, 1:, :]
#Sanity check functions
def has_speaker_embedding(self):
return self.embed_speakers is not None
def local_conditioning_enabled(self):
return self._hparams.cin_channels > 0
    def step(self, x, c=None, g=None, softmax=False):
        """Forward step

        Args:
            x: Tensor of shape [batch_size, channels, time_length], One-hot encoded audio signal.
            c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features.
            g: Tensor of shape [batch_size, gin_channels, 1] or Ids of shape [batch_size, 1],
                Global conditioning features.
                Note: set hparams.use_speaker_embedding to False to disable embedding layer and
                use extrnal One-hot encoded features.
            softmax: Boolean, Whether to apply softmax.

        Returns:
            a Tensor of shape [batch_size, out_channels, time_length]
        """
        #[batch_size, channels, time_length] -> [batch_size, time_length, channels]
        batch_size = tf.shape(x)[0]
        time_length = tf.shape(x)[-1]

        if g is not None:
            if self.embed_speakers is not None:
                #[batch_size, 1] ==> [batch_size, 1, gin_channels]
                g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
                #[batch_size, gin_channels, 1]
                with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
                    g = tf.transpose(g, [0, 2, 1])

        #Expand global conditioning features to all time steps
        g_bct = _expand_global_features(batch_size, time_length, g, data_format='BCT')

        if c is not None and self.upsample_conv is not None:
            # Upsample mel frames to audio sample rate. 2D upsampling adds a
            # singleton channel axis; 1D adds a singleton spatial axis.
            if self._hparams.upsample_type == '2D':
                #[batch_size, 1, cin_channels, time_length]
                expand_dim = 1
            else:
                assert self._hparams.upsample_type == '1D'
                #[batch_size, cin_channels, 1, time_length]
                expand_dim = 2

            c = tf.expand_dims(c, axis=expand_dim)

            for transposed_conv in self.upsample_conv:
                c = transposed_conv(c)

            #[batch_size, cin_channels, time_length]
            c = tf.squeeze(c, [expand_dim])

            # Upsampled conditioning must align sample-for-sample with x.
            with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], tf.shape(x)[-1])]):
                c = tf.identity(c, name='control_c_and_x_shape')

        #Feed data to network
        # Input 1x1 conv, then residual blocks whose skip outputs are summed,
        # then the final relu/1x1 post-network.
        x = self.first_conv(x)
        skips = None
        for conv in self.residual_layers:
            x, h = conv(inputs = x, c = c, g = g_bct)
            if skips is None:
                skips = h
            else:
                skips = skips + h
        x = skips

        for conv in self.last_conv_layers:
            x = conv(inputs = x)

        return tf.nn.softmax(x, axis=1) if softmax else x
    def incremental(self, initial_input, c=None, g=None,
        time_length=100, test_inputs=None,
        softmax=True, quantize=True, log_scale_min=-7.0, log_scale_min_gauss=-7.0):
        """Inceremental forward step

        Inputs of shape [batch_size, channels, time_length] are reshaped to [batch_size, time_length, channels]
        Input of each time step is of shape [batch_size, 1, channels]

        Args:
            Initial input: Tensor of shape [batch_size, channels, 1], initial recurrence input.
            c: Tensor of shape [batch_size, cin_channels, time_length], Local conditioning features
            g: Tensor of shape [batch_size, gin_channels, time_length] or [batch_size, gin_channels, 1]
                global conditioning features
            T: int, number of timesteps to generate
            test_inputs: Tensor, teacher forcing inputs (debug)
            softmax: Boolean, whether to apply softmax activation
            quantize: Whether to quantize softmax output before feeding to
                next time step input
            log_scale_min: float, log scale minimum value.

        Returns:
            Tensor of shape [batch_size, channels, time_length] or [batch_size, channels, 1]
                Generated one_hot encoded samples
        """
        batch_size = tf.shape(initial_input)[0]

        #Note: should reshape to [batch_size, time_length, channels]
        #not [batch_size, channels, time_length]
        if test_inputs is not None:
            if self.scalar_input:
                if tf.shape(test_inputs)[1] == 1:
                    test_inputs = tf.transpose(test_inputs, [0, 2, 1])
            else:
                if tf.shape(test_inputs)[1] == self._hparams.out_channels:
                    test_inputs = tf.transpose(test_inputs, [0, 2, 1])

            batch_size = tf.shape(test_inputs)[0]
            if time_length is None:
                time_length = tf.shape(test_inputs)[1]
            else:
                time_length = tf.maximum(time_length, tf.shape(test_inputs)[1])

        #Global conditioning
        if g is not None:
            if self.embed_speakers is not None:
                g = self.embed_speakers(tf.reshape(g, [batch_size, -1]))
                #[batch_size, channels, 1]
                with tf.control_dependencies([tf.assert_equal(tf.rank(g), 3)]):
                    g = tf.transpose(g, [0, 2, 1])

        # Per-time-step layout [batch, time, channels] for the generation loop.
        self.g_btc = _expand_global_features(batch_size, time_length, g, data_format='BTC')

        #Local conditioning
        # NOTE(review): self.c is only assigned inside this branch — when local
        # conditioning or upsampling is absent, body() reads an attribute that
        # may not exist; confirm callers always set it.
        if c is not None and self.upsample_conv is not None:
            if self._hparams.upsample_type == '2D':
                #[batch_size, 1, cin_channels, time_length]
                expand_dim = 1
            else:
                assert self._hparams.upsample_type == '1D'
                #[batch_size, cin_channels, 1, time_length]
                expand_dim = 2

            c = tf.expand_dims(c, axis=expand_dim)

            for upsample_conv in self.upsample_conv:
                c = upsample_conv(c)

            #[batch_size, channels, time_length]
            c = tf.squeeze(c, [expand_dim])

            with tf.control_dependencies([tf.assert_equal(tf.shape(c)[-1], time_length)]):
                self.c = tf.transpose(c, [0, 2, 1])

        #Initialize loop variables
        if initial_input.shape[1] == self._hparams.out_channels:
            initial_input = tf.transpose(initial_input, [0, 2, 1])

        initial_time = tf.constant(0, dtype=tf.int32)
        if test_inputs is not None:
            initial_input = tf.expand_dims(test_inputs[:, 0, :], axis=1)
        initial_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
        initial_loss_outputs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)

        #Only use convolutions queues for Residual Blocks main convolutions (only ones with kernel size 3 and dilations, all others are 1x1)
        # Each queue buffers the last (kw + (kw-1)*(dilation-1)) inputs of its
        # residual layer so one-sample-at-a-time inference avoids recomputation.
        initial_queues = [tf.zeros((batch_size, res_conv.layer.kw + (res_conv.layer.kw - 1) * (res_conv.layer.dilation_rate[0] - 1), self._hparams.residual_channels),
            name='convolution_queue_{}'.format(i+1)) for i, res_conv in enumerate(self.residual_layers)]

        def condition(time, unused_outputs_ta, unused_current_input, unused_loss_outputs_ta, unused_queues):
            # Generate until time_length samples are produced.
            return tf.less(time, time_length)

        def body(time, outputs_ta, current_input, loss_outputs_ta, queues):
            # One autoregressive step: run all layers on a single frame, sample
            # the next input from the predicted distribution.
            #conditioning features for single time step
            ct = None if self.c is None else tf.expand_dims(self.c[:, time, :], axis=1)
            gt = None if self.g_btc is None else tf.expand_dims(self.g_btc[:, time, :], axis=1)

            x = self.first_conv.incremental_step(current_input)

            skips = None
            new_queues = []
            for conv, queue in zip(self.residual_layers, queues):
                x, h, new_queue = conv.incremental_step(x, ct, gt, queue=queue)
                skips = h if skips is None else (skips + h)
                new_queues.append(new_queue)
            x = skips

            for conv in self.last_conv_layers:
                try:
                    x = conv.incremental_step(x)
                except AttributeError: #When calling Relu activation
                    x = conv(x)

            #Save x for eval loss computation
            loss_outputs_ta = loss_outputs_ta.write(time, tf.squeeze(x, [1])) #squeeze time_length dimension (=1)

            #Generate next input by sampling
            if self.scalar_input:
                if self._hparams.out_channels == 2:
                    x = sample_from_gaussian(
                        tf.reshape(x, [batch_size, -1, 1]),
                        log_scale_min_gauss=log_scale_min_gauss)
                else:
                    x = sample_from_discretized_mix_logistic(
                        tf.reshape(x, [batch_size, -1, 1]), log_scale_min=log_scale_min)
                next_input = tf.expand_dims(x, axis=-1) #Expand on the channels dimension
            else:
                x = tf.nn.softmax(tf.reshape(x, [batch_size, -1]), axis=1) if softmax \
                    else tf.reshape(x, [batch_size, -1])
                if quantize:
                    #[batch_size, 1]
                    sample = tf.multinomial(x, 1) #Pick a sample using x as probability (one for each batche)
                    #[batch_size, 1, quantize_channels] (time dimension extended by default)
                    x = tf.one_hot(sample, depth=self._hparams.quantize_channels)
                next_input = x

            if len(x.shape) == 3:
                x = tf.squeeze(x, [1])

            outputs_ta = outputs_ta.write(time, x)
            # tf.Print logs generation progress as a graph side effect.
            time = tf.Print(time + 1, [time+1, time_length])
            #output = x (maybe next input)
            if test_inputs is not None:
                #override next_input with ground truth
                next_input = tf.expand_dims(test_inputs[:, time, :], axis=1)

            return (time, outputs_ta, next_input, loss_outputs_ta, new_queues)

        res = tf.while_loop(
            condition,
            body,
            loop_vars=[
                initial_time, initial_outputs_ta, initial_input, initial_loss_outputs_ta, initial_queues
            ],
            parallel_iterations=32,
            swap_memory=self._hparams.wavenet_swap_with_cpu)

        outputs_ta = res[1]
        #[time_length, batch_size, channels]
        outputs = outputs_ta.stack()

        #Save eval prediction for eval loss computation
        eval_outputs = res[3].stack()
        if is_mulaw_quantize(self._hparams.input_type):
            self.y_hat_eval = tf.transpose(eval_outputs, [1, 0, 2])
        else:
            self.y_hat_eval = tf.transpose(eval_outputs, [1, 2, 0])

        #[batch_size, channels, time_length]
        return tf.transpose(outputs, [1, 2, 0])
def clear_queue(self):
self.first_conv.clear_queue()
for f in self.conv_layers:
f.clear_queue()
for f in self.last_conv_layers:
try:
f.clear_queue()
except AttributeError:
pass
|
nilq/baby-python
|
python
|
"""Testing for vault_backend module."""
import hvac
import pytest
import requests
import config
import vault_backend
def test___get_vault_client(monkeypatch):
    """Client creation succeeds with and without a configured CA cert."""
    # valid test
    client = vault_backend.__get_vault_client('salesforce')
    assert isinstance(client, hvac.Client)

    # test w/ no VAULT_CERT
    def mock_vault_cert(*args):
        return False
    monkeypatch.setattr(config, 'get_vault_ca_cert', mock_vault_cert)

    client = vault_backend.__get_vault_client('salesforce')
    assert isinstance(client, hvac.Client)
def test___get_vault_client_no_mtls_client_cert(monkeypatch):
    """Client creation fails (returns None) without an mTLS client certificate."""
    def mock_config(*args):
        return False
    monkeypatch.setattr(config, 'get_vault_mtls_client_cert', mock_config)

    client = vault_backend.__get_vault_client('salesforce')
    assert client is None
def test___get_vault_client_no_mtls_client_key(monkeypatch):
    """Client creation fails (returns None) without an mTLS client key."""
    def mock_config(*args):
        return False
    monkeypatch.setattr(config, 'get_vault_mtls_client_key', mock_config)

    client = vault_backend.__get_vault_client('salesforce')
    assert client is None
def test___get_vault_client_no_vault_url(monkeypatch):
    """Client creation fails (returns None) without a configured Vault URL."""
    def mock_config(*args):
        return False
    monkeypatch.setattr(config, 'get_vault_url', mock_config)

    client = vault_backend.__get_vault_client('salesforce')
    assert client is None
def test___get_vault_client_no_vault_ns(monkeypatch):
    """Client creation fails (returns None) without a configured Vault namespace."""
    def mock_config(*args):
        return False
    monkeypatch.setattr(config, 'get_vault_namespace', mock_config)

    client = vault_backend.__get_vault_client('salesforce')
    assert client is None
def test_get_dynamic_secret(monkeypatch, get_jwt):
    """get_dynamic_secret returns b'' when Vault is unreachable or the client
    cannot be created."""
    # test w/o connection to vault
    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           'latest', get_jwt)
    assert dek == b''

    # test w/ failing client creation
    def mock_client(tenant: str):
        return None
    monkeypatch.setattr(vault_backend, '__get_vault_client', mock_client)

    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           'latest', get_jwt)
    # Bug fix: the second call's result was never asserted, so a regression in
    # the failing-client path could not fail this test (siblings all assert b'').
    assert dek == b''
def test_get_dynamic_secret_3(monkeypatch, get_jwt):
    """With an authenticated, initialized client but no reachable Vault, the
    key read raises a connection error."""
    # test client initialized
    def mock_auth_client(*args):
        return vault_backend.__get_vault_client('salesforce')
    monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
                        mock_auth_client)

    def mock_client_init(*args):
        return True
    monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
                        mock_client_init)

    # fails, because cannot reach Vault
    with pytest.raises(requests.exceptions.ConnectionError):
        vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                         'latest', get_jwt)
def test_get_dynamic_secret_4(monkeypatch, get_jwt):
    """A Forbidden response from transit read_key yields an empty key."""
    # test client initialized
    def mock_auth_client(*args):
        return vault_backend.__get_vault_client('salesforce')
    monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
                        mock_auth_client)

    def mock_client_init(*args):
        return True
    monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
                        mock_client_init)

    # mock client.secrets.transit.read_key()
    def mock_readkey(*args, **kwargs):
        raise hvac.exceptions.Forbidden
    monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
                        mock_readkey)

    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           'latest', get_jwt)
    assert dek == b''
def test_get_dynamic_secret_5(monkeypatch, get_jwt):
    """An empty read_key response (no 'data') yields an empty key."""
    # test client initialized
    def mock_auth_client(*args):
        return vault_backend.__get_vault_client('salesforce')
    monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
                        mock_auth_client)
    def mock_client_init(*args):
        return True
    monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
                        mock_client_init)
    # mock client.secrets.transit.read_key()
    def mock_readkey(*args, **kwargs):
        response = {}
        return response
    monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
                        mock_readkey)
    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           'latest', get_jwt)
    assert dek == b''
def test_get_dynamic_secret_6(monkeypatch, get_jwt):
    """read_key succeeds but the key export step fails -> empty key.

    NOTE(review): export_key is deliberately left unmocked here -- confirm
    the intent is that the export failure path returns b''.
    """
    # test client initialized
    def mock_auth_client(*args):
        return vault_backend.__get_vault_client('salesforce')
    monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
                        mock_auth_client)
    def mock_client_init(*args):
        return True
    monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
                        mock_client_init)
    # mock client.secrets.transit.read_key()
    def mock_readkey(*args, **kwargs):
        response = {'data': {'latest_version': 1}}
        return response
    monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
                        mock_readkey)
    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           'latest', get_jwt)
    assert dek == b''
def test_get_dynamic_secret_7(monkeypatch, get_jwt):
    """export_key returning None (no exportable material) yields b''."""
    # test client initialized
    def mock_auth_client(*args):
        return vault_backend.__get_vault_client('salesforce')
    monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
                        mock_auth_client)
    def mock_client_init(*args):
        return True
    monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
                        mock_client_init)
    # mock client.secrets.transit.read_key()
    def mock_readkey(*args, **kwargs):
        response = {'data': {'latest_version': 1}}
        return response
    monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
                        mock_readkey)
    # mock client.secrets.transit.export_key()
    def mock_exportkey(*args, **kwargs):
        return None
    monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'export_key',
                        mock_exportkey)
    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           'latest', get_jwt)
    assert dek == b''
def test_get_dynamic_secret_8(monkeypatch, get_jwt):
    """Happy path: a base64 export payload is decoded to the raw key."""
    # test client initialized
    def mock_auth_client(*args):
        return vault_backend.__get_vault_client('salesforce')
    monkeypatch.setattr(vault_backend, '__authenticate_vault_client',
                        mock_auth_client)
    def mock_client_init(*args):
        return True
    monkeypatch.setattr(hvac.api.SystemBackend, 'is_initialized',
                        mock_client_init)
    # mock client.secrets.transit.read_key()
    def mock_readkey(*args, **kwargs):
        response = {'data': {'latest_version': 1}}
        return response
    monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'read_key',
                        mock_readkey)
    magic_dek = 'bWFnaWNfZGVr' # value: magic_dek
    # mock client.secrets.transit.export_key()
    def mock_exportkey(*args, **kwargs):
        response = {'data': {'keys': {'1': magic_dek}}}
        return response
    monkeypatch.setattr(hvac.api.secrets_engines.transit.Transit, 'export_key',
                        mock_exportkey)
    dek = vault_backend.get_dynamic_secret('salesforce', 'salesforce',
                                           1, get_jwt)
    assert dek == b'magic_dek'
def test___get_vault_token(monkeypatch, get_jwt):
    """A successful JWT login returns the client token from the response."""
    # test with valid token
    client = vault_backend.__get_vault_client('salesforce')
    fake_token = 's.FAKETOKEN'
    def mock_devmode(*args):
        # if get_config_by_keypath() is called with key DEV_MODE,
        # interfere and return true, if called with other keys, ignore
        if args[0] == 'DEV_MODE':
            return True
        if args[0] == [
                'TENANT_CFG.salesforce.backend.VAULT.default_role',
                'VAULT.default_role'
                ]:
            # return default role
            return 'distributey'
    monkeypatch.setattr(config, 'get_config_by_keypath', mock_devmode)
    def mock_vault_auth_jwt(*args, **kwargs):
        # example token: s.f7Ea3C3ojOYE0GRLzmhSGNkE
        response = {'auth': {'client_token': fake_token}}
        return response
    monkeypatch.setattr(
        hvac.api.auth_methods.jwt.JWT, 'jwt_login', mock_vault_auth_jwt)
    token = vault_backend.__get_vault_token(
        client,
        'salesforce',
        get_jwt,
        'jwt')
    assert token == fake_token
def test___get_vault_token2(monkeypatch, get_jwt):
    """A malformed login response (missing client_token) yields ''."""
    # test with invalid response
    client = vault_backend.__get_vault_client('salesforce')
    fake_token = 's.FAKETOKEN'
    def mock_vault_auth_jwt(*args, **kwargs):
        # example token: s.f7Ea3C3ojOYE0GRLzmhSGNkE
        response = {'auth': {'wrong_key': fake_token}}
        return response
    monkeypatch.setattr(
        hvac.api.auth_methods.jwt.JWT, 'jwt_login', mock_vault_auth_jwt)
    token = vault_backend.__get_vault_token(
        client,
        'salesforce',
        get_jwt,
        'jwt')
    assert token == ''
def test___authenticate_vault_client(monkeypatch, get_jwt):
    """A cached token plus is_authenticated()==True returns a usable client."""
    # test with "valid" token
    client = vault_backend.__get_vault_client('salesforce')
    def mock_client_is_authenticated(*args, **kwargs):
        return True
    monkeypatch.setattr(
        hvac.v1.Client, 'is_authenticated', mock_client_is_authenticated)
    # Pre-populate the module-level token cache so no login round-trip
    # is attempted.
    vault_backend.__VAULT_TOKEN_CACHE = {
        'c2FsZXNmb3JjZS1qd3Rfa2lkX3NhbGVzZm9yY2Vfc2VydmljZVg=': 's.FAKETOKEN'
    }
    client = vault_backend.__authenticate_vault_client(
        client, 'salesforce', get_jwt)
    assert isinstance(client, hvac.v1.Client)
def test___authenticate_vault_client2(monkeypatch, get_jwt):
    """is_authenticated()==False makes authentication return None."""
    # test with invalid token
    client = vault_backend.__get_vault_client('salesforce')
    def mock_client_is_authenticated(*args, **kwargs):
        return False
    monkeypatch.setattr(
        hvac.v1.Client, 'is_authenticated', mock_client_is_authenticated)
    client = vault_backend.__authenticate_vault_client(
        client, 'salesforce', get_jwt)
    assert client is None
|
nilq/baby-python
|
python
|
#====================================================================================
# TOPIC: PYTHON - Modules Usage
#====================================================================================
#
# FILE-NAME : 013_module_usage.py
# DEPENDANT-FILES : These are the files and libraries needed to run this program ;
# module.py and 013_module_usage.py
#
# AUTHOR : learnpython.com / Hemaxi
# (c) 2013
#
# DESC : Python Modules , used to organize code.
#
#====================================================================================
# Use this to import the module named "module"
import module
# Using the module's variables and functions
# print the "MODULE" variables, use"module." -->DOT
print (module.country_1, module.country_2, module.country_3);
# OUTPUT: USA China India
# print the "MODULE" LIST
print (module.list_world_nations);
# OUTPUT: ['USA', 'China', 'India']
# print the "MODULE" TUPLE
print (module.tuple_world_nations);
# OUTPUT: ('USA', 'China', 'India')
# print the "MODULE" DICTIONARY
print (module.dictionary_world_nations);
# OUTPUT: {'Country_1': 'India'}
# calling the function from the module
print (module.module_function_add(1, 3));
# OUTPUT: 4
#====================================================================================
# END OF CODE
#====================================================================================
|
nilq/baby-python
|
python
|
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from colorful.fields import RGBColorField
from mayan.apps.acls.models import AccessControlList
from mayan.apps.databases.model_mixins import ExtraDataModelMixin
from mayan.apps.events.classes import EventManagerMethodAfter, EventManagerSave
from mayan.apps.events.decorators import method_event
from mayan.apps.documents.models import Document
from mayan.apps.documents.permissions import permission_document_view
from .events import (
event_tag_attached, event_tag_created, event_tag_edited, event_tag_removed
)
from .html_widgets import widget_single_tag
class Tag(ExtraDataModelMixin, models.Model):
    """
    This model represents a binary property that can be applied to a document.
    The tag can have a label and a color.
    """
    label = models.CharField(
        db_index=True, help_text=_(
            'A short text used as the name.'
        ), max_length=128, unique=True, verbose_name=_('Label')
    )
    color = RGBColorField(
        help_text=_('The RGB color values for this.'),
        verbose_name=_('Color')
    )
    documents = models.ManyToManyField(
        related_name='tags', to=Document, verbose_name=_('Documents')
    )

    class Meta:
        ordering = ('label',)
        verbose_name = _('Tag')
        verbose_name_plural = _('Tags')

    def __str__(self):
        return self.label

    @method_event(
        action_object='self',
        event=event_tag_attached,
        event_manager_class=EventManagerMethodAfter,
    )
    def attach_to(self, document):
        """Attach this tag to ``document`` and fire the attached event."""
        self._event_target = document
        self.documents.add(document)

    def get_absolute_url(self):
        return reverse(
            viewname='tags:tag_document_list', kwargs={'tag_id': self.pk}
        )

    def get_document_count(self, user):
        """
        Return the numeric count of documents that have this tag attached.
        The count is filtered by access.
        """
        return self.get_documents(permission=permission_document_view, user=user).count()

    def get_documents(self, user, permission=None):
        """
        Return a filtered queryset documents that have this tag attached.

        If ``permission`` is given, the queryset is restricted to documents
        the ``user`` holds that permission for.
        """
        queryset = self.documents.all()
        if permission:
            # Bug fix: restrict by the *requested* permission; previously
            # permission_document_view was always used, ignoring the
            # ``permission`` argument.
            queryset = AccessControlList.objects.restrict_queryset(
                permission=permission, queryset=queryset,
                user=user
            )
        return queryset

    def get_preview_widget(self):
        return widget_single_tag(tag=self)
    get_preview_widget.short_description = _('Preview')

    @method_event(
        action_object='self',
        event=event_tag_removed,
        event_manager_class=EventManagerMethodAfter,
    )
    def remove_from(self, document):
        """Detach this tag from ``document`` and fire the removed event."""
        self._event_target = document
        self.documents.remove(document)

    @method_event(
        event_manager_class=EventManagerSave,
        created={
            'event': event_tag_created,
            'target': 'self',
        },
        edited={
            'event': event_tag_edited,
            'target': 'self',
        }
    )
    def save(self, *args, **kwargs):
        return super().save(*args, **kwargs)
class DocumentTag(Tag):
    # Proxy model: same database table as Tag, but lets document-scoped
    # views/permissions register against a distinct model class.
    class Meta:
        proxy = True
        verbose_name = _('Document tag')
        verbose_name_plural = _('Document tags')
|
nilq/baby-python
|
python
|
"""Common run function which does the heavy lifting of formatting output"""
import csv
import enum
import itertools
import logging
import typing
from notions.flatten import flatten_item
from notions.models.database import Database
from notions.models.page import Page, PageTitleProperty
from . import yaml
from .config import OutputFormats
LOG = logging.getLogger(__name__)
def text_format_item(
    item: typing.Union[Page, Database],
    output: typing.TextIO,
    text_formatter: typing.Callable[[typing.Any], str],
):
    """Render *item* with *text_formatter* and write it, newline-terminated."""
    output.write(text_formatter(item) + "\n")
async def text_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
    text_formatter: typing.Callable[[typing.Any], str],
):
    """Render each item of *iterable* as one line of text."""
    async for entry in iterable:
        text_format_item(entry, output, text_formatter)
def notion_json_format_item(
    item: typing.Union[Page, Database],
    output: typing.TextIO,
):
    """Write the raw Notion-shaped JSON for *item*, newline-terminated."""
    output.write(item.json() + "\n")
def json_format_item(
    item: typing.Union[Page, Database],
    output: typing.TextIO,
):
    """Flatten *item* and write its JSON representation, newline-terminated."""
    payload = flatten_item(item).json()
    output.write(payload + "\n")
async def json_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
    formatter=lambda item: flatten_item(item).json(),
):
    """Write all items of *iterable* as a single JSON array.

    Items are materialised first so the separating commas can be placed
    between elements.

    :param formatter: callable turning one item into its JSON string.
    """
    items = []
    async for item in iterable:
        items.append(formatter(item))
    output.write("[\n")
    LOG.info(f"Writing {len(items)} items to {output.name}")
    # Bug fix: guard the tail write -- items[-1] raised IndexError when the
    # iterable was empty; now an empty input produces a valid empty array.
    if items:
        for item in items[0:-1]:
            output.write(item)
            output.write(",\n")
        output.write(items[-1])
        output.write("\n]")
    else:
        output.write("]")
async def notion_json_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
):
    """Write all items as a JSON array of raw Notion API objects."""
    # re-use the json formatter, but serialize without flattening
    await json_format_iterable(iterable, output, formatter=lambda item: item.json())
async def jsonl_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
):
    """Write flattened items as JSON Lines (one JSON document per line)."""
    async for entry in iterable:
        output.write(flatten_item(entry).json() + "\n")
async def notion_jsonl_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
):
    """Write raw Notion objects as JSON Lines (one document per line)."""
    async for entry in iterable:
        output.write(entry.json() + "\n")
def notion_yaml_format_item(
    item: typing.Union[Page, Database],
    output: typing.TextIO,
):
    """Dump the raw Notion-shaped dict of *item* as YAML."""
    data = item.dict()
    yaml.dump(data, output)
def yaml_format_item(
    item: typing.Union[Page, Database],
    output: typing.TextIO,
):
    """Flatten *item* and dump it as YAML."""
    data = flatten_item(item).dict()
    yaml.dump(data, output)
async def notion_yaml_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
):
    """Dump all items as one YAML list of raw Notion-shaped dicts."""
    collected = [entry.dict() async for entry in iterable]
    yaml.dump(collected, output)
async def yaml_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
):
    """Dump all items as one YAML list of flattened dicts."""
    collected = [flatten_item(entry).dict() async for entry in iterable]
    yaml.dump(collected, output)
def default_text_formatter(item: typing.Union[Database, Page]) -> str:
    """Return a one-line human-readable summary of a page or database.

    Shape: ``"<type> : <id> : <title> : <property names>"``.
    """
    title = "-No title-"
    item_type = "unknown"
    # Bug fix: title_property was left unbound for pages without a usable
    # "Name" property, raising UnboundLocalError below.
    title_property = []
    if isinstance(item, Database):
        title_property = item.title
        item_type = "database"
    else:
        item_type = "page"
        if "Name" in item.properties and isinstance(
            item.properties["Name"], PageTitleProperty
        ):
            title_property = item.properties["Name"].title
    titles = [t.plain_text for t in title_property]
    if titles:
        title = titles[0]
    return f"{item_type} : {item.id} : {title} : {list(item.properties)}"
async def csv_format_iterable(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
    format: str,
    guess_headers: bool,
):
    """Write items as CSV ('tsv' selects the excel-tab dialect).

    The header row is emitted lazily from the first item.  With
    ``guess_headers=True`` the first item's property names become extra
    columns after the core columns.
    """
    writer = csv.writer(output, dialect="excel-tab" if format == "tsv" else "excel")
    core_headers = ["type", "id", "title", "created_time", "last_edited_time"]
    first_row = True
    async for item in iterable:
        item = flatten_item(item)
        if first_row:
            if guess_headers:
                # TODO: expand and flatten nested objects to property_nested_name
                property_headers = list(item.properties)
                headers = core_headers + property_headers
            else:
                headers = core_headers
            writer.writerow(headers)
            first_row = False
        row = [item.type, item.id, item.title, item.created_time, item.last_edited_time]
        if guess_headers:
            # NOTE(review): assumes every later item carries the same
            # properties as the first one; a missing key raises KeyError --
            # confirm upstream guarantees this.
            row += [str(item.properties[header].value) for header in property_headers]
        else:
            row += [str(prop.value) for prop in item.properties.values()]
        writer.writerow(row)
async def csv_format_item(
    item: typing.Union[Page, Database],
    output: typing.TextIO,
    format: str,
    guess_headers: bool,
):
    """CSV/TSV-format a single item by wrapping it in a one-element stream."""
    async def items():
        yield item

    await csv_format_iterable(
        items(), output, format=format, guess_headers=guess_headers
    )
async def run(
    iterable: typing.AsyncIterable,
    output: typing.TextIO,
    output_format: OutputFormats,
    text_formatter: typing.Callable[[typing.Any], str] = default_text_formatter,
    guess_headers: bool = False,
):
    """Helper for commands which handles formatting output.

    Dispatches *iterable* to the formatter matching *output_format*;
    ``guess_headers`` only affects the csv/tsv formats.
    """
    if output_format == OutputFormats.notion_json:
        await notion_json_format_iterable(iterable, output)
    elif output_format == OutputFormats.notion_jsonl:
        await notion_jsonl_format_iterable(iterable, output)
    elif output_format == OutputFormats.notion_yaml:
        await notion_yaml_format_iterable(iterable, output)
    elif output_format == OutputFormats.text:
        await text_format_iterable(iterable, output, text_formatter)
    elif output_format == OutputFormats.json:
        await json_format_iterable(iterable, output)
    elif output_format == OutputFormats.jsonl:
        await jsonl_format_iterable(iterable, output)
    elif output_format == OutputFormats.yaml:
        await yaml_format_iterable(iterable, output)
    elif output_format == OutputFormats.tsv:
        await csv_format_iterable(iterable, output, "tsv", guess_headers=guess_headers)
    elif output_format == OutputFormats.csv:
        await csv_format_iterable(iterable, output, "csv", guess_headers=guess_headers)
    else:
        raise NotImplementedError(f"Unknown output format: {output_format=}")
async def run_single_item(
    awaitable: typing.Awaitable[typing.Union[Page, Database]],
    output: typing.TextIO,
    output_format: OutputFormats,
    text_formatter: typing.Callable[[typing.Any], str] = default_text_formatter,
    guess_headers: bool = False,
):
    """Await a single page/database and format it per *output_format*."""
    item = await awaitable
    if output_format == OutputFormats.notion_json:
        notion_json_format_item(item, output)
    elif output_format == OutputFormats.notion_jsonl:
        # For a single item, jsonl output is identical to json output.
        notion_json_format_item(item, output)
    elif output_format == OutputFormats.notion_yaml:
        notion_yaml_format_item(item, output)
    elif output_format == OutputFormats.text:
        text_format_item(item, output, text_formatter)
    elif output_format == OutputFormats.json:
        json_format_item(item, output)
    elif output_format == OutputFormats.jsonl:
        json_format_item(item, output)
    elif output_format == OutputFormats.yaml:
        yaml_format_item(item, output)
    elif output_format == OutputFormats.tsv:
        await csv_format_item(item, output, "tsv", guess_headers=guess_headers)
    elif output_format == OutputFormats.csv:
        await csv_format_item(item, output, "csv", guess_headers=guess_headers)
    else:
        raise NotImplementedError(f"Unknown output format: {output_format=}")
|
nilq/baby-python
|
python
|
import numpy as np
import pickle
from natasha import (
Doc,
Segmenter,
NewsEmbedding,
NewsMorphTagger,
MorphVocab
)
from navec import Navec
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler
from telegram import Bot as Bot_
from metrics import metric
PATH = 'navec_hudlit_v1_12B_500K_300d_100q.tar' # Name of file for Navec
NAME = 'embeddings'  # basename of the pickle holding tag embedding vectors
NAME_POP = 'popularity'  # basename of the pickle holding tag popularity weights
TOKEN = ...  # Telegram bot token; must be replaced before running the bot
INPUT = 0  # the single ConversationHandler state id
# Natasha setup.
segm = Segmenter()
_emb = NewsEmbedding()
morph_tagger = NewsMorphTagger(_emb)
morph_vocab = MorphVocab()
def query_to_noun(query: str) -> list[str]:
    """Return the lemmatized nouns found in *query* (lower-cased)."""
    doc = Doc(query.lower())
    doc.segment(segmenter=segm)
    doc.tag_morph(morph_tagger)
    nouns = []
    for token in doc.tokens:
        if token.pos != 'NOUN':
            continue
        token.lemmatize(morph_vocab)
        nouns.append(token.lemma)
    return nouns
# Navec setup.
navec = Navec.load(PATH)
# Loading pretrained embedding vocab.
with open(NAME + '.pkl', 'rb') as f:
    embed_dict = pickle.load(f)
# Loading tag popularity weights.
with open(NAME_POP + '.pkl', 'rb') as f:
    pop_dict = pickle.load(f)
def get_tags(request: str) -> str:
    """Return a message with candidate tags for *request*.

    Builds a popularity-weighted average Navec embedding of the request's
    nouns, then ranks known tags by distance (popular tags are boosted by
    dividing the distance by log(popularity + 1) + 1).
    """
    nouns = query_to_noun(request)
    if not len(nouns):
        # No nouns -> explain why no tags can be suggested.
        return f'В запросе \'{request}\' не найдено существительных.'
    request_vec = np.zeros(300)  # Navec vectors are 300-dimensional
    found = False
    sum_weights = 0
    for noun in nouns:
        if noun in navec:
            if noun in pop_dict:
                # Weight known-popular nouns by their popularity.
                request_vec += navec[noun] * pop_dict[noun]
                sum_weights += pop_dict[noun]
            else:
                request_vec += navec[noun]
                sum_weights += 1
            found = True
    if not found:
        return f'В запросе \'{request}\' не найдено существительных с реализованными эмбеддингами.'
    request_vec /= sum_weights
    distances = {
        key: (metric(request_vec, vec) / (np.log(pop_dict[key] + 1) + 1) if key in pop_dict else metric(request_vec, vec))
        for key, vec in embed_dict.items()}
    distances = {k: v for k, v in sorted(distances.items(), key=lambda item: item[1])}
    # NOTE(review): the slice starts at 1, skipping the closest candidate --
    # confirm this is intentional (e.g. to drop an exact-match tag).
    req_keys = list(distances.keys())[1:11]
    return f'Потенциальные теги для запроса \'{request}\': {req_keys}'
class Bot:
    """Telegram bot that replies with suggested tags for a text query."""
    def __init__(self, token: str = TOKEN):
        # token: Telegram bot API token.
        self.token = token
    def start(self) -> None:
        """Create the underlying bot/updater and begin polling."""
        self.bot = Bot_(token=self.token)
        self.updater = Updater(self.token, use_context=True)
        self.dispatcher = self.updater.dispatcher
        self.request()
    def stop(self) -> None:
        """Stop the polling updater."""
        self.updater.stop()
    def start_msg(self, update, _):
        """Handle /start: greet the user and enter the INPUT state."""
        self.user_id = update.message.from_user.id
        msg = 'Привет! Введи запрос, содержащий существительное, и я подскажу потенциальные теги ' \
              'для твоего запроса.'
        update.message.reply_text(msg)
        return INPUT
    def cancel_msg(self, update, _):
        # NOTE(review): defined but not registered below -- the fallback
        # uses a bare lambda instead; confirm which behavior is intended.
        msg = 'Определение тегов остановлено.'
        update.message.reply_text(msg)
        return ConversationHandler.END
    def tags_reply(self, update, _):
        """Reply to a free-text message with the suggested tags."""
        msg = get_tags(update.message.text)
        update.message.reply_text(msg)
        return INPUT
    def request(self) -> None:
        """Register the conversation handler and start long polling."""
        conv_handler = ConversationHandler(
            entry_points=[CommandHandler('start', self.start_msg)],
            states={
                INPUT: [MessageHandler(Filters.text & ~Filters.command, self.tags_reply)],
            },
            fallbacks=[CommandHandler('cancel', lambda update, context: ConversationHandler.END)],
        )
        self.dispatcher.add_handler(conv_handler)
        self.updater.start_polling()
if __name__ == '__main__':
    # Run the bot until any console input is received, then shut down.
    bot = Bot()
    bot.start()
    _ = input()
    bot.stop()
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class CityeventConfig(AppConfig):
    # Django application configuration for the cityEvent app.
    name = 'cityEvent'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import arrow
from app import celery, create_app
from app.models.email_event import EmailEvent
from app.email import send_email
@celery.task
def schedule_send_emails():
    """Periodic Celery task: dispatch every email event due right now."""
    # Truncate to the minute so the equality lookup matches
    # minute-granularity scheduled timestamps.
    now = arrow.utcnow().replace(second=0, microsecond=0)
    app = create_app(os.getenv('JUBLIA_CONFIG') or 'default')
    with app.app_context():
        # find email_events need to be send.
        target_emailEvents = find_target_emailEvents(timestamp=now)
        for email_event in target_emailEvents:
            # send email (asynchronously, one task per event)
            send_email.delay(email_event.id)
def find_target_emailEvents(timestamp):
    '''
    Return the EmailEvents that are due to be sent.

    Matches rows whose timestamp equals *timestamp* exactly and whose
    is_send flag is still False.
    '''
    target_emailEvents = EmailEvent.query.filter_by(timestamp=timestamp, is_send=False).all()
    return target_emailEvents
|
nilq/baby-python
|
python
|
# Command Line Interface
import argparse as ap
import datetime as dt
import inflationtools.main as main
from argparse import RawTextHelpFormatter # Allows to use newline in help text
import locale
import gettext # Unable to get pot for this file... find the reason.
# Install the pt_BR translation only when the system locale is Brazilian.
pt = gettext.translation('CLI', localedir='locales', languages=['pt_BR'])
sys_locale = locale.getdefaultlocale()
if 'BR' in sys_locale[0]:
    pt.install()
    # NOTE(review): `_` is only bound on the BR branch; on other locales
    # the `_(...)` calls in CLI() would raise NameError -- confirm.
    _ = pt.gettext
locale.setlocale(locale.LC_NUMERIC,
                 sys_locale[0][0:2])# Sets locales to system default for numbers
locale.setlocale(locale.LC_MONETARY, 'pt') # Sets locales to Brazil, for money
# Prepares indexes list (sorted names from both URL sources, one per line).
indexes = {}
indexes.update(main.bcb_urls)
indexes.update(main.quandl_urls)
indexes = list(indexes.keys())
indexes.sort()
indexes = '\n'.join(indexes)
# Date parser
def parse_dates(date_string):
    """Parse a ``'MM-YYYY'`` string into a datetime on day 1 of that month.

    :param date_string: month and year, e.g. ``'01-2001'``.
    :return: ``datetime.datetime`` for the first day of the given month.
    :raises TypeError: if *date_string* is not a str.
    :raises ValueError: if the string does not match ``'MM-YYYY'``.
    """
    # Validate with a real exception: `assert` is stripped under `python -O`.
    if not isinstance(date_string, str):
        raise TypeError(f'date_string is a {type(date_string)}')
    # Prepend a day so strptime can parse a complete date.
    date_string = '01-' + date_string
    new_date = dt.datetime.strptime(date_string, '%d-%m-%Y') # Quandl uses '2009-09-30' date style
    return new_date
def CLI():
    """
    Implements the argument parser to inflationtools.
    :return:
    """
    parser = ap.ArgumentParser(formatter_class=RawTextHelpFormatter)
    parser.add_argument('index', metavar=_('index'),
                        help=_('The inflation index that you want to look. Available: \n') + indexes)
    parser.add_argument('start_date', metavar=_('start_date'),
                        help=_("Starting date, using '01-2001' format."))
    parser.add_argument('end_date', metavar=_('end_date'),
                        help=_("Ending date, using '01-2001' format."))
    parser.add_argument('-a', '--amount', metavar=_('amount'),
                        help=_('Amount you want to update.'))
    arguments = parser.parse_args()
    arguments.start_date, arguments.end_date = parse_dates(arguments.start_date), parse_dates(arguments.end_date)
    inflation = main.get_cumulative_inflation(arguments.index, arguments.start_date, arguments.end_date)
    if arguments.amount:
        money = arguments.amount
        # Strip an optional 'R$' currency prefix before parsing.
        if money[0:2] == 'R$':
            money = money[2:]
        money = locale.atof(money)
        # Apply the cumulative inflation factor to the amount.
        money *= inflation
        print(locale.currency(money)) # Prints in BRL
    else:
        # No amount given: print the bare inflation factor.
        print(locale.str(inflation))
# Script entry point.
if __name__ == '__main__':
    CLI()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-28 15:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the optional `folder` CharField to the book model.
    dependencies = [
        ('book', '0008_book_type'),
    ]
    operations = [
        migrations.AddField(
            model_name='book',
            name='folder',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
def islower(c):
    """Return True if *c* is a single ASCII lowercase letter (a-z)."""
    # ord('a') == 97, ord('z') == 122
    return 97 <= ord(c) <= 122
|
nilq/baby-python
|
python
|
import json
import os
import time
import pandas as pd
from bing import bing_web_search
def crawl_snippets(title, retry=3):
    """Search Bing for *title* and return the title plus page snippets,
    joined by newlines.

    Retries the search up to *retry* times when the response lacks a
    'webPages' payload (e.g. throttling); returns None if all attempts fail.
    """
    for _ in range(retry):
        # Bug fix: re-issue the search on each attempt; previously the same
        # failed response was re-parsed, so retries could never succeed.
        _, raw_resp = bing_web_search(title)
        response = json.loads(raw_resp)
        try:
            pages = response['webPages']['value']
            return '\n'.join([title] + [page['snippet'] for page in pages])
        except KeyError:
            # Back off briefly before retrying.
            time.sleep(1.5)
            print('retry fail response: {}'.format(response))
    print('fail to crawl {}'.format(title))
def retrieve_data(titles, prefix):
    """Crawl snippets for each title and cache them as ``{prefix}/{i}.txt``.

    Files are numbered from 1; existing files are skipped so the crawl can
    be resumed.
    """
    for i, title in enumerate(titles, 1):
        filename = '{}/{}.txt'.format(prefix, i)
        if os.path.exists(filename):
            # Already crawled in a previous run.
            continue
        result = crawl_snippets(title)
        if result is None:
            print('fail to crawl index: {}, title: {}, skip it'.format(i, title))
            continue
        # Bug fix: use a context manager so the handle is closed even if
        # the write raises (previously open()/close() could leak).
        with open(filename, 'w+', encoding='utf-8') as file:
            file.write(result)
        time.sleep(0.01)
# Crawl and cache snippets for both dataset splits.
for mode in ['train', 'test']:
    path = '../../input/{}_v2.csv'.format(mode)
    df = pd.read_csv(path)
    retrieve_data(df['title'].values, '../../snippets/{}'.format(mode))
|
nilq/baby-python
|
python
|
import base64
from unittest.mock import ANY
import pytest
from rhub.auth.keycloak import KeycloakClient
from rhub.api import DEFAULT_PAGE_LIMIT
API_BASE = '/v0'
def test_token_create(client, keycloak_mock):
    """Basic-auth credentials are forwarded to Keycloak login."""
    keycloak_mock.login.return_value = {'access_token': 'foobar'}
    rv = client.post(
        f'{API_BASE}/auth/token/create',
        headers={
            'Authorization': 'Basic ' + base64.b64encode(b'user:pass').decode(),
        }
    )
    keycloak_mock.login.assert_called_with('user', 'pass')
    assert rv.status_code == 200
    assert rv.json == {'access_token': 'foobar'}
def test_me(client, keycloak_mock):
    """/me returns the authenticated user's record plus an _href link."""
    keycloak_mock.user_get.return_value = {
        'id': '00000000-0000-0000-0000-000000000000',
        'username': 'user',
    }
    rv = client.get(
        f'{API_BASE}/me',
        headers={'Authorization': 'Bearer foobar'},
    )
    assert rv.status_code == 200
    assert rv.json == {
        'id': '00000000-0000-0000-0000-000000000000',
        'username': 'user',
        '_href': ANY,
    }
def test_list_users(client, keycloak_mock):
    """User listing paginates with the default page limit."""
    keycloak_mock.user_list.return_value = [{
        'id': '00000000-0000-0000-0000-000000000000',
        'username': 'user',
    }]
    rv = client.get(
        f'{API_BASE}/auth/user',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.user_list.assert_called_with({'first': 0, 'max': DEFAULT_PAGE_LIMIT})
    assert rv.status_code == 200
    assert rv.json == [{
        'id': '00000000-0000-0000-0000-000000000000',
        'username': 'user',
        '_href': ANY,
    }]
def test_create_user(client, keycloak_mock):
    """User creation forwards the payload and returns the created record."""
    user_id = '00000000-0000-0000-0000-000000000000'
    user_data = {'username': 'user', 'email': 'user@example.com'}
    keycloak_mock.user_create.return_value = user_id
    keycloak_mock.user_get.return_value = user_data | {'id': user_id}
    rv = client.post(
        f'{API_BASE}/auth/user',
        headers={'Authorization': 'Bearer foobar'},
        json=user_data,
    )
    keycloak_mock.user_create.assert_called_with(user_data)
    keycloak_mock.user_get.assert_called_with(user_id)
    assert rv.status_code == 200
    assert rv.json == user_data | {'id': user_id, '_href': ANY}
def test_get_user(client, keycloak_mock):
    """Fetching a user by id proxies to keycloak user_get."""
    user_id = '00000000-0000-0000-0000-000000000000'
    user_data = {'username': 'user', 'email': 'user@example.com'}
    keycloak_mock.user_get.return_value = user_data | {'id': user_id}
    rv = client.get(
        f'{API_BASE}/auth/user/{user_id}',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.user_get.assert_called_with(user_id)
    assert rv.status_code == 200
    assert rv.json == user_data | {'id': user_id, '_href': ANY}
def test_update_user(client, keycloak_mock):
    """PATCHing a user forwards the changes and returns the updated record."""
    user_id = '00000000-0000-0000-0000-000000000000'
    user_data = {'username': 'user', 'email': 'new-user@example.com'}
    keycloak_mock.user_update.return_value = user_id
    keycloak_mock.user_get.return_value = user_data | {'id': user_id}
    rv = client.patch(
        f'{API_BASE}/auth/user/{user_id}',
        headers={'Authorization': 'Bearer foobar'},
        json=user_data,
    )
    keycloak_mock.user_update.assert_called_with(user_id, user_data)
    keycloak_mock.user_get.assert_called_with(user_id)
    assert rv.status_code == 200
    assert rv.json == user_data | {'id': user_id, '_href': ANY}
def test_delete_user(client, keycloak_mock):
    """Deleting a user proxies to keycloak user_delete and returns {}."""
    user_id = '00000000-0000-0000-0000-000000000000'
    keycloak_mock.user_delete.return_value = None
    rv = client.delete(
        f'{API_BASE}/auth/user/{user_id}',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.user_delete.assert_called_with(user_id)
    assert rv.status_code == 200
    assert rv.json == {}
def test_list_user_groups(client, keycloak_mock):
    """Listing a user's groups proxies to keycloak user_group_list."""
    user_id = '00000000-0000-0000-0000-000000000000'
    keycloak_mock.user_group_list.return_value = [{'id': user_id, 'name': 'admin'}]
    rv = client.get(
        f'{API_BASE}/auth/user/{user_id}/groups',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.user_group_list.assert_called_with(user_id)
    assert rv.status_code == 200
    assert rv.json == [{'id': user_id, 'name': 'admin', '_href': ANY}]
def test_add_user_group(client, keycloak_mock):
    """Adding a user to a group proxies to keycloak group_user_add."""
    user_id = '00000000-0000-0000-0000-000000000000'
    group_id = '00000000-0004-0003-0002-000000000001'
    keycloak_mock.group_user_add.return_value = None
    rv = client.post(
        f'{API_BASE}/auth/user/{user_id}/groups',
        headers={'Authorization': 'Bearer foobar'},
        json={'id': group_id},
    )
    keycloak_mock.group_user_add.assert_called_with(user_id, group_id)
    assert rv.status_code == 200
    assert rv.json == {}
def test_delete_user_group(client, keycloak_mock):
    """Removing a user from a group proxies to keycloak group_user_remove."""
    user_id = '00000000-0000-0000-0000-000000000000'
    group_id = '00000000-0004-0003-0002-000000000001'
    keycloak_mock.group_user_remove.return_value = None
    rv = client.delete(
        f'{API_BASE}/auth/user/{user_id}/groups',
        headers={'Authorization': 'Bearer foobar'},
        json={'id': group_id},
    )
    keycloak_mock.group_user_remove.assert_called_with(user_id, group_id)
    assert rv.status_code == 200
    assert rv.json == {}
def test_list_groups(client, keycloak_mock):
    """Group listing returns records augmented with _href links."""
    keycloak_mock.group_list.return_value = [{
        'id': '00000000-0000-0000-0000-000000000000',
        'name': 'admin',
    }]
    rv = client.get(
        f'{API_BASE}/auth/group',
        headers={'Authorization': 'Bearer foobar'},
    )
    assert rv.status_code == 200
    assert rv.json == [{
        'id': '00000000-0000-0000-0000-000000000000',
        'name': 'admin',
        '_href': ANY,
    }]
def test_create_group(client, keycloak_mock):
    """Group creation forwards the payload and returns the created record."""
    group_id = '00000000-0004-0003-0002-000000000001'
    group_data = {'name': 'admin'}
    keycloak_mock.group_create.return_value = group_id
    keycloak_mock.group_get.return_value = group_data | {'id': group_id}
    rv = client.post(
        f'{API_BASE}/auth/group',
        headers={'Authorization': 'Bearer foobar'},
        json=group_data,
    )
    keycloak_mock.group_create.assert_called_with(group_data)
    keycloak_mock.group_get.assert_called_with(group_id)
    assert rv.status_code == 200
    assert rv.json == group_data | {'id': group_id, '_href': ANY}
def test_get_group(client, keycloak_mock):
    """Fetching a group by id proxies to keycloak group_get."""
    group_id = '00000000-0004-0003-0002-000000000001'
    group_data = {'name': 'admin'}
    keycloak_mock.group_get.return_value = group_data | {'id': group_id}
    rv = client.get(
        f'{API_BASE}/auth/group/{group_id}',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.group_get.assert_called_with(group_id)
    assert rv.status_code == 200
    assert rv.json == group_data | {'id': group_id, '_href': ANY}
def test_update_group(client, keycloak_mock):
    """PATCHing a group forwards the changes and returns the updated record."""
    group_id = '00000000-0004-0003-0002-000000000001'
    group_data = {'name': 'new-admin'}
    keycloak_mock.group_update.return_value = group_id
    keycloak_mock.group_get.return_value = group_data | {'id': group_id}
    rv = client.patch(
        f'{API_BASE}/auth/group/{group_id}',
        headers={'Authorization': 'Bearer foobar'},
        json=group_data,
    )
    keycloak_mock.group_update.assert_called_with(group_id, group_data)
    keycloak_mock.group_get.assert_called_with(group_id)
    assert rv.status_code == 200
    assert rv.json == group_data | {'id': group_id, '_href': ANY}
def test_delete_group(client, keycloak_mock):
    """Deleting a group proxies to keycloak group_delete and returns {}."""
    group_id = '00000000-0004-0003-0002-000000000001'
    keycloak_mock.group_delete.return_value = group_id
    rv = client.delete(
        f'{API_BASE}/auth/group/{group_id}',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.group_delete.assert_called_with(group_id)
    assert rv.status_code == 200
    assert rv.json == {}
def test_list_group_users(client, keycloak_mock):
    """Listing a group's members proxies to keycloak group_user_list."""
    group_id = '00000000-0004-0003-0002-000000000001'
    user_data = {
        'id': '00000000-0000-0000-0000-000000000000',
        'username': 'user',
    }
    keycloak_mock.group_user_list.return_value = [user_data]
    rv = client.get(
        f'{API_BASE}/auth/group/{group_id}/users',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.group_user_list.assert_called_with(group_id)
    assert rv.status_code == 200
    assert rv.json == [user_data | {'_href': ANY}]
def test_list_roles(client, keycloak_mock):
    """Role listing returns records augmented with _href links."""
    keycloak_mock.role_list.return_value = [{
        'id': '00000000-000d-000c-000b-00000000000a',
        'name': 'admin',
    }]
    rv = client.get(
        f'{API_BASE}/auth/role',
        headers={'Authorization': 'Bearer foobar'},
    )
    assert rv.status_code == 200
    assert rv.json == [{
        'id': '00000000-000d-000c-000b-00000000000a',
        'name': 'admin',
        '_href': ANY,
    }]
def test_create_role(client, keycloak_mock):
    """POST /auth/role creates the role in Keycloak, then returns the fresh copy."""
    role_id = '00000000-000d-000c-000b-00000000000a'
    role_data = {'name': 'admin'}
    keycloak_mock.role_create.return_value = role_id
    keycloak_mock.role_get.return_value = {**role_data, 'id': role_id}
    resp = client.post(
        f'{API_BASE}/auth/role',
        json=role_data,
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.role_create.assert_called_with(role_data)
    keycloak_mock.role_get.assert_called_with(role_id)
    assert resp.status_code == 200
    assert resp.json == {**role_data, 'id': role_id, '_href': ANY}
def test_get_role(client, keycloak_mock):
    """GET /auth/role/<id> fetches a single role from Keycloak."""
    role_id = '00000000-000d-000c-000b-00000000000a'
    role_data = {'name': 'admin'}
    keycloak_mock.role_get.return_value = {**role_data, 'id': role_id}
    resp = client.get(
        f'{API_BASE}/auth/role/{role_id}',
        headers={'Authorization': 'Bearer foobar'},
    )
    keycloak_mock.role_get.assert_called_with(role_id)
    assert resp.status_code == 200
    assert resp.json == {**role_data, 'id': role_id, '_href': ANY}
def test_update_role(client, keycloak_mock):
    """PATCH /auth/role/<id> updates a role and returns its refreshed representation."""
    role_id = '00000000-000d-000c-000b-00000000000a'
    role_data = {'name': 'new-admin'}
    keycloak_mock.role_update.return_value = role_id
    keycloak_mock.role_get.return_value = role_data | {'id': role_id}
    rv = client.patch(
        f'{API_BASE}/auth/role/{role_id}',
        headers={'Authorization': 'Bearer foobar'},
        json=role_data,
    )
    keycloak_mock.role_update.assert_called_with(role_id, role_data)
    # NOTE(review): every sibling test asserts role_get with the *id*
    # (see test_get_role / test_create_role), but here the refresh is
    # asserted with the new *name*. Confirm the view really re-reads the
    # role by name after an update; otherwise this should be
    # role_get.assert_called_with(role_id).
    keycloak_mock.role_get.assert_called_with(role_data['name'])
    assert rv.status_code == 200
    assert rv.json == role_data | {'id': role_id, '_href': ANY}
def test_delete_role(client, keycloak_mock):
    """DELETE /auth/role/<id> removes the role via Keycloak and returns an empty body."""
    role_id = '00000000-000d-000c-000b-00000000000a'
    keycloak_mock.role_delete.return_value = role_id
    resp = client.delete(
        f'{API_BASE}/auth/role/{role_id}',
        headers={'Authorization': 'Bearer foobar'},
    )
    assert resp.status_code == 200
    assert resp.json == {}
    keycloak_mock.role_delete.assert_called_with(role_id)
|
nilq/baby-python
|
python
|
from corehq.apps.commtrack.const import COMMTRACK_USERNAME
from corehq.apps.users.util import DEMO_USER_ID, SYSTEM_USER_ID
from corehq.pillows.utils import (
COMMCARE_SUPPLY_USER_TYPE,
DEMO_USER_TYPE,
MOBILE_USER_TYPE,
SYSTEM_USER_TYPE,
WEB_USER_TYPE,
)
from corehq.warehouse.loaders import (
ApplicationDimLoader,
ApplicationStagingLoader,
DomainMembershipDimLoader,
GroupDimLoader,
GroupStagingLoader,
LocationDimLoader,
LocationStagingLoader,
UserDimLoader,
UserGroupDimLoader,
UserStagingLoader,
)
from corehq.warehouse.models import (
ApplicationDim,
Batch,
DomainMembershipDim,
GroupDim,
LocationDim,
LocationStagingTable,
UserDim,
UserGroupDim,
)
from corehq.warehouse.tests.utils import (
BaseWarehouseTestCase,
create_application_staging_record,
create_batch,
create_group_staging_record,
create_location_records_from_tree,
create_location_staging_record,
create_user_staging_record,
)
def teardown_module():
    # Module-level pytest teardown: drop every warehouse Batch the tests created.
    Batch.objects.all().delete()
class TestUserDim(BaseWarehouseTestCase):
    """UserDimLoader should classify staged users into the five user types."""
    domain = 'user-dim-test'
    slug = 'user_dim'
    @classmethod
    def setUpClass(cls):
        super(TestUserDim, cls).setUpClass()
        cls.batch = create_batch(cls.slug)
        # One staging record per user type: system, demo, commcare-supply,
        # web (no domain, WebUser doc_type), and a plain mobile worker.
        cls.records = [
            create_user_staging_record(
                cls.domain,
                user_id=SYSTEM_USER_ID,
                username='system_bob',
                batch_id=cls.batch.id
            ),
            create_user_staging_record(
                cls.domain,
                user_id=DEMO_USER_ID,
                username='demo_sally',
                batch_id=cls.batch.id
            ),
            create_user_staging_record(
                cls.domain,
                user_id=COMMTRACK_USERNAME,
                username='commtrack_billy',
                batch_id=cls.batch.id
            ),
            create_user_staging_record(
                None,
                user_id='beeboobop',
                username='web',
                doc_type='WebUser',
                batch_id=cls.batch.id
            ),
            create_user_staging_record(
                cls.domain,
                user_id='greengoblin',
                username='mobile',
                batch_id=cls.batch.id
            ),
        ]
    @classmethod
    def tearDownClass(cls):
        for record in cls.records:
            record.delete()
        UserDimLoader().clear_records()
        UserStagingLoader().clear_records()
        super(TestUserDim, cls).tearDownClass()
    def test_user_types(self):
        # Committing the batch materializes one UserDim row per staging record,
        # each tagged with the user type derived from its id/doc_type.
        UserDimLoader().commit(self.batch)
        self.assertEqual(UserDim.objects.count(), 5)
        self.assertEqual(
            UserDim.objects.filter(user_type=SYSTEM_USER_TYPE).first().user_id,
            SYSTEM_USER_ID,
        )
        self.assertEqual(
            UserDim.objects.filter(user_type=DEMO_USER_TYPE).first().user_id,
            DEMO_USER_ID,
        )
        self.assertEqual(
            UserDim.objects.filter(user_type=COMMCARE_SUPPLY_USER_TYPE).first().user_id,
            COMMTRACK_USERNAME,
        )
        self.assertEqual(
            UserDim.objects.filter(user_type=MOBILE_USER_TYPE).first().user_id,
            'greengoblin',
        )
        self.assertEqual(
            UserDim.objects.filter(user_type=WEB_USER_TYPE).first().user_id,
            'beeboobop',
        )
class TestDomainMembershipDim(BaseWarehouseTestCase):
    """DomainMembershipDimLoader should create one row per (user, domain)
    membership and drop rows when a membership disappears from staging."""
    slug = DomainMembershipDimLoader.slug
    @classmethod
    def setUpClass(cls):
        super(TestDomainMembershipDim, cls).setUpClass()
        cls.batch = create_batch(cls.slug)
        cls.bootstrap_user_staging()
    @classmethod
    def bootstrap_user_staging(cls):
        # Two mobile workers in 'test1', plus a web user ('u3') belonging to
        # both 'test1' and 'test2' -> 4 memberships in total.
        create_user_staging_record(
            domain='test1',
            user_id='u1',
            username='mobile1',
            doc_type='CommCareUser',
            batch_id=cls.batch.id,
        )
        create_user_staging_record(
            domain='test1',
            user_id='u2',
            username='mobile2',
            doc_type='CommCareUser',
            batch_id=cls.batch.id,
        )
        create_user_staging_record(
            domain=None,
            username='mobile1',
            user_id='u3',
            doc_type='WebUser',
            batch_id=cls.batch.id,
            domain_memberships=[
                {'domain': 'test1', 'is_admin': True},
                {'domain': 'test2', 'is_admin': False},
            ]
        )
        UserDimLoader().commit(cls.batch)
    @classmethod
    def tearDownClass(cls):
        DomainMembershipDimLoader().clear_records()
        UserDimLoader().clear_records()
        UserStagingLoader().clear_records()
        super(TestDomainMembershipDim, cls).tearDownClass()
    def test_insert_and_update(self):
        DomainMembershipDimLoader().commit(self.batch)
        # should create 4 domain membership columns
        self.assertEqual(
            DomainMembershipDim.objects.count(), 4
        )
        # 'u3' user should have 2 membership columns for each of the domain
        dim_id_of_user3 = UserDim.objects.filter(user_id='u3')[0].id
        self.assertEqual(
            DomainMembershipDim.objects.filter(user_dim_id=dim_id_of_user3).count(),
            2
        )
        ## test removing a domain membership
        # clear and add new staging record to remove a membership of 2
        UserStagingLoader().clear_records()
        create_user_staging_record(
            domain=None,
            username='mobile1',
            user_id='u3',
            doc_type='WebUser',
            batch_id=self.batch.id,
            domain_memberships=[
                {'domain': 'test1', 'is_admin': True},
            ]
        )
        DomainMembershipDimLoader().commit(self.batch)
        # should create 3 domain membership columns instead of 4
        self.assertEqual(
            DomainMembershipDim.objects.count(), 3
        )
        # u3 user should have only 1 domain-membership
        dim_id_of_user3 = UserDim.objects.filter(user_id='u3')[0].id
        self.assertEqual(
            DomainMembershipDim.objects.filter(user_dim_id=dim_id_of_user3).count(),
            1
        )
class TestUserGroupDim(BaseWarehouseTestCase):
    """UserGroupDimLoader should link each UserDim row to its GroupDim rows."""
    domain = 'user-group-dim-test'
    slug = 'user_group_dim'
    @classmethod
    def setUpClass(cls):
        super(TestUserGroupDim, cls).setUpClass()
        cls.batch = create_batch(cls.slug)
        # Three mobile users; 'dogs' group gets two of them, 'cats' gets one.
        cls.blue_dog = create_user_staging_record(cls.domain,
                                                  username='blue-dog',
                                                  batch_id=cls.batch.id)
        cls.black_dog = create_user_staging_record(cls.domain,
                                                   username='black-dog',
                                                   batch_id=cls.batch.id)
        cls.yellow_cat = create_user_staging_record(cls.domain,
                                                    username='yellow-cat',
                                                    batch_id=cls.batch.id)
    @classmethod
    def tearDownClass(cls):
        GroupStagingLoader().clear_records()
        UserStagingLoader().clear_records()
        GroupDimLoader().clear_records()
        UserDimLoader().clear_records()
        UserGroupDimLoader().clear_records()
        super(TestUserGroupDim, cls).tearDownClass()
    def test_basic_user_group_insert(self):
        UserDimLoader().commit(self.batch)
        self.assertEqual(UserDim.objects.count(), 3)
        # Setup group records to have multiple users
        dogs = create_group_staging_record(
            self.domain,
            'dogs',
            user_ids=[self.blue_dog.user_id, self.black_dog.user_id],
            batch_id=self.batch.id
        )
        create_group_staging_record(
            self.domain,
            'cats',
            user_ids=[self.yellow_cat.user_id],
            batch_id=self.batch.id
        )
        GroupDimLoader().commit(self.batch)
        self.assertEqual(GroupDim.objects.count(), 2)
        UserGroupDimLoader().commit(self.batch)
        # One relation per (user, group) pair: 2 dogs + 1 cat.
        self.assertEqual(UserGroupDim.objects.count(), 3)
        dog_relations = UserGroupDim.objects.filter(group_dim=GroupDim.objects.get(group_id=dogs.group_id))
        self.assertEqual(
            dog_relations.count(),
            2,
        )
        self.assertEqual(
            set(dog_relations.values_list('user_dim_id', flat=True)),
            set(UserDim.objects.filter(
                user_id__in=[self.blue_dog.user_id, self.black_dog.user_id]
            ).values_list('id', flat=True)),
        )
class TestLocationDim(BaseWarehouseTestCase):
    """LocationDimLoader should materialize a staged location tree and pick up
    incremental additions, including non-root nodes."""
    domain = 'location-dim-test'
    slug = 'location_dim'
    @classmethod
    def setUpClass(cls):
        super(TestLocationDim, cls).setUpClass()
        cls.batch = create_batch(cls.slug)
    def tearDown(self):
        LocationStagingLoader().clear_records()
        LocationDimLoader().clear_records()
        super(TestLocationDim, self).tearDown()
    def test_location_dim(self):
        # Tree keys are (name, location_type) pairs.
        tree = {
            ('Illinois', 'state'): {
                ('Naperville', 'city'): {
                    ('Home', 'home'): {}
                },
                ('Chicago', 'city'): {},
            }
        }
        create_location_records_from_tree(self.domain, tree, self.batch.id)
        self.assertEqual(LocationStagingTable.objects.count(), 4)
        LocationDimLoader().commit(self.batch)
        self.assertEqual(LocationDim.objects.count(), 4)
        home_location = LocationDim.objects.filter(name='Home').first()
        self.assertEqual(home_location.location_type_name, 'home')
        self.assertEqual(home_location.location_type_code, 'home')
        # A root node is its own level-0 ancestor.
        root_location = LocationDim.objects.filter(name='Illinois').first()
        self.assertEqual(root_location.location_level_0, root_location.sql_location_id)
    def test_location_dim_update(self):
        tree = {
            ('Illinois', 'state'): {
                ('Naperville', 'city'): {
                    ('Home', 'home'): {}
                },
                ('Chicago', 'city'): {},
            }
        }
        create_location_records_from_tree(self.domain, tree, self.batch.id)
        LocationDimLoader().commit(self.batch)
        self.assertEqual(LocationDim.objects.count(), 4)
        # Let's add one more location under Naperville to ensure that the dim updates
        # when it's not a root node
        LocationStagingLoader().clear_records()
        home_location = LocationDim.objects.filter(name='Home').first()
        city_location = LocationDim.objects.filter(name='Naperville').first()
        create_location_staging_record(
            self.domain,
            'Other home',
            sql_location_id=10,
            # Give it the same parent as the Home location
            sql_parent_location_id=city_location.sql_location_id,
            location_type_id=home_location.location_type_id,
            batch_id=self.batch.id
        )
        LocationDimLoader().commit(self.batch)
        self.assertEqual(LocationDim.objects.count(), 5)
class TestAppDim(BaseWarehouseTestCase):
    """ApplicationDimLoader should carry the deleted flag from the doc_type."""
    domain = 'app-dim-test'
    slug = 'app_dim'
    @classmethod
    def setUpClass(cls):
        super(TestAppDim, cls).setUpClass()
        cls.batch = create_batch(cls.slug)
    @classmethod
    def tearDownClass(cls):
        ApplicationDimLoader().clear_records()
        ApplicationStagingLoader().clear_records()
        super(TestAppDim, cls).tearDownClass()
    def test_app_dim(self):
        # 'Application-Deleted' doc_type should map to deleted=True.
        create_application_staging_record(self.domain, 'test-app', batch_id=self.batch.id)
        create_application_staging_record(self.domain, 'test-deleted', doc_type='Application-Deleted', batch_id=self.batch.id)
        ApplicationDimLoader().commit(self.batch)
        self.assertEqual(ApplicationDim.objects.count(), 2)
        test_app = ApplicationDim.objects.get(name='test-app')
        self.assertEqual(test_app.deleted, False)
        deleted_app = ApplicationDim.objects.get(name='test-deleted')
        self.assertEqual(deleted_app.deleted, True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 15 11:52:31 2019
@author: tgadfort
"""
import sys
import re
from datetime import timedelta
from playTypes import playtype
# create logger
import logging
module_logger = logging.getLogger('log.{0}'.format(__name__))
############################################################################################################
## Drive Class
############################################################################################################
class driveclass:
    """Container for one drive: headline/detail objects, possession,
    post-drive score, the drive's plays, and the raw headline/detail text.

    Fixes: `text={}` was a mutable default argument (shared across calls);
    the two bare `except:` clauses also swallowed SystemExit and
    KeyboardInterrupt — now only the expected failures are caught.
    """
    def __init__(self, headline, detail, possession, postdrivehomescore,
                 postdriveawayscore, plays=None, text=None):
        """
        headline/detail: parsed drive summary objects.
        possession: team abbreviation holding the ball.
        postdrivehomescore/postdriveawayscore: score after the drive.
        plays: optional list of plays (settable later via setPlays).
        text: optional dict with "Headline"/"Detail" -> [str] entries.
        """
        self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
        self.ind = 6*" "
        self.headline = headline
        self.detail = detail
        self.possession = possession
        self.postdrivehomescore = postdrivehomescore
        self.postdriveawayscore = postdriveawayscore
        self.plays = plays
        if text is None:
            text = {}
        # text.get(...) is None when the key is absent -> None[0] raises
        # TypeError; an empty list raises IndexError; a non-mapping raises
        # AttributeError. All of those fall back to the string "None".
        try:
            self.headlineText = text.get("Headline")[0]
        except (AttributeError, TypeError, IndexError):
            self.headlineText = str(None)
        try:
            self.detailText = text.get("Detail")[0]
        except (AttributeError, TypeError, IndexError):
            self.detailText = str(None)
    def setPlays(self, plays):
        """Attach the drive's plays after construction."""
        self.plays = plays
    def getHeadlineText(self):
        """Return the raw headline text (or "None")."""
        return self.headlineText
    def getDetailText(self):
        """Return the raw detail text (or "None")."""
        return self.detailText
    def getSummaryText(self):
        """Return a fixed-width one-line summary of the drive."""
        plays = self.detail.plays
        yards = self.detail.yards
        headline = self.headline
        retval = "{0: <5}{1: <5}{2: <25}{3: <25}{4: <25}".format(plays, yards, headline, self.headlineText, self.detailText)
        return retval
############################################################################################################
## Drive Detail Class
############################################################################################################
class drivedetailclass:
    """Plain record of a drive's totals: play count, net yards, elapsed game time."""
    def __init__(self, plays, yards, gametime):
        self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
        self.ind = " " * 6
        self.plays = plays
        self.yards = yards
        self.gametime = gametime
############################################################################################################
## Drive Summary Class
############################################################################################################
class drivesummary:
    """Parse one raw drive dict (Headline/Detail/Posession/Data/scores) into a
    driveclass, available via getFullDrive().

    Fix: the number regex in parseDetail was "([+-?]\\d+|\\d+)" — the '?' was a
    literal inside the character class, so text like "?5" would match and then
    crash int(); the intended optional sign is "[+-]?". Regexes are now raw
    strings as well.
    """
    def __init__(self, drive, fieldMap):
        self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
        self.ind = 4*" "
        self.name = "drivesummary"
        self.headline = None
        self.score = None
        self.details = None
        self.fullDrive = None
        driveNo = drive.get('Drive')
        if driveNo is None:
            raise ValueError("No Drive in drive dict")
        headline = drive.get('Headline')
        if headline is None:
            raise ValueError("No Headline in drive dict")
        self.headlineText = headline
        detail = drive.get('Detail')
        if detail is None:
            raise ValueError("No Detail in drive dict")
        self.detailText = detail
        # NOTE: 'Posession' (sic) is the key actually present in the scraped data.
        possession = drive.get('Posession')
        if possession is None:
            raise ValueError("No Posession in drive dict")
        data = drive.get('Data')
        if data is None:
            raise ValueError("No Data in drive dict")
        ###
        ### For whatever reason home/away scores are reversed on the webpage...
        ###
        homescore = drive.get('AwayScore')
        if homescore is None:
            raise ValueError("No AwayScore in drive dict")
        awayscore = drive.get('HomeScore')
        if awayscore is None:
            raise ValueError("No HomeScore in drive dict")
        self.possession = self.parsePossession(possession, fieldMap)
        self.headline = self.parseHeadline(headline)
        self.detail = self.parseDetail(detail)
        self.homescore = self.parseScore(homescore)
        self.awayscore = self.parseScore(awayscore)
        self.driveplays = data
        self.logger.debug("{0}Drive Summary: [{1} - {2}] {3}".format(self.ind, self.awayscore, self.homescore, headline))
        self.fullDrive = driveclass(headline=self.headline, detail=self.detail, possession=self.possession,
                                    postdrivehomescore=self.homescore, postdriveawayscore=self.awayscore,
                                    text={"Headline": self.headlineText, "Detail": self.detailText})
    def getHeadline(self):
        """Return the raw headline list from the drive dict."""
        return self.headlineText
    def getDetail(self):
        """Return the raw detail list from the drive dict."""
        return self.detailText
    def getPostDriveScore(self):
        """Return [away, home] score after this drive."""
        return [self.awayscore, self.homescore]
    def getDrivePlays(self):
        """Return the raw play data list."""
        return self.driveplays
    def getFullDrive(self):
        """Return the assembled driveclass instance."""
        return self.fullDrive
    def parsePossession(self, possession, fieldMap, debug=False):
        """Map the one-element possession list (a team id) to its abbreviation."""
        if not isinstance(possession, list):
            self.logger.error("Possession is not a list: {0}".format(possession))
        if len(possession) != 1:
            self.logger.error("Not one element in possession list: {0}".format(possession))
        teamID = possession[0]
        teamAbbrev = None
        try:
            teamAbbrev = fieldMap[teamID]
        except KeyError:
            self.logger.error("Could not find {0} in field map: {1}".format(teamID, fieldMap))
        self.logger.debug("{0}Parsed Possession: {1}".format(self.ind, teamAbbrev))
        return teamAbbrev
    def parseHeadline(self, headline, debug=False):
        """Resolve the one-element headline list to a play-type name."""
        play = None
        if isinstance(headline, list):
            if len(headline) >= 1:
                pt = playtype()
                play = pt.getPlay(headline[0]).name
            else:
                self.logger.error("Not one headline entry: {0}".format(headline))
        else:
            self.logger.error("Headline is not a list: {0}".format(headline))
        self.logger.debug("{0}Parsed Headline: {1}".format(self.ind, play))
        return play
    def parseScore(self, score, debug=False):
        """Convert the one-element score list to an int (logged, not raised, on failure)."""
        if not isinstance(score, list):
            self.logger.error("Could not determine score type: {0}".format(score))
        if len(score) != 1:
            self.logger.error("Not one detail entry: {0}".format(score))
        scoredata = score[0]
        try:
            scoredata = int(scoredata)
        except (TypeError, ValueError):
            self.logger.error("Could not find an integer score for {0}".format(scoredata))
        self.logger.debug("{0}Parsed Score: {1}".format(self.ind, scoredata))
        return scoredata
    def parseDetail(self, detail, debug=False):
        """Parse a detail string like '10 plays, 75 yards, 4:05' into a
        drivedetailclass; the clock part is optional. Raises ValueError when
        the text cannot be parsed."""
        if debug:
            fname = sys._getframe().f_code.co_name
            print("FUNC {0}".format(fname))
        if not isinstance(detail, list):
            raise ValueError("Could not determine detail play type: {0}".format(detail))
        if len(detail) != 1:
            raise ValueError("Not one detail entry: {0}".format(detail))
        detaildata = detail[0]
        # Raw strings; "[+-]?" is an optional sign (was "[+-?]", a literal '?').
        yards = r"(yards|yard|Yds|yds|Yd|yd)"
        plays = r"(play|plays)"
        num = r"([+-]?\d+)"
        totalplays = None
        totalyards = None
        totalclock = None
        # Full form: "<n> plays, <n> yards, <mm>:<ss>"
        m = re.search(r"{0}\s{1},\s{2}\s{3},\s{4}:{5}".format(num, plays, num, yards, num, num), detaildata)
        if m is not None:
            groups = m.groups()
            totalplays = int(groups[0])
            totalyards = int(groups[2])
            totalclock = timedelta(minutes=int(groups[4]), seconds=int(groups[5]))
        if totalplays is None and totalyards is None and totalclock is None:
            # Clock-less form: "<n> plays, <n> yards"
            m = re.search(r"{0}\s{1},\s{2}\s{3}".format(num, plays, num, yards), detaildata)
            if m is not None:
                groups = m.groups()
                totalplays = int(groups[0])
                totalyards = int(groups[2])
                totalclock = timedelta(minutes=0, seconds=0)
        if totalplays is None and totalyards is None and totalclock is None:
            raise ValueError("Could not parse drive detail: {0}".format(detaildata))
        drivedetail = drivedetailclass(plays=totalplays, yards=totalyards, gametime=totalclock)
        return drivedetail
|
nilq/baby-python
|
python
|
"""
Abstractions for lazy compositions/manipulations of And Inverter
Graphs.
"""
from __future__ import annotations
from typing import (Union, FrozenSet, Callable, Tuple,
Mapping, Sequence, Optional)
import attr
import funcy as fn
from bidict import bidict
from pyrsistent import pmap
from pyrsistent.typing import PMap
import aiger as A
from aiger.aig import AIG, Node, Input, LatchIn
from aiger.aig import ConstFalse
@attr.s(frozen=True, auto_attribs=True)
class LazyAIG:
    """Base class for lazy AIG compositions.

    Subclasses implement __call__ plus the latch2init/inputs/outputs/comments
    properties; everything else (flattening, composition operators,
    relabeling, unrolling) is defined here in terms of those.
    """
    def __call__(self, inputs, latches=None, *, lift=None):
        # Abstract: evaluate the circuit, returning (output_map, latch_map).
        pass
    @property
    def latch2init(self):
        # Abstract: mapping of latch name -> initial value.
        pass
    @property
    def inputs(self):
        # Abstract: set of input names.
        pass
    @property
    def outputs(self):
        # Abstract: set of output names.
        pass
    @property
    def comments(self):
        # Abstract: tuple of comment strings.
        pass
    def write(self, path):
        # Flattens to a concrete AIG first (see `aig` below).
        self.aig.write(path)
    relabel = AIG.relabel
    simulator = AIG.simulator
    simulate = AIG.simulate
    @property
    def latches(self) -> FrozenSet[str]:
        return frozenset(self.latch2init.keys())
    @property
    def lazy_aig(self) -> LazyAIG:
        return self
    @property
    def aig(self) -> AIG:
        """Returns the flattened AIG represented by this LazyAIG."""
        false = ConstFalse()
        inputs = {i: Input(i) for i in self.inputs}
        latches = {i: LatchIn(i) for i in self.latches}
        def lift(obj):
            # Promote bools to constant nodes; pass AIG nodes through.
            if isinstance(obj, Node):
                return obj
            assert isinstance(obj, bool)
            return ~false if obj else false
        node_map, latch_map = self(inputs, latches=latches, lift=lift)
        return AIG(
            comments=self.comments,
            inputs=self.inputs,
            node_map=node_map,
            latch_map=latch_map,
            latch2init=self.latch2init,
        )
    def __rshift__(self, other: AIG_Like) -> LazyAIG:
        """Cascading composition. Feeds self into other."""
        return Cascading(self, other)
    def __lshift__(self, other: AIG_Like) -> LazyAIG:
        """Cascading composition. Feeds other into self."""
        return lazy(other) >> self
    def __or__(self, other: AIG_Like) -> LazyAIG:
        """Parallel composition between self and other."""
        assert not self.latches & other.latches
        assert not self.outputs & other.outputs
        return Parallel(self, other)
    def cutlatches(self, latches=None, renamer=None) -> Tuple[LazyAIG, Labels]:
        """Returns LazyAIG where the latches specified
        in `latches` have been converted into inputs/outputs.
        - If `latches` is `None`, then all latches are cut.
        - `renamer`: is a function from strings to strings which
          determines how to rename latches to avoid name collisions.
        Also returns a mapping latch -> (renamed, initial value).
        """
        lcirc = CutLatches(self, renamer=renamer, cut=latches)
        l2init = dict(self.latch2init)
        lmap = {k: (lcirc.renamer(k), l2init[k]) for k in lcirc.cut}
        return lcirc, lmap
    def loopback(self, *wirings) -> LazyAIG:
        """Returns result of feeding outputs specified in `*wirings` to
        inputs specified in `wirings`.
        Each positional argument (element of wirings) should have the following
        schema:
        {
          'input': str,
          'output': str,
          'latch': str, # what to name the new latch.
          'init': bool, # new latch's initial value.
          'keep_output': bool, # whether output is consumed by feedback.
        }
        """
        return LoopBack(self, wirings=wirings)
    def unroll(self, horizon, *, init=True, omit_latches=True,
               only_last_outputs=False) -> LazyAIG:
        """
        Returns circuit which computes the same function as
        the sequential circuit after `horizon` many inputs.
        Each input/output has `##time_{time}` appended to it to
        distinguish different time steps.
        """
        return A.Unrolled(
            self, horizon, init, omit_latches, only_last_outputs
        )
    def __getitem__(self, others):
        """Relabel inputs, outputs, or latches.
        `others` is a tuple, (kind, relabels), where
        1. kind in {'i', 'o', 'l'}
        2. relabels is a mapping from old names to new names.
        Note: The syntax is meant to resemble variable substitution
        notations, i.e., foo[x <- y] or foo[x / y].
        """
        assert isinstance(others, tuple) and len(others) == 2
        kind, relabels = others
        assert kind in {'i', 'o', 'l'}
        key = {
            'i': 'input_relabels',
            'l': 'latch_relabels',
            'o': 'output_relabels',
        }.get(kind)
        return A.Relabeled(self, **{key: relabels})
    def reinit(self, latch2init) -> LazyAIG:
        """Update latch initial values based on mapping provided."""
        assert set(latch2init.keys()) <= self.latches
        return UpdatedLatchInits(circ=self, latch2init=latch2init)
AIG_Like = Union[AIG, LazyAIG]
Labels = Mapping[str, str]
def walk_keys(func, mapping):
    """Return a plain dict with *func* applied to every key of *mapping*."""
    return {func(key): value for key, value in dict(mapping).items()}
def omit(mapping, keys):
    """Return a plain dict copy of *mapping* without the given *keys*."""
    return {key: value for key, value in dict(mapping).items() if key not in keys}
def project(mapping, keys):
    """Return a plain dict restricted to *keys* (in the order of *keys*)."""
    as_dict = dict(mapping)
    return {key: as_dict[key] for key in keys if key in as_dict}
@attr.s(frozen=True, auto_attribs=True)
class Parallel(LazyAIG):
    """Parallel composition: left and right evaluated side by side on the
    same input map; outputs and latches are merged (assumed disjoint —
    see LazyAIG.__or__)."""
    circ_doc = None  # (no extra fields beyond left/right)
    left: AIG_Like
    right: AIG_Like
    def __call__(self, inputs, latches=None, *, lift=None):
        out_l, lmap_l = self.left(inputs, latches=latches, lift=lift)
        out_r, lmap_r = self.right(inputs, latches=latches, lift=lift)
        return fn.merge(out_l, out_r), fn.merge(lmap_l, lmap_r)
    def _merge_maps(self, key):
        # Union of the two sub-circuits' mapping-valued attribute `key`.
        map1, map2 = [pmap(getattr(c, key)) for c in (self.left, self.right)]
        return map1 + map2
    @property
    def latch2init(self):
        return self._merge_maps('latch2init')
    @property
    def inputs(self):
        return self.left.inputs | self.right.inputs
    @property
    def outputs(self):
        return self.left.outputs | self.right.outputs
    @property
    def comments(self):
        return self.left.comments + self.right.comments
@attr.s(frozen=True, auto_attribs=True)
class Wire:
    """One feedback wiring: route output `output` back to input `input`
    through a new latch named `latch` (initial value `init`)."""
    input: str
    output: str
    latch: str
    keep_output: bool = True  # whether `output` remains externally visible
    init: bool = True
def convert_wirings(wirings):
    """attrs converter: turn wiring dicts into Wire instances.

    The latch name defaults to the input name. NOTE: setdefault mutates the
    caller-supplied dicts in place.
    """
    for wire in wirings:
        wire.setdefault('latch', wire['input'])
    return tuple(Wire(**w) for w in wirings)
@attr.s(frozen=True, auto_attribs=True)
class LoopBack(LazyAIG):
    """Feed selected outputs of `circ` back to its inputs via fresh latches
    (see LazyAIG.loopback for the wiring schema)."""
    circ: AIG_Like
    wirings: Sequence[Wire] = attr.ib(converter=convert_wirings)
    def __call__(self, inputs, latches=None, *, lift=None):
        if latches is None:
            latches = pmap()
        latches = dict(self.latch2init + latches)  # Override initial values.
        # Each wired input is driven by its feedback latch instead of the
        # environment. NOTE: mutates the caller-supplied `inputs` dict.
        for wire in self.wirings:
            inputs[wire.input] = latches[wire.latch]
            del latches[wire.latch]
        omap, lmap = self.circ(inputs, latches=latches, lift=lift)
        # The wired output's value becomes the latch's next-state value.
        for wire in self.wirings:
            out, latch = wire.output, wire.latch
            lmap[latch] = omap[out]
            if not wire.keep_output:
                del omap[out]
        return omap, lmap
    @property
    def latch2init(self):
        latch2init = pmap(self.circ.latch2init).evolver()
        for wire in self.wirings:
            latch2init[wire.latch] = wire.init
        return latch2init.persistent()
    @property
    def inputs(self):
        return self.circ.inputs - {w.input for w in self.wirings}
    @property
    def outputs(self):
        omitted = {w.output for w in self.wirings if not w.keep_output}
        return self.circ.outputs - omitted
    @property
    def comments(self):
        return self.circ.comments
def convert_renamer(renamer):
    """attrs converter: default the latch renamer to fresh-name generation.

    Memoized so repeated queries for the same latch return the same name.
    """
    if renamer is None:
        def renamer(*_):
            return A.common._fresh()
    return fn.memoize(renamer)
@attr.s(frozen=True, auto_attribs=True)
class CutLatches(LazyAIG):
    """Expose the latches in `cut` as renamed inputs (current state) and
    outputs (next state); `cut=None` means all latches."""
    circ: AIG_Like
    renamer: Callable[[str], str] = attr.ib(converter=convert_renamer)
    cut: Optional[FrozenSet[str]] = None
    def __attrs_post_init__(self):
        # Frozen attrs class: object.__setattr__ needed to fill the default.
        if self.cut is None:
            object.__setattr__(self, "cut", self.circ.latches)
    def __call__(self, inputs, latches=None, *, lift=None):
        inputs = dict(inputs)
        if latches is None:
            latches = pmap()
        latches = dict(self.latch2init + latches)  # Override initial values.
        # Cut latches take their current value from the renamed input.
        for latch in self.cut:
            new_name = self.renamer(latch)
            latches[latch] = inputs[new_name]
            del inputs[new_name]
        omap, lmap = self.circ(inputs, latches=latches, lift=lift)
        # Their next-state value is surfaced as the renamed output.
        for latch in self.cut:
            new_name = self.renamer(latch)
            omap[new_name] = lmap[latch]
            del lmap[latch]
        return omap, lmap
    @property
    def latch2init(self):
        return pmap(omit(self.circ.latch2init, self.cut))
    @property
    def inputs(self):
        return self.circ.inputs | set(map(self.renamer, self.cut))
    @property
    def outputs(self):
        return self.circ.outputs | set(map(self.renamer, self.cut))
    @property
    def comments(self):
        return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Cascading(LazyAIG):
    """Series composition: left's outputs that match right's inputs are fed
    through (the "interface"); the rest pass straight out."""
    left: AIG_Like
    right: AIG_Like
    def __call__(self, inputs, latches=None, *, lift=None):
        inputs_l = project(inputs, self.left.inputs)
        omap_l, lmap_l = self.left(inputs_l, latches=latches, lift=lift)
        inputs_r = project(inputs, self.right.inputs)
        inputs_r.update(omap_l)  # <--- Cascade setup happens here.
        # Interface outputs are consumed by `right`; drop them from the result.
        omap_l = omit(omap_l, self._interface)
        omap_r, lmap_r = self.right(inputs_r, latches=latches, lift=lift)
        return fn.merge(omap_l, omap_r), fn.merge(lmap_l, lmap_r)
    def _merge_maps(self, key):
        # Union of the two sub-circuits' mapping-valued attribute `key`.
        map1, map2 = [pmap(getattr(c, key)) for c in (self.left, self.right)]
        return map1 + map2
    @property
    def latch2init(self):
        return self._merge_maps('latch2init')
    @property
    def _interface(self):
        return self.left.outputs & self.right.inputs
    @property
    def inputs(self):
        return self.left.inputs | (self.right.inputs - self._interface)
    @property
    def outputs(self):
        return self.right.outputs | (self.left.outputs - self._interface)
    @property
    def comments(self):
        return self.left.comments + self.right.comments
def _relabel_map(relabels, mapping):
    """Rename keys of *mapping* via *relabels* (identity when absent); pmap result."""
    renamed = {relabels.get(key, key): val for key, val in dict(mapping).items()}
    return pmap(renamed)
@attr.s(frozen=True, auto_attribs=True)
class UpdatedLatchInits(LazyAIG):
    """Wrapper that overrides some of `circ`'s latch initial values."""
    circ: AIG_Like
    _latch2init: PMap[str, bool] = pmap()
    def __call__(self, inputs, latches=None, *, lift=None):
        if latches is None:
            latches = pmap()
        latches = dict(self.latch2init + latches)  # Override initial values.
        return self.circ(inputs, latches=latches, lift=lift)
    @property
    def latch2init(self):
        # pmap '+' = right-biased update: overrides win over circ's defaults.
        return self.circ.latch2init + self._latch2init
    @property
    def inputs(self):
        return self.circ.inputs
    @property
    def outputs(self):
        return self.circ.outputs
    @property
    def comments(self):
        return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Relabeled(LazyAIG):
    """Rename `circ`'s inputs/latches/outputs via old-name -> new-name maps."""
    circ: AIG_Like
    input_relabels: PMap[str, str] = pmap()
    latch_relabels: PMap[str, str] = pmap()
    output_relabels: PMap[str, str] = pmap()
    def __call__(self, inputs, latches=None, *, lift=None):
        if latches is None:
            latches = pmap()
        latches = dict(self.latch2init + latches)  # Override initial values.
        # Incoming maps use the *new* names; invert to feed circ old names.
        new2old_i = bidict(self.input_relabels).inv
        new2old_l = bidict(self.latch_relabels).inv
        inputs = _relabel_map(new2old_i, inputs)
        latches = _relabel_map(new2old_l, latches)
        omap, lmap = self.circ(inputs, latches=latches, lift=lift)
        # Results come back with old names; map forward to the new ones.
        omap = _relabel_map(self.output_relabels, omap)
        lmap = _relabel_map(self.latch_relabels, lmap)
        return dict(omap), dict(lmap)
    @property
    def latch2init(self):
        return _relabel_map(self.latch_relabels, self.circ.latch2init)
    @property
    def inputs(self):
        old_inputs = self.circ.inputs
        return frozenset(self.input_relabels.get(i, i) for i in old_inputs)
    @property
    def outputs(self):
        old_outputs = self.circ.outputs
        return frozenset(self.output_relabels.get(i, i) for i in old_outputs)
    @property
    def comments(self):
        return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Unrolled(LazyAIG):
    """Time-unrolled (combinational) view of `circ` over `horizon` steps;
    names carry a '##time_{t}' suffix (see LazyAIG.unroll)."""
    circ: AIG_Like
    horizon: int
    init: bool = True               # use circ's latch inits (vs. take them as inputs)
    omit_latches: bool = True       # drop latch values from the outputs
    only_last_outputs: bool = False  # expose only the final step's outputs
    def __call__(self, inputs, latches=None, *, lift=None):
        circ, omit_latches, init = self.circ, self.omit_latches, self.init
        horizon, only_last_outputs = self.horizon, self.only_last_outputs
        if not omit_latches:
            assert (circ.latches & circ.outputs) == set()
        if not init:
            assert (circ.latches & circ.inputs) == set()
        # NOTE(review): in the init=False branch the latch seeds are taken
        # with project(inputs, circ.inputs); verify this shouldn't project
        # the latch names instead — compare with the `inputs` property below,
        # which adds circ.latches to the input set when init is False.
        latches = circ.latch2init if init else project(inputs, circ.inputs)
        if init:
            inputs = omit(inputs, circ.inputs)
        outputs = {}
        for time in range(horizon):
            # Feed step `time`'s suffixed inputs; latches thread through.
            omap, latches = circ(
                inputs={i: inputs[f'{i}##time_{time}'] for i in circ.inputs},
                latches=latches,
                lift=lift
            )
            if (not only_last_outputs) or (time + 1 == horizon):
                template = '{}' + f'##time_{time + 1}'
                outputs.update(walk_keys(template.format, omap))
                if not self.omit_latches:
                    outputs.update(walk_keys(template.format, latches))
        assert set(outputs.keys()) == self.outputs
        # Unrolled circuit is combinational: no latches remain.
        return dict(outputs), {}
    @property
    def latch2init(self):
        return pmap()
    def __with_times(self, keys, times):
        for time in times:
            template = '{}' + f'##time_{time}'
            yield from map(template.format, keys)
    def _with_times(self, keys, times):
        # All of `keys` suffixed with each time step in `times`.
        return frozenset(self.__with_times(keys, times))
    @property
    def inputs(self):
        base = set() if self.init else self.circ.latches
        base |= self.circ.inputs
        return self._with_times(base, times=range(self.horizon))
    @property
    def outputs(self):
        start = self.horizon if self.only_last_outputs else 1
        base = set() if self.omit_latches else self.circ.latches
        base |= self.circ.outputs
        return self._with_times(base, times=range(start, self.horizon + 1))
    @property
    def comments(self):
        return self.circ.comments
@attr.s(frozen=True, auto_attribs=True)
class Lifted(LazyAIG):
    """Transparent wrapper giving a concrete AIG the LazyAIG interface."""
    circ: AIG_Like
    def __call__(self, inputs, latches=None, *, lift=None):
        return self.circ(inputs=inputs, latches=latches, lift=lift)
    @property
    def latch2init(self):
        return self.circ.latch2init
    @property
    def inputs(self):
        return self.circ.inputs
    @property
    def outputs(self):
        return self.circ.outputs
    @property
    def comments(self):
        return self.circ.comments
def lazy(circ: Union[AIG, LazyAIG]) -> LazyAIG:
    """Lifts AIG to a LazyAIG (wraps it in Lifted; LazyAIGs pass through wrapped too)."""
    return Lifted(circ)
__all__ = ['lazy', 'LazyAIG', 'Parallel', 'LoopBack', 'CutLatches',
'Cascading', 'Relabeled', 'Unrolled', 'AIG_Like']
|
nilq/baby-python
|
python
|
import numpy as np
import xobjects as xo
import xpart as xp
# Example: round-trip an xpart Particles object through an HDF5 file.
# Create a Particles on your selected context (default is CPU)
# NOTE(review): ContextCupy requires a CUDA-capable GPU and cupy installed;
# use xo.ContextCpu() to run this example without a GPU.
context = xo.ContextCupy()
part = xp.Particles(_context=context, x=[1,2,3])
##############
# PANDAS/HDF #
##############
# Save particles to hdf file via pandas
import pandas as pd
df = part.to_pandas()
df.to_hdf('part.hdf', key='df', mode='w')
# Read particles from hdf file via pandas
part_from_pdhdf = xp.Particles.from_pandas(pd.read_hdf('part.hdf'))
|
nilq/baby-python
|
python
|
# proxy module
from pyface.i_file_dialog import *
|
nilq/baby-python
|
python
|
import unicodedata
from django.utils.timezone import make_aware
from eagle.models import EDINETCompany, EDINETDocument
class EDINETDocumentRegister():
    """Persists an EDINET filing's metadata and file paths into the database."""

    @classmethod
    def register_document(cls, document, xbrl_path, pdf_path):
        """Create or update the ``EDINETDocument`` row for *document*.

        Parameters
        ----------
        document : parsed EDINET filing metadata (API result object).
        xbrl_path : path of the downloaded XBRL archive.
        pdf_path : path of the downloaded PDF.

        Returns
        -------
        EDINETDocument
            The saved (inserted or updated) document row.
        """
        def normalize(text):
            # EDINET text fields mix full-width and half-width characters;
            # NFKC normalization unifies them.  None passes through.
            if text is not None:
                return unicodedata.normalize("NFKC", text)
            else:
                return text

        # Get-or-create the filing company, keyed by Japanese Corporate Number.
        # (Previously the code pre-assigned company = None before the lookup
        # and re-assigned it in the except branch -- both were redundant.)
        try:
            company = EDINETCompany.objects.get(jcn=document.jcn)
        except EDINETCompany.DoesNotExist:
            name = normalize(document.filer_name)
            company = EDINETCompany(
                local_name=name,
                global_name=name,
                jcn=document.jcn,
                edinet_code=document.edinet_code,
                sec_code=document.sec_code,
                fund_code=document.fund_code
            )
            company.save()

        # Optional parent filing (e.g. an amendment references its original).
        parent = None
        if document.parent_document_id:
            try:
                parent = EDINETDocument.objects.get(
                    edinet_document_id=document.parent_document_id)
            except EDINETDocument.DoesNotExist:
                parent = None

        # Update the existing row when re-registering the same document id,
        # otherwise insert a fresh one.  (The dead pre-assignment
        # ``_document = EDINETDocument()`` before the try was removed.)
        try:
            _document = EDINETDocument.objects.get(
                edinet_document_id=document.document_id)
        except EDINETDocument.DoesNotExist:
            _document = EDINETDocument()

        # Register Company's document
        title = normalize(document.title)
        reason = normalize(document.submit_reason)
        _document.company = company
        # Fall back to the parent's accounting period when the child omits it.
        if document.period_start is None and parent is not None:
            _document.period_start = parent.period_start
        else:
            _document.period_start = document.period_start
        if document.period_end is None and parent is not None:
            _document.period_end = parent.period_end
        else:
            _document.period_end = document.period_end
        _document.submitted_date = make_aware(document.submitted_date)
        _document.lang = "ja"
        _document.path = xbrl_path
        _document.xbrl_path = xbrl_path
        _document.pdf_path = pdf_path
        _document.edinet_document_id = document.document_id
        _document.edinet_document_type = document.doc_type_code
        _document.title = title
        _document.ordinance_code = document.ordinance_code
        _document.form_code = document.form_code
        _document.issuer_edinet_code = document.issuer_edinet_code
        _document.subject_edinet_code = document.subject_edinet_code
        _document.subsidiary_edinet_code = document.subsidiary_edinet_code
        _document.submit_reason = reason
        _document.parent_document_id = parent
        # operated_date may be absent; make_aware(None) would raise.
        if document.operated_date is None:
            _document.operated_date = document.operated_date
        else:
            _document.operated_date = make_aware(document.operated_date)
        _document.withdraw_status = document.withdraw_status
        _document.operation_status = document.operation_status
        _document.disclosure_status = document.disclosure_status
        _document.has_attachment = document.has_attachment
        _document.has_xbrl = document.has_xbrl
        _document.has_pdf = document.has_pdf
        _document.has_english_doc = document.has_english_doc
        _document.save()
        return _document
|
nilq/baby-python
|
python
|
import asyncio
import aiohttp
import json
async def pollForex(symbols, authkey):
    """Async generator: round-robin over *symbols*, yielding one Oanda
    practice-API price JSON payload roughly every second.

    Runs forever; fetch failures are logged and polling continues.
    """
    i = 0
    # One long-lived session for the whole polling loop.  The original code
    # created a new aiohttp.ClientSession for every single request, which
    # defeats connection pooling/keep-alive (aiohttp docs recommend a single
    # session per application).
    async with aiohttp.ClientSession() as session:
        while True:
            symbol = symbols[i % len(symbols)]
            try:
                async with session.get(
                        url="https://api-fxpractice.oanda.com/v1/prices",
                        headers={'Authorization': ('Bearer ' + authkey)},
                        params='instruments=' + symbol) as resp:
                    yield (await resp.json())
            except Exception as error:
                # Deliberate best-effort: report and keep polling on any
                # network/JSON failure.
                print("Fetch forex rates from Oanda: " + type(error).__name__ + " " + str(error.args))
            i += 1
            await asyncio.sleep(1)
async def forexPoller(symbols, authkey, orderbookAnalyser):
    """Consume the pollForex stream and report every quote received."""
    async for ticker in pollForex(symbols=symbols, authkey=authkey):
        # Each payload carries a single price entry; pull it out once.
        quote = ticker['prices'][0]
        symbolBase, symbolQuote = quote['instrument'].split("_")
        ask = quote['ask']
        bid = quote['bid']
        print("Received " + symbolBase+"/"+ symbolQuote +
              " prices from Oanda. Ask: " + str(ask) + ", Bid: " + str(bid))
        #orderbookAnalyser.updateForexPrice(ticker['prices'][0])
# Load API credentials from the local cred file; the 'practice' key holds
# the token for Oanda's practice (demo) environment.
with open('./cred/oanda.json') as file:
    authkeys = json.load(file)

# Schedule the poller on the default event loop and run forever.
# The orderbook analyser hook is disabled (None) for now.
asyncio.ensure_future(
    forexPoller(
        symbols=['EUR_USD', 'GBP_USD'],
        authkey=authkeys['practice'],
        orderbookAnalyser=None))
loop = asyncio.get_event_loop()
loop.run_forever()
|
nilq/baby-python
|
python
|
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
# from collections import OrderedDict
import numpy as np
from scipy.stats import multivariate_normal
# import scipy.misc
import tensorflow as tf
# import keras
import keras.backend as KB
import keras.layers as KL
import keras.engine as KE
sys.path.append('..')
import mrcnn.utils as utils
import tensorflow.contrib.util as tfc
import pprint
##----------------------------------------------------------------------------------------------------------------------
## build_predictions
##----------------------------------------------------------------------------------------------------------------------
def build_predictions(norm_input_rois, mrcnn_class, mrcnn_bbox, config):
    '''
    Split output_rois by class id, and add class_id and class_score
    output:
    -------
    pred_tensor: [ Batchsz, Num_Classes, Num_Rois, 7: (y1, x1, y2, x2, class_id, class_score, normalized class score)]
                 y1,x1, y2,x2 are in image dimension format

    NOTE: the per-class bbox deltas are gathered and scaled but NOT applied
    to the rois (see the 22-05-2018 comment below); the boxes placed into
    pred_tensor are the denormalized input rois.
    '''
    batch_size = config.BATCH_SIZE
    num_classes = config.NUM_CLASSES
    h, w = config.IMAGE_SHAPE[:2]
    # num_rois = config.TRAIN_ROIS_PER_IMAGE
    # rois-per-image is taken from the input tensor shape, not from config
    num_rois = KB.int_shape(norm_input_rois)[1]
    scale = tf.constant([h,w,h,w], dtype = tf.float32)
    # dup_scale = tf.reshape(tf.tile(scale, [num_rois]),[num_rois,-1])
    # broadcast [h,w,h,w] to every (image, roi) pair to denormalize the rois
    dup_scale = scale * tf.ones([batch_size, num_rois, 1], dtype = 'float32')
    det_per_class = config.DETECTION_PER_CLASS
    print()
    print(' > build_predictions()')
    print(' num_rois : ', num_rois )
    print(' norm_input_rois.shape : ', type(norm_input_rois), KB.int_shape(norm_input_rois))
    print(' scale.shape : ', type(scale), KB.int_shape(scale), scale.get_shape())
    print(' dup_scale.shape : ', type(dup_scale), KB.int_shape(dup_scale), dup_scale.get_shape())
    print()
    print(' mrcnn_class shape : ', KB.int_shape(mrcnn_class))
    print(' mrcnn_bbox.shape : ', KB.int_shape(mrcnn_bbox), mrcnn_bbox.shape )
    print(' config image shape : ', config.IMAGE_SHAPE, 'h:',h,'w:',w)
    #---------------------------------------------------------------------------
    # Build a meshgrid for image id and bbox to use in gathering of bbox delta information
    #---------------------------------------------------------------------------
    batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size, dtype=tf.int32),
                                         tf.range(num_rois, dtype=tf.int32), indexing = 'ij' )
    #------------------------------------------------------------------------------------
    # use the argmax of each row to determine the dominating (predicted) class
    #------------------------------------------------------------------------------------
    pred_classes = tf.argmax( mrcnn_class,axis=-1,output_type = tf.int32)
    pred_classes_exp = tf.to_float(tf.expand_dims(pred_classes ,axis=-1))
    # print(' pred_classes : ', pred_classes.shape)
    # print(pred_classes.eval())
    # print(' pred_scores : ', pred_scores.shape ,'\n', pred_scores.eval())
    # print(' pred_classes_exp : ', pred_classes_exp.shape)
    # gather only the winning class's score and bbox delta for each roi
    gather_ind = tf.stack([batch_grid , bbox_grid, pred_classes],axis = -1)
    pred_scores = tf.gather_nd(mrcnn_class, gather_ind)
    pred_deltas = tf.gather_nd(mrcnn_bbox , gather_ind)
    #------------------------------------------------------------------------------------
    # 22-05-2018 - stopped using the following code as it was clipping too many bouding
    # boxes to 0 or 128 causing zero area generation
    ##------------------------------------------------------------------------------------
    ## apply delta refinements to the rois, based on deltas provided by the mrcnn head
    ##------------------------------------------------------------------------------------
    pred_deltas *= config.BBOX_STD_DEV
    input_rois = tf.multiply(norm_input_rois , dup_scale )
    # compute "refined rois"  utils.apply_box_deltas_tf(input_rois, pred_deltas)
    # input_rois = utils.apply_box_deltas_tf(input_rois, pred_deltas)
    print(' input_rois.shape : ', type(input_rois), KB.int_shape(input_rois), input_rois.get_shape())
    # print(' mrcnn_class : ', mrcnn_class.shape, mrcnn_class)
    # print(' gather_ind : ', gather_ind.shape, gather_ind)
    # print(' pred_scores : ', pred_scores.shape )
    # print(' pred_deltas : ', pred_deltas.shape )
    # print(' input_rois : ', input_rois.shape, input_rois)
    # print(' refined rois: ', refined_rois.shape, refined_rois)
    # ## Clip boxes to image window
    # # for now we will consider the window [0,0, 128,128]
    # # _, _, window, _ = parse_image_meta(image_meta)
    # window = tf.constant([[0,0,128,128]], dtype =tf.float32)
    # refined_rois = utils.clip_to_window_tf(window, refined_rois)
    # print(' refined rois clipped: ', refined_rois.shape, refined_rois)
    #------------------------------------------------------------------------------------
    ##------------------------------------------------------------------------------------
    ## Build Pred_Scatter: tensor of bounding boxes by Image / Class
    ##------------------------------------------------------------------------------------
    # sequence id is used to preserve the order of rois as passed to this routine
    # This may be important in the post matching process but for now it's not being used.
    # sequence = tf.ones_like(pred_classes, dtype = tf.int32) * (bbox_grid[...,::-1] + 1)
    # sequence = tf.to_float(tf.expand_dims(sequence, axis = -1))
    # print(sequence.shape)
    # print(sequence.eval())
    # pred_array = tf.concat([ refined_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1), sequence], axis=-1)
    #------------------------------------------------------------------------------------
    # rows are [y1, x1, y2, x2, class_id, class_score]; scatter them into a
    # per-class slot so each class has its own roi list
    pred_array = tf.concat([input_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1)], axis=-1)
    scatter_ind = tf.stack([batch_grid , pred_classes, bbox_grid],axis = -1)
    pred_scatt = tf.scatter_nd(scatter_ind, pred_array, [batch_size, num_classes, num_rois, pred_array.shape[-1]])
    print(' pred_array : ', pred_array.shape)
    print(' scatter_ind : ', type(scatter_ind), 'shape', scatter_ind.shape)
    print(' pred_scatter : ', pred_scatt.get_shape())
    ##--------------------------------------------------------------------------------------------
    ## Apply a per class score normalization
    ##--------------------------------------------------------------------------------------------
    # divide each class's scores by that class's max score; guard against
    # all-zero rows to avoid division by zero
    normalizer = tf.reduce_max(pred_scatt[...,-1], axis = -1, keepdims=True)
    normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
    norm_score = tf.expand_dims(pred_scatt[...,-1]/normalizer, axis = -1)
    pred_scatt = tf.concat([pred_scatt, norm_score],axis = -1)
    print(' - Add normalized score --\n')
    print(' normalizer : ', normalizer.shape)
    print(' norm_score : ', norm_score.shape)
    print(' pred_scatter : ', pred_scatt.get_shape())
    ##------------------------------------------------------------------------------------
    ## sort pred_scatter in each class dimension based on bbox scores (last column)
    ##------------------------------------------------------------------------------------
    _, sort_inds = tf.nn.top_k(pred_scatt[...,-1], k=pred_scatt.shape[2])
    # build indexes to gather rows from pred_scatter based on sort order,
    # keeping only the top det_per_class rois per class
    class_grid, batch_grid, roi_grid = tf.meshgrid(tf.range(num_classes),tf.range(batch_size), tf.range(num_rois))
    roi_grid_exp = tf.to_float(tf.expand_dims(roi_grid, axis = -1))
    gather_inds = tf.stack([batch_grid , class_grid, sort_inds],axis = -1)
    pred_tensor = tf.gather_nd(pred_scatt, gather_inds[...,:det_per_class,:], name = 'pred_tensor')
    # append an index to the end of each row --- commented out 30-04-2018
    # pred_tensor = tf.concat([pred_tensor, roi_grid_exp], axis = -1)
    print(' sort_inds : ', type(sort_inds) , ' shape ', sort_inds.shape)
    print(' class_grid : ', type(class_grid) , ' shape ', class_grid.get_shape())
    print(' batch_grid : ', type(batch_grid) , ' shape ', batch_grid.get_shape())
    print(' roi_grid shape : ', type(roi_grid) , ' shape ', roi_grid.get_shape())
    print(' roi_grid_exp : ', type(roi_grid_exp), ' shape ', roi_grid_exp.get_shape())
    print(' gather_inds : ', type(gather_inds) , ' shape ', gather_inds.get_shape())
    print(' pred_tensor : ', pred_tensor.get_shape())
    return pred_tensor
##----------------------------------------------------------------------------------------------------------------------
## build_refined_predictions
##----------------------------------------------------------------------------------------------------------------------
def build_refined_predictions(norm_input_rois, mrcnn_class, mrcnn_bbox, config):
    '''
    Split output_rois by class id, and add class_id and class_score.
    Unlike build_predictions(), the mrcnn bbox deltas ARE applied to the
    input rois and the refined boxes are clipped to the image window.

    output:
    -------
    pred_tensor: [ Batchsz, Num_Classes, Num_Rois, 7: (y1, x1, y2, x2, class_id, class_score, normalized class score)]
                 y1,x1, y2,x2 are in image dimension format
    pred_deltas: the BBOX_STD_DEV-scaled deltas that were applied to the rois
    '''
    batch_size = config.BATCH_SIZE
    num_classes = config.NUM_CLASSES
    h, w = config.IMAGE_SHAPE[:2]
    # rois-per-image is taken from the input tensor shape, not from config
    num_rois = KB.int_shape(norm_input_rois)[1]
    scale = tf.constant([h,w,h,w], dtype = tf.float32)
    # broadcast [h,w,h,w] to every (image, roi) pair to denormalize the rois
    dup_scale = scale * tf.ones([batch_size, num_rois, 1], dtype = 'float32')
    det_per_class = config.DETECTION_PER_CLASS
    print()
    # BUGFIX: this header previously printed ' > build_predictions()', making
    # debug traces from the two routines indistinguishable.
    print(' > build_refined_predictions()')
    print(' num_rois : ', num_rois )
    print(' norm_input_rois.shape : ', type(norm_input_rois), KB.int_shape(norm_input_rois))
    print(' scale.shape : ', type(scale), KB.int_shape(scale), scale.get_shape())
    print(' dup_scale.shape : ', type(dup_scale), KB.int_shape(dup_scale), dup_scale.get_shape())
    print()
    print(' mrcnn_class shape : ', KB.int_shape(mrcnn_class))
    print(' mrcnn_bbox.shape : ', KB.int_shape(mrcnn_bbox), mrcnn_bbox.shape )
    print(' config image shape : ', config.IMAGE_SHAPE, 'h:',h,'w:',w)
    #---------------------------------------------------------------------------
    # Build a meshgrid for image id and bbox to use in gathering of bbox delta information
    #---------------------------------------------------------------------------
    batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size, dtype=tf.int32),
                                         tf.range(num_rois, dtype=tf.int32), indexing = 'ij' )
    #------------------------------------------------------------------------------------
    # use the argmax of each row to determine the dominating (predicted) class
    #------------------------------------------------------------------------------------
    pred_classes = tf.argmax( mrcnn_class,axis=-1,output_type = tf.int32)
    pred_classes_exp = tf.to_float(tf.expand_dims(pred_classes ,axis=-1))
    # gather only the winning class's score and bbox delta for each roi
    gather_ind = tf.stack([batch_grid , bbox_grid, pred_classes],axis = -1)
    pred_scores = tf.gather_nd(mrcnn_class, gather_ind)
    pred_deltas = tf.gather_nd(mrcnn_bbox , gather_ind)
    ##------------------------------------------------------------------------------------
    ## apply delta refinements to the rois, based on deltas provided by the mrcnn head
    ##------------------------------------------------------------------------------------
    pred_deltas = tf.multiply(pred_deltas, config.BBOX_STD_DEV, name = 'pred_deltas')
    input_rois = tf.multiply(norm_input_rois , dup_scale )
    ## compute "refined rois"  utils.apply_box_deltas_tf(input_rois, pred_deltas)
    refined_rois = utils.apply_box_deltas_tf(input_rois, pred_deltas)
    ## Clip boxes to image window
    window = tf.constant([[0,0,h,w]], dtype = tf.float32)
    refined_rois = utils.clip_to_window_tf( window, refined_rois)
    print(' refined rois clipped : ', refined_rois.shape)
    print(' input_rois.shape : ', type(input_rois), KB.int_shape(input_rois), input_rois.get_shape())
    print(' refined_rois.shape : ', type(refined_rois), KB.int_shape(refined_rois), refined_rois.get_shape())
    ##------------------------------------------------------------------------------------
    ## Build Pred_Scatter: tensor of bounding boxes by Image / Class
    ##------------------------------------------------------------------------------------
    # rows are [y1, x1, y2, x2, class_id, class_score]; scatter them into a
    # per-class slot so each class has its own roi list
    pred_array = tf.concat([refined_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1)], axis=-1)
    scatter_ind = tf.stack([batch_grid , pred_classes, bbox_grid],axis = -1)
    pred_scatt = tf.scatter_nd(scatter_ind, pred_array, [batch_size, num_classes, num_rois, pred_array.shape[-1]])
    print(' pred_array : ', pred_array.shape)
    print(' scatter_ind : ', type(scatter_ind), 'shape', scatter_ind.shape)
    print(' pred_scatter : ', pred_scatt.get_shape())
    ##--------------------------------------------------------------------------------------------
    ## Apply a per class score normalization
    ##--------------------------------------------------------------------------------------------
    # divide each class's scores by that class's max; guard all-zero rows
    normalizer = tf.reduce_max(pred_scatt[...,-1], axis = -1, keepdims=True)
    normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
    norm_score = tf.expand_dims(pred_scatt[...,-1]/normalizer, axis = -1)
    pred_scatt = tf.concat([pred_scatt, norm_score],axis = -1)
    print(' - Add normalized score --\n')
    print(' normalizer : ', normalizer.shape)
    print(' norm_score : ', norm_score.shape)
    print(' pred_scatter : ', pred_scatt.get_shape())
    ##------------------------------------------------------------------------------------
    ## sort pred_scatter in each class dimension based on bbox scores (last column)
    ##------------------------------------------------------------------------------------
    _, sort_inds = tf.nn.top_k(pred_scatt[...,-1], k=pred_scatt.shape[2])
    # build indexes to gather rows from pred_scatter based on sort order,
    # keeping only the top det_per_class rois per class
    class_grid, batch_grid, roi_grid = tf.meshgrid(tf.range(num_classes),tf.range(batch_size), tf.range(num_rois))
    roi_grid_exp = tf.to_float(tf.expand_dims(roi_grid, axis = -1))
    gather_inds = tf.stack([batch_grid , class_grid, sort_inds],axis = -1)
    pred_tensor = tf.gather_nd(pred_scatt, gather_inds[...,:det_per_class,:], name = 'pred_refined_tensor')
    print(' sort_inds : ', type(sort_inds) , ' shape ', sort_inds.shape)
    print(' class_grid : ', type(class_grid) , ' shape ', class_grid.get_shape())
    print(' batch_grid : ', type(batch_grid) , ' shape ', batch_grid.get_shape())
    print(' roi_grid shape : ', type(roi_grid) , ' shape ', roi_grid.get_shape())
    print(' roi_grid_exp : ', type(roi_grid_exp), ' shape ', roi_grid_exp.get_shape())
    print(' gather_inds : ', type(gather_inds) , ' shape ', gather_inds.get_shape())
    print(' pred_tensor : ', pred_tensor.get_shape())
    return pred_tensor , pred_deltas
##----------------------------------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
def build_ground_truth(gt_class_ids, norm_gt_bboxes, config):
    '''
    Scatter ground-truth bounding boxes into a per-class tensor so the
    result is shape-compatible with pred_tensor from build_predictions().

    Inputs
    ------
    gt_class_ids   : [batch, num_bboxes] integer class ids (0 = padding)
    norm_gt_bboxes : [batch, num_bboxes, 4] boxes in normalized coordinates
    config         : model config (IMAGE_SHAPE, DETECTION_PER_CLASS, ...)

    Output
    ------
    gt_tensor: [batch, num_classes, det_per_class, 6:
                (y1, x1, y2, x2, class_id, score)] with score duplicated so
                the column count matches pred_tensor; coordinates are in
                image dimensions.
    '''
    batch_size = config.BATCH_SIZE
    num_classes = config.NUM_CLASSES
    h, w = config.IMAGE_SHAPE[:2]
    num_bboxes = KB.int_shape(norm_gt_bboxes)[1]
    scale = tf.constant([h,w,h,w], dtype = tf.float32)
    # dup_scale = tf.reshape(tf.tile(scale, [num_rois]),[num_rois,-1])
    # broadcast [h,w,h,w] over every (image, bbox) pair to denormalize
    dup_scale = scale * tf.ones([batch_size, num_bboxes, 1], dtype = 'float32')
    gt_bboxes = tf.multiply(norm_gt_bboxes , dup_scale )
    det_per_class = config.DETECTION_PER_CLASS
    # num of bounding boxes is determined by bbox_list.shape[1] instead of config.DETECTION_MAX_INSTANCES
    # use of this routine for both input_gt_boxes, and target_gt_deltas
    if num_bboxes == config.DETECTION_MAX_INSTANCES:
        tensor_name = "gt_tensor_max"
    else:
        tensor_name = "gt_tensor"
    print('\n')
    print(' > BUILD_GROUND TRUTH_TF()' )
    print(' num_bboxes : ', num_bboxes, '(building ', tensor_name , ')' )
    print(' gt_class_ids shape : ', gt_class_ids.get_shape(), ' ', KB.int_shape(gt_class_ids))
    print(' norm_gt_bboxes.shape : ', norm_gt_bboxes.get_shape() , ' ', KB.int_shape(norm_gt_bboxes))
    print(' gt_bboxes.shape : ', gt_bboxes.get_shape() , ' ', KB.int_shape(gt_bboxes))
    #---------------------------------------------------------------------------
    # use the argmax of each row to determine the dominating (predicted) class
    # mask identifies class_ids > 0
    #---------------------------------------------------------------------------
    gt_classes_exp = tf.to_float(tf.expand_dims(gt_class_ids ,axis=-1))
    print(' gt_classes_exp : ', gt_classes_exp.get_shape() )
    # real gt boxes (class_id > 0) get score 1, padding rows get score 0
    ones = tf.ones_like(gt_class_ids)
    zeros= tf.zeros_like(gt_class_ids)
    mask = tf.greater(gt_class_ids , 0)
    gt_scores = tf.where(mask, ones, zeros)
    # pred_scores = tf.reduce_max(mrcnn_class ,axis=-1, keep_dims=True)   # (32,)
    gt_scores_exp = tf.to_float(KB.expand_dims(gt_scores, axis=-1))
    print(' gt_scores_exp : ', gt_scores_exp.get_shape())
    ##------------------------------------------------------------------------------------
    ## Generate GT_ARRAY
    ## Note that we add gt_scores_exp TWICE so that the shape of gt_array matches
    ## pred_tensor generated in build_predictions
    ##
    ## sequence id is used to preserve the order of rois as passed to this routine
    ##------------------------------------------------------------------------------------
    batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size, dtype=tf.int32),
                                         tf.range(num_bboxes, dtype=tf.int32), indexing = 'ij' )
    sequence = gt_scores * (bbox_grid[...,::-1] + 1)
    sequence = tf.to_float(tf.expand_dims(sequence, axis = -1))
    gt_array = tf.concat([gt_bboxes, gt_classes_exp, gt_scores_exp, gt_scores_exp, sequence ], axis=2)
    # print(' batch_grid shape ', batch_grid.get_shape())
    # print(' bbox_grid  shape ', bbox_grid.get_shape())
    # print(' sequence shape ', sequence.get_shape())
    ##------------------------------------------------------------------------------
    ## Create indicies to scatter rois out to multi-dim tensor by image id and class
    ## resulting tensor is batch size x num_classes x num_bboxes x 7 (num columns)
    ##------------------------------------------------------------------------------
    scatter_ind = tf.stack([batch_grid , gt_class_ids, bbox_grid],axis = -1)
    gt_scatter = tf.scatter_nd(scatter_ind, gt_array, [batch_size, num_classes, num_bboxes, gt_array.shape[-1] ])
    print(' gt_array shape : ', gt_array.shape , gt_array.get_shape())
    print(' scatter_ind shape : ', scatter_ind.shape, scatter_ind.get_shape())
    print(' tf.shape(gt_array)[-1] : ', gt_array.shape[-1], KB.int_shape(gt_array))
    print(' gt_scatter shape : ', gt_scatter.shape , gt_scatter.get_shape())
    ##-------------------------------------------------------------------------------
    ## sort in each class dimension based on on sequence number (last column)
    ## scatter_nd places bboxs in a sparse fashion --- this sort is to place all bboxes
    ## at the top of the class bbox array
    ##-------------------------------------------------------------------------------
    _ , sort_inds = tf.nn.top_k(tf.abs(gt_scatter[:,:,:,-1]), k=gt_scatter.shape[2])
    # build indexes to gather rows from pred_scatter based on sort order
    class_grid, batch_grid, bbox_grid = tf.meshgrid(tf.range(num_classes),tf.range(batch_size), tf.range(num_bboxes))
    bbox_grid_exp = tf.to_float(tf.expand_dims(bbox_grid, axis = -1))
    gather_inds = tf.stack([batch_grid , class_grid, sort_inds],axis = -1)
    # drop the sequence column ([...,:-1]) and keep top det_per_class per class
    gt_result = tf.gather_nd(gt_scatter[...,:-1], gather_inds[...,:det_per_class,:] , name = tensor_name)
    # append an index to the end of each row --- commented out 30-04-2018
    # gt_result = tf.concat([gt_result, bbox_grid_exp], axis = -1)
    print(' sort_inds : ', type(sort_inds) , ' shape ', sort_inds.shape)
    print(' class_grid : ', type(class_grid) , ' shape ', class_grid.get_shape())
    print(' batch_grid : ', type(batch_grid) , ' shape ', batch_grid.get_shape())
    print(' gather_inds : ', gather_inds.get_shape())
    print(' gt_result.shape : ', KB.int_shape(gt_result), gt_result.get_shape())
    return gt_result
##----------------------------------------------------------------------------------------------------------------------
## INPUTS :
## FCN_HEATMAP [ numn_images x height x width x num classes ]
## PRED_HEATMAP_SCORES
##----------------------------------------------------------------------------------------------------------------------
def build_heatmap(in_tensor, config, names = None):
    """
    Build per-class gaussian heatmaps and per-bbox scores from a bbox tensor.

    Each non-zero bounding box in ``in_tensor`` is turned into a 2-D gaussian
    (MultivariateNormalDiag centered on the box, sigma derived from box size),
    the gaussians are summed per class into a heatmap, the heatmap is
    max-normalized per class, and a coverage score is computed for every box
    against the summed (un-normalized) heatmap.

    Parameters
    ----------
    in_tensor : tensor [batch, num_classes, rois_per_image, cols]
        Per-class bbox tensor; the first 4 columns are y1,x1,y2,x2 in absolute
        pixel scale and column 5 holds the box score (used by
        build_mask_routine).  The last two columns are excluded from the
        non-zero test below.  # NOTE(review): exact column layout beyond
        [0:4] and [5] is inferred from usage — confirm against the builder.
    config : model config object
        Reads IMAGE_SHAPE, BATCH_SIZE, NUM_CLASSES, DETECTION_MAX_INSTANCES.
    names : list of str
        names[0] prefixes the output tensor names.

    Returns
    -------
    gauss_norm   : [batch, height, width, num_classes] normalized heatmap.
    gauss_scores : [batch, num_classes, rois_per_image, cols+4] input bboxes
                   with (gaussian_sum, bbox_area, weighted_sum, norm_score)
                   columns appended.
    """
    num_detections = config.DETECTION_MAX_INSTANCES
    img_h, img_w = config.IMAGE_SHAPE[:2]
    batch_size = config.BATCH_SIZE
    num_classes = config.NUM_CLASSES
    # rois per image is determined by size of input tensor
    # detection mode: config.TRAIN_ROIS_PER_IMAGE
    # ground_truth : config.DETECTION_MAX_INSTANCES
    # strt_cls = 0 if rois_per_image == 32 else 1
    rois_per_image = (in_tensor.shape)[2]
    print('\n ')
    print(' > NEW build_heatmap() for ', names )
    print(' in_tensor shape : ', in_tensor.shape)
    print(' num bboxes per class : ', rois_per_image )
    #-----------------------------------------------------------------------------
    ## Stack non_zero bboxes from in_tensor into pt2_dense
    #-----------------------------------------------------------------------------
    # pt2_ind shape is [?, 3].
    # pt2_ind[0] corresponds to image_index
    # pt2_ind[1] corresponds to class_index
    # pt2_ind[2] corresponds to roi row_index
    # pt2_dense shape is [?, 6]
    # pt2_dense[0] is image index
    # pt2_dense[1:4] roi coordinates
    # pt2_dense[5] is class id
    #-----------------------------------------------------------------------------
    # Boxes whose coordinate columns sum to zero are padding; drop them.
    pt2_sum = tf.reduce_sum(tf.abs(in_tensor[:,:,:,:-2]), axis=-1)
    print(' pt2_sum shape ',pt2_sum.shape)
    # print(pt2_sum[0].eval())
    pt2_ind = tf.where(pt2_sum > 0)
    ## replaced the two operations below with the one above - 15-05-2018
    # pt2_mask = tf.greater(pt2_sum , 0)
    # pt2_ind = tf.where(pt2_mask)
    # print(' pt2_mask shape ', pt2_mask.get_shape())
    # print(pt2_mask.eval())
    # print(' pt2_ind shape ', pt2_ind.get_shape())
    # print(pt2_ind.eval())
    pt2_dense = tf.gather_nd( in_tensor, pt2_ind)
    print(' dense shape ',pt2_dense.get_shape())
    #-----------------------------------------------------------------------------
    ## Build mesh-grid to hold pixel coordinates
    #-----------------------------------------------------------------------------
    X = tf.range(img_w, dtype=tf.int32)
    Y = tf.range(img_h, dtype=tf.int32)
    X, Y = tf.meshgrid(X, Y)
    # duplicate (repeat) X and Y into a batch_size x rois_per_image tensor
    print(' X/Y shapes :', X.get_shape(), Y.get_shape())
    ones = tf.ones([tf.shape(pt2_dense)[0] , 1, 1], dtype = tf.int32)
    rep_X = ones * X
    rep_Y = ones * Y
    print(' Ones: ', ones.shape)
    print(' ones_exp * X', ones.shape, '*', X.shape, '= ',rep_X.shape)
    print(' ones_exp * Y', ones.shape, '*', Y.shape, '= ',rep_Y.shape)
    # # stack the X and Y grids
    bef_pos = tf.to_float(tf.stack([rep_X,rep_Y], axis = -1))
    print(' before transpse ', bef_pos.get_shape())
    pos_grid = tf.transpose(bef_pos,[1,2,0,3])
    print(' after transpose ', pos_grid.get_shape())
    ##-----------------------------------------------------------------------------
    ## Build mean and convariance tensors for Multivariate Normal Distribution
    ##-----------------------------------------------------------------------------
    width = pt2_dense[:,3] - pt2_dense[:,1] # x2 - x1
    height = pt2_dense[:,2] - pt2_dense[:,0]
    cx = pt2_dense[:,1] + ( width / 2.0)
    cy = pt2_dense[:,0] + ( height / 2.0)
    means = tf.stack((cx,cy),axis = -1)
    # sigma per axis = sqrt(half the box extent)
    covar = tf.stack((width * 0.5 , height * 0.5), axis = -1)
    covar = tf.sqrt(covar)
    ##-----------------------------------------------------------------------------
    ## Compute Normal Distribution for bounding boxes
    ##-----------------------------------------------------------------------------
    tfd = tf.contrib.distributions
    mvn = tfd.MultivariateNormalDiag( loc = means, scale_diag = covar)
    prob_grid = mvn.prob(pos_grid)
    print(' Prob_grid shape before tanspose: ',prob_grid.get_shape())
    # move the per-box axis to the front: [num_boxes, h, w]
    prob_grid = tf.transpose(prob_grid,[2,0,1])
    print(' Prob_grid shape after tanspose: ',prob_grid.get_shape())
    print(' >> input to MVN.PROB: pos_grid (meshgrid) shape: ', pos_grid.get_shape())
    print(' << output probabilities shape:' , prob_grid.get_shape())
    ##--------------------------------------------------------------------------------
    ## IMPORTANT: kill distributions of NaN boxes (resulting from bboxes with height/width of zero
    ## which cause singular sigma cov matrices
    ##--------------------------------------------------------------------------------
    prob_grid = tf.where(tf.is_nan(prob_grid), tf.zeros_like(prob_grid), prob_grid)
    ##-------------------------------------------------------------------------------------
    ## scatter out the probability distributions based on class
    ##-------------------------------------------------------------------------------------
    print('\n Scatter out the probability distributions based on class --------------')
    gauss_scatt = tf.scatter_nd(pt2_ind, prob_grid, [batch_size, num_classes, rois_per_image, img_w, img_h])
    print(' pt2_ind shape : ', pt2_ind.shape)
    print(' prob_grid shape : ', prob_grid.shape)
    print(' gauss_scatt : ', gauss_scatt.shape) # batch_sz , num_classes, num_rois, image_h, image_w
    ##-------------------------------------------------------------------------------------
    ## SUM : Reduce and sum up gauss_scattered by class
    ##-------------------------------------------------------------------------------------
    print('\n Reduce sum based on class ---------------------------------------------')
    gauss_sum = tf.reduce_sum(gauss_scatt, axis=2, name='pred_heatmap2')
    # force small sums to zero - for now (09-11-18) commented out but could reintroduce based on test results
    # gauss_sum = tf.where(gauss_sum < 1e-12, gauss_sum, tf.zeros_like(gauss_sum), name='Where1')
    print(' gaussian_sum shape : ', gauss_sum.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_sum) )
    ### Normalize `gauss_sum` --> `gauss_norm`
    #---------------------------------------------------------------------------------------------
    # heatmap L2 normalization
    # Normalization using the `gauss_sum` (batchsize , num_classes, height, width)
    # 17-05-2018 (New method, replaced the previous method that used the transposed gauss sum
    # 17-05-2018 Replaced with normalization across the CLASS axis
    #---------------------------------------------------------------------------------------------
    # print('\n L2 normalization ------------------------------------------------------')
    # gauss_L2norm = KB.l2_normalize(gauss_sum, axis = +1) # normalize along the CLASS axis
    # print(' gauss L2 norm : ', gauss_L2norm.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_L2norm) )
    #---------------------------------------------------------------------------------------------
    ##---------------------------------------------------------------------------------------------
    ## gauss_sum normalization
    ## normalizer is set to one when the max of class is zero
    ## this prevents elements of gauss_norm computing to nan
    ##---------------------------------------------------------------------------------------------
    print('\n normalization ------------------------------------------------------')
    normalizer = tf.reduce_max(gauss_sum, axis=[-2,-1], keepdims = True)
    normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
    gauss_norm = gauss_sum / normalizer
    # gauss_norm = gauss_sum / tf.reduce_max(gauss_sum, axis=[-2,-1], keepdims = True)
    # gauss_norm = tf.where(tf.is_nan(gauss_norm), tf.zeros_like(gauss_norm), gauss_norm, name = 'Where2')
    print(' gauss norm : ', gauss_norm.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_norm) )
    ##--------------------------------------------------------------------------------------------
    ## generate score based on gaussian using bounding box masks
    ## NOTE: Score is generated on NORMALIZED gaussian distributions (GAUSS_NORM)
    ## If want to do this on NON-NORMALIZED, we need to apply it on GAUSS_SUM
    ##--------------------------------------------------------------------------------------------
    # flatten guassian scattered and input_tensor, and pass on to build_bbox_score routine
    in_shape = tf.shape(in_tensor)
    print(' shape of in_tensor is : ', KB.int_shape(in_tensor))
    # in_tensor_flattened = tf.reshape(in_tensor, [-1, in_shape[-1]]) <-- not a good reshape style!!
    # replaced with following line:
    in_tensor_flattened = tf.reshape(in_tensor, [-1, in_tensor.shape[-1]])
    # bboxes = tf.to_int32(tf.round(in_tensor_flattened[...,0:4]))
    print(' in_tensor : ', in_tensor.shape)
    print(' in_tensor_flattened : ', in_tensor_flattened.shape)
    print(' Rois per class : ', rois_per_image)
    #--------------------------------------------------------------------------------------------------------------------------
    # duplicate GAUSS_NORM <num_roi> times to pass along with bboxes to map_fn function
    # Here we have a choice to calculate scores using the GAUSS_SUM (unnormalized) or GAUSS_NORM (normalized)
    # after looking at the scores and ratios for each option, I decided to go with the normalized
    # as the numbers are larger
    #
    # Examples>
    # Using GAUSS_SUM
    # [ 3.660313 3.513489 54.475536 52.747402 1. 0.999997 4.998889 2450. 0.00204 0.444867]
    # [ 7.135149 1.310972 50.020126 44.779854 1. 0.999991 4.981591 1892. 0.002633 0.574077]
    # [ 13.401865 0. 62.258957 46.636948 1. 0.999971 4.957398 2303. 0.002153 0.469335]
    # [ 0. 0. 66.42349 56.123024 1. 0.999908 4.999996 3696. 0.001353 0.294958]
    # [ 0. 0. 40.78952 60.404335 1. 0.999833 4.586552 2460. 0.001864 0.406513]
    #
    # Using GAUSS_NORM: class r-cnn scr
    # [ 3.660313 3.513489 54.475536 52.747402 1. 0.999997 1832.9218 2450. 0.748131 0.479411]
    # [ 7.135149 1.310972 50.020126 44.779854 1. 0.999991 1659.3965 1892. 0.877059 0.56203 ]
    # [ 13.401865 0. 62.258957 46.636948 1. 0.999971 1540.4974 2303. 0.668909 0.428645]
    # [ 0. 0. 66.42349 56.123024 1. 0.999908 1925.3267 3696. 0.520922 0.333813]
    # [ 0. 0. 40.78952 60.404335 1. 0.999833 1531.321 2460. 0.622488 0.398898]
    #
    # to change the source, change the following line gauss_norm <--> gauss_sum
    #---------------------------------------------------------------------------------------------------------------------------
    ##--------------------------------------------------------------------------------------------
    ## Generate scores :
    ## Testing demonstated that the NORMALIZED score generated from using GAUSS_SUM and GAUSS_NORM
    ## Are the same.
    ## For now we will use GAUSS_SUM score and GAUSS_NORM heatmap. The reason being that the
    ## raw score generated in GAUSS_SUM is much smaller.
    ## We may need to change this base on the training results from FCN
    ##--------------------------------------------------------------------------------------------
    ##--------------------------------------------------------------------------------------------
    ## Generate scores using GAUSS_SUM
    ##--------------------------------------------------------------------------------------------
    print('\n Scores from gauss_sum ----------------------------------------------')
    # tile the class heatmap once per roi so map_fn can pair each bbox with its class heatmap
    temp = tf.expand_dims(gauss_sum, axis =2)
    print(' temp expanded : ', temp.shape)
    temp = tf.tile(temp, [1,1, rois_per_image ,1,1])
    print(' temp tiled shape : ', temp.shape)
    temp = KB.reshape(temp, (-1, temp.shape[-2], temp.shape[-1]))
    print(' temp flattened : ', temp.shape)
    print(' in_tensor_flattened : ', in_tensor_flattened.shape)
    scores_from_sum = tf.map_fn(build_mask_routine, [temp, in_tensor_flattened], dtype=tf.float32)
    print(' Scores_from_sum (after build mask routine) : ', scores_from_sum.shape) # [(num_batches x num_class x num_rois ), 3]
    scores_shape = [in_tensor.shape[0], in_tensor.shape[1], in_tensor.shape[2], -1]
    scores_from_sum = tf.reshape(scores_from_sum, scores_shape)
    print(' reshaped scores : ', scores_from_sum.shape)
    ##--------------------------------------------------------------------------------------------
    ## tf.reduce_max(scores_from_sum[...,-1], axis = -1, keepdims=True) result is [num_imgs, num_class, 1]
    ##
    ## This is a regular normalization that moves everything between [0, 1].
    ## This causes negative values to move to -inf, which is a problem in FCN scoring.
    ## To address this a normalization between [-1 and +1] was introduced in FCN.
    ## Not sure how this will work with training tho.
    ##--------------------------------------------------------------------------------------------
    normalizer = tf.reduce_max(scores_from_sum[...,-1], axis = -1, keepdims=True)
    normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
    norm_score = tf.expand_dims(scores_from_sum[...,-1]/normalizer, axis = -1)
    # scores_from_sum = tf.concat([scores_from_sum, norm_score],axis = -1) <-- added to concat down below 18-9-18
    # NOTE(review): the triple-quoted string below is dead legacy code (the
    # GAUSS_NORM scoring path) kept for reference; it is never executed.
    '''
##--------------------------------------------------------------------------------------------
## Generate scores using normalized GAUSS_SUM (GAUSS_NORM)
##--------------------------------------------------------------------------------------------
print('==== Scores from gauss_norm ================')
temp = tf.expand_dims(gauss_norm, axis =2)
print(' temp expanded shape : ', temp.shape)
temp = tf.tile(temp, [1,1, rois_per_image ,1,1])
print(' temp tiled shape : ', temp.shape)
temp_reshape = KB.reshape(temp, (-1, temp.shape[-2], temp.shape[-1]))
print(' temp flattened shape : ', temp_reshape.shape)
print(' in_tensor_flattened : ', in_tensor_flattened.shape)
scores_from_norm = tf.map_fn(build_mask_routine_inf, [temp_reshape, in_tensor_flattened], dtype=tf.float32)
print(' Scores_from_norm (after build mask routine) : ', scores_from_norm.shape) # [(num_batches x num_class x num_rois ), 3]
scores_shape = [in_tensor.shape[0], in_tensor.shape[1],in_tensor.shape[2], -1]
scores_from_norm = tf.reshape(scores_from_norm, scores_shape)
print(' reshaped scores : ', scores_from_norm.shape)
##--------------------------------------------------------------------------------------------
## normalize score between [0, 1].
##--------------------------------------------------------------------------------------------
normalizer = tf.reduce_max(scores_from_norm[...,-1], axis = -1, keepdims=True)
normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer)
print(' normalizer : ',normalizer.shape)
norm_score = tf.expand_dims(scores_from_norm[...,-1]/normalizer, axis = -1)
scores_from_norm = tf.concat([scores_from_norm, norm_score],axis = -1)
print(' norm_score : ', norm_score.shape)
print(' scores_from_norm final: ', scores_from_norm.shape)
    '''
    ##--------------------------------------------------------------------------------------------
    ## Append `in_tensor` and `scores_from_sum` to form `bbox_scores`
    ##--------------------------------------------------------------------------------------------
    gauss_scores = tf.concat([in_tensor, scores_from_sum, norm_score], axis = -1,name = names[0]+'_scores')
    print(' in_tensor : ', in_tensor.shape)
    print(' scores_from_sum final : ', scores_from_sum.shape)
    print(' norm_score : ', norm_score.shape)
    print(' gauss_scores : ', gauss_scores.shape, ' name: ', gauss_scores.name)
    print(' gauss_scores (FINAL) : ', gauss_scores.shape, ' Keras tensor ', KB.is_keras_tensor(gauss_scores) )
    ##--------------------------------------------------------------------------------------------
    ## //create heatmap Append `in_tensor` and `scores_from_sum` to form `bbox_scores`
    ##--------------------------------------------------------------------------------------------
    # reshape heatmaps from [img, class, height, width] to [img, height, width, class]
    # gauss_heatmap = KB.identity(tf.transpose(gauss_sum,[0,2,3,1]), name = names[0])
    gauss_sum = tf.transpose(gauss_sum,[0,2,3,1], name = names[0])
    gauss_norm = tf.transpose(gauss_norm,[0,2,3,1], name = names[0]+'_norm')
    # print(' gauss_heatmap shape : ', gauss_heatmap.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap) )
    # print(' gauss_heatmap_norm shape : ', gauss_heatmap_norm.shape,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap_norm) )
    # print(gauss_heatmap)
    # gauss_heatmap_norm = KB.identity(tf.transpose(gauss_norm,[0,2,3,1]), name = names[0]+'_norm')
    # print(' gauss_heatmap_norm final shape : ', gauss_heatmap_norm.shape,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap_norm) )
    # gauss_heatmap_L2norm = KB.identity(tf.transpose(gauss_L2norm,[0,2,3,1]), name = names[0]+'_L2norm')
    print(' complete')
    return gauss_norm, gauss_scores # , gauss_heatmap gauss_heatmap_L2norm # [gauss_sum, gauss_scatt, means, covar]
# NOTE(review): the module-level string below is commented-out legacy code
# (the L2-normalization scoring variant), kept for history per its own first
# lines. It is never executed; consider removing it in a future cleanup.
'''
17-9-2018 -- routine was cloned from chm_layer_inf, and this code was commented out as we dont use L2 normalization
kept for history
# consider the two new columns for reshaping the gaussian_bbox_scores
new_shape = tf.shape(in_tensor)+ [0,0,0, tf.shape(scores)[-1]]
bbox_scores = tf.concat([in_tensor_flattened, scores], axis = -1)
bbox_scores = tf.reshape(bbox_scores, new_shape)
# print(' new shape is : ', new_shape.eval())
print(' in_tensor_flattened : ', in_tensor_flattened.shape)
print(' Scores shape : ', scores.shape) # [(num_batches x num_class x num_rois ), 3]
print(' boxes_scores (rehspaed) : ', bbox_scores.shape)
##--------------------------------------------------------------------------------------------
## Normalize computed score above, and add it to the heatmap_score tensor as last column
##--------------------------------------------------------------------------------------------
scr_L2norm = tf.nn.l2_normalize(bbox_scores[...,-1], axis = -1) # shape (num_imgs, num_class, num_rois)
scr_L2norm = tf.expand_dims(scr_L2norm, axis = -1)
##--------------------------------------------------------------------------------------------
# shape of tf.reduce_max(bbox_scores[...,-1], axis = -1, keepdims=True) is (num_imgs, num_class, 1)
# This is a regular normalization that moves everything between [0, 1].
# This causes negative values to move to -inf, which is a problem in FCN scoring.
# To address this a normalization between [-1 and +1] was introduced in FCN.
# Not sure how this will work with training tho.
##--------------------------------------------------------------------------------------------
scr_norm = bbox_scores[...,-1]/ tf.reduce_max(bbox_scores[...,-1], axis = -1, keepdims=True)
scr_norm = tf.where(tf.is_nan(scr_norm), tf.zeros_like(scr_norm), scr_norm)
#--------------------------------------------------------------------------------------------
# this normalization moves values to [-1, +1] which we use in FCN, but not here.
#--------------------------------------------------------------------------------------------
# reduce_max = tf.reduce_max(bbox_scores[...,-1], axis = -1, keepdims=True)
# reduce_min = tf.reduce_min(bbox_scores[...,-1], axis = -1, keepdims=True) ## epsilon = tf.ones_like(reduce_max) * 1e-7
# scr_norm = (2* (bbox_scores[...,-1] - reduce_min) / (reduce_max - reduce_min)) - 1
scr_norm = tf.where(tf.is_nan(scr_norm), tf.zeros_like(scr_norm), scr_norm)
scr_norm = tf.expand_dims(scr_norm, axis = -1) # shape (num_imgs, num_class, 32, 1)
bbox_scores = tf.concat([bbox_scores, scr_norm, scr_L2norm], axis = -1)
gauss_heatmap = KB.identity(tf.transpose(gauss_sum,[0,2,3,1]), name = names[0])
gauss_heatmap_norm = KB.identity(tf.transpose(gauss_norm,[0,2,3,1]), name = names[0]+'_norm')
gauss_heatmap_L2norm = KB.identity(tf.transpose(gauss_L2norm,[0,2,3,1]), name = names[0]+'_L2norm')
gauss_scores = KB.identity(bbox_scores, name = names[0]+'_scores')
print(' gauss_heatmap final shape : ', gauss_heatmap.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap) )
print(' gauss_scores final shape : ', gauss_scores.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_scores) )
print(' complete')
return gauss_heatmap_norm, gauss_scores, gauss_heatmap,gauss_heatmap_L2norm # [gauss_sum, gauss_scatt, means, covar]
'''
##----------------------------------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
def build_mask_routine(input_list):
    """
    Compute gaussian coverage statistics for one bounding box over one heatmap.

    Inputs:
    -----------
    heatmap : [image height, image width] gaussian heatmap slice
    bbox    : row holding [y1, x1, y2, x2, ...] in absolute (non-normalized)
              scale; column 5 carries the bbox score used for weighting.

    Returns
    -----------
    Rank-1 tensor of three values:
        gaussian sum over the box area, the box area (in pixels), and the
        gaussian sum multiplied by the bbox score.
    """
    heatmap, bbox = input_list
    with tf.variable_scope('mask_routine'):
        # Enumerate every integer (y, x) pixel location covered by the box.
        ys = tf.range(bbox[0], bbox[2])
        xs = tf.range(bbox[1], bbox[3])
        grid_y, grid_x = tf.meshgrid(ys, xs)
        box_pixels = tf.to_int32(tf.reshape(tf.stack([grid_y, grid_x], axis=2), [-1, 2]))
        num_pixels = tf.shape(box_pixels)[0]
        # Scatter ones into a heatmap-shaped tensor to form a binary box mask.
        mask = tf.scatter_nd(box_pixels,
                             tf.ones([num_pixels], dtype = tf.float32),
                             tf.shape(heatmap))
        masked_heatmap = tf.multiply(heatmap, mask, name = 'mask_applied')
        area = tf.to_float((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]))
        covered = tf.reduce_sum(masked_heatmap)
        # Weight the gaussian coverage by the bbox score (column 5).
        weighted = covered * bbox[5]
    return tf.stack([covered, area, weighted], axis = -1)
##----------------------------------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
class CHMLayer(KE.Layer):
    '''
    Contextual Heatmap Layer (previously CHMLayerTF)

    Receives the bboxes, their respective classification and roi_outputs and
    builds the per_class tensor

    Returns:
    -------
    The CHM layer returns the following tensors:

    pred_tensor : [batch, NUM_CLASSES, TRAIN_ROIS_PER_IMAGE , (index, class_prob, y1, x1, y2, x2, class_id, old_idx)]
                  in normalized coordinates
    pred_cls_cnt: [batch, NUM_CLASSES]
    gt_tensor   : [batch, NUM_CLASSES, DETECTION_MAX_INSTANCES, (index, class_prob, y1, x1, y2, x2, class_id, old_idx)]
    gt_cls_cnt  : [batch, NUM_CLASSES]

    Note: Returned arrays might be zero padded if not enough target ROIs.
    '''

    def __init__(self, config=None, **kwargs):
        # config: model configuration object; read by call()/compute_output_shape().
        super().__init__(**kwargs)
        print('--------------------------------')
        print('>>> CHM Layer ')
        print('--------------------------------')
        self.config = config

    def call(self, inputs):
        """
        Build prediction / refined-prediction / ground-truth per-class tensors
        and their gaussian heatmaps.

        inputs: [mrcnn_class, mrcnn_bbox, output_rois, tgt_class_ids, tgt_bboxes]
        Returns a list of 8 tensors: the three normalized heatmaps, their three
        score tensors, plus the refined prediction tensor and deltas.
        """
        print(' > CHMLayer Call() ', len(inputs))
        # mrcnn_class , mrcnn_bbox, output_rois, gt_class_ids, gt_bboxes, tgt_class_ids, tgt_deltas = inputs
        mrcnn_class , mrcnn_bbox, output_rois, tgt_class_ids, tgt_bboxes = inputs
        print(' mrcnn_class.shape :', mrcnn_class.shape, KB.int_shape( mrcnn_class ))
        print(' mrcnn_bbox.shape :', mrcnn_bbox.shape, KB.int_shape( mrcnn_bbox ))
        print(' output_rois.shape :', output_rois.shape, KB.int_shape( output_rois ))
        print(' tgt_class_ids.shape :', tgt_class_ids.shape, KB.int_shape(tgt_class_ids ))
        print(' tgt_bboxes.shape :', tgt_bboxes.shape, KB.int_shape( tgt_bboxes ))
        # print(' tgt_deltas.shape :', tgt_deltas.shape, KB.int_shape( tgt_deltas ))

        # Build per-class tensors, then convert each to heatmap + scores.
        pred_tensor = build_predictions(output_rois, mrcnn_class, mrcnn_bbox, self.config)
        pr_hm_norm, pr_hm_scores = build_heatmap(pred_tensor, self.config, names = ['pred_heatmap'])
        # pred_cls_cnt = KL.Lambda(lambda x: tf.count_nonzero(x[:,:,:,-1],axis = -1), name = 'pred_cls_count')(pred_tensor)

        pred_refined_tensor, pred_deltas = build_refined_predictions(output_rois, mrcnn_class, mrcnn_bbox, self.config)
        pr_ref_hm_norm, pr_ref_hm_scores = build_heatmap(pred_refined_tensor, self.config, names = ['pred_refined_heatmap'])

        gt_tensor = build_ground_truth (tgt_class_ids, tgt_bboxes, self.config)
        gt_hm_norm, gt_hm_scores = build_heatmap(gt_tensor, self.config, names = ['gt_heatmap'])
        # gt_cls_cnt = KL.Lambda(lambda x: tf.count_nonzero(x[:,:,:,-1],axis = -1), name = 'gt_cls_count')(gt_tensor)

        print()
        # print(' pred_cls_cnt shape : ', pred_cls_cnt.shape , 'Keras tensor ', KB.is_keras_tensor(pred_cls_cnt) )
        # print(' gt_cls_cnt shape : ', gt_cls_cnt.shape , 'Keras tensor ', KB.is_keras_tensor(gt_cls_cnt) )
        print(' pred_heatmap : ', pr_hm_norm.shape , 'Keras tensor ', KB.is_keras_tensor(pr_hm_norm))
        print(' pred_heatmap_scores: ', pr_hm_scores.shape , 'Keras tensor ', KB.is_keras_tensor(pr_hm_scores))
        print(' pred_refined_heatmap : ', pr_ref_hm_norm.shape , 'Keras tensor ', KB.is_keras_tensor(pr_ref_hm_norm))
        print(' pred_refnined_heatmap_scores: ', pr_ref_hm_scores.shape , 'Keras tensor ', KB.is_keras_tensor(pr_ref_hm_scores))
        print(' gt_heatmap : ', gt_hm_norm.shape , 'Keras tensor ', KB.is_keras_tensor(gt_hm_norm))
        print(' gt_heatmap_scores : ', gt_hm_scores.shape , 'Keras tensor ', KB.is_keras_tensor(gt_hm_scores))
        print(' complete')

        return [ pr_hm_norm, pr_ref_hm_norm, gt_hm_norm , pr_hm_scores, pr_ref_hm_scores, gt_hm_scores, pred_refined_tensor, pred_deltas]
        # pred_tensor , gt_tensor]

    def compute_output_shape(self, input_shape):
        """Declare the static shapes of the 8 tensors returned by call()."""
        # may need to change dimensions of first return from IMAGE_SHAPE to MAX_DIM
        return [
            (None, self.config.IMAGE_SHAPE[0], self.config.IMAGE_SHAPE[1], self.config.NUM_CLASSES) # pred_heatmap_norm
            , (None, self.config.IMAGE_SHAPE[0], self.config.IMAGE_SHAPE[1], self.config.NUM_CLASSES) # pred_refined_heatmap_norm
            , (None, self.config.IMAGE_SHAPE[0], self.config.IMAGE_SHAPE[1], self.config.NUM_CLASSES) # gt_heatmap_norm
            , (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,11) # pred_heatmap_scores
            , (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,11) # pred_refined_heatmap_scores
            , (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,11) # gt_heatmap+scores
            # ----extra stuff for now ---------------------------------------------------------------------------------------------------
            , (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,7) # pred_refined_tensor
            , (None, self.config.NUM_CLASSES , self.config.DETECTION_PER_CLASS ,4) # pred_deltas
            # , (None, self.config.NUM_CLASSES , self.config.TRAIN_ROIS_PER_IMAGE ,10) # pred_heatmap_scores (expanded)
            # , (None, self.config.NUM_CLASSES , self.config.DETECTION_MAX_INSTANCES ,10) # gt_heatmap+scores (expanded)
            # , (None, self.config.NUM_CLASSES , self.config.TRAIN_ROIS_PER_IMAGE , 7) # pred_tensor
            # , (None, self.config.NUM_CLASSES , self.config.DETECTION_MAX_INSTANCES , 7) # gt_tensor (expanded)
        ]
##----------------------------------------------------------------------------------------------------------------------
##
##
##
##----------------------------------------------------------------------------------------------------------------------
##----------------------------------------------------------------------------------------------------------------------
## removed 17-05-2018 and replaced with version that calculates heatmap scores using the gauss_sum tensor instead
## of the gauss_scatter tensor -- this was done since the gauss_sum matches the output we have out of FCN
##----------------------------------------------------------------------------------------------------------------------
"""
def build_heatmap_old_2(in_tensor, config, names = None):
num_detections = config.DETECTION_MAX_INSTANCES
img_h, img_w = config.IMAGE_SHAPE[:2]
batch_size = config.BATCH_SIZE
num_classes = config.NUM_CLASSES
print('\n ')
print(' > NEW build_heatmap() for ', names )
print(' orignal in_tensor shape : ', in_tensor.shape)
# rois per image is determined by size of input tensor
# detection mode: config.TRAIN_ROIS_PER_IMAGE
# ground_truth : config.DETECTION_MAX_INSTANCES
rois_per_image = (in_tensor.shape)[2]
# strt_cls = 0 if rois_per_image == 32 else 1
print(' num of bboxes per class is : ', rois_per_image )
#-----------------------------------------------------------------------------
## Stack non_zero bboxes from in_tensor into pt2_dense
#-----------------------------------------------------------------------------
# pt2_ind shape is [?, 3].
# pt2_ind[0] corresponds to image_index
# pt2_ind[1] corresponds to class_index
# pt2_ind[2] corresponds to roi row_index
# pt2_dense shape is [?, 6]
# pt2_dense[0] is image index
# pt2_dense[1:4] roi cooridnaytes
# pt2_dense[5] is class id
#-----------------------------------------------------------------------------
pt2_sum = tf.reduce_sum(tf.abs(in_tensor[:,:,:,:-2]), axis=-1)
print(' pt2_sum shape ',pt2_sum.shape)
# print(pt2_sum[0].eval())
pt2_ind = tf.where(pt2_sum > 0)
## replaced the two operations below with the one above - 15-05-2018
# pt2_mask = tf.greater(pt2_sum , 0)
# pt2_ind = tf.where(pt2_mask)
# print(' pt2_mask shape ', pt2_mask.get_shape())
# print(pt2_mask.eval())
# print(' pt2_ind shape ', pt2_ind.get_shape())
# print(pt2_ind.eval())
pt2_dense = tf.gather_nd( in_tensor, pt2_ind)
print(' dense shape ',pt2_dense.get_shape())
#-----------------------------------------------------------------------------
## Build mesh-grid to hold pixel coordinates
#-----------------------------------------------------------------------------
X = tf.range(img_w, dtype=tf.int32)
Y = tf.range(img_h, dtype=tf.int32)
X, Y = tf.meshgrid(X, Y)
# duplicate (repeat) X and Y into a batch_size x rois_per_image tensor
print(' X/Y shapes :', X.get_shape(), Y.get_shape())
ones = tf.ones([tf.shape(pt2_dense)[0] , 1, 1], dtype = tf.int32)
rep_X = ones * X
rep_Y = ones * Y
print(' Ones: ', ones.shape)
print(' ones_exp * X', ones.shape, '*', X.shape, '= ',rep_X.shape)
print(' ones_exp * Y', ones.shape, '*', Y.shape, '= ',rep_Y.shape)
# # stack the X and Y grids
bef_pos = tf.to_float(tf.stack([rep_X,rep_Y], axis = -1))
print(' before transpse ', bef_pos.get_shape())
pos_grid = tf.transpose(bef_pos,[1,2,0,3])
print(' after transpose ', pos_grid.get_shape())
#-----------------------------------------------------------------------------
## Build mean and convariance tensors for Multivariate Normal Distribution
#-----------------------------------------------------------------------------
width = pt2_dense[:,3] - pt2_dense[:,1] # x2 - x1
height = pt2_dense[:,2] - pt2_dense[:,0]
cx = pt2_dense[:,1] + ( width / 2.0)
cy = pt2_dense[:,0] + ( height / 2.0)
means = tf.stack((cx,cy),axis = -1)
covar = tf.stack((width * 0.5 , height * 0.5), axis = -1)
covar = tf.sqrt(covar)
tfd = tf.contrib.distributions
mvn = tfd.MultivariateNormalDiag( loc = means, scale_diag = covar)
prob_grid = mvn.prob(pos_grid)
print(' Prob_grid shape before tanspose: ',prob_grid.get_shape())
prob_grid = tf.transpose(prob_grid,[2,0,1])
print(' Prob_grid shape after tanspose: ',prob_grid.get_shape())
print(' >> input to MVN.PROB: pos_grid (meshgrid) shape: ', pos_grid.get_shape())
print(' << output probabilities shape:' , prob_grid.get_shape())
#--------------------------------------------------------------------------------
## IMPORTANT: kill distributions of NaN boxes (resulting from bboxes with height/width of zero
## which cause singular sigma cov matrices
#--------------------------------------------------------------------------------
prob_grid = tf.where(tf.is_nan(prob_grid), tf.zeros_like(prob_grid), prob_grid)
## scatter out the probability distributions based on class --------------------------
print('\n Scatter out the probability distributions based on class --------------')
gauss_scatt = tf.scatter_nd(pt2_ind, prob_grid, [batch_size, num_classes, rois_per_image, img_w, img_h])
print(' pt2_ind shape : ', pt2_ind.shape)
print(' prob_grid shape : ', prob_grid.shape)
print(' gauss_scatt : ', gauss_scatt.shape) # batch_sz , num_classes, num_rois, image_h, image_w
## heatmap: sum gauss_scattered based on class ---------------------------------------
print('\n Reduce sum based on class ---------------------------------------------')
gauss_sum = tf.reduce_sum(gauss_scatt, axis=2, name='pred_heatmap2')
gauss_sum = tf.where(gauss_sum > 1e-12, gauss_sum, tf.zeros_like(gauss_sum))
print(' gaussian_sum shape : ', gauss_sum.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_sum) )
# reshape to [img, class, height, width] ---> [img, height, width, class]
gauss_sum = tf.transpose(gauss_sum,[0,2,3,1], name = names[0])
print(' gaussian sum type/name : ', type(gauss_sum), gauss_sum.name, names[0])
print(' gaussian_sum shape : ', gauss_sum.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_sum) )
## heatmap: L2 normalization -----------------------------------------------------------------
print('\n L2 normalization ------------------------------------------------------')
heatmap_shape=KB.shape(gauss_sum)
print(' gauss-sum.shape:', gauss_sum.shape, 'tf.shape :', tf.shape(gauss_sum))
gauss_flatten = KB.reshape(gauss_sum, (heatmap_shape[0], -1, heatmap_shape[-1]) ) # reshape to image, class
output_norm = KB.l2_normalize(gauss_flatten, axis = 1)
gauss_norm = KB.identity(KB.reshape(output_norm, heatmap_shape ) , name = names[0]+'_norm')
print(' gauss_flatten : ', KB.int_shape(gauss_flatten) , gauss_flatten.get_shape(),' Keras tensor ', KB.is_keras_tensor(gauss_flatten) )
print(' gauss_norm1 : ', KB.int_shape(output_norm) , output_norm.get_shape(),' Keras tensor ', KB.is_keras_tensor(output_norm) )
print(' gauss_norm final : ', KB.int_shape(gauss_norm) , gauss_norm.get_shape(),' Keras tensor ', KB.is_keras_tensor(gauss_norm) )
##--------------------------------------------------------------------------------------------
## generate score based on gaussian using bouding box masks
## NOTE: Score is generated on NON-NORMALIZED gaussian distributions
## If want to do this on normalized, we need to apply normalization to gauss_scatt first
##--------------------------------------------------------------------------------------------
# flatten guassian scattered and input_tensor, and pass on to build_bbox_score routine
in_tensor_flattened = tf.reshape(in_tensor, [-1,6])
bboxes = tf.to_int32(tf.round(in_tensor_flattened[...,0:4]))
print(' in_tensor_flattened is ', in_tensor_flattened.shape)
print(' boxes shape ', bboxes.shape)
# DONT NEED THIS - was put there to try to avoid computing sum/area for zero bboxes.
# kept as reference for future generations .....
# bbox_sum = tf.reduce_max(in_tensor[...,0:3], axis = -1, name = 'bbox_sum')
# print(' bbox sum shape: ', bbox_sum.shape)
gauss_scatt_shape = KB.int_shape(gauss_scatt)
gauss_scatt_reshape = KB.reshape(gauss_scatt, (-1, gauss_scatt_shape[-2], gauss_scatt_shape[-1]))
print(' gaussian scatter shape : ', gauss_scatt_shape)
print(' gaussian scatter reshaped : ', gauss_scatt_reshape.shape)
# ones_map = tf.ones([384,128,128])
scores = tf.map_fn(build_mask_routine, [gauss_scatt_reshape, bboxes], dtype=tf.float32)
new_shape = tf.shape(in_tensor)+ [0,0,0,tf.shape(scores)[-1]]
gaussian_bbox_scores = tf.concat([in_tensor_flattened, scores], axis = -1)
print(' Scatter Flattened shape : ', in_tensor_flattened.shape)
print(' Scores shape : ', scores.shape)
print(' gaussian_boxes_scores initial shape: ', gaussian_bbox_scores.shape)
gaussian_bbox_scores = tf.reshape(gaussian_bbox_scores, new_shape, name = names[0]+'_scores')
##--------------------------------------------------------------------------------------------
## Normalize computed score above, and add it to the heatmap_score tensor as last column
##--------------------------------------------------------------------------------------------
scr = gaussian_bbox_scores[...,-2]/gaussian_bbox_scores[...,-1]
scr = tf.where(tf.is_nan(scr), tf.zeros_like(scr), scr)
scr_norm = tf.nn.l2_normalize(scr, axis = -1)
scr_norm = tf.expand_dims(scr_norm, axis = -1)
gaussian_bbox_scores = tf.concat([gaussian_bbox_scores, scr_norm], axis = -1)
print(' gaussian_bbox_scores final shape : ', gaussian_bbox_scores.shape)
print(' complete')
return gauss_norm, gaussian_bbox_scores # [gauss_sum, gauss_scatt, means, covar]
"""
|
nilq/baby-python
|
python
|
from django.contrib import admin
from forums.models import Category
from guardian.admin import GuardedModelAdmin
class CategoryAdmin(GuardedModelAdmin):
    """Admin options for forum ``Category`` objects.

    Subclasses ``GuardedModelAdmin`` (django-guardian) rather than the plain
    ``ModelAdmin``, so object-level permissions are handled in the admin.
    """
    list_display = ('title', 'parent', 'ordering')
    list_display_links = ('title',)


# Register the Category model under the customised admin class above.
admin.site.register(Category, CategoryAdmin)
|
nilq/baby-python
|
python
|
# transaction_model.py
#
# ATM MVC program
#
# Team alroda
#
# Aldrich Huang A01026502 2B
# Robert Janzen A01029341 2B
# David Xiao A00725026 2B
import datetime
import os
class TransactionModel:
    """Persists and reads per-user transaction logs stored as CSV files.

    Each user gets one file at ``model/logs/<uid>-transactions.csv``. The
    first line is a column header; every following line is one log entry.
    """

    _TRANSACTION_COLUMNS = 'date,uid,account_type,account_number,transaction_type,amount'
    _DATE_FORMAT = '%Y-%m-%d %H:%M:%S'

    def __init__(self):
        pass

    def createNewEntry(self, uid, account_type, account_num, transaction_type, amount, date=None):
        """
        Creates a new transaction log entry and saves it to file.

        Args:
            uid:
                UID of the user that owns the account that the transaction is initiated from
            account_type:
                The type of the account
            account_num:
                The account number of the account
            transaction_type:
                The type of transaction that was done
            amount:
                The dollar value involved in the transaction
            date:
                The time and date when the transaction took place; defaults to
                the current time. (The old signature evaluated
                ``datetime.now()`` once at class-definition time, so every
                entry was stamped with the import time.)
        Returns:
            None
        """
        if date is None:
            date = datetime.datetime.now().strftime(self._DATE_FORMAT)
        row = '{0},{1},{2},{3},{4},{5}'.format(date, uid, account_type, account_num,
                                               transaction_type, str(float(amount)))
        self.saveTransaction(uid, row)

    def saveTransaction(self, uid, row):
        """
        Appends ``row`` to the user's transaction log file, creating the file
        (with a header line) when it is missing or empty.

        Args:
            uid:
                UID of the user who owns the account that initiated the transaction
            row:
                String containing the new entry to be added to the transaction log file
        Returns:
            None
        """
        filename = 'model/logs/' + str(uid) + '-transactions.csv'
        try:
            has_content = os.path.getsize(filename) > 0
        except OSError:
            # File does not exist (or is unreadable): start a fresh log.
            has_content = False
        if has_content:
            with open(filename, 'a') as csv_file:
                csv_file.write('\n' + row)
        else:
            with open(filename, 'w') as csv_file:
                csv_file.write(self._TRANSACTION_COLUMNS + '\n' + row)

    def createNewActionEntry(self, uid, account_type, account_num, action_type, date=None):
        """
        Creates a new account-action entry in the transaction log.

        Args:
            uid:
                uid of the user who owns the account
            account_type:
                type of the account
            account_num:
                account number of the account
            action_type:
                string describing the type of action done to the account
            date:
                timestamp of the action; defaults to the current time
                (computed per call, not at class-definition time)
        Returns:
            None
        """
        if date is None:
            date = datetime.datetime.now().strftime(self._DATE_FORMAT)
        row = '{0},{1},{2},{3},{4}'.format(date, uid, account_type, account_num, action_type)
        self.saveTransaction(uid, row)

    def displayReport(self, uid):
        """
        Builds a report of all recorded transactions for accounts owned by the
        user with the given uid.

        Args:
            uid:
                UID of the user
        Returns:
            A list of blocks: one heading block plus one block per account
            number (sorted), or the string 'Error Generating Report...' when
            the log file cannot be read.
        """
        filename = 'model/logs/' + str(uid) + '-transactions.csv'
        report_content = [['Comprehensive report for user no. ' + uid]]
        try:
            if os.path.getsize(filename) > 0:
                with open(filename, 'r') as csv_file:
                    csv_file.readline()  # skip the column-header line
                    transaction_dic = {}
                    for line in csv_file:
                        line_data = line.rstrip('\n').split(',')
                        # Group entries by account number (column 3). The old
                        # code tested membership against a stale list, which
                        # kept only the last entry per account.
                        transaction_dic.setdefault(line_data[3], []).append(', '.join(line_data))
                    for account_num in sorted(transaction_dic, key=str):
                        acc_specific_entry = [('Transactions for account no.' + account_num)]
                        acc_specific_entry.extend(transaction_dic[account_num])
                        report_content.append(acc_specific_entry)
            return report_content
        except OSError:
            # Missing/unreadable log file (the old bare `except:` also hid
            # programming errors).
            return 'Error Generating Report...'
if __name__ == '__main__':
    # Quick manual smoke test: build the report for user id '1'.
    TransactionModel().displayReport('1')
|
nilq/baby-python
|
python
|
#!/usr/bin/python3.5
"""
Command line utility to extract basic statistics from a gpx file
"""
import pdb
import sys as mod_sys
import logging as mod_logging
import math as mod_math
import gpxpy as mod_gpxpy
#hack for heart rate
import xml.etree.ElementTree as ET
#heart rate statistics
import numpy as np
import os
import sys
#mod_logging.basicConfig(level=mod_logging.DEBUG,
# format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
header = 'id, duration, avgHeartRate, maxHeartRate, dateOfTraining, elevation, uphill, downhill, length_2d, length_3d, moving_time, stopped_time'
def format_time(time_s):
    """Render a duration in seconds as zero-padded H:M:S, or 'n/a' for 0/None."""
    if not time_s:
        return 'n/a'
    minutes, seconds = divmod(time_s, 60)
    hours, minutes = divmod(minutes, 60)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
def print_gpx_part_info(gpx_part, csvFile, heartRate, athleteId):
    """Compute summary statistics for one GPX part and append one CSV row.

    Column order matches the module-level ``header`` constant: id, duration,
    avg/max heart rate, date, elevation, uphill, downhill, 2d/3d length,
    moving time, stopped time.
    """
    start_time, end_time = gpx_part.get_time_bounds()
    moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx_part.get_moving_data()
    uphill, downhill = gpx_part.get_uphill_downhill()
    stats = [
        gpx_part.get_duration(),
        round(np.mean(heartRate), 2),   # average heart rate
        np.max(heartRate),              # peak heart rate
        start_time,                     # date of the training session
        round(uphill + downhill, 2),    # total elevation change
        round(uphill, 2),
        round(downhill, 2),
        round(gpx_part.length_2d(), 2),
        round(gpx_part.length_3d(), 2),
        moving_time,
        stopped_time,
    ]
    # One row per training; the athlete id is written as the first column.
    csvFile.write('\n' + athleteId + ''.join(', ' + str(value) for value in stats))
def print_gpx_info(gpx, gpx_file, csvFile, heartRate=None, athleteId=''):
    """Print the GPX file's metadata and append its summary row to ``csvFile``.

    Args:
        gpx: parsed gpxpy GPX object.
        gpx_file: path of the source file (only printed).
        csvFile: open, writable CSV file the summary row is appended to.
        heartRate: heart-rate samples for the track; required by
            ``print_gpx_part_info``. The old code called that function
            without this argument (and without ``athleteId``), so every call
            raised TypeError.
        athleteId: identifier written as the row's first column.
    """
    print('File: %s' % gpx_file)
    if gpx.name:
        print(' GPX name: %s' % gpx.name)
    if gpx.description:
        print(' GPX description: %s' % gpx.description)
    if gpx.author_name:
        print(' Author: %s' % gpx.author_name)
    if gpx.author_email:
        print(' Email: %s' % gpx.author_email)
    print_gpx_part_info(gpx, csvFile, heartRate, athleteId)
def parseHeartRate(file):
    """Extract all heart-rate samples from a GPX file's Garmin
    TrackPointExtension ``hr`` elements, in document order."""
    hr_tag = '{http://www.garmin.com/xmlschemas/TrackPointExtension/v1}hr'
    root = ET.parse(file).getroot()
    return [int(node.text) for node in root.iter(hr_tag)]
def run(gpx_files, csvFilePath, athleteId):
    """Parse each GPX file, extract heart-rate and track statistics, and
    write one CSV row per file to ``csvFilePath``.

    Files with no heart-rate data are skipped. Any other failure logs the
    exception and exits the process (original behavior, preserved).
    """
    if not gpx_files:
        print('No GPX files given')
        mod_sys.exit(1)
    # `with` guarantees the CSV is flushed and closed even on early exit;
    # the old code never closed it, so later readers could see partial data.
    with open(csvFilePath, "w") as csvFile:
        csvFile.write(header)
        fLen = str(len(gpx_files))
        for i, gpx_file in enumerate(gpx_files):
            sys.stdout.write("\rProgressing file " + str(i) + " out of " + fLen + " ")
            sys.stdout.flush()
            try:
                heartRate = parseHeartRate(gpx_file)
                if not heartRate:
                    continue
                # Close the GPX file deterministically (was left open before).
                with open(gpx_file) as gpx_fp:
                    gpx = mod_gpxpy.parse(gpx_fp)
                print_gpx_part_info(gpx, csvFile, heartRate, athleteId)
            except Exception as e:
                mod_logging.exception(e)
                print('Error processing %s' % gpx_file)
                mod_sys.exit(1)
def parserMain(directoryPath, outDirectoryPath):
    """For every athlete subdirectory of ``directoryPath``, parse all its GPX
    files into one CSV named after the subdirectory.

    Both path arguments are expected to end with a path separator.
    """
    for athlete_dir in os.listdir(directoryPath):
        gpx_paths = [
            directoryPath + athlete_dir + '/' + entry
            for entry in os.listdir(directoryPath + athlete_dir)
        ]
        run(gpx_paths, outDirectoryPath + athlete_dir + ".csv", athlete_dir)
def joinFiles(dirPath, outFilePath):
    """Concatenate every per-athlete CSV in ``dirPath`` (skipping each file's
    header line) into a single file at ``outFilePath`` with one shared header.

    ``dirPath`` is expected to end with a path separator.
    """
    # `with` closes/flushes the output; the old code never closed it.
    with open(outFilePath, "w") as outFile:
        outFile.write(header + '\n')
        for fileName in os.listdir(dirPath):
            with open(dirPath + fileName) as f:
                f.readline()  # throw away the per-file header line
                for line in f:
                    outFile.write(line)
if __name__ == '__main__':
    # Guarded so importing this module does not trigger the whole pipeline.
    parserMain("../Data/Sport/", "../Data/Parsed/")
    joinFiles('../Data/Parsed/', '../Data/summed.csv')
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt, sys
sys.path.insert(0, '..')
from Louis.misc import *
from Louis.ARC_data.objects import *
from Louis.grids import *
from Louis.unifying import *
def show_pb(name, n=17):
    """Interactively display every n-th problem from the pickled list at ``name``.

    Iterates the pickled entries in reverse; for each selected entry the
    solution is printed and the problem displayed with matplotlib. Pressing
    Enter advances, entering '0' stops.
    NOTE(review): assumes the pickle holds (problem, solution, cohesion_type)
    triples -- confirm against the code that writes these files.
    """
    l, i = reversed(pickle_read(name)), 0
    for pb, p, c_type in l:
        i += 1
        if i % n == 0:
            print(p)
            pb_to_grid(pb)
            display_pb(pb, 'Solution : '+str(p)+'\nCohesion type : '+str(c_type))
            # figManager = plt.get_current_fig_manager()
            # figManager.window.showMaximized()
            plt.show(block=False)
            if input() == '0': break
            plt.close('all')
if __name__ == '__main__':
    # Manual-inspection entry point: step through every problem (n=1) in the
    # shared mutation pickle.
    show_pb('../../espace partage remy louis/Louis/mutation.pickle', 1)
# l = pickle_read('../../espace partage remy louis/Louis/mutation.pickle')
# ans = [0] * 11
# i = 0
# j = 0
# for _, p, _ in l:
# if contains(p, 'singleton'): i += 1
# if contains(p, 'car'): j += 1
# _, d = analyze_var(p)
# ans[d] += 1
# print(ans, i, j)
# l = pickle_read('../../espace partage remy louis/diff_I_rand_5_25_10000.pickle')
# l = pickle_read('data_for_nn/problems/diff_I_5_1000.pickle')
# i = 0
# for pb, p, c_type in l:
# if i % 5 == 0:
# display_pb(pb, 'Solution : '+str(p)+'\nCohesion type : '+cohesion_types_corresp[c_type])
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
# plt.show(block=False)
# if input() == '0':
# break
# plt.close('all')
# i += 1
# mut_l = pickle_read('data_for_nn/problems/mutation_10mutants_1grid.pickle')
# for pb, p, c_type in mut_l:
# display_pb(pb, 'Solution : '+str(p)+'\nCohesion type : '+str(c_type))
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
# plt.show(block=False)
# if input() == '0':
# break
# plt.close('all')
|
nilq/baby-python
|
python
|
import re
import csv
from io import BytesIO
from zipfile import ZipFile
import requests
from ._version import __version__
URLHAUS_API_URL = 'https://urlhaus.abuse.ch/downloads/'
REGEX_CSV_HEADER = re.compile(r'^#\s((?:[a-z_]+,)+(?:[a-z_]+))\r', re.MULTILINE)
REGEX_HOSTFILE_DOMAIN = re.compile(r'^127\.0\.0\.1\t(.+)\r', re.MULTILINE)
class URLhaus(object):
    """Client for the abuse.ch URLhaus bulk-download endpoints."""

    def __init__(self, api_url=URLHAUS_API_URL):
        '''
        Prepare the URLhaus API
        '''
        # Save the API URL
        self._api_url = api_url
        # Get and prepare the session that will be used for all API calls
        self._session = requests.session()
        self._session.headers.update({
            'User-Agent': f'abuse_ch-urlhaus-api/{__version__}',
        })

    def _request(self, path, **kwargs):
        '''
        Internal method to handle API requests. Raises for errors,
        transparently unpacks single-file zip responses, and returns the
        decoded text.
        '''
        # Compose the full request URL
        req_url = f'{self._api_url}{path}'
        # Make the request
        resp = self._session.get(req_url, **kwargs)
        resp.raise_for_status()
        # Determine what to do based on response content-type
        content_type = resp.headers.get('content-type', None)
        if content_type == 'application/zip':
            # Zipped response: it must contain exactly one file; return it.
            sample_zip = ZipFile(BytesIO(resp.content))
            file_list = sample_zip.infolist()
            assert len(file_list) == 1
            resp = sample_zip.read(file_list[0].filename)
        else:
            # Otherwise we're dealing with the raw content
            resp = resp.content
        return resp.decode()

    def _parse_csv(self, content):
        '''
        Parse URLhaus CSV text into a list of dicts keyed by the column names
        taken from the commented header line. Returns the input unchanged
        when no header line is found.
        '''
        csv_header = REGEX_CSV_HEADER.search(content)
        if csv_header is not None:
            # Get the CSV columns from the header, then drop comment lines
            csv_columns = tuple(csv_header[1].split(','))
            csv_data = [row for row in content.splitlines() if not row.startswith('#')]
            content = list(csv.DictReader(csv_data, fieldnames=csv_columns))
        return content

    def _get_csv(self, path, raw):
        '''
        Shared implementation for every CSV endpoint: fetch, then parse
        unless the caller asked for the raw text.
        '''
        resp = self._request(path)
        if not raw:
            resp = self._parse_csv(resp)
        return resp

    def get_csv_urls_all(self, raw=False):
        '''All known URLs as parsed CSV rows (raw text when raw=True).'''
        return self._get_csv('csv/', raw)

    def get_csv_urls_recent(self, raw=False):
        '''Recently added URLs as parsed CSV rows (raw text when raw=True).'''
        return self._get_csv('csv_recent/', raw)

    def get_csv_urls_online(self, raw=False):
        '''Currently online URLs as parsed CSV rows (raw text when raw=True).'''
        return self._get_csv('csv_online/', raw)

    def get_text_urls_all(self):
        '''Plain-text list of all URLs.'''
        return self._request('text/')

    def get_text_urls_recent(self):
        '''Plain-text list of recently added URLs.'''
        return self._request('text_recent/')

    def get_text_urls_online(self):
        '''Plain-text list of currently online URLs.'''
        return self._request('text_online/')

    def get_hostfile(self):
        '''Hosts-file formatted blocklist.'''
        return self._request('hostfile/')

    def get_domains(self):
        '''Domains extracted from the hosts-file blocklist.'''
        hostfile = self.get_hostfile()
        domains = REGEX_HOSTFILE_DOMAIN.findall(hostfile)
        return domains

    def get_payloads(self, raw=False):
        '''
        Payload list as parsed CSV rows (raw text when raw=True).
        Fix: the `raw` flag used to be accepted but silently ignored.
        '''
        return self._get_csv('payloads/', raw)
|
nilq/baby-python
|
python
|
from enum import Enum
from typing import List, NewType
TeamID = NewType("TeamID", int)
class RoleType(Enum):
    """Integer codes for the roles an agent can hold within a team."""
    PLANNER = 0
    OPERATOR = 1
    LINKER = 2
    KEYFARMING = 3
    CLEANER = 4
    FIELD_AGENT = 5
    ITEM_SPONSOR = 6
    KEY_TRANSPORT = 7
    RECHARGING = 8
    SOFTWARE_SUPPORT = 9
    ANOMALY_TL = 10
    TEAM_LEAD = 11
    OTHER = 99  # catch-all for roles not covered above
class TeamRole:
    """A role held within a team, with its name resolved to a ``RoleType``."""

    # Mapping from role-name strings to RoleType members. Hoisted to a class
    # attribute (as the old inline TODO requested) so the table is built once
    # instead of on every instantiation; call ``.value`` on the RoleType when
    # sending it to the server.
    _ROLE_TRANSLATION = {
        "Planner": RoleType.PLANNER,
        "Operator": RoleType.OPERATOR,
        "Linker": RoleType.LINKER,
        "Keyfarming": RoleType.KEYFARMING,
        "Cleaner": RoleType.CLEANER,
        "Field Agent": RoleType.FIELD_AGENT,
        "Item Sponser": RoleType.ITEM_SPONSOR,  # (sic) existing key spelling kept
        "Key Transport": RoleType.KEY_TRANSPORT,
        "Recharging": RoleType.RECHARGING,
        "Software Support": RoleType.SOFTWARE_SUPPORT,
        "Anomaly TL": RoleType.ANOMALY_TL,
        "Team Lead": RoleType.TEAM_LEAD,
        "Other": RoleType.OTHER
    }

    def __init__(self, id, name):
        """Store the role id and resolve ``name`` to a RoleType.

        Raises:
            KeyError: if ``name`` is not a recognised role name.
        """
        self._id = id
        self._name = self._ROLE_TRANSLATION[name]

    @property
    def id(self):
        return self._id

    @property
    def name(self):
        # Note: returns the resolved RoleType member, not the original string.
        return self._name
class Team:
    """One team entry from the API, exposing its id, name and member roles."""

    def __init__(self, api_result):
        # Pull the scalar fields out of the payload and materialise the
        # roles as TeamRole objects up front.
        self._teamid = api_result["teamid"]
        self._team = api_result["team"]
        self._roles = [
            TeamRole(entry["id"], entry["name"]) for entry in api_result["roles"]
        ]

    @property
    def teamid(self) -> int:
        """Numeric team identifier."""
        return self._teamid

    @property
    def team(self) -> str:
        """Team display name."""
        return self._team

    @property
    def roles(self) -> List[TeamRole]:
        """Roles attached to this team."""
        return self._roles
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import boto
import boto.s3
# Tiny S3 helper CLI: `upload|download <bucket> <key> <file>` or `list <bucket>`.
command = sys.argv[1]

conn = boto.connect_s3()

if command == "upload":
    bucketname = sys.argv[2]
    keyname = sys.argv[3]
    filename = sys.argv[4]
    bucket = conn.get_bucket(bucketname)
    key = bucket.new_key(keyname)
    key.set_contents_from_filename(filename)
elif command == "download":
    bucketname = sys.argv[2]
    keyname = sys.argv[3]
    filename = sys.argv[4]
    bucket = conn.get_bucket(bucketname)
    key = bucket.new_key(keyname)
    key.get_contents_to_filename(filename)
elif command == "list":
    bucketname = sys.argv[2]
    bucket = conn.get_bucket(bucketname)
    keys = bucket.get_all_keys()
    for key in keys:
        # print() works on both Python 2 and 3; the old `print key`
        # statement form is a syntax error under Python 3.
        print(key)
else:
    raise Exception("unknown command: %s" % command)
|
nilq/baby-python
|
python
|
import os
import shutil
import datetime
import functools
import subprocess
import xml.etree.ElementTree as ET
import numpy as np
import torch
import logging
from util.misc import all_gather
from collections import OrderedDict, defaultdict
class OWEvaluator:
    """Open-world PASCAL-VOC-style detection evaluator.

    Accumulates per-image predictions as VOC result lines, then runs
    ``voc_eval`` per class and prints per-class AP plus the open-set metrics
    (Wilderness Impact, absolute open-set error, unknown-class stats).
    """

    def __init__(self, voc_gt, iou_types, args=None, use_07_metric=True, ovthresh=list(range(50, 100, 5))):
        assert tuple(iou_types) == ('bbox',)
        self.use_07_metric = use_07_metric
        self.ovthresh = ovthresh
        self.voc_gt = voc_gt
        self.eps = torch.finfo(torch.float64).eps
        self.num_classes = len(self.voc_gt.CLASS_NAMES)
        self._class_names = self.voc_gt.CLASS_NAMES
        self.AP = torch.zeros(self.num_classes, 1)
        # Per-IoU-threshold accumulators, filled in accumulate().
        self.all_recs = defaultdict(list)
        self.all_precs = defaultdict(list)
        self.recs = defaultdict(list)
        self.precs = defaultdict(list)
        self.num_unks = defaultdict(list)
        self.unk_det_as_knowns = defaultdict(list)
        self.tp_plus_fp_cs = defaultdict(list)
        self.fp_os = defaultdict(list)
        # Mimic the COCO evaluator interface so callers can read .stats/.eval.
        self.coco_eval = dict(bbox=lambda: None)
        self.coco_eval['bbox'].stats = torch.tensor([])
        self.coco_eval['bbox'].eval = dict()
        self.img_ids = []
        self.lines = []
        self.lines_cls = []
        if args is not None:
            self.prev_intro_cls = args.PREV_INTRODUCED_CLS
            self.curr_intro_cls = args.CUR_INTRODUCED_CLS
            self.total_num_class = args.num_classes
            self.unknown_class_index = self.total_num_class - 1
            self.num_seen_classes = self.prev_intro_cls + self.curr_intro_cls
            self.known_classes = self._class_names[:self.num_seen_classes]
            print("testing data details")
            print(self.total_num_class)
            print(self.unknown_class_index)
            print(self.known_classes)
            print(self.voc_gt.CLASS_NAMES)

    def update(self, predictions):
        """Record one batch of predictions.

        ``predictions`` maps image id -> dict with 'boxes', 'labels',
        'scores' tensors; each detection becomes one VOC result line plus
        its class id.
        """
        for img_id, pred in predictions.items():
            pred_boxes, pred_labels, pred_scores = [pred[k].cpu() for k in ['boxes', 'labels', 'scores']]
            image_id = self.voc_gt.convert_image_id(int(img_id), to_string=True)
            self.img_ids.append(img_id)
            classes = pred_labels.tolist()
            for (xmin, ymin, xmax, ymax), cls, score in zip(pred_boxes.tolist(), classes, pred_scores.tolist()):
                # VOC coordinates are 1-based.
                xmin += 1
                ymin += 1
                self.lines.append(f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}")
                self.lines_cls.append(cls)

    def compute_avg_precision_at_many_recall_level_for_unk(self, precisions, recalls):
        """Unknown-class precision at recall levels 0.1 .. 0.9."""
        precs = {}
        for r in range(1, 10):
            r = r / 10
            p = self.compute_avg_precision_at_a_recall_level_for_unk(precisions, recalls, recall_level=r)
            precs[r] = p
        return precs

    def compute_avg_precision_at_a_recall_level_for_unk(self, precisions, recalls, recall_level=0.5):
        """Precision of the unknown class at the point closest to ``recall_level``."""
        precs = {}
        for iou, recall in recalls.items():
            prec = []
            for cls_id, rec in enumerate(recall):
                if cls_id == self.unknown_class_index and len(rec) > 0:
                    p = precisions[iou][cls_id][min(range(len(rec)), key=lambda i: abs(rec[i] - recall_level))]
                    prec.append(p)
            if len(prec) > 0:
                precs[iou] = np.mean(prec)
            else:
                precs[iou] = 0
        return precs

    def compute_WI_at_many_recall_level(self, recalls, tp_plus_fp_cs, fp_os):
        """Wilderness Impact at recall levels 0.1 .. 0.9."""
        wi_at_recall = {}
        for r in range(1, 10):
            r = r / 10
            wi = self.compute_WI_at_a_recall_level(recalls, tp_plus_fp_cs, fp_os, recall_level=r)
            wi_at_recall[r] = wi
        return wi_at_recall

    def compute_WI_at_a_recall_level(self, recalls, tp_plus_fp_cs, fp_os, recall_level=0.5):
        """WI = mean open-set FP / mean closed-set (TP+FP) over seen classes,
        each evaluated at the point closest to ``recall_level``."""
        wi_at_iou = {}
        for iou, recall in recalls.items():
            tp_plus_fps = []
            fps = []
            for cls_id, rec in enumerate(recall):
                if cls_id in range(self.num_seen_classes) and len(rec) > 0:
                    index = min(range(len(rec)), key=lambda i: abs(rec[i] - recall_level))
                    tp_plus_fp = tp_plus_fp_cs[iou][cls_id][index]
                    tp_plus_fps.append(tp_plus_fp)
                    fp = fp_os[iou][cls_id][index]
                    fps.append(fp)
            if len(tp_plus_fps) > 0:
                wi_at_iou[iou] = np.mean(fps) / np.mean(tp_plus_fps)
            else:
                wi_at_iou[iou] = 0
        return wi_at_iou

    def synchronize_between_processes(self):
        """Gather detections from all distributed workers onto every rank."""
        self.img_ids = torch.tensor(self.img_ids, dtype=torch.int64)
        self.lines_cls = torch.tensor(self.lines_cls, dtype=torch.int64)
        self.img_ids, self.lines, self.lines_cls = self.merge(self.img_ids, self.lines, self.lines_cls)

    def merge(self, img_ids, lines, lines_cls):
        # all_gather returns one item per process; concatenate / flatten them.
        flatten = lambda ls: [s for l in ls for s in l]
        all_img_ids = torch.cat(all_gather(img_ids))
        all_lines_cls = torch.cat(all_gather(lines_cls))
        all_lines = flatten(all_gather(lines))
        return all_img_ids, all_lines, all_lines_cls

    def accumulate(self):
        """Run the VOC evaluation per class over all recorded detections."""
        for class_label_ind, class_label in enumerate(self.voc_gt.CLASS_NAMES):
            lines_by_class = [l + '\n' for l, c in zip(self.lines, self.lines_cls.tolist()) if c == class_label_ind]
            print(class_label + " has " + str(len(lines_by_class)) + " predictions.")
            ovthresh = 50
            ovthresh_ind, _ = map(self.ovthresh.index, [50, 75])
            self.rec, self.prec, self.AP[class_label_ind, ovthresh_ind], self.unk_det_as_known, \
                self.num_unk, self.tp_plus_fp_closed_set, self.fp_open_set = voc_eval(
                    lines_by_class, self.voc_gt.annotations, self.voc_gt.image_set, class_label,
                    ovthresh=ovthresh / 100.0, use_07_metric=self.use_07_metric,
                    known_classes=self.known_classes)
            self.AP[class_label_ind, ovthresh_ind] = self.AP[class_label_ind, ovthresh_ind] * 100
            self.all_recs[ovthresh].append(self.rec)
            self.all_precs[ovthresh].append(self.prec)
            self.num_unks[ovthresh].append(self.num_unk)
            self.unk_det_as_knowns[ovthresh].append(self.unk_det_as_known)
            self.tp_plus_fp_cs[ovthresh].append(self.tp_plus_fp_closed_set)
            self.fp_os[ovthresh].append(self.fp_open_set)
            try:
                self.recs[ovthresh].append(self.rec[-1] * 100)
                self.precs[ovthresh].append(self.prec[-1] * 100)
            except IndexError:
                # No detections for this class: rec/prec are empty arrays.
                # (The old bare `except:` also hid unrelated errors.)
                self.recs[ovthresh].append(0.)
                self.precs[ovthresh].append(0.)

    def summarize(self, fmt='{:.06f}'):
        """Print mAP and open-world metrics; fill coco_eval['bbox'].stats."""
        o50, _ = map(self.ovthresh.index, [50, 75])
        mAP = float(self.AP.mean())
        mAP50 = float(self.AP[:, o50].mean())
        print('detection mAP50:', fmt.format(mAP50))
        print('detection mAP:', fmt.format(mAP))
        print('---AP50---')
        wi = self.compute_WI_at_many_recall_level(self.all_recs, self.tp_plus_fp_cs, self.fp_os)
        print('Wilderness Impact: ' + str(wi))
        avg_precision_unk = self.compute_avg_precision_at_many_recall_level_for_unk(self.all_precs, self.all_recs)
        print('avg_precision: ' + str(avg_precision_unk))
        total_num_unk_det_as_known = {iou: np.sum(x) for iou, x in self.unk_det_as_knowns.items()}
        total_num_unk = self.num_unks[50][0]
        print('Absolute OSE (total_num_unk_det_as_known): ' + str(total_num_unk_det_as_known))
        print('total_num_unk ' + str(total_num_unk))
        print("AP50: " + str(['%.1f' % x for x in self.AP[:, o50]]))
        print("Precisions50: " + str(['%.1f' % x for x in self.precs[50]]))
        print("Recall50: " + str(['%.1f' % x for x in self.recs[50]]))
        if self.prev_intro_cls > 0:
            print("Prev class AP50: " + str(self.AP[:, o50][:self.prev_intro_cls].mean()))
            print("Prev class Precisions50: " + str(np.mean(self.precs[50][:self.prev_intro_cls])))
            print("Prev class Recall50: " + str(np.mean(self.recs[50][:self.prev_intro_cls])))
        print("Current class AP50: " + str(self.AP[:, o50][self.prev_intro_cls:self.prev_intro_cls + self.curr_intro_cls].mean()))
        print("Current class Precisions50: " + str(np.mean(self.precs[50][self.prev_intro_cls:self.prev_intro_cls + self.curr_intro_cls])))
        print("Current class Recall50: " + str(np.mean(self.recs[50][self.prev_intro_cls:self.prev_intro_cls + self.curr_intro_cls])))
        print("Known AP50: " + str(self.AP[:, o50][:self.prev_intro_cls + self.curr_intro_cls].mean()))
        print("Known Precisions50: " + str(np.mean(self.precs[50][:self.prev_intro_cls + self.curr_intro_cls])))
        print("Known Recall50: " + str(np.mean(self.recs[50][:self.prev_intro_cls + self.curr_intro_cls])))
        print("Unknown AP50: " + str(self.AP[:, o50][-1]))
        print("Unknown Precisions50: " + str(self.precs[50][-1]))
        print("Unknown Recall50: " + str(self.recs[50][-1]))
        for class_name, ap in zip(self.voc_gt.CLASS_NAMES, self.AP[:, o50].cpu().tolist()):
            print(class_name, fmt.format(ap))
        # stats = [mAP50, mAP, per-class APs] in COCO-evaluator style.
        self.coco_eval['bbox'].stats = torch.cat(
            [self.AP[:, o50].mean(dim=0, keepdim=True),
             self.AP.flatten().mean(dim=0, keepdim=True), self.AP.flatten()])
def voc_ap(rec, prec, use_07_metric=False):
    """Compute PASCAL VOC average precision from recall/precision arrays.

    With ``use_07_metric``, uses the VOC2007 11-point interpolation;
    otherwise integrates the precision envelope over the exact PR curve.
    """
    if use_07_metric:
        # 11-point metric: average the max precision at recall >= t.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            at_least_t = rec >= t
            p = np.max(prec[at_least_t]) if np.sum(at_least_t) else 0
            ap += p / 11.
        return ap
    # Exact area under the PR curve: pad with sentinels, ...
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # ... make precision monotonically non-increasing (the envelope), ...
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # ... then sum precision * delta-recall where recall changes.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
@functools.lru_cache(maxsize=None)
def parse_rec(filename, known_classes):
    """Parse a PASCAL VOC xml annotation file into a list of object dicts.

    COCO-style names are mapped back to their VOC equivalents; any class not
    in ``known_classes`` (a hashable sequence, e.g. a tuple) is relabelled
    'unknown'. Results are cached per (filename, known_classes).
    """
    # COCO-fied name -> original VOC name.
    coco_to_voc = {
        "airplane": "aeroplane",
        "dining table": "diningtable",
        "motorcycle": "motorbike",
        "potted plant": "pottedplant",
        "couch": "sofa",
        "tv": "tvmonitor",
    }
    root = ET.parse(filename).getroot()
    parsed = []
    for node in root.findall('object'):
        label = node.find('name').text
        label = coco_to_voc.get(label, label)
        if label not in known_classes:
            label = 'unknown'
        box = node.find('bndbox')
        parsed.append({
            'name': label,
            'difficult': int(node.find('difficult').text),
            'bbox': [int(box.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
        })
    return parsed
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             ovthresh=0.5,
             use_07_metric=False,
             known_classes=None):
    # --------------------------------------------------------
    # Fast/er R-CNN
    # Licensed under The MIT License [see LICENSE for details]
    # Written by Bharath Hariharan
    # --------------------------------------------------------
    """Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections (detpath.format(classname) gives the result
        file) OR an in-memory list of result lines.
    annopath: Path template to annotations (annopath.format(imagename) gives
        the xml file) OR a list of xml file paths.
    imagesetfile: Text file containing the list of images, one per line, OR
        an in-memory list of image names.
    classname: Category name to evaluate.
    ovthresh: Overlap threshold (default = 0.5).
    use_07_metric: Whether to use VOC07's 11 point AP computation
        (default False).
    known_classes: class names treated as known; everything else becomes
        'unknown'.

    Returns (rec, prec, ap, n_unk_det_as_known, n_unk,
    tp_plus_fp_closed_set, fp_open_set); the last four support the
    open-set metrics (A-OSE and Wilderness Impact).
    """
    def iou(BBGT, bb):
        # Max IoU (and its index) between candidate box `bb` and each
        # ground-truth row of BBGT; VOC convention adds 1 to widths/heights.
        ixmin = np.maximum(BBGT[:, 0], bb[0])
        iymin = np.maximum(BBGT[:, 1], bb[1])
        ixmax = np.minimum(BBGT[:, 2], bb[2])
        iymax = np.minimum(BBGT[:, 3], bb[3])
        iw = np.maximum(ixmax - ixmin + 1., 0.)
        ih = np.maximum(iymax - iymin + 1., 0.)
        inters = iw * ih
        # union
        uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
               (BBGT[:, 2] - BBGT[:, 0] + 1.) *
               (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
        overlaps = inters / uni
        ovmax = np.max(overlaps)
        jmax = np.argmax(overlaps)
        return ovmax, jmax

    # Read the list of images (file path or in-memory list).
    if isinstance(imagesetfile, list):
        lines = imagesetfile
    else:
        with open(imagesetfile, 'r') as f:
            lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    # Load the annotations for every image.
    recs = {}
    if isinstance(annopath, list):
        for a in annopath:
            imagename = os.path.splitext(os.path.basename(a))[0]
            recs[imagename] = parse_rec(a, tuple(known_classes))
    else:
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename), tuple(known_classes))

    # Extract the ground-truth objects for this class.
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # np.bool was removed in NumPy 1.24; builtin bool is the replacement.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # Read the detections (result file or in-memory list).
    if isinstance(detpath, list):
        lines = detpath
    else:
        detfile = detpath.format(classname)
        with open(detfile, 'r') as f:
            lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    if len(splitlines) == 0:
        # Reshape keeps an empty result as a (0, 4) array rather than (0,).
        BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
    else:
        BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # Sort detections by descending confidence.
    sorted_ind = np.argsort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]

    # Go down dets and mark TPs and FPs.
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)
        if BBGT.size > 0:
            ovmax, jmax = iou(BBGT, bb)
        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # Duplicate detection of an already-matched object.
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # Compute precision and recall.
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    '''
    Computing Absolute Open-Set Error (A-OSE) and Wilderness Impact (WI)
    ===========
    Absolute OSE = # of unknown objects classified as known objects of class 'classname'
    WI = FP_openset / (TP_closed_set + FP_closed_set)
    '''
    # Finding GT of unknown objects.
    unknown_class_recs = {}
    n_unk = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj["name"] == 'unknown']
        bbox = np.array([x["bbox"] for x in R])
        difficult = np.array([x["difficult"] for x in R]).astype(bool)  # np.bool removed in NumPy 1.24
        det = [False] * len(R)
        n_unk = n_unk + sum(~difficult)
        unknown_class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}

    if classname == 'unknown':
        return rec, prec, ap, 0., n_unk, None, None

    # Go down each detection and see if it has an overlap with an unknown
    # object. If so, it is an unknown object that was classified as known.
    is_unk = np.zeros(nd)
    for d in range(nd):
        R = unknown_class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R["bbox"].astype(float)
        if BBGT.size > 0:
            # Same computation as for the known boxes: reuse the iou helper
            # instead of the previous duplicated inline version.
            ovmax, jmax = iou(BBGT, bb)
        if ovmax > ovthresh:
            is_unk[d] = 1.0

    is_unk_sum = np.sum(is_unk)
    tp_plus_fp_closed_set = tp + fp
    fp_open_set = np.cumsum(is_unk)

    return rec, prec, ap, is_unk_sum, n_unk, tp_plus_fp_closed_set, fp_open_set
def bbox_nms(boxes, scores, overlap_threshold=0.4, score_threshold=0.0, mask=False):
    # Greedy per-class non-maximum suppression over `boxes` scored per class
    # by the columns of `scores`. Returns one index tensor of kept boxes per
    # class — or, when `mask` is True, boolean masks over all boxes instead.
    def overlap(box1, box2=None, rectint=False, eps=1e-6):
        # Pairwise IoU (or raw intersection area when rectint=True) between
        # two box sets given as (..., 4) tensors of (x1, y1, x2, y2).
        area = lambda boxes=None, x1=None, y1=None, x2=None, y2=None: (boxes[..., 2] - boxes[..., 0]) * (
            boxes[..., 3] - boxes[..., 1]) if boxes is not None else (x2 - x1).clamp(min=0) * (y2 - y1).clamp(
            min=0)
        if box2 is None and not isinstance(box1, list) and box1.dim() == 3:
            # Batched input: recurse over the leading dimension.
            return torch.stack(list(map(overlap, box1)))
        b1, b2 = [(b if b.dim() == 2 else b.unsqueeze(0)).t().contiguous() for b in
                  [box1, (box2 if box2 is not None else box1)]]
        xx1 = torch.max(b1[0].unsqueeze(1), b2[0].unsqueeze(0))
        yy1 = torch.max(b1[1].unsqueeze(1), b2[1].unsqueeze(0))
        xx2 = torch.min(b1[2].unsqueeze(1), b2[2].unsqueeze(0))
        yy2 = torch.min(b1[3].unsqueeze(1), b2[3].unsqueeze(0))
        inter = area(x1=xx1, y1=yy1, x2=xx2, y2=yy2)
        return inter / (area(b1.t()).unsqueeze(1) + area(b2.t()).unsqueeze(0) - inter + eps) if not rectint else inter
    O = overlap(boxes)  # pairwise IoU matrix between all boxes
    I = scores.sort(0)[1]  # per-class box indices sorted by ascending score
    M = scores.gather(0, I).ge(score_threshold)
    M = M if M.any() else M.fill_(1)  # if nothing clears the threshold, keep everything
    pick = []
    for i, m in zip(I.t(), M.t()):
        p = []
        i = i[m]
        while len(i) > 1:
            # Take the highest-scoring remaining box, then drop every other
            # box whose IoU with it reaches overlap_threshold.
            p.append(i[-1])
            m = O[:, i[-1]][i].lt(overlap_threshold)
            m[-1] = 0
            i = i[m]
        pick.append(torch.tensor(p + i.tolist(), dtype=torch.int64))
    return pick if not mask else torch.stack(
        [torch.zeros(len(scores), dtype=torch.bool).scatter_(0, p, 1) for p in pick])
def package_submission(out_dir, image_file_name, class_labels, VOCYEAR, SUBSET, TASK, tar=True, **kwargs):
    """Write PASCAL VOC result files (one per class) and optionally tar them.

    :param out_dir: directory under which ``results/<VOCYEAR>/Main`` is created
        (an existing ``results`` tree is removed first)
    :param image_file_name: list of image basenames, aligned with ``kwargs`` data
    :param class_labels: class names, one result file each
    :param VOCYEAR: e.g. ``'VOC2007'``
    :param SUBSET: image-set name, e.g. ``'test'``
    :param TASK: ``'<competition>_<task>'``, task being ``cls`` or ``det``
    :param tar: if True, pack the results tree into a ``.tar.gz`` in ``out_dir``
    :param kwargs: forwarded to the per-class writer — ``scores`` for ``cls``;
        ``scores``, ``proposals`` and ``nms`` (kept indices) for ``det``
    :return: result-file path template containing two ``%s`` placeholders
        (competition, class label)
    """
    def cls(file_path, class_label_ind, scores):
        # Classification format: "<image> <score>" per line.
        with open(file_path, 'w') as f:
            f.writelines(map('{} {}\n'.format, image_file_name, scores[:, class_label_ind].tolist()))

    # Fix: this writer is invoked with **kwargs from detection_mean_ap, which
    # passes the kept indices as ``nms=...``; the parameter was named ``keep``,
    # raising TypeError. Renamed to match the caller (private helper, safe).
    def det(file_path, class_label_ind, scores, proposals, nms):
        zipped = []
        for example_idx, basename in enumerate(image_file_name):
            I = nms[example_idx][class_label_ind]
            # Detection format: "<image> <score> <x1> <y1> <x2> <y2>";
            # coordinates shifted by +1 into VOC's 1-based convention.
            zipped.extend((basename, s) + tuple(p) for s, p in zip(scores[example_idx][I, class_label_ind].tolist(),
                                                                   proposals[example_idx][I, :4].add(1).tolist()))
        with open(file_path, 'w') as f:
            f.writelines(map('{} {} {:.0f} {:.0f} {:.0f} {:.0f} \n'.format, *zip(*zipped)))

    task_a, task_b = TASK.split('_')
    resdir = os.path.join(out_dir, 'results')
    # Template keeps %s placeholders for competition id and class label.
    respath = os.path.join(resdir, VOCYEAR, 'Main', '%s_{}_{}_%s.txt'.format(task_b, SUBSET))

    # Start from a clean results tree.
    if os.path.exists(resdir):
        shutil.rmtree(resdir)
    os.makedirs(os.path.join(resdir, VOCYEAR, 'Main'))

    for class_label_ind, class_label in enumerate(class_labels):
        dict(det=det, cls=cls)[task_b](respath.replace('%s', '{}').format(task_a, class_label), class_label_ind,
                                       **kwargs)

    if tar:
        subprocess.check_call(['tar', '-czf', 'results-{}-{}-{}.tar.gz'.format(VOCYEAR, TASK, SUBSET), 'results'],
                              cwd=out_dir)
    return respath
def detection_mean_ap(out_dir, image_file_name, class_labels, VOCYEAR, SUBSET, VOC_DEVKIT_VOCYEAR, scores=None,
                      boxes=None, nms_score_threshold=1e-4, nms_overlap_threshold=0.4, tar=False, octave=False,
                      cmd='octave --eval', env=None, stdout_stderr=open(os.devnull, 'wb'), do_nms=True):
    """Compute PASCAL VOC detection mean average precision.

    Runs NMS (optionally), writes result files via ``package_submission``, then
    evaluates either with the official Octave VOCdevkit (``octave=True``) or
    with the in-process ``voc_eval`` (07 metric).

    :return: (mean AP as a torch scalar, list of per-class APs)

    NOTE(review): the ``stdout_stderr=open(os.devnull, 'wb')`` default opens a
    file handle at import time and shares it across calls — confirm intended.
    """
    if scores is not None:
        # Per-image, per-class kept indices; identity "keep everything" when
        # NMS is disabled.
        nms = list(map(lambda s, p: bbox_nms(p, s, overlap_threshold=nms_overlap_threshold,
                                             score_threshold=nms_score_threshold), scores, boxes)) if do_nms else [
            torch.arange(len(p)) for p in boxes]
    else:
        # No scores supplied: fabricate trivial selections and zero scores so
        # the submission files can still be produced.
        nms = torch.arange(len(class_labels)).unsqueeze(0).unsqueeze(-1).expand(len(image_file_name), len(class_labels),
                                                                                1)
        scores = torch.zeros(len(image_file_name), len(class_labels), len(class_labels))

    imgsetpath = os.path.join(VOC_DEVKIT_VOCYEAR, 'ImageSets', 'Main', SUBSET + '.txt')
    detrespath = package_submission(out_dir, image_file_name, class_labels, VOCYEAR, SUBSET, 'comp4_det', tar=tar,
                                    scores=scores, proposals=boxes, nms=nms)

    if octave:
        # The devkit expects "<image> <flag>" lines; append a dummy -1 flag.
        imgsetpath_fix = os.path.join(out_dir, detection_mean_ap.__name__ + '.txt')
        with open(imgsetpath_fix, 'w') as f:
            f.writelines([line[:-1] + ' -1\n' for line in open(imgsetpath)])
        # One Octave process per class; each writes its AP into the results
        # path under the 'resu4' competition id.
        procs = [subprocess.Popen(cmd.split() + [
            "oldpwd = pwd; cd('{}/..'); addpath(fullfile(pwd, 'VOCcode')); VOCinit; cd(oldpwd); VOCopts.testset = '{}'; VOCopts.detrespath = '{}'; VOCopts.imgsetpath = '{}'; classlabel = '{}'; warning('off', 'Octave:possible-matlab-short-circuit-operator'); warning('off', 'Octave:num-to-str'); [rec, prec, ap] = VOCevaldet(VOCopts, 'comp4', classlabel, false); dlmwrite(sprintf(VOCopts.detrespath, 'resu4', classlabel), ap); quit;".format(
            VOC_DEVKIT_VOCYEAR, SUBSET, detrespath, imgsetpath_fix, class_label)], stdout=stdout_stderr,
            stderr=stdout_stderr, env=env) for class_label in class_labels]
        # proc.wait() returns 0 on success, so the ``or`` falls through to the
        # AP value read back from the per-class file.
        res = list(map(lambda class_label, proc: proc.wait() or float(open(detrespath % ('resu4', class_label)).read()),
                       class_labels, procs))
    else:
        # Pure-Python evaluation; [-1] picks the AP from (rec, prec, ap, ...).
        res = [voc_eval(detrespath.replace('%s', '{}').format('comp4', '{}'),
                        os.path.join(VOC_DEVKIT_VOCYEAR, 'Annotations', '{}.xml'), imgsetpath, class_label,
                        cachedir=os.path.join(out_dir, 'cache_detection_mean_ap_' + SUBSET), use_07_metric=True)[-1] for
               class_label in class_labels]
    return torch.tensor(res).mean(), res
|
nilq/baby-python
|
python
|
# a method for obtaining a rough estimate of species richness on islands with transient dynamics
# check it gives reasonable estimates

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# check a range of parameter values
# ---

# where to save results
dir_results = '../../../results/verify/test_sampling/'
suffix = '_rough_estimate'

J = 10000  # number of individuals on an island
theta = 30  # fundamental biodiversity number

# immigration rates
mV = [0.0005, 0.005, 0.01, 0.05, 0.1]

# time in generations since an island separated from the mainland
TV = [50, 100, 500, 1000, 5000, 100000]

# for each parameter combination, make rough estimate of species richness
# ---

E_SM = list()  # a place to store species richnesses, one row per T
for T in TV:
    E_SV = list()
    for m in mV:
        # find the expected number of founders using Chen & Chen's asymptotic approximation
        W = J*m / (1-m)
        alpha = T/2
        beta = (W-1)*T/(2*J)
        D = ( T*(W-1)/2 ) / ( alpha*(np.exp(beta)-1) + beta*np.exp(beta) )
        D = int(round(D))
        # expected number of ancestors given the number of founders
        E_C = D + sum( W / (W+i) for i in range(D,J) )
        E_C = int(round(E_C))
        # expected number of species given the number of ancestors
        E_S = sum( theta / (theta+i) for i in range(E_C) )
        # store
        E_SV.append(E_S)
    # store
    E_SM.append(E_SV)

# for each parameter combination, average the species richnesses from the samples
# ---

# read in the dataframe of sampled communities
fname = dir_results + 'samples' + suffix + '.csv'
df = pd.read_csv(fname)

S_SM = list()  # a place to store species richnesses from samples for each T
for T in TV:
    # find the entries that match the T
    df_sub = df[ df['T_0'] == T ]
    # the islands are in the same order as mV (5 of them), so find the no species on each island
    SV = df_sub['no_spp_S'].values
    HV = df_sub['no_isles_H'].values
    data_row_as_strV = df_sub['presence_absence_matrix_cols_(isles)_concatenated'].values
    richness_islands = list()
    for S, H, data_row_as_str in zip(SV, HV, data_row_as_strV):
        # split the concatenated presence/absence string into one length-S substring per island
        isle_strings = [ data_row_as_str[i:i+S] for i in range(0, S*H, S) ]
        # each 'p' marks a species present on that island
        richnesses = [ this_isle.count('p') for this_isle in isle_strings ]
        richness_islands.append(richnesses)
    # average richness per island (i.e. per immigration rate) across samples
    S_SV = np.mean(np.array(richness_islands), axis=0)
    S_SM.append(S_SV)

# plot it
# ---

colour_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']

# add fake markers so the legend can distinguish rough estimate versus sample
plt.plot([], [], color='black', alpha=0.5, label='rough estimate')
plt.scatter([], [], marker='o', color='black', alpha=0.5, label='average over 30 samples')

for T, E_SV, S_SV, colour in zip(TV, E_SM, S_SM, colour_cycle):
    plt.plot(mV, E_SV, color=colour, alpha=0.5, label = r'$T = ' + str(T) + '$')
    plt.scatter(mV, S_SV, marker='o', color=colour, alpha=0.5)

plt.legend(loc='best')
plt.xlabel('immigrant probability')
plt.ylabel('number of species')
plt.xscale('log')
plt.tight_layout()
plt.savefig(dir_results + 'check_rough_richness_estimate.pdf')
plt.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import rospy
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import PoseStamped, Point, Quaternion, Twist
from controller_copy import Controller
class Test():
    """Ad-hoc harness wiring a Controller to odometry with a hard-coded goal."""

    def __init__(self):
        self.odom_topic = "/odom"
        # Minimal path containing a single default (origin) pose; the
        # Controller is constructed from it below.
        self.target_path = Path()
        self.target_path.poses.append(PoseStamped())
        self.goal = PoseStamped()  # for testing
        self.goal.pose.position.x = 3.0
        self.goal.pose.position.y = 2.0
        # self.target_path.poses.append(goal)
        self.controller = Controller(self.target_path, odom_topic=self.odom_topic)
if __name__ == "__main__":
rospy.init_node("test")
test = Test()
velocity = Twist()
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
while not rospy.is_shutdown():
velocity = test.controller.get_velocity(test.goal)
pub.publish(velocity)
if test.controller.reached_intermediate_goal():
velocity.linear.x = 0.0
velocity.linear.y = 0.0
pub.publish(velocity)
print("goal reached")
break
else:
continue
|
nilq/baby-python
|
python
|
from pathlib import Path
from typing import List, Tuple
import numpy as np
from relaxations.interval import Interval
from relaxations.linear_bounds import LinearBounds
def load_spec(spec_dir: Path, counter: int) -> List[Tuple[List[Interval], Interval, LinearBounds]]:
    """Parse ``<spec_dir>/<counter>.csv`` into per-split verification specs.

    Line formats, dispatched per line:
      * ``<lower...> | <upper...>`` — linear bound row; first value on each
        side is the bias, the rest are weights
      * ``SPEC_FINISHED``          — terminates the current split
      * lines starting with ``(``  — interval-bounds tuple, parsed via eval
      * anything else              — a row of space-separated parameter floats

    :param spec_dir: directory holding the spec CSV files
    :param counter: index selecting ``<counter>.csv``
    :return: one (parameter intervals, output interval, linear bounds) tuple
        per split
    """
    parameters = list()
    interval_bounds = list()
    lower_biases = list()
    upper_biases = list()
    lower_weights = list()
    upper_weights = list()
    with (spec_dir / f'{counter}.csv').open('r') as f:
        # Per-split accumulators, flushed on every SPEC_FINISHED marker.
        split_parameters = list()
        split_interval_bounds = list()
        split_lower_biases = list()
        split_upper_biases = list()
        split_lower_weights = list()
        split_upper_weights = list()
        for line in f.readlines():
            if '|' in line:
                # Linear bound row: "<bias> <w...> | <bias> <w...>".
                lower, upper = line.strip().split(' | ')
                lower = [float(v) for v in lower.split(' ')]
                upper = [float(v) for v in upper.split(' ')]
                split_lower_biases.append(lower[0])
                split_upper_biases.append(upper[0])
                split_lower_weights.append(lower[1:])
                split_upper_weights.append(upper[1:])
            elif 'SPEC_FINISHED' in line:
                # Close out the current split and reset the accumulators.
                parameters.append(np.asarray(split_parameters))
                interval_bounds.append(np.asarray(split_interval_bounds))
                lower_biases.append(np.asarray(split_lower_biases))
                upper_biases.append(np.asarray(split_upper_biases))
                lower_weights.append(np.asarray(split_lower_weights))
                upper_weights.append(np.asarray(split_upper_weights))
                split_parameters = list()
                split_interval_bounds = list()
                split_lower_biases = list()
                split_upper_biases = list()
                split_lower_weights = list()
                split_upper_weights = list()
            elif line.startswith('('):
                # NOTE: eval() executes file content — only safe for trusted
                # spec files produced by this toolchain.
                split_interval_bounds.extend(eval(line))
            else:
                split_parameters.append([float(v) for v in line.strip().split(' ')])
    # NOTE(review): assumes every split has identical dimensions; ragged
    # splits would yield object arrays here — confirm against the generator.
    parameters = np.array(parameters)
    interval_bounds = np.asarray(interval_bounds)
    lower_biases = np.asarray(lower_biases)
    upper_biases = np.asarray(upper_biases)
    lower_weights = np.asarray(lower_weights)
    upper_weights = np.asarray(upper_weights)
    result = list()
    for i in range(len(parameters)):
        # Each parameter row is a (lower, upper) pair.
        params = [Interval(param[0], param[1]) for param in parameters[i]]
        bounds = Interval(
            lower_bound=interval_bounds[i][:, 0],
            upper_bound=interval_bounds[i][:, 1]
        )
        constraints = LinearBounds(
            upper_slope=upper_weights[i],
            upper_offset=upper_biases[i],
            lower_slope=lower_weights[i],
            lower_offset=lower_biases[i]
        )
        result.append((params, bounds, constraints))
    return result
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
def parse_mapping(ground_truth):
    """Build a label -> integer-id mapping from a ground-truth CSV file.

    Each line is expected as ``<filename>,<label>,x1,y1,x2,y2``; labels are
    numbered contiguously in order of first appearance.

    :param ground_truth: path to the ground-truth CSV file
    :return: dict mapping class label to integer id

    Fixes: the original built the mapping but never returned it, left the
    file handle open, and shadowed the builtin ``map``.
    """
    mapping = {}
    with open(ground_truth, "r") as f:
        for line in f:
            label = line.strip().split(",")[1]
            if label not in mapping:
                mapping[label] = len(mapping)
    return mapping
def parse_data(ground_truth):
    """Parse a ground-truth CSV into per-image annotation records.

    Each line is ``<basename>,<label>,x1,y1,x2,y2``. The image file is read
    once per distinct basename to record its dimensions.

    :param ground_truth: path to the ground-truth CSV file
    :return: (list of per-image dicts with filepath/height/width/bboxes,
              dict mapping label -> occurrence count)

    Fix: the file handle was opened without ever being closed; now managed
    with a context manager.
    """
    images = {}
    label_dict = {}
    with open(ground_truth, "r") as f:
        for line in f:
            (fname, label, x1, y1, x2, y2) = line.strip().split(",")
            # count label occurrences across all boxes
            if label in label_dict:
                label_dict[label] += 1
            else:
                label_dict[label] = 1
            fname = "MIO-TCD-Localization/train/" + fname + ".jpg"
            if fname not in images:
                # First box for this image: read it once to get dimensions.
                # NOTE(review): cv2.imread returns None for unreadable paths,
                # which raises AttributeError below — confirm inputs exist.
                images[fname] = {}
                img = cv2.imread(fname)
                h, w, _ = img.shape
                images[fname]["filepath"] = fname
                images[fname]["height"] = h
                images[fname]["width"] = w
                images[fname]["bboxes"] = []
            images[fname]['bboxes'].append(
                {
                    "x1": int(float(x1)),
                    "y1": int(float(y1)),
                    "x2": int(float(x2)),
                    "y2": int(float(y2)),
                    "class": label
                })
    # dicts preserve insertion order (3.7+), matching the original loop.
    return list(images.values()), label_dict
|
nilq/baby-python
|
python
|
# Compute cosine similarity between an input phrase and every phrase in the
# dataset, then write the similarity results to JSON and pickle files.
from sentence_transformers import SentenceTransformer
import numpy as np
import torch
from torch import nn
import pickle
import json

# The query phrase for this run.
key_phrase = 'pulls the trigger'

# Load the dataset (phrase -> id mapping).
with open('combined_word2id_dict.pkl', 'rb') as f:
    phrase_dict = pickle.load(f)

# Load the Phrase-BERT model.
model = SentenceTransformer('whaleloops/phrase-bert')

# Embed the query phrase as a fixed-length vector
# (original note claimed len(p1) = 256 — verify against the model).
p1 = model.encode(key_phrase)

cos_sim = nn.CosineSimilarity(dim=0)
result = {}
# Iterate over every phrase in the dataset.
for phrase, id in phrase_dict.items():
    print('phrase is:', phrase)
    print('id is:', id)
    # Embed the candidate phrase.
    emb = model.encode(phrase)
    # Cosine similarity between the query and the candidate embeddings.
    similarity = cos_sim(torch.tensor(p1), torch.tensor(emb))
    print('similarity is:', similarity)
    # print('similarty.item()', similarity.item())
    result[phrase] = similarity.item()

# Save the results.
with open('results_dict.json', 'w') as f:
    json.dump(result, f, indent=4)
with open('results_dict.pkl', 'wb') as f:
    pickle.dump(result, f)

# print(f'The cosine similarity between phrase 1 and 2 is: {cos_sim( torch.tensor(p1), torch.tensor(p2))}')
# print(f'The cosine similarity between phrase 1 and 3 is: {cos_sim( torch.tensor(p1), torch.tensor(p3))}')
# print(f'The cosine similarity between phrase 2 and 3 is: {cos_sim( torch.tensor(p2), torch.tensor(p3))}')
# print(f'The cosine similarity between phrase 4 and 1 is: {cos_sim( torch.tensor(p4), torch.tensor(p1))}')
# print(f'The cosine similarity between phrase 4 and 5 is: {cos_sim( torch.tensor(p4), torch.tensor(p5))}')
|
nilq/baby-python
|
python
|
# Sum the odd multiples of 3 in [1, 500] and count how many there are.
s = 0
cont = 0
# range(1, 501, 2) already yields only odd numbers, so the original
# redundant "c % 2 == 1" test has been dropped — same result.
for c in range(1, 501, 2):
    if c % 3 == 0:
        s += c
        cont += 1
print(f'A soma de {cont} valores múltiplos de 3 entre 1 e 501 é {s}')
|
nilq/baby-python
|
python
|
import math
import numpy as np
import os
from scipy import ndimage
from scipy.interpolate import RegularGridInterpolator as rgi
import common
import argparse
import ntpath
# Import shipped libraries.
import librender
import libmcubes
from multiprocessing import Pool
# Select the TSDF fusion backend at import time: GPU implementation when
# use_gpu is True, otherwise the CPU fallback. Both are aliased to the same
# names (libfusion / compute_tsdf) so the rest of the module is agnostic.
use_gpu = True
if use_gpu:
    import libfusiongpu as libfusion
    from libfusiongpu import tsdf_gpu as compute_tsdf
else:
    import libfusioncpu as libfusion
    from libfusioncpu import tsdf_cpu as compute_tsdf
class Fusion:
    """
    Performs TSDF fusion.

    Three operation modes share one CLI: ``render`` produces depth maps from
    an OFF mesh, ``fuse`` turns depth maps into a watertight mesh via TSDF
    fusion + marching cubes, and ``sample`` draws labelled occupancy points
    from the fused TSDF.
    """

    def __init__(self):
        """
        Constructor.
        """
        parser = self.get_parser()
        self.options = parser.parse_args()

        # Flat (fx, fy, cx, cy) layout expected by librender.render.
        self.render_intrinsics = np.array([
            self.options.focal_length_x,
            self.options.focal_length_y,
            self.options.principal_point_x,
            self.options.principal_point_y,
        ], dtype=float)
        # Essentially the same as above, just a slightly different format.
        # (Attribute name "fusion_intrisics" is a historical misspelling.)
        self.fusion_intrisics = np.array([
            [self.options.focal_length_x, 0, self.options.principal_point_x],
            [0, self.options.focal_length_y, self.options.principal_point_y],
            [0, 0, 1]
        ])
        self.image_size = np.array([
            self.options.image_height,
            self.options.image_width,
        ], dtype=np.int32)
        # Mesh will be centered at (0, 0, 1)!
        # Near/far clipping planes around that depth.
        self.znf = np.array([
            1 - 0.75,
            1 + 0.75
        ], dtype=float)
        # Derive voxel size from resolution.
        self.voxel_size = 1./self.options.resolution
        self.truncation = self.options.truncation_factor*self.voxel_size

    def get_parser(self):
        """
        Get parser of tool.

        :return: parser
        """
        parser = argparse.ArgumentParser(description='Scale a set of meshes stored as OFF files.')
        parser.add_argument('--mode', type=str, default='render',
                            help='Operation mode: render, fuse or sample.')
        input_group = parser.add_mutually_exclusive_group(required=True)
        input_group.add_argument('--in_dir', type=str,
                                 help='Path to input directory.')
        input_group.add_argument('--in_file', type=str,
                                 help='Path to input directory.')
        parser.add_argument('--out_dir', type=str,
                            help='Path to output directory; files within are overwritten!')
        parser.add_argument('--t_dir', type=str,
                            help='Path to transformation directory.')
        parser.add_argument('--n_proc', type=int, default=0,
                            help='Number of processes to run in parallel'
                                 '(0 means sequential execution).')
        parser.add_argument('--overwrite', action='store_true',
                            help='Overwrites existing files if true.')
        parser.add_argument('--n_points', type=int, default=100000,
                            help='Number of points to sample per model.')
        parser.add_argument('--n_views', type=int, default=100,
                            help='Number of views per model.')
        parser.add_argument('--image_height', type=int, default=640,
                            help='Depth image height.')
        parser.add_argument('--image_width', type=int, default=640,
                            help='Depth image width.')
        parser.add_argument('--focal_length_x', type=float, default=640,
                            help='Focal length in x direction.')
        parser.add_argument('--focal_length_y', type=float, default=640,
                            help='Focal length in y direction.')
        parser.add_argument('--principal_point_x', type=float, default=320,
                            help='Principal point location in x direction.')
        parser.add_argument('--principal_point_y', type=float, default=320,
                            help='Principal point location in y direction.')
        parser.add_argument('--sample_weighted', action='store_true',
                            help='Whether to use weighted sampling.')
        parser.add_argument('--sample_scale', type=float, default=0.2,
                            help='Scale for weighted sampling.')
        parser.add_argument(
            '--depth_offset_factor', type=float, default=1.5,
            help='The depth maps are offsetted using depth_offset_factor*voxel_size.')
        # NOTE(review): resolution is declared float but used as a grid size
        # in fusion() — confirm callers pass integral values.
        parser.add_argument('--resolution', type=float, default=256,
                            help='Resolution for fusion.')
        parser.add_argument(
            '--truncation_factor', type=float, default=10,
            help='Truncation for fusion is derived as truncation_factor*voxel_size.')

        return parser

    def read_directory(self, directory):
        """
        Read directory.

        :param directory: path to directory
        :return: list of files
        """
        files = []
        for filename in os.listdir(directory):
            files.append(os.path.normpath(os.path.join(directory, filename)))
        return files

    def get_in_files(self):
        """Collect input files from --in_dir or --in_file, skipping those
        whose output already exists unless --overwrite is given."""
        if self.options.in_dir is not None:
            assert os.path.exists(self.options.in_dir)
            common.makedir(self.options.out_dir)
            files = self.read_directory(self.options.in_dir)
        else:
            files = [self.options.in_file]

        if not self.options.overwrite:
            def file_filter(filepath):
                outpath = self.get_outpath(filepath)
                return not os.path.exists(outpath)
            files = list(filter(file_filter, files))

        return files

    def get_outpath(self, filepath):
        """Map an input path to its mode-specific output path
        (render -> .h5, fuse -> .off, sample -> .npz)."""
        filename = os.path.basename(filepath)
        if self.options.mode == 'render':
            outpath = os.path.join(self.options.out_dir, filename + '.h5')
        elif self.options.mode == 'fuse':
            # Double splitext strips chained extensions such as ".off.h5".
            modelname = os.path.splitext(os.path.splitext(filename)[0])[0]
            outpath = os.path.join(self.options.out_dir, modelname + '.off')
        elif self.options.mode == 'sample':
            modelname = os.path.splitext(os.path.splitext(filename)[0])[0]
            outpath = os.path.join(self.options.out_dir, modelname + '.npz')
        return outpath

    def get_points(self):
        """
        See https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere.

        :param n_points: number of points
        :type n_points: int
        :return: list of points
        :rtype: numpy.ndarray
        """
        # Fibonacci-sphere style construction: even y-spacing, golden-angle
        # increments in phi.
        rnd = 1.
        points = []
        offset = 2. / self.options.n_views
        increment = math.pi * (3. - math.sqrt(5.))

        for i in range(self.options.n_views):
            y = ((i * offset) - 1) + (offset / 2)
            r = math.sqrt(1 - pow(y, 2))

            phi = ((i + rnd) % self.options.n_views) * increment

            x = math.cos(phi) * r
            z = math.sin(phi) * r

            points.append([x, y, z])

        # visualization.plot_point_cloud(np.array(points))
        return np.array(points)

    def get_views(self):
        """
        Generate a set of views to generate depth maps from.

        :param n_views: number of views per axis
        :type n_views: int
        :return: rotation matrices
        :rtype: [numpy.ndarray]
        """
        Rs = []
        points = self.get_points()

        for i in range(points.shape[0]):
            # https://math.stackexchange.com/questions/1465611/given-a-point-on-a-sphere-how-do-i-find-the-angles-needed-to-point-at-its-ce
            longitude = - math.atan2(points[i, 0], points[i, 1])
            latitude = math.atan2(points[i, 2], math.sqrt(points[i, 0] ** 2 + points[i, 1] ** 2))

            R_x = np.array([[1, 0, 0],
                            [0, math.cos(latitude), -math.sin(latitude)],
                            [0, math.sin(latitude), math.cos(latitude)]])
            R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)],
                            [0, 1, 0],
                            [-math.sin(longitude), 0, math.cos(longitude)]])

            R = R_y.dot(R_x)
            Rs.append(R)

        return Rs

    def render(self, mesh, Rs):
        """
        Render the given mesh using the generated views.

        :param base_mesh: mesh to render
        :type base_mesh: mesh.Mesh
        :param Rs: rotation matrices
        :type Rs: [numpy.ndarray]
        :return: depth maps
        :rtype: numpy.ndarray
        """
        depthmaps = []
        for i in range(len(Rs)):
            # Rotate vertices into the view, then shift to depth 1 (the
            # center assumed by self.znf).
            np_vertices = Rs[i].dot(mesh.vertices.astype(np.float64).T)
            np_vertices[2, :] += 1

            # librender expects 1-based vertex indices.
            np_faces = mesh.faces.astype(np.float64)
            np_faces += 1

            depthmap, mask, img = \
                librender.render(np_vertices.copy(), np_faces.T.copy(),
                                 self.render_intrinsics, self.znf, self.image_size)

            # This is mainly result of experimenting.
            # The core idea is that the volume of the object is enlarged slightly
            # (by subtracting a constant from the depth map).
            # Dilation additionally enlarges thin structures (e.g. for chairs).
            # NOTE(review): ndimage.morphology.grey_erosion is deprecated in
            # recent SciPy in favour of scipy.ndimage.grey_erosion.
            depthmap -= self.options.depth_offset_factor * self.voxel_size
            depthmap = ndimage.morphology.grey_erosion(depthmap, size=(3, 3))

            depthmaps.append(depthmap)

        return depthmaps

    def fusion(self, depthmaps, Rs):
        """
        Fuse the rendered depth maps.

        :param depthmaps: depth maps
        :type depthmaps: numpy.ndarray
        :param Rs: rotation matrices corresponding to views
        :type Rs: [numpy.ndarray]
        :return: (T)SDF
        :rtype: numpy.ndarray
        """
        # Same intrinsics for every view; translation fixed at (0, 0, 1),
        # matching the shift applied in render().
        Ks = self.fusion_intrisics.reshape((1, 3, 3))
        Ks = np.repeat(Ks, len(depthmaps), axis=0).astype(np.float32)

        Ts = []
        for i in range(len(Rs)):
            Rs[i] = Rs[i]
            Ts.append(np.array([0, 0, 1]))

        Ts = np.array(Ts).astype(np.float32)
        Rs = np.array(Rs).astype(np.float32)

        depthmaps = np.array(depthmaps).astype(np.float32)
        views = libfusion.PyViews(depthmaps, Ks, Rs, Ts)

        # Note that this is an alias defined as libfusiongpu.tsdf_gpu or libfusioncpu.tsdf_cpu!
        tsdf = compute_tsdf(views,
                            self.options.resolution, self.options.resolution,
                            self.options.resolution, self.voxel_size, self.truncation, False)

        # Reorder axes to (x, y, z).
        tsdf = np.transpose(tsdf[0], [2, 1, 0])
        return tsdf

    def run(self):
        """
        Run the tool.
        """
        common.makedir(self.options.out_dir)
        files = self.get_in_files()

        if self.options.mode == 'render':
            method = self.run_render
        elif self.options.mode == 'fuse':
            method = self.run_fuse
        elif self.options.mode == 'sample':
            method = self.run_sample
        else:
            print('Invalid model, choose render or fuse.')
            exit()

        if self.options.n_proc == 0:
            # Sequential execution.
            for filepath in files:
                method(filepath)
        else:
            with Pool(self.options.n_proc) as p:
                p.map(method, files)

    def run_render(self, filepath):
        """
        Run rendering.
        """
        timer = common.Timer()
        Rs = self.get_views()

        timer.reset()
        print('Rendering {}'.format(filepath))
        mesh = common.Mesh.from_off(filepath)
        depths = self.render(mesh, Rs)

        depth_file = self.get_outpath(filepath)
        common.write_hdf5(depth_file, np.array(depths))
        print('[Data] wrote %s (%f seconds)' % (depth_file, timer.elapsed()))

    def run_fuse(self, filepath):
        """
        Run fusion.
        """
        timer = common.Timer()
        Rs = self.get_views()

        # As rendering might be slower, we wait for rendering to finish.
        # This allows to run rendering and fusing in parallel (more or less).
        print('Fusing {}'.format(filepath))

        depths = common.read_hdf5(filepath)

        timer.reset()
        tsdf = self.fusion(depths, Rs)
        # To ensure that the final mesh is indeed watertight
        tsdf = np.pad(tsdf, 1, 'constant', constant_values=1e6)
        vertices, triangles = libmcubes.marching_cubes(-tsdf, 0)
        # Remove padding offset
        vertices -= 1
        # Normalize to [-0.5, 0.5]^3 cube
        vertices /= self.options.resolution
        vertices -= 0.5

        # Undo the normalization applied when the mesh was preprocessed.
        modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]
        t_loc, t_scale = self.get_transform(modelname)
        vertices = t_loc + t_scale * vertices

        off_file = self.get_outpath(filepath)
        libmcubes.export_off(vertices, triangles, off_file)
        print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))

    def run_sample(self, filepath):
        """
        Run sampling.
        """
        timer = common.Timer()
        Rs = self.get_views()

        # As rendering might be slower, we wait for rendering to finish.
        # This allows to run rendering and fusing in parallel (more or less).
        depths = common.read_hdf5(filepath)

        timer.reset()
        tsdf = self.fusion(depths, Rs)

        # Interpolator over the TSDF grid in the [-0.5, 0.5]^3 cube.
        xs = np.linspace(-0.5, 0.5, tsdf.shape[0])
        ys = np.linspace(-0.5, 0.5, tsdf.shape[1])
        zs = np.linspace(-0.5, 0.5, tsdf.shape[2])
        tsdf_func = rgi((xs, ys, zs), tsdf)

        modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]
        points = self.get_random_points(tsdf)
        values = tsdf_func(points)
        t_loc, t_scale = self.get_transform(modelname)

        # Inside the surface where the signed distance is non-positive.
        occupancy = (values <= 0.)
        out_file = self.get_outpath(filepath)
        np.savez(out_file, points=points, occupancy=occupancy, loc=t_loc, scale=t_scale)

        print('[Data] wrote %s (%f seconds)' % (out_file, timer.elapsed()))

    def get_transform(self, modelname):
        """Load the (loc, scale) normalization transform for a model, or the
        identity transform when no --t_dir was given."""
        if self.options.t_dir is not None:
            t_filename = os.path.join(self.options.t_dir, modelname + '.npz')
            t_dict = np.load(t_filename)
            t_loc = t_dict['loc']
            t_scale = t_dict['scale']
        else:
            t_loc = np.zeros(3)
            t_scale = np.ones(3)

        return t_loc, t_scale

    def get_random_points(self, tsdf):
        """Sample n_points points in the [-0.5, 0.5]^3 cube — uniformly, or
        (with --sample_weighted) biased toward voxels near the surface via
        an exponential weight on |tsdf|."""
        N1, N2, N3 = tsdf.shape
        npoints = self.options.n_points

        if not self.options.sample_weighted:
            points = np.random.rand(npoints, 3)
        else:
            df = np.abs(tsdf)
            scale = self.options.sample_scale * df.max()
            indices = np.arange(N1*N2*N3)
            prob = np.exp(-df.flatten() / scale)
            prob = prob / prob.sum()
            indices_rnd = np.random.choice(indices, size=npoints, p=prob)
            idx1, idx2, idx3 = np.unravel_index(indices_rnd, [N1, N2, N3])
            # Jitter within each selected voxel.
            idx1 = idx1 + np.random.rand(npoints)
            idx2 = idx2 + np.random.rand(npoints)
            idx3 = idx3 + np.random.rand(npoints)
            points = np.stack([idx1 / N1, idx2 / N2, idx3 / N3], axis=1)

        points -= 0.5

        return points
if __name__ == '__main__':
    # Construct the tool and run the selected mode (render/fuse/sample).
    Fusion().run()
|
nilq/baby-python
|
python
|
# Read two whitespace-separated integer rows from stdin and print the sum
# of element-wise absolute differences.
values_a = list(map(int, input().rstrip().split()))
values_b = list(map(int, input().rstrip().split()))
total = 0
for idx, left in enumerate(values_a):
    total += abs(left - values_b[idx])
print(total)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-04 12:13
from __future__ import absolute_import, unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled ``unique_awc_vists`` field to ``awc_visits`` on
    both LS AWC-visit aggregate models."""

    dependencies = [
        ('icds_reports', '0080_merge_20181130_1818'),
    ]

    operations = [
        # Same rename applied to the monthly aggregate and the form-level table.
        migrations.RenameField(
            model_name='aggls',
            old_name='unique_awc_vists',
            new_name='awc_visits'
        ),
        migrations.RenameField(
            model_name='aggregatelsawcvisitform',
            old_name='unique_awc_vists',
            new_name='awc_visits'
        )
    ]
|
nilq/baby-python
|
python
|
import storage
import nomadlist
import wikivoyage
def build(cities=None):
    """Index guides for *cities*; when none are given (or the list is empty),
    fetch and index the city list first."""
    target_cities = cities or index_cities()
    index_guides(target_cities)
    return True
def index_cities():
    """Fetch the city list from nomadlist, upsert it into storage, and
    return it."""
    city_list = nomadlist.list_cities()
    storage.upsert_cities(city_list)
    return city_list
def index_guides(cities):
    """Attach a travel guide to every city and upsert the enriched docs."""
    # map() is kept lazy, exactly as the original passed it through.
    storage.upsert_cities(map(build_guide, cities))
    return True
def build_guide(city):
    """Return *city* merged with its wikivoyage guide (guide keys win).

    Fix: ``dict(city.items() + guide.items())`` was a Python 2 idiom — dict
    views don't support ``+`` on Python 3 (TypeError). Copy-then-update
    preserves the same key precedence on both versions.
    """
    guide = wikivoyage.find_city(city['name'])
    merged = dict(city)
    merged.update(guide)
    return merged
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
@Created on: 2019/5/23 16:23
@Author: heyao
@Description:
"""
import os
import warnings
from knowledge_graph.local_config import DevelopmentConfig
# Fall back to a placeholder when the (typically gitignored) production
# config module is absent, e.g. on developer machines.
try:
    from knowledge_graph.production_config import ProductionConfig
except ImportError:
    warnings.warn("you dont have production config")
    # NOTE(review): an empty dict stands in where a config class is used for
    # the other entries; selecting "production" without the module will
    # likely fail downstream — confirm intended.
    ProductionConfig = {}

# Map environment names to configuration objects; KG_CONFIG_NAME selects one.
config = dict(
    default=DevelopmentConfig,
    development=DevelopmentConfig,
    production=ProductionConfig
)
env_name = os.environ.get("KG_CONFIG_NAME", "default")
print("you are on {env_name} server".format(env_name=env_name))
# Rebind ``config`` to the selected configuration object.
config = config[env_name]
|
nilq/baby-python
|
python
|
"""Dataset setting and data loader for MNIST."""
import torch
from torchvision import datasets, transforms
import params
def get_svhn(train):
    """Return a DataLoader over the SVHN dataset, scaled to 28x28 and
    normalized to [-1, 1] per channel.

    :param train: if True return the shuffled loader, else the unshuffled one

    NOTE(review): both loaders wrap ``train_dataset`` — the real test split
    (commented out below) is never used, so ``train=False`` merely disables
    shuffling of the *train* data. Confirm this is intentional.
    """
    print("SVHN Data Loading ...")
    train_dataset = datasets.SVHN(root='/home/hhjung/hhjung/SVHN/', split='train',
                                  transform=transforms.Compose([transforms.Scale(28), transforms.ToTensor()
                                      , transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]),
                                  download=True)

    # test_dataset = datasets.SVHN(root='/home/hhjung/hhjung/SVHN/', split='test',
    #                               transform=transforms.Compose([transforms.Scale(28), transforms.ToTensor()
    #                                   , transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]),
    #                               download=True)

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=params.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=params.batch_size, shuffle=False)

    if train:
        return train_loader
    else:
        return test_loader
|
nilq/baby-python
|
python
|
from collections import namedtuple
# Immutable container for the JWT connection/verification settings used by
# this package.
ConnectArgsType = namedtuple('ConnectArgsType', [
    'verify', 'verify_expiration', 'key', 'audience', 'issuer', 'algorithm', 'auth_header_prefix', 'decode_options'
])

# Module-level settings singleton; starts unconfigured (all fields None) and
# is replaced wholesale by configure().
CONNECT_ARGS = ConnectArgsType(
    verify=None,
    verify_expiration=None,
    key=None,
    audience=None,
    issuer=None,
    algorithm=None,
    auth_header_prefix=None,
    decode_options=None,
)
def configure(
    key,
    audience,
    issuer,
    algorithm,
    verify=True,
    verify_expiration=True,
    auth_header_prefix='Bearer',
    decode_options=None,
):
    """Populate the module-level CONNECT_ARGS with JWT verification settings.

    Rebinds the global singleton; call once at application startup before
    anything reads CONNECT_ARGS.

    :param key: signing/verification key
    :param audience: expected ``aud`` claim
    :param issuer: expected ``iss`` claim
    :param algorithm: JWT algorithm name
    :param verify: whether to verify tokens at all
    :param verify_expiration: whether to check the ``exp`` claim
    :param auth_header_prefix: expected Authorization header scheme
    :param decode_options: extra options for the JWT decoder (defaults to {})
    """
    global CONNECT_ARGS
    CONNECT_ARGS = ConnectArgsType(
        verify=verify,
        verify_expiration=verify_expiration,
        key=key,
        audience=audience,
        issuer=issuer,
        algorithm=algorithm,
        auth_header_prefix=auth_header_prefix,
        # Normalize None to an empty dict so consumers can use it directly.
        decode_options=decode_options or {},
    )

# Public API of this module.
__all__ = ["configure", "CONNECT_ARGS"]
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""
This module contains the function is_same_class
"""
def is_same_class(obj, a_class):
    """Return True if obj is exactly an instance of a_class (subclasses do
    not count), otherwise False.

    Uses ``is`` rather than ``==``: class identity is the correct test and is
    immune to metaclasses overriding equality.
    """
    return type(obj) is a_class
|
nilq/baby-python
|
python
|
from libtad.base_service import BaseService
from libtad.datatypes.places import Place
from libtad.common import XmlUtils
import libtad.constants as Constants
import xml.etree.ElementTree as ET
from urllib.parse import ParseResult, urlunparse, urlencode
from urllib.request import urlopen, Request
from ssl import SSLContext
from typing import List, Dict
class PlacesService(BaseService):
    """
    The places service can be used to retrieve the list of supported places.

    ...

    Attributes
    ----------
    include_coordinates : bool
        Return coordinates for the Geography object.

    Methods
    -------
    get_places()
        Gets list of supported places.
    """

    def __init__(self, access_key: str, secret_key: str):
        """
        Parameters
        ----------
        access_key : str
            Access key.
        secret_key : str
            Secret key.
        """
        super().__init__(access_key, secret_key, "places")
        self.include_coordinates: bool = True

    def get_places(self) -> List[Place]:
        """
        Gets list of supported places.

        Returns
        -------
        places : list of Place
            List of all currently known places, their identifiers and their
            geographical location (if requested).
        """
        args = self.__get_arguments()
        url: str = Constants.ENTRYPOINT + "/" + self._service_name + "?" + urlencode(args)
        req = Request(
                url,
                headers = { "User-Agent": "libtad-py"}
                )
        # NOTE(review): a bare SSLContext() performs no certificate
        # verification by default — confirm this is intended for this API.
        with urlopen(req, context=SSLContext()) as f:
            result: str = f.read().decode("utf-8")

        return self.__from_xml(result)

    def __get_arguments(self) -> Dict[str, object]:
        """Assemble the query-string parameters for the places endpoint."""
        args: Dict[str, object] = self._authentication_options.copy()
        args["lang"] = ",".join(self.language)
        # API expects 0/1 for the geography flag.
        args["geo"] = int(self.include_coordinates)
        args["version"] = str(self._version)
        args["out"] = Constants.DEFAULTRETURNFORMAT
        args["verbosetime"] = str(Constants.DEFAULTVERBOSETIMEVALUE)

        return args

    def __from_xml(self, result: str) -> List[Place]:
        """Parse the XML response into Place objects, raising on API errors."""
        XmlUtils.check_for_errors(result)
        xml: ET.Element = ET.fromstring(result)
        places = xml.find("places")

        return [Place(place_node) for place_node in places.findall("place")]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# zeroex00.com
# rfc1413
import argparse
import socket
import sys
import threading
# Shared accumulators filled by scan workers: successful IDENT responses,
# per-port service banners, and per-port error strings.
master_results = []
master_banners = {}
master_errors = []
def main(args):
    """Drive the scan: validate arguments, resolve the target, query every
    requested port through the IDENT service, and print the results.

    Exits 2 on bad arguments, 1 on interrupt, 0 on completion.
    """
    if not args.query_port and not args.all_ports:
        print("[!] you must specify at least one port or -a")
        exit(2)
    hostname = clean_host(args.host)
    ip_addr = resolve_host(hostname)
    # if not check_ident_port(args.host, args.port, ip_addr):
    #     print("[!] Exiting...")
    #     exit(1)
    if args.all_ports:
        query_ports = list(map(str, range(1, 65536)))
        q_string = "1-65535"
    else:
        query_ports = args.query_port
        q_string = " ".join(query_ports)
    print(
        "[+] starting scan on {0} ({1}) {2} for connections to {3}".format(
            hostname, ip_addr, args.port, q_string
        )
    )
    try:
        do_threaded_work(args.host, args.port, query_ports, verbose=args.verbose)
    except KeyboardInterrupt:
        # Partial results are still useful; print them before bailing out.
        print("Interrupted! Printing results:")
        print_results(suppress=True, verbose=args.verbose)
        print("[!] Errors suppressed on interrupt!")
        exit(1)
    if args.all_ports:
        # A full 65535-port sweep generates too many errors to show.
        print_results(suppress=True, verbose=args.verbose)
        print("[!] Errors suppressed on full scan!")
    else:
        print_results(verbose=args.verbose)
    exit(0)
def parse_args(argv):
    """Build the command-line interface and parse *argv* (list of strings)."""
    cli = argparse.ArgumentParser()
    cli.add_argument("host", help="host to scan")
    cli.add_argument("-q", "--query-port", nargs="+",
                     help="port(s) which the scan will query(ex: 22 or 21 22 23)")
    # argparse applies type= to the string default, so this yields int 113.
    cli.add_argument("-p", "--port", default="113", type=int,
                     help="port IDENT service is listening on (default: 113)")
    cli.add_argument("-a", "--all-ports", action="store_true",
                     help="queries ALL ports!")
    cli.add_argument("-v", "--verbose", action="count", default=0,
                     help="increase verbosity - v: shows full success responses; vv: shows all open port responses")
    return cli.parse_args(argv)
def clean_host(host):
    """Strip a leading http:// or https:// scheme from *host*, if present."""
    for prefix in ("http://", "https://"):
        if host.startswith(prefix):
            return host[len(prefix):]
    return host
def resolve_host(host):
    """Resolve *host* to an IPv4 address, or '?.?.?.?' when resolution fails."""
    try:
        return socket.gethostbyname(host)
    except socket.error:
        return "?.?.?.?"
def check_ident_port(host, port, ip):
    """Return True when *host* accepts a TCP connection on *port*."""
    print("[+] Checking if {0} ({1}) is listening on port: {2}".format(host, ip, port))
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.settimeout(5)
    try:
        probe.connect((host, port))
    except socket.error:
        print("[!] {0} ({1}) is not listening on port: {2}!".format(host, ip, port))
        return False
    except OverflowError:
        print("[!] Invalid port!: {0}".format(port))
        return False
    probe.close()
    return True
def enum_port(host, port, query_port, verbose=0):
    """Query the IDENT service on (host, port) about one target port.

    Opens a throwaway connection to *query_port* so the IDENT daemon has a
    connection pair to report on, sends the RFC 1413 query
    "<server-port>,<client-port>", and records results/banners/errors in
    the module-level master_* collections.

    Bug fix: Python 3 sockets send/receive bytes, so the request is now
    encoded and the responses decoded (the original passed str to send(),
    which raises TypeError, and wrapped recv() in str(), producing
    "b'...'" reprs).
    """
    try:
        target = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        target.connect((host, query_port))
        local_port = target.getsockname()[1]
    except socket.error:
        master_errors.append("{0:>5}: connection refused".format(query_port))
        return
    except OverflowError:
        master_errors.append("{0:>5}: invalid port".format(query_port))
        return
    ident = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ident.connect((host, port))
    try:
        # RFC 1413 query terminated by CR LF.
        ident.send("{0},{1}\x0d\x0a".format(query_port, local_port).encode("ascii"))
        results = ident.recv(4096).decode("ascii", errors="replace")
        # Nudge the target service to elicit a banner, but don't wait long.
        target.settimeout(1)
        target.send(b"\x0d\x0a")
        try:
            banner = target.recv(4096).decode("ascii", errors="replace").strip()
        except socket.error:
            banner = ""
    except Exception:
        master_errors.append("{0:>5}: e".format(query_port))
        target.close()
        ident.close()
        return
    # -vv records every response; otherwise only positive USERID replies.
    if verbose > 1:
        master_results.append(results.strip())
        master_banners[str(query_port)] = str(banner)
    elif ": USERID :" in results:
        master_results.append(results.strip())
        master_banners[str(query_port)] = str(banner)
    target.close()
    ident.close()
def tqdm(iterable):
    """Minimal progress generator: yield items, printing i/total to stderr."""
    total = len(iterable)
    width = len(str(total))
    for pos, item in enumerate(iterable):
        yield item
        print(f"{pos+1:>{width}}/{total}", file=sys.stderr, end="\r")
def do_threaded_work(host, port, q_ports, verbose=0):
    """Spawn one enum_port thread per query port, then wait for all of them."""
    workers = []
    for p in tqdm(q_ports):
        worker = threading.Thread(target=enum_port, args=(host, port, int(p), verbose))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
def print_results(suppress=False, verbose=0):
    """Print the collected IDENT results and, unless suppressed, errors.

    :param suppress: when True, skip the error listing entirely
    :param verbose: 0 prints a Port/Username/Banner table; >0 prints the
        raw IDENT responses alongside any captured banners
    """
    print("[*] Results:")
    if verbose > 0:
        print("\t(VERBOSE: Raw responses || Banners)")
    elif verbose == 0:
        print("\t{0:>5} {1:<20} {2}".format("Port", "Username", "Banner"))
        print("\t{0:>5} {1:<20} {2}".format("----", "--------", "------"))
    for each_result in master_results:
        # IDENT reply shape: "<ports> : USERID : <os> : <username>"
        tmp_result = each_result.split(":")  # ports, USERID, UNIX, username
        result_port = str(tmp_result[0].split(",")[0]).strip()
        result_username = tmp_result[3]
        result_banner = master_banners.get(result_port, "")
        if verbose > 0:
            print("\t{0} || {1}".format(each_result, result_banner))
        else:
            print(
                "\t{0:>5}: {1:<20} {2}".format(
                    result_port, result_username, result_banner
                )
            )
    if suppress:
        return
    print("[!] Errors:")
    for each_result in master_errors:
        print("\t{0}".format(each_result))
    # No data at all usually means the IDENT port answered nothing useful.
    if len(master_results) == 0 and len(master_errors) == 0:
        print(
            (
                "[+] A lack of results AND errors could mean that the specified IDENT port is not actually running the "
                "IDENT service"
            )
        )
# Entry point: parse CLI arguments (excluding argv[0]) and run the scan.
if __name__ == "__main__":
    main(parse_args(sys.argv[1:]))
|
nilq/baby-python
|
python
|
import json
import sys
from twisted.internet import reactor, defer
from twisted.web.client import getPage, HTTPClientFactory
try:
from twisted.internet import ssl
except ImportError:
ssl = None
class Jar:
    """A downloadable server jar identified by long and short name parts.

    Missing short-name parts are filled in by slugifying the corresponding
    long-name part (lowercase, spaces replaced by dashes).
    """
    def __init__(self, name_long, name_short, url):
        self.name_long = list(name_long)
        self.name_short = list(name_short)
        self.url = str(url)
        for i, part in enumerate(self.name_long):
            slug = part.replace(' ', '-').lower()
            if i >= len(self.name_short):
                # Bug fix: the original tested `i > len(...)`, so the
                # i == len case fell through to the elif and raised
                # IndexError when name_short was shorter than name_long.
                self.name_short.append(slug)
            elif self.name_short[i] is None:
                self.name_short[i] = slug
    def __repr__(self):
        return '-'.join(self.name_short)
class JarProvider:
    """Base class for jar sources.

    Subclasses implement work(), call add() for each jar found and
    commit() once done; failures are routed to the shared deferred.
    """
    major = None
    def __init__(self, deferred):
        self.deferred = deferred
        self.response = []
        self.work()
    def get(self, url, callback):
        """Fetch *url*; the body goes to *callback*, failures to self.error."""
        page_d = getPage(str(url))
        page_d.addCallback(callback)
        page_d.addErrback(self.error)
        return page_d
    def add(self, *a, **k):
        """Record one Jar built from the given constructor arguments."""
        self.response.append(Jar(*a, **k))
    def commit(self, d=None):
        """Resolve the provider's deferred with everything added so far."""
        self.deferred.callback(self.response)
    def error(self, d=None):
        """Propagate a failure to the provider's deferred."""
        self.deferred.errback(d)
    def work(self):
        raise NotImplementedError
class JenkinsJarProvider(JarProvider):
    """Jar provider that serves the newest artifact of a Jenkins job.

    Subclasses set ``base`` (Jenkins root URL, trailing slash), ``project``
    (job name) and ``name`` (display name).
    """
    base = None
    project = None
    name = None
    def work(self):
        # Ask Jenkins for metadata about the job's last successful build.
        self.get('{}job/{}/lastSuccessfulBuild/api/json'.format(self.base, self.project), self.handle_data)
    def handle_data(self, data):
        """Parse build metadata and register its first artifact as 'Latest'."""
        data = json.loads(data)
        # NOTE(review): assumes the build has at least one artifact and that
        # the first one is the jar we want — confirm per Jenkins job.
        url = '{}job/{}/lastSuccessfulBuild/artifact/{}'.format(self.base, self.project, data['artifacts'][0]['relativePath'])
        self.add((self.name, 'Latest'), (None, None), url)
        self.commit()
# Import each known server-type provider module (e.g. mk2.servers.vanilla);
# modules that are not installed are silently skipped.
modules = []
for m in ['vanilla']:
    try:
        name = "mk2.servers.{}".format(m)
        __import__(name)
        modules.append(sys.modules[name])
    except ImportError:
        pass
def get_raw():
    """Collect Jar entries from every provider module.

    Returns a Deferred that fires with a flat list of jars; individual
    provider failures are printed and otherwise ignored.
    """
    aggregate = defer.Deferred()
    pending = [defer.succeed([])]
    for provider_module in modules:
        slot = defer.Deferred()
        provider_module.ref(slot)
        pending.append(slot)
    def collect(outcomes):
        gathered = []
        for ok, payload in outcomes:
            if ok:
                gathered.extend(payload)
            else:
                print("error: {}".format(payload.value))
        aggregate.callback(gathered)
    defer.DeferredList(pending, consumeErrors=True).addCallback(collect)
    return aggregate
def jar_list():
    """Return a Deferred firing with a formatted table of available jars."""
    d_result = defer.Deferred()
    def got_results(results):
        rows = [('-'.join(r.name_short), ' '.join(r.name_long)) for r in results]
        widest = max((len(left) for left, _ in rows), default=0)
        lines = [" %s | %s\n" % (left.ljust(widest), right) for left, right in sorted(rows)]
        d_result.callback("".join(lines).rstrip())
    d = get_raw()
    d.addCallbacks(got_results, d_result.errback)
    return d_result
def jar_get(name):
    """Download the jar whose short name matches *name*.

    Returns a Deferred that fires with ``(filename, data)``. The filename
    defaults to the last URL path segment but is overridden by an
    attachment Content-Disposition header when the server supplies one.
    """
    d_result = defer.Deferred()
    def got_data(factory, data):
        filename = factory.path.split('/')[-1]
        #parse the Content-Disposition header
        dis = factory.response_headers.get('content-disposition', None)
        if dis:
            dis = dis[0].split(';')
            if dis[0] == 'attachment':
                for param in dis[1:]:
                    key, value = param.strip().split('=')
                    if key == 'filename':
                        filename = value.replace("\"", "")
        d_result.callback((filename, data))
    def got_results(results):
        # Find the requested jar, then download it over plain TCP or SSL
        # depending on the URL scheme; error out if SSL is unavailable.
        for r in results:
            if name == '-'.join(r.name_short):
                factory = HTTPClientFactory(r.url)
                if factory.scheme == 'https':
                    if ssl:
                        reactor.connectSSL(factory.host, factory.port, factory, ssl.ClientContextFactory())
                    else:
                        d_result.errback(Exception("{} is not available because this installation does not have SSL support!".format(name)))
                else:
                    reactor.connectTCP(factory.host, factory.port, factory)
                factory.deferred.addCallback(lambda d: got_data(factory, d))
                factory.deferred.addErrback(d_result.errback)
                return
        d_result.errback(Exception("{} is not available!".format(name)))
    d = get_raw()
    d.addCallbacks(got_results, d_result.errback)
    return d_result
|
nilq/baby-python
|
python
|
import base64
import configparser
import click
import requests
from logbook import *
# from requests.cookies import RequestsCookieJar
import controller as ctrl
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
# from service.log import init_log
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
logger = Logger(__name__)
account_notify_times = 0
description = (
'''
用户信息配置模块
由于专利网站的改版,现在要求必须要登录账号密码才能进行高级查询,
请使用者到专利网站自行注册账号,并修改一下USERNAME和PASSWORD的值
链接:http://www.pss-system.gov.cn/sipopublicsearch/portal/uiregister-showRegisterPage.shtml
'''
)
class Account:
    """
    Account credentials for the patent-search site, validated on assignment.
    """
    def __init__(self):
        # Username; private by convention, access via self.username.
        self._username = 'romaforever99'
        # Password; private by convention, access via self.password.
        self._password = 'derossi16'
    @property
    def username(self):
        return self._username
    @username.setter
    def username(self, username: str):
        """Set the username; all spaces are removed, empty values rejected."""
        if username is None:
            raise Exception('username invalid')
        username = username.replace(' ', '')
        if username == '':
            raise Exception('username invalid')
        self._username = username
    @property
    def password(self):
        return self._password
    @password.setter
    def password(self, password: str):
        """Set the password; None or empty values are rejected."""
        if password is None or password == '':
            raise Exception('password invalid')
        self._password = password
    def check_username(self, cfg: configparser.ConfigParser):
        """
        Load the username from *cfg*; prompt interactively until a valid
        one is supplied when the config value is missing or invalid.
        :param cfg: parsed configuration with an [account] section
        """
        try:
            self.username = cfg.get('account', 'username')
        except Exception:
            # Was a bare except:, which would also swallow
            # KeyboardInterrupt/SystemExit — narrowed to Exception.
            click.echo(description)
            while True:
                try:
                    self.username = click.prompt('用户名出错,请填写')
                    break
                except Exception:
                    pass
    def check_password(self, cfg: configparser.ConfigParser):
        """
        Load the password from *cfg*; prompt interactively until a valid
        one is supplied when the config value is missing or invalid.
        :param cfg: parsed configuration with an [account] section
        """
        try:
            self.password = cfg.get('account', 'password')
        except Exception:
            while True:
                try:
                    self.password = click.prompt('密码出错,请填写')
                    break
                except Exception:
                    pass
# Module-level singleton used by the login helpers below.
account = Account()
def change_to_base64(source):
    """
    Base64-encode a text value.
    :param source: text to encode
    :return: base64-encoded str
    """
    encoded = base64.b64encode(source.encode('utf-8'))
    return encoded.decode('utf-8')
def get_captcha():
    """
    Fetch the login captcha image, save it to captcha.png, and return the
    text recognised by the KNN model.
    :return: recognised captcha text
    """
    resp = requests.get(url=url_captcha.get('url'), cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
    with open('captcha.png', 'wb') as f:
        f.write(resp.content)
    result = get_captcha_result(CAPTCHA_MODEL_NAME, 'captcha.png')
    return result
def check_login_status():
    """Return True when an authenticated session (via a working proxy) exists."""
    if USE_PROXY:
        try:
            if ctrl.PROXIES is not None:
                notify_ip_address()
                logger.info('当前已有登录状态')
                return True
        except Exception:
            # Was a bare except:, which would also swallow
            # KeyboardInterrupt/SystemExit; any proxy-check failure just
            # means "not logged in".
            pass
    return False
def login(username=None, password=None):
    """
    Log in to the patent-search site, retrying captcha failures.
    :param username: account name; defaults to the module-level account
    :param password: password; defaults to the module-level account
    :return: True on successful login; False after repeated failures
    """
    if username is None or password is None:
        username = account.username
        password = account.password
    ctrl.BEING_LOG = True
    if check_login_status():
        ctrl.BEING_LOG = False
        return True
    error_times = 0
    while True:
        try:
            # logger.debug("before proxy")
            update_proxy()
            # logger.debug("before cookie")
            update_cookies()
            # logger.debug("after cookie")
            busername = change_to_base64(username)
            bpassword = change_to_base64(password)
            captcha = get_captcha()
            logger.info('验证码识别结果:%s' % captcha)
            form_data = url_login.get('form_data')
            form_data.__setitem__('j_validation_code', captcha)
            form_data.__setitem__('j_username', busername)
            form_data.__setitem__('j_password', bpassword)
            resp = requests.post(url=url_login.get('url'), headers=url_login.get('headers'), data=form_data,
                                 cookies=ctrl.COOKIES, proxies=ctrl.PROXIES, timeout=TIMEOUT)
            # Success is detected by a "<username>,欢迎访问" greeting in the page.
            if resp.text.find(username + ',欢迎访问') != -1:
                # The site changed its logic; the following lines are no longer needed:
                # print(resp.cookies)
                # ctrl.COOKIES.__delitem__('IS_LOGIN')
                # ctrl.COOKIES.set('IS_LOGIN', 'true', domain='www.pss-system.gov.cn/sipopublicsearch/patentsearch')
                # Re-seat JSESSIONID on the site's base domain before adopting
                # the response cookies.
                jsession = ctrl.COOKIES.get('JSESSIONID')
                resp.cookies.__delitem__('JSESSIONID')
                resp.cookies.set('JSESSIONID', jsession, domain='www.pss-system.gov.cn')
                update_cookies(resp.cookies)
                requests.post(
                    'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml',
                    cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
                ctrl.BEING_LOG = False
                logger.info('登录成功')
                return True
            else:
                # Most likely a mis-read captcha: retry up to 6 times.
                if error_times > 5:
                    break
                logger.error('登录失败')
                error_times += 1
        except Exception as e:
            logger.error(e)
    ctrl.BEING_LOG = False
    return False
if __name__ == '__main__':
pass
#init_log()
#login('', '')
#print(notify_ip_address())
#resp = requests.post('http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml', cookies=ctrl.COOKIES)
#print(resp.text)
#form_data = url_detail.get('form_data')
# # '''
# # 'nrdAn': '',
# # 'cid': '',
# # 'sid': '',
# # 'wee.bizlog.modulelevel': '0201101'
# # '''
#form_data.__setitem__('nrdAn', 'CN201520137687')
#form_data.__setitem__('cid', 'CN201520137687.320150916XX')
#form_data.__setitem__('sid', 'CN201520137687.320150916XX')
#print(ctrl.COOKIES)
#resp = requests.post(url_detail.get('url'), headers=url_detail.get('headers'), cookies=ctrl.COOKIES, data=form_data)
#print(resp.text)
|
nilq/baby-python
|
python
|
constants.physical_constants["neutron to shielded proton mag. mom. ratio"]
|
nilq/baby-python
|
python
|
""" Test for building manifests for COMBINE archives
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2021-07-19
:Copyright: 2021, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from biomodels_qc.utils import EXTENSION_COMBINE_FORMAT_MAP
import os
import unittest
class CombineArchiveCreationTestCase(unittest.TestCase):
    """Verify every file extension under ../final maps to a COMBINE format."""
    def test_support_for_all_file_extensions(self):
        base_dir = os.path.join(os.path.dirname(__file__), '..', 'final')
        exts = set()
        # Collect every extension used anywhere under base_dir; files with
        # no extension fail immediately with their relative path.
        for root, dirs, files in os.walk(base_dir):
            for name in files:
                _, ext = os.path.splitext(name)
                assert ext and ext[0] == '.', \
                    "`{}` does not have an extension".format(os.path.relpath(os.path.join(root, name), base_dir))
                exts.add(ext)
        unsupported_exts = exts.difference(set(EXTENSION_COMBINE_FORMAT_MAP.keys()))
        if unsupported_exts:
            msg = (
                'biomodels_qc.utils.EXTENSION_COMBINE_FORMAT_MAP '
                'must be extended to support these additional extensions:\n  {}'
            ).format('\n  '.join(sorted(unsupported_exts)))
            raise NotImplementedError(msg)
|
nilq/baby-python
|
python
|
from . import CostFunctions
from . import ActivationFunctions
from . import PyNet
#from . import tfNet #got rid of tensorflow
from . import Autoencoder
from .NeuralNetwork import NeuralNetwork ,NeuralNetworkArray
from .EvolutionaryNeuralNetwork import EvolutionaryNeuralNetwork,PyEvolutionaryNeuralNetwork
from .Tests import ReadCancerData,GetCancerNN
from ._CppInterface import _CreateNetwork,_DestroyNetwork,_LoadNetwork,_SaveNetwork,_InputTrainingData,_InputCrossValidationData,_TrainGradientDescent,_TrainGradientDescentSingle,_GetnSteps,_GetTrainingProgress,_GetCrossValidationProgress,_ClassifyData,_GetL,_Gets,_CreateEvolutionaryNetwork,_DestroyEvolutionaryNetwork,_EvolutionaryNetworkInputData,_EvolutionaryNetworkEvolve
from .MNIST import ReadMNISTLabels,ReadMNISTImages,GetMNISTData,ReadDigit,ReadDigits,_GetFrameRenderer,AnimateMNISTAutoencoder,MNISTAutoEncoder,MNISTClassifier
#---Custom---#"
from . import Globals
#---EndCustom---#
|
nilq/baby-python
|
python
|
{
"targets": [
{
"target_name": "node_ovhook",
"cflags!": ["-fno-exceptions"],
"cflags_cc!": ["-fno-exceptions"],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"./src",
],
'defines': ['NAPI_DISABLE_CPP_EXCEPTIONS', 'UNICODE'],
"sources": [
"./src/utils.hpp",
"./src/main.cc"
],
"libraries": ["user32.lib", "Psapi.lib"]
}
]
}
|
nilq/baby-python
|
python
|
import ephem
from datetime import datetime
import pandas as pd
import numpy as np
import requests
from flask import Flask, render_template, session, redirect, request
import folium
import geocoder
app = Flask(__name__)
def get_latlng():
    """Approximate the user's [latitude, longitude] from their public IP."""
    located = geocoder.ip('me')
    return located.latlng
#https://stackoverflow.com/questions/19513212/can-i-get-the-altitude-with-geopy-in-python-with-longitude-latitude
#Credit: Iain D (https://stackoverflow.com/users/4486474/iain-d)
#Date: March 28, 2021
#This takes around 20ish seconds to run, if elevation not found, just returns 0
def get_elevation(lat, long):
    """Look up ground elevation for a lat/long via the open-elevation API.

    Credit: Iain D on Stack Overflow (March 28, 2021). Slow (~20 s).
    """
    query = ('https://api.open-elevation.com/api/v1/lookup'f'?locations={lat},{long}')
    payload = requests.get(query).json()
    # Flatten the JSON "results" array and pull the single elevation value.
    return pd.json_normalize(payload, 'results')['elevation'].values[0]
def make_observer(lat, long, elev):
    """Build a pyephem Observer at the given position, dated to right now."""
    observer = ephem.Observer()
    observer.lat = lat
    observer.lon = long
    observer.elevation = elev
    observer.date = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    return observer
def calculate_visible(obs, map):
    """Add a folium marker for each satellite in active.txt that is
    currently above *obs*'s local horizon.

    active.txt is expected to hold TLE data: repeating groups of three
    lines (name, line 1, line 2).
    """
    df = pd.read_csv('active.txt', delimiter = "\n", header= None)
    #Reshape dataframe into three column dataframe
    #Is there a better way to do this? Instead of reading in as a dataframe then reshaping, can we read it in a 3 column data frame?
    #https://stackoverflow.com/questions/39761366/transpose-the-data-in-a-column-every-nth-rows-in-pandas
    #Credit: jezrael (https://stackoverflow.com/users/2901002/jezrael)
    new_df = pd.DataFrame(np.reshape(df.values,(int(df.shape[0] / 3),3)),columns=['Name','Line 1','Line 2'])
    #Parse TLE data
    for index, row in new_df.iterrows():
        tle_rec = ephem.readtle(row['Name'], row['Line 1'], row['Line 2'])
        #Perform TLE computations given some observer object
        tle_rec.compute(obs)
        #if altitude over local horizon > 0
        if tle_rec.alt > 0:
            # Convert sub-satellite point from radians to degrees for folium.
            coords = [tle_rec.sublat / ephem.degree, tle_rec.sublong / ephem.degree]
            folium.Marker(coords, popup = tle_rec.name).add_to(map)
def generate_map(latlng):
    """Return a folium map centred on *latlng* ([lat, lng]).

    Fix: removed an unused ``geocoder.ip('me')`` call whose result was
    never read — it only added a pointless network round-trip.
    """
    return folium.Map(location=latlng, zoom_start=13)
@app.route('/')
def index():
    """Render the landing page with the coordinate entry form."""
    return render_template('index.html')
def _parse_float_field(raw, default_value, limit=None):
    """Parse one form field as a float.

    :param raw: raw form value (may be None or '')
    :param default_value: returned when the field is blank
    :param limit: when given, values outside [-limit, limit] are invalid
    :return: the parsed float, *default_value* for blank input, or None
        when the input is invalid
    """
    if raw == '':
        return default_value
    try:
        value = float(raw)
    except Exception:
        return None
    if limit is not None and (value > limit or value < -limit):
        return None
    return value
@app.route('/map', methods=['GET', 'POST'])
def show_map():
    """Handle the coordinate form and render a map of visible satellites.

    Form-handling pattern after pythonise.com's Flask forms series
    (Julian Nash, 2021-03-21). Blank latitude/longitude fall back to
    geoIP values, blank elevation to an elevation-API lookup; any invalid
    input re-renders the entry form.
    """
    if request.method == 'POST':
        req = request.form
        auto_latlng = get_latlng()
        # Latitude must lie in [-90, 90], longitude in [-180, 180].
        latitude = _parse_float_field(req.get("latitude"), auto_latlng[0], limit=90.0)
        if latitude is None:
            return render_template('index.html')
        longitude = _parse_float_field(req.get("longitude"), auto_latlng[1], limit=180.0)
        if longitude is None:
            return render_template('index.html')
        if req.get("elevation") == '':
            # Only hit the (slow) elevation API when the user left it blank.
            elevation = get_elevation(latitude, longitude)
        else:
            # Any numeric elevation is accepted.
            elevation = _parse_float_field(req.get("elevation"), None)
            if elevation is None:
                return render_template('index.html')
        latlng = [latitude, longitude]
        map = generate_map(latlng)
        obs = make_observer(latitude, longitude, elevation)
        # Plot every satellite currently above the observer's horizon.
        calculate_visible(obs, map)
        return map._repr_html_()
    return render_template('index.html')
if __name__ == '__main__':
app.run()
|
nilq/baby-python
|
python
|
from bcc import BPF
# Hello BPF Program
bpf_text = """
#include <net/inet_sock.h>
#include <bcc/proto.h>
// 1. Attach kprobe to "inet_listen"
int kprobe__inet_listen(struct pt_regs *ctx, struct socket *sock, int backlog)
{
bpf_trace_printk("Hello World!\\n");
return 0;
};
int kprobe__ip_rcv(struct pt_regs *ctx, struct sk_buff *skb)
{
bpf_trace_printk("ip_rcv!\\n");
return 0;
};
"""
# 2. Build and Inject program
b = BPF(text=bpf_text)
# 3. Print debug output
while True:
print b.trace_readline()
"""
The first argument to int kprobe__<fn_name>(struct pt_regs *ctx, ...)
is always struct pt_regs *ctx
after that it is the list of arguments the <fn> takes and its optional to have.
"""
|
nilq/baby-python
|
python
|
from tensornetwork.block_sparse import index
from tensornetwork.block_sparse import charge
from tensornetwork.block_sparse import blocksparsetensor
from tensornetwork.block_sparse import linalg
from tensornetwork.block_sparse.blocksparsetensor import (BlockSparseTensor,
ChargeArray,
tensordot,
outerproduct,
compare_shapes)
from tensornetwork.block_sparse.linalg import (svd, qr, diag, sqrt, trace, inv,#pylint: disable=redefined-builtin
pinv, eye, eigh, eig, conj,
reshape, transpose, norm, abs,
sign)
from tensornetwork.block_sparse.initialization import (zeros, ones, randn,
random, empty_like,
ones_like, zeros_like,
randn_like, random_like)
from tensornetwork.block_sparse.index import Index
from tensornetwork.block_sparse.caching import (get_cacher, enable_caching,
disable_caching, clear_cache,
get_caching_status,
set_caching_status)
from tensornetwork.block_sparse.charge import (U1Charge, BaseCharge, Z2Charge,
ZNCharge)
|
nilq/baby-python
|
python
|
from SuperImpose import SuperImpose
class SuperImposeFields(SuperImpose):
    '''
    SuperImpose subclass that layers every field image onto a background
    image at its configured location.
    '''
    @staticmethod
    def run_super_impose_on_all_fields(back_rgba_img, fields_img_loc_dict):
        '''
        Inputs:
            back_rgba_img: PIL object - background image in RGBA format
            fields_img_loc_dict: Dict - per-field image and placement, i.e.:
                {field_name: {img: PIL object, loc: (x,y) }}
        Return:
            superimposed_img: PIL object - superimposed image in RGBA format
        '''
        composed = back_rgba_img.copy()
        # Layer each field onto the running composite at its (x, y) spot.
        for entry in fields_img_loc_dict.values():
            x, y = entry['loc'][0], entry['loc'][1]
            composed = SuperImpose.super_impose(composed, entry['img'], x, y)
        return composed
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
# URL namespace, used when reversing, e.g. reverse('articles:detail').
app_name = 'articles'
urlpatterns = [
    # Article list (index) page.
    path('', views.index, name ='index'),
    # Single-article detail page.
    path('<int:article_id>/', views.detail, name ='detail'),
    # POST target for attaching a comment to an article.
    path('<int:article_id>/leave_comment/', views.leave_comment, name ='leave_comment'),
]
|
nilq/baby-python
|
python
|
# JEWELS AND STONES LEETCODE SOLUTION:
# creating a class.
class Solution(object):
    """LeetCode 771 'Jewels and Stones' solution."""
    def numJewelsInStones(self, jewels, stones):
        """Return how many characters of *stones* appear in *jewels*.

        :param jewels: string of jewel characters (case-sensitive)
        :param stones: string of stone characters
        :return: count of stones that are jewels
        """
        # Build the jewel set once: O(len(jewels) + len(stones)) instead of
        # the original's O(len(jewels) * len(stones)) string membership
        # inside the loop.
        jewel_set = set(jewels)
        return sum(1 for stone in stones if stone in jewel_set)
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.