Dataset columns:

| Column | Type | Stats |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 2 – 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24–24, nullable |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24–24, nullable |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24–24, nullable |
| content | string | length 2 – 1.02M |
| avg_line_length | float64 | 1 – 417k |
| max_line_length | int64 | 1 – 987k |
| alphanum_fraction | float64 | 0 – 1 |
| content_no_comment | string | length 0 – 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
Record 1
hexsha: 1c43bcddd351165d681ed76a72205a2d674eab7f
size: 4,026
ext: py
lang: Python
max_stars_repo_path: cdtcommon/calculator.py
max_stars_repo_name: Just-Jojo/mcoc-v3
max_stars_repo_head_hexsha: 41c69960c8aff2dbbfd5d11ecc17e7af73e1e305
max_stars_repo_licenses: ["MIT"]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2020-08-09T03:03:20.000Z
max_stars_repo_stars_event_max_datetime: 2020-12-13T19:01:07.000Z
max_issues_repo_path: cdtcommon/calculator.py
max_issues_repo_name: Just-Jojo/mcoc-v3
max_issues_repo_head_hexsha: 41c69960c8aff2dbbfd5d11ecc17e7af73e1e305
max_issues_repo_licenses: ["MIT"]
max_issues_count: 19
max_issues_repo_issues_event_min_datetime: 2020-07-24T00:37:51.000Z
max_issues_repo_issues_event_max_datetime: 2021-06-18T17:22:14.000Z
max_forks_repo_path: cdtcommon/calculator.py
max_forks_repo_name: Just-Jojo/mcoc-v3
max_forks_repo_head_hexsha: 41c69960c8aff2dbbfd5d11ecc17e7af73e1e305
max_forks_repo_licenses: ["MIT"]
max_forks_count: 7
max_forks_repo_forks_event_min_datetime: 2020-06-30T20:09:08.000Z
max_forks_repo_forks_event_max_datetime: 2021-02-20T03:48:09.000Z
content:
import math
import re
import discord
from redbot.core import checks, commands
from redbot.core.config import Config
from .cdtcommon import CdtCommon
from .cdtembed import Embed
class Calculator(commands.Cog):
"""Calculator"""
def __init__(self, bot):
self.bot = bot
self.thumbnail = "https://www.ebuyer.com/blog/wp-content/uploads/2014/07/buttons-on-a-calculator-header1.jpg"
@commands.command(pass_context=True, name="calculator", aliases=("calc",))
async def _calc(self, ctx, *, m):
"""Math is fun!
Type math, get fun."""
m = "".join(m)
math_filter = re.findall(
r"[\[\]\-()*+/0-9=.,% ]|>|<|==|>=|<=|\||&|~|!=|^|sum"
+ "|range|random|randint|choice|randrange|True|False|if|and|or|else"
+ "|is|not|for|in|acos|acosh|asin|asinh|atan|atan2|atanh|ceil"
+ "|copysign|cos|cosh|degrees|e|erf|erfc|exp|expm1|fabs|factorial"
+ "|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf|isclose|isfinite"
+ "|isinf|isnan|ldexp|lgamma|log|log10|log1p|log2|modf|nan|pi"
+ "|pow|radians|sin|sinh|sqrt|tan|tanh|round",
m,
)
calculate_stuff = eval("".join(math_filter))
if len(str(calculate_stuff)) > 0:
em = await Embed.create(
ctx,
title="CollectorDevTeam Calculator",
thumbnail=self.thumbnail,
description="**Input**\n`{}`\n\n**Result**\n`{}`".format(m, calculate_stuff),
)
em.add_field(name="Type Math", value="Get Fun")
await ctx.send(embed=em)
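# Illustrative sketch (not part of the original cog): how the whitelist-then-eval
# pipeline above behaves on a simple arithmetic input.
#   re.findall(pattern, "2+3*4")  ->  ['2', '+', '3', '*', '4']
#   eval("".join(...))            ->  14
# Bare math names (e.g. sqrt) must resolve in eval's scope; even with the
# whitelist, eval on user input should be treated as risky.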
@commands.command(
aliases=[
"p2f",
],
hidden=True,
)
async def per2flat(self, ctx, per: float, ch_rating: int = 100):
"""Convert Percentage to MCOC Flat Value"""
await ctx.send(CdtCommon.to_flat(per, ch_rating))
# , aliases=('f2p')) --> this was translating as "flat | f | 2 | p"
@commands.command(pass_context=True, name="flat")
async def flat2per(self, ctx, *, m):
"""Convert MCOC Flat Value to Percentge
<equation> [challenger rating = 100]"""
if " " in m:
m, cr = m.rsplit(" ", 1)
challenger_rating = int(cr)
else:
challenger_rating = 100
m = "".join(m)
math_filter = re.findall(
r"[\[\]\-()*+/0-9=.,% ]"
+ r"|acos|acosh|asin|asinh"
+ r"|atan|atan2|atanh|ceil|copysign|cos|cosh|degrees|e|erf|erfc|exp"
+ r"|expm1|fabs|factorial|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf"
+ r"|isclose|isfinite|isinf|isnan|round|ldexp|lgamma|log|log10|log1p"
+ r"|log2|modf|nan|pi|pow|radians|sin|sinh|sqrt|tan|tanh",
m,
)
flat_val = eval("".join(math_filter))
p = CdtCommon.from_flat(flat_val, challenger_rating)
em = await Embed.create(
ctx,
color=discord.Color.gold(),
title="FlatValue:",
thumbnail=self.thumbnail,
description="{}".format(flat_val),
)
em.add_field(name="Percentage:", value="{}\%".format(p))
await ctx.send(embed=em)
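# Input convention sketch (hypothetical values): the last space-separated token,
# when present, is split off as the challenger rating.
#   "1500 80" -> expression "1500", challenger_rating 80
#   "1500"    -> expression "1500", challenger_rating 100 (default)
# The filtered expression is eval'd to a flat value and converted with
# CdtCommon.from_flat (defined in .cdtcommon).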
@commands.command(aliases=["compf", "cfrac"], hidden=True)
async def compound_frac(self, ctx, base: float, exp: int):
# On second thought, I'm not gonna touch this
# - Jojo
"""Calculate multiplicative compounded fractions"""
if base > 1:
base = base / 100
compound = 1 - (1 - base) ** exp
em = await Embed.create(
ctx,
color=discord.Color.gold(),
title="Compounded Fractions",
thumbnail=self.thumbnail,
description="{:.2%} compounded {} times".format(base, exp),
)
em.add_field(name="Expected Chance", value="{:.2%}".format(compound))
await ctx.send(embed=em)
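# Worked example (illustrative): a 20% chance compounded 3 times.
#   base = 0.2, exp = 3
#   compound = 1 - (1 - 0.2) ** 3 = 1 - 0.512 = 0.488  ->  rendered as "48.80%"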
avg_line_length: 38.711538
max_line_length: 118
alphanum_fraction: 0.544709
content_no_comment:
import math
import re
import discord
from redbot.core import checks, commands
from redbot.core.config import Config
from .cdtcommon import CdtCommon
from .cdtembed import Embed
class Calculator(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.thumbnail = "https://www.ebuyer.com/blog/wp-content/uploads/2014/07/buttons-on-a-calculator-header1.jpg"
@commands.command(pass_context=True, name="calculator", aliases=("calc",))
async def _calc(self, ctx, *, m):
m = "".join(m)
math_filter = re.findall(
r"[\[\]\-()*+/0-9=.,% ]|>|<|==|>=|<=|\||&|~|!=|^|sum"
+ "|range|random|randint|choice|randrange|True|False|if|and|or|else"
+ "|is|not|for|in|acos|acosh|asin|asinh|atan|atan2|atanh|ceil"
+ "|copysign|cos|cosh|degrees|e|erf|erfc|exp|expm1|fabs|factorial"
+ "|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf|isclose|isfinite"
+ "|isinf|isnan|ldexp|lgamma|log|log10|log1p|log2|modf|nan|pi"
+ "|pow|radians|sin|sinh|sqrt|tan|tanh|round",
m,
)
calculate_stuff = eval("".join(math_filter))
if len(str(calculate_stuff)) > 0:
em = await Embed.create(
ctx,
title="CollectorDevTeam Calculator",
thumbnail=self.thumbnail,
description="**Input**\n`{}`\n\n**Result**\n`{}`".format(m, calculate_stuff),
)
em.add_field(name="Type Math", value="Get Fun")
await ctx.send(embed=em)
@commands.command(
aliases=[
"p2f",
],
hidden=True,
)
async def per2flat(self, ctx, per: float, ch_rating: int = 100):
await ctx.send(CdtCommon.to_flat(per, ch_rating))
@commands.command(pass_context=True, name="flat")
async def flat2per(self, ctx, *, m):
if " " in m:
m, cr = m.rsplit(" ", 1)
challenger_rating = int(cr)
else:
challenger_rating = 100
m = "".join(m)
math_filter = re.findall(
r"[\[\]\-()*+/0-9=.,% ]"
+ r"|acos|acosh|asin|asinh"
+ r"|atan|atan2|atanh|ceil|copysign|cos|cosh|degrees|e|erf|erfc|exp"
+ r"|expm1|fabs|factorial|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf"
+ r"|isclose|isfinite|isinf|isnan|round|ldexp|lgamma|log|log10|log1p"
+ r"|log2|modf|nan|pi|pow|radians|sin|sinh|sqrt|tan|tanh",
m,
)
flat_val = eval("".join(math_filter))
p = CdtCommon.from_flat(flat_val, challenger_rating)
em = await Embed.create(
ctx,
color=discord.Color.gold(),
title="FlatValue:",
thumbnail=self.thumbnail,
description="{}".format(flat_val),
)
em.add_field(name="Percentage:", value="{}\%".format(p))
await ctx.send(embed=em)
@commands.command(aliases=["compf", "cfrac"], hidden=True)
async def compound_frac(self, ctx, base: float, exp: int):
# - Jojo
if base > 1:
base = base / 100
compound = 1 - (1 - base) ** exp
em = await Embed.create(
ctx,
color=discord.Color.gold(),
title="Compounded Fractions",
thumbnail=self.thumbnail,
description="{:.2%} compounded {} times".format(base, exp),
)
em.add_field(name="Expected Chance", value="{:.2%}".format(compound))
await ctx.send(embed=em)
is_comment_constant_removed: true
is_sharp_comment_removed: true
Record 2
hexsha: 1c43bd3bebe28921f0af5e1ac829a25b07e7fc62
size: 10,912
ext: py
lang: Python
max_stars_repo_path: qiskit/pulse/pulse_lib/samplers/decorators.py
max_stars_repo_name: lerongil/qiskit-terra
max_stars_repo_head_hexsha: a25af2a2378bc3d4f5ec73b948d048d1b707454c
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2019-05-19T17:39:38.000Z
max_stars_repo_stars_event_max_datetime: 2020-01-28T19:59:18.000Z
max_issues_repo_path: qiskit/pulse/pulse_lib/samplers/decorators.py
max_issues_repo_name: lerongil/qiskit-terra
max_issues_repo_head_hexsha: a25af2a2378bc3d4f5ec73b948d048d1b707454c
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 4
max_issues_repo_issues_event_min_datetime: 2019-05-13T15:28:46.000Z
max_issues_repo_issues_event_max_datetime: 2019-12-19T20:47:02.000Z
max_forks_repo_path: qiskit/pulse/pulse_lib/samplers/decorators.py
max_forks_repo_name: lerongil/qiskit-terra
max_forks_repo_head_hexsha: a25af2a2378bc3d4f5ec73b948d048d1b707454c
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-07-07T16:55:41.000Z
max_forks_repo_forks_event_max_datetime: 2021-07-07T16:55:41.000Z
content:
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-return-doc
"""Sampler decorator module for sampling of continuous pulses to discrete pulses to be
exposed to user.
Some atypical boilerplate has been added to solve the problem of decorators not preserving
their wrapped function signatures. Below we explain the problem that samplers solve and how
we implement this.
A sampler is a function that takes an continuous pulse function with signature:
```python
def f(times: np.ndarray, *args, **kwargs) -> np.ndarray:
...
```
and returns a new function:
```python
def f(duration: int, *args, **kwargs) -> SamplePulse:
    ...
```
Samplers are used to build up pulse commands from continuous pulse functions.
In Python the creation of a dynamic function that wraps another function will cause
the underlying signature and documentation of the underlying function to be overwritten.
In order to circumvent this issue the Python standard library provides the decorator
`functools.wraps` which allows the programmer to expose the names and signature of the
wrapped function as those of the dynamic function.
Samplers are implemented by creating a function with signature:
```python
@sampler
def left(continuous_pulse: Callable, duration: int, *args, **kwargs):
    ...
```
This will create a sampler function for `left`. Since it is a dynamic function it would not
have the docstring of `left` available to `help`. This could be fixed by wrapping with
`functools.wraps` in the `sampler`, but this would then cause the signature to be that of the
sampler function which is called on the continuous pulse, below:
`(continuous_pulse: Callable, duration: int, *args, **kwargs)`
This is not correct for the sampled function, which no longer accepts the continuous pulse as an argument.
For the standard sampler we get around this by not using `functools.wraps` and
explicitly defining our samplers such as `left`, `right` and `midpoint` and
calling `sampler` internally on the function that implements the sampling schemes such as
`left_sample`, `right_sample` and `midpoint_sample` respectively. See `left` for an example of this.
In this way our standard samplers will expose the proper help signature, but a user can
still create their own sampler with
```python
@sampler
def custom_sampler(time, *args, **kwargs):
    ...
```
However, in this case it will be missing documentation of the underlying sampling methods.
We believe that the definition of custom samplers will be rather infrequent.
However, users will frequently apply sampler instances to continuous pulses. Therefore, a different
approach was required for sampled continuous functions (the output of a continuous pulse function
decorated by a sampler instance).
A sampler instance is a decorator that may be used to wrap continuous pulse functions such as
linear below:
```python
@left
def linear(times: np.ndarray, m: float, b: float) -> np.ndarray:
"""Linear test function
Args:
times: Input times.
m: Slope.
b: Intercept
Returns:
np.ndarray
"""
return m*times+b
```
Which after decoration may be called with a duration rather than an array of times
```python
duration = 10
pulse_command = linear(duration, 0.1, 0.1)
```
If one calls help on `linear` they will find
```
linear(duration:int, *args, **kwargs) -> numpy.ndarray
Discretized continuous pulse function: `linear` using
sampler: `_left`.
The first argument (time) of the continuous pulse function has been replaced with
a discretized `duration` of type (int).
Args:
duration (int)
*args: Remaining arguments of continuous pulse function.
See continuous pulse function documentation below.
**kwargs: Remaining kwargs of continuous pulse function.
See continuous pulse function documentation below.
Sampled continuous function:
function linear in module test.python.pulse.test_samplers
linear(x:numpy.ndarray, m:float, b:float) -> numpy.ndarray
Linear test function
Args:
x: Input times.
m: Slope.
b: Intercept
Returns:
np.ndarray
```
This is partly because `functools.wraps` has been used on the underlying function.
This in itself is not sufficient as the signature of the sampled function has
`duration`, whereas the signature of the continuous function is `time`.
This is achieved by removing `__wrapped__` set by `functools.wraps` in order to preserve
the correct signature and also applying `_update_annotations` and `_update_docstring`
to the generated function which corrects the function annotations and adds an informative
docstring respectively.
The user therefore has access to the correct sampled function docstring in its entirety, while
still seeing the signature for the continuous pulse function and all of its arguments.
"""
import functools
from typing import Callable
import textwrap
import pydoc
import numpy as np
import qiskit.pulse.commands as commands
from . import strategies
def _update_annotations(discretized_pulse: Callable) -> Callable:
"""Update annotations of discretized continuous pulse function with duration.
Args:
discretized_pulse: Discretized decorated continuous pulse.
"""
undecorated_annotations = list(discretized_pulse.__annotations__.items())
decorated_annotations = undecorated_annotations[1:]
decorated_annotations.insert(0, ('duration', int))
discretized_pulse.__annotations__ = dict(decorated_annotations)
return discretized_pulse
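# Sketch of the effect on a hypothetical wrapped function:
#   def f(times: np.ndarray, m: float) -> np.ndarray: ...
#   before: {'times': np.ndarray, 'm': float, 'return': np.ndarray}
#   after:  {'duration': int, 'm': float, 'return': np.ndarray}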
def _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:
"""Update annotations of discretized continuous pulse function.
Args:
discretized_pulse: Discretized decorated continuous pulse.
sampler_inst: Applied sampler.
"""
wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')
header, body = wrapped_docstring.split('\n', 1)
body = textwrap.indent(body, ' ')
wrapped_docstring = header+body
updated_ds = """
Discretized continuous pulse function: `{continuous_name}` using
sampler: `{sampler_name}`.
The first argument (time) of the continuous pulse function has been replaced with
a discretized `duration` of type (int).
Args:
duration (int)
*args: Remaining arguments of continuous pulse function.
See continuous pulse function documentation below.
**kwargs: Remaining kwargs of continuous pulse function.
See continuous pulse function documentation below.
Sampled continuous function:
{continuous_doc}
""".format(continuous_name=discretized_pulse.__name__,
sampler_name=sampler_inst.__name__,
continuous_doc=wrapped_docstring)
discretized_pulse.__doc__ = updated_ds
return discretized_pulse
def sampler(sample_function: Callable) -> Callable:
"""Sampler decorator base method.
Samplers are used for converting a continuous function to a discretized pulse.
They operate on a function with the signature:
`def f(times: np.ndarray, *args, **kwargs) -> np.ndarray`
Where `times` is a numpy array of floats with length n_times and the output array
is a complex numpy array with length n_times. The output of the decorator is an
instance of `FunctionalPulse` with signature:
`def g(duration: int, *args, **kwargs) -> SamplePulse`
Note if your continuous pulse function outputs a `complex` scalar rather than a
`np.ndarray`, you should first vectorize it before applying a sampler.
This class implements the sampler boilerplate for the sampler.
Args:
sample_function: A sampler function to be decorated.
"""
def generate_sampler(continuous_pulse: Callable) -> Callable:
"""Return a decorated sampler function."""
@functools.wraps(continuous_pulse)
def call_sampler(duration: int, *args, **kwargs) -> commands.SamplePulse:
"""Replace the call to the continuous function with a call to the sampler applied
to the analytic pulse function."""
sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)
return np.asarray(sampled_pulse, dtype=np.complex_)
# Update type annotations for wrapped continuous function to be discrete
call_sampler = _update_annotations(call_sampler)
# Update docstring with that of the sampler and include sampled function documentation.
call_sampler = _update_docstring(call_sampler, sample_function)
# Unset wrapped to return base sampler signature
# but still get rest of benefits of wraps
# such as __name__, __qualname__
call_sampler.__dict__.pop('__wrapped__')
# wrap with functional pulse
return commands.functional_pulse(call_sampler)
return generate_sampler
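# Usage sketch (assumed sampling scheme, mirroring the module docstring):
#   @sampler
#   def take_left(continuous_pulse, duration, *args, **kwargs):
#       return continuous_pulse(np.arange(duration), *args, **kwargs)
#   @take_left
#   def linear(times, m, b):
#       return m * times + b
#   pulse_command = linear(10, 0.1, 0.0)  # pulse command built from 10 samples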
def left(continuous_pulse: Callable) -> Callable:
r"""Left sampling strategy decorator.
See `pulse.samplers.sampler` for more information.
For `duration`, return:
$$\{f(t) \in \mathbb{C} \mid t \in \mathbb{Z} \wedge 0 \le t < \texttt{duration}\}$$
Args:
continuous_pulse: To sample.
"""
return sampler(strategies.left_sample)(continuous_pulse)
def right(continuous_pulse: Callable) -> Callable:
r"""Right sampling strategy decorator.
See `pulse.samplers.sampler` for more information.
For `duration`, return:
$$\{f(t) \in \mathbb{C} \mid t \in \mathbb{Z} \wedge 0 < t \le \texttt{duration}\}$$
Args:
continuous_pulse: To sample.
"""
return sampler(strategies.right_sample)(continuous_pulse)
def midpoint(continuous_pulse: Callable) -> Callable:
r"""Midpoint sampling strategy decorator.
See `pulse.samplers.sampler` for more information.
For `duration`, return:
$$\{f(t+0.5) \in \mathbb{C} \mid t \in \mathbb{Z} \wedge 0 \le t < \texttt{duration}\}$$
Args:
continuous_pulse: To sample.
"""
return sampler(strategies.midpoint_sample)(continuous_pulse)
avg_line_length: 38.971429
max_line_length: 100
alphanum_fraction: 0.701155
content_no_comment:
import functools
from typing import Callable
import textwrap
import pydoc
import numpy as np
import qiskit.pulse.commands as commands
from . import strategies
def _update_annotations(discretized_pulse: Callable) -> Callable:
undecorated_annotations = list(discretized_pulse.__annotations__.items())
decorated_annotations = undecorated_annotations[1:]
decorated_annotations.insert(0, ('duration', int))
discretized_pulse.__annotations__ = dict(decorated_annotations)
return discretized_pulse
def _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:
wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')
header, body = wrapped_docstring.split('\n', 1)
body = textwrap.indent(body, ' ')
wrapped_docstring = header+body
updated_ds = """
Discretized continuous pulse function: `{continuous_name}` using
sampler: `{sampler_name}`.
The first argument (time) of the continuous pulse function has been replaced with
a discretized `duration` of type (int).
Args:
duration (int)
*args: Remaining arguments of continuous pulse function.
See continuous pulse function documentation below.
**kwargs: Remaining kwargs of continuous pulse function.
See continuous pulse function documentation below.
Sampled continuous function:
{continuous_doc}
""".format(continuous_name=discretized_pulse.__name__,
sampler_name=sampler_inst.__name__,
continuous_doc=wrapped_docstring)
discretized_pulse.__doc__ = updated_ds
return discretized_pulse
def sampler(sample_function: Callable) -> Callable:
def generate_sampler(continuous_pulse: Callable) -> Callable:
@functools.wraps(continuous_pulse)
def call_sampler(duration: int, *args, **kwargs) -> commands.SamplePulse:
sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)
return np.asarray(sampled_pulse, dtype=np.complex_)
call_sampler = _update_annotations(call_sampler)
call_sampler = _update_docstring(call_sampler, sample_function)
call_sampler.__dict__.pop('__wrapped__')
return commands.functional_pulse(call_sampler)
return generate_sampler
def left(continuous_pulse: Callable) -> Callable:
return sampler(strategies.left_sample)(continuous_pulse)
def right(continuous_pulse: Callable) -> Callable:
return sampler(strategies.right_sample)(continuous_pulse)
def midpoint(continuous_pulse: Callable) -> Callable:
return sampler(strategies.midpoint_sample)(continuous_pulse)
is_comment_constant_removed: true
is_sharp_comment_removed: true
Record 3
hexsha: 1c43be1b50051126e52c822f3f6ff0e79cfbfacb
size: 3,517
ext: py
lang: Python
max_stars_repo_path: simplex_method.py
max_stars_repo_name: dvapan/simplex_method
max_stars_repo_head_hexsha: dcc930b092dffa2e55162ea035f43d85572c8568
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: simplex_method.py
max_issues_repo_name: dvapan/simplex_method
max_issues_repo_head_hexsha: dcc930b092dffa2e55162ea035f43d85572c8568
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: simplex_method.py
max_forks_repo_name: dvapan/simplex_method
max_forks_repo_head_hexsha: dcc930b092dffa2e55162ea035f43d85572c8568
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# coding=utf-8
__author__ = 'dvapan'
import scipy as sc
import scipy.linalg as lin
import pprint
#
# c = sc.matrix([2.0, 3.0]).transpose()
# A = sc.matrix([[-10.0, 5.0], [6.0, 20.0], [8.0, 15.0]])
# b = sc.matrix([600.0, 600.0, 600.0]).transpose()
# I = [2, 3, 4]
def transform_to_classic(A,b,c):
count_vars = A.shape[1]
addition_vars = A.shape[0]
count_all_vars = count_vars + addition_vars
_A = sc.resize(A, (A.shape[0], count_all_vars))
_A[:, :count_vars] = A
_A[:, count_vars:] = sc.eye(addition_vars)
_c = sc.resize(c, (count_all_vars, 1))
_c[count_vars:, :] = sc.zeros((addition_vars, 1))
I = range(count_vars, count_vars+addition_vars)
return _A, b, _c, I
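# Shape sketch (assumed convention): for max c^T x s.t. A x <= b, x >= 0 with
# A of shape (m, n), adding slack variables gives
#   _A = [A | I_m]  (m x (n+m)),  _c = [c; 0]  ((n+m) x 1),
#   I  = indices n..n+m-1 of the slack (initial basis) columns.
# Note: this module is Python 2 code (reduce and xrange are used as builtins).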
# A = sc.matrix([[1, 1, -1, 1],
# [1, 14, 10, -10]])
# b = sc.matrix([2, 24]).transpose()
# c = sc.matrix([1, 2, 3, -4]).transpose()
def get_point_from_basis(A, b, I):
B_sigma = A[:, I]
x_sigma = lin.solve(B_sigma, b)
x = sc.zeros(A.shape[1])
#print x_sigma
x[I] = x_sigma
return x
def simplex_method(A, b, c, I, eps):
count_all_vars = A.shape[1]
q = 50
while q > 0:
B_sigma = A[:, I]
c_sigma = c[I, :]
x_sigma = lin.solve(B_sigma, b)
y = lin.solve(B_sigma.transpose(), c_sigma)
D = sc.matrix(A).transpose()*y - c
non_base_I = [e for e in range(count_all_vars) if e not in I]
q-=1
finish = reduce(lambda x, y: x and y, map(lambda x: x > -eps, D[non_base_I]), True)
# print I
# print D.transpose().tolist()[0], get_point_from_basis(A, b, I)
if finish:
x = get_point_from_basis(A, b, I)
return x, I, (sc.matrix(x)*sc.matrix(c))[0, 0]
k = min([i for i in non_base_I if D[i] < 0])
lmd_k = lin.solve(B_sigma, A[:, k])
finish = reduce(lambda x, y: x and y, map(lambda x: x < 0, lmd_k),True)
if finish:
return None, None, sc.nan
tmp = sc.array(x_sigma.transpose())[0].tolist()
min_i = 0
while lmd_k[min_i] <= 0:
min_i += 1
for i in xrange(len(lmd_k)):
if lmd_k[i] > 0 and tmp[i]/lmd_k[i] < tmp[min_i]/lmd_k[min_i]:
min_i = i
s = min_i
I[s] = k
return None,None,None
def artificial_basis_method(A, b, c, eps):
count_vars = A.shape[1]
addition_vars = A.shape[0]
count_all_vars = count_vars + addition_vars
_A = sc.resize(A, (A.shape[0], count_all_vars))
_A[:, :count_vars] = A
_A[:, count_vars:] = sc.eye(addition_vars)
_c = sc.resize(c, (count_all_vars, 1))
_c[:count_vars, :] = sc.zeros((count_vars, 1))
_c[count_vars:, :] = sc.full((addition_vars, 1), -1)
# if I is None:
I = range(count_vars, count_vars+addition_vars)
# pprint.pprint((_A, b, _c ,I))
Res = simplex_method(_A, b, _c, I, eps)
if Res[2] < -eps:
return None, None, None
Real_I = [i for i in range(count_vars) if i not in Res[1]]
for i in range(len(Res[1])):
if Res[1][i] >= count_vars:
Res[1][i] = Real_I.pop(0)
return Res
def double_phase_simplex_method(A, b, c, eps):
Res = artificial_basis_method(A, b, c, eps)
# while Res[1] is not None and len(filter(lambda x: x >= A.shape[1], Res[1])) > 0:
# print "NEED NEXT ITER OF FIRST PHASE"
# Res = artificial_basis_method(A, b, c, eps, Res[1])
if Res[1] is not None:
return simplex_method(A, b, c, Res[1], eps)
else:
return None, None, None
avg_line_length: 29.308333
max_line_length: 91
alphanum_fraction: 0.562411
content_no_comment:
__author__ = 'dvapan'
import scipy as sc
import scipy.linalg as lin
import pprint
def transform_to_classic(A,b,c):
count_vars = A.shape[1]
addition_vars = A.shape[0]
count_all_vars = count_vars + addition_vars
_A = sc.resize(A, (A.shape[0], count_all_vars))
_A[:, :count_vars] = A
_A[:, count_vars:] = sc.eye(addition_vars)
_c = sc.resize(c, (count_all_vars, 1))
_c[count_vars:, :] = sc.zeros((addition_vars, 1))
I = range(count_vars, count_vars+addition_vars)
return _A, b, _c, I
def get_point_from_basis(A, b, I):
B_sigma = A[:, I]
x_sigma = lin.solve(B_sigma, b)
x = sc.zeros(A.shape[1])
x[I] = x_sigma
return x
def simplex_method(A, b, c, I, eps):
count_all_vars = A.shape[1]
q = 50
while q > 0:
B_sigma = A[:, I]
c_sigma = c[I, :]
x_sigma = lin.solve(B_sigma, b)
y = lin.solve(B_sigma.transpose(), c_sigma)
D = sc.matrix(A).transpose()*y - c
non_base_I = [e for e in range(count_all_vars) if e not in I]
q-=1
finish = reduce(lambda x, y: x and y, map(lambda x: x > -eps, D[non_base_I]), True)
if finish:
x = get_point_from_basis(A, b, I)
return x, I, (sc.matrix(x)*sc.matrix(c))[0, 0]
k = min([i for i in non_base_I if D[i] < 0])
lmd_k = lin.solve(B_sigma, A[:, k])
finish = reduce(lambda x, y: x and y, map(lambda x: x < 0, lmd_k),True)
if finish:
return None, None, sc.nan
tmp = sc.array(x_sigma.transpose())[0].tolist()
min_i = 0
while lmd_k[min_i] <= 0:
min_i += 1
for i in xrange(len(lmd_k)):
if lmd_k[i] > 0 and tmp[i]/lmd_k[i] < tmp[min_i]/lmd_k[min_i]:
min_i = i
s = min_i
I[s] = k
return None,None,None
def artificial_basis_method(A, b, c, eps):
count_vars = A.shape[1]
addition_vars = A.shape[0]
count_all_vars = count_vars + addition_vars
_A = sc.resize(A, (A.shape[0], count_all_vars))
_A[:, :count_vars] = A
_A[:, count_vars:] = sc.eye(addition_vars)
_c = sc.resize(c, (count_all_vars, 1))
_c[:count_vars, :] = sc.zeros((count_vars, 1))
_c[count_vars:, :] = sc.full((addition_vars, 1), -1)
I = range(count_vars, count_vars+addition_vars)
Res = simplex_method(_A, b, _c, I, eps)
if Res[2] < -eps:
return None, None, None
Real_I = [i for i in range(count_vars) if i not in Res[1]]
for i in range(len(Res[1])):
if Res[1][i] >= count_vars:
Res[1][i] = Real_I.pop(0)
return Res
def double_phase_simplex_method(A, b, c, eps):
Res = artificial_basis_method(A, b, c, eps)
if Res[1] is not None:
return simplex_method(A, b, c, Res[1], eps)
else:
return None, None, None
is_comment_constant_removed: true
is_sharp_comment_removed: true
Record 4
hexsha: 1c43beac38a9be1ebc96e5f4db2e17a1f69ebb88
size: 24,223
ext: py
lang: Python
max_stars_repo_path: python/process_content.py
max_stars_repo_name: tdjames1/TMA-data-extraction
max_stars_repo_head_hexsha: 03af0ef3b61df5486f6f061e4e3b62de2e238476
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: python/process_content.py
max_issues_repo_name: tdjames1/TMA-data-extraction
max_issues_repo_head_hexsha: 03af0ef3b61df5486f6f061e4e3b62de2e238476
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: 5
max_issues_repo_issues_event_min_datetime: 2021-01-05T12:14:53.000Z
max_issues_repo_issues_event_max_datetime: 2021-08-23T09:18:11.000Z
max_forks_repo_path: python/process_content.py
max_forks_repo_name: tdjames1/TMA-data-extraction
max_forks_repo_head_hexsha: 03af0ef3b61df5486f6f061e4e3b62de2e238476
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-02-18T14:59:42.000Z
max_forks_repo_forks_event_max_datetime: 2021-02-18T14:59:42.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tma_process_content
.. module:: TMA-data-extraction
:synopsis: Scripts and functions for extracting weather alert data
from Tanzanian Meteorological Authority "Five days Severe weather
impact-based forecasts" PDFs.
.. moduleauthor: Tamora D. James <t.d.james1@leeds.ac.uk>, CEMAC (UoL)
.. description: This module was developed by CEMAC as part of the GCRF
African Swift Project. This script processes page contents and
metadata extracted from Tanzanian Meteorological Authority "Five days
Severe weather impact-based forecasts" PDFs and produces a netCDF4
file containing gridded weather alert data.
:copyright: © 2020 University of Leeds.
:license: BSD 3-clause (see LICENSE)
Example:
To use::
./tma_process_content <path/to/page2_content.txt> <path/to/metadata.csv>
<path/to/page2_content.txt> - Path to content extracted from page 2 of TMA weather forecast PDF
<path/to/metadata.csv> - Path to CSV containing text metadata extracted from page 2 of TMA weather forecast PDF
.. CEMAC_cemac_generic:
https://github.com/cemac/cemac_generic
"""
import sys
import argparse
import os
import numpy as np
import numpy.linalg as LA
import bezier
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.path import Path
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.shapereader as shpreader
import skimage.draw
import xarray as xr
import pandas as pd
PDF_GS_OPS = {
'g': 'setgray (nonstroke)',
'G': 'setgray (stroke)',
'gs': 'setgraphicsstate',
'j': 'setlinejoin',
'M': 'setmiterlimit',
'rg': 'setrgbcolor (nonstroke)',
'RG': 'setrgbcolor (stroke)',
'q': 'gsave',
'Q': 'grestore',
'w': 'setlinewidth',
'W': 'clip',
'W*': 'eoclip',
}
MAP_IMG = "../resources/TZA_map.png"
# Extent of original map image when matched to PlateCarree projection
extent_MAP_IMG = [28.405, 41.475, -12., -0.745]
def readFile(fp):
with open(fp) as f:
lines = [line.rstrip() for line in f]
return lines
def extractGraphics(lines):
path_ops = {'m', 'c', 'l'}
term_ops = {'f*', 'S', 'n'}
col_ops = {'rg', 'RG', 'g', 'G'}
block = []
graphics = []
images = []
col = None
# Iterate over the lines
for line in lines:
if line.endswith(tuple(path_ops)):
#print("got path operator")
block.append(line)
elif line.endswith(tuple(term_ops)):
#print("got terminating path operator")
block.append(line)
path = processBlock(list(block))
if len(path['contour']):
graphics.append({'path': path, 'colour': col})
del block[:]
elif line.endswith(tuple(col_ops)):
block.append(line)
col = processColour(line)
elif "Do" in line:
#print("got image operator")
block.append(line)
image = processImage(list(block))
if len(image):
images.append(image)
del block[:]
else:
block.append(line)
# print(len(graphics))
# print(len(graphics[0]['path']))
# print(len(images))
return [images, graphics]
def appendCurve(start, controls):
nodes = np.concatenate((start, controls))
nodes = nodes.reshape(len(nodes)//2,2).transpose()
#print(nodes)
curve = bezier.Curve.from_nodes(nodes)
return curve
def getCentroid(vertices):
#print(vertices)
if len(vertices):
v = np.array(vertices)
return np.mean(v, axis = 0)
def processBlock(lines):
#print(list(line.rstrip() for line in lines))
path_is_open = False
start_xy = []
current = []
next_xy = []
controls = []
vertices = []
line_collection = []
draw_filled_area = True
for line in lines:
s = line.split()
if not len(s):
continue
#print(s[-1])
op = s[-1]
if op == "m":
path_is_open = True
if (len(s) > 3):
s = s[len(s)-3:]
start_xy = current = np.array(s[:-1], dtype = float)
vertices.append(current)
print("[PATH] start point:", start_xy)
elif op == "c":
if path_is_open:
#print("append bezier curve")
controls = np.array(s[:-1], dtype = float)
print("[PATH] bezier curve, control points:", controls)
curve = appendCurve(current, controls)
line_collection.append(curve)
current = controls[-2:]
vertices.append(current)
else:
print("[PATH] current path is not open to append bezier curve")
elif op == "l":
if path_is_open:
print("[PATH] append line segment")
next_xy = np.array(s[:-1], dtype = float)
curve = appendCurve(current, next_xy)
line_collection.append(curve)
current = next_xy
vertices.append(current)
else:
print("[PATH] current path is not open to append line segment")
elif op == "f*":
print("[PATH] fill region")
path_is_open = False
if not draw_filled_area:
del line_collection[:]
break
elif op == "S":
print("[PATH] stroke region")
path_is_open = False
break
elif op == "n":
print("[PATH] end path without fill or stroke")
path_is_open = False
del line_collection[:]
break
elif op == "h":
print("[PATH] close subpath")
if path_is_open:
if (current - start_xy).any():
print("[PATH] append line segment to close subpath")
line = appendCurve(current, start_xy)
line_collection.append(line)
current = start_xy
vertices.append(current)
path_is_open = False
else:
print("[PATH] current path is not open to close path")
else:
if op in PDF_GS_OPS.keys():
print("[PATH] got operator: " + op + " = " + PDF_GS_OPS[op])
else:
print("[PATH] unknown operator: " + op)
centroid = getCentroid(vertices)
return {'contour': line_collection, 'centroid': centroid}
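# Illustrative content-stream fragment this parser expects (hypothetical numbers):
#   "100 200 m"                   start a path at (100, 200)
#   "110 210 120 220 130 230 c"   cubic Bezier to (130, 230)
#   "140 240 l"                   line segment to (140, 240)
#   "S"                           stroke and terminate the path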
def processColour(line):
col = None
s = line.split()
if not len(s):
return
op = s[-1]
if op.lower() == "rg":
print("[COLOUR] got set RGB colour operator", op)
print(s)
if (len(s) > 4):
s = s[len(s)-4:]
otype = 'stroke' if op == "RG" else 'fill'
col = {'type': otype,
'col_spec': 'rgb',
'val': np.array(s[:-1], dtype = float) }
elif op.lower() == "g":
print("[PATH] got set gray operator", op)
print(s)
if (len(s) > 2):
s = s[len(s)-2:]
otype = 'stroke' if op == "G" else 'fill'
col = { 'type': otype,
'col_spec': 'gs',
'val': np.array(s[:-1], dtype = float) }
return col
def processImage(lines):
#print(list(line.rstrip() for line in lines))
img_collection = []
rect = []
ctm = []
name = ""
for line in lines:
s = line.split()
if not len(s):
continue
#print(s[-1])
op = s[-1]
if op == "q":
print("[IMG] start image")
elif op == "re":
rect = np.array(s[:-1], dtype = float)
elif op == "cm":
try:
ctm = np.array(s[-7:-1], dtype = float)
print("[IMG] ctm:", ctm)
except ValueError as e:
print("Error setting CTM from", s, ": ", e)
elif op == "Q":
if s[-2] == "Do":
name = s[-3]
img_collection.append({'name': name, 'clip': rect, 'ctm': ctm})
elif op == "n":
print("[IMG] end path")
else:
if op in PDF_GS_OPS.keys():
print("[IMG] got operator: " + op + " = " + PDF_GS_OPS[op])
else:
print("[IMG] unknown operator: " + op)
return img_collection
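# Illustrative image block (hypothetical stream):
#   "q"                       save graphics state
#   "0 0 595 842 re"          clipping rectangle
#   "283 0 0 283 42 156 cm"   CTM: scale 283 x 283, origin (42, 156)
#   "/Im1 Do Q"               draw XObject /Im1, then restore state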
def createPlot(images, contours):
fig, ax = plt.subplots() # Create a figure containing a single axis
n_col = len(plt.rcParams['axes.prop_cycle'])
for i in range(len(contours)):
for curve in contours[i]['contour']:
_ = curve.plot(num_pts = 256, color = "C" + str(i%n_col), ax = ax)
# plot centroid i
cx, cy = contours[i]['centroid']
plt.plot(cx, cy, "o")
for i in range(len(images)):
for img in images[i]:
print("Image:", img['name'])
## x y w h re
# xy = img[1][:2]
# wh = img[1][2:]
# w, h = img[1][2:]
# print(xy)
# print(wh)
## w 0 0 h x y cm
ctm = img['ctm'].reshape(2,3)
scale = [img['ctm'][0], img['ctm'][3]]
position = img['ctm'][4:]
w, h = scale
xy = position
print("Position:", xy)
print("Size:", w, "x", h)
pos_check = xy[1] + h < 450
size_check = w > 120
if pos_check & size_check:
rect = mpatches.Rectangle(tuple(xy), w, h,
fc="none", ec="green")
ax.add_patch(rect)
arr_img = plt.imread(MAP_IMG, format='png')
ax.imshow(arr_img, interpolation='none',
origin='lower',
extent=[xy[0], xy[0]+w, xy[1]+h, xy[1]],
clip_on=True)
_ = ax.set_xlim(0, 842)
_ = ax.set_ylim(0, 595)
#_ = ax.set_xlim(150, 650)
#_ = ax.set_ylim(200, 400)
_ = ax.set_aspect(1)
plt.show()
def getMapGroups(images, graphics):
map_groups = []
for i in range(len(images)):
for img in images[i]:
#print("Image:", img['name'])
# w 0 0 h x y cm
ctm = img['ctm'].reshape(2,3)
w, h = [img['ctm'][0], img['ctm'][3]]
x, y = img['ctm'][4:]
#print("Position: ", x, ",", y)
#print("Size: ", w, "x", h)
# Identify map images by location/size
pos_check = y + h < 450
size_check = w > 120
if pos_check & size_check:
# Get graphics within map boundary
graphics_dict = {}
for gfx in graphics:
ix, iy = gfx['path']['centroid']
#print("Centroid: ", ix, ",", iy)
x_check = (x < ix) & (ix < x + w)
y_check = (y < iy) & (iy < y + h)
if x_check & y_check:
print("Centroid: ", ix, ",", iy)
if (ix, iy) not in graphics_dict.keys():
graphics_dict[(ix, iy)] = [ gfx ]
else:
# Check whether colour and contour are the
# same as previously stored graphics
found_match = False
nodes = np.hstack([np.hstack(c.nodes) for c in gfx['path']['contour']])
for g in graphics_dict[(ix, iy)]:
n = np.hstack([np.hstack(c.nodes) for c in g['path']['contour']])
if (nodes == n).all():
# nodes match, what about colours?
col = gfx['colour']
c = g['colour']
if col['col_spec'] == c['col_spec'] and np.array_equal(getColourValue(col), getColourValue(c)):
found_match = True
break
if not found_match:
graphics_dict[(ix, iy)].append(gfx)
print("Graphics with distinct centroids:", len(graphics_dict))
map_groups.append((img, graphics_dict))
def getXPos(mg):
return mg[0]['ctm'][4]
map_groups.sort(key = getXPos)
return map_groups
def getColourValue(col):
if col is not None:
return tuple(col['val'])
def transformMapGroup(map_group):
img, graphics_dict = map_group
print("Image:", img['name'])
# Construct current transformation matrix for image
# a b 0
# c d 0
# e f 1
m1 = np.hstack((img['ctm'].reshape(3,2), np.array([[0],[0],[1]])))
try:
m1_inv = LA.inv(m1)
except LA.LinAlgError:
sys.exit("Could not invert transformation matrix")
# Create transformation matrix to map from canonical image coords
# to extent of original map image matched to PlateCarree projection
lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG
tm = np.array([lon_max - lon_min, 0, 0, lat_max - lat_min, lon_min, lat_min])
m2 = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))
# Pre-multiply transformation matrices
m = np.matmul(m1_inv, m2)
graphics_list = []
print("Processing graphics:", len(graphics_dict))
for z, graphics in graphics_dict.items():
print("Got", len(graphics), "graphics objects with centroid:", z)
stroke_col = None
fill_col = None
#breakpoint()
for i in range(len(graphics)):
col = graphics[i]['colour']
if col is not None:
print("got colour state:", col)
# Get stroke colour specification
if col['type'] == "stroke":
stroke_col = getColourValue(col)
# Get fill colour specification
if col['type'] == "fill":
fill_col = getColourValue(col)
contour = []
for curve in graphics[i]['path']['contour']:
## Relocate curve according to new coordinate system
nodes = curve.nodes
nodes_new = []
for j in range(len(nodes.T)):
# Multiply node by combined transformation matrix m to
# get coordinates with respect to image space and
# transform from canonical image coords to PlateCarree
# map projection
v = np.matmul(np.append(nodes.T[j], 1), m)
nodes_new.append(v[:-1])
nodes = np.array(nodes_new).T
curve_new = bezier.Curve.from_nodes(nodes)
contour.append(curve_new)
# Relocate centroid i
centroid = np.matmul(np.append(graphics[i]['path']['centroid'], 1), m)[:-1]
path = { 'colour': { 'stroke': stroke_col,
'fill': fill_col },
'contour': contour,
'centroid': centroid }
graphics_list.append({ 'path': path})
return (img, graphics_list)
## end of transformMapGroup()
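# Transform sketch: a node (px, py) in page space maps to (lon, lat) via the
# row-vector product [lon, lat, 1] = [px, py, 1] @ m with m = m1_inv @ m2,
# i.e. undo the image CTM (page -> unit image square), then scale/offset the
# unit square onto extent_MAP_IMG.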
def plotMapGroup(map_group, ax):
_, graphics = map_group
n_col = len(plt.rcParams['axes.prop_cycle'])
print("Processing graphics:", len(graphics))
for i in range(len(graphics)):
col = "C" + str(i%n_col)
if graphics[i]['path']['colour'] is not None:
col = graphics[i]['path']['colour']
for curve in graphics[i]['path']['contour']:
_ = curve.plot(num_pts = 256, color = col, ax = ax)
# plot centroid i
cx, cy = graphics[i]['path']['centroid']
ax.plot(cx, cy, "o")
## end of plotMapGroup()
def getAlertMasks(map_group):
_, graphics = map_group
# mask will have shape defined by the image map extent divided
# into 0.1 degree grid
res = 0.1
# lon_min, lon_max, lat_min, lat_max = [round(x, 1) for x in extent_MAP_IMG]
# nx, ny = np.array([lon_max - lon_min, lat_max - lat_min])/res
# img_shape = (round(nx), round(ny), 3)
lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG
x = np.arange(lon_min, lon_max, res) # [round(x,1) for x in x]
y = np.arange(lat_min, lat_max, res) # [round(y,1) for y in y]
xx, yy = np.meshgrid(x, y)
xy = np.vstack((xx.ravel(), yy.ravel())).T
# Create transformation matrix
tm = np.array([res, 0, 0, res, lon_min, lat_min])
m = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))
try:
m_inv = LA.inv(m)
except LA.LinAlgError:
sys.exit("Could not invert transformation matrix")
mask_list = []
for i in range(len(graphics)):
col = graphics[i]['path']['colour']
print(col)
if col['stroke'] is not None and col['stroke'].count(col['stroke'][0]) != 3:
# got a contour with RGB colour
alert_val = 0
col = col['stroke']  # use the stroke RGB triple
r, g, b = col
if col == (0.0, 0.0, 0.0):
print("colour: black")
elif col == (1.0, 1.0, 0.0):
print("colour: yellow")
alert_val = 1
elif g > 0.33 and g < 0.66:
# (0.89, 0.424, 0.0392)
# (0.969, 0.588, 0.275)
print("colour: orange")
alert_val = 2
elif g < 0.33:
print("colour: red")
alert_val = 3
else:
print("colour: other")
#img = np.zeros(img_shape, dtype = np.double)
img2 = np.array([alert_val]*xx.size).reshape(xx.shape)
img = np.zeros(xx.shape, dtype = np.double)
# nodes_new = []
# for curve in graphics[i]['path']['contour']:
# # transform curve to grid coords
# nodes = curve.nodes
# for i in range(len(nodes.T)):
# # Multiply node by transformation matrix m to
# # get grid coordinates
# v = np.matmul(np.append(nodes.T[i], 1), m_inv)
# nodes_new.append(v[:-1])
# nodes = np.array([node.round() for node in nodes_new])
# mask = skimage.draw.polygon2mask(img_shape[:-1], nodes)
# img[mask] = col
#mask_list.append(img)
## alternative approach
vv = np.vstack([curve.nodes.T for curve in graphics[i]['path']['contour']])
# construct a Path from the vertices
pth = Path(vv, closed=False)
# test which pixels fall within the path
mask = pth.contains_points(xy)
# reshape to the same size as the grid
mask = mask.reshape(xx.shape)
# create a masked array
masked = np.ma.masked_array(img2, ~mask)
# or simply set values for masked pixels
img[mask] = alert_val
# combine with coords...
am = np.stack((xx, yy, img))
mask_list.append(am)
return mask_list
## end
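# Minimal sketch of the point-in-polygon masking used above (hypothetical
# triangle; Path/contains_points are the matplotlib.path calls already imported):
#   vv = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#   pth = Path(vv, closed=False)
#   mask = pth.contains_points(xy).reshape(xx.shape)  # True inside the contour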
def createGriddedData(map_groups, alert_data, file_path=None):
# container for gridded data layers
vars = {}
# data will have shape defined by the image map extent divided
# into 0.1 degree grid
res = 0.1
lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG
x = np.arange(lon_min, lon_max, res) # [round(x,1) for x in x]
y = np.arange(lat_min, lat_max, res) # [round(y,1) for y in y]
xx, yy = np.meshgrid(x, y)
xy = np.vstack((xx.ravel(), yy.ravel())).T
# Create transformation matrix
tm = np.array([res, 0, 0, res, lon_min, lat_min])
m = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))
try:
m_inv = LA.inv(m)
except LA.LinAlgError:
sys.exit("Could not invert transformation matrix")
for i, mg in enumerate(map_groups):
_, graphics = mg
print(i)
# count arrays added for this group
n = 0
for j, gfx in enumerate(graphics):
colour = gfx['path']['colour']
print(colour)
col = None
if colour['stroke'] is not None and len(colour['stroke']) == 3:
col = colour['stroke']
elif colour['fill'] is not None and len(colour['fill']) == 3:
col = colour['fill']
if col is not None:
# got a contour with associated RGB colour
print(col)
alert_val = 0
r, g, b = col
if col == (0.0, 0.0, 0.0):
print("colour: black")
elif col == (1.0, 1.0, 0.0):
print("colour: yellow")
alert_val = 1
elif col == (1.0, 0.0, 0.0):
print("colour: red")
alert_val = 3
elif g > 0.25 and g < 0.66:
# (0.89, 0.424, 0.0392)
# (0.969, 0.588, 0.275)
# (0.596, 0.282, 0.0275)
print("colour: orange")
alert_val = 2
elif r > 0.9 and g < 0.25:
print("colour: red")
alert_val = 3
else:
print("colour: other")
img = np.zeros(xx.shape, dtype = np.double)
# get nodes for the alert area
vv = np.vstack([curve.nodes.T for curve in gfx['path']['contour']])
# construct a Path from the vertices
pth = Path(vv, closed=False)
# test which pixels fall within the path
mask = pth.contains_points(xy)
# reshape to the same size as the grid
mask = mask.reshape(xx.shape)
# set values for masked pixels
img[mask] = alert_val
da = xr.DataArray(data=img, dims=["lat", "lon"], coords=[y, x])
da.attrs = {
'issue_date': alert_data.loc[i,'issue_date'],
'alert_date': alert_data.loc[i,'date'],
'alert_day': alert_data.loc[i,'day'],
'alert_weekday': alert_data.loc[i,'weekday'],
'alert_id': n+1,
'alert_type': '',
'alert_text': alert_data.loc[i,'alert_text'],
}
var_name = '_'.join(['alert', 'day'+str(da.attrs['alert_day']),
str(da.attrs['alert_id'])])
vars[var_name] = da
n += 1
# combine data arrays into data set
issue_date = alert_data.loc[0, 'issue_date']
ds = xr.Dataset(data_vars=vars,
attrs={
'title': 'TMA weather warnings for ' + issue_date,
'issue_date': issue_date,
})
if file_path is None:
file_path = 'TMA_weather_warning_'+issue_date+'.nc'
ds.to_netcdf(file_path)
## end
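# Naming sketch: an alert drawn on the day-3 map, second alert that day,
# becomes variable "alert_day3_2" in the output netCDF dataset.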
# Main
def main():
parser = argparse.ArgumentParser(description='Process TMA PDF contents')
parser.add_argument('filepath', nargs=1, type=str)
parser.add_argument('metadata', nargs=1, type=str)
args = parser.parse_args()
try:
# read lines from input file
lines = readFile(args.filepath[0])
except:
# IOError
print("Input file not found:", args.filepath[0])
sys.exit(4)
images, graphics = extractGraphics(lines)
mgroups = getMapGroups(images, graphics)
mgroups = [transformMapGroup(mg) for mg in mgroups]
try:
# Get associated data - one row per forecast date
alert_data = pd.read_csv(args.metadata[0])
except FileNotFoundError:
print("Couldn't read metadata file:", args.metadata[0])
else:
file_name = os.path.basename(args.metadata[0]).split(".")[0] + ".nc"
createGriddedData(mgroups, alert_data, file_name)
## end of main()
# Run main
if __name__ == "__main__":
main()
avg_line_length: 35.054993
max_line_length: 131
alphanum_fraction: 0.509103
content_no_comment:
import sys
import argparse
import os
import numpy as np
import numpy.linalg as LA
import bezier
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.path import Path
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.shapereader as shpreader
import skimage.draw
import xarray as xr
import pandas as pd
PDF_GS_OPS = {
'g': 'setgray (nonstroke)',
'G': 'setgray (stroke)',
'gs': 'setgraphicsstate',
'j': 'setlinejoin',
'M': 'setmiterlimit',
'rg': 'setrgbcolor (nonstroke)',
'RG': 'setrgbcolor (stroke)',
'q': 'gsave',
'Q': 'grestore',
'w': 'setlinewidth',
'W': 'clip',
'W*': 'eoclip',
}
MAP_IMG = "../resources/TZA_map.png"
extent_MAP_IMG = [28.405, 41.475, -12., -0.745]
def readFile(fp):
with open(fp) as f:
lines = [line.rstrip() for line in f]
return lines
def extractGraphics(lines):
path_ops = {'m', 'c', 'l'}
term_ops = {'f*', 'S', 'n'}
col_ops = {'rg', 'RG', 'g', 'G'}
block = []
graphics = []
images = []
col = None
for line in lines:
if line.endswith(tuple(path_ops)):
block.append(line)
elif line.endswith(tuple(term_ops)):
block.append(line)
path = processBlock(list(block))
if len(path['contour']):
graphics.append({'path': path, 'colour': col})
del block[:]
elif line.endswith(tuple(col_ops)):
block.append(line)
col = processColour(line)
elif "Do" in line:
block.append(line)
image = processImage(list(block))
if len(image):
images.append(image)
del block[:]
else:
block.append(line)
return [images, graphics]
def appendCurve(start, controls):
nodes = np.concatenate((start, controls))
nodes = nodes.reshape(len(nodes)//2,2).transpose()
curve = bezier.Curve.from_nodes(nodes)
return curve
def getCentroid(vertices):
if len(vertices):
v = np.array(vertices)
return np.mean(v, axis = 0)
def processBlock(lines):
path_is_open = False
start_xy = []
current = []
next_xy = []
controls = []
vertices = []
line_collection = []
draw_filled_area = True
for line in lines:
s = line.split()
if not len(s):
continue
op = s[-1]
if op == "m":
path_is_open = True
if (len(s) > 3):
s = s[len(s)-3:]
start_xy = current = np.array(s[:-1], dtype = float)
vertices.append(current)
print("[PATH] start point:", start_xy)
elif op == "c":
if path_is_open:
controls = np.array(s[:-1], dtype = float)
print("[PATH] bezier curve, control points:", controls)
curve = appendCurve(current, controls)
line_collection.append(curve)
current = controls[-2:]
vertices.append(current)
else:
print("[PATH] current path is not open to append bezier curve")
elif op == "l":
if path_is_open:
print("[PATH] append line segment")
next_xy = np.array(s[:-1], dtype = float)
curve = appendCurve(current, next_xy)
line_collection.append(curve)
current = next_xy
vertices.append(current)
else:
print("[PATH] current path is not open to append line segment")
elif op == "f*":
print("[PATH] fill region")
path_is_open = False
if not draw_filled_area:
del line_collection[:]
break
elif op == "S":
print("[PATH] stroke region")
path_is_open = False
break
elif op == "n":
print("[PATH] end path without fill or stroke")
path_is_open = False
del line_collection[:]
break
elif op == "h":
print("[PATH] close subpath")
if path_is_open:
if (current - start_xy).any():
print("[PATH] append line segment to close subpath")
line = appendCurve(current, start_xy)
line_collection.append(line)
current = start_xy
vertices.append(current)
path_is_open = False
else:
print("[PATH] current path is not open to close path")
else:
if op in PDF_GS_OPS.keys():
print("[PATH] got operator: " + op + " = " + PDF_GS_OPS[op])
else:
print("[PATH] unknown operator: " + op)
centroid = getCentroid(vertices)
return {'contour': line_collection, 'centroid': centroid}
def processColour(line):
col = None
s = line.split()
if not len(s):
return
op = s[-1]
if op.lower() == "rg":
print("[COLOUR] got set RGB colour operator", op)
print(s)
if (len(s) > 4):
s = s[len(s)-4:]
otype = 'stroke' if op == "RG" else 'fill'
col = {'type': otype,
'col_spec': 'rgb',
'val': np.array(s[:-1], dtype = float) }
elif op.lower() == "g":
print("[PATH] got set gray operator", op)
print(s)
if (len(s) > 2):
s = s[len(s)-2:]
otype = 'stroke' if op == "G" else 'fill'
col = { 'type': otype,
'col_spec': 'gs',
'val': np.array(s[:-1], dtype = float) }
return col
def processImage(lines):
img_collection = []
rect = []
ctm = []
name = ""
for line in lines:
s = line.split()
if not len(s):
continue
op = s[-1]
if op == "q":
print("[IMG] start image")
elif op == "re":
rect = np.array(s[:-1], dtype = float)
elif op == "cm":
try:
ctm = np.array(s[-7:-1], dtype = float)
print("[IMG] ctm:", ctm)
except ValueError as e:
print("Error setting CTM from", s, ": ", e)
elif op == "Q":
if s[-2] == "Do":
name = s[-3]
img_collection.append({'name': name, 'clip': rect, 'ctm': ctm})
elif op == "n":
print("[IMG] end path")
else:
if op in PDF_GS_OPS.keys():
print("[IMG] got operator: " + op + " = " + PDF_GS_OPS[op])
else:
print("[IMG] unknown operator: " + op)
return img_collection
def createPlot(images, contours):
fig, ax = plt.subplots()
n_col = len(plt.rcParams['axes.prop_cycle'])
for i in range(len(contours)):
for curve in contours[i]['contour']:
_ = curve.plot(num_pts = 256, color = "C" + str(i%n_col), ax = ax)
cx, cy = contours[i]['centroid']
plt.plot(cx, cy, "o")
for i in range(len(images)):
for img in images[i]:
print("Image:", img['name'])
ctm = img['ctm'].reshape(2,3)
scale = [img['ctm'][0], img['ctm'][3]]
position = img['ctm'][4:]
w, h = scale
xy = position
print("Position:", xy)
print("Size:", w, "x", h)
pos_check = xy[1] + h < 450
size_check = w > 120
if pos_check & size_check:
rect = mpatches.Rectangle(tuple(xy), w, h,
fc="none", ec="green")
ax.add_patch(rect)
arr_img = plt.imread(MAP_IMG, format='png')
ax.imshow(arr_img, interpolation='none',
origin='lower',
extent=[xy[0], xy[0]+w, xy[1]+h, xy[1]],
clip_on=True)
_ = ax.set_xlim(0, 842)
_ = ax.set_ylim(0, 595)
_ = ax.set_aspect(1)
plt.show()
def getMapGroups(images, graphics):
map_groups = []
for i in range(len(images)):
for img in images[i]:
ctm = img['ctm'].reshape(2,3)
w, h = [img['ctm'][0], img['ctm'][3]]
x, y = img['ctm'][4:]
pos_check = y + h < 450
size_check = w > 120
if pos_check & size_check:
graphics_dict = {}
for gfx in graphics:
ix, iy = gfx['path']['centroid']
x_check = (x < ix) & (ix < x + w)
y_check = (y < iy) & (iy < y + h)
if x_check & y_check:
print("Centroid: ", ix, ",", iy)
if (ix, iy) not in graphics_dict.keys():
graphics_dict[(ix, iy)] = [ gfx ]
else:
found_match = False
nodes = np.hstack([np.hstack(c.nodes) for c in gfx['path']['contour']])
for g in graphics_dict[(ix, iy)]:
n = np.hstack([np.hstack(c.nodes) for c in g['path']['contour']])
if (nodes == n).all():
col = gfx['colour']
c = g['colour']
if col['col_spec'] == c['col_spec'] and np.array_equal(getColourValue(col), getColourValue(c)):
found_match = True
break
if not found_match:
graphics_dict[(ix, iy)].append(gfx)
print("Graphics with distinct centroids:", len(graphics_dict))
map_groups.append((img, graphics_dict))
def getXPos(mg):
return mg[0]['ctm'][4]
map_groups.sort(key = getXPos)
return map_groups
def getColourValue(col):
if col is not None:
return tuple(col['val'])
def transformMapGroup(map_group):
img, graphics_dict = map_group
print("Image:", img['name'])
m1 = np.hstack((img['ctm'].reshape(3,2), np.array([[0],[0],[1]])))
try:
m1_inv = LA.inv(m1)
except LA.LinAlgError:
sys.exit("Could not invert transformation matrix")
lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG
tm = np.array([lon_max - lon_min, 0, 0, lat_max - lat_min, lon_min, lat_min])
m2 = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))
m = np.matmul(m1_inv, m2)
graphics_list = []
print("Processing graphics:", len(graphics_dict))
for z, graphics in graphics_dict.items():
print("Got", len(graphics), "graphics objects with centroid:", z)
stroke_col = None
fill_col = None
for i in range(len(graphics)):
col = graphics[i]['colour']
if col is not None:
print("got colour state:", col)
if col['type'] == "stroke":
stroke_col = getColourValue(col)
if col['type'] == "fill":
fill_col = getColourValue(col)
contour = []
for curve in graphics[i]['path']['contour']:
nodes = curve.nodes
nodes_new = []
for j in range(len(nodes.T)):
v = np.matmul(np.append(nodes.T[j], 1), m)
nodes_new.append(v[:-1])
nodes = np.array(nodes_new).T
curve_new = bezier.Curve.from_nodes(nodes)
contour.append(curve_new)
centroid = np.matmul(np.append(graphics[i]['path']['centroid'], 1), m)[:-1]
path = { 'colour': { 'stroke': stroke_col,
'fill': fill_col },
'contour': contour,
'centroid': centroid }
graphics_list.append({ 'path': path})
return (img, graphics_list)
def plotMapGroup(map_group, ax):
_, graphics = map_group
n_col = len(plt.rcParams['axes.prop_cycle'])
print("Processing graphics:", len(graphics))
for i in range(len(graphics)):
col = "C" + str(i%n_col)
if graphics[i]['path']['colour'] is not None:
col = graphics[i]['path']['colour']
for curve in graphics[i]['path']['contour']:
_ = curve.plot(num_pts = 256, color = col, ax = ax)
cx, cy = graphics[i]['path']['centroid']
ax.plot(cx, cy, "o")
def getAlertMasks(map_group):
_, graphics = map_group
res = 0.1
lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG
x = np.arange(lon_min, lon_max, res)
y = np.arange(lat_min, lat_max, res)
xx, yy = np.meshgrid(x, y)
xy = np.vstack((xx.ravel(), yy.ravel())).T
tm = np.array([res, 0, 0, res, lon_min, lat_min])
m = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))
try:
m_inv = LA.inv(m)
except LA.LinAlgError:
sys.exit("Could not invert transformation matrix")
mask_list = []
for i in range(len(graphics)):
col = graphics[i]['path']['colour']
print(col)
if col['stroke'] is not None and col['stroke'].count(col['stroke'][0]) != 3:
alert_val = 0
col = col['stroke']
r, g, b = col
if col == (0.0, 0.0, 0.0):
print("colour: black")
elif col == (1.0, 1.0, 0.0):
print("colour: yellow")
alert_val = 1
elif g > 0.33 and g < 0.66:
print("colour: orange")
alert_val = 2
elif g < 0.33:
print("colour: red")
alert_val = 3
else:
print("colour: other")
img2 = np.array([alert_val]*xx.size).reshape(xx.shape)
img = np.zeros(xx.shape, dtype = np.double)
vv = np.vstack([curve.nodes.T for curve in graphics[i]['path']['contour']])
pth = Path(vv, closed=False)
mask = pth.contains_points(xy)
mask = mask.reshape(xx.shape)
masked = np.ma.masked_array(img2, ~mask)
img[mask] = alert_val
am = np.stack((xx, yy, img))
mask_list.append(am)
return mask_list
def createGriddedData(map_groups, alert_data, file_path=None):
vars = {}
res = 0.1
lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG
x = np.arange(lon_min, lon_max, res)
y = np.arange(lat_min, lat_max, res)
xx, yy = np.meshgrid(x, y)
xy = np.vstack((xx.ravel(), yy.ravel())).T
tm = np.array([res, 0, 0, res, lon_min, lat_min])
m = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))
try:
m_inv = LA.inv(m)
except LA.LinAlgError:
sys.exit("Could not invert transformation matrix")
for i, mg in enumerate(map_groups):
_, graphics = mg
print(i)
n = 0
for j, gfx in enumerate(graphics):
colour = gfx['path']['colour']
print(colour)
col = None
if colour['stroke'] is not None and len(colour['stroke']) == 3:
col = colour['stroke']
elif colour['fill'] is not None and len(colour['fill']) == 3:
col = colour['fill']
if col is not None:
print(col)
alert_val = 0
r, g, b = col
if col == (0.0, 0.0, 0.0):
print("colour: black")
elif col == (1.0, 1.0, 0.0):
print("colour: yellow")
alert_val = 1
elif col == (1.0, 0.0, 0.0):
print("colour: red")
alert_val = 3
elif g > 0.25 and g < 0.66:
print("colour: orange")
alert_val = 2
elif r > 0.9 and g < 0.25:
print("colour: red")
alert_val = 3
else:
print("colour: other")
img = np.zeros(xx.shape, dtype = np.double)
vv = np.vstack([curve.nodes.T for curve in gfx['path']['contour']])
pth = Path(vv, closed=False)
mask = pth.contains_points(xy)
mask = mask.reshape(xx.shape)
img[mask] = alert_val
da = xr.DataArray(data=img, dims=["lat", "lon"], coords=[y, x])
da.attrs = {
'issue_date': alert_data.loc[i,'issue_date'],
'alert_date': alert_data.loc[i,'date'],
'alert_day': alert_data.loc[i,'day'],
'alert_weekday': alert_data.loc[i,'weekday'],
'alert_id': n+1,
'alert_type': '',
'alert_text': alert_data.loc[i,'alert_text'],
}
var_name = '_'.join(['alert', 'day'+str(da.attrs['alert_day']),
str(da.attrs['alert_id'])])
vars[var_name] = da
n += 1
issue_date = alert_data.loc[0, 'issue_date']
ds = xr.Dataset(data_vars=vars,
attrs={
'title': 'TMA weather warnings for ' + issue_date,
'issue_date': issue_date,
})
if file_path is None:
file_path = 'TMA_weather_warning_'+issue_date+'.nc'
ds.to_netcdf(file_path)
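# Reading the gridded alerts back (sketch): variables are named alert_day<d>_<id>
# per the var_name construction above, e.g. with xarray:
#   ds = xr.open_dataset('TMA_weather_warning_' + issue_date + '.nc')
#   ds['alert_day1_1'].plot()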
def main():
parser = argparse.ArgumentParser(description='Process TMA PDF contents')
parser.add_argument('filepath', nargs=1, type=str)
parser.add_argument('metadata', nargs=1, type=str)
args = parser.parse_args()
try:
lines = readFile(args.filepath[0])
    except FileNotFoundError:
print("Input file not found:", args.filepath[0])
sys.exit(4)
images, graphics = extractGraphics(lines)
mgroups = getMapGroups(images, graphics)
mgroups = [transformMapGroup(mg) for mg in mgroups]
try:
alert_data = pd.read_csv(args.metadata[0])
except FileNotFoundError:
print("Couldn't read metadata file:", args.metadata[0])
else:
file_name = os.path.basename(args.metadata[0]).split(".")[0] + ".nc"
createGriddedData(mgroups, alert_data, file_name)
## end of main()
# Run main
if __name__ == "__main__":
main()
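# Typical invocation (sketch, per the argparse definition above):
#   python this_script.py <filepath> <metadata.csv>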
| true
| true
|
1c43c028cb0bd6fa2468ee77c1143f9fda2d934e
| 66,798
|
py
|
Python
|
nipyapi/registry/apis/bundles_api.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/registry/apis/bundles_api.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | 1
|
2020-03-16T10:02:46.000Z
|
2020-03-16T13:37:42.000Z
|
nipyapi/registry/apis/bundles_api.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.7.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class BundlesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
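    # Illustrative usage sketch (identifiers below are hypothetical, not from this file):
    #   api = BundlesApi()
    #   bundles = api.get_bundles(bucket_name='My Bucket%')
    #   version = api.global_get_bundle_version(bundle_id='abc-123', version='1.0.0')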
def get_bundle_version_extension_additional_details_docs(self, bundle_id, version, name, **kwargs):
"""
Get bundle version extension docs details
Gets the additional details documentation for the given extension in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundle_version_extension_additional_details_docs(bundle_id, version, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:param str name: The fully qualified name of the extension (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundle_version_extension_additional_details_docs_with_http_info(bundle_id, version, name, **kwargs)
else:
(data) = self.get_bundle_version_extension_additional_details_docs_with_http_info(bundle_id, version, name, **kwargs)
return data
def get_bundle_version_extension_additional_details_docs_with_http_info(self, bundle_id, version, name, **kwargs):
"""
Get bundle version extension docs details
Gets the additional details documentation for the given extension in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundle_version_extension_additional_details_docs_with_http_info(bundle_id, version, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:param str name: The fully qualified name of the extension (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id', 'version', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundle_version_extension_additional_details_docs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `get_bundle_version_extension_additional_details_docs`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `get_bundle_version_extension_additional_details_docs`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_bundle_version_extension_additional_details_docs`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/html'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions/{name}/docs/additional-details', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_bundle_version_extension_docs(self, bundle_id, version, name, **kwargs):
"""
Get bundle version extension docs
Gets the documentation for the given extension in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundle_version_extension_docs(bundle_id, version, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:param str name: The fully qualified name of the extension (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundle_version_extension_docs_with_http_info(bundle_id, version, name, **kwargs)
else:
(data) = self.get_bundle_version_extension_docs_with_http_info(bundle_id, version, name, **kwargs)
return data
def get_bundle_version_extension_docs_with_http_info(self, bundle_id, version, name, **kwargs):
"""
Get bundle version extension docs
Gets the documentation for the given extension in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundle_version_extension_docs_with_http_info(bundle_id, version, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:param str name: The fully qualified name of the extension (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id', 'version', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundle_version_extension_docs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `get_bundle_version_extension_docs`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `get_bundle_version_extension_docs`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_bundle_version_extension_docs`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/html'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions/{name}/docs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_bundle_versions(self, **kwargs):
"""
Get all bundle versions
Gets the metadata about extension bundle versions across all authorized buckets with optional filters applied. If the user is not authorized to any buckets, an empty list will be returned. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundle_versions(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str group_id: Optional groupId to filter results. The value may be an exact match, or a wildcard, such as 'com.%' to select all bundle versions where the groupId starts with 'com.'.
:param str artifact_id: Optional artifactId to filter results. The value may be an exact match, or a wildcard, such as 'nifi-%' to select all bundle versions where the artifactId starts with 'nifi-'.
        :param str version: Optional version to filter results. The value may be an exact match, or a wildcard, such as '1.0.%' to select all bundle versions where the version starts with '1.0.'.
:return: list[BundleVersionMetadata]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundle_versions_with_http_info(**kwargs)
else:
(data) = self.get_bundle_versions_with_http_info(**kwargs)
return data
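    # Filter sketch using the wildcard syntax documented above (values hypothetical):
    #   api.get_bundle_versions(group_id='com.%', version='1.0.%')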
def get_bundle_versions_with_http_info(self, **kwargs):
"""
Get all bundle versions
Gets the metadata about extension bundle versions across all authorized buckets with optional filters applied. If the user is not authorized to any buckets, an empty list will be returned. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundle_versions_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str group_id: Optional groupId to filter results. The value may be an exact match, or a wildcard, such as 'com.%' to select all bundle versions where the groupId starts with 'com.'.
:param str artifact_id: Optional artifactId to filter results. The value may be an exact match, or a wildcard, such as 'nifi-%' to select all bundle versions where the artifactId starts with 'nifi-'.
        :param str version: Optional version to filter results. The value may be an exact match, or a wildcard, such as '1.0.%' to select all bundle versions where the version starts with '1.0.'.
:return: list[BundleVersionMetadata]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['group_id', 'artifact_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundle_versions" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'group_id' in params:
query_params.append(('groupId', params['group_id']))
if 'artifact_id' in params:
query_params.append(('artifactId', params['artifact_id']))
if 'version' in params:
query_params.append(('version', params['version']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/versions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BundleVersionMetadata]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_bundles(self, **kwargs):
"""
Get all bundles
Gets the metadata for all bundles across all authorized buckets with optional filters applied. The returned results will include only items from buckets for which the user is authorized. If the user is not authorized to any buckets, an empty list will be returned. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundles(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_name: Optional bucket name to filter results. The value may be an exact match, or a wildcard, such as 'My Bucket%' to select all bundles where the bucket name starts with 'My Bucket'.
:param str group_id: Optional groupId to filter results. The value may be an exact match, or a wildcard, such as 'com.%' to select all bundles where the groupId starts with 'com.'.
:param str artifact_id: Optional artifactId to filter results. The value may be an exact match, or a wildcard, such as 'nifi-%' to select all bundles where the artifactId starts with 'nifi-'.
:return: list[ExtensionBundle]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundles_with_http_info(**kwargs)
else:
(data) = self.get_bundles_with_http_info(**kwargs)
return data
def get_bundles_with_http_info(self, **kwargs):
"""
Get all bundles
Gets the metadata for all bundles across all authorized buckets with optional filters applied. The returned results will include only items from buckets for which the user is authorized. If the user is not authorized to any buckets, an empty list will be returned. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bundles_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_name: Optional bucket name to filter results. The value may be an exact match, or a wildcard, such as 'My Bucket%' to select all bundles where the bucket name starts with 'My Bucket'.
:param str group_id: Optional groupId to filter results. The value may be an exact match, or a wildcard, such as 'com.%' to select all bundles where the groupId starts with 'com.'.
:param str artifact_id: Optional artifactId to filter results. The value may be an exact match, or a wildcard, such as 'nifi-%' to select all bundles where the artifactId starts with 'nifi-'.
:return: list[ExtensionBundle]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bucket_name', 'group_id', 'artifact_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundles" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'bucket_name' in params:
query_params.append(('bucketName', params['bucket_name']))
if 'group_id' in params:
query_params.append(('groupId', params['group_id']))
if 'artifact_id' in params:
query_params.append(('artifactId', params['artifact_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ExtensionBundle]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_delete_bundle_version(self, bundle_id, version, **kwargs):
"""
Delete bundle version
        Deletes the given extension bundle version and its associated binary content. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_delete_bundle_version(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: BundleVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_delete_bundle_version_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_delete_bundle_version_with_http_info(bundle_id, version, **kwargs)
return data
def global_delete_bundle_version_with_http_info(self, bundle_id, version, **kwargs):
"""
Delete bundle version
        Deletes the given extension bundle version and its associated binary content. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_delete_bundle_version_with_http_info(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: BundleVersion
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_delete_bundle_version" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_delete_bundle_version`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_delete_bundle_version`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BundleVersion',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_delete_extension_bundle(self, bundle_id, **kwargs):
"""
Delete bundle
        Deletes the given extension bundle and all of its versions. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_delete_extension_bundle(bundle_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:return: ExtensionBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_delete_extension_bundle_with_http_info(bundle_id, **kwargs)
else:
(data) = self.global_delete_extension_bundle_with_http_info(bundle_id, **kwargs)
return data
def global_delete_extension_bundle_with_http_info(self, bundle_id, **kwargs):
"""
Delete bundle
        Deletes the given extension bundle and all of its versions. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_delete_extension_bundle_with_http_info(bundle_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:return: ExtensionBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_delete_extension_bundle" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_delete_extension_bundle`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExtensionBundle',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_version(self, bundle_id, version, **kwargs):
"""
Get bundle version
Gets the descriptor for the given version of the given extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: BundleVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_get_bundle_version_with_http_info(bundle_id, version, **kwargs)
return data
def global_get_bundle_version_with_http_info(self, bundle_id, version, **kwargs):
"""
Get bundle version
Gets the descriptor for the given version of the given extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version_with_http_info(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: BundleVersion
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BundleVersion',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_version_content(self, bundle_id, version, **kwargs):
"""
Get bundle version content
Gets the binary content for the given version of the given extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version_content(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_content_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_get_bundle_version_content_with_http_info(bundle_id, version, **kwargs)
return data
def global_get_bundle_version_content_with_http_info(self, bundle_id, version, **kwargs):
"""
Get bundle version content
Gets the binary content for the given version of the given extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version_content_with_http_info(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version_content" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version_content`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version_content`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/octet-stream'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/content', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
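    # Note (sketch, standard swagger-codegen client behaviour): for this binary
    # endpoint, passing _preload_content=False returns the raw urllib3 response so
    # the bundle bytes can be streamed to disk instead of decoded as text.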
def global_get_bundle_version_extension(self, bundle_id, version, name, **kwargs):
"""
Get bundle version extension
Gets the metadata about the extension with the given name in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version_extension(bundle_id, version, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:param str name: The fully qualified name of the extension (required)
:return: list[Extension]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_extension_with_http_info(bundle_id, version, name, **kwargs)
else:
(data) = self.global_get_bundle_version_extension_with_http_info(bundle_id, version, name, **kwargs)
return data
def global_get_bundle_version_extension_with_http_info(self, bundle_id, version, name, **kwargs):
"""
Get bundle version extension
Gets the metadata about the extension with the given name in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version_extension_with_http_info(bundle_id, version, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:param str name: The fully qualified name of the extension (required)
:return: list[Extension]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id', 'version', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version_extension" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version_extension`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version_extension`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `global_get_bundle_version_extension`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Extension]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_version_extensions(self, bundle_id, version, **kwargs):
"""
Get bundle version extensions
Gets the metadata about the extensions in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version_extensions(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: list[ExtensionMetadata]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_extensions_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_get_bundle_version_extensions_with_http_info(bundle_id, version, **kwargs)
return data
def global_get_bundle_version_extensions_with_http_info(self, bundle_id, version, **kwargs):
"""
Get bundle version extensions
Gets the metadata about the extensions in the given extension bundle version. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_version_extensions_with_http_info(bundle_id, version, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:param str version: The version of the bundle (required)
:return: list[ExtensionMetadata]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version_extensions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version_extensions`")
# verify the required parameter 'version' is set
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version_extensions`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ExtensionMetadata]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_versions(self, bundle_id, **kwargs):
"""
Get bundle versions
Gets the metadata for the versions of the given extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_versions(bundle_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:return: list[BundleVersionMetadata]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_versions_with_http_info(bundle_id, **kwargs)
else:
(data) = self.global_get_bundle_versions_with_http_info(bundle_id, **kwargs)
return data
def global_get_bundle_versions_with_http_info(self, bundle_id, **kwargs):
"""
Get bundle versions
Gets the metadata for the versions of the given extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_bundle_versions_with_http_info(bundle_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:return: list[BundleVersionMetadata]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_versions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_versions`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BundleVersionMetadata]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_extension_bundle(self, bundle_id, **kwargs):
"""
Get bundle
Gets the metadata about an extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_extension_bundle(bundle_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:return: ExtensionBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_extension_bundle_with_http_info(bundle_id, **kwargs)
else:
(data) = self.global_get_extension_bundle_with_http_info(bundle_id, **kwargs)
return data
def global_get_extension_bundle_with_http_info(self, bundle_id, **kwargs):
"""
Get bundle
Gets the metadata about an extension bundle. NOTE: This endpoint is subject to change as NiFi Registry and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.global_get_extension_bundle_with_http_info(bundle_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bundle_id: The extension bundle identifier (required)
:return: ExtensionBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bundle_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_extension_bundle" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_extension_bundle`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExtensionBundle',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 47.918221
| 357
| 0.600243
|
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class BundlesApi(object):
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_bundle_version_extension_additional_details_docs(self, bundle_id, version, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundle_version_extension_additional_details_docs_with_http_info(bundle_id, version, name, **kwargs)
else:
(data) = self.get_bundle_version_extension_additional_details_docs_with_http_info(bundle_id, version, name, **kwargs)
return data
def get_bundle_version_extension_additional_details_docs_with_http_info(self, bundle_id, version, name, **kwargs):
all_params = ['bundle_id', 'version', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundle_version_extension_additional_details_docs" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `get_bundle_version_extension_additional_details_docs`")
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `get_bundle_version_extension_additional_details_docs`")
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_bundle_version_extension_additional_details_docs`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['text/html'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions/{name}/docs/additional-details', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_bundle_version_extension_docs(self, bundle_id, version, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundle_version_extension_docs_with_http_info(bundle_id, version, name, **kwargs)
else:
(data) = self.get_bundle_version_extension_docs_with_http_info(bundle_id, version, name, **kwargs)
return data
def get_bundle_version_extension_docs_with_http_info(self, bundle_id, version, name, **kwargs):
all_params = ['bundle_id', 'version', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundle_version_extension_docs" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `get_bundle_version_extension_docs`")
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `get_bundle_version_extension_docs`")
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_bundle_version_extension_docs`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['text/html'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions/{name}/docs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_bundle_versions(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundle_versions_with_http_info(**kwargs)
else:
(data) = self.get_bundle_versions_with_http_info(**kwargs)
return data
def get_bundle_versions_with_http_info(self, **kwargs):
all_params = ['group_id', 'artifact_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundle_versions" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'group_id' in params:
query_params.append(('groupId', params['group_id']))
if 'artifact_id' in params:
query_params.append(('artifactId', params['artifact_id']))
if 'version' in params:
query_params.append(('version', params['version']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/versions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BundleVersionMetadata]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_bundles(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bundles_with_http_info(**kwargs)
else:
(data) = self.get_bundles_with_http_info(**kwargs)
return data
def get_bundles_with_http_info(self, **kwargs):
all_params = ['bucket_name', 'group_id', 'artifact_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bundles" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'bucket_name' in params:
query_params.append(('bucketName', params['bucket_name']))
if 'group_id' in params:
query_params.append(('groupId', params['group_id']))
if 'artifact_id' in params:
query_params.append(('artifactId', params['artifact_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ExtensionBundle]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_delete_bundle_version(self, bundle_id, version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_delete_bundle_version_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_delete_bundle_version_with_http_info(bundle_id, version, **kwargs)
return data
def global_delete_bundle_version_with_http_info(self, bundle_id, version, **kwargs):
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_delete_bundle_version" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_delete_bundle_version`")
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_delete_bundle_version`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BundleVersion',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_delete_extension_bundle(self, bundle_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_delete_extension_bundle_with_http_info(bundle_id, **kwargs)
else:
(data) = self.global_delete_extension_bundle_with_http_info(bundle_id, **kwargs)
return data
def global_delete_extension_bundle_with_http_info(self, bundle_id, **kwargs):
all_params = ['bundle_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_delete_extension_bundle" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_delete_extension_bundle`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExtensionBundle',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_version(self, bundle_id, version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_get_bundle_version_with_http_info(bundle_id, version, **kwargs)
return data
def global_get_bundle_version_with_http_info(self, bundle_id, version, **kwargs):
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version`")
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BundleVersion',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_version_content(self, bundle_id, version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_content_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_get_bundle_version_content_with_http_info(bundle_id, version, **kwargs)
return data
def global_get_bundle_version_content_with_http_info(self, bundle_id, version, **kwargs):
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version_content" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version_content`")
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version_content`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/octet-stream'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/content', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_version_extension(self, bundle_id, version, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_extension_with_http_info(bundle_id, version, name, **kwargs)
else:
(data) = self.global_get_bundle_version_extension_with_http_info(bundle_id, version, name, **kwargs)
return data
def global_get_bundle_version_extension_with_http_info(self, bundle_id, version, name, **kwargs):
all_params = ['bundle_id', 'version', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version_extension" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version_extension`")
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version_extension`")
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `global_get_bundle_version_extension`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Extension]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_version_extensions(self, bundle_id, version, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_version_extensions_with_http_info(bundle_id, version, **kwargs)
else:
(data) = self.global_get_bundle_version_extensions_with_http_info(bundle_id, version, **kwargs)
return data
def global_get_bundle_version_extensions_with_http_info(self, bundle_id, version, **kwargs):
all_params = ['bundle_id', 'version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_version_extensions" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_version_extensions`")
if ('version' not in params) or (params['version'] is None):
raise ValueError("Missing the required parameter `version` when calling `global_get_bundle_version_extensions`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
if 'version' in params:
path_params['version'] = params['version']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions/{version}/extensions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ExtensionMetadata]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_bundle_versions(self, bundle_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_bundle_versions_with_http_info(bundle_id, **kwargs)
else:
(data) = self.global_get_bundle_versions_with_http_info(bundle_id, **kwargs)
return data
def global_get_bundle_versions_with_http_info(self, bundle_id, **kwargs):
all_params = ['bundle_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_bundle_versions" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_bundle_versions`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}/versions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BundleVersionMetadata]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def global_get_extension_bundle(self, bundle_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.global_get_extension_bundle_with_http_info(bundle_id, **kwargs)
else:
(data) = self.global_get_extension_bundle_with_http_info(bundle_id, **kwargs)
return data
def global_get_extension_bundle_with_http_info(self, bundle_id, **kwargs):
all_params = ['bundle_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method global_get_extension_bundle" % key
)
params[key] = val
del params['kwargs']
if ('bundle_id' not in params) or (params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `global_get_extension_bundle`")
collection_formats = {}
path_params = {}
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/bundles/{bundleId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExtensionBundle',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
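# A minimal usage sketch for the bundle endpoints above. `BundlesApi` is a
# hypothetical name for the enclosing class (its definition falls outside this
# excerpt) and `api_client` stands for an already-configured swagger ApiClient;
# both names are assumptions, not part of the generated module.
#
#   api = BundlesApi(api_client)
#   bundles = api.get_bundles(bucket_name='prod')          # -> list[ExtensionBundle]
#   versions = api.global_get_bundle_versions(bundle_id)   # -> list[BundleVersionMetadata]
#   api.global_delete_bundle_version(bundle_id, '1.0.0')   # -> BundleVersion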
# Copyright © 2019 The vt-py authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aiohttp
import asyncio
import base64
import json
from .error import APIError
from .feed import Feed
from .object import Object
from .iterator import Iterator
from .version import __version__
__all__ = [
'Client',
'ClientResponse',
'url_id']
_API_HOST = 'https://www.virustotal.com'
# All API endpoints start with this prefix, you don't need to include the
# prefix in the paths you request as it's prepended automatically.
_ENDPOINT_PREFIX = '/api/v3'
# AppEngine server decides whether or not it should serve gzipped content
# based on Accept-Encoding and User-Agent. Non-standard UAs are not served
# with gzipped content unless it contains the string "gzip" somewhere.
# See: https://cloud.google.com/appengine/kb/#compression
_USER_AGENT_FMT = '{agent}; vtpy {version}; gzip'
def _make_sync(future):
"""Utility function that waits for an async call, making it sync."""
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
# Generate an event loop if there isn't any.
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop.run_until_complete(future)
def url_id(url):
"""Generates the object ID for an URL.
The ID generated by this function can be used in calls that expect a URL ID
like `client.get_object('/urls/<id>')`
"""
return base64.urlsafe_b64encode(url.encode()).decode().strip("=")
class ClientResponse:
"""Class representing the HTTP responses returned by the client.
  This class is just a thin wrapper around `aiohttp.ClientResponse
<https://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientResponse>`_
that allows using it in both asynchronous and synchronous mode. Instances of
this class have all the attributes that you can find in `aiohttp.ClientResponse`,
like `version`, `status`, `method`, `url`, and so on. Methods in
`aiohttp.ClientResponse` that return a coroutine have two flavors in this
class: synchronous and asynchronous. For example, `aiohttp.ClientResponse.read()`
becomes `vt.ClientResponse.read_async()`, and `vt.ClientResponse.read()` is
the synchronous version of `vt.ClientResponse.read_async()`. Find more
information about attributes and methods in `aiohttp.ClientResponse` in:
https://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientResponse
"""
def __init__(self, aiohttp_resp):
self._aiohttp_resp = aiohttp_resp
def __getattr__(self, attr):
return getattr(self._aiohttp_resp, attr)
@property
def content(self):
return StreamReader(self._aiohttp_resp.content)
async def read_async(self):
return await self._aiohttp_resp.read()
def read(self):
return _make_sync(self.read_async())
async def json_async(self):
return await self._aiohttp_resp.json()
def json(self):
return _make_sync(self.json_async())
async def text_async(self):
return await self._aiohttp_resp.text()
def text(self):
return _make_sync(self.text_async())
class StreamReader:
"""Class representing the HTTP responses returned by the client.
  This class is just a thin wrapper around `aiohttp.StreamReader
  <https://aiohttp.readthedocs.io/en/stable/streams.html#aiohttp.StreamReader>`_
  that allows using it in both asynchronous and synchronous mode. Instances of
  this class have all the methods that you can find in `aiohttp.StreamReader`,
  like `read()`, `readany()`, etc. Methods in `aiohttp.StreamReader`
  come in two flavors in this class: synchronous and asynchronous. For example,
  `read()` and `read_async()`, where `read()` is the synchronous one and
  `read_async()` is the asynchronous one. Find more information about attributes and methods in
`aiohttp.StreamReader` in:
https://aiohttp.readthedocs.io/en/stable/streams.html#aiohttp.StreamReader
"""
def __init__(self, aiohttp_stream_reader):
self._aiohttp_stream_reader = aiohttp_stream_reader
def __getattr__(self, attr):
return getattr(self._aiohttp_stream_reader, attr)
async def read_async(self, n=-1):
return await self._aiohttp_stream_reader.read(n)
def read(self, n=-1):
return _make_sync(self.read_async(n))
async def readany_async(self):
return await self._aiohttp_stream_reader.readany()
def readany(self):
return _make_sync(self.readany_async())
async def readexactly_async(self, n):
return await self._aiohttp_stream_reader.readexactly(n)
def readexactly(self, n):
return _make_sync(self.readexactly_async(n))
async def readline_async(self):
return await self._aiohttp_stream_reader.readline()
def readline(self):
return _make_sync(self.readline_async())
async def readchunk_async(self):
return await self._aiohttp_stream_reader.readchunk()
def readchunk(self):
return _make_sync(self.readchunk_async())
class Client:
"""Client for interacting with VirusTotal.
:param apikey: Your VirusTotal API key.
:param agent: A string that identifies your application.
:param host: By default https://www.virustotal.com, it can be changed for
testing purposes.
:type apikey: str
:type agent: str
:type host: str
"""
def __init__(self, apikey, agent="unknown", host=None):
"""Intialize the client with the provided API key."""
if not isinstance(apikey, str):
raise ValueError('API key must be a string')
if not apikey:
raise ValueError('API key can not be an empty string')
self._host = host or _API_HOST
self._apikey = apikey
self._agent = agent
self._session = None
def _full_url(self, path, *args):
try:
path = path.format(*args)
except IndexError:
raise ValueError('Not enough arguments to fill all placeholders in path')
if path.startswith('http'):
return path
return self._host + _ENDPOINT_PREFIX + path
def _get_session(self):
if not self._session:
self._session = aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False),
headers={
'X-Apikey': self._apikey,
'Accept-Encoding': 'gzip',
'User-Agent': _USER_AGENT_FMT.format_map({
'agent': self._agent, 'version': __version__})})
return self._session
async def __aenter__(self):
return self
async def __aexit__(self, type, value, traceback):
await self.close_async()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _extract_data_from_json(self, json_response):
    if 'data' not in json_response:
      raise ValueError('response does not return a data field')
return json_response['data']
async def _response_to_json(self, response):
error = await self.get_error_async(response)
if error:
raise error
return await response.json_async()
async def _response_to_object(self, response):
json_response = await self._response_to_json(response)
try:
return Object.from_dict(self._extract_data_from_json(json_response))
except ValueError as err:
raise ValueError('response is not an object: {}'.format(err))
async def close_async(self):
"""Like :func:`close` but returns a coroutine."""
if self._session:
await self._session.close()
self._session = None
def close(self):
"""Closes the client.
When the client is not needed anymore it should be closed for releasing
resources like TCP connections.
"""
    return _make_sync(self.close_async())
def delete(self, path, *path_args):
"""Sends a DELETE request to a given API endpoint.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:type path: str
:returns: An instance of :class:`ClientResponse`.
"""
return _make_sync(self.delete_async(path, *path_args))
async def delete_async(self, path, *path_args):
"""Like :func:`delete` but returns a coroutine."""
return ClientResponse(
await self._get_session().delete(self._full_url(path, *path_args)))
def download_file(self, hash, file):
"""Downloads a file given its hash (SHA-256, SHA-1 or MD5).
    The file identified by the hash will be written to the provided file
object. The file object must be opened in write binary mode ('wb').
:param hash: File hash.
:param file: A file object where the downloaded file will be written to.
:type hash: str
:type file: file-like object
"""
return _make_sync(self.download_file_async(hash, file))
async def download_file_async(self, hash, file):
"""Like :func:`download_file` but returns a coroutine."""
response = await self.get_async('/files/{}/download'.format(hash))
while True:
chunk = await response.content.read_async(1024*1024)
if not chunk:
break
file.write(chunk)
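  # A minimal sketch of the synchronous variant above; the path and '<sha256>'
  # are placeholders, not working values.
  #
  #   with open('sample.bin', 'wb') as f:
  #     client.download_file('<sha256>', f)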
def feed(self, feed_type, cursor=None):
"""Returns an iterator for a VirusTotal feed.
    This function returns an iterator that allows you to retrieve a continuous
stream of files as they are scanned by VirusTotal. See the documentation
for the :class:`Feed` class for more details.
:param feed_type: One of the supported feed types enumerated in
:class:`FeedType`.
:param cursor: An optional cursor indicating where to start. This argument
      can be a string in the format 'YYYYMMDDhhmm' indicating the date and time
of the first package that will be retrieved.
    :type feed_type: :class:`vt.FeedType`
:type cursor: str
"""
return Feed(self, feed_type, cursor=cursor)
def get(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint.
This is a low-level function that returns a raw HTTP response, no error
checking nor response parsing is performed. See :func:`get_json`,
:func:`get_data` and :func:`get_object` for higher-level functions.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns: An instance of :class:`ClientResponse`.
"""
return _make_sync(self.get_async(path, *path_args, params=params))
async def get_async(self, path, *path_args, params=None):
"""Like :func:`get` but returns a coroutine."""
return ClientResponse(
await self._get_session().get(
self._full_url(path, *path_args),
params=params))
def get_data(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint and returns response's data.
Most VirusTotal API responses are JSON-encoded with the following format::
{"data": <response data>}
    This function parses the server's response and returns only the data. If the
    response is not in the expected format, an exception is raised. For endpoints
where the data is a VirusTotal object you can use :func:`get_object` instead.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns:
Whatever the server returned in the response's data field, it may be a
dict, list, string or some other Python type, depending on the endpoint
called.
"""
return _make_sync(self.get_data_async(path, *path_args, params=params))
async def get_data_async(self, path, *path_args, params=None):
"""Like :func:`get_data` but returns a coroutine."""
json_response = await self.get_json_async(path, *path_args, params=params)
return self._extract_data_from_json(json_response)
async def get_error_async(self, response):
"""Given a :class:`ClientResponse` returns a :class:`APIError`
This function checks if the response from the VirusTotal backend was an
    error and returns the appropriate :class:`APIError` or None if no error
occurred.
:param response: A :class:`ClientResponse` instance.
:returns: An instance of :class:`APIError` or None.
"""
if response.status == 200:
return None
if response.status >= 400 and response.status <= 499:
if response.content_type == 'application/json':
json_response = await response.json_async()
error = json_response.get('error')
if error:
return APIError.from_dict(error)
return APIError('ClientError', await response.text_async())
return APIError('ServerError', await response.text_async())
def get_json(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint and parses the response.
Most VirusTotal API responses are JSON-encoded. This function parses the
    JSON, checks for errors, and returns the server response as a dictionary.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns:
A dictionary with the backend's response.
"""
return _make_sync(self.get_json_async(path, *path_args, params=params))
async def get_json_async(self, path, *path_args, params=None):
"""Like :func:`get_json` but returns a coroutine."""
response = await self.get_async(path, *path_args, params=params)
return await self._response_to_json(response)
def get_object(self, path, *path_args, params=None):
"""Sends a GET request to a given API endpoint and returns an object.
The endpoint specified must return an object, not a collection. This
means that get_object can be used with endpoints like /files/{file_id}
and /urls/{url_id}, which return an individual object but not with
/comments, which returns a collection of objects.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Parameters sent in the request.
:type path: str
:type params: dict
:returns:
An instance of :class:`Object`.
"""
return _make_sync(self.get_object_async(path, *path_args, params=params))
async def get_object_async(self, path, *path_args, params=None):
"""Like :func:`get_object` but returns a coroutine."""
response = await self.get_async(path, *path_args, params=params)
return await self._response_to_object(response)
def patch(self, path, *path_args, data=None):
"""Sends a PATCH request to a given API endpoint.
This is a low-level function that returns a raw HTTP response, no error
checking nor response parsing is performed. See :func:`patch_object` for
a higher-level function.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param data: Data sent in the request body.
:type path: str
:type data: A string or bytes
:returns: An instance of :class:`ClientResponse`.
"""
    return _make_sync(self.patch_async(path, *path_args, data=data))
async def patch_async(self, path, *path_args, data=None):
"""Like :func:`patch` but returns a coroutine."""
return ClientResponse(
await self._get_session().patch(
self._full_url(path, *path_args),
data=data))
def patch_object(self, path, *path_args, obj):
"""Sends a PATCH request for modifying an object.
This function modifies an object. The endpoint must be one that identifies
an object, like /intelligence/hunting_rulesets/{id}.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param obj: Object that has been modified.
:type path: str
:type obj: :class:`Object`
:returns: An instance of :class:`Object` representing the same object after
      the changes have been applied.
"""
return _make_sync(self.patch_object_async(path, *path_args, obj=obj))
async def patch_object_async(self, path, *path_args, obj):
"""Like :func:`patch_object` but returns a coroutine."""
data = json.dumps({'data': obj.to_dict(modified_attributes_only=True)})
response = await self.patch_async(path, *path_args, data=data)
return await self._response_to_object(response)
def post(self, path, *path_args, data=None):
"""Sends a POST request to a given API endpoint.
This is a low-level function that returns a raw HTTP response, no error
checking nor response parsing is performed. See :func:`post_object` for
a higher-level function.
:param path: Path to API endpoint, can contain format placeholders {}.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param data: Data sent in the request body.
:type path: str
:type data: A string or bytes
:returns: An instance of :class:`ClientResponse`.
"""
return _make_sync(self.post_async(path, *path_args, data=data))
async def post_async(self, path, *path_args, data=None):
"""Like :func:`post` but returns a coroutine."""
return ClientResponse(
await self._get_session().post(
self._full_url(path, *path_args),
data=data))
def post_object(self, path, *path_args, obj):
"""Sends a POST request for creating an object.
    This function creates a new object. The endpoint must be one that identifies
a collection, like /intelligence/hunting_rulesets.
:param path: Path to API endpoint.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
    :param obj: An instance of :class:`Object` with the type expected by the API
endpoint.
:type path: str
:type obj: :class:`Object`
:returns: An instance of :class:`Object` representing the new object.
"""
return _make_sync(self.post_object_async(path, *path_args, obj=obj))
async def post_object_async(self, path, *path_args, obj):
"""Like :func:`post_object` but returns a coroutine."""
data = json.dumps({'data': obj.to_dict()})
response = await self.post_async(path, *path_args, data=data)
return await self._response_to_object(response)
def iterator(self, path, *path_args, params=None, cursor=None,
limit=None, batch_size=0):
"""Returns an iterator for the collection specified by the given path.
    The endpoint specified by path must return a collection of objects. Examples
    of such endpoints are /comments and /intelligence/search.
:param path: Path to API endpoint returning a collection.
:param path_args: A variable number of arguments that are put into any
placeholders used in path.
:param params: Additional parameters passed to the endpoint.
:param cursor: Cursor for resuming the iteration at the point it was left
previously. A cursor can be obtained with Iterator.cursor(). This
cursor is not the same one returned by the VirusTotal API.
:param limit: Maximum number of objects that will be returned by the iterator.
If a limit is not provided the iterator continues until it reaches the
last object in the collection.
:param batch_size: Maximum number of objects retrieved on each call to the
endpoint. If not provided the server will decide how many objects to
return.
:type path: str
:type params: dict
:type cursor: str
:type limit: int
:type batch_size: int
:returns: An instance of :class:`Iterator`.
"""
return Iterator(self, self._full_url(path, *path_args),
params=params, cursor=cursor, limit=limit, batch_size=batch_size)
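  # For instance (illustrative): iterating over the first ten comments with
  # the iterator above. The `text` attribute is an assumption about the
  # comment objects the backend returns.
  #
  #   for comment in client.iterator('/comments', limit=10):
  #     print(comment.text)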
def scan_file(self, file, wait_for_completion=False):
"""Scans a file.
:param file: File to be scanned.
:param wait_for_completion: If True the function doesn't return until the
analysis has been completed.
:type file: File-like object.
:type wait_for_completion: bool
:returns: An instance of :class:`Object` of analysis type.
"""
return _make_sync(self.scan_file_async(
file, wait_for_completion=wait_for_completion))
async def scan_file_async(self, file, wait_for_completion=False):
"""Like :func:`scan_file` but returns a coroutine."""
# The snippet below could be replaced with this simpler code:
#
# form_data = aiohttp.FormData()
# form_data.add_field('file', file)
#
# However, aiohttp.FormData assumes that the server supports RFC 5987 and
    # sends a Content-Disposition like:
#
# 'form-data; name="file"; filename="foobar"; filename*=UTF-8''foobar
#
# AppEngine's upload handler doesn't like the filename*=UTF-8''foobar field
# and fails with this Content-Disposition header.
part = aiohttp.get_payload(file)
filename = file.name if hasattr(file, 'name') else 'unknown'
disposition = 'form-data; name="file"; filename="{}"'.format(filename)
part.headers['Content-Disposition'] = disposition
form_data = aiohttp.MultipartWriter('form-data')
form_data.append_payload(part)
upload_url = await self.get_data_async('/files/upload_url')
response = ClientResponse(
await self._get_session().post(upload_url, data=form_data))
analysis = await self._response_to_object(response)
if wait_for_completion:
analysis = await self._wait_for_analysis_completion(analysis)
return analysis
def scan_url(self, url, wait_for_completion=False):
"""Scans a URL.
:param url: The URL to be scanned.
:param wait_for_completion: If True the function doesn't return until the
analysis has been completed.
:type url: str
:type wait_for_completion: bool
:returns: An instance of :class:`Object` of analysis type.
"""
return _make_sync(self.scan_url_async(
url, wait_for_completion=wait_for_completion))
async def scan_url_async(self, url, wait_for_completion=False):
"""Like :func:`scan_url` but returns a coroutine."""
form_data = aiohttp.FormData()
form_data.add_field('url', url)
response = ClientResponse(
await self._get_session().post(self._full_url('/urls'), data=form_data))
analysis = await self._response_to_object(response)
if wait_for_completion:
analysis = await self._wait_for_analysis_completion(analysis)
return analysis
async def _wait_for_analysis_completion(self, analysis):
while True:
analysis = await self.get_object_async('/analyses/{}', analysis.id)
if analysis.status == 'completed':
break
await asyncio.sleep(20)
return analysis
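# A minimal end-to-end sketch of the synchronous interface above; the API key
# and URL are placeholders, not working values.
#
#   import vt
#
#   with vt.Client('<apikey>') as client:
#     analysis = client.scan_url('http://www.example.com',
#                                wait_for_completion=True)
#     url = client.get_object('/urls/{}', vt.url_id('http://www.example.com'))
#     print(url.last_analysis_stats)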
from django.db import models
from django.contrib.auth.models import User
from django.db.models.fields import PositiveIntegerField
# Create your models here.
class Player(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
playername = models.CharField(max_length=30)
highscore_arcade = models.PositiveIntegerField(default=0)
car = models.PositiveSmallIntegerField(default=0)
coins = models.PositiveIntegerField(default=0)
def __str__(self):
return self.playername
class Highscore(models.Model):
player = models.ForeignKey(Player, on_delete=models.SET_NULL, null=True)
score = PositiveIntegerField()
def __str__(self):
return str(self.player)
# return f"{str(self.player)} - {str(self.score)}"
class Leaderboard(models.Model):
leaderboard_name = models.CharField(max_length=100)
nr_of_players = models.PositiveIntegerField(default=0)
highscores = models.ManyToManyField(Highscore, blank=True)
def __str__(self):
return self.leaderboard_name
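# A minimal sketch of how these models fit together (e.g. from `manage.py
# shell`); the user, names, and score are illustrative.
#
#   user = User.objects.create_user('alice')
#   player = Player.objects.create(user=user, playername='Alice')
#   score = Highscore.objects.create(player=player, score=1200)
#   board = Leaderboard.objects.create(leaderboard_name='Arcade', nr_of_players=1)
#   board.highscores.add(score)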
import os
from flask_sqlalchemy import sqlalchemy
class TestDB:
def __init__(self):
self.db_name = os.environ['DATABASE_NAME'] + '_test'
self.db_host = os.environ['DB_HOST']
self.db_root_password = os.environ['POSTGRES_ROOT_PASSWORD']
if self.db_root_password:
self.db_username = 'postgres'
self.db_password = self.db_root_password
else:
self.db_username = os.environ['DB_USERNAME']
self.db_password = os.environ['DB_PASSWORD']
self.db_uri = 'postgresql://%s:%s@%s:5433' %(self.db_username, self.db_password, self.db_host)
def create_db(self):
# create the database if root user
if self.db_root_password:
engine = sqlalchemy.create_engine(self.db_uri)
conn = engine.connect()
conn.execute("COMMIT")
conn.execute("CREATE DATABASE "+ self.db_name)
conn.close()
return self.db_uri + '/' + self.db_name
def drop_db(self):
# drop the database if root user
engine = sqlalchemy.create_engine(self.db_uri)
conn = engine.connect()
conn.execute("COMMIT")
conn.execute("SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE datname = 'counter_test' AND pid <> pg_backend_pid()")
conn.close()
if self.db_root_password:
engine = sqlalchemy.create_engine(self.db_uri)
conn = engine.connect()
conn.execute("COMMIT")
conn.execute("DROP DATABASE " + self.db_name)
conn.close()
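if __name__ == '__main__':
    # Minimal usage sketch (assumes the environment variables read in
    # __init__ are set; this block is illustrative and not part of the
    # original module).
    test_db = TestDB()
    test_uri = test_db.create_db()
    print('test database available at %s' % test_uri)
    test_db.drop_db()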
| 38.926829
| 154
| 0.624687
|
import os
import sqlalchemy
class TestDB:
def __init__(self):
self.db_name = os.environ['DATABASE_NAME'] + '_test'
self.db_host = os.environ['DB_HOST']
self.db_root_password = os.environ['POSTGRES_ROOT_PASSWORD']
if self.db_root_password:
self.db_username = 'postgres'
self.db_password = self.db_root_password
else:
self.db_username = os.environ['DB_USERNAME']
self.db_password = os.environ['DB_PASSWORD']
self.db_uri = 'postgresql://%s:%s@%s:5433' %(self.db_username, self.db_password, self.db_host)
def create_db(self):
if self.db_root_password:
engine = sqlalchemy.create_engine(self.db_uri)
conn = engine.connect()
conn.execute("COMMIT")
conn.execute("CREATE DATABASE "+ self.db_name)
conn.close()
return self.db_uri + '/' + self.db_name
def drop_db(self):
engine = sqlalchemy.create_engine(self.db_uri)
conn = engine.connect()
conn.execute("COMMIT")
conn.execute("SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE datname = 'counter_test' AND pid <> pg_backend_pid()")
conn.close()
if self.db_root_password:
engine = sqlalchemy.create_engine(self.db_uri)
conn = engine.connect()
conn.execute("COMMIT")
conn.execute("DROP DATABASE " + self.db_name)
conn.close()
| true
| true
|
1c43c2a3742fc0d3c893cf6ca0a6a729e50cb27d
| 1,672
|
py
|
Python
|
src/programy/processors/pre/stemming.py
|
NeolithEra/program-y
|
8c2396611f30c8095e98ff02988223a641c1a3be
|
[
"MIT"
] | null | null | null |
src/programy/processors/pre/stemming.py
|
NeolithEra/program-y
|
8c2396611f30c8095e98ff02988223a641c1a3be
|
[
"MIT"
] | null | null | null |
src/programy/processors/pre/stemming.py
|
NeolithEra/program-y
|
8c2396611f30c8095e98ff02988223a641c1a3be
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.processors.processing import PreProcessor
from programy.nlp.stemming import Stemmer
class StemmingPreProcessor(PreProcessor):
def __init__(self):
PreProcessor.__init__(self)
def process(self, context, word_string):
YLogger.debug(context, "Stemming sentence...")
unstemmed_words = context.brain.tokenizer.texts_to_words(word_string)
stemmed_words = [Stemmer.stem(x) for x in unstemmed_words]
return context.brain.tokenizer.words_to_texts(stemmed_words)
| 47.771429
| 120
| 0.785885
|
from programy.utils.logging.ylogger import YLogger
from programy.processors.processing import PreProcessor
from programy.nlp.stemming import Stemmer
class StemmingPreProcessor(PreProcessor):
def __init__(self):
PreProcessor.__init__(self)
def process(self, context, word_string):
YLogger.debug(context, "Stemming sentence...")
unstemmed_words = context.brain.tokenizer.texts_to_words(word_string)
stemmed_words = [Stemmer.stem(x) for x in unstemmed_words]
return context.brain.tokenizer.words_to_texts(stemmed_words)
| true
| true
|
1c43c2a3c540a69b7c7955a52ca5fcfac255bb4a
| 488
|
py
|
Python
|
DCNN-Pytorch/shape_testing.py
|
linklab-uva/deepracing
|
fc25c47658277df029e7399d295d97a75fe85216
|
[
"Apache-2.0"
] | 11
|
2020-06-29T15:21:37.000Z
|
2021-04-12T00:42:26.000Z
|
DCNN-Pytorch/shape_testing.py
|
linklab-uva/deepracing
|
fc25c47658277df029e7399d295d97a75fe85216
|
[
"Apache-2.0"
] | null | null | null |
DCNN-Pytorch/shape_testing.py
|
linklab-uva/deepracing
|
fc25c47658277df029e7399d295d97a75fe85216
|
[
"Apache-2.0"
] | 4
|
2019-01-23T23:36:57.000Z
|
2021-07-02T00:18:37.000Z
|
import torch
import deepracing_models.nn_models.Models as M
import time
#net = M.AdmiralNetKinematicPredictor(use_3dconv=False, sequence_length=20, context_length=5)
net = M.AdmiralNetCurvePredictor(use_3dconv=True, context_length=5, params_per_dimension=6)
net = net.cuda(0)
im = torch.rand(64,5,3,66,200)
im = im.cuda(0)
net=net.eval()
print(net)
print("Running net")
tick = time.time()
out = net(im)
tock = time.time()
print(out.shape)
print("Got prediction in %f seconds"%(tock-tick))
| 30.5
| 93
| 0.764344
|
import torch
import deepracing_models.nn_models.Models as M
import time
net = M.AdmiralNetCurvePredictor(use_3dconv=True, context_length=5, params_per_dimension=6)
net = net.cuda(0)
im = torch.rand(64,5,3,66,200)
im = im.cuda(0)
net=net.eval()
print(net)
print("Running net")
tick = time.time()
out = net(im)
tock = time.time()
print(out.shape)
print("Got prediction in %f seconds"%(tock-tick))
| true
| true
|
1c43c356ac0bdcea6eceba09900f788aa2884b63
| 1,000
|
py
|
Python
|
isi_sdk_8_1_1/test/test_storagepool_settings_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_1_1/test/test_storagepool_settings_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_1_1/test/test_storagepool_settings_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.storagepool_settings_extended import StoragepoolSettingsExtended # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestStoragepoolSettingsExtended(unittest.TestCase):
"""StoragepoolSettingsExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStoragepoolSettingsExtended(self):
"""Test StoragepoolSettingsExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.storagepool_settings_extended.StoragepoolSettingsExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.390244
| 112
| 0.734
|
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.storagepool_settings_extended import StoragepoolSettingsExtended
from isi_sdk_8_1_1.rest import ApiException
class TestStoragepoolSettingsExtended(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testStoragepoolSettingsExtended(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c43c36c2ada9f5d833ee6c1c64f1c7f9dff279a
| 113
|
py
|
Python
|
dexy/reporters/nodegraph/__init__.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 136
|
2015-01-06T15:04:47.000Z
|
2021-12-21T22:52:41.000Z
|
dexy/reporters/nodegraph/__init__.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 13
|
2015-01-26T14:06:58.000Z
|
2020-03-27T21:16:10.000Z
|
dexy/reporters/nodegraph/__init__.py
|
dsoto/dexy
|
0f2090250040c3c54c8481a16de8e476b559e87c
|
[
"MIT"
] | 34
|
2015-01-02T16:24:53.000Z
|
2021-11-27T05:38:30.000Z
|
import dexy.reporters.nodegraph.d3
import dexy.reporters.nodegraph.text
import dexy.reporters.nodegraph.graphviz
| 28.25
| 40
| 0.867257
|
import dexy.reporters.nodegraph.d3
import dexy.reporters.nodegraph.text
import dexy.reporters.nodegraph.graphviz
| true
| true
|
1c43c4671697267b7379d98f325b8b8c320e8e61
| 3,078
|
py
|
Python
|
cv/models.py
|
ezraermy/mkcv
|
a75ec4144b313d1f92795da582d988634cd4ac7c
|
[
"MIT"
] | null | null | null |
cv/models.py
|
ezraermy/mkcv
|
a75ec4144b313d1f92795da582d988634cd4ac7c
|
[
"MIT"
] | null | null | null |
cv/models.py
|
ezraermy/mkcv
|
a75ec4144b313d1f92795da582d988634cd4ac7c
|
[
"MIT"
] | null | null | null |
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
# Create your models here.
class CVmaker(models.Model):
title = models.CharField(max_length=255, null=True)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = 'CVmaker'
class Employee(models.Model):
Format = (
('Fancy', 'Fancy'),
('Casual', 'Casual'),
('Modern', 'Modern'),
('Classic', 'Classic'),
('Banking', 'Banking'),
('Neat', 'Neat'),
)
sex = (
('Male', 'Male'),
('Female', 'Female'),
)
CV_format = models.CharField(
max_length=100,
blank=False,
choices=Format,
help_text="Choose CV format in drop down list.")
name = models.CharField(max_length=200, blank=True)
    date_of_birth = models.DateTimeField(null=True, blank=True, help_text="yyyy-mm-dd")
gender = models.CharField(max_length=20, blank=True, choices=sex)
Home_address = models.CharField(max_length=200, blank=True)
phone = PhoneNumberField()
email = models.EmailField(max_length=200, blank=True)
BSc = models.CharField(max_length=2000, blank=True, help_text="BSc title, University name.")
BSc_start_date = models.DateTimeField(blank=True, null = True )
BSc_end_date = models.DateTimeField(blank=True, null = True )
MSc = models.CharField(
max_length=2000,
blank=True,
help_text= "Skip if you don't have one.")
MSc_start_date = models.DateTimeField(blank=True, null = True )
MSc_end_date = models.DateTimeField(blank=True, null = True )
training = models.CharField(
max_length=2000,
blank=True,
help_text="Skip if you don't have one.")
training_start_date = models.DateTimeField(blank=True, null = True )
training_end_date = models.DateTimeField(blank=True, null = True )
work_experience = models.CharField(
max_length=2000,
blank=True,
help_text="Skip if you don't have one.")
organization = models.CharField(max_length=200, blank=True)
work_exp_start_date = models.DateTimeField(blank=True, null = True)
work_exp_end_date = models.DateTimeField(blank=True, null = True )
computer_skills = models.CharField(
max_length=500,
blank = True,
help_text="List all skills from higher to lower.")
other_skills = models.CharField(
max_length=1000,
blank = True,
help_text="Your personal qualities other than proffesional skills?")
references = models.CharField(
max_length=2000,
blank = True,
help_text="Name email address and phone.")
    photo = models.FileField(blank=True, help_text="Recommended but not mandatory.")
cvmaker = models.ManyToManyField(
CVmaker,
help_text = "By selecting RATIFY I hereby declare that the information provided is true and correct.",
blank = False
)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Employee'
| 34.58427
| 110
| 0.649773
|
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class CVmaker(models.Model):
title = models.CharField(max_length=255, null=True)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = 'CVmaker'
class Employee(models.Model):
Format = (
('Fancy', 'Fancy'),
('Casual', 'Casual'),
('Modern', 'Modern'),
('Classic', 'Classic'),
('Banking', 'Banking'),
('Neat', 'Neat'),
)
sex = (
('Male', 'Male'),
('Female', 'Female'),
)
CV_format = models.CharField(
max_length=100,
blank=False,
choices=Format,
help_text="Choose CV format in drop down list.")
name = models.CharField(max_length=200, blank=True)
    date_of_birth = models.DateTimeField(null=True, blank=True, help_text="yyyy-mm-dd")
gender = models.CharField(max_length=20, blank=True, choices=sex)
Home_address = models.CharField(max_length=200, blank=True)
phone = PhoneNumberField()
email = models.EmailField(max_length=200, blank=True)
BSc = models.CharField(max_length=2000, blank=True, help_text="BSc title, University name.")
BSc_start_date = models.DateTimeField(blank=True, null = True )
BSc_end_date = models.DateTimeField(blank=True, null = True )
MSc = models.CharField(
max_length=2000,
blank=True,
help_text= "Skip if you don't have one.")
MSc_start_date = models.DateTimeField(blank=True, null = True )
MSc_end_date = models.DateTimeField(blank=True, null = True )
training = models.CharField(
max_length=2000,
blank=True,
help_text="Skip if you don't have one.")
training_start_date = models.DateTimeField(blank=True, null = True )
training_end_date = models.DateTimeField(blank=True, null = True )
work_experience = models.CharField(
max_length=2000,
blank=True,
help_text="Skip if you don't have one.")
organization = models.CharField(max_length=200, blank=True)
work_exp_start_date = models.DateTimeField(blank=True, null = True)
work_exp_end_date = models.DateTimeField(blank=True, null = True )
computer_skills = models.CharField(
max_length=500,
blank = True,
help_text="List all skills from higher to lower.")
other_skills = models.CharField(
max_length=1000,
blank = True,
help_text="Your personal qualities other than proffesional skills?")
references = models.CharField(
max_length=2000,
blank = True,
help_text="Name email address and phone.")
    photo = models.FileField(blank=True, help_text="Recommended but not mandatory.")
cvmaker = models.ManyToManyField(
CVmaker,
help_text = "By selecting RATIFY I hereby declare that the information provided is true and correct.",
blank = False
)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Employee'
| true
| true
|
1c43c476a29fee110ef9fc498803191a11545755
| 35,260
|
py
|
Python
|
meta_dataset/learners/experimental/optimization_learners.py
|
shikanggao/meta-dataset
|
7b1e99009516eda3bbd5e740e178ebc37e2d6767
|
[
"Apache-2.0"
] | null | null | null |
meta_dataset/learners/experimental/optimization_learners.py
|
shikanggao/meta-dataset
|
7b1e99009516eda3bbd5e740e178ebc37e2d6767
|
[
"Apache-2.0"
] | null | null | null |
meta_dataset/learners/experimental/optimization_learners.py
|
shikanggao/meta-dataset
|
7b1e99009516eda3bbd5e740e178ebc37e2d6767
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Optimization-based learners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import gin.tf
from meta_dataset.learners.experimental import base as learner_base
from meta_dataset.models.experimental import reparameterizable_backbones
from meta_dataset.models.experimental import reparameterizable_base
from meta_dataset.models.experimental import reparameterizable_distributions
from six.moves import zip
import tensorflow as tf
@gin.configurable
def sgd(learning_rate):
"""Construct optimizer triple for stochastic gradient descent (SGD).
Inspired by the optimizer definitions in JAX
(https://github.com/google/jax/blob/main/jax/experimental/optimizers.py),
this implementation of SGD is fully functional (i.e., it maintains no hidden
state) and so is compatible for use with an optimization-based meta-learner.
Args:
learning_rate: A positive scalar.
Returns:
An (init, update, get_params) function triple.
"""
def init(x0):
return x0
def update(i, grad, state):
del i
x = state
return x - learning_rate * grad
def get_params(state):
x = state
return x
return init, update, get_params
@gin.configurable
def adam(learning_rate, b1=0.9, b2=0.999, eps=1e-8):
"""Construct optimizer triple for Adam.
Inspired by the optimizer definitions in JAX
(https://github.com/google/jax/blob/main/jax/experimental/optimizers.py),
this implementation of Adam is fully functional (i.e., it maintains no hidden
state) and so is compatible for use with an optimization-based meta-learner.
Args:
learning_rate: A positive scalar.
b1: optional, a positive scalar value for beta_1, the exponential decay rate
for the first moment estimates (default 0.9).
b2: optional, a positive scalar value for beta_2, the exponential decay rate
for the second moment estimates (default 0.999).
eps: optional, a positive scalar value for epsilon, a small constant for
numerical stability (default 1e-8).
Returns:
An (init, update, get_params) function triple.
"""
def init(x0):
m0 = tf.zeros_like(x0)
v0 = tf.zeros_like(x0)
return x0, m0, v0
def update(i, grad, state):
i = tf.cast(i, dtype=tf.float32)
x, m, v = state
m = (1. - b1) * grad + b1 * m # First moment estimate.
v = (1. - b2) * (grad**2.) + b2 * v # Second moment estimate.
mhat = m / (1. - b1**(i + 1.)) # Bias correction.
vhat = v / (1. - b2**(i + 1.))
x = x - learning_rate * mhat / (tf.sqrt(vhat) + eps)
return x, m, v
def get_params(state):
x, _, _ = state
return x
return init, update, get_params
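def _example_adam_triple():
  # Minimal sketch (hypothetical values, not part of the original module):
  # drive a scalar toward 3.0 with the functional Adam triple above. The
  # gradient of (x - 3)^2 is supplied by hand, so no tape is needed.
  init, update, get_params = adam(learning_rate=0.1)
  state = init(tf.constant(0.0))
  for i in range(100):
    grad = 2. * (get_params(state) - 3.)
    state = update(i, grad, state)
  return get_params(state)  # Approaches 3.0.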
def optimizer_update(iterate_collection, iteration_idx, objective_fn, update_fn,
get_params_fn, first_order, clip_grad_norm):
"""Returns the next iterate in the optimization of objective_fn wrt variables.
Args:
iterate_collection: A (potentially structured) container of tf.Tensors
corresponding to the state of the current iterate.
iteration_idx: An int Tensor; the iteration number.
objective_fn: Callable that takes in variables and produces the value of the
objective function.
update_fn: Callable that takes in the gradient of the objective function and
the current iterate and produces the next iterate.
    get_params_fn: Callable that takes in an optimizer state (an element of
      `iterate_collection`) and returns the parameters encoded in that state.
    first_order: If True, prevent the computation of higher order gradients.
    clip_grad_norm: If not None, gradient dimensions are independently clipped
      to lie in the interval [-clip_grad_norm, clip_grad_norm].
  Returns:
    A list of updated optimizer states, one per element of
    `iterate_collection`.
  """
variables = [get_params_fn(iterate) for iterate in iterate_collection]
if tf.executing_eagerly():
with tf.GradientTape(persistent=True) as g:
g.watch(variables)
loss = objective_fn(variables, iteration_idx)
grads = g.gradient(loss, variables)
else:
loss = objective_fn(variables, iteration_idx)
grads = tf.gradients(ys=loss, xs=variables)
if clip_grad_norm:
grads = [
tf.clip_by_value(grad, -1 * clip_grad_norm, clip_grad_norm)
for grad in grads
]
if first_order:
grads = [tf.stop_gradient(dv) for dv in grads]
return [
update_fn(i=iteration_idx, grad=dv, state=s)
for (s, dv) in zip(iterate_collection, grads)
]
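def _example_optimizer_update():
  # Sketch of the calling convention (hypothetical objective, not part of the
  # original module): repeatedly apply `optimizer_update` with the `sgd`
  # triple to minimize (x - 3)^2; under eager execution the GradientTape
  # branch above computes the gradients.
  init, update, get_params = sgd(learning_rate=0.1)
  def quadratic(variables, iteration_idx):
    del iteration_idx
    return tf.reduce_sum((variables[0] - 3.) ** 2)
  states = [init(tf.constant(0.))]
  for i in range(25):
    states = optimizer_update(
        iterate_collection=states,
        iteration_idx=i,
        objective_fn=quadratic,
        update_fn=update,
        get_params_fn=get_params,
        first_order=True,
        clip_grad_norm=None)
  return get_params(states[0])  # Approaches 3.0.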
def em_loop(
num_updates,
e_step,
m_step,
variables,
):
"""Expectation-maximization of objective_fn wrt variables for num_updates."""
def _body(step, preupdate_vars):
train_predictions_, responsibilities_ = e_step(preupdate_vars)
updated_vars = m_step(preupdate_vars, train_predictions_, responsibilities_)
return step + 1, updated_vars
def _cond(step, *args):
del args
return step < num_updates
step = tf.Variable(0, trainable=False, name='inner_step_counter')
loop_vars = (step, variables)
step, updated_vars = tf.while_loop(
cond=_cond, body=_body, loop_vars=loop_vars, swap_memory=True)
return updated_vars
@gin.configurable
def optimizer_loop(
num_updates,
objective_fn,
update_fn,
variables,
first_order,
clip_grad_norm,
):
"""Optimization of `objective_fn` for `num_updates` of `variables`."""
# Optimizer specifics.
init, update, get_params = update_fn()
def _body(step, preupdate_vars):
"""Optimization loop body."""
updated_vars = optimizer_update(
iterate_collection=preupdate_vars,
iteration_idx=step,
objective_fn=objective_fn,
update_fn=update,
get_params_fn=get_params,
first_order=first_order,
clip_grad_norm=clip_grad_norm,
)
return step + 1, updated_vars
def _cond(step, *args):
"""Optimization truncation condition."""
del args
return step < num_updates
step = tf.Variable(0, trainable=False, name='inner_step_counter')
loop_vars = (step, [init(var) for var in variables])
step, updated_vars = tf.while_loop(
cond=_cond, body=_body, loop_vars=loop_vars, swap_memory=True)
return [get_params(v) for v in updated_vars]
ForwardPass = collections.namedtuple('ForwardPass', (
'embeddings',
'predictions',
'inner_objective_value',
'outer_objective_value',
'accuracy',
))
Adaptation = collections.namedtuple('Adaptation', (
'pre_adaptation_support_results',
'post_adaptation_support_results',
'pre_adaptation_query_results',
'post_adaptation_query_results',
'objective_fn',
'support_module_objective_fn',
'query_module_objective_fn',
'forward_pass_fn',
'init_loop_variables_mapping',
'final_loop_variables_mapping',
))
@gin.configurable
class ExperimentalOptimizationLearner(learner_base.ExperimentalEpisodicLearner):
"""An optimization-based learner."""
def __init__(self, adapt_embedding_predicate, num_update_steps,
additional_evaluation_update_steps, first_order,
adapt_batch_norm, clip_grad_norm, update_fn, **kwargs):
"""Initializes a `ExperimentalOptimizationLearner` instance.
Args:
adapt_embedding_predicate: A callable that returns True for `tf.Variable`
        attributes of the embedding function that should be adapted for each task.
num_update_steps: The number of inner loop optimization steps to take.
additional_evaluation_update_steps: The number of additional inner loop
optimization steps to take during evaluation (on the meta-test and
meta-validation sets).
first_order: If True, prevent the computation of higher order gradients.
      adapt_batch_norm: If True, adapt the scale and offset parameters of batch
normalization layers in the inner loop of optimization.
clip_grad_norm: If not None, gradient dimensions are independently clipped
to lie in the interval [-clip_grad_norm, clip_grad_norm] before being
processed by the `update_fn`.
update_fn: A Callable that takes in a learning rate and produces a
function triple defining an iterative optimization process; see `sgd`
and `adam` for examples.
**kwargs: Keyword arguments common to all `ExperimentalEpisodicLearner`s.
"""
self.adapt_embedding_predicate = adapt_embedding_predicate
self.num_update_steps = num_update_steps
self.additional_evaluation_update_steps = additional_evaluation_update_steps
self.adapt_batch_norm = adapt_batch_norm
self.first_order = first_order
self.clip_grad_norm = clip_grad_norm
self.update_fn = update_fn
super(ExperimentalOptimizationLearner, self).__init__(**kwargs)
assert isinstance(self.embedding_fn,
reparameterizable_base.ReparameterizableModule)
def compute_loss(self, onehot_labels, predictions):
"""Computes the loss on the query set of a given episode."""
return (self.outer_objective(
onehot_labels=onehot_labels, predictions=predictions))
@property
def trainable_variables(self):
"""Returns a tuple of variables to update in the outer optimization loop."""
raise NotImplementedError
@property
def task_parameters(self):
"""Returns a tuple of variables to update in the inner optimization loop."""
raise NotImplementedError
def episodic_init_ops(self, labels, embeddings, task_parameters):
raise NotImplementedError
def inner_loop_prediction(self, embeddings):
raise NotImplementedError
def inner_objective(self, onehot_labels, predictions, iteration_idx):
raise NotImplementedError
def outer_loop_prediction(self, embeddings):
raise NotImplementedError
def outer_objective(self, onehot_labels, predictions):
raise NotImplementedError
def forward_pass(self, data):
"""Wrapper around `detailed_forward_pass` to return query set predictions.
Args:
data: A `meta_dataset.providers.Episode` containing the data for the
episode.
Returns:
A Tensor of the predictions on the query set.
"""
forward_pass_result = self.detailed_forward_pass(data)
post_adaptation_query_results = (
forward_pass_result.post_adaptation_query_results)
return post_adaptation_query_results.predictions
def detailed_forward_pass(self, data):
"""Returns all information from a forward pass of the `OptimizationLearner`.
Args:
data: A `meta_dataset.providers.Episode` containing the data for the
episode.
Returns:
A `collections.NamedTuple` that contains the results of the forward pass.
"""
# Loop initialization.
init_loop_variables = self.task_parameters
init_loop_variable_refs = [
v.experimental_ref() for v in init_loop_variables
]
# Construct ops for data-dependent episodic initialization.
episodic_init_ops = self.episodic_init_ops(
labels=data.support_labels,
embeddings=self.embedding_fn(data.support_images, training=True),
task_parameters=init_loop_variables,
)
def _forward_pass(iteration_idx_, variables_mapping_, images_,
onehot_labels_):
"""Helper function to compute the outputs of a forward pass."""
with self.embedding_fn.reparameterize(variables_mapping_):
# TODO(eringrant): Implement non-transductive batch normalization (i.e.,
        # pass the support set statistics through the query set forward pass).
embeddings_ = self.embedding_fn(images_, training=True)
# TODO(eringrant): `head_fn` is an attribute of the subclass.
with self.head_fn.reparameterize(variables_mapping_):
predictions_ = self.head_fn(embeddings_)[:, :data.way]
accuracy_ = tf.reduce_mean(
input_tensor=self.compute_accuracy(
onehot_labels=onehot_labels_, predictions=predictions_))
inner_objective_ = self.inner_objective(
onehot_labels=onehot_labels_,
predictions=predictions_,
iteration_idx=iteration_idx_)
outer_objective_ = self.outer_objective(
onehot_labels=onehot_labels_,
predictions=predictions_,
)
return ForwardPass(
embeddings=embeddings_,
predictions=predictions_,
inner_objective_value=inner_objective_,
outer_objective_value=outer_objective_,
accuracy=accuracy_,
)
def _objective_fn(loop_variables_, iteration_idx_):
"""Evaluate the support set objective given `loop_variables_`."""
# Get attribute paths for the loop_variables.
loop_variables_mapping_ = dict(
zip(init_loop_variable_refs, loop_variables_))
adaptation_support_results = _forward_pass(
iteration_idx_=iteration_idx_,
variables_mapping_=loop_variables_mapping_,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels)
return adaptation_support_results.inner_objective_value
def _e_step(loop_variables_):
"""Evaluate expectations given `loop_variables_`."""
# Get attribute paths for the loop_variables.
loop_variables_dict_ = dict(zip(init_loop_variable_refs, loop_variables_))
with self.embedding_fn.reparameterize(loop_variables_dict_):
# TODO(eringrant): training to True for normalization with batch stats.
# Figure out the appropriate way to pass this around.
train_embeddings_ = self.embedding_fn(data.train_images, training=True)
class_embeddings_ = learner_base.class_specific_data(
data.onehot_train_labels, train_embeddings_, self.logit_dim)
def _compute_responsibilities(examples_, class_idx):
train_predictions_ = tf.squeeze(
self.head_fn(
embeddings=examples_, components=True, class_idx=[class_idx]),
axis=1)
return tf.nn.softmax(train_predictions_, axis=-1)
with self.head_fn.reparameterize(loop_variables_dict_):
class_responsibilities_ = [
_compute_responsibilities(embeddings_, class_idx=i)
for i, embeddings_ in enumerate(class_embeddings_)
]
return class_embeddings_, class_responsibilities_
def _m_step(preupdate_vars, all_embeddings_, all_responsibilities_):
"""Compute parameter estimates given `loop_variables_`."""
means, log_scales, logits = zip(*map(
reparameterizable_distributions.fit_gaussian_mixture, all_embeddings_,
all_responsibilities_, itertools.repeat(self.head_fn.damping)))
def flatten(x):
return list(itertools.chain.from_iterable(x))
means = flatten(means)
log_scales = flatten(log_scales)
logits = flatten(logits)
if not self.head_fn.estimate_loc:
means = [None for _ in means]
if not self.head_fn.estimate_scale:
log_scales = [None for _ in log_scales]
if not self.head_fn.estimate_logits:
logits = [None for _ in logits]
updated_vars = means + log_scales + logits
# Replace constant variables.
# TODO(eringrant): This interface differs from just excluding these
# variables from `task_variables`.
no_none_updated_vars = []
for preupdate_var, updated_var in zip(preupdate_vars, updated_vars):
if updated_var is None:
no_none_updated_vars.append(preupdate_var)
else:
no_none_updated_vars.append(updated_var)
# TODO(eringrant): This assumes an ordering of mean, log_scales,
# mixing_logits.
return no_none_updated_vars
# Loop body.
with tf.control_dependencies(episodic_init_ops):
# Inner loop of expectation maximization.
num_em_steps = getattr(self, 'num_em_steps', 0)
if num_em_steps > 0:
        # The EM loop refines the episodically-initialized task parameters.
        init_loop_variables = em_loop(
            num_updates=num_em_steps,
            e_step=_e_step,
            m_step=_m_step,
            variables=init_loop_variables)
# Inner loop of gradient-based optimization.
num_optimizer_steps = (
self.num_update_steps + (self.additional_evaluation_update_steps
if not self.is_training else 0))
if num_optimizer_steps > 0:
# pylint: disable=no-value-for-parameter
final_loop_variables = optimizer_loop(
num_updates=num_optimizer_steps,
objective_fn=_objective_fn,
update_fn=self.update_fn,
variables=init_loop_variables,
first_order=self.first_order,
clip_grad_norm=self.clip_grad_norm,
)
# pylint: enable=no-value-for-parameter
      # If no gradient-based adaptation is performed, ensure the episodic
      # (and EM) initialization is still part of the graph via a control
      # dependency, and define the final iterate as an identity of the initial
      # one.
      if num_optimizer_steps == 0:
        final_loop_variables = [tf.identity(v) for v in init_loop_variables]
# Get variable references to use when remapping the loop_variables.
init_loop_variables_mapping = dict(
zip(init_loop_variable_refs, init_loop_variables))
final_loop_variables_mapping = dict(
zip(init_loop_variable_refs, final_loop_variables))
# Collect statistics about the inner optimization.
with tf.compat.v1.name_scope('pre-adaptation'):
with tf.compat.v1.name_scope('support'):
pre_adaptation_support_results = _forward_pass(
iteration_idx_=0,
variables_mapping_=init_loop_variables_mapping,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels)
with tf.compat.v1.name_scope('query'):
pre_adaptation_query_results = _forward_pass(
iteration_idx_=0,
variables_mapping_=init_loop_variables_mapping,
images_=data.query_images,
onehot_labels_=data.onehot_query_labels)
with tf.compat.v1.name_scope('post-adaptation'):
with tf.compat.v1.name_scope('support'):
post_adaptation_support_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=final_loop_variables_mapping,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels,
)
with tf.compat.v1.name_scope('query'):
post_adaptation_query_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=final_loop_variables_mapping,
images_=data.query_images,
onehot_labels_=data.onehot_query_labels,
)
def _support_module_objective_fn(module_variables_, module_variable_refs_):
"""Evaluate the query set objective given `module_variables_`."""
# Use the values of the parameters at convergence as the default value.
variables_mapping_ = final_loop_variables_mapping.copy()
# Loop over and replace the module-specific variables.
for module_variable_ref, module_variable in zip(module_variable_refs_,
module_variables_):
variables_mapping_[module_variable_ref] = module_variable
adaptation_query_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=variables_mapping_,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels,
)
return adaptation_query_results.inner_objective_value
def _query_module_objective_fn(module_variables_, module_variable_refs_):
"""Evaluate the query set objective given `module_variables_`."""
# Use the values of the parameters at convergence as the default value.
variables_mapping_ = final_loop_variables_mapping.copy()
# Loop over and replace the module-specific variables.
for module_variable_ref, module_variable in zip(module_variable_refs_,
module_variables_):
variables_mapping_[module_variable_ref] = module_variable
adaptation_query_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=variables_mapping_,
images_=data.query_images,
onehot_labels_=data.onehot_query_labels)
return adaptation_query_results.inner_objective_value
return Adaptation(
pre_adaptation_support_results=pre_adaptation_support_results,
post_adaptation_support_results=post_adaptation_support_results,
pre_adaptation_query_results=pre_adaptation_query_results,
post_adaptation_query_results=post_adaptation_query_results,
objective_fn=_objective_fn,
support_module_objective_fn=_support_module_objective_fn,
query_module_objective_fn=_query_module_objective_fn,
forward_pass_fn=_forward_pass,
init_loop_variables_mapping=init_loop_variables_mapping,
final_loop_variables_mapping=final_loop_variables_mapping,
)
@gin.configurable
class HeadAndBackboneLearner(ExperimentalOptimizationLearner):
"""A head-and-backbone learner."""
def __init__(self,
head_cls,
adapt_head_predicate,
episodic_head_init_fn=None,
**kwargs):
"""Initializes a `HeadAndBackboneLearner` instance.
Args:
head_cls: A subclass of `ReparameterizableModule` used to instantiate the
head function.
adapt_head_predicate: A callable that returns True for `tf.Variable`
        attributes of the head function that should be adapted for each task.
episodic_head_init_fn: A callable that takes in a tuple of one-hot labels,
        embeddings and head classifier weights, and produces initialization
operations to be executed at the start of each episode. If None, no
episodic initialization is performed.
**kwargs: Keyword arguments common to all
`ExperimentalOptimizationLearner`s.
"""
super(HeadAndBackboneLearner, self).__init__(**kwargs)
assert issubclass(head_cls, reparameterizable_base.ReparameterizableModule)
self.adapt_head_predicate = adapt_head_predicate
self.head_fn = head_cls(output_dim=self.logit_dim)
def no_op_initialization(onehot_labels, embeddings, *vbls):
del onehot_labels
del embeddings
del vbls
return [tf.no_op()]
self.episodic_head_init_fn = episodic_head_init_fn or no_op_initialization
def compute_regularizer(self, onehot_labels, predictions):
"""Computes a regularizer, maybe using `predictions` and `onehot_labels`."""
del onehot_labels
del predictions
return (tf.reduce_sum(input_tensor=self.embedding_fn.losses) +
tf.reduce_sum(input_tensor=self.head_fn.losses))
def build(self):
"""Instantiate the parameters belonging to this `HeadAndBackboneLearner`."""
super(HeadAndBackboneLearner, self).build()
if not self.head_fn.built:
self.head_fn.build(self.embedding_shape)
self.output_shape = self.head_fn.compute_output_shape(self.embedding_shape)
def episodic_init_ops(self, labels, embeddings, task_parameters):
"""Return operations for episodic initalization of `task_parameters`."""
# Isolate the head parameters.
head_parameters = task_parameters[len(list(self.backbone_parameters)):]
assert len(head_parameters) == len(list(self.head_parameters))
return self.episodic_head_init_fn(labels, embeddings, *head_parameters)
def inner_objective(self, onehot_labels, predictions, iteration_idx):
"""Alias for softmax cross entropy loss."""
cce = tf.keras.losses.CategoricalCrossentropy()
return cce(onehot_labels, predictions)
def outer_objective(self, onehot_labels, predictions):
"""Alias for softmax cross entropy loss."""
cce = tf.keras.losses.CategoricalCrossentropy()
regularization = self.compute_regularizer(
onehot_labels=onehot_labels, predictions=predictions)
return cce(onehot_labels, predictions) + regularization
@property
def variables(self):
"""Returns a tuple of this Learner's variables."""
if not self._built:
raise learner_base.NotBuiltError
return self.embedding_fn.variables + self.head_fn.variables
@property
def trainable_variables(self):
"""Returns a tuple of this Learner's trainable variables."""
if not self._built:
raise learner_base.NotBuiltError
return (self.embedding_fn.trainable_variables +
self.head_fn.trainable_variables)
@property
def task_parameters(self):
"""Returns a tuple of the variables to be adapted for each task."""
if not self._built:
raise learner_base.NotBuiltError
return list(itertools.chain(self.backbone_parameters, self.head_parameters))
@property
def backbone_parameters(self):
return list(
self.embedding_fn.reparameterizables(self.adapt_embedding_predicate))
@property
def head_parameters(self):
return list(self.head_fn.reparameterizables(self.adapt_head_predicate))
@gin.configurable(allowlist=['prototype_multiplier'])
def proto_maml_fc_layer_init_fn(labels, embeddings, weights, biases,
prototype_multiplier):
"""Return a list of operations for reparameterized ProtoNet initialization."""
# This is robust to classes missing from the training set, but assumes that
# the last class is present.
num_ways = tf.cast(
tf.math.reduce_max(input_tensor=tf.unique(labels)[0]) + 1, tf.int32)
# When there are no examples for a given class, we default its prototype to
# zeros, per the implementation of `tf.math.unsorted_segment_mean`.
prototypes = tf.math.unsorted_segment_mean(embeddings, labels, num_ways)
# Scale the prototypes, which acts as a regularizer on the weights and biases.
prototypes *= prototype_multiplier
  # logit = -<squared Euclidean distance to prototype>
# = -(x - p)^T.(x - p)
# = 2 x^T.p - p^T.p - x^T.x
# = x^T.w + b
# where w = 2p, b = -p^T.p
output_weights = tf.transpose(a=2 * prototypes)
output_biases = -tf.reduce_sum(input_tensor=prototypes * prototypes, axis=1)
# We zero-pad to align with the original weights and biases.
output_weights = tf.pad(
tensor=output_weights,
paddings=[[
0, 0
], [0, tf.shape(input=weights)[1] - tf.shape(input=output_weights)[1]]],
mode='CONSTANT',
constant_values=0)
output_biases = tf.pad(
tensor=output_biases,
paddings=[[
0, tf.shape(input=biases)[0] - tf.shape(input=output_biases)[0]
]],
mode='CONSTANT',
constant_values=0)
return [
weights.assign(output_weights),
biases.assign(output_biases),
]
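def _example_proto_maml_identity():
  # Numerical sketch of the identity above (values are illustrative, not part
  # of the original module): with w = 2p and b = -p^T.p, x^T.w + b equals
  # -||x - p||^2 up to the class-independent term -x^T.x, which the softmax
  # ignores.
  x = tf.constant([1., 2.])
  p = tf.constant([.5, -1.])
  lhs = -tf.reduce_sum((x - p) ** 2)
  rhs = (tf.tensordot(x, 2. * p, axes=1) - tf.reduce_sum(p * p) -
         tf.reduce_sum(x * x))
  tf.debugging.assert_near(lhs, rhs)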
def zero_init_fn(labels, embeddings, *vbls):
"""Return a list of operations for initialization at zero."""
del labels
del embeddings
return [vbl.assign(tf.zeros_like(vbl)) for vbl in vbls]
@gin.configurable
class MAML(HeadAndBackboneLearner):
"""A 'model-agnostic' meta-learner."""
def __init__(self, proto_maml_fc_layer_init, zero_fc_layer_init, **kwargs):
"""Initializes a MAML instance.
Args:
proto_maml_fc_layer_init: Whether to use `PrototypicalNetwork`-equivalent
fc layer initialization.
zero_fc_layer_init: Whether to initialize the parameters of the output
layer to zero.
**kwargs: Keyword arguments common to all `HeadAndBackboneLearner`s.
Raises:
ValueError: If both `proto_maml_fc_layer_init` and `zero_fc_layer_init`
are `True`.
"""
if proto_maml_fc_layer_init and zero_fc_layer_init:
raise ValueError('Conflicting initialization options for `MAML`.')
super(MAML, self).__init__(
episodic_head_init_fn=(proto_maml_fc_layer_init_fn
if proto_maml_fc_layer_init else
zero_init_fn if zero_fc_layer_init else None),
adapt_embedding_predicate=reparameterizable_base.is_trainable_variable,
adapt_head_predicate=reparameterizable_base.is_trainable_variable,
head_cls=reparameterizable_backbones.LinearModel,
**kwargs)
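# A hypothetical gin configuration for `MAML` (binding names assumed, shown
# for illustration only; at most one of the two init options may be True):
#   MAML.proto_maml_fc_layer_init = True
#   MAML.zero_fc_layer_init = False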
@gin.configurable
class ANIL(HeadAndBackboneLearner):
"""An 'almost-no-inner-loop' learner."""
def __init__(self, proto_maml_fc_layer_init, zero_fc_layer_init, **kwargs):
"""Initializes an ANIL instance.
Args:
proto_maml_fc_layer_init: Whether to use `PrototypicalNetwork`-equivalent
fc layer initialization.
zero_fc_layer_init: Whether to initialize the parameters of the output
layer to zero.
**kwargs: Keyword arguments common to all `HeadAndBackboneLearner`s.
Raises:
ValueError: If both `proto_maml_fc_layer_init` and `zero_fc_layer_init`
are `True`.
"""
if proto_maml_fc_layer_init and zero_fc_layer_init:
raise ValueError('Conflicting initialization options for `ANIL`.')
super(ANIL, self).__init__(
episodic_head_init_fn=(proto_maml_fc_layer_init_fn
if proto_maml_fc_layer_init else
zero_init_fn if zero_fc_layer_init else None),
adapt_embedding_predicate=lambda x: False,
adapt_head_predicate=reparameterizable_base.is_trainable_variable,
head_cls=reparameterizable_backbones.LinearModel,
**kwargs)
@gin.configurable
def generative_then_discriminative_schedule(proportion_generative, num_updates):
  """Return a schedule: `proportion_generative` of updates at 0.0, rest 1.0."""
  num_generative_updates = int(proportion_generative * num_updates)
  num_discriminative_updates = num_updates - num_generative_updates
  return [0.0] * num_generative_updates + [1.0] * num_discriminative_updates
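def _example_schedule():
  # Quick illustration (hypothetical arguments, not part of the original
  # module): the first half of the inner-loop updates use the generative
  # objective (coefficient 0.0), the remainder the discriminative one (1.0).
  assert generative_then_discriminative_schedule(
      proportion_generative=0.5, num_updates=4) == [0.0, 0.0, 1.0, 1.0]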
@gin.configurable
class GenerativeClassifier(HeadAndBackboneLearner):
"""A generative classifier."""
def __init__(self, generative_scaling, interpolation_schedule, **kwargs):
"""Initializes a GenerativeClassifier instance.
Args:
      generative_scaling: A positive scalar; purely generative inner-loop steps
        rescale the objective by this factor (see `inner_objective`).
      interpolation_schedule: A callable that, given `num_updates`, produces a
        sequence of coefficients in [0, 1] used to interpolate between the
        generative (0.0) and discriminative (1.0) objectives.
**kwargs: Keyword arguments common to all `HeadAndBackboneLearner`s.
"""
super(GenerativeClassifier, self).__init__(
adapt_embedding_predicate=lambda x: False,
adapt_head_predicate=reparameterizable_base.is_trainable_variable,
**kwargs)
assert isinstance(
self.head_fn,
reparameterizable_distributions.ReparameterizableClassMixture)
self.generative_scaling = generative_scaling
self.gen_disc_interpolation = (
interpolation_schedule(num_updates=self.num_update_steps) +
[1.0] * self.additional_evaluation_update_steps
    )  # Additional evaluation steps are assumed discriminative.
    # Validate the interpolation coefficients.
    assert all(coef >= 0 for coef in self.gen_disc_interpolation), (
        'Interpolation coefficient should be nonnegative.')
# TODO(eringrant): generalize to other models admitting EM.
if isinstance(self.head_fn,
reparameterizable_distributions.GaussianMixture):
# Override the usual generative training to perform EM.
try:
num_em_steps = self.gen_disc_interpolation.index(1.0)
except ValueError:
# All steps are EM.
num_em_steps = self.num_update_steps
assert (
all(coef == 0.0
for coef in self.gen_disc_interpolation[:num_em_steps]) and
all(coef == 1.0
for coef in self.gen_disc_interpolation[num_em_steps:])
), ('Each step must be fully discriminative or generative when using EM.')
self.num_em_steps = num_em_steps
self.num_update_steps -= num_em_steps
@property
def task_parameters(self):
return self.head_fn.task_parameters
def joint_log_likelihood(self, onehot_labels, log_probs):
"""Compute p(z, y)."""
labels = tf.cast(
tf.reduce_sum(input_tensor=onehot_labels, axis=0), dtype=tf.float32)
class_log_probs = tf.math.log(labels / tf.reduce_sum(input_tensor=labels))
return log_probs + tf.expand_dims(class_log_probs, 0)
def inner_objective(self, onehot_labels, predictions, iteration_idx):
"""Compute the inner-loop objective."""
# p(z, y), joint log-likelihood.
joint_log_probs = self.joint_log_likelihood(onehot_labels, predictions)
labels = tf.expand_dims(tf.argmax(input=onehot_labels, axis=-1), axis=-1)
numerator = tf.gather(joint_log_probs, labels, axis=-1, batch_dims=1)
# p(z), normalization constant.
evidence = tf.reduce_logsumexp(
input_tensor=joint_log_probs, axis=-1, keepdims=True)
# p(y | z) if interpolation coefficient > 0 else p(z, y).
# TODO(eringrant): This assumes that `interp` is either 1 or 0.
# Adapt to a hybridized approach.
interp = tf.gather(self.gen_disc_interpolation, iteration_idx)
scale = tf.cond(
pred=interp > 0.0,
true_fn=lambda: 1.0,
false_fn=lambda: self.generative_scaling)
return -scale * tf.reduce_mean(
input_tensor=numerator - interp * evidence, axis=0)
def outer_objective(self, onehot_labels, predictions):
"""Compute the outer-loop objective."""
joint_log_probs = self.joint_log_likelihood(onehot_labels, predictions)
cce = tf.keras.losses.CategoricalCrossentropy()
regularization = self.compute_regularizer(
onehot_labels=onehot_labels, predictions=predictions)
return cce(onehot_labels, joint_log_probs) + regularization
def validate_model_independence(self, labels, log_probs, task_parameters):
"""Partition gradients into those assumed active and inactive."""
num_task_parameters = len(task_parameters)
# pylint: disable=g-complex-comprehension
on_gradients = [[
tf.norm(tensor=on_gradient) for on_gradient in on_gradients
] for on_gradients in [
tf.gradients(
ys=tf.gather(log_probs, tf.compat.v1.where(tf.equal(labels, i))),
xs=task_parameters[i * num_task_parameters:(i + 1) *
num_task_parameters]) for i in range(1)
]]
off_gradients = [[
tf.norm(tensor=off_gradient) for off_gradient in off_gradients
] for off_gradients in [
tf.gradients(
ys=tf.gather(log_probs, tf.compat.v1.where(tf.equal(labels, i))),
xs=task_parameters[i * num_task_parameters:(i + 1) *
num_task_parameters]) for i in range(1)
]]
# pylint: enable=g-complex-comprehension
return (list(itertools.chain.from_iterable(on_gradients)),
list(itertools.chain.from_iterable(off_gradients)))
| 37.71123
| 80
| 0.713244
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import gin.tf
from meta_dataset.learners.experimental import base as learner_base
from meta_dataset.models.experimental import reparameterizable_backbones
from meta_dataset.models.experimental import reparameterizable_base
from meta_dataset.models.experimental import reparameterizable_distributions
from six.moves import zip
import tensorflow as tf
@gin.configurable
def sgd(learning_rate):
def init(x0):
return x0
def update(i, grad, state):
del i
x = state
return x - learning_rate * grad
def get_params(state):
x = state
return x
return init, update, get_params
@gin.configurable
def adam(learning_rate, b1=0.9, b2=0.999, eps=1e-8):
def init(x0):
m0 = tf.zeros_like(x0)
v0 = tf.zeros_like(x0)
return x0, m0, v0
def update(i, grad, state):
i = tf.cast(i, dtype=tf.float32)
x, m, v = state
m = (1. - b1) * grad + b1 * m
v = (1. - b2) * (grad**2.) + b2 * v
mhat = m / (1. - b1**(i + 1.))
vhat = v / (1. - b2**(i + 1.))
x = x - learning_rate * mhat / (tf.sqrt(vhat) + eps)
return x, m, v
def get_params(state):
x, _, _ = state
return x
return init, update, get_params
def optimizer_update(iterate_collection, iteration_idx, objective_fn, update_fn,
get_params_fn, first_order, clip_grad_norm):
variables = [get_params_fn(iterate) for iterate in iterate_collection]
if tf.executing_eagerly():
with tf.GradientTape(persistent=True) as g:
g.watch(variables)
loss = objective_fn(variables, iteration_idx)
grads = g.gradient(loss, variables)
else:
loss = objective_fn(variables, iteration_idx)
grads = tf.gradients(ys=loss, xs=variables)
if clip_grad_norm:
grads = [
tf.clip_by_value(grad, -1 * clip_grad_norm, clip_grad_norm)
for grad in grads
]
if first_order:
grads = [tf.stop_gradient(dv) for dv in grads]
return [
update_fn(i=iteration_idx, grad=dv, state=s)
for (s, dv) in zip(iterate_collection, grads)
]
def em_loop(
num_updates,
e_step,
m_step,
variables,
):
def _body(step, preupdate_vars):
train_predictions_, responsibilities_ = e_step(preupdate_vars)
updated_vars = m_step(preupdate_vars, train_predictions_, responsibilities_)
return step + 1, updated_vars
def _cond(step, *args):
del args
return step < num_updates
step = tf.Variable(0, trainable=False, name='inner_step_counter')
loop_vars = (step, variables)
step, updated_vars = tf.while_loop(
cond=_cond, body=_body, loop_vars=loop_vars, swap_memory=True)
return updated_vars
@gin.configurable
def optimizer_loop(
num_updates,
objective_fn,
update_fn,
variables,
first_order,
clip_grad_norm,
):
init, update, get_params = update_fn()
def _body(step, preupdate_vars):
updated_vars = optimizer_update(
iterate_collection=preupdate_vars,
iteration_idx=step,
objective_fn=objective_fn,
update_fn=update,
get_params_fn=get_params,
first_order=first_order,
clip_grad_norm=clip_grad_norm,
)
return step + 1, updated_vars
def _cond(step, *args):
del args
return step < num_updates
step = tf.Variable(0, trainable=False, name='inner_step_counter')
loop_vars = (step, [init(var) for var in variables])
step, updated_vars = tf.while_loop(
cond=_cond, body=_body, loop_vars=loop_vars, swap_memory=True)
return [get_params(v) for v in updated_vars]
ForwardPass = collections.namedtuple('ForwardPass', (
'embeddings',
'predictions',
'inner_objective_value',
'outer_objective_value',
'accuracy',
))
Adaptation = collections.namedtuple('Adaptation', (
'pre_adaptation_support_results',
'post_adaptation_support_results',
'pre_adaptation_query_results',
'post_adaptation_query_results',
'objective_fn',
'support_module_objective_fn',
'query_module_objective_fn',
'forward_pass_fn',
'init_loop_variables_mapping',
'final_loop_variables_mapping',
))
@gin.configurable
class ExperimentalOptimizationLearner(learner_base.ExperimentalEpisodicLearner):
def __init__(self, adapt_embedding_predicate, num_update_steps,
additional_evaluation_update_steps, first_order,
adapt_batch_norm, clip_grad_norm, update_fn, **kwargs):
self.adapt_embedding_predicate = adapt_embedding_predicate
self.num_update_steps = num_update_steps
self.additional_evaluation_update_steps = additional_evaluation_update_steps
self.adapt_batch_norm = adapt_batch_norm
self.first_order = first_order
self.clip_grad_norm = clip_grad_norm
self.update_fn = update_fn
super(ExperimentalOptimizationLearner, self).__init__(**kwargs)
assert isinstance(self.embedding_fn,
reparameterizable_base.ReparameterizableModule)
def compute_loss(self, onehot_labels, predictions):
return (self.outer_objective(
onehot_labels=onehot_labels, predictions=predictions))
@property
def trainable_variables(self):
raise NotImplementedError
@property
def task_parameters(self):
raise NotImplementedError
def episodic_init_ops(self, labels, embeddings, task_parameters):
raise NotImplementedError
def inner_loop_prediction(self, embeddings):
raise NotImplementedError
def inner_objective(self, onehot_labels, predictions, iteration_idx):
raise NotImplementedError
def outer_loop_prediction(self, embeddings):
raise NotImplementedError
def outer_objective(self, onehot_labels, predictions):
raise NotImplementedError
def forward_pass(self, data):
forward_pass_result = self.detailed_forward_pass(data)
post_adaptation_query_results = (
forward_pass_result.post_adaptation_query_results)
return post_adaptation_query_results.predictions
def detailed_forward_pass(self, data):
init_loop_variables = self.task_parameters
init_loop_variable_refs = [
v.experimental_ref() for v in init_loop_variables
]
episodic_init_ops = self.episodic_init_ops(
labels=data.support_labels,
embeddings=self.embedding_fn(data.support_images, training=True),
task_parameters=init_loop_variables,
)
def _forward_pass(iteration_idx_, variables_mapping_, images_,
onehot_labels_):
with self.embedding_fn.reparameterize(variables_mapping_):
embeddings_ = self.embedding_fn(images_, training=True)
with self.head_fn.reparameterize(variables_mapping_):
predictions_ = self.head_fn(embeddings_)[:, :data.way]
accuracy_ = tf.reduce_mean(
input_tensor=self.compute_accuracy(
onehot_labels=onehot_labels_, predictions=predictions_))
inner_objective_ = self.inner_objective(
onehot_labels=onehot_labels_,
predictions=predictions_,
iteration_idx=iteration_idx_)
outer_objective_ = self.outer_objective(
onehot_labels=onehot_labels_,
predictions=predictions_,
)
return ForwardPass(
embeddings=embeddings_,
predictions=predictions_,
inner_objective_value=inner_objective_,
outer_objective_value=outer_objective_,
accuracy=accuracy_,
)
def _objective_fn(loop_variables_, iteration_idx_):
loop_variables_mapping_ = dict(
zip(init_loop_variable_refs, loop_variables_))
adaptation_support_results = _forward_pass(
iteration_idx_=iteration_idx_,
variables_mapping_=loop_variables_mapping_,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels)
return adaptation_support_results.inner_objective_value
def _e_step(loop_variables_):
loop_variables_dict_ = dict(zip(init_loop_variable_refs, loop_variables_))
with self.embedding_fn.reparameterize(loop_variables_dict_):
train_embeddings_ = self.embedding_fn(data.train_images, training=True)
class_embeddings_ = learner_base.class_specific_data(
data.onehot_train_labels, train_embeddings_, self.logit_dim)
def _compute_responsibilities(examples_, class_idx):
train_predictions_ = tf.squeeze(
self.head_fn(
embeddings=examples_, components=True, class_idx=[class_idx]),
axis=1)
return tf.nn.softmax(train_predictions_, axis=-1)
with self.head_fn.reparameterize(loop_variables_dict_):
class_responsibilities_ = [
_compute_responsibilities(embeddings_, class_idx=i)
for i, embeddings_ in enumerate(class_embeddings_)
]
return class_embeddings_, class_responsibilities_
def _m_step(preupdate_vars, all_embeddings_, all_responsibilities_):
means, log_scales, logits = zip(*map(
reparameterizable_distributions.fit_gaussian_mixture, all_embeddings_,
all_responsibilities_, itertools.repeat(self.head_fn.damping)))
def flatten(x):
return list(itertools.chain.from_iterable(x))
means = flatten(means)
log_scales = flatten(log_scales)
logits = flatten(logits)
if not self.head_fn.estimate_loc:
means = [None for _ in means]
if not self.head_fn.estimate_scale:
log_scales = [None for _ in log_scales]
if not self.head_fn.estimate_logits:
logits = [None for _ in logits]
updated_vars = means + log_scales + logits
no_none_updated_vars = []
for preupdate_var, updated_var in zip(preupdate_vars, updated_vars):
if updated_var is None:
no_none_updated_vars.append(preupdate_var)
else:
no_none_updated_vars.append(updated_var)
return no_none_updated_vars
with tf.control_dependencies(episodic_init_ops):
num_em_steps = getattr(self, 'num_em_steps', 0)
if num_em_steps > 0:
        init_loop_variables = em_loop(
            num_updates=num_em_steps,
            e_step=_e_step,
            m_step=_m_step,
            variables=init_loop_variables)
num_optimizer_steps = (
self.num_update_steps + (self.additional_evaluation_update_steps
if not self.is_training else 0))
if num_optimizer_steps > 0:
final_loop_variables = optimizer_loop(
num_updates=num_optimizer_steps,
objective_fn=_objective_fn,
update_fn=self.update_fn,
variables=init_loop_variables,
first_order=self.first_order,
clip_grad_norm=self.clip_grad_norm,
)
      if num_optimizer_steps == 0:
        final_loop_variables = [tf.identity(v) for v in init_loop_variables]
init_loop_variables_mapping = dict(
zip(init_loop_variable_refs, init_loop_variables))
final_loop_variables_mapping = dict(
zip(init_loop_variable_refs, final_loop_variables))
with tf.compat.v1.name_scope('pre-adaptation'):
with tf.compat.v1.name_scope('support'):
pre_adaptation_support_results = _forward_pass(
iteration_idx_=0,
variables_mapping_=init_loop_variables_mapping,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels)
with tf.compat.v1.name_scope('query'):
pre_adaptation_query_results = _forward_pass(
iteration_idx_=0,
variables_mapping_=init_loop_variables_mapping,
images_=data.query_images,
onehot_labels_=data.onehot_query_labels)
with tf.compat.v1.name_scope('post-adaptation'):
with tf.compat.v1.name_scope('support'):
post_adaptation_support_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=final_loop_variables_mapping,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels,
)
with tf.compat.v1.name_scope('query'):
post_adaptation_query_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=final_loop_variables_mapping,
images_=data.query_images,
onehot_labels_=data.onehot_query_labels,
)
def _support_module_objective_fn(module_variables_, module_variable_refs_):
variables_mapping_ = final_loop_variables_mapping.copy()
for module_variable_ref, module_variable in zip(module_variable_refs_,
module_variables_):
variables_mapping_[module_variable_ref] = module_variable
adaptation_query_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=variables_mapping_,
images_=data.support_images,
onehot_labels_=data.onehot_support_labels,
)
return adaptation_query_results.inner_objective_value
def _query_module_objective_fn(module_variables_, module_variable_refs_):
variables_mapping_ = final_loop_variables_mapping.copy()
for module_variable_ref, module_variable in zip(module_variable_refs_,
module_variables_):
variables_mapping_[module_variable_ref] = module_variable
adaptation_query_results = _forward_pass(
iteration_idx_=num_optimizer_steps,
variables_mapping_=variables_mapping_,
images_=data.query_images,
onehot_labels_=data.onehot_query_labels)
return adaptation_query_results.inner_objective_value
return Adaptation(
pre_adaptation_support_results=pre_adaptation_support_results,
post_adaptation_support_results=post_adaptation_support_results,
pre_adaptation_query_results=pre_adaptation_query_results,
post_adaptation_query_results=post_adaptation_query_results,
objective_fn=_objective_fn,
support_module_objective_fn=_support_module_objective_fn,
query_module_objective_fn=_query_module_objective_fn,
forward_pass_fn=_forward_pass,
init_loop_variables_mapping=init_loop_variables_mapping,
final_loop_variables_mapping=final_loop_variables_mapping,
)
@gin.configurable
class HeadAndBackboneLearner(ExperimentalOptimizationLearner):
def __init__(self,
head_cls,
adapt_head_predicate,
episodic_head_init_fn=None,
**kwargs):
super(HeadAndBackboneLearner, self).__init__(**kwargs)
assert issubclass(head_cls, reparameterizable_base.ReparameterizableModule)
self.adapt_head_predicate = adapt_head_predicate
self.head_fn = head_cls(output_dim=self.logit_dim)
def no_op_initialization(onehot_labels, embeddings, *vbls):
del onehot_labels
del embeddings
del vbls
return [tf.no_op()]
self.episodic_head_init_fn = episodic_head_init_fn or no_op_initialization
def compute_regularizer(self, onehot_labels, predictions):
del onehot_labels
del predictions
return (tf.reduce_sum(input_tensor=self.embedding_fn.losses) +
tf.reduce_sum(input_tensor=self.head_fn.losses))
def build(self):
super(HeadAndBackboneLearner, self).build()
if not self.head_fn.built:
self.head_fn.build(self.embedding_shape)
self.output_shape = self.head_fn.compute_output_shape(self.embedding_shape)
def episodic_init_ops(self, labels, embeddings, task_parameters):
head_parameters = task_parameters[len(list(self.backbone_parameters)):]
assert len(head_parameters) == len(list(self.head_parameters))
return self.episodic_head_init_fn(labels, embeddings, *head_parameters)
def inner_objective(self, onehot_labels, predictions, iteration_idx):
cce = tf.keras.losses.CategoricalCrossentropy()
return cce(onehot_labels, predictions)
def outer_objective(self, onehot_labels, predictions):
cce = tf.keras.losses.CategoricalCrossentropy()
regularization = self.compute_regularizer(
onehot_labels=onehot_labels, predictions=predictions)
return cce(onehot_labels, predictions) + regularization
@property
def variables(self):
if not self._built:
raise learner_base.NotBuiltError
return self.embedding_fn.variables + self.head_fn.variables
@property
def trainable_variables(self):
if not self._built:
raise learner_base.NotBuiltError
return (self.embedding_fn.trainable_variables +
self.head_fn.trainable_variables)
@property
def task_parameters(self):
if not self._built:
raise learner_base.NotBuiltError
return list(itertools.chain(self.backbone_parameters, self.head_parameters))
@property
def backbone_parameters(self):
return list(
self.embedding_fn.reparameterizables(self.adapt_embedding_predicate))
@property
def head_parameters(self):
return list(self.head_fn.reparameterizables(self.adapt_head_predicate))
@gin.configurable(allowlist=['prototype_multiplier'])
def proto_maml_fc_layer_init_fn(labels, embeddings, weights, biases,
prototype_multiplier):
num_ways = tf.cast(
tf.math.reduce_max(input_tensor=tf.unique(labels)[0]) + 1, tf.int32)
prototypes = tf.math.unsorted_segment_mean(embeddings, labels, num_ways)
prototypes *= prototype_multiplier
output_weights = tf.transpose(a=2 * prototypes)
output_biases = -tf.reduce_sum(input_tensor=prototypes * prototypes, axis=1)
output_weights = tf.pad(
tensor=output_weights,
paddings=[[
0, 0
], [0, tf.shape(input=weights)[1] - tf.shape(input=output_weights)[1]]],
mode='CONSTANT',
constant_values=0)
output_biases = tf.pad(
tensor=output_biases,
paddings=[[
0, tf.shape(input=biases)[0] - tf.shape(input=output_biases)[0]
]],
mode='CONSTANT',
constant_values=0)
return [
weights.assign(output_weights),
biases.assign(output_biases),
]
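# Why W = 2 * prototypes and b = -||prototype||^2 (a sketch of the
# Proto-MAML head initialization): Prototypical Networks score class k with
# the negative squared distance -||x - c_k||^2 = -||x||^2 + 2 x^T c_k -
# ||c_k||^2. The -||x||^2 term is shared by every class, so the softmax
# ignores it, leaving a linear layer with weights 2 c_k and bias -||c_k||^2,
# which is exactly what is assigned above. The tf.pad calls zero-fill the
# remaining output units when the episode has fewer classes than output_dim.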
def zero_init_fn(labels, embeddings, *vbls):
del labels
del embeddings
return [vbl.assign(tf.zeros_like(vbl)) for vbl in vbls]
@gin.configurable
class MAML(HeadAndBackboneLearner):
def __init__(self, proto_maml_fc_layer_init, zero_fc_layer_init, **kwargs):
if proto_maml_fc_layer_init and zero_fc_layer_init:
raise ValueError('Conflicting initialization options for `MAML`.')
super(MAML, self).__init__(
episodic_head_init_fn=(proto_maml_fc_layer_init_fn
if proto_maml_fc_layer_init else
zero_init_fn if zero_fc_layer_init else None),
adapt_embedding_predicate=reparameterizable_base.is_trainable_variable,
adapt_head_predicate=reparameterizable_base.is_trainable_variable,
head_cls=reparameterizable_backbones.LinearModel,
**kwargs)
@gin.configurable
class ANIL(HeadAndBackboneLearner):
def __init__(self, proto_maml_fc_layer_init, zero_fc_layer_init, **kwargs):
if proto_maml_fc_layer_init and zero_fc_layer_init:
raise ValueError('Conflicting initialization options for `ANIL`.')
super(ANIL, self).__init__(
episodic_head_init_fn=(proto_maml_fc_layer_init_fn
if proto_maml_fc_layer_init else
zero_init_fn if zero_fc_layer_init else None),
adapt_embedding_predicate=lambda x: False,
adapt_head_predicate=reparameterizable_base.is_trainable_variable,
head_cls=reparameterizable_backbones.LinearModel,
**kwargs)
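# Note: ANIL ("almost no inner loop") is identical to MAML above except that
# the embedding is frozen during adaptation (adapt_embedding_predicate always
# returns False), so only the linear head is updated in the inner loop.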
@gin.configurable
def generative_then_discriminative_schedule(proportion_generative, num_updates):
num_generative_updates = int(proportion_generative * num_updates)
num_discriminative_updates = num_updates - num_generative_updates
return [0.0] * num_generative_updates + [1.0] * num_discriminative_updates
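# Worked example: generative_then_discriminative_schedule(0.25, 8) returns
# [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] -- two purely generative updates
# followed by six purely discriminative ones.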
@gin.configurable
class GenerativeClassifier(HeadAndBackboneLearner):
def __init__(self, generative_scaling, interpolation_schedule, **kwargs):
super(GenerativeClassifier, self).__init__(
adapt_embedding_predicate=lambda x: False,
adapt_head_predicate=reparameterizable_base.is_trainable_variable,
**kwargs)
assert isinstance(
self.head_fn,
reparameterizable_distributions.ReparameterizableClassMixture)
self.generative_scaling = generative_scaling
self.gen_disc_interpolation = (
interpolation_schedule(num_updates=self.num_update_steps) +
[1.0] * self.additional_evaluation_update_steps
)
assert all(coef >= 0 for coef in self.gen_disc_interpolation), (
'Interpolation coefficient should be nonnegative.')
if isinstance(self.head_fn,
reparameterizable_distributions.GaussianMixture):
try:
num_em_steps = self.gen_disc_interpolation.index(1.0)
except ValueError:
num_em_steps = self.num_update_steps
assert (
all(coef == 0.0
for coef in self.gen_disc_interpolation[:num_em_steps]) and
all(coef == 1.0
for coef in self.gen_disc_interpolation[num_em_steps:])
), ('Each step must be fully discriminative or generative when using EM.')
self.num_em_steps = num_em_steps
self.num_update_steps -= num_em_steps
@property
def task_parameters(self):
return self.head_fn.task_parameters
def joint_log_likelihood(self, onehot_labels, log_probs):
labels = tf.cast(
tf.reduce_sum(input_tensor=onehot_labels, axis=0), dtype=tf.float32)
class_log_probs = tf.math.log(labels / tf.reduce_sum(input_tensor=labels))
return log_probs + tf.expand_dims(class_log_probs, 0)
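# The class prior log p(y) is estimated from the empirical label counts in
# the episode (`labels` sums the one-hot matrix over examples), so the value
# returned is log p(x | y) + log p(y), i.e. the per-class log-joint.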
def inner_objective(self, onehot_labels, predictions, iteration_idx):
joint_log_probs = self.joint_log_likelihood(onehot_labels, predictions)
labels = tf.expand_dims(tf.argmax(input=onehot_labels, axis=-1), axis=-1)
numerator = tf.gather(joint_log_probs, labels, axis=-1, batch_dims=1)
evidence = tf.reduce_logsumexp(
input_tensor=joint_log_probs, axis=-1, keepdims=True)
interp = tf.gather(self.gen_disc_interpolation, iteration_idx)
scale = tf.cond(
pred=interp > 0.0,
true_fn=lambda: 1.0,
false_fn=lambda: self.generative_scaling)
return -scale * tf.reduce_mean(
input_tensor=numerator - interp * evidence, axis=0)
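# Interpretation of the interpolated objective: `numerator` is the log-joint
# log p(x, y) of the true class and `evidence` is log p(x) (a logsumexp over
# classes). With interp = 0 the loss is the purely generative -log p(x, y);
# with interp = 1 it is the discriminative -log p(y | x) = -(log p(x, y) -
# log p(x)). Intermediate values blend the two, and `generative_scaling`
# rescales only the purely generative (interp = 0) steps.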
def outer_objective(self, onehot_labels, predictions):
joint_log_probs = self.joint_log_likelihood(onehot_labels, predictions)
cce = tf.keras.losses.CategoricalCrossentropy()
regularization = self.compute_regularizer(
onehot_labels=onehot_labels, predictions=predictions)
return cce(onehot_labels, joint_log_probs) + regularization
def validate_model_independence(self, labels, log_probs, task_parameters):
num_task_parameters = len(task_parameters)
on_gradients = [[
tf.norm(tensor=on_gradient) for on_gradient in on_gradients
] for on_gradients in [
tf.gradients(
ys=tf.gather(log_probs, tf.compat.v1.where(tf.equal(labels, i))),
xs=task_parameters[i * num_task_parameters:(i + 1) *
num_task_parameters]) for i in range(1)
]]
off_gradients = [[
tf.norm(tensor=off_gradient) for off_gradient in off_gradients
] for off_gradients in [
tf.gradients(
# Off-class gradients (assumed intent): log-probs of examples *not*
# belonging to class i, differentiated w.r.t. class i's parameters.
ys=tf.gather(log_probs, tf.compat.v1.where(tf.not_equal(labels, i))),
xs=task_parameters[i * num_task_parameters:(i + 1) *
                           num_task_parameters]) for i in range(1)
]]
return (list(itertools.chain.from_iterable(on_gradients)),
list(itertools.chain.from_iterable(off_gradients)))
| true
| true
|
1c43c5bfecaed4e7e8964c995dae337fc39d6831
| 126
|
py
|
Python
|
server/app.py
|
ZhiShiMao/one
|
313c64a47e563fabf9b24e67c52308daff6912e3
|
[
"MIT"
] | null | null | null |
server/app.py
|
ZhiShiMao/one
|
313c64a47e563fabf9b24e67c52308daff6912e3
|
[
"MIT"
] | null | null | null |
server/app.py
|
ZhiShiMao/one
|
313c64a47e563fabf9b24e67c52308daff6912e3
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from .api import routers
app = FastAPI()
for router in routers:
app.include_router(router)
| 14
| 30
| 0.753968
|
from fastapi import FastAPI
from .api import routers
app = FastAPI()
for router in routers:
app.include_router(router)
| true
| true
|
1c43c887f1307042e07971b032b7bf4181a998aa
| 2,327
|
py
|
Python
|
ion/services/coi/object_management_service.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 3
|
2016-09-20T09:50:06.000Z
|
2018-08-10T01:41:38.000Z
|
ion/services/coi/object_management_service.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | null | null | null |
ion/services/coi/object_management_service.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 2
|
2016-03-16T22:25:49.000Z
|
2016-11-26T14:54:21.000Z
|
#!/usr/bin/env python
__author__ = 'Stephen P. Henrie'
from interface.services.coi.iobject_management_service import BaseObjectManagementService
from pyon.util.containers import is_basic_identifier
from pyon.core.exception import BadRequest, NotFound
from pyon.core.interfaces.interface_util import is_yaml_string_valid
class ObjectManagementService(BaseObjectManagementService):
"""
A service for defining and managing object types used as resources, messages, etc.
"""
def create_object_type(self, object_type=None):
""" Should receive an ObjectType object
"""
# Return Value
# ------------
# {object_type_id: ''}
#
if not is_basic_identifier(object_type.name):
raise BadRequest("Invalid object_type name: %s" % object_type.name)
if not is_yaml_string_valid(object_type.definition):
raise BadRequest("Invalid YAML definition")
object_type_id, version = self.clients.resource_registry.create(object_type)
return object_type_id
def update_object_type(self, object_type=None):
""" Should receive an ObjectType object
"""
# Return Value
# ------------
# {success: true}
#
if not is_basic_identifier(object_type.name):
raise BadRequest("Invalid object_type name: %s" % object_type.name)
if not is_yaml_string_valid(object_type.definition):
raise BadRequest("Invalid YAML definition")
object_id, version = self.clients.resource_registry.update(object_type)
return object_id
def read_object_type(self, object_type_id=''):
""" Should return an ObjectType object
"""
# Return Value
# ------------
# object_type: {}
#
if not object_type_id:
raise BadRequest("The resource_type_id parameter is missing")
return self.clients.resource_registry.read(object_type_id)
def delete_object_type(self, object_type_id=''):
"""method docstring
"""
# Return Value
# ------------
# {success: true}
#
if not object_type_id:
raise BadRequest("The object_type_id parameter is missing")
return self.clients.resource_registry.delete(object_type_id)
| 33.724638
| 89
| 0.647615
|
__author__ = 'Stephen P. Henrie'
from interface.services.coi.iobject_management_service import BaseObjectManagementService
from pyon.util.containers import is_basic_identifier
from pyon.core.exception import BadRequest, NotFound
from pyon.core.interfaces.interface_util import is_yaml_string_valid
class ObjectManagementService(BaseObjectManagementService):
def create_object_type(self, object_type=None):
if not is_basic_identifier(object_type.name):
raise BadRequest("Invalid object_type name: %s" % object_type.name)
if not is_yaml_string_valid(object_type.definition):
raise BadRequest("Invalid YAML definition")
object_type_id, version = self.clients.resource_registry.create(object_type)
return object_type_id
def update_object_type(self, object_type=None):
if not is_basic_identifier(object_type.name):
raise BadRequest("Invalid object_type name: %s" % object_type.name)
if not is_yaml_string_valid(object_type.definition):
raise BadRequest("Invalid YAML definition")
object_id, version = self.clients.resource_registry.update(object_type)
return object_id
def read_object_type(self, object_type_id=''):
if not object_type_id:
raise BadRequest("The resource_type_id parameter is missing")
return self.clients.resource_registry.read(object_type_id)
def delete_object_type(self, object_type_id=''):
if not object_type_id:
raise BadRequest("The object_type_id parameter is missing")
return self.clients.resource_registry.delete(object_type_id)
| true
| true
|
1c43c91efb5c15cc4a8aef6be89a5b65609db2e1
| 633
|
py
|
Python
|
src/manage.py
|
ravihansa/django-multiple-user-auth
|
7b6d1c783fc72d30cb7a5bcdf3a262f6ac0772b1
|
[
"bzip2-1.0.6"
] | 1
|
2019-10-07T15:26:24.000Z
|
2019-10-07T15:26:24.000Z
|
src/manage.py
|
ravihansa/django-multiple-user-auth
|
7b6d1c783fc72d30cb7a5bcdf3a262f6ac0772b1
|
[
"bzip2-1.0.6"
] | null | null | null |
src/manage.py
|
ravihansa/django-multiple-user-auth
|
7b6d1c783fc72d30cb7a5bcdf3a262f6ac0772b1
|
[
"bzip2-1.0.6"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'multiUserAuth.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.772727
| 77
| 0.685624
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'multiUserAuth.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
1c43c9505cca8a34ef1bc2f328835501e9ee524a
| 969
|
py
|
Python
|
django/contrib/admin/decorators.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 7
|
2021-03-18T10:21:34.000Z
|
2022-02-09T12:54:51.000Z
|
virtual/lib/python3.6/site-packages/django/contrib/admin/decorators.py
|
kahenya-anita/Insta-Clone
|
4894e959c17170505e73aee6dc497aeb29d55a71
|
[
"MIT"
] | 61
|
2021-01-10T12:59:01.000Z
|
2021-06-24T09:19:20.000Z
|
virtual/lib/python3.6/site-packages/django/contrib/admin/decorators.py
|
kahenya-anita/Insta-Clone
|
4894e959c17170505e73aee6dc497aeb29d55a71
|
[
"MIT"
] | 7
|
2021-03-15T13:39:20.000Z
|
2022-03-29T12:08:21.000Z
|
def register(*models, site=None):
"""
Register the given model(s) classes and wrapped ModelAdmin class with
admin site:
@register(Author)
class AuthorAdmin(admin.ModelAdmin):
pass
The `site` kwarg is an admin site to use instead of the default admin site.
"""
from django.contrib.admin import ModelAdmin
from django.contrib.admin.sites import AdminSite, site as default_site
def _model_admin_wrapper(admin_class):
if not models:
raise ValueError('At least one model must be passed to register.')
admin_site = site or default_site
if not isinstance(admin_site, AdminSite):
raise ValueError('site must subclass AdminSite')
if not issubclass(admin_class, ModelAdmin):
raise ValueError('Wrapped class must subclass ModelAdmin.')
admin_site.register(models, admin_class=admin_class)
return admin_class
return _model_admin_wrapper
| 31.258065
| 79
| 0.691434
|
def register(*models, site=None):
from django.contrib.admin import ModelAdmin
from django.contrib.admin.sites import AdminSite, site as default_site
def _model_admin_wrapper(admin_class):
if not models:
raise ValueError('At least one model must be passed to register.')
admin_site = site or default_site
if not isinstance(admin_site, AdminSite):
raise ValueError('site must subclass AdminSite')
if not issubclass(admin_class, ModelAdmin):
raise ValueError('Wrapped class must subclass ModelAdmin.')
admin_site.register(models, admin_class=admin_class)
return admin_class
return _model_admin_wrapper
| true
| true
|
1c43cac183ab237cc74013825695bd82ee649cd5
| 1,123
|
py
|
Python
|
CODE/models/.ipynb_checkpoints/regression-checkpoint.py
|
happyfuntimegroup/machinelearning
|
48b381092736591e4685faafdddc713391922266
|
[
"MIT"
] | 1
|
2021-12-07T12:38:33.000Z
|
2021-12-07T12:38:33.000Z
|
CODE/models/regression.py
|
SelinZ/machinelearning
|
105273b2cf5907b23a2ee2b4c076d89f215c38ff
|
[
"MIT"
] | 12
|
2021-11-30T13:57:48.000Z
|
2021-12-07T08:33:18.000Z
|
CODE/models/regression.py
|
SelinZ/machinelearning
|
105273b2cf5907b23a2ee2b4c076d89f215c38ff
|
[
"MIT"
] | 1
|
2021-12-07T12:38:00.000Z
|
2021-12-07T12:38:00.000Z
|
def simple_linear(X_train, y_train, X_val, y_val):
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_absolute_error
model = LinearRegression()
reg = model.fit(X = X_train, y = y_train)
y_pred_val = model.predict(X_val)
print(r2_score(y_val, y_pred_val))
print(mean_absolute_error(y_val, y_pred_val))
print()
#return r2, mae
def log_reg(X_train, y_train, X_val, y_val):
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_absolute_error
scaler = StandardScaler()
X_train_s = scaler.fit_transform(X_train)
X_val_s = scaler.transform(X_val)
y_ravel = np.ravel(y_train)
model = LogisticRegression(random_state = 123, max_iter = 2000)
reg = model.fit(X = X_train_s, y = y_ravel)
y_pred_val = model.predict(X_val_s)
print('r2:', r2_score(y_val, y_pred_val)) # 0.006551953988217396
print("MAE:", mean_absolute_error(y_val, y_pred_val)) # 34.07342328208346
print()
| 34.030303
| 80
| 0.719501
|
def simple_linear(X_train, y_train, X_val, y_val):
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_absolute_error
model = LinearRegression()
reg = model.fit(X = X_train, y = y_train)
y_pred_val = model.predict(X_val)
print(r2_score(y_val, y_pred_val))
print(mean_absolute_error(y_val, y_pred_val))
print()
def log_reg(X_train, y_train, X_val, y_val):
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_absolute_error
scaler = StandardScaler()
X_train_s = scaler.fit_transform(X_train)
X_val_s = scaler.transform(X_val)
y_ravel = np.ravel(y_train)
model = LogisticRegression(random_state = 123, max_iter = 2000)
reg = model.fit(X = X_train_s, y = y_ravel)
y_pred_val = model.predict(X_val_s)
print('r2:', r2_score(y_val, y_pred_val))
print("MAE:", mean_absolute_error(y_val, y_pred_val))
print()
| true
| true
|
1c43cad59151b333a58e3e9bbe73d9671373383e
| 6,744
|
py
|
Python
|
spider/quanmin_anchor.py
|
AcerFeng/Zhudao
|
5a36d0dc7bd718ce03aa476a31b36d7b5230b1b7
|
[
"MIT"
] | null | null | null |
spider/quanmin_anchor.py
|
AcerFeng/Zhudao
|
5a36d0dc7bd718ce03aa476a31b36d7b5230b1b7
|
[
"MIT"
] | null | null | null |
spider/quanmin_anchor.py
|
AcerFeng/Zhudao
|
5a36d0dc7bd718ce03aa476a31b36d7b5230b1b7
|
[
"MIT"
] | 1
|
2018-09-13T07:41:44.000Z
|
2018-09-13T07:41:44.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2018-01-25 00:45:53
# Project: quanmin_anchor
from pyspider.libs.base_handler import *
import pymysql
from datetime import datetime
class Handler(BaseHandler):
headers = {
'Host': 'www.quanmin.tv',
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip',
'User-Agent': 'okhttp/3.9.1',
}
crawl_config = {
'itag': 'v001',
'headers': headers,
}
def __init__(self):
self.platform_id = 6
try:
self.connect = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456', db='zhudao', charset='utf8mb4')
except Exception as e:
print('Cannot Connect To MySQL!\n', e)
raise e
@every(minutes=24 * 60)
def on_start(self):
try:
cursor = self.connect.cursor()
cursor.execute('select short_name,id,cate_id from category where platform_id = %s;' % str(
self.platform_id))
results = cursor.fetchall()
for item in results:
self.crawl('https://www.quanmin.tv/json/categories/%s/list.json?01250041=&toid=0&token&sid&cv=xiaomi_3.5.33&ua=sagit&dev=28dc7f83c185d337&conn=WIFI&osversion=android_25&cid=6&nonce=b7560fbc6e56929469624ee3c9eb10f9&sign=658A5253C80A22054714887EC24CA693' %
(item[0],),
callback=self.detail_page,
save={
'short_name': item[0],
'category_id': item[1],
'cate_id': item[2],
})
except Exception as e:
self.connect.rollback()
raise e
@config(age=10 * 24 * 60 * 60)
def index_page(self, response):
for each in response.doc('a[href^="http"]').items():
self.crawl(each.attr.href, callback=self.detail_page)
@config(priority=2)
def detail_page(self, response):
return {
"url": response.url,
"results": response.json['data'],
"category_id": response.save['category_id'],
"cate_id": response.save['cate_id'],
}
def on_result(self,result):
if not result:
return
self.save_data(**result)
def save_data(self, **kw):
if len(kw['results']) == 0:
return
for item in kw['results']:
try:
cursor = self.connect.cursor()
cursor.execute('select id from anchor where user_id=%s and platform_id=%s', (item['uid'],self.platform_id))
result = cursor.fetchone()
if result:
# Update operation (consider creating a separate anchor-analysis table (new spider?): containing platform, anchor id, ...)
sql = '''update anchor set
name=%s,
room_id=%s,
room_name=%s,
cover=%s,
avatar=%s,
avatar_mid=%s,
avatar_small=%s,
fans=%s,
category_id=%s,
cate_id=%s,
online=%s,
pc_url=%s,
update_time=%s,
announcement=%s,
beauty_cover=%s,
show_time=%s
where user_id=%s and platform_id=%s'''
cursor.execute(sql, (item['nick'],
item['no'],
item['title'],
item['thumb'],
item['avatar'],
item['avatar'],
item['avatar'],
item['follow'],
kw['category_id'],
kw['cate_id'],
item['view'],
'https://www.quanmin.tv/' + item['no'],
datetime.now(),
item['announcement'],
item['beauty_cover'] if 'beauty_cover' in item else '',
item['play_at'],
item['uid'],
self.platform_id))
else:
# Insert operation
sql = '''insert into anchor(
user_id,
name,
room_id,
room_name,
cover,
avatar,
avatar_mid,
avatar_small,
fans,
category_id,
cate_id,
online,
platform_id,
pc_url,
show_time,
announcement,
beauty_cover,
created_time)
values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''
cursor.execute(sql, (item['uid'],
item['nick'],
item['no'],
item['title'],
item['thumb'],
item['avatar'],
item['avatar'],
item['avatar'],
item['follow'],
kw['category_id'],
kw['cate_id'],
item['view'],
self.platform_id,
'https://www.quanmin.tv/' + item['no'],
item['play_at'],
item['announcement'],
item['beauty_cover'] if 'beauty_cover' in item else '',
datetime.now(),
))
self.connect.commit()
except Exception as e:
self.connect.rollback()
raise e
| 40.626506
| 270
| 0.361358
|
from pyspider.libs.base_handler import *
import pymysql
from datetime import datetime
class Handler(BaseHandler):
headers = {
'Host': 'www.quanmin.tv',
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip',
'User-Agent': 'okhttp/3.9.1',
}
crawl_config = {
'itag': 'v001',
'headers': headers,
}
def __init__(self):
self.platform_id = 6
try:
self.connect = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456', db='zhudao', charset='utf8mb4')
except Exception as e:
print('Cannot Connect To MySQL!\n', e)
raise e
@every(minutes=24 * 60)
def on_start(self):
try:
cursor = self.connect.cursor()
cursor.execute('select short_name,id,cate_id from category where platform_id = %s;' % str(
self.platform_id))
results = cursor.fetchall()
for item in results:
self.crawl('https://www.quanmin.tv/json/categories/%s/list.json?01250041=&toid=0&token&sid&cv=xiaomi_3.5.33&ua=sagit&dev=28dc7f83c185d337&conn=WIFI&osversion=android_25&cid=6&nonce=b7560fbc6e56929469624ee3c9eb10f9&sign=658A5253C80A22054714887EC24CA693' %
(item[0],),
callback=self.detail_page,
save={
'short_name': item[0],
'category_id': item[1],
'cate_id': item[2],
})
except Exception as e:
self.connect.rollback()
raise e
@config(age=10 * 24 * 60 * 60)
def index_page(self, response):
for each in response.doc('a[href^="http"]').items():
self.crawl(each.attr.href, callback=self.detail_page)
@config(priority=2)
def detail_page(self, response):
return {
"url": response.url,
"results": response.json['data'],
"category_id": response.save['category_id'],
"cate_id": response.save['cate_id'],
}
def on_result(self,result):
if not result:
return
self.save_data(**result)
def save_data(self, **kw):
if len(kw['results']) == 0:
return
for item in kw['results']:
try:
cursor = self.connect.cursor()
cursor.execute('select id from anchor where user_id=%s and platform_id=%s', (item['uid'],self.platform_id))
result = cursor.fetchone()
if result:
sql = '''update anchor set
name=%s,
room_id=%s,
room_name=%s,
cover=%s,
avatar=%s,
avatar_mid=%s,
avatar_small=%s,
fans=%s,
category_id=%s,
cate_id=%s,
online=%s,
pc_url=%s,
update_time=%s,
announcement=%s,
beauty_cover=%s,
show_time=%s
where user_id=%s and platform_id=%s'''
cursor.execute(sql, (item['nick'],
item['no'],
item['title'],
item['thumb'],
item['avatar'],
item['avatar'],
item['avatar'],
item['follow'],
kw['category_id'],
kw['cate_id'],
item['view'],
'https://www.quanmin.tv/' + item['no'],
datetime.now(),
item['announcement'],
item['beauty_cover'] if 'beauty_cover' in item else '',
item['play_at'],
item['uid'],
self.platform_id))
else:
sql = '''insert into anchor(
user_id,
name,
room_id,
room_name,
cover,
avatar,
avatar_mid,
avatar_small,
fans,
category_id,
cate_id,
online,
platform_id,
pc_url,
show_time,
announcement,
beauty_cover,
created_time)
values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''
cursor.execute(sql, (item['uid'],
item['nick'],
item['no'],
item['title'],
item['thumb'],
item['avatar'],
item['avatar'],
item['avatar'],
item['follow'],
kw['category_id'],
kw['cate_id'],
item['view'],
self.platform_id,
'https://www.quanmin.tv/' + item['no'],
item['play_at'],
item['announcement'],
item['beauty_cover'] if 'beauty_cover' in item else '',
datetime.now(),
))
self.connect.commit()
except Exception as e:
self.connect.rollback()
raise e
| true
| true
|
1c43cad729128c44137a71f706001f60a8c1b995
| 100
|
py
|
Python
|
modules/ESP8266/ota.py
|
ccccmagicboy/MicroPython_fw
|
d2049bc19e3d5010f5d6d0d17aa13a8693914fbd
|
[
"MIT"
] | 4
|
2020-02-02T20:12:59.000Z
|
2020-07-20T15:44:07.000Z
|
modules/ESP8266/ota.py
|
ccccmagicboy/MicroPython_fw
|
d2049bc19e3d5010f5d6d0d17aa13a8693914fbd
|
[
"MIT"
] | 10
|
2020-02-18T09:57:04.000Z
|
2020-03-04T11:39:17.000Z
|
modules/ESP8266/ota.py
|
ccccmagicboy/MicroPython_fw
|
d2049bc19e3d5010f5d6d0d17aa13a8693914fbd
|
[
"MIT"
] | null | null | null |
import machine
def start():
machine.RTC().memory('yaotaota')
machine.reset()
| 12.5
| 36
| 0.57
|
import machine
def start():
machine.RTC().memory('yaotaota')
machine.reset()
| true
| true
|
1c43cb3436d953b4031542e8c7c48bea06d83265
| 11,467
|
py
|
Python
|
ovsdbapp/api.py
|
Sharpeye90/ovsdbapp
|
6577bbd5e80cdbe95207211d4d47f43b121f2c86
|
[
"Apache-2.0"
] | 34
|
2017-03-24T10:14:33.000Z
|
2021-11-19T05:04:54.000Z
|
ovsdbapp/api.py
|
Sharpeye90/ovsdbapp
|
6577bbd5e80cdbe95207211d4d47f43b121f2c86
|
[
"Apache-2.0"
] | 2
|
2021-09-21T13:23:01.000Z
|
2021-09-21T13:23:28.000Z
|
ovsdbapp/api.py
|
Sharpeye90/ovsdbapp
|
6577bbd5e80cdbe95207211d4d47f43b121f2c86
|
[
"Apache-2.0"
] | 15
|
2017-07-06T08:00:52.000Z
|
2022-03-13T10:29:40.000Z
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import threading
class Command(object, metaclass=abc.ABCMeta):
"""An OVSDB command that can be executed in a transaction
:attr result: The result of executing the command in a transaction
"""
@abc.abstractmethod
def execute(self, **transaction_options):
"""Immediately execute an OVSDB command
This implicitly creates a transaction with the passed options and then
executes it, returning the value of the executed transaction
:param transaction_options: Options to pass to the transaction
"""
class Transaction(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def commit(self):
"""Commit the transaction to OVSDB"""
@abc.abstractmethod
def add(self, command):
"""Append an OVSDB operation to the transaction
Operation is returned back as a convenience.
"""
def extend(self, commands):
"""Add multiple OVSDB operations to the transaction
List of operations is returned back as a convenience.
"""
return [self.add(command) for command in commands]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, tb):
if exc_type is None:
self.result = self.commit()
class API(object, metaclass=abc.ABCMeta):
def __init__(self, nested_transactions=True):
# Mapping between a (green)thread and its transaction.
self._nested_txns = nested_transactions
self._nested_txns_map = {}
@abc.abstractmethod
def create_transaction(self, check_error=False, log_errors=True, **kwargs):
"""Create a transaction
:param check_error: Allow the transaction to raise an exception?
:type check_error: bool
:param log_errors: Log an error if the transaction fails?
:type log_errors: bool
:returns: A new transaction
:rtype: :class:`Transaction`
"""
@contextlib.contextmanager
def transaction(self, check_error=False, log_errors=True, nested=True,
**kwargs):
"""Create a transaction context.
:param check_error: Allow the transaction to raise an exception?
:type check_error: bool
:param log_errors: Log an error if the transaction fails?
:type log_errors: bool
:param nested: Allow nested transactions be merged into one txn
:type nested: bool
:returns: Either a new transaction or an existing one.
:rtype: :class:`Transaction`
"""
# object() is unique, so when we are not nested the lookup below always
# misses and a fresh Transaction is created
nested = nested and self._nested_txns
cur_thread_id = threading.get_ident() if nested else object()
if cur_thread_id in self._nested_txns_map:
yield self._nested_txns_map[cur_thread_id]
else:
with self.create_transaction(
check_error, log_errors, **kwargs) as txn:
self._nested_txns_map[cur_thread_id] = txn
try:
yield txn
finally:
del self._nested_txns_map[cur_thread_id]
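# Usage sketch (hypothetical `api` object implementing this class): nested
# calls on the same (green)thread share one transaction, so both commands
# below are committed together when the outermost context exits:
#
#   with api.transaction(check_error=True) as txn:
#       txn.add(api.db_set('Bridge', 'br-int', ('external_ids', {'a': 'b'})))
#       with api.transaction() as same_txn:  # merged into the outer txn
#           same_txn.add(api.db_clear('Bridge', 'br-int', 'other_config'))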
@abc.abstractmethod
def db_create(self, table, **col_values):
"""Create a command to create new record
:param table: The OVS table containing the record to be created
:type table: string
:param col_values: The columns and their associated values
to be set after create
:type col_values: Dictionary of columns id's and values
:returns: :class:`Command` with uuid result
"""
def db_create_row(self, table, **col_values):
"""Create a command to create new record
Identical to db_create, but returns a RowView result
:returns: :class:`Command` with RowView result
"""
# vif_plug_ovs has a copy of impl_vsctl that doesn't implement this
raise NotImplementedError
@abc.abstractmethod
def db_destroy(self, table, record):
"""Create a command to destroy a record
:param table: The OVS table containing the record to be destroyed
:type table: string
:param record: The record id (name/uuid) to be destroyed
:type record: uuid/string
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_set(self, table, record, *col_values):
"""Create a command to set fields in a record
:param table: The OVS table containing the record to be modified
:type table: string
:param record: The record id (name/uuid) to be modified
:type table: string
:param col_values: The columns and their associated values
:type col_values: Tuples of (column, value). Values may be atomic
values or unnested sequences/mappings
:returns: :class:`Command` with no result
"""
# TODO(twilson) Consider handling kwargs for arguments where order
# doesn't matter. Though that would break the assert_called_once_with
# unit tests
@abc.abstractmethod
def db_add(self, table, record, column, *values):
"""Create a command to add a value to a record
Adds each value or key-value pair to column in record in table. If
column is a map, then each value will be a dict, otherwise a base type.
If key already exists in a map column, then the current value is not
replaced (use the set command to replace an existing value).
:param table: The OVS table containing the record to be modified
:type table: string
:param record: The record id (name/uuid) to modified
:type record: string
:param column: The column name to be modified
:type column: string
:param values: The values to be added to the column
:type values: The base type of the column. If column is a map, then
a dict containing the key name and the map's value type
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_clear(self, table, record, column):
"""Create a command to clear a field's value in a record
:param table: The OVS table containing the record to be modified
:type table: string
:param record: The record id (name/uuid) to be modified
:type record: string
:param column: The column whose value should be cleared
:type column: string
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_get(self, table, record, column):
"""Create a command to return a field's value in a record
:param table: The OVS table containing the record to be queried
:type table: string
:param record: The record id (name/uuid) to be queried
:type record: string
:param column: The column whose value should be returned
:type column: string
:returns: :class:`Command` with the field's value result
"""
@abc.abstractmethod
def db_list(self, table, records=None, columns=None, if_exists=False):
"""Create a command to return a list of OVSDB records
:param table: The OVS table to query
:type table: string
:param records: The records to return values from
:type records: list of record ids (names/uuids)
:param columns: Limit results to only columns, None means all columns
:type columns: list of column names or None
:param if_exists: Do not fail if the record does not exist
:type if_exists: bool
:returns: :class:`Command` with [{'column', value}, ...] result
"""
@abc.abstractmethod
def db_list_rows(self, table, record=None, if_exists=False):
"""Create a command to return a list of OVSDB records
Identical to db_list, but returns a RowView list result
:returns: :class:`Command` with RowView list result
"""
@abc.abstractmethod
def db_find(self, table, *conditions, **kwargs):
"""Create a command to return find OVSDB records matching conditions
:param table: The OVS table to query
:type table: string
:param conditions: The conditions to satisfy the query
:type conditions: 3-tuples containing (column, operation, match)
Type of 'match' parameter MUST be identical to column
type
Examples:
atomic: ('tag', '=', 7)
map: ('external_ids' '=', {'iface-id': 'xxx'})
field exists?
('external_ids', '!=', {'iface-id', ''})
set contains?:
('protocols', '{>=}', 'OpenFlow13')
See the ovs-vsctl man page for more operations
:param columns: Limit results to only columns, None means all columns
:type columns: list of column names or None
:returns: :class:`Command` with [{'column', value}, ...] result
"""
@abc.abstractmethod
def db_find_rows(self, table, *conditions, **kwargs):
"""Create a command to return OVSDB records matching conditions
Identical to db_find, but returns a list of RowView objects
:returns: :class:`Command` with RowView list result
"""
@abc.abstractmethod
def db_remove(self, table, record, column, *values, **keyvalues):
"""Create a command to delete fields or key-value pairs in a record
:param table: The OVS table to query
:type table: string
:param record: The record id (name/uuid)
:type record: string
:param column: The column whose value should be deleted
:type column: string
:param values: In case of list columns, the values to be deleted
from the list of values
In case of dict columns, the keys to delete
regardless of their value
:type value: varies depending on column
:param keyvalues: For dict columns, the keys to delete when the key's
value matches the argument value
:type keyvalues: values vary depending on column
:param if_exists: Do not fail if the record does not exist
:type if_exists: bool
:returns: :class:`Command` with no result
"""
| 40.235088
| 79
| 0.616552
|
import abc
import contextlib
import threading
class Command(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def execute(self, **transaction_options):
class Transaction(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def commit(self):
@abc.abstractmethod
def add(self, command):
def extend(self, commands):
return [self.add(command) for command in commands]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, tb):
if exc_type is None:
self.result = self.commit()
class API(object, metaclass=abc.ABCMeta):
def __init__(self, nested_transactions=True):
self._nested_txns = nested_transactions
self._nested_txns_map = {}
@abc.abstractmethod
def create_transaction(self, check_error=False, log_errors=True, **kwargs):
@contextlib.contextmanager
def transaction(self, check_error=False, log_errors=True, nested=True,
**kwargs):
nested = nested and self._nested_txns
cur_thread_id = threading.get_ident() if nested else object()
if cur_thread_id in self._nested_txns_map:
yield self._nested_txns_map[cur_thread_id]
else:
with self.create_transaction(
check_error, log_errors, **kwargs) as txn:
self._nested_txns_map[cur_thread_id] = txn
try:
yield txn
finally:
del self._nested_txns_map[cur_thread_id]
@abc.abstractmethod
def db_create(self, table, **col_values):
def db_create_row(self, table, **col_values):
raise NotImplementedError
@abc.abstractmethod
def db_destroy(self, table, record):
@abc.abstractmethod
def db_set(self, table, record, *col_values):
# TODO(twilson) Consider handling kwargs for arguments where order
# doesn't matter. Though that would break the assert_called_once_with
@abc.abstractmethod
def db_add(self, table, record, column, *values):
@abc.abstractmethod
def db_clear(self, table, record, column):
@abc.abstractmethod
def db_get(self, table, record, column):
@abc.abstractmethod
def db_list(self, table, records=None, columns=None, if_exists=False):
@abc.abstractmethod
def db_list_rows(self, table, record=None, if_exists=False):
@abc.abstractmethod
def db_find(self, table, *conditions, **kwargs):
@abc.abstractmethod
def db_find_rows(self, table, *conditions, **kwargs):
@abc.abstractmethod
def db_remove(self, table, record, column, *values, **keyvalues):
| true
| true
|
1c43cb8e251561f5ffb61eedf812dca217fee446
| 524
|
py
|
Python
|
mysite/ads/views.py
|
MarcosSalib/mysite_django
|
593c9758eeff0b9f536fe6dd2a84a8097ed1850e
|
[
"MIT"
] | null | null | null |
mysite/ads/views.py
|
MarcosSalib/mysite_django
|
593c9758eeff0b9f536fe6dd2a84a8097ed1850e
|
[
"MIT"
] | null | null | null |
mysite/ads/views.py
|
MarcosSalib/mysite_django
|
593c9758eeff0b9f536fe6dd2a84a8097ed1850e
|
[
"MIT"
] | null | null | null |
from django.views import View
from .owner import OwnerListView, OwnerDetailView, OwnerCreateView, OwnerUpdateView, OwnerDeleteView
from .models import Ad
# Create your views here.
class AdListView(OwnerListView):
model = Ad
class AdDetailView(OwnerDetailView):
model = Ad
class AdCreateView(OwnerCreateView):
model = Ad
fields = ['title', 'price', 'text']
class AdUpdateView(OwnerUpdateView):
model = Ad
fields = ['title', 'price', 'text']
class AdDeleteView(OwnerDeleteView):
model = Ad
| 20.96
| 100
| 0.727099
|
from django.views import View
from .owner import OwnerListView, OwnerDetailView, OwnerCreateView, OwnerUpdateView, OwnerDeleteView
from .models import Ad
class AdListView(OwnerListView):
model = Ad
class AdDetailView(OwnerDetailView):
model = Ad
class AdCreateView(OwnerCreateView):
model = Ad
fields = ['title', 'price', 'text']
class AdUpdateView(OwnerUpdateView):
model = Ad
fields = ['title', 'price', 'text']
class AdDeleteView(OwnerDeleteView):
model = Ad
| true
| true
|
1c43cbadac49f2a6bc13476f9326555357353823
| 1,364
|
py
|
Python
|
app.py
|
eshaan7/IPU_GPA_Calculator
|
19744864525ceb6de5bd7b6c5c5467870c0281ac
|
[
"MIT"
] | 6
|
2019-06-12T09:58:14.000Z
|
2019-07-28T23:13:28.000Z
|
app.py
|
eshaan7/IPU_GPA_Calculator
|
19744864525ceb6de5bd7b6c5c5467870c0281ac
|
[
"MIT"
] | 1
|
2019-06-24T13:57:21.000Z
|
2019-06-24T13:57:21.000Z
|
app.py
|
Eshaan7/IPU_GPA_Calculator
|
19744864525ceb6de5bd7b6c5c5467870c0281ac
|
[
"MIT"
] | 3
|
2019-06-12T09:58:16.000Z
|
2019-08-26T20:08:17.000Z
|
import os
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
app.secret_key = "66b58fafa6a470f26fd2adc9de14cef2"
''' PWA Stuff '''
# only trigger SSLify if the app is running on Heroku
if 'DYNO' in os.environ:
from flask_sslify import SSLify
sslify = SSLify(app)
@app.route('/sw.js', methods=['GET'])
def sw():
return app.send_static_file('sw.js')
@app.route('/offline.html')
def offline():
return app.send_static_file('offline.html')
''' Routes/views '''
@app.route('/', methods=['GET','POST'])
@app.route('/home', methods=['GET','POST'])
def index():
no_of_subjects = 14
if request.method == 'POST':
credits = [ int (i) for i in request.form.getlist('credits[]') ]
grades = request.form.getlist('grades[]')
FinalGPA = gpa_calc(no_of_subjects, credits, grades)
return render_template('index.html', no_of_subjects=no_of_subjects, FinalGPA=FinalGPA)
return render_template('index.html', no_of_subjects=no_of_subjects)
''' Utility functions '''
def gpa_calc(no_of_subjects, credits, grades):
FinalGPA = 0
grade_dict = { 'O': 10, 'A+': 9, 'A': 8, 'B+': 7, 'B': 6, 'C': 5, 'P': 4 }
grade_pts = [ grade_dict[grade] for grade in grades ]
for c, gp in zip(credits, grade_pts):
FinalGPA = FinalGPA + float(c*gp)
return FinalGPA / sum(credits)
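# Worked example (hypothetical inputs): credits=[4, 3], grades=['O', 'A+']
# gives (4*10 + 3*9) / (4 + 3) = 67/7 ~= 9.57.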
if __name__=='__main__':
app.run(debug = False)
| 29.021277
| 88
| 0.691349
|
import os
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
app.secret_key = "66b58fafa6a470f26fd2adc9de14cef2"
if 'DYNO' in os.environ:
from flask_sslify import SSLify
sslify = SSLify(app)
@app.route('/sw.js', methods=['GET'])
def sw():
return app.send_static_file('sw.js')
@app.route('/offline.html')
def offline():
return app.send_static_file('offline.html')
@app.route('/', methods=['GET','POST'])
@app.route('/home', methods=['GET','POST'])
def index():
no_of_subjects = 14
if request.method == 'POST':
credits = [ int (i) for i in request.form.getlist('credits[]') ]
grades = request.form.getlist('grades[]')
FinalGPA = gpa_calc(no_of_subjects, credits, grades)
return render_template('index.html', no_of_subjects=no_of_subjects, FinalGPA=FinalGPA)
return render_template('index.html', no_of_subjects=no_of_subjects)
def gpa_calc(no_of_subjects, credits, grades):
FinalGPA = 0
grade_dict = { 'O': 10, 'A+': 9, 'A': 8, 'B+': 7, 'B': 6, 'C': 5, 'P': 4 }
grade_pts = [ grade_dict[grade] for grade in grades ]
for c, gp in zip(credits, grade_pts):
FinalGPA = FinalGPA + float(c*gp)
return FinalGPA/sum(credits)
if __name__=='__main__':
app.run(debug = False)
| true
| true
|
1c43cc4bdaf1de7725864b1cb397715c2bbf7991
| 1,517
|
py
|
Python
|
main.py
|
yelite/RoomMonitor
|
2a1699478aa91ec001fe691c1160e7ac7f7f291d
|
[
"MIT"
] | null | null | null |
main.py
|
yelite/RoomMonitor
|
2a1699478aa91ec001fe691c1160e7ac7f7f291d
|
[
"MIT"
] | null | null | null |
main.py
|
yelite/RoomMonitor
|
2a1699478aa91ec001fe691c1160e7ac7f7f291d
|
[
"MIT"
] | null | null | null |
#coding=utf-8
from datetime import datetime, timedelta
from flask import Flask, render_template, g, jsonify
from model import Data
from helper import gen_unpack_func
from fetch import fetch
app = Flask(__name__)
def get_session():
session = getattr(g, '_session', None)
if session is None:
from db import Session
session = g._session = Session()
return session
@app.teardown_appcontext
def close_session(exception):
session = getattr(g, '_session', None)
if session:
# noinspection PyUnresolvedReferences
session.close()
@app.route('/')
def new():
return render_template('stat.html')
@app.route('/data')
def data():
session = get_session()
t = datetime.now() - timedelta(weeks=2)
obj = session.query(Data).filter(Data.time > t).order_by(Data.time).all()
# noinspection PyShadowingNames
data = {
'timestamp': [],
'pressure': [],
'light_level': [],
'temp': [],
'hum': []
}
unpack = gen_unpack_func(data, ['timestamp', 'pressure', 'light_level', 'temp', 'hum'])
for record in obj:
unpack(record)  # map() is lazy on Python 3, so iterate explicitly
return jsonify(**data)
@app.route('/current')
def current():
rv = fetch()
data = {'Light': 4095 - int(rv['light']),
'Temp': str(rv['temp']) + ' C',
'Pressure': str(rv['pressure'] / 100) + ' hPa',
'Humidity': str(rv['hum']) + '%'}
return render_template('current.html', items=data.items())
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8089, debug=1)
| 23.338462
| 92
| 0.607779
|
from datetime import datetime, timedelta
from flask import Flask, render_template, g, jsonify
from model import Data
from helper import gen_unpack_func
from fetch import fetch
app = Flask(__name__)
def get_session():
session = getattr(g, '_session', None)
if session is None:
from db import Session
session = g._session = Session()
return session
@app.teardown_appcontext
def close_session(exception):
session = getattr(g, '_session', None)
if session:
session.close()
@app.route('/')
def new():
return render_template('stat.html')
@app.route('/data')
def data():
session = get_session()
t = datetime.now() - timedelta(weeks=2)
obj = session.query(Data).filter(Data.time > t).order_by(Data.time).all()
data = {
'timestamp': [],
'pressure': [],
'light_level': [],
'temp': [],
'hum': []
}
unpack = gen_unpack_func(data, ['timestamp', 'pressure', 'light_level', 'temp', 'hum'])
for record in obj:
unpack(record)
return jsonify(**data)
@app.route('/current')
def current():
rv = fetch()
data = {'Light': 4095 - int(rv['light']),
'Temp': str(rv['temp']) + ' C',
'Pressure': str(rv['pressure'] / 100) + ' hPa',
'Humidity': str(rv['hum']) + '%'}
return render_template('current.html', items=data.items())
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8089, debug=1)
| true
| true
|
1c43ce98895617da79c14c62c224e64da0d934dc
| 335
|
py
|
Python
|
notes/algo-ds-practice/problems/dp/kadane.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | 6
|
2020-07-05T05:15:19.000Z
|
2021-01-24T20:17:14.000Z
|
notes/algo-ds-practice/problems/dp/kadane.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | null | null | null |
notes/algo-ds-practice/problems/dp/kadane.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | 2
|
2020-09-14T06:46:37.000Z
|
2021-06-15T09:17:21.000Z
|
def kadane(arr):
max_global = -1e9
max_local = max_global
for elem in arr:
max_local = max(elem, elem + max_local)
max_global = max(max_global, max_local)
return max_global
def main():
arr = [-2, -3, 4, -1, -2, 1, 5, -3]
ret = kadane(arr)
print(ret)
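# prints 7: the maximum-sum contiguous subarray is [4, -1, -2, 1, 5].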
if __name__ == "__main__":
main()
| 19.705882
| 47
| 0.576119
|
def kadane(arr):
max_global = -1e9
max_local = max_global
for elem in arr:
max_local = max(elem, elem + max_local)
max_global = max(max_global, max_local)
return max_global
def main():
arr = [-2, -3, 4, -1, -2, 1, 5, -3]
ret = kadane(arr)
print(ret)
if __name__ == "__main__":
main()
| true
| true
|
1c43d004d4a185b1bfb3c178eb285b0d6056337f
| 2,100
|
py
|
Python
|
llvm-spirv/test/lit.cfg.py
|
Ralender/sycl
|
1fcd1e6d3da10024be92148501aced30ae3aa2be
|
[
"Apache-2.0"
] | 1
|
2020-09-25T23:33:05.000Z
|
2020-09-25T23:33:05.000Z
|
llvm-spirv/test/lit.cfg.py
|
Ralender/sycl
|
1fcd1e6d3da10024be92148501aced30ae3aa2be
|
[
"Apache-2.0"
] | null | null | null |
llvm-spirv/test/lit.cfg.py
|
Ralender/sycl
|
1fcd1e6d3da10024be92148501aced30ae3aa2be
|
[
"Apache-2.0"
] | null | null | null |
# -*- Python -*-
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
from lit.llvm.subst import FindTool
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'LLVM_SPIRV'
# testFormat: The test format to use to interpret tests.
config.test_format = lit.formats.ShTest(True)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.cl', '.ll', '.spt']
# excludes: A list of directories and files to exclude from the testsuite.
config.excludes = ['CMakeLists.txt']
if not config.spirv_skip_debug_info_tests:
# Direct object generation.
config.available_features.add('object-emission')
# LLVM can be configured with an empty default triple.
# Some tests are "generic" and require a valid default triple.
if config.target_triple:
config.available_features.add('default_triple')
# Ask llvm-config about asserts.
llvm_config.feature_config([('--assertion-mode', {'ON': 'asserts'})])
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.test_run_dir, 'test_output')
llvm_config.use_default_substitutions()
llvm_config.use_clang()
config.substitutions.append(('%PATH%', config.environment['PATH']))
tool_dirs = [config.llvm_tools_dir, config.llvm_spirv_dir]
tools = ['llvm-as', 'llvm-dis', 'llvm-spirv', 'not']
if not config.spirv_skip_debug_info_tests:
tools.extend(['llc', 'llvm-dwarfdump', 'llvm-objdump', 'llvm-readelf', 'llvm-readobj'])
llvm_config.add_tool_substitutions(tools, tool_dirs)
if config.spirv_tools_have_spirv_val:
new_ld_library_path = os.path.pathsep.join((config.spirv_tools_lib_dir, config.environment['LD_LIBRARY_PATH']))
config.environment['LD_LIBRARY_PATH'] = new_ld_library_path
llvm_config.add_tool_substitutions(['spirv-val'], [config.spirv_tools_bin_dir])
else:
config.substitutions.append(('spirv-val', ':'))
| 33.870968
| 115
| 0.748571
|
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
from lit.llvm.subst import FindTool
config.name = 'LLVM_SPIRV'
config.test_format = lit.formats.ShTest(True)
config.suffixes = ['.cl', '.ll', '.spt']
config.excludes = ['CMakeLists.txt']
if not config.spirv_skip_debug_info_tests:
config.available_features.add('object-emission')
if config.target_triple:
config.available_features.add('default_triple')
llvm_config.feature_config([('--assertion-mode', {'ON': 'asserts'})])
config.test_source_root = os.path.dirname(__file__)
config.test_exec_root = os.path.join(config.test_run_dir, 'test_output')
llvm_config.use_default_substitutions()
llvm_config.use_clang()
config.substitutions.append(('%PATH%', config.environment['PATH']))
tool_dirs = [config.llvm_tools_dir, config.llvm_spirv_dir]
tools = ['llvm-as', 'llvm-dis', 'llvm-spirv', 'not']
if not config.spirv_skip_debug_info_tests:
tools.extend(['llc', 'llvm-dwarfdump', 'llvm-objdump', 'llvm-readelf', 'llvm-readobj'])
llvm_config.add_tool_substitutions(tools, tool_dirs)
if config.spirv_tools_have_spirv_val:
new_ld_library_path = os.path.pathsep.join((config.spirv_tools_lib_dir, config.environment['LD_LIBRARY_PATH']))
config.environment['LD_LIBRARY_PATH'] = new_ld_library_path
llvm_config.add_tool_substitutions(['spirv-val'], [config.spirv_tools_bin_dir])
else:
config.substitutions.append(('spirv-val', ':'))
| true
| true
|
1c43d04f11f00dcdcc7312268a8db53989c15597
| 28,424
|
py
|
Python
|
swift/common/request_helpers.py
|
Priyanka-Askani/swift
|
1ab691f63778008015b34ce004992844acee9968
|
[
"Apache-2.0"
] | 1
|
2019-05-25T10:55:58.000Z
|
2019-05-25T10:55:58.000Z
|
swift/common/request_helpers.py
|
Priyanka-Askani/swift
|
1ab691f63778008015b34ce004992844acee9968
|
[
"Apache-2.0"
] | 12
|
2015-06-23T23:20:17.000Z
|
2016-01-27T00:37:12.000Z
|
swift/common/request_helpers.py
|
Priyanka-Askani/swift
|
1ab691f63778008015b34ce004992844acee9968
|
[
"Apache-2.0"
] | 5
|
2015-06-04T19:00:11.000Z
|
2015-12-16T21:04:33.000Z
|
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous utility functions for use in generating responses.
Why not swift.common.utils, you ask? Because this way we can import things
from swob in here without creating circular imports.
"""
import hashlib
import itertools
import sys
import time
import six
from six.moves.urllib.parse import unquote
from swift.common.header_key_dict import HeaderKeyDict
from swift import gettext_ as _
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success
from swift.common.swob import HTTPBadRequest, \
HTTPServiceUnavailable, Range, is_chunked, multi_range_iterator, \
HTTPPreconditionFailed
from swift.common.utils import split_path, validate_device_partition, \
close_if_possible, maybe_multipart_byteranges_to_document_iters, \
multipart_byteranges_to_document_iters, parse_content_type, \
parse_content_range, csv_append, list_from_csv, Spliterator
from swift.common.wsgi import make_subrequest
OBJECT_TRANSIENT_SYSMETA_PREFIX = 'x-object-transient-sysmeta-'
def get_param(req, name, default=None):
"""
Get parameters from an HTTP request, ensuring proper handling of UTF-8
encoding.
:param req: request object
:param name: parameter name
:param default: result to return if the parameter is not found
:returns: HTTP request parameter value
(as UTF-8 encoded str, not unicode object)
:raises HTTPBadRequest: if param not valid UTF-8 byte sequence
"""
value = req.params.get(name, default)
if value and not isinstance(value, six.text_type):
try:
value.decode('utf8') # Ensure UTF8ness
except UnicodeDecodeError:
raise HTTPBadRequest(
request=req, content_type='text/plain',
body='"%s" parameter not valid UTF-8' % name)
return value
def get_name_and_placement(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path and storage
policy. The storage policy index is extracted from the headers of
the request and converted to a StoragePolicy instance. The
remaining args are passed through to
:meth:`split_and_validate_path`.
:returns: a list, result of :meth:`split_and_validate_path` with
the BaseStoragePolicy instance appended on the end
:raises HTTPServiceUnavailable: if the path is invalid or no policy exists
with the extracted policy_index.
"""
policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
policy = POLICIES.get_by_index(policy_index)
if not policy:
raise HTTPServiceUnavailable(
body=_("No policy with index %s") % policy_index,
request=request, content_type='text/plain')
results = split_and_validate_path(request, minsegs=minsegs,
maxsegs=maxsegs,
rest_with_last=rest_with_last)
results.append(policy)
return results
def split_and_validate_path(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path.
:returns: result of :meth:`~swift.common.utils.split_path` if
everything's okay
:raises HTTPBadRequest: if something's not okay
"""
try:
segs = split_path(unquote(request.path),
minsegs, maxsegs, rest_with_last)
validate_device_partition(segs[0], segs[1])
return segs
except ValueError as err:
raise HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
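# Example (illustrative sketch, assuming an object-server request whose path
# is '/sda1/0/a/c/o'):
#
#     split_and_validate_path(req, 5, 5, True)
#     # -> ['sda1', '0', 'a', 'c', 'o']
#
# A bad device or partition name surfaces as HTTPBadRequest rather than a
# bare ValueError.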
def is_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 8 + len(server_type):
return False
return key.lower().startswith(get_user_meta_prefix(server_type))
def is_sys_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 11 + len(server_type):
return False
return key.lower().startswith(get_sys_meta_prefix(server_type))
def is_sys_or_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user or system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
return is_user_meta(server_type, key) or is_sys_meta(server_type, key)
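# Example (illustrative): with server_type 'object',
#
#     is_user_meta('object', 'X-Object-Meta-Color')           # -> True
#     is_sys_meta('object', 'X-Object-Sysmeta-Crypto-Etag')   # -> True
#     is_user_meta('object', 'X-Object-Meta-')                # -> False (empty name)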
def is_object_transient_sysmeta(key):
"""
Tests if a header key starts with and is longer than the prefix for object
transient system metadata.
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= len(OBJECT_TRANSIENT_SYSMETA_PREFIX):
return False
return key.lower().startswith(OBJECT_TRANSIENT_SYSMETA_PREFIX)
def strip_user_meta_prefix(server_type, key):
"""
Removes the user metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
if not is_user_meta(server_type, key):
raise ValueError('Key is not user meta')
return key[len(get_user_meta_prefix(server_type)):]
def strip_sys_meta_prefix(server_type, key):
"""
Removes the system metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
if not is_sys_meta(server_type, key):
raise ValueError('Key is not sysmeta')
return key[len(get_sys_meta_prefix(server_type)):]
def strip_object_transient_sysmeta_prefix(key):
"""
Removes the object transient system metadata prefix from the start of a
header key.
:param key: header key
:returns: stripped header key
"""
if not is_object_transient_sysmeta(key):
raise ValueError('Key is not object transient sysmeta')
return key[len(OBJECT_TRANSIENT_SYSMETA_PREFIX):]
def get_user_meta_prefix(server_type):
"""
Returns the prefix for user metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's user metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'meta')
def get_sys_meta_prefix(server_type):
"""
Returns the prefix for system metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's system metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'sysmeta')
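# Example (illustrative): the namespaces defined by these prefixes look like
#
#     get_user_meta_prefix('object')    # -> 'x-object-meta-'
#     get_sys_meta_prefix('container')  # -> 'x-container-sysmeta-'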
def get_object_transient_sysmeta(key):
"""
Returns the Object Transient System Metadata header for key.
The Object Transient System Metadata namespace will be persisted by
backend object servers. These headers are treated in the same way as
object user metadata i.e. all headers in this namespace will be
replaced on every POST request.
:param key: metadata key
:returns: the entire object transient system metadata header for key
"""
return '%s%s' % (OBJECT_TRANSIENT_SYSMETA_PREFIX, key)
def remove_items(headers, condition):
"""
Removes items from a dict whose keys satisfy
the given condition.
:param headers: a dict of headers
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be removed.
:returns: a dict, possibly empty, of headers that have been removed
"""
removed = {}
keys = filter(condition, headers)
removed.update((key, headers.pop(key)) for key in keys)
return removed
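# Example (illustrative sketch): stripping all object user metadata out of a
# header dict before forwarding it,
#
#     removed = remove_items(headers, lambda k: is_user_meta('object', k))
#
# leaves the remaining headers in place and returns the popped user metadata.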
def copy_header_subset(from_r, to_r, condition):
"""
Will copy desired subset of headers from from_r to to_r.
:param from_r: a swob Request or Response
:param to_r: a swob Request or Response
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be copied.
"""
for k, v in from_r.headers.items():
if condition(k):
to_r.headers[k] = v
def check_path_header(req, name, length, error_msg):
"""
Validate that the value of path-like header is
well formatted. We assume the caller ensures that
specific header is present in req.headers.
:param req: HTTP request object
:param name: header name
:param length: length of path segment check
:param error_msg: error message for client
    :returns: a list with path parts according to length
    :raises HTTPPreconditionFailed: if header value
        is not well formatted.
"""
hdr = unquote(req.headers.get(name))
if not hdr.startswith('/'):
hdr = '/' + hdr
try:
return split_path(hdr, length, length, True)
except ValueError:
raise HTTPPreconditionFailed(
request=req,
body=error_msg)
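# Example (illustrative sketch, assuming a header such as X-Copy-From whose
# value is 'c/o'):
#
#     check_path_header(req, 'X-Copy-From', 2, 'bad X-Copy-From')
#     # -> ['c', 'o']
#
# A missing leading slash is tolerated; a wrong segment count raises
# HTTPPreconditionFailed with the given error message.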
class SegmentedIterable(object):
"""
Iterable that returns the object contents for a large object.
:param req: original request object
:param app: WSGI application from which segments will come
    :param listing_iter: iterable yielding dicts that describe the object
        segments to fetch, along with the byte subranges to fetch, under
        the keys 'path', 'hash', 'bytes', 'first_byte' and 'last_byte'
        (or 'raw_data' for inlined data segments).
        If 'hash' is None, no MD5 verification will be done.
        If 'bytes' is None, no length verification will be done.
        If 'first_byte' and 'last_byte' are None, then the entire object
        will be fetched.
:param max_get_time: maximum permitted duration of a GET request (seconds)
:param logger: logger object
:param swift_source: value of swift.source in subrequest environ
(just for logging)
:param ua_suffix: string to append to user-agent.
:param name: name of manifest (used in logging only)
:param response_body_length: optional response body length for
the response being sent to the client.
"""
def __init__(self, req, app, listing_iter, max_get_time,
logger, ua_suffix, swift_source,
name='<not specified>', response_body_length=None):
self.req = req
self.app = app
self.listing_iter = listing_iter
self.max_get_time = max_get_time
self.logger = logger
self.ua_suffix = " " + ua_suffix
self.swift_source = swift_source
self.name = name
self.response_body_length = response_body_length
self.peeked_chunk = None
self.app_iter = self._internal_iter()
self.validated_first_segment = False
self.current_resp = None
def _coalesce_requests(self):
pending_req = pending_etag = pending_size = None
try:
for seg_dict in self.listing_iter:
if 'raw_data' in seg_dict:
if pending_req:
yield pending_req, pending_etag, pending_size
to_yield = seg_dict['raw_data'][
seg_dict['first_byte']:seg_dict['last_byte'] + 1]
yield to_yield, None, len(seg_dict['raw_data'])
pending_req = pending_etag = pending_size = None
continue
seg_path, seg_etag, seg_size, first_byte, last_byte = (
seg_dict['path'], seg_dict.get('hash'),
seg_dict.get('bytes'),
seg_dict['first_byte'], seg_dict['last_byte'])
if seg_size is not None:
seg_size = int(seg_size)
first_byte = first_byte or 0
go_to_end = last_byte is None or (
seg_size is not None and last_byte == seg_size - 1)
# The "multipart-manifest=get" query param ensures that the
# segment is a plain old object, not some flavor of large
# object; therefore, its etag is its MD5sum and hence we can
# check it.
path = seg_path + '?multipart-manifest=get'
seg_req = make_subrequest(
self.req.environ, path=path, method='GET',
headers={'x-auth-token': self.req.headers.get(
'x-auth-token')},
agent=('%(orig)s ' + self.ua_suffix),
swift_source=self.swift_source)
seg_req_rangeval = None
if first_byte != 0 or not go_to_end:
seg_req_rangeval = "%s-%s" % (
first_byte, '' if go_to_end else last_byte)
seg_req.headers['Range'] = "bytes=" + seg_req_rangeval
# We can only coalesce if paths match and we know the segment
# size (so we can check that the ranges will be allowed)
if pending_req and pending_req.path == seg_req.path and \
seg_size is not None:
# Make a new Range object so that we don't goof up the
# existing one in case of invalid ranges. Note that a
# range set with too many individual byteranges is
# invalid, so we can combine N valid byteranges and 1
# valid byterange and get an invalid range set.
if pending_req.range:
new_range_str = str(pending_req.range)
else:
new_range_str = "bytes=0-%d" % (seg_size - 1)
if seg_req.range:
new_range_str += "," + seg_req_rangeval
else:
new_range_str += ",0-%d" % (seg_size - 1)
if Range(new_range_str).ranges_for_length(seg_size):
# Good news! We can coalesce the requests
pending_req.headers['Range'] = new_range_str
continue
# else, Too many ranges, or too much backtracking, or ...
if pending_req:
yield pending_req, pending_etag, pending_size
pending_req = seg_req
pending_etag = seg_etag
pending_size = seg_size
except ListingIterError:
e_type, e_value, e_traceback = sys.exc_info()
if pending_req:
yield pending_req, pending_etag, pending_size
six.reraise(e_type, e_value, e_traceback)
if pending_req:
yield pending_req, pending_etag, pending_size
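    # Example (illustrative sketch): two consecutive segments that point at
    # the same object path, say bytes 0-99 and 100-199 of the same segment
    # object, are merged into one subrequest carrying
    # 'Range: bytes=0-99,100-199', provided the combined range set is still
    # valid for the segment size.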
def _requests_to_bytes_iter(self):
# Take the requests out of self._coalesce_requests, actually make
# the requests, and generate the bytes from the responses.
#
# Yields 2-tuples (segment-name, byte-chunk). The segment name is
# used for logging.
for data_or_req, seg_etag, seg_size in self._coalesce_requests():
if isinstance(data_or_req, bytes): # ugly, awful overloading
yield ('data segment', data_or_req)
continue
seg_req = data_or_req
seg_resp = seg_req.get_response(self.app)
if not is_success(seg_resp.status_int):
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'While processing manifest %s, '
'got %d while retrieving %s' %
(self.name, seg_resp.status_int, seg_req.path))
elif ((seg_etag and (seg_resp.etag != seg_etag)) or
(seg_size and (seg_resp.content_length != seg_size) and
not seg_req.range)):
# The content-length check is for security reasons. Seems
# possible that an attacker could upload a >1mb object and
# then replace it with a much smaller object with same
# etag. Then create a big nested SLO that calls that
# object many times which would hammer our obj servers. If
# this is a range request, don't check content-length
# because it won't match.
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'%(r_size)s != %(s_size)s.' %
{'path': seg_req.path, 'r_etag': seg_resp.etag,
'r_size': seg_resp.content_length,
's_etag': seg_etag,
's_size': seg_size})
else:
self.current_resp = seg_resp
seg_hash = None
if seg_resp.etag and not seg_req.headers.get('Range'):
                    # Only calculate the MD5 if we can use it to validate
seg_hash = hashlib.md5()
document_iters = maybe_multipart_byteranges_to_document_iters(
seg_resp.app_iter,
seg_resp.headers['Content-Type'])
for chunk in itertools.chain.from_iterable(document_iters):
if seg_hash:
seg_hash.update(chunk)
yield (seg_req.path, chunk)
close_if_possible(seg_resp.app_iter)
if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
raise SegmentError(
"Bad MD5 checksum in %(name)s for %(seg)s: headers had"
" %(etag)s, but object MD5 was actually %(actual)s" %
{'seg': seg_req.path, 'etag': seg_resp.etag,
'name': self.name, 'actual': seg_hash.hexdigest()})
def _byte_counting_iter(self):
# Checks that we give the client the right number of bytes. Raises
# SegmentError if the number of bytes is wrong.
bytes_left = self.response_body_length
for seg_name, chunk in self._requests_to_bytes_iter():
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
bytes_left -= len(chunk)
raise SegmentError(
'Too many bytes for %(name)s; truncating in '
'%(seg)s with %(left)d bytes left' %
{'name': self.name, 'seg': seg_name,
'left': -bytes_left})
if bytes_left:
raise SegmentError('Expected another %d bytes for %s; '
'closing connection' % (bytes_left, self.name))
def _time_limited_iter(self):
# Makes sure a GET response doesn't take more than self.max_get_time
# seconds to process. Raises an exception if things take too long.
start_time = time.time()
for chunk in self._byte_counting_iter():
now = time.time()
yield chunk
if now - start_time > self.max_get_time:
raise SegmentError(
'While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
def _internal_iter(self):
# Top level of our iterator stack: pass bytes through; catch and
# handle exceptions.
try:
for chunk in self._time_limited_iter():
yield chunk
except (ListingIterError, SegmentError) as err:
self.logger.error(err)
if not self.validated_first_segment:
raise
finally:
if self.current_resp:
close_if_possible(self.current_resp.app_iter)
def app_iter_range(self, *a, **kw):
"""
swob.Response will only respond with a 206 status in certain cases; one
of those is if the body iterator responds to .app_iter_range().
However, this object (or really, its listing iter) is smart enough to
handle the range stuff internally, so we just no-op this out for swob.
"""
return self
def app_iter_ranges(self, ranges, content_type, boundary, content_size):
"""
This method assumes that iter(self) yields all the data bytes that
go into the response, but none of the MIME stuff. For example, if
the response will contain three MIME docs with data "abcd", "efgh",
and "ijkl", then iter(self) will give out the bytes "abcdefghijkl".
This method inserts the MIME stuff around the data bytes.
"""
si = Spliterator(self)
mri = multi_range_iterator(
ranges, content_type, boundary, content_size,
lambda start, end_plus_one: si.take(end_plus_one - start))
try:
for x in mri:
yield x
finally:
self.close()
def validate_first_segment(self):
"""
Start fetching object data to ensure that the first segment (if any) is
valid. This is to catch cases like "first segment is missing" or
"first segment's etag doesn't match manifest".
Note: this does not validate that you have any segments. A
zero-segment large object is not erroneous; it is just empty.
"""
if self.validated_first_segment:
return
try:
self.peeked_chunk = next(self.app_iter)
except StopIteration:
pass
finally:
self.validated_first_segment = True
def __iter__(self):
if self.peeked_chunk is not None:
pc = self.peeked_chunk
self.peeked_chunk = None
return itertools.chain([pc], self.app_iter)
else:
return self.app_iter
def close(self):
"""
        Called when the client disconnects. Ensure that the connection to the
backend server is closed.
"""
close_if_possible(self.app_iter)
def http_response_to_document_iters(response, read_chunk_size=4096):
"""
Takes a successful object-GET HTTP response and turns it into an
iterator of (first-byte, last-byte, length, headers, body-file)
5-tuples.
The response must either be a 200 or a 206; if you feed in a 204 or
something similar, this probably won't work.
:param response: HTTP response, like from bufferedhttp.http_connect(),
not a swob.Response.
"""
chunked = is_chunked(dict(response.getheaders()))
if response.status == 200:
if chunked:
# Single "range" that's the whole object with an unknown length
return iter([(0, None, None, response.getheaders(),
response)])
# Single "range" that's the whole object
content_length = int(response.getheader('Content-Length'))
return iter([(0, content_length - 1, content_length,
response.getheaders(), response)])
content_type, params_list = parse_content_type(
response.getheader('Content-Type'))
if content_type != 'multipart/byteranges':
# Single range; no MIME framing, just the bytes. The start and end
# byte indices are in the Content-Range header.
start, end, length = parse_content_range(
response.getheader('Content-Range'))
return iter([(start, end, length, response.getheaders(), response)])
else:
# Multiple ranges; the response body is a multipart/byteranges MIME
# document, and we have to parse it using the MIME boundary
# extracted from the Content-Type header.
params = dict(params_list)
return multipart_byteranges_to_document_iters(
response, params['boundary'], read_chunk_size)
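# Example (illustrative): a plain 200 response carrying 'Content-Length: 10'
# yields a single tuple
#
#     (0, 9, 10, <response headers>, <the response as body-file>)
#
# while a 206 multipart/byteranges response yields one such tuple per MIME
# part, parsed out using the boundary from the Content-Type header.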
def update_etag_is_at_header(req, name):
"""
Helper function to update an X-Backend-Etag-Is-At header whose value is a
list of alternative header names at which the actual object etag may be
found. This informs the object server where to look for the actual object
etag when processing conditional requests.
Since the proxy server and/or middleware may set alternative etag header
names, the value of X-Backend-Etag-Is-At is a comma separated list which
the object server inspects in order until it finds an etag value.
:param req: a swob Request
:param name: name of a sysmeta where alternative etag may be found
"""
if ',' in name:
# HTTP header names should not have commas but we'll check anyway
raise ValueError('Header name must not contain commas')
existing = req.headers.get("X-Backend-Etag-Is-At")
req.headers["X-Backend-Etag-Is-At"] = csv_append(
existing, name)
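# Example (illustrative sketch): middleware that stores an alternative etag
# under X-Object-Sysmeta-Crypto-Etag would call
#
#     update_etag_is_at_header(req, 'X-Object-Sysmeta-Crypto-Etag')
#
# which appends that name to any existing X-Backend-Etag-Is-At list.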
def resolve_etag_is_at_header(req, metadata):
"""
Helper function to resolve an alternative etag value that may be stored in
metadata under an alternate name.
The value of the request's X-Backend-Etag-Is-At header (if it exists) is a
comma separated list of alternate names in the metadata at which an
alternate etag value may be found. This list is processed in order until an
alternate etag is found.
The left most value in X-Backend-Etag-Is-At will have been set by the left
most middleware, or if no middleware, by ECObjectController, if an EC
policy is in use. The left most middleware is assumed to be the authority
on what the etag value of the object content is.
The resolver will work from left to right in the list until it finds a
value that is a name in the given metadata. So the left most wins, IF it
exists in the metadata.
By way of example, assume the encrypter middleware is installed. If an
object is *not* encrypted then the resolver will not find the encrypter
middleware's alternate etag sysmeta (X-Object-Sysmeta-Crypto-Etag) but will
then find the EC alternate etag (if EC policy). But if the object *is*
encrypted then X-Object-Sysmeta-Crypto-Etag is found and used, which is
correct because it should be preferred over X-Object-Sysmeta-Ec-Etag.
:param req: a swob Request
:param metadata: a dict containing object metadata
:return: an alternate etag value if any is found, otherwise None
"""
alternate_etag = None
metadata = HeaderKeyDict(metadata)
if "X-Backend-Etag-Is-At" in req.headers:
names = list_from_csv(req.headers["X-Backend-Etag-Is-At"])
for name in names:
if name in metadata:
alternate_etag = metadata[name]
break
return alternate_etag
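# Example (illustrative sketch): given a request header
# 'X-Backend-Etag-Is-At: X-Object-Sysmeta-Crypto-Etag, X-Object-Sysmeta-Ec-Etag',
# the resolver returns the crypto etag if that key is present in the object
# metadata and falls back to the EC etag otherwise; the left-most name wins.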
| 39.587744
| 79
| 0.63369
|
import hashlib
import itertools
import sys
import time
import six
from six.moves.urllib.parse import unquote
from swift.common.header_key_dict import HeaderKeyDict
from swift import gettext_ as _
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success
from swift.common.swob import HTTPBadRequest, \
HTTPServiceUnavailable, Range, is_chunked, multi_range_iterator, \
HTTPPreconditionFailed
from swift.common.utils import split_path, validate_device_partition, \
close_if_possible, maybe_multipart_byteranges_to_document_iters, \
multipart_byteranges_to_document_iters, parse_content_type, \
parse_content_range, csv_append, list_from_csv, Spliterator
from swift.common.wsgi import make_subrequest
OBJECT_TRANSIENT_SYSMETA_PREFIX = 'x-object-transient-sysmeta-'
def get_param(req, name, default=None):
value = req.params.get(name, default)
if value and not isinstance(value, six.text_type):
try:
value.decode('utf8')
except UnicodeDecodeError:
raise HTTPBadRequest(
request=req, content_type='text/plain',
body='"%s" parameter not valid UTF-8' % name)
return value
def get_name_and_placement(request, minsegs=1, maxsegs=None,
rest_with_last=False):
policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
policy = POLICIES.get_by_index(policy_index)
if not policy:
raise HTTPServiceUnavailable(
body=_("No policy with index %s") % policy_index,
request=request, content_type='text/plain')
results = split_and_validate_path(request, minsegs=minsegs,
maxsegs=maxsegs,
rest_with_last=rest_with_last)
results.append(policy)
return results
def split_and_validate_path(request, minsegs=1, maxsegs=None,
rest_with_last=False):
try:
segs = split_path(unquote(request.path),
minsegs, maxsegs, rest_with_last)
validate_device_partition(segs[0], segs[1])
return segs
except ValueError as err:
raise HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
def is_user_meta(server_type, key):
if len(key) <= 8 + len(server_type):
return False
return key.lower().startswith(get_user_meta_prefix(server_type))
def is_sys_meta(server_type, key):
if len(key) <= 11 + len(server_type):
return False
return key.lower().startswith(get_sys_meta_prefix(server_type))
def is_sys_or_user_meta(server_type, key):
return is_user_meta(server_type, key) or is_sys_meta(server_type, key)
def is_object_transient_sysmeta(key):
if len(key) <= len(OBJECT_TRANSIENT_SYSMETA_PREFIX):
return False
return key.lower().startswith(OBJECT_TRANSIENT_SYSMETA_PREFIX)
def strip_user_meta_prefix(server_type, key):
if not is_user_meta(server_type, key):
raise ValueError('Key is not user meta')
return key[len(get_user_meta_prefix(server_type)):]
def strip_sys_meta_prefix(server_type, key):
if not is_sys_meta(server_type, key):
raise ValueError('Key is not sysmeta')
return key[len(get_sys_meta_prefix(server_type)):]
def strip_object_transient_sysmeta_prefix(key):
if not is_object_transient_sysmeta(key):
raise ValueError('Key is not object transient sysmeta')
return key[len(OBJECT_TRANSIENT_SYSMETA_PREFIX):]
def get_user_meta_prefix(server_type):
return 'x-%s-%s-' % (server_type.lower(), 'meta')
def get_sys_meta_prefix(server_type):
return 'x-%s-%s-' % (server_type.lower(), 'sysmeta')
def get_object_transient_sysmeta(key):
return '%s%s' % (OBJECT_TRANSIENT_SYSMETA_PREFIX, key)
def remove_items(headers, condition):
removed = {}
keys = filter(condition, headers)
removed.update((key, headers.pop(key)) for key in keys)
return removed
def copy_header_subset(from_r, to_r, condition):
for k, v in from_r.headers.items():
if condition(k):
to_r.headers[k] = v
def check_path_header(req, name, length, error_msg):
hdr = unquote(req.headers.get(name))
if not hdr.startswith('/'):
hdr = '/' + hdr
try:
return split_path(hdr, length, length, True)
except ValueError:
raise HTTPPreconditionFailed(
request=req,
body=error_msg)
class SegmentedIterable(object):
def __init__(self, req, app, listing_iter, max_get_time,
logger, ua_suffix, swift_source,
name='<not specified>', response_body_length=None):
self.req = req
self.app = app
self.listing_iter = listing_iter
self.max_get_time = max_get_time
self.logger = logger
self.ua_suffix = " " + ua_suffix
self.swift_source = swift_source
self.name = name
self.response_body_length = response_body_length
self.peeked_chunk = None
self.app_iter = self._internal_iter()
self.validated_first_segment = False
self.current_resp = None
def _coalesce_requests(self):
pending_req = pending_etag = pending_size = None
try:
for seg_dict in self.listing_iter:
if 'raw_data' in seg_dict:
if pending_req:
yield pending_req, pending_etag, pending_size
to_yield = seg_dict['raw_data'][
seg_dict['first_byte']:seg_dict['last_byte'] + 1]
yield to_yield, None, len(seg_dict['raw_data'])
pending_req = pending_etag = pending_size = None
continue
seg_path, seg_etag, seg_size, first_byte, last_byte = (
seg_dict['path'], seg_dict.get('hash'),
seg_dict.get('bytes'),
seg_dict['first_byte'], seg_dict['last_byte'])
if seg_size is not None:
seg_size = int(seg_size)
first_byte = first_byte or 0
go_to_end = last_byte is None or (
seg_size is not None and last_byte == seg_size - 1)
path = seg_path + '?multipart-manifest=get'
seg_req = make_subrequest(
self.req.environ, path=path, method='GET',
headers={'x-auth-token': self.req.headers.get(
'x-auth-token')},
agent=('%(orig)s ' + self.ua_suffix),
swift_source=self.swift_source)
seg_req_rangeval = None
if first_byte != 0 or not go_to_end:
seg_req_rangeval = "%s-%s" % (
first_byte, '' if go_to_end else last_byte)
seg_req.headers['Range'] = "bytes=" + seg_req_rangeval
if pending_req and pending_req.path == seg_req.path and \
seg_size is not None:
# existing one in case of invalid ranges. Note that a
# range set with too many individual byteranges is
# invalid, so we can combine N valid byteranges and 1
# valid byterange and get an invalid range set.
if pending_req.range:
new_range_str = str(pending_req.range)
else:
new_range_str = "bytes=0-%d" % (seg_size - 1)
if seg_req.range:
new_range_str += "," + seg_req_rangeval
else:
new_range_str += ",0-%d" % (seg_size - 1)
if Range(new_range_str).ranges_for_length(seg_size):
# Good news! We can coalesce the requests
pending_req.headers['Range'] = new_range_str
continue
# else, Too many ranges, or too much backtracking, or ...
if pending_req:
yield pending_req, pending_etag, pending_size
pending_req = seg_req
pending_etag = seg_etag
pending_size = seg_size
except ListingIterError:
e_type, e_value, e_traceback = sys.exc_info()
if pending_req:
yield pending_req, pending_etag, pending_size
six.reraise(e_type, e_value, e_traceback)
if pending_req:
yield pending_req, pending_etag, pending_size
def _requests_to_bytes_iter(self):
# Take the requests out of self._coalesce_requests, actually make
# the requests, and generate the bytes from the responses.
#
# Yields 2-tuples (segment-name, byte-chunk). The segment name is
# used for logging.
for data_or_req, seg_etag, seg_size in self._coalesce_requests():
if isinstance(data_or_req, bytes): # ugly, awful overloading
yield ('data segment', data_or_req)
continue
seg_req = data_or_req
seg_resp = seg_req.get_response(self.app)
if not is_success(seg_resp.status_int):
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'While processing manifest %s, '
'got %d while retrieving %s' %
(self.name, seg_resp.status_int, seg_req.path))
elif ((seg_etag and (seg_resp.etag != seg_etag)) or
(seg_size and (seg_resp.content_length != seg_size) and
not seg_req.range)):
# The content-length check is for security reasons. Seems
# possible that an attacker could upload a >1mb object and
# then replace it with a much smaller object with same
# etag. Then create a big nested SLO that calls that
# object many times which would hammer our obj servers. If
# this is a range request, don't check content-length
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'%(r_size)s != %(s_size)s.' %
{'path': seg_req.path, 'r_etag': seg_resp.etag,
'r_size': seg_resp.content_length,
's_etag': seg_etag,
's_size': seg_size})
else:
self.current_resp = seg_resp
seg_hash = None
if seg_resp.etag and not seg_req.headers.get('Range'):
                    # Only calculate the MD5 if we can use it to validate
seg_hash = hashlib.md5()
document_iters = maybe_multipart_byteranges_to_document_iters(
seg_resp.app_iter,
seg_resp.headers['Content-Type'])
for chunk in itertools.chain.from_iterable(document_iters):
if seg_hash:
seg_hash.update(chunk)
yield (seg_req.path, chunk)
close_if_possible(seg_resp.app_iter)
if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
raise SegmentError(
"Bad MD5 checksum in %(name)s for %(seg)s: headers had"
" %(etag)s, but object MD5 was actually %(actual)s" %
{'seg': seg_req.path, 'etag': seg_resp.etag,
'name': self.name, 'actual': seg_hash.hexdigest()})
def _byte_counting_iter(self):
# Checks that we give the client the right number of bytes. Raises
# SegmentError if the number of bytes is wrong.
bytes_left = self.response_body_length
for seg_name, chunk in self._requests_to_bytes_iter():
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
bytes_left -= len(chunk)
raise SegmentError(
'Too many bytes for %(name)s; truncating in '
'%(seg)s with %(left)d bytes left' %
{'name': self.name, 'seg': seg_name,
'left': -bytes_left})
if bytes_left:
raise SegmentError('Expected another %d bytes for %s; '
'closing connection' % (bytes_left, self.name))
def _time_limited_iter(self):
# Makes sure a GET response doesn't take more than self.max_get_time
start_time = time.time()
for chunk in self._byte_counting_iter():
now = time.time()
yield chunk
if now - start_time > self.max_get_time:
raise SegmentError(
'While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
def _internal_iter(self):
try:
for chunk in self._time_limited_iter():
yield chunk
except (ListingIterError, SegmentError) as err:
self.logger.error(err)
if not self.validated_first_segment:
raise
finally:
if self.current_resp:
close_if_possible(self.current_resp.app_iter)
def app_iter_range(self, *a, **kw):
return self
def app_iter_ranges(self, ranges, content_type, boundary, content_size):
si = Spliterator(self)
mri = multi_range_iterator(
ranges, content_type, boundary, content_size,
lambda start, end_plus_one: si.take(end_plus_one - start))
try:
for x in mri:
yield x
finally:
self.close()
def validate_first_segment(self):
if self.validated_first_segment:
return
try:
self.peeked_chunk = next(self.app_iter)
except StopIteration:
pass
finally:
self.validated_first_segment = True
def __iter__(self):
if self.peeked_chunk is not None:
pc = self.peeked_chunk
self.peeked_chunk = None
return itertools.chain([pc], self.app_iter)
else:
return self.app_iter
def close(self):
close_if_possible(self.app_iter)
def http_response_to_document_iters(response, read_chunk_size=4096):
chunked = is_chunked(dict(response.getheaders()))
if response.status == 200:
if chunked:
return iter([(0, None, None, response.getheaders(),
response)])
# Single "range" that's the whole object
content_length = int(response.getheader('Content-Length'))
return iter([(0, content_length - 1, content_length,
response.getheaders(), response)])
content_type, params_list = parse_content_type(
response.getheader('Content-Type'))
if content_type != 'multipart/byteranges':
start, end, length = parse_content_range(
response.getheader('Content-Range'))
return iter([(start, end, length, response.getheaders(), response)])
else:
params = dict(params_list)
return multipart_byteranges_to_document_iters(
response, params['boundary'], read_chunk_size)
def update_etag_is_at_header(req, name):
if ',' in name:
raise ValueError('Header name must not contain commas')
existing = req.headers.get("X-Backend-Etag-Is-At")
req.headers["X-Backend-Etag-Is-At"] = csv_append(
existing, name)
def resolve_etag_is_at_header(req, metadata):
alternate_etag = None
metadata = HeaderKeyDict(metadata)
if "X-Backend-Etag-Is-At" in req.headers:
names = list_from_csv(req.headers["X-Backend-Etag-Is-At"])
for name in names:
if name in metadata:
alternate_etag = metadata[name]
break
return alternate_etag
| true
| true
|
1c43d04ffcbbf8f8291758e9efda58952ca0da53
| 1,510
|
py
|
Python
|
nengolib/stats/ortho.py
|
ikajic/nengolib
|
bd30ec38ba656bedb94a267b5f86b51d1cec4954
|
[
"MIT"
] | 27
|
2016-01-21T04:11:02.000Z
|
2021-11-16T20:41:04.000Z
|
nengolib/stats/ortho.py
|
ikajic/nengolib
|
bd30ec38ba656bedb94a267b5f86b51d1cec4954
|
[
"MIT"
] | 178
|
2016-01-21T16:04:34.000Z
|
2021-05-01T16:28:02.000Z
|
nengolib/stats/ortho.py
|
ikajic/nengolib
|
bd30ec38ba656bedb94a267b5f86b51d1cec4954
|
[
"MIT"
] | 4
|
2019-03-19T18:22:02.000Z
|
2021-03-23T16:06:57.000Z
|
import numpy as np
from scipy.linalg import svd
from nengo.dists import UniformHypersphere
__all__ = ['random_orthogonal']
def random_orthogonal(d, rng=None):
"""Returns a random orthogonal matrix.
Parameters
----------
d : ``integer``
Positive dimension of returned matrix.
rng : :class:`numpy.random.RandomState` or ``None``, optional
Random number generator state.
Returns
-------
samples : ``(d, d) np.array``
Random orthogonal matrix (an orthonormal basis);
linearly transforms any vector into a uniformly sampled
vector on the ``d``--ball with the same L2 norm.
See Also
--------
:class:`.ScatteredHypersphere`
Examples
--------
>>> from nengolib.stats import random_orthogonal, sphere
>>> rng = np.random.RandomState(seed=0)
>>> u = sphere.sample(1000, 3, rng=rng)
>>> u[:, 0] = 0
>>> v = u.dot(random_orthogonal(3, rng=rng))
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
>>> ax = plt.subplot(111, projection='3d')
>>> ax.scatter(*u.T, alpha=.5, label="u")
>>> ax.scatter(*v.T, alpha=.5, label="v")
>>> ax.patch.set_facecolor('white')
>>> ax.set_xlim3d(-1, 1)
>>> ax.set_ylim3d(-1, 1)
>>> ax.set_zlim3d(-1, 1)
>>> plt.legend()
>>> plt.show()
"""
rng = np.random if rng is None else rng
m = UniformHypersphere(surface=True).sample(d, d, rng=rng)
u, s, v = svd(m)
return np.dot(u, v)
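# Example (illustrative): u and v come from an SVD, so both are orthogonal
# and their product is again orthogonal:
#
#     Q = random_orthogonal(3, rng=np.random.RandomState(0))
#     np.allclose(Q.dot(Q.T), np.eye(3))  # -> True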
| 26.964286
| 65
| 0.598675
|
import numpy as np
from scipy.linalg import svd
from nengo.dists import UniformHypersphere
__all__ = ['random_orthogonal']
def random_orthogonal(d, rng=None):
rng = np.random if rng is None else rng
m = UniformHypersphere(surface=True).sample(d, d, rng=rng)
u, s, v = svd(m)
return np.dot(u, v)
| true
| true
|
1c43d0671192f23cc6c504e35209d21603155e04
| 5,160
|
py
|
Python
|
wikimapper/cli.py
|
jcklie/wikimapper
|
fadecea085bfa11779e33e94b03a8dcdd2d045a7
|
[
"Apache-2.0"
] | 69
|
2019-05-07T02:41:57.000Z
|
2022-03-29T09:33:43.000Z
|
wikimapper/cli.py
|
jcklie/wikimapper
|
fadecea085bfa11779e33e94b03a8dcdd2d045a7
|
[
"Apache-2.0"
] | 6
|
2019-04-26T11:16:07.000Z
|
2021-04-08T15:35:33.000Z
|
wikimapper/cli.py
|
jcklie/wikimapper
|
fadecea085bfa11779e33e94b03a8dcdd2d045a7
|
[
"Apache-2.0"
] | 7
|
2020-02-14T20:00:23.000Z
|
2021-12-17T09:56:19.000Z
|
import argparse
import logging
import os
from wikimapper.__version__ import __version__
from wikimapper import download_wikidumps, create_index, WikiMapper
def main():
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
description = "Map Wikipedia page titles to Wikidata IDs and vice versa."
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(help="sub-command help", dest="command")
    # Download parser
parser_download = subparsers.add_parser(
"download", help="Download Wikipedia dumps for creating a custom index."
)
parser_download.add_argument(
"dumpname",
type=_dump_name,
        help='Name of the Wikipedia dump, e.g. "enwiki-latest" for the latest English Wikipedia dump or "barwiki-20190420" for a dump from the Bavarian Wikipedia taken on 20 April 2019',
)
parser_download.add_argument(
"--overwrite",
action="store_true",
help='Overwrite existing files if they already exist (default: "False")',
)
parser_download.add_argument(
"--dir",
type=_dir_path,
default=os.getcwd(),
help="Path to the folder in which the dump should be stored (default: current directory)",
)
parser_download.add_argument(
"--mirror",
type=str,
default="https://dumps.wikimedia.org",
help='URL of the Wikipedia dump mirror to use (default: "https://dumps.wikimedia.org")',
)
# Index creation parser
parser_create = subparsers.add_parser(
"create", help="Use a previously downloaded Wikipedia dump to create a custom index."
)
parser_create.add_argument(
"dumpname",
type=_dump_name,
        help='Name of the Wikipedia dump, e.g. "enwiki-latest" for the latest English Wikipedia dump or "barwiki-20190420" for a dump from the Bavarian Wikipedia taken on 20 April 2019',
)
parser_create.add_argument(
"--target",
default=None,
type=str,
help='Path and name of the index to create (default: "index_${dumpname}.db")',
)
parser_create.add_argument(
"--dumpdir",
type=_dir_path,
default=os.getcwd(),
help="Path to the folder in which the dump was stored (default: current directory)",
)
# Mapping parser
parser_title_to_id = subparsers.add_parser(
"title2id", help="Map a Wikipedia title to a Wikidata ID."
)
parser_title_to_id.add_argument(
"index", type=str, help="Path to the index file that shall be used for the mapping."
)
parser_title_to_id.add_argument(
"title",
type=str,
help="Page title to map. Spaces are replaced by underscores, the title should not be escaped.",
)
parser_url_to_id = subparsers.add_parser("url2id", help="Map a Wikipedia URL to a Wikidata ID.")
parser_url_to_id.add_argument(
"index", type=str, help="Path to the index file that shall be used for the mapping."
)
parser_url_to_id.add_argument(
"url",
type=str,
help="URL to map. It is not checked whether the URL comes from the same Wiki as the index.",
)
parser_id_to_title = subparsers.add_parser(
"id2titles", help="Map a Wikidata ID to one or more Wikipedia titles."
)
parser_id_to_title.add_argument(
"index", type=str, help="Path to the index file that shall be used for the mapping."
)
parser_id_to_title.add_argument("id", type=str, help="Wikidata ID to map.")
# Version
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
# Do the work
args = parser.parse_args()
if args.command == "download":
download_wikidumps(args.dumpname, args.dir, args.mirror, args.overwrite)
elif args.command == "create":
create_index(args.dumpname, args.dumpdir, args.target)
elif args.command == "title2id":
mapper = WikiMapper(args.index)
result = mapper.title_to_id(args.title)
if result:
print(result)
elif args.command == "url2id":
mapper = WikiMapper(args.index)
result = mapper.url_to_id(args.url)
if result:
print(result)
elif args.command == "id2titles":
mapper = WikiMapper(args.index)
results = mapper.id_to_titles(args.id)
for result in results:
print(result)
else:
parser.print_help()
def _dir_path(path) -> str:
""" Checks whether `path` is a valid path to a directory. """
if os.path.isdir(path):
return path
else:
raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path to a directory!")
def _dump_name(name) -> str:
""" Checks whether `name` is a valid Wikipedia dump name. """
parts = name.split("-")
err = lambda: argparse.ArgumentTypeError(f"dumpname: [{name}] is not a valid dump name")
if len(parts) != 2:
raise err()
wikiname, date = parts
if not wikiname.endswith("wiki"):
raise err()
return name
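# Example (illustrative): 'enwiki-latest' and 'barwiki-20190420' both pass
# the check (two dash-separated parts, the first ending in 'wiki'), whereas
# 'enwiki' or 'en-latest' raise an ArgumentTypeError.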
| 34.630872
| 193
| 0.650775
|
import argparse
import logging
import os
from wikimapper.__version__ import __version__
from wikimapper import download_wikidumps, create_index, WikiMapper
def main():
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
description = "Map Wikipedia page titles to Wikidata IDs and vice versa."
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(help="sub-command help", dest="command")
parser_download = subparsers.add_parser(
"download", help="Download Wikipedia dumps for creating a custom index."
)
parser_download.add_argument(
"dumpname",
type=_dump_name,
        help='Name of the Wikipedia dump, e.g. "enwiki-latest" for the latest English Wikipedia dump or "barwiki-20190420" for a dump from the Bavarian Wikipedia taken on 20 April 2019',
)
parser_download.add_argument(
"--overwrite",
action="store_true",
help='Overwrite existing files if they already exist (default: "False")',
)
parser_download.add_argument(
"--dir",
type=_dir_path,
default=os.getcwd(),
help="Path to the folder in which the dump should be stored (default: current directory)",
)
parser_download.add_argument(
"--mirror",
type=str,
default="https://dumps.wikimedia.org",
help='URL of the Wikipedia dump mirror to use (default: "https://dumps.wikimedia.org")',
)
parser_create = subparsers.add_parser(
"create", help="Use a previously downloaded Wikipedia dump to create a custom index."
)
parser_create.add_argument(
"dumpname",
type=_dump_name,
        help='Name of the Wikipedia dump, e.g. "enwiki-latest" for the latest English Wikipedia dump or "barwiki-20190420" for a dump from the Bavarian Wikipedia taken on 20 April 2019',
)
parser_create.add_argument(
"--target",
default=None,
type=str,
help='Path and name of the index to create (default: "index_${dumpname}.db")',
)
parser_create.add_argument(
"--dumpdir",
type=_dir_path,
default=os.getcwd(),
help="Path to the folder in which the dump was stored (default: current directory)",
)
parser_title_to_id = subparsers.add_parser(
"title2id", help="Map a Wikipedia title to a Wikidata ID."
)
parser_title_to_id.add_argument(
"index", type=str, help="Path to the index file that shall be used for the mapping."
)
parser_title_to_id.add_argument(
"title",
type=str,
help="Page title to map. Spaces are replaced by underscores, the title should not be escaped.",
)
parser_url_to_id = subparsers.add_parser("url2id", help="Map a Wikipedia URL to a Wikidata ID.")
parser_url_to_id.add_argument(
"index", type=str, help="Path to the index file that shall be used for the mapping."
)
parser_url_to_id.add_argument(
"url",
type=str,
help="URL to map. It is not checked whether the URL comes from the same Wiki as the index.",
)
parser_id_to_title = subparsers.add_parser(
"id2titles", help="Map a Wikidata ID to one or more Wikipedia titles."
)
parser_id_to_title.add_argument(
"index", type=str, help="Path to the index file that shall be used for the mapping."
)
parser_id_to_title.add_argument("id", type=str, help="Wikidata ID to map.")
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
args = parser.parse_args()
if args.command == "download":
download_wikidumps(args.dumpname, args.dir, args.mirror, args.overwrite)
elif args.command == "create":
create_index(args.dumpname, args.dumpdir, args.target)
elif args.command == "title2id":
mapper = WikiMapper(args.index)
result = mapper.title_to_id(args.title)
if result:
print(result)
elif args.command == "url2id":
mapper = WikiMapper(args.index)
result = mapper.url_to_id(args.url)
if result:
print(result)
elif args.command == "id2titles":
mapper = WikiMapper(args.index)
results = mapper.id_to_titles(args.id)
for result in results:
print(result)
else:
parser.print_help()
def _dir_path(path) -> str:
if os.path.isdir(path):
return path
else:
raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path to a directory!")
def _dump_name(name) -> str:
parts = name.split("-")
err = lambda: argparse.ArgumentTypeError(f"dumpname: [{name}] is not a valid dump name")
if len(parts) != 2:
raise err()
wikiname, date = parts
if not wikiname.endswith("wiki"):
raise err()
return name
| true
| true
|
1c43d0fc92643c28afe3143be964b00509b6b818
| 22,725
|
py
|
Python
|
build/lib/pspnet/pspnet.py
|
NamTran838P/pspnet-keras
|
4005fd7867036e5476bcc694fd2f548a22860d4b
|
[
"MIT"
] | null | null | null |
build/lib/pspnet/pspnet.py
|
NamTran838P/pspnet-keras
|
4005fd7867036e5476bcc694fd2f548a22860d4b
|
[
"MIT"
] | null | null | null |
build/lib/pspnet/pspnet.py
|
NamTran838P/pspnet-keras
|
4005fd7867036e5476bcc694fd2f548a22860d4b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
A Keras/Tensorflow implementation of Pyramid Scene Parsing Networks.
Original paper & code published by Hengshuang Zhao et al. (2017)
"""
from __future__ import print_function
from __future__ import division
from os.path import splitext, join, isfile, isdir
from os import environ, walk
from math import ceil
import argparse
import glob
import fnmatch
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons
from scipy import misc, ndimage
from keras import backend as K
from keras.models import model_from_json
import tensorflow as tf
from layers_builder import build_pspnet
from utils import download_weights, download_npy_weights, preprocess_image, color_class_image, gt_image_to_class_image
from evaluation import evaluate_iou
warnings.filterwarnings('ignore', '.*output shape of zoom.*')
__author__ = "Vlad Kryvoruchko, Chaoyue Wang, Jeffrey Hu & Julian Tatsch"
class PSPNet(object):
"""Pyramid Scene Parsing Network by Hengshuang Zhao et al 2017."""
def __init__(self, nb_classes, resnet_layers, input_shape, weights):
"""Instanciate a PSPNet."""
self.input_shape = input_shape
self.nb_classes = nb_classes
json_path = join("..", "weights", "keras", weights + ".json")
h5_path = join("..", "weights", "keras", weights + ".h5")
if not isfile(json_path) and not isfile(h5_path):
download_weights(weights)
if isfile(json_path) and isfile(h5_path):
print("Keras model & weights found, loading...")
with open(json_path, 'r') as file_handle:
try:
self.model = model_from_json(file_handle.read())
                except ValueError as err:  # bad marshal data error when loading py2 model in py3 and vice versa
# https://github.com/fchollet/keras/issues/7440
print("Couldn't import model from json because it was build using a different python version: %s" % err)
print("Rebuilding pspnet model ...")
self.model = build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
print("Saving pspnet to disk ...")
json_string = self.model.to_json()
with open(json_path, 'w') as file_handle:
file_handle.write(json_string)
except SystemError as err: # bad marshal data error when loading py3.5 model in py3.6
print("Couldn't import model from json because it was build using a different python version: %s" % err)
print("Converting pspnet model from npy")
self.model = build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
self.set_npy_weights(weights)
self.model.load_weights(h5_path)
else:
print("No Keras model & weights found, import from npy weights.")
self.model = build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
self.set_npy_weights(weights)
def predict(self, img, flip_evaluation):
"""
        Predict segmentation for an image.
Arguments:
img: must be rowsxcolsx3
"""
h_ori, w_ori = img.shape[:2]
if img.shape[0:2] != self.input_shape:
print("Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (img.shape[0:2], self.input_shape))
img = misc.imresize(img, self.input_shape)
data = preprocess_image(img, mean=[[[174.08136209, 163.97867657, 138.72837669]]])
# debug(self.model, input_data)
if flip_evaluation:
input_with_flipped = np.array([data, np.flip(data, axis=1)])
prediction_with_flipped = self.model.predict(input_with_flipped)
prediction = (prediction_with_flipped[0] + np.fliplr(prediction_with_flipped[1])) / 2.0
else:
prediction = self.model.predict(np.expand_dims(data, 0))[0]
        if (h_ori, w_ori) != self.input_shape:  # upscale prediction if necessary
            h, w = prediction.shape[:2]
            prediction = ndimage.zoom(prediction, (1.*h_ori/h, 1.*w_ori/w, 1.),
                                      order=1, prefilter=False)
        return prediction
def set_npy_weights(self, weights_path):
"""Set weights from the intermediary npy file."""
npy_weights_path = join("..", "weights", "npy", weights_path + ".npy")
json_path = join("..", "weights", "keras", weights_path + ".json")
h5_path = join("..", "weights", "keras", weights_path + ".h5")
if not isfile(npy_weights_path):
download_npy_weights(weights_path)
print("Importing weights from %s" % npy_weights_path)
weights = np.load(npy_weights_path, encoding="latin1").item()
whitelist = ["InputLayer", "Activation", "ZeroPadding2D", "Add", "MaxPooling2D", "AveragePooling2D", "Lambda", "Concatenate", "Dropout"]
weights_set = 0
for layer in self.model.layers:
print("Processing %s" % layer.name)
if layer.name[:4] == 'conv' and layer.name[-2:] == 'bn':
mean = weights[layer.name]['mean'].reshape(-1)
variance = weights[layer.name]['variance'].reshape(-1)
scale = weights[layer.name]['scale'].reshape(-1)
offset = weights[layer.name]['offset'].reshape(-1)
self.model.get_layer(layer.name).set_weights([scale, offset, mean, variance])
weights_set += 1
elif layer.name[:4] == 'conv' and not layer.name[-4:] == 'relu':
try:
weight = weights[layer.name]['weights']
self.model.get_layer(layer.name).set_weights([weight])
except Exception:
biases = weights[layer.name]['biases']
self.model.get_layer(layer.name).set_weights([weight,
biases])
weights_set += 1
elif layer.__class__.__name__ in whitelist:
# print("Nothing to set in %s" % layer.__class__.__name__)
pass
else:
print("Warning: Did not find weights for keras layer %s in numpy weights" % layer)
print("Set a total of %i weights" % weights_set)
print('Finished importing weights.')
print("Writing keras model & weights")
json_string = self.model.to_json()
with open(json_path, 'w') as file_handle:
file_handle.write(json_string)
self.model.save_weights(h5_path)
print("Finished writing Keras model & weights")
class PSPNet50(PSPNet):
"""Build a PSPNet based on a 50-Layer ResNet."""
def __init__(self, nb_classes, weights, input_shape):
"""Instanciate a PSPNet50."""
PSPNet.__init__(self, nb_classes=nb_classes, resnet_layers=50,
input_shape=input_shape, weights=weights)
class PSPNet101(PSPNet):
"""Build a PSPNet based on a 101-Layer ResNet."""
def __init__(self, nb_classes, weights, input_shape):
"""Instanciate a PSPNet101."""
PSPNet.__init__(self, nb_classes=nb_classes, resnet_layers=101,
input_shape=input_shape, weights=weights)
def pad_image(img, target_size):
"""Pad an image up to the target size."""
rows_missing = target_size[0] - img.shape[0]
cols_missing = target_size[1] - img.shape[1]
padded_img = np.pad(img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant')
return padded_img
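# Example (illustrative): a 300x400x3 crop padded for a (473, 473) network
# input gains 173 rows and 73 columns of zeros on the bottom/right edges:
#
#     pad_image(np.zeros((300, 400, 3)), (473, 473)).shape  # -> (473, 473, 3)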
def produce_view(input_image, class_image, id2label, viewstyle):
"""Produce an image ready for plotting or saving."""
view = None
if viewstyle == 'original':
view = input_image
elif (viewstyle == 'predictions') or (viewstyle == 'overlay'):
view = color_class_image(class_image, id2label)
if viewstyle == 'overlay':
view = (0.5 * view.astype(np.float32) + 0.5 * input_image.astype(np.float32)).astype(np.uint8)
else:
print("Unknown view style")
return view
def visualize_prediction(input_image, class_scores, id2label):
"""Visualize prediction in faux colors."""
class_image = np.argmax(class_scores, axis=2)
fig = plt.figure()
axis = fig.add_subplot(111)
def button_handler(viewstyle):
axis.imshow(produce_view(input_image, class_image, id2label, viewstyle))
plt.draw()
# plt.subplots_adjust(left=0.3)
rax = plt.axes([0.4, 0.05, 0.2, 0.15])
radio_buttons = RadioButtons(rax, ('original', 'overlay', 'predictions'))
radio_buttons.on_clicked(button_handler)
# image = produce_view(input_image, class_image, 'overlay')
# axis.imshow(image)
button_handler('original')
axis.set_axis_off()
# overwrite the status bar with class information
axis.format_coord = lambda x, y: id2label[class_image[int(y), int(x)]].name
plt.show()
def show_class_heatmap(class_scores, class_name):
"""Show a heatmap with the probabilities of a certain class."""
try:
class_id = name2label[class_name].id
class_heatmap = class_scores[:, :, class_id]
plt.axis('off')
plt.imshow(class_heatmap, cmap='coolwarm')
plt.show()
except KeyError as err:
print("Could not find index for %s because of %s" % (class_name, err))
def show_class_heatmaps(class_scores):
"""
Show heatmap with the probabilities of a certain class.
Cycle through with lef and right arrow keys.
"""
show_class_heatmaps.curr_index = 0
def key_event(event):
"""Handle forward & backward arrow key presses."""
if event.key == "right":
show_class_heatmaps.curr_index += 1
elif event.key == "left":
show_class_heatmaps.curr_index -= 1
else:
return
show_class_heatmaps.curr_index = show_class_heatmaps.curr_index % class_scores.shape[2]
axis.cla()
class_heatmap = class_scores[:, :, show_class_heatmaps.curr_index]
axis.imshow(class_heatmap, cmap='coolwarm')
axis.set_axis_off()
fig.canvas.set_window_title(id2label[show_class_heatmaps.curr_index].name)
fig.canvas.draw()
fig = plt.figure()
fig.canvas.mpl_connect('key_press_event', key_event)
fig.canvas.set_window_title(id2label[show_class_heatmaps.curr_index].name)
axis = fig.add_subplot(111)
class_heatmap = class_scores[:, :, show_class_heatmaps.curr_index]
axis.imshow(class_heatmap, cmap='coolwarm')
axis.set_axis_off()
plt.show()
def predict_sliding(full_image, net, flip_evaluation):
"""
Predict on tiles of exactly the network input shape.
This way nothing gets squeezed.
"""
tile_size = net.input_shape
classes = net.model.outputs[0].shape[3]
overlap = 1/3
stride = ceil(tile_size[0] * (1 - overlap))
tile_rows = max(int(ceil((full_image.shape[0] - tile_size[0]) / stride) + 1), 1) # strided convolution formula
tile_cols = max(int(ceil((full_image.shape[1] - tile_size[1]) / stride) + 1), 1)
print("Need %i x %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, stride))
full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
count_predictions = np.zeros((full_image.shape[0], full_image.shape[1], classes))
tile_counter = 0
for row in range(tile_rows):
for col in range(tile_cols):
x1 = int(col * stride)
y1 = int(row * stride)
x2 = min(x1 + tile_size[1], full_image.shape[1])
y2 = min(y1 + tile_size[0], full_image.shape[0])
x1 = max(int(x2 - tile_size[1]), 0) # for portrait images the x1 underflows sometimes
y1 = max(int(y2 - tile_size[0]), 0) # for very few rows y1 underflows
img = full_image[y1:y2, x1:x2]
padded_img = pad_image(img, tile_size)
# plt.imshow(padded_img)
# plt.show()
tile_counter += 1
print("Predicting tile %i" % tile_counter)
padded_prediction = net.predict(padded_img, flip_evaluation)
prediction = padded_prediction[0:img.shape[0], 0:img.shape[1], :]
count_predictions[y1:y2, x1:x2] += 1
full_probs[y1:y2, x1:x2] += prediction # accumulate the predictions also in the overlapping regions
# average the predictions in the overlapping regions
full_probs /= count_predictions
# visualize normalization Weights
# plt.imshow(np.mean(count_predictions, axis=2))
# plt.show()
return full_probs
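# Example (illustrative): with the 473x473 network input and overlap 1/3, the
# stride is ceil(473 * 2/3) = 316 px, so a 1024x2048 image is covered by
# ceil((1024 - 473) / 316) + 1 = 3 tile rows and
# ceil((2048 - 473) / 316) + 1 = 6 tile columns.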
def predict_multi_scale(full_image, net, scales, sliding_evaluation, flip_evaluation):
"""Predict an image by looking at it with different scales."""
classes = net.model.outputs[0].shape[3]
full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
h_ori, w_ori = full_image.shape[:2]
for scale in scales:
print("Predicting image scaled by %f" % scale)
scaled_img = misc.imresize(full_image, size=scale, interp="bilinear")
if sliding_evaluation:
scaled_probs = predict_sliding(scaled_img, net, flip_evaluation)
else:
scaled_probs = net.predict(scaled_img, flip_evaluation)
# scale probs up to full size
h, w = scaled_probs.shape[:2]
probs = ndimage.zoom(scaled_probs, (1.*h_ori/h, 1.*w_ori/w, 1.),
order=1, prefilter=False)
# visualize_prediction(probs)
# integrate probs over all scales
full_probs += probs
full_probs /= len(scales)
return full_probs
def trainid_to_class_image(trainid_image):
"""Inflate an image with trainId's into a full class image with class ids."""
from cityscapesscripts.helpers.labels import trainId2label
class_image = np.zeros(trainid_image.shape, np.uint8)
try:
for row in range(trainid_image.shape[0]):
for col in range(trainid_image.shape[1]):
class_image[row][col] = trainId2label[trainid_image[row][col]].id
except Exception as ex:
print("Unknown trainid : %s" % ex)
return class_image
def find_matching_gt(gt_dir, image_name, model_name, verbose=False):
"""Find a matching ground truth in gt_dir for image_name."""
if "cityscapes" in model_name:
filter_string = image_name + "*labelIds.png"
else:
filter_string = image_name + "*.png"
for root, __, files in walk(gt_dir):
for filename in fnmatch.filter(files, filter_string):
if verbose:
print("Found matching groundtruth at: %s" % join(root, filename))
return join(root, filename)
def complete_coarse_image(coarse_image, predicted_img):
"""Complete a coarsely labeld cityscapes image with predictions."""
mask_indices = coarse_image == 0 # complete everywhere where coarse_image is 0
coarse_image[mask_indices] = predicted_img[mask_indices]
return coarse_image
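# Example (illustrative sketch): Cityscapes coarse annotations mark unlabeled
# pixels with class id 0, so
#
#     completed = complete_coarse_image(coarse_img, predicted_img)
#
# keeps every labeled pixel of the coarse image and fills only the zero-valued
# pixels from the prediction.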
def main():
"""Run when running this module as the primary one."""
EVALUATION_SCALES = [1.0] # must be all floats!
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', type=str, default='pspnet50_ade20k',
help='Model/Weights to use',
choices=['pspnet50_ade20k',
'pspnet101_cityscapes',
'pspnet101_voc2012'])
parser.add_argument('-i', '--input_path', type=str, default='../example_images',
help='Path to the input images')
parser.add_argument('-o', '--output_path', type=str, default='../example_results',
help='Path to output')
parser.add_argument('-g', '--groundtruth_path', type=str, default='../example_groundtruth',
help='Path to groundtruth')
parser.add_argument('--id', default="0")
parser.add_argument('-s', '--sliding', action='store_true', default=True,
help="Whether the network should be slided over the original image for prediction.")
parser.add_argument('-f', '--flip', action='store_true', default=True,
help="Whether the network should predict on both image and flipped image.")
parser.add_argument('-ms', '--multi_scale', action='store_true',
help="Whether the network should predict on multiple scales.")
parser.add_argument('-hm', '--heat_maps', action='store_true',
help="Whether the network should diplay heatmaps.")
parser.add_argument('-v', '--vis', action='store_true',
help="Whether an interactive plot should be diplayed.")
parser.add_argument('-cci', '--complete_coarse_image', action='store_true',
help="Whether a coarse imae should be completed with predictions.")
parser.add_argument('-e', '--evaluate', action='store_true',
help="Whether an evaluation against groundtruth should be attempted.")
args = parser.parse_args()
environ["CUDA_VISIBLE_DEVICES"] = args.id
sess = tf.Session()
K.set_session(sess)
with sess.as_default():
print(args)
import os
cwd = os.getcwd()
print("Running in %s" % cwd)
image_paths = []
if isfile(args.input_path):
image_paths.append(args.input_path)
elif isdir(args.input_path):
file_types = ('png', 'jpg')
for file_type in file_types:
image_paths.extend(glob.glob(join(args.input_path + '/**/*.' + file_type), recursive=True))
image_paths = sorted(image_paths)
# print(image_paths)
if "pspnet50" in args.model:
pspnet = PSPNet50(nb_classes=150, input_shape=(473, 473),
weights=args.model)
if "ade20k" in args.model:
from ade20k_labels import id2label, name2label
elif "pspnet101" in args.model:
if "cityscapes" in args.model:
pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
weights=args.model)
from cityscapes_labels import id2label, name2label
if "voc2012" in args.model:
pspnet = PSPNet101(nb_classes=21, input_shape=(473, 473),
weights=args.model)
from pascal_voc_labels import id2label, name2label
else:
print("Network architecture not implemented.")
if args.multi_scale:
EVALUATION_SCALES = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] # original implementation, must be all floats!
for image_path in image_paths:
image_name, ext = splitext(os.path.basename(image_path))
            image_name = image_name.replace('_leftImg8bit', '')  # strip leftImg8bit tag for gt matching and producing groundtruth
print("Predicting image name: %s" % (image_name + ext))
img = misc.imread(image_path)
class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES, args.sliding, args.flip)
if args.heat_maps:
# show_class_heatmap(class_scores, 'person')
show_class_heatmaps(class_scores)
# visualize_prediction(img, class_scores, id2label)
class_image = np.argmax(class_scores, axis=2)
output_path, _ = splitext(args.output_path)
if not os.path.exists(output_path):
os.makedirs(output_path)
output_path = join(output_path, image_name)
print("Writing results to %s" % (output_path + ext))
            confidence_map = np.max(class_scores, axis=2)  # probability of the most likely class, a vague measure of the network's confidence
colored_class_image = color_class_image(class_image, id2label)
# colored_class_image is [0.0-1.0] img is [0-255]
alpha_blended = 0.5 * colored_class_image + 0.5 * img
if "cityscapes" in args.model:
class_image = trainid_to_class_image(class_image)
misc.imsave(output_path + "_gtFine_labelIds" + ext, class_image)
misc.imsave(output_path + "_seg" + ext, colored_class_image)
misc.imsave(output_path + "_probs" + ext, confidence_map)
misc.imsave(output_path + "_seg_blended" + ext, alpha_blended)
gt_path = find_matching_gt(args.groundtruth_path, image_name, args.model, verbose=True)
if gt_path is not None:
if args.complete_coarse_image: # only for cityscapes
try:
coarse_image = misc.imread(gt_path)
class_image = complete_coarse_image(coarse_image, class_image)
misc.imsave(output_path + "_gtFine_labelIds" + ext, class_image)
except AttributeError as err:
print("Warning: Could not read groundtruth: %s" % err)
if args.evaluate:
if "cityscapes" in args.model:
evaluate_iou([class_image], [misc.imread(gt_path)], classes=35)
else:
# gt_image to class image
gt_image = misc.imread(gt_path)
gt_class_image = gt_image_to_class_image(gt_image, id2label)
evaluate_iou([class_image], [gt_class_image], classes=pspnet.nb_classes)
else:
print("Could not find groundtruth for %s" % image_name)
if __name__ == "__main__":
main()
| 45.089286
| 169
| 0.602552
|
from __future__ import print_function
from __future__ import division
from os.path import splitext, join, isfile, isdir
from os import environ, walk
from math import ceil
import argparse
import glob
import fnmatch
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons
from scipy import misc, ndimage
from keras import backend as K
from keras.models import model_from_json
import tensorflow as tf
from layers_builder import build_pspnet
from utils import download_weights, download_npy_weights, preprocess_image, color_class_image, gt_image_to_class_image
from evaluation import evaluate_iou
warnings.filterwarnings('ignore', '.*output shape of zoom.*')
__author__ = "Vlad Kryvoruchko, Chaoyue Wang, Jeffrey Hu & Julian Tatsch"
class PSPNet(object):
def __init__(self, nb_classes, resnet_layers, input_shape, weights):
self.input_shape = input_shape
self.nb_classes = nb_classes
json_path = join("..", "weights", "keras", weights + ".json")
h5_path = join("..", "weights", "keras", weights + ".h5")
if not isfile(json_path) and not isfile(h5_path):
download_weights(weights)
if isfile(json_path) and isfile(h5_path):
print("Keras model & weights found, loading...")
with open(json_path, 'r') as file_handle:
try:
self.model = model_from_json(file_handle.read())
except ValueError as err:
print("Couldn't import model from json because it was build using a different python version: %s" % err)
print("Rebuilding pspnet model ...")
self.model = build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
print("Saving pspnet to disk ...")
json_string = self.model.to_json()
with open(json_path, 'w') as file_handle:
file_handle.write(json_string)
except SystemError as err: # bad marshal data error when loading py3.5 model in py3.6
print("Couldn't import model from json because it was build using a different python version: %s" % err)
print("Converting pspnet model from npy")
self.model = build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
self.set_npy_weights(weights)
self.model.load_weights(h5_path)
else:
print("No Keras model & weights found, import from npy weights.")
self.model = build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
self.set_npy_weights(weights)
def predict(self, img, flip_evaluation):
h_ori, w_ori = img.shape[:2]
if img.shape[0:2] != self.input_shape:
print("Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (img.shape[0:2], self.input_shape))
img = misc.imresize(img, self.input_shape)
data = preprocess_image(img, mean=[[[174.08136209, 163.97867657, 138.72837669]]])
if flip_evaluation:
input_with_flipped = np.array([data, np.flip(data, axis=1)])
prediction_with_flipped = self.model.predict(input_with_flipped)
prediction = (prediction_with_flipped[0] + np.fliplr(prediction_with_flipped[1])) / 2.0
else:
            prediction = self.model.predict(np.expand_dims(data, 0))[0]
        if (h_ori, w_ori) != self.input_shape:
            h, w = prediction.shape[:2]
            prediction = ndimage.zoom(prediction, (1.*h_ori/h, 1.*w_ori/w, 1.),
                                      order=1, prefilter=False)
        return prediction
def set_npy_weights(self, weights_path):
npy_weights_path = join("..", "weights", "npy", weights_path + ".npy")
json_path = join("..", "weights", "keras", weights_path + ".json")
h5_path = join("..", "weights", "keras", weights_path + ".h5")
if not isfile(npy_weights_path):
download_npy_weights(weights_path)
print("Importing weights from %s" % npy_weights_path)
weights = np.load(npy_weights_path, encoding="latin1").item()
whitelist = ["InputLayer", "Activation", "ZeroPadding2D", "Add", "MaxPooling2D", "AveragePooling2D", "Lambda", "Concatenate", "Dropout"]
weights_set = 0
for layer in self.model.layers:
print("Processing %s" % layer.name)
if layer.name[:4] == 'conv' and layer.name[-2:] == 'bn':
mean = weights[layer.name]['mean'].reshape(-1)
variance = weights[layer.name]['variance'].reshape(-1)
scale = weights[layer.name]['scale'].reshape(-1)
offset = weights[layer.name]['offset'].reshape(-1)
self.model.get_layer(layer.name).set_weights([scale, offset, mean, variance])
weights_set += 1
elif layer.name[:4] == 'conv' and not layer.name[-4:] == 'relu':
try:
weight = weights[layer.name]['weights']
self.model.get_layer(layer.name).set_weights([weight])
except Exception:
biases = weights[layer.name]['biases']
self.model.get_layer(layer.name).set_weights([weight,
biases])
weights_set += 1
elif layer.__class__.__name__ in whitelist:
pass
else:
print("Warning: Did not find weights for keras layer %s in numpy weights" % layer)
print("Set a total of %i weights" % weights_set)
print('Finished importing weights.')
print("Writing keras model & weights")
json_string = self.model.to_json()
with open(json_path, 'w') as file_handle:
file_handle.write(json_string)
self.model.save_weights(h5_path)
print("Finished writing Keras model & weights")
class PSPNet50(PSPNet):
def __init__(self, nb_classes, weights, input_shape):
PSPNet.__init__(self, nb_classes=nb_classes, resnet_layers=50,
input_shape=input_shape, weights=weights)
class PSPNet101(PSPNet):
def __init__(self, nb_classes, weights, input_shape):
PSPNet.__init__(self, nb_classes=nb_classes, resnet_layers=101,
input_shape=input_shape, weights=weights)
def pad_image(img, target_size):
rows_missing = target_size[0] - img.shape[0]
cols_missing = target_size[1] - img.shape[1]
padded_img = np.pad(img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant')
return padded_img
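# Illustrative only: pad_image zero-pads on the bottom/right up to the tile
# size, shown here on a toy 2x3 RGB "image".
def _demo_pad_image():
    img = np.ones((2, 3, 3))
    return pad_image(img, (4, 4)).shape  # -> (4, 4, 3)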
def produce_view(input_image, class_image, id2label, viewstyle):
view = None
if viewstyle == 'original':
view = input_image
elif (viewstyle == 'predictions') or (viewstyle == 'overlay'):
view = color_class_image(class_image, id2label)
if viewstyle == 'overlay':
view = (0.5 * view.astype(np.float32) + 0.5 * input_image.astype(np.float32)).astype(np.uint8)
else:
print("Unknown view style")
return view
def visualize_prediction(input_image, class_scores, id2label):
class_image = np.argmax(class_scores, axis=2)
fig = plt.figure()
axis = fig.add_subplot(111)
def button_handler(viewstyle):
axis.imshow(produce_view(input_image, class_image, id2label, viewstyle))
plt.draw()
rax = plt.axes([0.4, 0.05, 0.2, 0.15])
radio_buttons = RadioButtons(rax, ('original', 'overlay', 'predictions'))
radio_buttons.on_clicked(button_handler)
button_handler('original')
axis.set_axis_off()
axis.format_coord = lambda x, y: id2label[class_image[int(y), int(x)]].name
plt.show()
def show_class_heatmap(class_scores, class_name):
try:
class_id = name2label[class_name].id
class_heatmap = class_scores[:, :, class_id]
plt.axis('off')
plt.imshow(class_heatmap, cmap='coolwarm')
plt.show()
except KeyError as err:
print("Could not find index for %s because of %s" % (class_name, err))
def show_class_heatmaps(class_scores):
show_class_heatmaps.curr_index = 0
def key_event(event):
if event.key == "right":
show_class_heatmaps.curr_index += 1
elif event.key == "left":
show_class_heatmaps.curr_index -= 1
else:
return
show_class_heatmaps.curr_index = show_class_heatmaps.curr_index % class_scores.shape[2]
axis.cla()
class_heatmap = class_scores[:, :, show_class_heatmaps.curr_index]
axis.imshow(class_heatmap, cmap='coolwarm')
axis.set_axis_off()
fig.canvas.set_window_title(id2label[show_class_heatmaps.curr_index].name)
fig.canvas.draw()
fig = plt.figure()
fig.canvas.mpl_connect('key_press_event', key_event)
fig.canvas.set_window_title(id2label[show_class_heatmaps.curr_index].name)
axis = fig.add_subplot(111)
class_heatmap = class_scores[:, :, show_class_heatmaps.curr_index]
axis.imshow(class_heatmap, cmap='coolwarm')
axis.set_axis_off()
plt.show()
def predict_sliding(full_image, net, flip_evaluation):
tile_size = net.input_shape
classes = net.model.outputs[0].shape[3]
overlap = 1/3
stride = ceil(tile_size[0] * (1 - overlap))
tile_rows = max(int(ceil((full_image.shape[0] - tile_size[0]) / stride) + 1), 1)
tile_cols = max(int(ceil((full_image.shape[1] - tile_size[1]) / stride) + 1), 1)
print("Need %i x %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, stride))
full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
count_predictions = np.zeros((full_image.shape[0], full_image.shape[1], classes))
tile_counter = 0
for row in range(tile_rows):
for col in range(tile_cols):
x1 = int(col * stride)
y1 = int(row * stride)
x2 = min(x1 + tile_size[1], full_image.shape[1])
y2 = min(y1 + tile_size[0], full_image.shape[0])
x1 = max(int(x2 - tile_size[1]), 0)
y1 = max(int(y2 - tile_size[0]), 0)
img = full_image[y1:y2, x1:x2]
padded_img = pad_image(img, tile_size)
tile_counter += 1
print("Predicting tile %i" % tile_counter)
padded_prediction = net.predict(padded_img, flip_evaluation)
prediction = padded_prediction[0:img.shape[0], 0:img.shape[1], :]
count_predictions[y1:y2, x1:x2] += 1
full_probs[y1:y2, x1:x2] += prediction
full_probs /= count_predictions
return full_probs
def predict_multi_scale(full_image, net, scales, sliding_evaluation, flip_evaluation):
classes = net.model.outputs[0].shape[3]
full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
h_ori, w_ori = full_image.shape[:2]
for scale in scales:
print("Predicting image scaled by %f" % scale)
scaled_img = misc.imresize(full_image, size=scale, interp="bilinear")
if sliding_evaluation:
scaled_probs = predict_sliding(scaled_img, net, flip_evaluation)
else:
scaled_probs = net.predict(scaled_img, flip_evaluation)
h, w = scaled_probs.shape[:2]
probs = ndimage.zoom(scaled_probs, (1.*h_ori/h, 1.*w_ori/w, 1.),
order=1, prefilter=False)
full_probs += probs
full_probs /= len(scales)
return full_probs
def trainid_to_class_image(trainid_image):
from cityscapesscripts.helpers.labels import trainId2label
class_image = np.zeros(trainid_image.shape, np.uint8)
try:
for row in range(trainid_image.shape[0]):
for col in range(trainid_image.shape[1]):
class_image[row][col] = trainId2label[trainid_image[row][col]].id
except Exception as ex:
print("Unknown trainid : %s" % ex)
return class_image
def find_matching_gt(gt_dir, image_name, model_name, verbose=False):
if "cityscapes" in model_name:
filter_string = image_name + "*labelIds.png"
else:
filter_string = image_name + "*.png"
for root, __, files in walk(gt_dir):
for filename in fnmatch.filter(files, filter_string):
if verbose:
print("Found matching groundtruth at: %s" % join(root, filename))
return join(root, filename)
def complete_coarse_image(coarse_image, predicted_img):
mask_indices = coarse_image == 0
coarse_image[mask_indices] = predicted_img[mask_indices]
return coarse_image
def main():
EVALUATION_SCALES = [1.0]
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', type=str, default='pspnet50_ade20k',
help='Model/Weights to use',
choices=['pspnet50_ade20k',
'pspnet101_cityscapes',
'pspnet101_voc2012'])
parser.add_argument('-i', '--input_path', type=str, default='../example_images',
help='Path to the input images')
parser.add_argument('-o', '--output_path', type=str, default='../example_results',
help='Path to output')
parser.add_argument('-g', '--groundtruth_path', type=str, default='../example_groundtruth',
help='Path to groundtruth')
parser.add_argument('--id', default="0")
parser.add_argument('-s', '--sliding', action='store_true', default=True,
help="Whether the network should be slided over the original image for prediction.")
parser.add_argument('-f', '--flip', action='store_true', default=True,
help="Whether the network should predict on both image and flipped image.")
parser.add_argument('-ms', '--multi_scale', action='store_true',
help="Whether the network should predict on multiple scales.")
parser.add_argument('-hm', '--heat_maps', action='store_true',
help="Whether the network should diplay heatmaps.")
parser.add_argument('-v', '--vis', action='store_true',
help="Whether an interactive plot should be diplayed.")
parser.add_argument('-cci', '--complete_coarse_image', action='store_true',
help="Whether a coarse imae should be completed with predictions.")
parser.add_argument('-e', '--evaluate', action='store_true',
help="Whether an evaluation against groundtruth should be attempted.")
args = parser.parse_args()
environ["CUDA_VISIBLE_DEVICES"] = args.id
sess = tf.Session()
K.set_session(sess)
with sess.as_default():
print(args)
import os
cwd = os.getcwd()
print("Running in %s" % cwd)
image_paths = []
if isfile(args.input_path):
image_paths.append(args.input_path)
elif isdir(args.input_path):
file_types = ('png', 'jpg')
for file_type in file_types:
image_paths.extend(glob.glob(join(args.input_path + '/**/*.' + file_type), recursive=True))
image_paths = sorted(image_paths)
if "pspnet50" in args.model:
pspnet = PSPNet50(nb_classes=150, input_shape=(473, 473),
weights=args.model)
if "ade20k" in args.model:
from ade20k_labels import id2label, name2label
elif "pspnet101" in args.model:
if "cityscapes" in args.model:
pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
weights=args.model)
from cityscapes_labels import id2label, name2label
if "voc2012" in args.model:
pspnet = PSPNet101(nb_classes=21, input_shape=(473, 473),
weights=args.model)
from pascal_voc_labels import id2label, name2label
else:
print("Network architecture not implemented.")
if args.multi_scale:
EVALUATION_SCALES = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
for image_path in image_paths:
image_name, ext = splitext(os.path.basename(image_path))
image_name = image_name.replace('_leftImg8bit', '')
print("Predicting image name: %s" % (image_name + ext))
img = misc.imread(image_path)
class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES, args.sliding, args.flip)
if args.heat_maps:
show_class_heatmaps(class_scores)
class_image = np.argmax(class_scores, axis=2)
output_path, _ = splitext(args.output_path)
if not os.path.exists(output_path):
os.makedirs(output_path)
output_path = join(output_path, image_name)
print("Writing results to %s" % (output_path + ext))
confidence_map = np.max(class_scores, axis=2)
colored_class_image = color_class_image(class_image, id2label)
alpha_blended = 0.5 * colored_class_image + 0.5 * img
if "cityscapes" in args.model:
class_image = trainid_to_class_image(class_image)
misc.imsave(output_path + "_gtFine_labelIds" + ext, class_image)
misc.imsave(output_path + "_seg" + ext, colored_class_image)
misc.imsave(output_path + "_probs" + ext, confidence_map)
misc.imsave(output_path + "_seg_blended" + ext, alpha_blended)
gt_path = find_matching_gt(args.groundtruth_path, image_name, args.model, verbose=True)
if gt_path is not None:
if args.complete_coarse_image:
try:
coarse_image = misc.imread(gt_path)
class_image = complete_coarse_image(coarse_image, class_image)
misc.imsave(output_path + "_gtFine_labelIds" + ext, class_image)
except AttributeError as err:
print("Warning: Could not read groundtruth: %s" % err)
if args.evaluate:
if "cityscapes" in args.model:
evaluate_iou([class_image], [misc.imread(gt_path)], classes=35)
else:
gt_image = misc.imread(gt_path)
gt_class_image = gt_image_to_class_image(gt_image, id2label)
evaluate_iou([class_image], [gt_class_image], classes=pspnet.nb_classes)
else:
print("Could not find groundtruth for %s" % image_name)
if __name__ == "__main__":
main()
| true
| true
|
1c43d13e1d7ae8c436ac089155e651917aefd88c
| 5,603
|
py
|
Python
|
Parser-hybrid/nparser/neural/models/nlp/parsers/gama_parser.py
|
sb-b/BOUN-PARSE
|
2b529924897d8e2613c4d2193a67796a895da40b
|
[
"Apache-2.0"
] | 12
|
2020-03-04T17:36:12.000Z
|
2021-09-26T14:02:49.000Z
|
Parser-hybrid/nparser/neural/models/nlp/parsers/gama_parser.py
|
sb-b/BOUN-PARSE
|
2b529924897d8e2613c4d2193a67796a895da40b
|
[
"Apache-2.0"
] | 1
|
2020-12-09T08:21:11.000Z
|
2020-12-09T08:21:11.000Z
|
Parser-hybrid/nparser/neural/models/nlp/parsers/gama_parser.py
|
sb-b/BOUN-PARSE
|
2b529924897d8e2613c4d2193a67796a895da40b
|
[
"Apache-2.0"
] | 3
|
2020-11-18T09:53:42.000Z
|
2020-12-17T23:04:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from nparser.neural.models.nlp.parsers.base_parser import BaseParser
#***************************************************************
class GamaParser(BaseParser):
""" """
#=============================================================
def __call__(self, vocabs, moving_params=None):
""" """
top_recur = super(GamaParser, self).__call__(vocabs, moving_params=moving_params)
int_tokens_to_keep = tf.to_int32(self.tokens_to_keep)
with tf.variable_scope('MLP'):
dep_mlp, head_mlp = self.MLP(top_recur, self.arc_mlp_size + self.rel_mlp_size + 2*self.p_mlp_size,
n_splits=2)
arc_dep_mlp, rel_dep_mlp, mu_dep_mlp, sigma_dep_mlp = tf.split(dep_mlp, [self.arc_mlp_size, self.rel_mlp_size, self.p_mlp_size, self.p_mlp_size], axis=2)
arc_head_mlp, rel_head_mlp, mu_head_mlp, sigma_head_mlp = tf.split(head_mlp, [self.arc_mlp_size, self.rel_mlp_size, self.p_mlp_size, self.p_mlp_size], axis=2)
with tf.variable_scope('dist'):
with tf.variable_scope('mu'):
# (n x b x d) o (d x 1 x d) o (n x b x d).T -> (n x b x b)
arc_mus = self.bilinear(mu_dep_mlp, mu_head_mlp, 1)**2
with tf.variable_scope('sigma'):
# (n x b x d) o (d x 1 x d) o (n x b x d).T -> (n x b x b)
arc_sigmas = self.bilinear(sigma_dep_mlp, sigma_head_mlp, 1, initializer=None)**2 + .1
# (b x 1)
i_mat = tf.expand_dims(tf.range(self.bucket_size), 1)
# (1 x b)
j_mat = tf.expand_dims(tf.range(self.bucket_size), 0)
# (b x 1) - (1 x b) -> (b x b)
k_mat = tf.to_float(tf.abs(i_mat - j_mat))
arc_logits = -.5*tf.log(2*np.pi * arc_sigmas) - .5*(k_mat-arc_mus)**2 / arc_sigmas
#arc_rs += tf.to_float(k_mat)#tf.to_float(tf.expand_dims(tf.expand_dims(self.sequence_lengths, 1), 1))
# (b x 1)
#n_mat = tf.expand_dims(self.sequence_lengths, 1) - 1 - i_mat
# (b x b) * (n x b x b) - (n x b x b) - (b x b) -> (n x b x b)
#arc_logits = (tf.lgamma(arc_rs+1) - tf.lgamma(k_mat) - tf.lgamma(arc_rs-k_mat+2) +
# k_mat * tf.log(arc_ps) + (arc_rs-k_mat+1)*tf.log(1-arc_ps) )
with tf.variable_scope('Arc'):
# (n x b x d) o (d x 1 x d) o (n x b x d).T -> (n x b x b)
arc_logits += self.bilinear(arc_dep_mlp, arc_head_mlp, 1, add_bias2=False)
# (n x b x b)
arc_probs = tf.nn.softmax(arc_logits)
# (n x b)
arc_preds = tf.to_int32(tf.argmax(arc_logits, axis=-1))
# (n x b)
arc_targets = self.vocabs['heads'].placeholder
# (n x b)
arc_correct = tf.to_int32(tf.equal(arc_preds, arc_targets))*int_tokens_to_keep
# ()
arc_loss = tf.losses.sparse_softmax_cross_entropy(arc_targets, arc_logits, self.tokens_to_keep)
with tf.variable_scope('Rel'):
# (n x b x d) o (d x r x d) o (n x b x d).T -> (n x b x r x b)
rel_logits = self.bilinear(rel_dep_mlp, rel_head_mlp, len(self.vocabs['rels']))
# (n x b x r x b)
rel_probs = tf.nn.softmax(rel_logits, dim=2)
# (n x b x b)
one_hot = tf.one_hot(arc_preds if moving_params is not None else arc_targets, self.bucket_size)
# (n x b x b) -> (n x b x b x 1)
one_hot = tf.expand_dims(one_hot, axis=3)
# (n x b x r x b) o (n x b x b x 1) -> (n x b x r x 1)
select_rel_logits = tf.matmul(rel_logits, one_hot)
# (n x b x r x 1) -> (n x b x r)
select_rel_logits = tf.squeeze(select_rel_logits, axis=3)
# (n x b)
rel_preds = tf.to_int32(tf.argmax(select_rel_logits, axis=-1))
# (n x b)
rel_targets = self.vocabs['rels'].placeholder
# (n x b)
rel_correct = tf.to_int32(tf.equal(rel_preds, rel_targets))*int_tokens_to_keep
# ()
rel_loss = tf.losses.sparse_softmax_cross_entropy(rel_targets, select_rel_logits, self.tokens_to_keep)
n_arc_correct = tf.reduce_sum(arc_correct)
n_rel_correct = tf.reduce_sum(rel_correct)
correct = arc_correct * rel_correct
n_correct = tf.reduce_sum(correct)
n_seqs_correct = tf.reduce_sum(tf.to_int32(tf.equal(tf.reduce_sum(correct, axis=1), self.sequence_lengths-1)))
loss = arc_loss + rel_loss
outputs = {
'arc_logits': arc_logits,
'arc_mus': arc_mus,
'arc_sigmas': arc_sigmas,
'arc_probs': arc_probs,
'arc_preds': arc_preds,
'arc_targets': arc_targets,
'arc_correct': arc_correct,
'arc_loss': arc_loss,
'n_arc_correct': n_arc_correct,
'rel_logits': rel_logits,
'rel_probs': rel_probs,
'rel_preds': rel_preds,
'rel_targets': rel_targets,
'rel_correct': rel_correct,
'rel_loss': rel_loss,
'n_rel_correct': n_rel_correct,
'n_tokens': self.n_tokens,
'n_seqs': self.batch_size,
'tokens_to_keep': self.tokens_to_keep,
'n_correct': n_correct,
'n_seqs_correct': n_seqs_correct,
'loss': loss
}
return outputs
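# A minimal numpy sketch of the Gaussian distance prior assembled in the
# 'dist' scope above: log N(|i - j|; mu, sigma) for every token pair. The
# bucket size and mu/sigma values are made-up illustration inputs; sigma
# plays the role of a variance here, mirroring the tf ops.
def _demo_distance_prior(bucket_size=4, mu=1.0, sigma=2.0):
    i_mat = np.expand_dims(np.arange(bucket_size), 1)  # (b x 1)
    j_mat = np.expand_dims(np.arange(bucket_size), 0)  # (1 x b)
    k_mat = np.abs(i_mat - j_mat).astype(float)        # (b x b) distances
    return -.5 * np.log(2 * np.pi * sigma) - .5 * (k_mat - mu)**2 / sigma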
| 41.198529
| 164
| 0.622345
|
import numpy as np
import tensorflow as tf
from nparser.neural.models.nlp.parsers.base_parser import BaseParser
class GamaParser(BaseParser):
def __call__(self, vocabs, moving_params=None):
top_recur = super(GamaParser, self).__call__(vocabs, moving_params=moving_params)
int_tokens_to_keep = tf.to_int32(self.tokens_to_keep)
with tf.variable_scope('MLP'):
dep_mlp, head_mlp = self.MLP(top_recur, self.arc_mlp_size + self.rel_mlp_size + 2*self.p_mlp_size,
n_splits=2)
arc_dep_mlp, rel_dep_mlp, mu_dep_mlp, sigma_dep_mlp = tf.split(dep_mlp, [self.arc_mlp_size, self.rel_mlp_size, self.p_mlp_size, self.p_mlp_size], axis=2)
arc_head_mlp, rel_head_mlp, mu_head_mlp, sigma_head_mlp = tf.split(head_mlp, [self.arc_mlp_size, self.rel_mlp_size, self.p_mlp_size, self.p_mlp_size], axis=2)
with tf.variable_scope('dist'):
with tf.variable_scope('mu'):
arc_mus = self.bilinear(mu_dep_mlp, mu_head_mlp, 1)**2
with tf.variable_scope('sigma'):
arc_sigmas = self.bilinear(sigma_dep_mlp, sigma_head_mlp, 1, initializer=None)**2 + .1
i_mat = tf.expand_dims(tf.range(self.bucket_size), 1)
j_mat = tf.expand_dims(tf.range(self.bucket_size), 0)
k_mat = tf.to_float(tf.abs(i_mat - j_mat))
arc_logits = -.5*tf.log(2*np.pi * arc_sigmas) - .5*(k_mat-arc_mus)**2 / arc_sigmas
arc_logits += self.bilinear(arc_dep_mlp, arc_head_mlp, 1, add_bias2=False)
arc_probs = tf.nn.softmax(arc_logits)
arc_preds = tf.to_int32(tf.argmax(arc_logits, axis=-1))
arc_targets = self.vocabs['heads'].placeholder
arc_correct = tf.to_int32(tf.equal(arc_preds, arc_targets))*int_tokens_to_keep
arc_loss = tf.losses.sparse_softmax_cross_entropy(arc_targets, arc_logits, self.tokens_to_keep)
with tf.variable_scope('Rel'):
rel_logits = self.bilinear(rel_dep_mlp, rel_head_mlp, len(self.vocabs['rels']))
rel_probs = tf.nn.softmax(rel_logits, dim=2)
one_hot = tf.one_hot(arc_preds if moving_params is not None else arc_targets, self.bucket_size)
one_hot = tf.expand_dims(one_hot, axis=3)
select_rel_logits = tf.matmul(rel_logits, one_hot)
select_rel_logits = tf.squeeze(select_rel_logits, axis=3)
rel_preds = tf.to_int32(tf.argmax(select_rel_logits, axis=-1))
rel_targets = self.vocabs['rels'].placeholder
rel_correct = tf.to_int32(tf.equal(rel_preds, rel_targets))*int_tokens_to_keep
rel_loss = tf.losses.sparse_softmax_cross_entropy(rel_targets, select_rel_logits, self.tokens_to_keep)
n_arc_correct = tf.reduce_sum(arc_correct)
n_rel_correct = tf.reduce_sum(rel_correct)
correct = arc_correct * rel_correct
n_correct = tf.reduce_sum(correct)
n_seqs_correct = tf.reduce_sum(tf.to_int32(tf.equal(tf.reduce_sum(correct, axis=1), self.sequence_lengths-1)))
loss = arc_loss + rel_loss
outputs = {
'arc_logits': arc_logits,
'arc_mus': arc_mus,
'arc_sigmas': arc_sigmas,
'arc_probs': arc_probs,
'arc_preds': arc_preds,
'arc_targets': arc_targets,
'arc_correct': arc_correct,
'arc_loss': arc_loss,
'n_arc_correct': n_arc_correct,
'rel_logits': rel_logits,
'rel_probs': rel_probs,
'rel_preds': rel_preds,
'rel_targets': rel_targets,
'rel_correct': rel_correct,
'rel_loss': rel_loss,
'n_rel_correct': n_rel_correct,
'n_tokens': self.n_tokens,
'n_seqs': self.batch_size,
'tokens_to_keep': self.tokens_to_keep,
'n_correct': n_correct,
'n_seqs_correct': n_seqs_correct,
'loss': loss
}
return outputs
| true
| true
|
1c43d18889a0f6900709ccbbcf70196aa6da5678
| 6,236
|
py
|
Python
|
db_comm_messages.py
|
seeul8er/DroneBridge_Comm
|
156ef546f4680084acc94c34f9ed3caeecf23585
|
[
"Apache-2.0"
] | 1
|
2017-11-29T17:06:37.000Z
|
2017-11-29T17:06:37.000Z
|
db_comm_messages.py
|
seeul8er/DroneBridge_Comm
|
156ef546f4680084acc94c34f9ed3caeecf23585
|
[
"Apache-2.0"
] | null | null | null |
db_comm_messages.py
|
seeul8er/DroneBridge_Comm
|
156ef546f4680084acc94c34f9ed3caeecf23585
|
[
"Apache-2.0"
] | null | null | null |
import json
import configparser
import binascii
from itertools import chain
import os
tag = 'DB_COMM_MESSAGE: '
PATH_DRONEBRIDGE_TX_SETTINGS = "/boot/DroneBridgeTX.ini"
PATH_DRONEBRIDGE_RX_SETTINGS = "/boot/DroneBridgeRX.ini"
PATH_WBC_SETTINGS = "/boot/wifibroadcast-1.txt"
# As we send it as a single frame we do not want the payload to be unnecessarily big. Only respond with important settings
wbc_settings_blacklist = ["TXMODE", "MAC_RX[0]", "FREQ_RX[0]", "MAC_RX[1]", "FREQ_RX[1]", "MAC_RX[2]", "FREQ_RX[2]",
"MAC_RX[3]", "FREQ_RX[3]", "MAC_TX[0]", "FREQ_TX[0]", "MAC_TX[1]", "FREQ_TX[1]",
"WIFI_HOTSPOT_NIC", "RELAY", "RELAY_NIC", "RELAY_FREQ", "QUIET", "FREQSCAN",
"EXTERNAL_TELEMETRY_SERIALPORT_GROUND", "TELEMETRY_OUTPUT_SERIALPORT_GROUND",
"FC_RC_BAUDRATE", "FC_RC_SERIALPORT", "TELEMETRY_UPLINK", "FC_MSP_SERIALPORT",
"EXTERNAL_TELEMETRY_SERIALPORT_GROUND_BAUDRATE", "TELEMETRY_OUTPUT_SERIALPORT_GROUND_BAUDRATE"]
db_settings_blacklist = ["ip_drone", "interface_selection", "interface_control", "interface_tel", "interface_video",
"interface_comm", "joy_cal"]
def new_settingsresponse_message(loaded_json, origin):
"""takes in a request - executes search for settings and creates a response as bytes"""
complete_response = {}
complete_response['destination'] = 4
complete_response['type'] = 'settingsresponse'
complete_response['response'] = loaded_json['request']
complete_response['origin'] = origin
complete_response['id'] = loaded_json['id']
if loaded_json['request'] == 'dronebridge':
complete_response = read_dronebridge_settings(complete_response, origin)
elif loaded_json['request'] == 'wifibroadcast':
complete_response = read_wbc_settings(complete_response)
response = json.dumps(complete_response)
crc32 = binascii.crc32(str.encode(response))
return response.encode()+crc32.to_bytes(4, byteorder='little', signed=False)
def new_settingschangesuccess_message(origin, new_id):
"""returns a settings change success message"""
command = json.dumps({'destination': 4, 'type': 'settingssuccess', 'origin': origin, 'id': new_id})
crc32 = binascii.crc32(str.encode(command))
return command.encode()+crc32.to_bytes(4, byteorder='little', signed=False)
def change_settings_wbc(loaded_json, origin):
try:
with open(PATH_WBC_SETTINGS, 'r+') as file:
lines = file.readlines()
for key in loaded_json['settings']:
for index, line in enumerate(lines):
if line.startswith(key+"="):
lines[index] = key+"="+loaded_json['settings'][key]+"\n"
file.seek(0, 0)
for line in lines:
file.write(line)
file.truncate()
file.flush()
os.fsync(file.fileno())
except Exception as ex:
print("Error writing wbc settings: " + str(ex))
return False
return True
def change_settings_db(loaded_json, origin):
try:
section = ''
filepath = ''
if origin=='groundstation':
section = 'TX'
filepath = PATH_DRONEBRIDGE_TX_SETTINGS
elif origin == 'drone':
section = 'RX'
filepath = PATH_DRONEBRIDGE_RX_SETTINGS
with open(filepath, 'r+') as file:
lines = file.readlines()
for key in loaded_json['settings'][section]:
for index, line in enumerate(lines):
if line.startswith(key+"="):
lines[index] = key+"="+loaded_json['settings'][section][key]+"\n"
file.seek(0, 0)
for line in lines:
file.write(line)
file.truncate()
file.flush()
os.fsync(file.fileno())
except Exception as ex:
print("Error writing db settings: "+str(ex))
return False
return True
def change_settings(loaded_json, origin):
"""takes a settings change request - executes it - returns a encoded settings change success message"""
worked = False
if loaded_json['change'] == 'db':
worked = change_settings_db(loaded_json, origin)
elif loaded_json['change'] == 'wbc':
worked = change_settings_wbc(loaded_json, origin)
if worked:
return new_settingschangesuccess_message(origin, loaded_json['id'])
else:
return "error_settingschange".encode()
def change_settings_gopro(loaded_json):
# TODO change GoPro settings
pass
def read_dronebridge_settings(response_header, origin):
config = configparser.ConfigParser()
config.optionxform = str
section = ''
settings = {}
if origin == 'groundstation':
config.read(PATH_DRONEBRIDGE_TX_SETTINGS)
section = 'TX'
elif origin == 'drone':
config.read(PATH_DRONEBRIDGE_RX_SETTINGS)
section = 'RX'
for key in config[section]:
if key not in db_settings_blacklist:
settings[key] = config.get(section, key)
response_header['settings'] = settings
return response_header
def read_wbc_settings(response_header):
virtual_section = 'root'
settings = {}
config = configparser.ConfigParser()
config.optionxform = str
with open(PATH_WBC_SETTINGS, 'r') as lines:
lines = chain(('['+virtual_section+']',), lines)
config.read_file(lines)
for key in config[virtual_section]:
if key not in wbc_settings_blacklist:
settings[key] = config.get(virtual_section, key)
response_header['settings'] = settings
return response_header
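# Illustrative only: the '[root]' trick above lets configparser parse a
# section-less key=value file. The keys below are made-up examples, shown
# on an in-memory file instead of PATH_WBC_SETTINGS.
def _demo_virtual_section():
    from io import StringIO
    demo_config = configparser.ConfigParser()
    demo_config.optionxform = str
    lines = chain(('[root]',), StringIO(u'WIDTH=1280\nFPS=48\n'))
    demo_config.read_file(lines)
    return dict(demo_config['root'])  # {'WIDTH': '1280', 'FPS': '48'}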
def remove_first_line(filepath):
with open(filepath, 'r') as f1:
data = f1.read().splitlines(True)
with open(filepath, 'w') as f2:
f2.writelines(data[1:])
def comm_message_extract_info(message):
alist = message.rsplit(b'}', 1)
alist[0] = alist[0]+b'}'
return alist
def check_package_good(extracted_info):
if binascii.crc32(extracted_info[0]).to_bytes(4, byteorder='little', signed=False) == extracted_info[1]:
return True
print(tag+"Bad CRC!")
return False
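# Illustrative round trip through the framing helpers above. Note the rsplit
# framing assumes the 4 little-endian CRC bytes contain no b'}' byte; that
# holds for most payloads but is not guaranteed.
def _demo_crc_roundtrip():
    frame = new_settingschangesuccess_message('groundstation', 1)
    json_part, crc_part = comm_message_extract_info(frame)
    return check_package_good([json_part, crc_part])  # True if framing held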
| 37.341317
| 121
| 0.642559
|
import json
import configparser
import binascii
from itertools import chain
import os
tag = 'DB_COMM_MESSAGE: '
PATH_DRONEBRIDGE_TX_SETTINGS = "/boot/DroneBridgeTX.ini"
PATH_DRONEBRIDGE_RX_SETTINGS = "/boot/DroneBridgeRX.ini"
PATH_WBC_SETTINGS = "/boot/wifibroadcast-1.txt"
wbc_settings_blacklist = ["TXMODE", "MAC_RX[0]", "FREQ_RX[0]", "MAC_RX[1]", "FREQ_RX[1]", "MAC_RX[2]", "FREQ_RX[2]",
"MAC_RX[3]", "FREQ_RX[3]", "MAC_TX[0]", "FREQ_TX[0]", "MAC_TX[1]", "FREQ_TX[1]",
"WIFI_HOTSPOT_NIC", "RELAY", "RELAY_NIC", "RELAY_FREQ", "QUIET", "FREQSCAN",
"EXTERNAL_TELEMETRY_SERIALPORT_GROUND", "TELEMETRY_OUTPUT_SERIALPORT_GROUND",
"FC_RC_BAUDRATE", "FC_RC_SERIALPORT", "TELEMETRY_UPLINK", "FC_MSP_SERIALPORT",
"EXTERNAL_TELEMETRY_SERIALPORT_GROUND_BAUDRATE", "TELEMETRY_OUTPUT_SERIALPORT_GROUND_BAUDRATE"]
db_settings_blacklist = ["ip_drone", "interface_selection", "interface_control", "interface_tel", "interface_video",
"interface_comm", "joy_cal"]
def new_settingsresponse_message(loaded_json, origin):
complete_response = {}
complete_response['destination'] = 4
complete_response['type'] = 'settingsresponse'
complete_response['response'] = loaded_json['request']
complete_response['origin'] = origin
complete_response['id'] = loaded_json['id']
if loaded_json['request'] == 'dronebridge':
complete_response = read_dronebridge_settings(complete_response, origin)
elif loaded_json['request'] == 'wifibroadcast':
complete_response = read_wbc_settings(complete_response)
response = json.dumps(complete_response)
crc32 = binascii.crc32(str.encode(response))
return response.encode()+crc32.to_bytes(4, byteorder='little', signed=False)
def new_settingschangesuccess_message(origin, new_id):
command = json.dumps({'destination': 4, 'type': 'settingssuccess', 'origin': origin, 'id': new_id})
crc32 = binascii.crc32(str.encode(command))
return command.encode()+crc32.to_bytes(4, byteorder='little', signed=False)
def change_settings_wbc(loaded_json, origin):
try:
with open(PATH_WBC_SETTINGS, 'r+') as file:
lines = file.readlines()
for key in loaded_json['settings']:
for index, line in enumerate(lines):
if line.startswith(key+"="):
lines[index] = key+"="+loaded_json['settings'][key]+"\n"
file.seek(0, 0)
for line in lines:
file.write(line)
file.truncate()
file.flush()
os.fsync(file.fileno())
except Exception as ex:
print("Error writing wbc settings: " + str(ex))
return False
return True
def change_settings_db(loaded_json, origin):
try:
section = ''
filepath = ''
if origin=='groundstation':
section = 'TX'
filepath = PATH_DRONEBRIDGE_TX_SETTINGS
elif origin == 'drone':
section = 'RX'
filepath = PATH_DRONEBRIDGE_RX_SETTINGS
with open(filepath, 'r+') as file:
lines = file.readlines()
for key in loaded_json['settings'][section]:
for index, line in enumerate(lines):
if line.startswith(key+"="):
lines[index] = key+"="+loaded_json['settings'][section][key]+"\n"
file.seek(0, 0)
for line in lines:
file.write(line)
file.truncate()
file.flush()
os.fsync(file.fileno())
except Exception as ex:
print("Error writing db settings: "+str(ex))
return False
return True
def change_settings(loaded_json, origin):
worked = False
if loaded_json['change'] == 'db':
worked = change_settings_db(loaded_json, origin)
elif loaded_json['change'] == 'wbc':
worked = change_settings_wbc(loaded_json, origin)
if worked:
return new_settingschangesuccess_message(origin, loaded_json['id'])
else:
return "error_settingschange".encode()
def change_settings_gopro(loaded_json):
pass
def read_dronebridge_settings(response_header, origin):
config = configparser.ConfigParser()
config.optionxform = str
section = ''
settings = {}
if origin == 'groundstation':
config.read(PATH_DRONEBRIDGE_TX_SETTINGS)
section = 'TX'
elif origin == 'drone':
config.read(PATH_DRONEBRIDGE_RX_SETTINGS)
section = 'RX'
for key in config[section]:
if key not in db_settings_blacklist:
settings[key] = config.get(section, key)
response_header['settings'] = settings
return response_header
def read_wbc_settings(response_header):
virtual_section = 'root'
settings = {}
config = configparser.ConfigParser()
config.optionxform = str
with open(PATH_WBC_SETTINGS, 'r') as lines:
lines = chain(('['+virtual_section+']',), lines)
config.read_file(lines)
for key in config[virtual_section]:
if key not in wbc_settings_blacklist:
settings[key] = config.get(virtual_section, key)
response_header['settings'] = settings
return response_header
def remove_first_line(filepath):
with open(filepath, 'r') as f1:
data = f1.read().splitlines(True)
with open(filepath, 'w') as f2:
f2.writelines(data[1:])
def comm_message_extract_info(message):
alist = message.rsplit(b'}', 1)
alist[0] = alist[0]+b'}'
return alist
def check_package_good(extracted_info):
if binascii.crc32(extracted_info[0]).to_bytes(4, byteorder='little', signed=False) == extracted_info[1]:
return True
print(tag+"Bad CRC!")
return False
| true
| true
|
1c43d22b596fddf6286a515cf1c8f75f7f260ee6
| 1,209
|
py
|
Python
|
common/message_forwarder.py
|
matthewdargan/Cozmo-Capture-the-Flag
|
959467ed6ebaeeb42fe60db5905e49963b5d2096
|
[
"MIT"
] | null | null | null |
common/message_forwarder.py
|
matthewdargan/Cozmo-Capture-the-Flag
|
959467ed6ebaeeb42fe60db5905e49963b5d2096
|
[
"MIT"
] | null | null | null |
common/message_forwarder.py
|
matthewdargan/Cozmo-Capture-the-Flag
|
959467ed6ebaeeb42fe60db5905e49963b5d2096
|
[
"MIT"
] | 1
|
2019-03-05T17:12:07.000Z
|
2019-03-05T17:12:07.000Z
|
import socket
from socket import error as socket_error
from typing import List
def start_connection(ip: str, port: int) -> socket.socket:
"""
Start a connection to a TCP network.
    :param ip: IP address of the network
    :param port: port number to forward messages over
:return: socket opened with the ip address and port number
"""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket_error:
        print("Connection failed.")
        raise
    try:
        s.connect((ip, port))
    except socket_error:
        print('Socket failed to connect')
        raise
s.setblocking(False)
return s
def receive_message(connection: socket.socket) -> List[str]:
"""
Receive a cube message from the network and parse it into sections so we can
check the coordinates of a robot's cubes against a base.
    :param connection: the network connection used to receive data
:return: parameterized coordinate data
"""
try:
bytedata = connection.recv(4048)
data = bytedata.decode('utf-8')
        if not data:
            print('No message to receive')
            return []
        return data.split(' ')
except socket.error:
return []
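# Illustrative usage sketch; the address and port below are placeholders,
# not values taken from this project.
if __name__ == '__main__':
    conn = start_connection('127.0.0.1', 5000)
    print(receive_message(conn))  # [] until a cube message arrives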
| 25.1875
| 80
| 0.649297
|
import socket
from socket import error as socket_error
from typing import List
def start_connection(ip: str, port: int) -> socket.socket:
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket_error:
        print("Connection failed.")
        raise
    try:
        s.connect((ip, port))
    except socket_error:
        print('Socket failed to connect')
        raise
s.setblocking(False)
return s
def receive_message(connection: socket.socket) -> List[str]:
try:
bytedata = connection.recv(4048)
data = bytedata.decode('utf-8')
        if not data:
            print('No message to receive')
            return []
        return data.split(' ')
except socket.error:
return []
| true
| true
|
1c43d37c4d3866bf302fa057cdab10f32428ea99
| 953
|
py
|
Python
|
cohesity_management_sdk/models/search_job_status_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/search_job_status_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/search_job_status_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16
|
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class SearchJobStatusEnum(object):
"""Implementation of the 'SearchJobStatus' enum.
Specifies the status of the search.
'kJobRunning' indicates that the Job/task is currently running.
'kJobFinished' indicates that the Job/task completed and finished.
'kJobFailed' indicates that the Job/task failed and did not complete.
'kJobCanceled' indicates that the Job/task was canceled.
'kJobPaused' indicates the Job/task is paused.
Attributes:
KJOBRUNNING: TODO: type description here.
KJOBFINISHED: TODO: type description here.
KJOBFAILED: TODO: type description here.
KJOBCANCELED: TODO: type description here.
KJOBPAUSED: TODO: type description here.
"""
KJOBRUNNING = 'kJobRunning'
KJOBFINISHED = 'kJobFinished'
KJOBFAILED = 'kJobFailed'
KJOBCANCELED = 'kJobCanceled'
KJOBPAUSED = 'kJobPaused'
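# Illustrative only: the enum values are plain strings, so a status from an
# API response can be compared directly. _is_terminal is a hypothetical
# helper for illustration, not part of the SDK.
def _is_terminal(status):
    """Return True once a search job can no longer make progress."""
    return status in (SearchJobStatusEnum.KJOBFINISHED,
                      SearchJobStatusEnum.KJOBFAILED,
                      SearchJobStatusEnum.KJOBCANCELED)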
| 28.029412
| 73
| 0.699895
|
class SearchJobStatusEnum(object):
KJOBRUNNING = 'kJobRunning'
KJOBFINISHED = 'kJobFinished'
KJOBFAILED = 'kJobFailed'
KJOBCANCELED = 'kJobCanceled'
KJOBPAUSED = 'kJobPaused'
| true
| true
|
1c43d38f9620e64fb551b35cd7d5f96e522ca4e4
| 96
|
py
|
Python
|
spark_surveymonkey/__init__.py
|
eferm/spark-surveymonkey
|
0912268c9604f32226d29b3d870296d781787a3a
|
[
"MIT"
] | null | null | null |
spark_surveymonkey/__init__.py
|
eferm/spark-surveymonkey
|
0912268c9604f32226d29b3d870296d781787a3a
|
[
"MIT"
] | null | null | null |
spark_surveymonkey/__init__.py
|
eferm/spark-surveymonkey
|
0912268c9604f32226d29b3d870296d781787a3a
|
[
"MIT"
] | null | null | null |
from ._transform import transform_survey
__version__ = '0.1.0'
__all__ = ('transform_survey',)
| 16
| 40
| 0.760417
|
from ._transform import transform_survey
__version__ = '0.1.0'
__all__ = ('transform_survey',)
| true
| true
|
1c43d4b1509801a52270c7d65891a51a3a8e8637
| 10,527
|
py
|
Python
|
helpers/shuffleMockCatalog.py
|
manodeep/yymao-helpers
|
4ceffd639f4a10d259146f3f94e0b2415e835f32
|
[
"MIT"
] | null | null | null |
helpers/shuffleMockCatalog.py
|
manodeep/yymao-helpers
|
4ceffd639f4a10d259146f3f94e0b2415e835f32
|
[
"MIT"
] | null | null | null |
helpers/shuffleMockCatalog.py
|
manodeep/yymao-helpers
|
4ceffd639f4a10d259146f3f94e0b2415e835f32
|
[
"MIT"
] | null | null | null |
__all__ = ['shuffleMockCatalog', 'generate_upid']
import warnings
from itertools import izip
import numpy as np
from numpy.lib.recfunctions import rename_fields
def _iter_plateau_in_sorted_array(a):
if len(a):
k = np.where(a[1:] != a[:-1])[0]
k += 1
i = 0
for j in k:
yield i, j
i = j
yield i, len(a)
def _iter_indices_in_bins(bins, a):
if len(a) and len(bins):
s = a.argsort()
k = np.searchsorted(a, bins, 'right', sorter=s)
i = 0
for j in k:
yield s[i:j]
i = j
yield s[i:]
def _apply_rotation(pos, box_size):
half_box_size = box_size * 0.5
pos[pos > half_box_size] -= box_size
pos[pos < -half_box_size] += box_size
return np.dot(pos, np.linalg.qr(np.random.randn(3,3))[0])
_axes = list('xyz')
def _get_xyz(a, ax_type=float):
return np.fromiter((a[ax] for ax in _axes), ax_type, 3)
def generate_upid(pid, id, recursive=True):
"""
To generate (or to fix) the upid of a halo catalog.
Parameters
----------
pid : array_like
        An ndarray of integers that contains the parent IDs of each halo.
id : array_like
        An ndarray of integers that contains the halo IDs.
recursive : bool, optional
Whether or not to run this function recursively. Default is True.
Returns
-------
upid : array_like
The ultimate parent IDs.
Examples
--------
>>> halos['upid'] = generate_upid(halos['pid'], halos['id'])
"""
pid = np.ravel(pid)
id = np.ravel(id)
if len(id) != len(pid):
raise ValueError('`pid` and `id` must have the same length.')
if not len(pid):
raise ValueError('`pid` and `id` must not be empty.')
s = pid.argsort()
idx = np.fromiter(_iter_plateau_in_sorted_array(pid[s]), \
np.dtype([('start', int), ('stop', int)]))
unique_pid = pid[s[idx['start']]]
if unique_pid[0] == -1:
unique_pid = unique_pid[1:]
idx = idx[1:]
host_flag = (pid == -1)
not_found = np.where(np.in1d(unique_pid, id[host_flag], True, True))[0]
if not len(not_found):
return pid
sub_flag = np.where(~host_flag)[0]
found = sub_flag[np.in1d(id[sub_flag], unique_pid[not_found], True)]
found = found[id[found].argsort()]
assert (id[found] == unique_pid[not_found]).all()
del host_flag, sub_flag, unique_pid
pid_old = pid.copy()
for i, j in izip(found, not_found):
pid[s[slice(*idx[j])]] = pid_old[i]
del pid_old, idx, s, found, not_found
return generate_upid(pid, id, True) if recursive else pid
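# A tiny, hand-checkable example of the resolution above: the chain
# 3 -> 2 -> 1 (host) collapses to upid == 1 for both subhalos.
def _demo_generate_upid():
    pid = np.array([-1, 1, 2])
    ids = np.array([1, 2, 3])
    return generate_upid(pid.copy(), ids)  # -> array([-1,  1,  1])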
def shuffleMockCatalog(mock_ids, halo_catalog, bin_width=None, bins=None,
proxy='mvir', box_size=None, apply_rsd=False,
shuffle_centrals=True, shuffle_satellites=True, rotate_satellites=False,
return_structured_array=False):
"""
Shuffle a mock catalog according to Zentner et al. (2014) [arXiv:1311.1818]
Parameters
----------
mock_ids : array_like
Should be a 1-d array of int which contains the corresponding halo IDs
for the galaxies in the mock catalog to be shuffled.
halo_catalog : array_like
        Should be a 1-d structured array which has the following fields:
        id, upid, x, y, z, vz (if `apply_rsd` is True), and the proxy.
bin_width : float or None, optional
The width of the bin, in dex.
bins : int, array_like, or None, optional
If an integer is provided, it is interpreted as the number of bins.
If an array is provided, it is interpreted as the edges of the bins.
        This parameter overrides `bin_width`.
proxy : string, optional
The proxy to bin on. Must be present in the fields of `halo_catalog`.
box_size : float or None, optional
The side length of the box. Should be in the same unit as x, y, z.
apply_rsd : bool, optional
Whether or not to apply redshift space distortions on the z-axis.
(Default is False)
shuffle_centrals : bool, optional
Whether or not to shuffle central galaxies (Default is True)
shuffle_satellites : bool, optional
Whether or not to shuffle satellite galaxies (Default is True)
rotate_satellites : bool, optional
Whether or not to apply a random rotation to satellite galaxies
(Default is False)
return_structured_array : bool, optional
Whether to return a structured array that contains x, y, z
or just a n-by-3 float array.
Returns
-------
pos : array_like
        An ndarray that contains x, y, z of the shuffled positions.
"""
# check necessary fields in halo_catalog
fields = ['id', 'upid', proxy] + _axes
if apply_rsd:
fields.append('vz')
if not all((f in halo_catalog.dtype.names for f in fields)):
raise ValueError('`halo_catalog` should have the following fields: '+ \
', '.join(fields))
# check dtype
ax_type = halo_catalog['x'].dtype.type
if any((halo_catalog[ax].dtype.type != ax_type for ax in 'yz')):
raise ValueError('The types of fields x, y, z in `halo_catalog` ' \
'must all be the same.')
# check all mock_ids are in halo_catalog
s = halo_catalog['id'].argsort()
idx = np.searchsorted(halo_catalog['id'], mock_ids, sorter=s)
try:
idx = s[idx]
except IndexError:
raise ValueError('`mock_ids` must all present in `halo_catalog`')
if not (halo_catalog['id'][idx] == mock_ids).all():
raise ValueError('`mock_ids` must all present in `halo_catalog`')
mock_idx = np.ones(len(halo_catalog), dtype=int)
mock_idx *= -1
mock_idx[idx] = np.arange(len(mock_ids))
del idx
# separate hosts and subs
host_flag = (halo_catalog['upid'] == -1)
subs = rename_fields(halo_catalog[~host_flag], {'id':'mock_idx'})
subs['mock_idx'] = mock_idx[~host_flag]
subs = subs[subs['mock_idx'] > -1] # only need subs that are mocks
host_flag = s[host_flag[s]] # this sorts `hosts` by `id`
hosts = rename_fields(halo_catalog[host_flag], {'upid':'mock_idx'})
hosts['mock_idx'] = mock_idx[host_flag]
del host_flag, mock_idx, s
# group subhalos
subs.sort(order='upid')
idx = np.fromiter(_iter_plateau_in_sorted_array(subs['upid']), \
np.dtype([('start', int), ('stop', int)]))
host_ids = subs['upid'][idx['start']]
if not np.in1d(host_ids, hosts['id'], True).all():
        raise ValueError('Some subhalos associated with the mock galaxies ' \
'have no parent halos in `halo_catalog`. Consider using ' \
'`generate_upid` to fix this.')
# for the following to work, `hosts` need to be sorted by `id`
subs_idx = np.zeros(len(hosts), dtype=idx.dtype)
subs_idx[np.in1d(hosts['id'], host_ids, True)] = idx
del idx, host_ids
# check bins
try:
bin_width = float(bin_width)
except (ValueError, TypeError):
bin_width = None
else:
if bin_width <= 0:
bin_width = None
if bin_width is None:
bin_width = 0.1
mi = np.log10(hosts[proxy].min()*0.99999)
ma = np.log10(hosts[proxy].max())
if bins is None:
bins = int(np.ceil((ma-mi)/bin_width))
mi = ma - bin_width*bins
try:
bins = int(bins)
except (ValueError, TypeError):
bins = np.asarray(bins)
if len(bins) < 2 or (bins[1:]<bins[:-1]).any():
raise ValueError('Please specify a valid `bin` parameter.')
else:
bins = np.logspace(mi, ma, bins+1)
# create the array for storing results
pos = np.empty((len(mock_ids), 3), ax_type)
pos.fill(np.nan)
# loop of bins of proxy (e.g. mvir)
for i, indices in enumerate(_iter_indices_in_bins(bins, hosts[proxy])):
if not len(indices):
continue
if i==0 or i==len(bins):
if (hosts['mock_idx'][indices] > -1).any() or \
any((subs_idx['start'][j] < subs_idx['stop'][j] \
for j in indices)):
                warnings.warn('Some halos associated with the mock catalog ' \
'are outside the bin range.', RuntimeWarning)
continue
# shuffle satellites
if shuffle_satellites:
choices = indices.tolist()
for j in indices:
subs_this = subs[slice(*subs_idx[j])]
if not len(subs_this):
continue
mock_idx_this = subs_this['mock_idx']
pos[mock_idx_this] = subs_this[_axes].view((ax_type,3))
if shuffle_satellites:
k = choices.pop(np.random.randint(len(choices)))
pos[mock_idx_this] -= _get_xyz(hosts[j], ax_type)
if rotate_satellites:
pos[mock_idx_this] = \
_apply_rotation(pos[mock_idx_this], box_size)
pos[mock_idx_this] += _get_xyz(hosts[k], ax_type)
if apply_rsd:
pos[mock_idx_this,2] += (subs_this['vz'] \
+ hosts['vz'][k] - hosts['vz'][j])/100.0
else:
if rotate_satellites:
host_pos = _get_xyz(hosts[j], ax_type)
pos[mock_idx_this] -= host_pos
pos[mock_idx_this] = \
_apply_rotation(pos[mock_idx_this], box_size)
pos[mock_idx_this] += host_pos
if apply_rsd:
pos[mock_idx_this,2] += subs_this['vz']/100.0
# shuffle hosts
has_mock = indices[hosts['mock_idx'][indices] > -1]
if not len(has_mock):
continue
mock_idx_this = hosts['mock_idx'][has_mock]
if shuffle_centrals:
has_mock = np.random.choice(indices, len(has_mock), False)
pos[mock_idx_this] = hosts[_axes][has_mock].view((ax_type,3))
if apply_rsd:
pos[mock_idx_this,2] += hosts['vz'][has_mock]/100.0
# sanity check
if np.isnan(pos).any():
warnings.warn('Some galaxies in the mock catalog have not been ' \
'assigned a new position. Maybe the corresponding halo is ' \
'outside the bin range.', RuntimeWarning)
# wrap box
if box_size is not None:
pos = np.remainder(pos, box_size, pos)
if return_structured_array:
pos = pos.view(np.dtype(zip(_axes, [ax_type]*3)))
return pos
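# Illustrative only: reproduces the default binning logic above (0.1 dex in
# the proxy) for a made-up range of host masses; no catalog data involved.
def _demo_proxy_bins():
    mvir = np.array([1e11, 5e12, 2e14])
    bin_width = 0.1
    mi = np.log10(mvir.min() * 0.99999)
    ma = np.log10(mvir.max())
    nbins = int(np.ceil((ma - mi) / bin_width))  # -> 34 bins of 0.1 dex
    return np.logspace(ma - bin_width * nbins, ma, nbins + 1)  # 35 edges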
| 36.807692
| 80
| 0.595516
|
__all__ = ['shuffleMockCatalog', 'generate_upid']
import warnings
from itertools import izip
import numpy as np
from numpy.lib.recfunctions import rename_fields
def _iter_plateau_in_sorted_array(a):
if len(a):
k = np.where(a[1:] != a[:-1])[0]
k += 1
i = 0
for j in k:
yield i, j
i = j
yield i, len(a)
def _iter_indices_in_bins(bins, a):
if len(a) and len(bins):
s = a.argsort()
k = np.searchsorted(a, bins, 'right', sorter=s)
i = 0
for j in k:
yield s[i:j]
i = j
yield s[i:]
def _apply_rotation(pos, box_size):
half_box_size = box_size * 0.5
pos[pos > half_box_size] -= box_size
pos[pos < -half_box_size] += box_size
return np.dot(pos, np.linalg.qr(np.random.randn(3,3))[0])
_axes = list('xyz')
def _get_xyz(a, ax_type=float):
return np.fromiter((a[ax] for ax in _axes), ax_type, 3)
def generate_upid(pid, id, recursive=True):
pid = np.ravel(pid)
id = np.ravel(id)
if len(id) != len(pid):
raise ValueError('`pid` and `id` must have the same length.')
if not len(pid):
raise ValueError('`pid` and `id` must not be empty.')
s = pid.argsort()
idx = np.fromiter(_iter_plateau_in_sorted_array(pid[s]), \
np.dtype([('start', int), ('stop', int)]))
unique_pid = pid[s[idx['start']]]
if unique_pid[0] == -1:
unique_pid = unique_pid[1:]
idx = idx[1:]
host_flag = (pid == -1)
not_found = np.where(np.in1d(unique_pid, id[host_flag], True, True))[0]
if not len(not_found):
return pid
sub_flag = np.where(~host_flag)[0]
found = sub_flag[np.in1d(id[sub_flag], unique_pid[not_found], True)]
found = found[id[found].argsort()]
assert (id[found] == unique_pid[not_found]).all()
del host_flag, sub_flag, unique_pid
pid_old = pid.copy()
for i, j in izip(found, not_found):
pid[s[slice(*idx[j])]] = pid_old[i]
del pid_old, idx, s, found, not_found
return generate_upid(pid, id, True) if recursive else pid
def shuffleMockCatalog(mock_ids, halo_catalog, bin_width=None, bins=None,
proxy='mvir', box_size=None, apply_rsd=False,
shuffle_centrals=True, shuffle_satellites=True, rotate_satellites=False,
return_structured_array=False):
fields = ['id', 'upid', proxy] + _axes
if apply_rsd:
fields.append('vz')
if not all((f in halo_catalog.dtype.names for f in fields)):
raise ValueError('`halo_catalog` should have the following fields: '+ \
', '.join(fields))
ax_type = halo_catalog['x'].dtype.type
if any((halo_catalog[ax].dtype.type != ax_type for ax in 'yz')):
raise ValueError('The types of fields x, y, z in `halo_catalog` ' \
'must all be the same.')
s = halo_catalog['id'].argsort()
idx = np.searchsorted(halo_catalog['id'], mock_ids, sorter=s)
try:
idx = s[idx]
except IndexError:
raise ValueError('`mock_ids` must all present in `halo_catalog`')
if not (halo_catalog['id'][idx] == mock_ids).all():
raise ValueError('`mock_ids` must all present in `halo_catalog`')
mock_idx = np.ones(len(halo_catalog), dtype=int)
mock_idx *= -1
mock_idx[idx] = np.arange(len(mock_ids))
del idx
host_flag = (halo_catalog['upid'] == -1)
subs = rename_fields(halo_catalog[~host_flag], {'id':'mock_idx'})
subs['mock_idx'] = mock_idx[~host_flag]
subs = subs[subs['mock_idx'] > -1]
host_flag = s[host_flag[s]]
hosts = rename_fields(halo_catalog[host_flag], {'upid':'mock_idx'})
hosts['mock_idx'] = mock_idx[host_flag]
del host_flag, mock_idx, s
subs.sort(order='upid')
idx = np.fromiter(_iter_plateau_in_sorted_array(subs['upid']), \
np.dtype([('start', int), ('stop', int)]))
host_ids = subs['upid'][idx['start']]
if not np.in1d(host_ids, hosts['id'], True).all():
        raise ValueError('Some subhalos associated with the mock galaxies ' \
'have no parent halos in `halo_catalog`. Consider using ' \
'`generate_upid` to fix this.')
subs_idx = np.zeros(len(hosts), dtype=idx.dtype)
subs_idx[np.in1d(hosts['id'], host_ids, True)] = idx
del idx, host_ids
try:
bin_width = float(bin_width)
except (ValueError, TypeError):
bin_width = None
else:
if bin_width <= 0:
bin_width = None
if bin_width is None:
bin_width = 0.1
mi = np.log10(hosts[proxy].min()*0.99999)
ma = np.log10(hosts[proxy].max())
if bins is None:
bins = int(np.ceil((ma-mi)/bin_width))
mi = ma - bin_width*bins
try:
bins = int(bins)
except (ValueError, TypeError):
bins = np.asarray(bins)
if len(bins) < 2 or (bins[1:]<bins[:-1]).any():
raise ValueError('Please specify a valid `bin` parameter.')
else:
bins = np.logspace(mi, ma, bins+1)
pos = np.empty((len(mock_ids), 3), ax_type)
pos.fill(np.nan)
for i, indices in enumerate(_iter_indices_in_bins(bins, hosts[proxy])):
if not len(indices):
continue
if i==0 or i==len(bins):
if (hosts['mock_idx'][indices] > -1).any() or \
any((subs_idx['start'][j] < subs_idx['stop'][j] \
for j in indices)):
                warnings.warn('Some halos associated with the mock catalog ' \
                        'are outside the bin range.', RuntimeWarning)
continue
if shuffle_satellites:
choices = indices.tolist()
for j in indices:
subs_this = subs[slice(*subs_idx[j])]
if not len(subs_this):
continue
mock_idx_this = subs_this['mock_idx']
pos[mock_idx_this] = subs_this[_axes].view((ax_type,3))
if shuffle_satellites:
k = choices.pop(np.random.randint(len(choices)))
pos[mock_idx_this] -= _get_xyz(hosts[j], ax_type)
if rotate_satellites:
pos[mock_idx_this] = \
_apply_rotation(pos[mock_idx_this], box_size)
pos[mock_idx_this] += _get_xyz(hosts[k], ax_type)
if apply_rsd:
pos[mock_idx_this,2] += (subs_this['vz'] \
+ hosts['vz'][k] - hosts['vz'][j])/100.0
else:
if rotate_satellites:
host_pos = _get_xyz(hosts[j], ax_type)
pos[mock_idx_this] -= host_pos
pos[mock_idx_this] = \
_apply_rotation(pos[mock_idx_this], box_size)
pos[mock_idx_this] += host_pos
if apply_rsd:
pos[mock_idx_this,2] += subs_this['vz']/100.0
has_mock = indices[hosts['mock_idx'][indices] > -1]
if not len(has_mock):
continue
mock_idx_this = hosts['mock_idx'][has_mock]
if shuffle_centrals:
has_mock = np.random.choice(indices, len(has_mock), False)
pos[mock_idx_this] = hosts[_axes][has_mock].view((ax_type,3))
if apply_rsd:
pos[mock_idx_this,2] += hosts['vz'][has_mock]/100.0
if np.isnan(pos).any():
warnings.warn('Some galaxies in the mock catalog have not been ' \
'assigned a new position. Maybe the corresponding halo is ' \
'outside the bin range.', RuntimeWarning)
if box_size is not None:
pos = np.remainder(pos, box_size, pos)
if return_structured_array:
pos = pos.view(np.dtype(zip(_axes, [ax_type]*3)))
return pos
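# Usage sketch (illustrative, not part of the original file): the catalog is a
# NumPy structured array carrying at least the fields checked above.  All
# values below are made up.
#
#   dt = np.dtype([('id', np.int64), ('upid', np.int64), ('mvir', np.float64),
#                  ('x', np.float64), ('y', np.float64), ('z', np.float64),
#                  ('vz', np.float64)])
#   halos = np.zeros(3, dtype=dt)
#   halos['id'], halos['upid'] = [1, 2, 3], [-1, -1, 1]  # halo 3 orbits halo 1
#   halos['mvir'] = [1e12, 5e12, 1e11]
#   pos = shuffleMockCatalog(np.array([1, 3]), halos, box_size=250.0)
#   # -> (2, 3) array with the shuffled x, y, z of the two mock galaxies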
| true
| true
|
1c43d5f43d8a82eed84cc58c3d38663d541a8be4
| 831
|
py
|
Python
|
accessible_output/speech/outputs/jaws.py
|
Timtam/cards-against-humanity
|
89ea61b5c9915198b845bbf8a93c3f7827323ceb
|
[
"MIT"
] | 5
|
2017-04-11T00:18:42.000Z
|
2021-08-01T04:27:20.000Z
|
accessible_output/speech/outputs/jaws.py
|
Timtam/cards-against-humanity
|
89ea61b5c9915198b845bbf8a93c3f7827323ceb
|
[
"MIT"
] | 47
|
2017-04-27T18:57:27.000Z
|
2017-07-16T21:18:28.000Z
|
accessible_output/speech/outputs/jaws.py
|
Timtam/cards-against-humanity
|
89ea61b5c9915198b845bbf8a93c3f7827323ceb
|
[
"MIT"
] | 4
|
2018-05-17T12:33:59.000Z
|
2022-02-20T16:08:51.000Z
|
from pywintypes import com_error
import win32gui
import win32com.client
from main import OutputError, ScreenreaderSpeechOutput
class Jaws (ScreenreaderSpeechOutput):
"""Speech output supporting the Jaws for Windows screen reader."""
name = 'Jaws'
def __init__(self, *args, **kwargs):
super (Jaws, self).__init__(*args, **kwargs)
try:
self.object = win32com.client.Dispatch("FreedomSci.JawsApi")
except com_error: #try jfwapi
try:
self.object = win32com.client.Dispatch("jfwapi")
except com_error: #give up
raise OutputError
def speak(self, text, interrupt=False):
self.object.SayString(' %s' % text, interrupt)
def canSpeak(self):
try:
return self.object.SayString('',0) == True or win32gui.FindWindow("JFWUI2", "JAWS") != 0 and super(Jaws, self).canSpeak()
except:
return False
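# Usage sketch (illustrative, not part of the original file).  It assumes a
# Windows machine with JAWS installed and its COM server registered; anywhere
# else the constructor raises OutputError.
if __name__ == '__main__':
    try:
        output = Jaws()
    except OutputError:
        print('JAWS is not available on this machine')
    else:
        if output.canSpeak():
            output.speak('Hello from the game', interrupt=True)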
| 26.806452
| 124
| 0.718412
|
from pywintypes import com_error
import win32gui
import win32com.client
from main import OutputError, ScreenreaderSpeechOutput
class Jaws (ScreenreaderSpeechOutput):
name = 'Jaws'
def __init__(self, *args, **kwargs):
super (Jaws, self).__init__(*args, **kwargs)
try:
self.object = win32com.client.Dispatch("FreedomSci.JawsApi")
except com_error:
try:
self.object = win32com.client.Dispatch("jfwapi")
except com_error:
raise OutputError
def speak(self, text, interrupt=False):
self.object.SayString(' %s' % text, interrupt)
def canSpeak(self):
try:
return self.object.SayString('',0) == True or win32gui.FindWindow("JFWUI2", "JAWS") != 0 and super(Jaws, self).canSpeak()
except:
return False
| true
| true
|
1c43d7479da3c0f98336c0c943df2ebf50f430e0
| 9,059
|
py
|
Python
|
algs/nsga_net/utils/utils.py
|
Beautyya/BenchENA
|
5f5491614fc2f00ca26dc29f35f44c334db4718c
|
[
"MIT"
] | null | null | null |
algs/nsga_net/utils/utils.py
|
Beautyya/BenchENA
|
5f5491614fc2f00ca26dc29f35f44c334db4718c
|
[
"MIT"
] | null | null | null |
algs/nsga_net/utils/utils.py
|
Beautyya/BenchENA
|
5f5491614fc2f00ca26dc29f35f44c334db4718c
|
[
"MIT"
] | null | null | null |
import configparser
import os
import platform
import multiprocessing
from compute.file import get_algo_local_dir, get_local_path
import time
import os
import numpy as np
from algs.nsga_net.utils.statusupdatetool import StatusUpdateTool
from algs.nsga_net.genetic.population import Population, Individual
class Utils(object):
_lock = multiprocessing.Lock()
@classmethod
def get_lock_for_write_fitness(cls):
return cls._lock
@classmethod
def path_replace(cls, input_str):
# input a str, replace '\\' with '/', because the os.path in windows return path with '\\' joining
# please use it after creating a string with both os.path and string '/'
if (platform.system() == 'Windows'):
new_str = input_str.replace('\\', '/')
else: # Linux or Mac
new_str = input_str
return new_str
@classmethod
def load_cache_data(cls):
file_name = '%s/cache.txt' % (os.path.join(get_algo_local_dir(), 'populations'))
file_name = cls.path_replace(file_name)
_map = {}
if os.path.exists(file_name):
f = open(file_name, 'r')
for each_line in f:
rs_ = each_line.strip().split(';')
_map[rs_[0]] = '%.5f' % (float(rs_[1]))
f.close()
return _map
@classmethod
def save_fitness_to_cache(cls, individuals):
        _map = cls.load_cache_data()
for indi in individuals:
_key, _str = indi.uuid()
_acc = indi.acc
_flop = indi.flop
if _key not in _map:
file_name = '%s/cache.txt' % (os.path.join(get_algo_local_dir(), 'populations'))
file_name = cls.path_replace(file_name)
f = open(file_name, 'a+')
_str = '%s;%.5f;%.5f;%s\n' % (_key, _acc, _flop, _str)
f.write(_str)
f.close()
                _map[_key] = '%.5f' % _acc
@classmethod
def save_population_at_begin(cls, _str, gen_no):
file_name = '%s/begin_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), gen_no)
# solve the path differences caused by different platforms
file_name = cls.path_replace(file_name)
with open(file_name, 'w') as f:
f.write(_str)
@classmethod
def save_population_after_mutation(cls, _str, gen_no):
file_name = '%s/mutation_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), gen_no)
file_name = cls.path_replace(file_name)
with open(file_name, 'w') as f:
f.write(_str)
@classmethod
def get_newest_file_based_on_prefix(cls, prefix):
id_list = []
for _, _, file_names in os.walk(os.path.join(get_algo_local_dir(), 'populations')):
for file_name in file_names:
if file_name.startswith(prefix):
number_index = len(prefix) + 1 # the first number index
id_list.append(int(file_name[number_index:number_index + 5]))
if len(id_list) == 0:
return None
else:
return np.max(id_list)
@classmethod
def load_population(cls, prefix, gen_no):
file_name = '%s/%s_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), prefix, np.min(gen_no))
file_name = cls.path_replace(file_name)
params = StatusUpdateTool.get_init_params()
pop = Population(gen_no, params)
f = open(file_name)
indi_start_line = f.readline().strip()
while indi_start_line.startswith('indi'):
indi_no = indi_start_line[5:]
indi = Individual(indi_no, params, params['n_var'])
genome = []
for line in f:
line = line.strip()
if line.startswith('--'):
indi_start_line = f.readline().strip()
break
else:
if line.startswith('Acc'):
indi.acc = float(line[4:])
elif line.startswith('flop'):
indi.flop = float(line[5:])
elif line.startswith('genome'):
print(line)
l = list(line[8:])
while ' ' in l:
l.remove(' ')
while ',' in l:
l.remove(',')
while ']' in l:
l.remove(']')
for i in l:
genome.append(int(i))
elif line.startswith('0') or line.startswith('1'):
print(line)
l = list(line)
while ' ' in l:
l.remove(' ')
while ',' in l:
l.remove(',')
while ']' in l:
l.remove(']')
for i in l:
genome.append(int(i))
else:
print('Unknown key for load unit type, line content:%s' % (line))
indi.genome = np.array(genome)
pop.individuals.append(indi)
f.close()
return pop
@classmethod
def read_template(cls, search_space):
_path = os.path.join(os.path.dirname(__file__), 'template', search_space + '_models.py')
part1 = []
part2 = []
f = open(_path)
f.readline() # skip this comment
line = f.readline().rstrip()
while line.strip() != "#generate_init":
part1.append(line)
line = f.readline().rstrip()
line = f.readline().rstrip() # skip the comment '#generate_forward'
while line.strip() != '"""':
part2.append(line)
line = f.readline().rstrip()
return part1, part2
@classmethod
def generate_micro_pytorch_file(cls, indi, params, test=False):
search_space = "micro"
part1, part2 = cls.read_template(search_space)
line1 = "genome = convert(%s)" % (str(list(indi.genome)))
line2 = "genotype = decode(genome)"
line3 = "self.net = Network(%d, %d, %d, False, genotype)" % \
(params['init_channels'], params['classes'], params['layers'])
_str = []
current_time = time.strftime("%Y-%m-%d %H:%M:%S")
_str.append('"""')
_str.append(current_time)
_str.append('"""')
_str.extend(part1)
_str.append(' %s' % (line1))
_str.append(' %s' % (line2))
_str.append(' %s' % (line3))
_str.extend(part2)
if not test:
file_name = '%s/%s.py' % (os.path.join(get_algo_local_dir(), 'scripts'), indi.id)
else:
file_name = '%s/nsga_micro_%s.py' % (os.path.join(get_local_path(), 'example'), indi.id)
file_name = cls.path_replace(file_name)
if not os.path.exists(os.path.join(get_algo_local_dir(), 'scripts')):
os.makedirs(os.path.join(get_algo_local_dir(), 'scripts'))
script_file_handler = open(file_name, 'w')
script_file_handler.write('\n'.join(_str))
script_file_handler.flush()
script_file_handler.close()
@classmethod
def generate_macro_pytorch_file(cls, indi, channels, params, test=False):
search_space = "macro"
part1, part2 = cls.read_template(search_space)
line1 = "genome = convert(np.array(%s))" % (str(list(indi.genome)))
line2 = "genotype = decode(genome)"
line3 = "channels = %s" % (str(channels))
line4 = "self.net = EvoNetwork(genotype, channels, %d, (%d, %d), decoder='residual')" % \
(params['classes'], StatusUpdateTool.get_input_weight(), StatusUpdateTool.get_input_height())
_str = []
current_time = time.strftime("%Y-%m-%d %H:%M:%S")
_str.append('"""')
_str.append(current_time)
_str.append('"""')
_str.extend(part1)
_str.append(' %s' % line1)
_str.append(' %s' % line2)
_str.append(' %s' % line3)
_str.append(' %s' % line4)
_str.extend(part2)
if not test:
file_name = '%s/%s.py' % (os.path.join(get_algo_local_dir(), 'scripts'), indi.id)
else:
file_name = '%s/nsga_macro_%s.py' % (os.path.join(get_local_path(), 'example'), indi.id)
file_name = cls.path_replace(file_name)
if not os.path.exists(os.path.join(get_algo_local_dir(), 'scripts')):
os.makedirs(os.path.join(get_algo_local_dir(), 'scripts'))
script_file_handler = open(file_name, 'w')
script_file_handler.write('\n'.join(_str))
script_file_handler.flush()
script_file_handler.close()
@classmethod
def write_to_file(cls, _str, _file):
f = open(_file, 'w')
f.write(_str)
f.flush()
f.close()
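# Cache-format sketch (illustrative, not part of the original file): each line
# of populations/cache.txt is "uuid;acc;flop;genome", and load_cache_data()
# keeps only the uuid -> acc mapping.  The line below is made up.
if __name__ == '__main__':
    demo_line = 'abc123;0.91234;1.23456;[0, 1, 1, 0]'
    rs_ = demo_line.strip().split(';')
    print({rs_[0]: '%.5f' % float(rs_[1])})  # {'abc123': '0.91234'}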
| 40.084071
| 114
| 0.535821
|
import configparser
import os
import platform
import multiprocessing
from compute.file import get_algo_local_dir, get_local_path
import time
import os
import numpy as np
from algs.nsga_net.utils.statusupdatetool import StatusUpdateTool
from algs.nsga_net.genetic.population import Population, Individual
class Utils(object):
_lock = multiprocessing.Lock()
@classmethod
def get_lock_for_write_fitness(cls):
return cls._lock
@classmethod
def path_replace(cls, input_str):
if (platform.system() == 'Windows'):
new_str = input_str.replace('\\', '/')
else:
new_str = input_str
return new_str
@classmethod
def load_cache_data(cls):
file_name = '%s/cache.txt' % (os.path.join(get_algo_local_dir(), 'populations'))
file_name = cls.path_replace(file_name)
_map = {}
if os.path.exists(file_name):
f = open(file_name, 'r')
for each_line in f:
rs_ = each_line.strip().split(';')
_map[rs_[0]] = '%.5f' % (float(rs_[1]))
f.close()
return _map
@classmethod
def save_fitness_to_cache(cls, individuals):
        _map = cls.load_cache_data()
for indi in individuals:
_key, _str = indi.uuid()
_acc = indi.acc
_flop = indi.flop
if _key not in _map:
file_name = '%s/cache.txt' % (os.path.join(get_algo_local_dir(), 'populations'))
file_name = cls.path_replace(file_name)
f = open(file_name, 'a+')
_str = '%s;%.5f;%.5f;%s\n' % (_key, _acc, _flop, _str)
f.write(_str)
f.close()
                _map[_key] = '%.5f' % _acc
@classmethod
def save_population_at_begin(cls, _str, gen_no):
file_name = '%s/begin_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), gen_no)
file_name = cls.path_replace(file_name)
with open(file_name, 'w') as f:
f.write(_str)
@classmethod
def save_population_after_mutation(cls, _str, gen_no):
file_name = '%s/mutation_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), gen_no)
file_name = cls.path_replace(file_name)
with open(file_name, 'w') as f:
f.write(_str)
@classmethod
def get_newest_file_based_on_prefix(cls, prefix):
id_list = []
for _, _, file_names in os.walk(os.path.join(get_algo_local_dir(), 'populations')):
for file_name in file_names:
if file_name.startswith(prefix):
number_index = len(prefix) + 1
id_list.append(int(file_name[number_index:number_index + 5]))
if len(id_list) == 0:
return None
else:
return np.max(id_list)
@classmethod
def load_population(cls, prefix, gen_no):
file_name = '%s/%s_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), prefix, np.min(gen_no))
file_name = cls.path_replace(file_name)
params = StatusUpdateTool.get_init_params()
pop = Population(gen_no, params)
f = open(file_name)
indi_start_line = f.readline().strip()
while indi_start_line.startswith('indi'):
indi_no = indi_start_line[5:]
indi = Individual(indi_no, params, params['n_var'])
genome = []
for line in f:
line = line.strip()
if line.startswith('--'):
indi_start_line = f.readline().strip()
break
else:
if line.startswith('Acc'):
indi.acc = float(line[4:])
elif line.startswith('flop'):
indi.flop = float(line[5:])
elif line.startswith('genome'):
print(line)
l = list(line[8:])
while ' ' in l:
l.remove(' ')
while ',' in l:
l.remove(',')
while ']' in l:
l.remove(']')
for i in l:
genome.append(int(i))
elif line.startswith('0') or line.startswith('1'):
print(line)
l = list(line)
while ' ' in l:
l.remove(' ')
while ',' in l:
l.remove(',')
while ']' in l:
l.remove(']')
for i in l:
genome.append(int(i))
else:
print('Unknown key for load unit type, line content:%s' % (line))
indi.genome = np.array(genome)
pop.individuals.append(indi)
f.close()
return pop
@classmethod
def read_template(cls, search_space):
_path = os.path.join(os.path.dirname(__file__), 'template', search_space + '_models.py')
part1 = []
part2 = []
f = open(_path)
f.readline()
line = f.readline().rstrip()
while line.strip() != "#generate_init":
part1.append(line)
line = f.readline().rstrip()
line = f.readline().rstrip()
while line.strip() != '"""':
part2.append(line)
line = f.readline().rstrip()
return part1, part2
@classmethod
def generate_micro_pytorch_file(cls, indi, params, test=False):
search_space = "micro"
part1, part2 = cls.read_template(search_space)
line1 = "genome = convert(%s)" % (str(list(indi.genome)))
line2 = "genotype = decode(genome)"
line3 = "self.net = Network(%d, %d, %d, False, genotype)" % \
(params['init_channels'], params['classes'], params['layers'])
_str = []
current_time = time.strftime("%Y-%m-%d %H:%M:%S")
_str.append('"""')
_str.append(current_time)
_str.append('"""')
_str.extend(part1)
_str.append(' %s' % (line1))
_str.append(' %s' % (line2))
_str.append(' %s' % (line3))
_str.extend(part2)
if not test:
file_name = '%s/%s.py' % (os.path.join(get_algo_local_dir(), 'scripts'), indi.id)
else:
file_name = '%s/nsga_micro_%s.py' % (os.path.join(get_local_path(), 'example'), indi.id)
file_name = cls.path_replace(file_name)
if not os.path.exists(os.path.join(get_algo_local_dir(), 'scripts')):
os.makedirs(os.path.join(get_algo_local_dir(), 'scripts'))
script_file_handler = open(file_name, 'w')
script_file_handler.write('\n'.join(_str))
script_file_handler.flush()
script_file_handler.close()
@classmethod
def generate_macro_pytorch_file(cls, indi, channels, params, test=False):
search_space = "macro"
part1, part2 = cls.read_template(search_space)
line1 = "genome = convert(np.array(%s))" % (str(list(indi.genome)))
line2 = "genotype = decode(genome)"
line3 = "channels = %s" % (str(channels))
line4 = "self.net = EvoNetwork(genotype, channels, %d, (%d, %d), decoder='residual')" % \
(params['classes'], StatusUpdateTool.get_input_weight(), StatusUpdateTool.get_input_height())
_str = []
current_time = time.strftime("%Y-%m-%d %H:%M:%S")
_str.append('"""')
_str.append(current_time)
_str.append('"""')
_str.extend(part1)
_str.append(' %s' % line1)
_str.append(' %s' % line2)
_str.append(' %s' % line3)
_str.append(' %s' % line4)
_str.extend(part2)
if not test:
file_name = '%s/%s.py' % (os.path.join(get_algo_local_dir(), 'scripts'), indi.id)
else:
file_name = '%s/nsga_macro_%s.py' % (os.path.join(get_local_path(), 'example'), indi.id)
file_name = cls.path_replace(file_name)
if not os.path.exists(os.path.join(get_algo_local_dir(), 'scripts')):
os.makedirs(os.path.join(get_algo_local_dir(), 'scripts'))
script_file_handler = open(file_name, 'w')
script_file_handler.write('\n'.join(_str))
script_file_handler.flush()
script_file_handler.close()
@classmethod
def write_to_file(cls, _str, _file):
f = open(_file, 'w')
f.write(_str)
f.flush()
f.close()
| true
| true
|
1c43d78b35231ae2efb4db918cbc7bf068cee45e
| 4,547
|
py
|
Python
|
examples/mfa_extraction/fix_mismatch.py
|
geneing/TensorFlowTTS
|
0035ba00fec1b2b1184c8df32646d6a88b01ee5b
|
[
"Apache-2.0"
] | null | null | null |
examples/mfa_extraction/fix_mismatch.py
|
geneing/TensorFlowTTS
|
0035ba00fec1b2b1184c8df32646d6a88b01ee5b
|
[
"Apache-2.0"
] | null | null | null |
examples/mfa_extraction/fix_mismatch.py
|
geneing/TensorFlowTTS
|
0035ba00fec1b2b1184c8df32646d6a88b01ee5b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fix mismatch between sum durations and mel lengths."""
import numpy as np
import os
from tqdm import tqdm
import click
import logging
import sys
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@click.command()
@click.option("--base_path", default="dump")
@click.option("--trimmed_dur_path", default="dataset/trimmed-durations")
@click.option("--dur_path", default="dataset/durations")
@click.option("--use_norm", default="f")
def fix(base_path: str, dur_path: str, trimmed_dur_path: str, use_norm: str):
for t in ["train", "valid"]:
mfa_longer = []
mfa_shorter = []
big_diff = []
not_fixed = []
pre_path = os.path.join(base_path, t)
os.makedirs(os.path.join(pre_path, "fix_dur"), exist_ok=True)
os.makedirs(os.path.join(pre_path, "phids"), exist_ok=True)
logging.info(f"FIXING {t} set ...\n")
base = lambda s: s.replace('-ids.npy','')
for i in tqdm(os.listdir(os.path.join(pre_path, "ids"))):
if use_norm == "t":
mel = np.load(
os.path.join(
pre_path, "norm-feats", f"{base(i)}-norm-feats.npy"
)
)
else:
mel = np.load(
os.path.join(
pre_path, "raw-feats", f"{base(i)}-raw-feats.npy"
)
)
try:
dur = np.load(
os.path.join(trimmed_dur_path, f"{base(i)}-durations.npy")
)
except:
dur = np.load(
os.path.join(dur_path, f"{base(i)}-durations.npy")
)
ph_ids = np.load(os.path.join(dur_path, f"{base(i)}-phids.npy"))
l_mel = len(mel)
dur_s = np.sum(dur)
cloned = np.array(dur, copy=True)
diff = abs(l_mel - dur_s)
            if abs(l_mel - dur_s) > 30:  # more than 300 ms
big_diff.append([i, abs(l_mel - dur_s)])
if dur_s > l_mel:
for j in range(1, len(dur) - 1):
if diff == 0:
break
dur_val = cloned[-j]
if dur_val >= diff:
cloned[-j] -= diff
diff -= dur_val
break
else:
cloned[-j] = 0
diff -= dur_val
if j == len(dur) - 2:
not_fixed.append(i)
mfa_longer.append(abs(l_mel - dur_s))
elif dur_s < l_mel:
cloned[-1] += diff
mfa_shorter.append(abs(l_mel - dur_s))
np.save(
os.path.join(pre_path, "fix_dur", f"{base(i)}-durations.npy"),
cloned.astype(np.int32),
allow_pickle=False,
)
np.save(
os.path.join(pre_path, "phids", f"{base(i)}-phids.npy"),
ph_ids,
allow_pickle=False,
)
logging.info(
f"{t} stats: number of mfa with longer duration: {len(mfa_longer)}, total diff: {sum(mfa_longer)}"
f", mean diff: {sum(mfa_longer)/len(mfa_longer) if len(mfa_longer) > 0 else 0}"
)
logging.info(
f"{t} stats: number of mfa with shorter duration: {len(mfa_shorter)}, total diff: {sum(mfa_shorter)}"
f", mean diff: {sum(mfa_shorter)/len(mfa_shorter) if len(mfa_shorter) > 0 else 0}"
)
logging.info(
f"{t} stats: number of files with a ''big'' duration diff: {len(big_diff)} if number>1 you should check it"
)
logging.info(f"{t} stats: not fixed len: {len(not_fixed)}\n")
if __name__ == "__main__":
fix()
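# Worked sketch of the trimming loop above (illustrative, not part of the
# original file), with made-up numbers:
#
#   dur    = [10, 20, 30, 43]   sum = 103, l_mel = 100, diff = 3
#   cloned = [10, 20, 30, 40]   after cloned[-1] -= 3; the sums now match
#
# In the opposite case (dur_s < l_mel) the whole deficit is simply added to
# the last duration entry.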
| 34.44697
| 119
| 0.520563
|
import numpy as np
import os
from tqdm import tqdm
import click
import logging
import sys
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@click.command()
@click.option("--base_path", default="dump")
@click.option("--trimmed_dur_path", default="dataset/trimmed-durations")
@click.option("--dur_path", default="dataset/durations")
@click.option("--use_norm", default="f")
def fix(base_path: str, dur_path: str, trimmed_dur_path: str, use_norm: str):
for t in ["train", "valid"]:
mfa_longer = []
mfa_shorter = []
big_diff = []
not_fixed = []
pre_path = os.path.join(base_path, t)
os.makedirs(os.path.join(pre_path, "fix_dur"), exist_ok=True)
os.makedirs(os.path.join(pre_path, "phids"), exist_ok=True)
logging.info(f"FIXING {t} set ...\n")
base = lambda s: s.replace('-ids.npy','')
for i in tqdm(os.listdir(os.path.join(pre_path, "ids"))):
if use_norm == "t":
mel = np.load(
os.path.join(
pre_path, "norm-feats", f"{base(i)}-norm-feats.npy"
)
)
else:
mel = np.load(
os.path.join(
pre_path, "raw-feats", f"{base(i)}-raw-feats.npy"
)
)
try:
dur = np.load(
os.path.join(trimmed_dur_path, f"{base(i)}-durations.npy")
)
except:
dur = np.load(
os.path.join(dur_path, f"{base(i)}-durations.npy")
)
ph_ids = np.load(os.path.join(dur_path, f"{base(i)}-phids.npy"))
l_mel = len(mel)
dur_s = np.sum(dur)
cloned = np.array(dur, copy=True)
diff = abs(l_mel - dur_s)
if abs(l_mel - dur_s) > 30:
big_diff.append([i, abs(l_mel - dur_s)])
if dur_s > l_mel:
for j in range(1, len(dur) - 1):
if diff == 0:
break
dur_val = cloned[-j]
if dur_val >= diff:
cloned[-j] -= diff
diff -= dur_val
break
else:
cloned[-j] = 0
diff -= dur_val
if j == len(dur) - 2:
not_fixed.append(i)
mfa_longer.append(abs(l_mel - dur_s))
elif dur_s < l_mel:
cloned[-1] += diff
mfa_shorter.append(abs(l_mel - dur_s))
np.save(
os.path.join(pre_path, "fix_dur", f"{base(i)}-durations.npy"),
cloned.astype(np.int32),
allow_pickle=False,
)
np.save(
os.path.join(pre_path, "phids", f"{base(i)}-phids.npy"),
ph_ids,
allow_pickle=False,
)
logging.info(
f"{t} stats: number of mfa with longer duration: {len(mfa_longer)}, total diff: {sum(mfa_longer)}"
f", mean diff: {sum(mfa_longer)/len(mfa_longer) if len(mfa_longer) > 0 else 0}"
)
logging.info(
f"{t} stats: number of mfa with shorter duration: {len(mfa_shorter)}, total diff: {sum(mfa_shorter)}"
f", mean diff: {sum(mfa_shorter)/len(mfa_shorter) if len(mfa_shorter) > 0 else 0}"
)
logging.info(
f"{t} stats: number of files with a ''big'' duration diff: {len(big_diff)} if number>1 you should check it"
)
logging.info(f"{t} stats: not fixed len: {len(not_fixed)}\n")
if __name__ == "__main__":
fix()
| true
| true
|
1c43dadf8de6f9e1d56a4de21587cb982ee0979e
| 3,531
|
py
|
Python
|
profiles_project/settings.py
|
CGarcia8CG/profiles-resst-api
|
2a31f66f875f006a437865999fb5dd63049b14ae
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
CGarcia8CG/profiles-resst-api
|
2a31f66f875f006a437865999fb5dd63049b14ae
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
CGarcia8CG/profiles-resst-api
|
2a31f66f875f006a437865999fb5dd63049b14ae
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u8&5lcphj96%8)2qf1bj*73i@p_p%_drs0$xrj44@o&6*txak!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
#Change the default user manager to the one created in models.py IMPORTANT
AUTH_USER_MODEL = 'profiles_api.UserProfile'
#After this, run the following in the console:
#1 python3 manage.py makemigrations "nombre_api"
#2 python3 manage.py migrate
#3 Result: the DB is synchronized
#4 python manage.py createsuperuser --> create a superuser
#5 Enable Django admin
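# Usage sketch (illustrative, not part of the original file): with
# AUTH_USER_MODEL set as above, other code should resolve the user class via
# Django rather than importing profiles_api.models directly.  The exact
# create_user() arguments depend on the custom manager defined in models.py.
#
#   from django.contrib.auth import get_user_model
#   UserProfile = get_user_model()   # -> profiles_api.UserProfile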
| 26.548872
| 91
| 0.708581
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'u8&5lcphj96%8)2qf1bj*73i@p_p%_drs0$xrj44@o&6*txak!'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
#Change the default user manager to the one created in models.py IMPORTANT
AUTH_USER_MODEL = 'profiles_api.UserProfile'
#After this, run the following in the console:
#1 python3 manage.py makemigrations "nombre_api"
#2 python3 manage.py migrate
#3 Result: the DB is synchronized
#4 python manage.py createsuperuser --> create a superuser
#5 Enable Django admin
| true
| true
|
1c43db2740e3ecabf360bbab3b871d70e31d5cf0
| 938
|
py
|
Python
|
composite.py
|
sloev/photobooth_web
|
ed2799f30f43dbc8042476c3f9238ffb39ead3b5
|
[
"MIT"
] | null | null | null |
composite.py
|
sloev/photobooth_web
|
ed2799f30f43dbc8042476c3f9238ffb39ead3b5
|
[
"MIT"
] | null | null | null |
composite.py
|
sloev/photobooth_web
|
ed2799f30f43dbc8042476c3f9238ffb39ead3b5
|
[
"MIT"
] | null | null | null |
from PIL import Image
import os
MAX_COLUMNS = 5
INPUT_DIR = './segments/'
images = [Image.open(INPUT_DIR + filename) for filename in sorted(os.listdir(INPUT_DIR)) if filename.endswith('.png')]
MAX_ROWS = int(len(images)/MAX_COLUMNS) + (1 if len(images) % MAX_COLUMNS != 0 else 0)
img_width, img_height = images[0].size
print(MAX_COLUMNS, MAX_ROWS, img_width,img_height)
background = Image.new('RGBA', ((img_width * MAX_COLUMNS) + img_width, (img_height * MAX_ROWS)), color='black')
bg_width, bg_height = background.size
x_index = 0
y_index = 0
column = 0
for index, image in enumerate(images):
this_width = image.size[0]
if x_index == 0:
x_index = this_width
background.paste(image, (x_index, y_index), image)
x_index += this_width-20
if x_index + img_width > bg_width:
y_index += int(img_height - (img_height/2))
column += 1
x_index = int(this_width)
background.save("composite.png")
| 29.3125
| 118
| 0.689765
|
from PIL import Image
import os
MAX_COLUMNS = 5
INPUT_DIR = './segments/'
images = [Image.open(INPUT_DIR + filename) for filename in sorted(os.listdir(INPUT_DIR)) if filename.endswith('.png')]
MAX_ROWS = int(len(images)/MAX_COLUMNS) + (1 if len(images) % MAX_COLUMNS != 0 else 0)
img_width, img_height = images[0].size
print(MAX_COLUMNS, MAX_ROWS, img_width,img_height)
background = Image.new('RGBA', ((img_width * MAX_COLUMNS) + img_width, (img_height * MAX_ROWS)), color='black')
bg_width, bg_height = background.size
x_index = 0
y_index = 0
column = 0
for index, image in enumerate(images):
this_width = image.size[0]
if x_index == 0:
x_index = this_width
background.paste(image, (x_index, y_index), image)
x_index += this_width-20
if x_index + img_width > bg_width:
y_index += int(img_height - (img_height/2))
column += 1
x_index = int(this_width)
background.save("composite.png")
| true
| true
|
1c43db677624e3a656cc3cea81f95f7b0f6b3c81
| 353
|
py
|
Python
|
bitcoin/metrics.py
|
darbik/work
|
7f5640822fc5bbbd4033385d6377878b22785cb2
|
[
"MIT"
] | null | null | null |
bitcoin/metrics.py
|
darbik/work
|
7f5640822fc5bbbd4033385d6377878b22785cb2
|
[
"MIT"
] | 3
|
2016-08-04T18:12:05.000Z
|
2016-08-09T16:55:09.000Z
|
bitcoin/metrics.py
|
darbik/bitcoin
|
7f5640822fc5bbbd4033385d6377878b22785cb2
|
[
"MIT"
] | null | null | null |
from time import strftime, gmtime
def get_volume(atmid, price, amount):
volume = (atmid, price, amount)
return volume
def get_time(atmid):
    time = (atmid, strftime("%Y-%m-%d %H:%M:%S", gmtime()))  # time format is in UTC
return time
def get_fees(atmid, price, amount):
feesMade = (atmid, (amount / price) * 0.05)
return feesMade
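# Usage sketch (illustrative, not part of the original file): made-up numbers.
if __name__ == '__main__':
    print(get_volume('atm-1', 50000.0, 100.0))  # ('atm-1', 50000.0, 100.0)
    print(get_fees('atm-1', 50000.0, 100.0))    # ('atm-1', 0.0001) = 5% of 100/50000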
| 16.809524
| 79
| 0.620397
|
from time import strftime, gmtime
def get_volume(atmid, price, amount):
volume = (atmid, price, amount)
return volume
def get_time(atmid):
    time = (atmid, strftime("%Y-%m-%d %H:%M:%S", gmtime()))
return time
def get_fees(atmid, price, amount):
feesMade = (atmid, (amount / price) * 0.05)
return feesMade
| true
| true
|
1c43dbee4862a38bad9336f123d8fa764442b2cb
| 8,629
|
py
|
Python
|
torch_complex/complex_operation.py
|
veya2ztn/mltool
|
4ed151152845ebe3de128e1f53c478581c1492e4
|
[
"IJG"
] | null | null | null |
torch_complex/complex_operation.py
|
veya2ztn/mltool
|
4ed151152845ebe3de128e1f53c478581c1492e4
|
[
"IJG"
] | null | null | null |
torch_complex/complex_operation.py
|
veya2ztn/mltool
|
4ed151152845ebe3de128e1f53c478581c1492e4
|
[
"IJG"
] | null | null | null |
import numpy as np
import torch
import torch.nn.functional as F
def complex_mul(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:
    '''
    :param tensor_1, tensor_2: tensors of shape [..., 2] whose last axis holds the real and imaginary parts
    '''
if mode == 'cc':
assert tensor_1.shape[-1]==2
assert tensor_2.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2,imag2=tensor_2[...,0],tensor_2[...,1]
return torch.stack([real1 * real2 - imag1 * imag2, real1 * imag2 + imag1 * real2], dim = -1)
elif mode=='cr':
assert tensor_1.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2 =tensor_2
return torch.stack([real1 * real2, imag1 * real2], dim = -1)
elif mode=='rc':
assert tensor_2.shape[-1]==2
real1,imag1=tensor_2[...,0],tensor_2[...,1]
real2 =tensor_1
return torch.stack([real1 * real2, imag1 * real2], dim = -1)
else:
raise NotImplementedError
def complex_mm(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:
if mode == 'cc':
assert tensor_1.shape[-1]==2
assert tensor_2.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2,imag2=tensor_2[...,0],tensor_2[...,1]
return torch.stack([torch.matmul(real1, real2) - torch.matmul(imag1, imag2),
torch.matmul(real1, imag2) + torch.matmul(imag1, real2)], dim = -1)
elif mode=='cr':
assert tensor_1.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2 =tensor_2
return torch.stack([real1.mm(real2), imag1.mm(real2)], dim = -1)
elif mode=='rc':
        assert tensor_2.shape[-1]==2
real1,imag1=tensor_2[...,0],tensor_2[...,1]
real2 =tensor_1
return torch.stack([real1.mm(real2), imag1.mm(real2)], dim = -1)
else:
raise NotImplementedError
def complex_mv(matrix: torch.Tensor,vector: torch.Tensor,mode='cc')-> torch.Tensor:
if mode == 'cc':
assert matrix.shape[-1]==2
assert vector.shape[-1]==2
real1,imag1=matrix[...,0],matrix[...,1]
real2,imag2=vector[...,0],vector[...,1]
return torch.stack([real1.mv(real2) - imag1.mv(imag2), real1.mv(imag2) + imag1.mv(real2)], dim = -1)
elif mode=='cr':
assert matrix.shape[-1]==2
real1,imag1=matrix[...,0],matrix[...,1]
real2 =vector
return torch.stack([real1.mv(real2), imag1.mv(real2)], dim = -1)
else:
raise NotImplementedError
def complex_div(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:
if mode == 'cc':
assert tensor_1.shape[-1]==2
assert tensor_2.shape[-1]==2
a,b=tensor_1[...,0],tensor_1[...,1]
c,d=tensor_2[...,0],tensor_2[...,1]
Denominator = c**2+d**2
return torch.stack([(a * c + b * d)/Denominator, (b*c-a*d)/Denominator], dim = -1)
elif mode=='cr':
assert tensor_1.shape[-1]==2
a,b=tensor_1[...,0],tensor_1[...,1]
c =tensor_2
return torch.stack([a/c,b/c], dim = -1)
else:
raise NotImplementedError
def complex_conj(tensor_1: torch.Tensor)-> torch.Tensor:
assert tensor_1.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
imag1=-imag1
return torch.stack([real1,imag1], dim = -1)
def complex_polar(tensor: torch.Tensor)-> torch.Tensor:
assert tensor.shape[-1]==2
real,imag=tensor[...,0],tensor[...,1]
radius = torch.norm(tensor,dim=-1)
    angles = torch.atan2(imag, real)
return torch.stack([radius,angles],dim=-1)
def complex_exp(tensor: torch.Tensor,angle_unit=1)-> torch.Tensor:
assert tensor.shape[-1]==2
factor,angles=tensor[...,0],tensor[...,1]
radius = torch.exp(factor)
angles = angles*angle_unit
direct = torch.stack([angles.cos(),angles.sin()],dim=-1)
return complex_mul(direct,radius,'cr')
def complex_polar_ln(tensor: torch.Tensor):
assert tensor.shape[-1]==2
real,imag=tensor[...,0],tensor[...,1]
radius = torch.norm(tensor,dim=-1).log()
    angles = torch.atan2(imag, real)
return radius,angles
def complex_tch2np(tch: torch.Tensor)->np.ndarray:
assert tch.shape[-1]==2
out=tch.detach().numpy()
return out[...,0]+1j*out[...,1]
def complex_np2tch(npx:np.ndarray)-> torch.Tensor:
real = torch.Tensor(np.real(npx))
imag = torch.Tensor(np.imag(npx))
return torch.stack([real,imag],dim=-1)
def complex_conv2d(inputs,filters,bias=None,**kargs):
assert len(inputs.shape)==5
assert len(filters.shape)==5
assert inputs.shape[-1]==2
assert filters.shape[-1]==2
convfun = lambda x,w,b:F.conv2d(x,w,b,**kargs)
x_r,x_i=inputs[...,0],inputs[...,1]
w_r,w_i=filters[...,0],filters[...,1]
b_r=b_i=None
if bias is not None:
assert bias.shape[-1]==2
b_r,b_i = bias[...,0],bias[...,1]
o_r = convfun(x_r,w_r,b_r) - convfun(x_i,w_i,None)
o_i = convfun(x_r,w_i,b_i) + convfun(x_i,w_r,None)
### another implement
## but with very slow performance
# o_r = F.conv3d(_inputs*torch.Tensor([1,-1]),_filter,stride=(stride,stride,1),padding=(padding,padding,0))
# o_i = F.conv3d(_inputs,_filter.flip(-1),stride=(stride,stride,1),padding=(padding,padding,0))
return torch.stack([o_r, o_i], dim = -1)
def complex_conv1d(inputs,filters,bias=None,**kargs):
assert len(inputs.shape)==4
assert len(filters.shape)==4
assert inputs.shape[-1]==2
assert filters.shape[-1]==2
convfun = lambda x,w,b:F.conv1d(x,w,b,**kargs)
x_r,x_i=inputs[...,0],inputs[...,1]
w_r,w_i=filters[...,0],filters[...,1]
b_r=b_i=None
if bias is not None:
assert bias.shape[-1]==2
b_r,b_i = bias[...,0],bias[...,1]
o_r = convfun(x_r,w_r,b_r) - convfun(x_i,w_i,None)
o_i = convfun(x_r,w_i,b_i) + convfun(x_i,w_r,None)
return torch.stack([o_r, o_i], dim = -1)
# def complex_tanh(tensor:torch.Tensor)-> torch.Tensor:
# #tensor = F.softplus(tensor) # avoid inf
# x,y = tensor.split(1,dim=-1)
# x = 2*x
# y = 2*y
# real = x.tanh()/(y.cos()/x.cosh() +1)
# imag = y.sin()/(y.cos() + x.cosh() + 1e-8)
# #real = x.sinh()/n
# #imag = y.sin()/n
# return torch.cat([real, imag], dim = -1)
class ComplexTanh(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
ctx.save_for_backward(input)
x,y = input.split(1,dim=-1)
x = 2*x
y = 2*y
real = x.tanh()/(y.cos()/x.cosh() +1)
imag = y.sin()/(y.cos() + x.cosh() + 1e-8)
return torch.cat([real, imag], dim = -1)
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
f(x,y) = tanh(z) = u(x,y)+1j*v(x,y)
grad_matrix =| \partial u |\partial u |
| ---------- |---------- |
| \partial x |\partial y |
| --- | --- |
| \partial v |\partial v |
| ---------- |---------- |
| \partial x |\partial y |
"""
input, = ctx.saved_tensors
x,y = input.split(1,dim=-1)
x = 2*x
y = 2*y
ys = y.sin()
yc = y.cos()
xch= x.cosh()
xth= x.tanh()
n = (1+yc/xch)**2
ux = 2 +2*yc/xch-2*xth**2
uy = 2*(ys/xch)*xth
ux = ux/n
uy = uy/n
vx = -uy
vy = ux
u,v= grad_output.split(1,dim=-1)
real = u*ux+v*vx
        imag = -u*uy - v*vy  # the minus sign is required for the complex-valued gradient.
return torch.cat([real,imag],-1)
complex_tanh = ComplexTanh.apply
def complex_sigmoid(tensor:torch.Tensor)-> torch.Tensor:
x,y = tensor.split(1,dim=-1)
x = torch.exp(-x)
a = 1+x*y.cos()
b = x*y.sin()
n = a**2+b**2+ 1e-8
return torch.cat([a/n, b/n], dim = -1)
def complexize(tensor: torch.Tensor)-> torch.Tensor:
'''
real to complex
'''
if tensor.shape[-1] == 2:return tensor
imag = torch.zeros_like(tensor)
return torch.stack([tensor,imag],-1)
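# Usage sketch (illustrative, not part of the original file): cross-check
# complex_mul against NumPy's native complex multiplication.
if __name__ == '__main__':
    a = np.array([1 + 2j, 3 - 1j])
    b = np.array([0.5 - 1j, 2 + 2j])
    ours = complex_tch2np(complex_mul(complex_np2tch(a), complex_np2tch(b)))
    print(np.allclose(ours, a * b))  # True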
| 35.510288
| 111
| 0.571909
|
import numpy as np
import torch
import torch.nn.functional as F
def complex_mul(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:
if mode == 'cc':
assert tensor_1.shape[-1]==2
assert tensor_2.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2,imag2=tensor_2[...,0],tensor_2[...,1]
return torch.stack([real1 * real2 - imag1 * imag2, real1 * imag2 + imag1 * real2], dim = -1)
elif mode=='cr':
assert tensor_1.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2 =tensor_2
return torch.stack([real1 * real2, imag1 * real2], dim = -1)
elif mode=='rc':
assert tensor_2.shape[-1]==2
real1,imag1=tensor_2[...,0],tensor_2[...,1]
real2 =tensor_1
return torch.stack([real1 * real2, imag1 * real2], dim = -1)
else:
raise NotImplementedError
def complex_mm(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:
if mode == 'cc':
assert tensor_1.shape[-1]==2
assert tensor_2.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2,imag2=tensor_2[...,0],tensor_2[...,1]
return torch.stack([torch.matmul(real1, real2) - torch.matmul(imag1, imag2),
torch.matmul(real1, imag2) + torch.matmul(imag1, real2)], dim = -1)
elif mode=='cr':
assert tensor_1.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
real2 =tensor_2
return torch.stack([real1.mm(real2), imag1.mm(real2)], dim = -1)
elif mode=='rc':
        assert tensor_2.shape[-1]==2
real1,imag1=tensor_2[...,0],tensor_2[...,1]
real2 =tensor_1
return torch.stack([real1.mm(real2), imag1.mm(real2)], dim = -1)
else:
raise NotImplementedError
def complex_mv(matrix: torch.Tensor,vector: torch.Tensor,mode='cc')-> torch.Tensor:
if mode == 'cc':
assert matrix.shape[-1]==2
assert vector.shape[-1]==2
real1,imag1=matrix[...,0],matrix[...,1]
real2,imag2=vector[...,0],vector[...,1]
return torch.stack([real1.mv(real2) - imag1.mv(imag2), real1.mv(imag2) + imag1.mv(real2)], dim = -1)
elif mode=='cr':
assert matrix.shape[-1]==2
real1,imag1=matrix[...,0],matrix[...,1]
real2 =vector
return torch.stack([real1.mv(real2), imag1.mv(real2)], dim = -1)
else:
raise NotImplementedError
def complex_div(tensor_1: torch.Tensor,tensor_2: torch.Tensor,mode='cc')-> torch.Tensor:
if mode == 'cc':
assert tensor_1.shape[-1]==2
assert tensor_2.shape[-1]==2
a,b=tensor_1[...,0],tensor_1[...,1]
c,d=tensor_2[...,0],tensor_2[...,1]
Denominator = c**2+d**2
return torch.stack([(a * c + b * d)/Denominator, (b*c-a*d)/Denominator], dim = -1)
elif mode=='cr':
assert tensor_1.shape[-1]==2
a,b=tensor_1[...,0],tensor_1[...,1]
c =tensor_2
return torch.stack([a/c,b/c], dim = -1)
else:
raise NotImplementedError
def complex_conj(tensor_1: torch.Tensor)-> torch.Tensor:
assert tensor_1.shape[-1]==2
real1,imag1=tensor_1[...,0],tensor_1[...,1]
imag1=-imag1
return torch.stack([real1,imag1], dim = -1)
def complex_polar(tensor: torch.Tensor)-> torch.Tensor:
assert tensor.shape[-1]==2
real,imag=tensor[...,0],tensor[...,1]
radius = torch.norm(tensor,dim=-1)
    angles = torch.atan2(imag, real)
return torch.stack([radius,angles],dim=-1)
def complex_exp(tensor: torch.Tensor,angle_unit=1)-> torch.Tensor:
assert tensor.shape[-1]==2
factor,angles=tensor[...,0],tensor[...,1]
radius = torch.exp(factor)
angles = angles*angle_unit
direct = torch.stack([angles.cos(),angles.sin()],dim=-1)
return complex_mul(direct,radius,'cr')
def complex_polar_ln(tensor: torch.Tensor):
assert tensor.shape[-1]==2
real,imag=tensor[...,0],tensor[...,1]
radius = torch.norm(tensor,dim=-1).log()
    angles = torch.atan2(imag, real)
return radius,angles
def complex_tch2np(tch: torch.Tensor)->np.ndarray:
assert tch.shape[-1]==2
out=tch.detach().numpy()
return out[...,0]+1j*out[...,1]
def complex_np2tch(npx:np.ndarray)-> torch.Tensor:
real = torch.Tensor(np.real(npx))
imag = torch.Tensor(np.imag(npx))
return torch.stack([real,imag],dim=-1)
def complex_conv2d(inputs,filters,bias=None,**kargs):
assert len(inputs.shape)==5
assert len(filters.shape)==5
assert inputs.shape[-1]==2
assert filters.shape[-1]==2
convfun = lambda x,w,b:F.conv2d(x,w,b,**kargs)
x_r,x_i=inputs[...,0],inputs[...,1]
w_r,w_i=filters[...,0],filters[...,1]
b_r=b_i=None
if bias is not None:
assert bias.shape[-1]==2
b_r,b_i = bias[...,0],bias[...,1]
o_r = convfun(x_r,w_r,b_r) - convfun(x_i,w_i,None)
o_i = convfun(x_r,w_i,b_i) + convfun(x_i,w_r,None)
    return torch.stack([o_r, o_i], dim = -1)
def complex_conv1d(inputs,filters,bias=None,**kargs):
assert len(inputs.shape)==4
assert len(filters.shape)==4
assert inputs.shape[-1]==2
assert filters.shape[-1]==2
convfun = lambda x,w,b:F.conv1d(x,w,b,**kargs)
x_r,x_i=inputs[...,0],inputs[...,1]
w_r,w_i=filters[...,0],filters[...,1]
b_r=b_i=None
if bias is not None:
assert bias.shape[-1]==2
b_r,b_i = bias[...,0],bias[...,1]
o_r = convfun(x_r,w_r,b_r) - convfun(x_i,w_i,None)
o_i = convfun(x_r,w_i,b_i) + convfun(x_i,w_r,None)
return torch.stack([o_r, o_i], dim = -1)
class ComplexTanh(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
ctx.save_for_backward(input)
x,y = input.split(1,dim=-1)
x = 2*x
y = 2*y
real = x.tanh()/(y.cos()/x.cosh() +1)
imag = y.sin()/(y.cos() + x.cosh() + 1e-8)
return torch.cat([real, imag], dim = -1)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
x,y = input.split(1,dim=-1)
x = 2*x
y = 2*y
ys = y.sin()
yc = y.cos()
xch= x.cosh()
xth= x.tanh()
n = (1+yc/xch)**2
ux = 2 +2*yc/xch-2*xth**2
uy = 2*(ys/xch)*xth
ux = ux/n
uy = uy/n
vx = -uy
vy = ux
u,v= grad_output.split(1,dim=-1)
real = u*ux+v*vx
imag =-u*uy-v*vy
return torch.cat([real,imag],-1)
complex_tanh = ComplexTanh.apply
def complex_sigmoid(tensor:torch.Tensor)-> torch.Tensor:
x,y = tensor.split(1,dim=-1)
x = torch.exp(-x)
a = 1+x*y.cos()
b = x*y.sin()
n = a**2+b**2+ 1e-8
return torch.cat([a/n, b/n], dim = -1)
def complexize(tensor: torch.Tensor)-> torch.Tensor:
if tensor.shape[-1] == 2:return tensor
imag = torch.zeros_like(tensor)
return torch.stack([tensor,imag],-1)
| true
| true
|
1c43dcbc0e87b2b9319a3efe3dd0e07b164d11cd
| 36,802
|
py
|
Python
|
code_generation/code_generator_online.py
|
annihilatorrrr/pytgbot
|
2f84b11253873f7af1bc7539eb7d93197d51c90c
|
[
"MIT"
] | 52
|
2015-06-25T15:48:19.000Z
|
2021-08-10T20:29:11.000Z
|
code_generation/code_generator_online.py
|
annihilatorrrr/pytgbot
|
2f84b11253873f7af1bc7539eb7d93197d51c90c
|
[
"MIT"
] | 16
|
2016-04-12T08:11:30.000Z
|
2021-07-22T18:00:07.000Z
|
code_generation/code_generator_online.py
|
annihilatorrrr/pytgbot
|
2f84b11253873f7af1bc7539eb7d93197d51c90c
|
[
"MIT"
] | 14
|
2015-06-26T15:29:48.000Z
|
2021-08-10T20:29:14.000Z
|
# -*- coding: utf-8 -*-
from pathlib import Path
from typing import Dict, List, Union
from code_generator import get_type_path
from code_generator_template import clazz, func, get_template, as_types
from code_generator_classes import Clazz, Function, Variable, Type, Import, FunctionClazz
from luckydonaldUtils.files.basics import mkdir_p # luckydonaldUtils v0.49+
from luckydonaldUtils.interactions import answer, confirm
from luckydonaldUtils.logger import logging
from code_generator_settings import CLASS_TYPE_PATHS, CLASS_TYPE_PATHS__PARENT, WHITELISTED_FUNCS, WHITELISTED_CLASSES, CUSTOM_CLASSES
from code_generator_template import path_to_import_text, split_path
from jinja2.exceptions import TemplateError, TemplateSyntaxError
import requests
import black # code formatter
from yapf.yapflib.yapf_api import FormatFile # code formatter
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from os.path import abspath, dirname, join as path_join, sep as folder_seperator, isfile, exists, isdir
from luckydonaldUtils.interactions import safe_eval, NoBuiltins
__author__ = "luckydonald"
logger = logging.getLogger(__name__)
from logging import LogRecord
def log_filter(record: LogRecord):
if f'{record.name}.{record.funcName}' == 'luckydonaldUtils.functions.wrapper':
return False
return True
# end def
root_logger = logging.add_colored_handler(level=logging.DEBUG, filter=log_filter)
FILE_HEADER = "# -*- coding: utf-8 -*-\n"
MAIN_FILE_CLASS_HEADER = "class Bot(object):\n _base_url = \"https://api.telegram.org/bot{api_key}/{command}\"\n"
BASE_URL = "https://core.telegram.org/bots/api"
SAVE_VALUES = NoBuiltins([], {}, {"Function": Function, "Clazz": Clazz, "Import": Import, "Type": Type, "Variable": Variable})
def lol1(tag):
return tag.has_attr("class") and "anchor" in tag["class"]
class_fields = [
["Field", "Type", "Description"],
["Parameters", "Type", "Description"],
["Parameter", "Type", "Description"]
]
func_fields = [
["Parameters", "Type", "Required", "Description"],
["Parameter", "Type", "Required", "Description"],
]
use_back = False
use_yapf = False
black_settings = dict(
write_back=black.WriteBack.from_configuration(check=False, diff=False),
report=black.Report(check=False, quiet=False, verbose=False),
mode=black.FileMode(
target_versions=set(),
line_length=black.DEFAULT_LINE_LENGTH,
is_pyi=False,
string_normalization=True,
),
)
yapf_settings = dict(
style={
'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT': True,
'ALLOW_MULTILINE_LAMBDAS': True,
'ALLOW_MULTILINE_DICTIONARY_KEYS': False,
'ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS': True,
'ALLOW_SPLIT_BEFORE_DICT_VALUE': False,
'ARITHMETIC_PRECEDENCE_INDICATION': False,
'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF': False,
'BLANK_LINE_BEFORE_MODULE_DOCSTRING': True,
'BLANK_LINE_BEFORE_CLASS_DOCSTRING': False,
'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION': 2, # Sets the number of desired blank lines surrounding top-level function and class definitions.
'COALESCE_BRACKETS': True,
'COLUMN_LIMIT': black.DEFAULT_LINE_LENGTH,
'CONTINUATION_ALIGN_STYLE': "space",
'CONTINUATION_INDENT_WIDTH': 2,
'DEDENT_CLOSING_BRACKETS': True,
'DISABLE_ENDING_COMMA_HEURISTIC': True,
'EACH_DICT_ENTRY_ON_SEPARATE_LINE': False,
'INDENT_DICTIONARY_VALUE': False, # Indent the dictionary value if it cannot fit on the same line as the dictionary key.
'INDENT_WIDTH': 2,
'INDENT_BLANK_LINES': False, # Set to True to prefer indented blank lines rather than empty
'JOIN_MULTIPLE_LINES': False, # Join short lines into one line. E.g., single line if statements.
'NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS': False, # Do not include spaces around selected binary operators. For example: 1 + 2*3 - 4/5
'SPACES_AROUND_POWER_OPERATOR': True, # Set to True to prefer using spaces around **.
# 'SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN': False, # Set to True to prefer spaces around the assignment operator for default or keyword arguments.
'SPACES_BEFORE_COMMENT': 2,
'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET': False, # Insert a space between the ending comma and closing bracket of a list, etc.
'SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED': True, # Split before arguments if the argument list is terminated by a comma.
'SPLIT_ALL_COMMA_SEPARATED_VALUES': True, # If a comma separated list (dict, list, tuple, or function def) is on a line that is too long, split such that all elements are on a single line.
'SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES': True, # Variation on SPLIT_ALL_COMMA_SEPARATED_VALUES in which, if a subexpression with a comma fits in its starting line, then the subexpression is not split. This avoids splits like the one for b in this code:
'SPLIT_BEFORE_BITWISE_OPERATOR': False, # Set to True to prefer splitting before &, | or ^ rather than after.
'SPLIT_BEFORE_ARITHMETIC_OPERATOR': False, # Set to True to prefer splitting before +, -, *, /, //, or @ rather than after.
'SPLIT_BEFORE_CLOSING_BRACKET': True, # Split before the closing bracket if a list or dict literal doesn't fit on a single line.
'SPLIT_BEFORE_DICT_SET_GENERATOR': True, # Split before a dictionary or set generator (comp_for). For example, note the split before the for:
'SPLIT_BEFORE_DOT': False, # Split before the . if we need to split a longer expression:
# 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN': False, # Split after the opening paren which surrounds an expression if it doesn't fit on a single line.
'SPLIT_BEFORE_FIRST_ARGUMENT': True, # If an argument / parameter list is going to be split, then split before the first argument.
'SPLIT_BEFORE_LOGICAL_OPERATOR': True, # Set to True to prefer splitting before and or or rather than after.
# 'SPLIT_BEFORE_NAMED_ASSIGNS': False, # Split named assignments onto individual lines.
'SPLIT_COMPLEX_COMPREHENSION': True, # For list comprehensions and generator expressions with multiple clauses (e.g multiple for calls, if filter expressions) and which need to be reflowed, split each clause onto its own line.
'USE_TABS': False,
# 'SPLIT_PENALTY_AFTER_OPENING_BRACKET': 0
# 'SPLIT_PENALTY_AFTER_UNARY_OPERATOR':
# 'SPLIT_PENALTY_ARITHMETIC_OPERATOR':
# 'SPLIT_PENALTY_BEFORE_IF_EXPR':
# 'SPLIT_PENALTY_BEFORE_IF_EXPR': 30
# 'SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT': 30
},
)
def parse_table(tag):
"""
returns tuple of type ("class"/"func") and list of param strings.
:param tag:
:return:
"""
first = True
table_header = None
table_type = 'unknown'
param_strings = []
thead = tag.find('thead', recursive=False)
    theads = None  # list (items in the <tr> row) of <th>/<td> elements.
if thead:
theads = thead.find_all(["th", "td"])
# end if
tbody = tag.find('tbody', recursive=False)
if tbody:
tbody_rows = tbody.find_all("tr")
else:
tbody_rows = tag.find_all("tr")
# end if
    tbodys = [  # list (rows) of lists (cells in each <tr> row) of <td>/<th> elements.
        row.find_all(["td", "th"]) for row in tbody_rows
    ]
if not thead: # so first row = header
theads = tbody_rows[0]
tbodys = tbody_rows[1:]
# end if
# TABLE HEADER
found_columns = []
for column in theads:
# Either (a) `<td><strong> ... </strong></td>`
# or new (b) `<th> ... </th>`
col = column.find("strong")
if col:
# (a) `<td><strong> ... </strong></td>`
col_text = col.text
else:
# (b) `<th> ... </th>`
col_text = column.text
# end if
found_columns.append(col_text)
# end def
# if TABLE is func
for test_columns in func_fields:
if found_columns == test_columns:
table_header = test_columns
table_type = 'func'
break
# end if
# end for
# if TABLE is class
if not table_header: # only check if we don't have a result yet
# search class now
for test_columns in class_fields:
if found_columns == test_columns:
if table_header is not None:
raise AssertionError("Table detected as func and class: {!r}".format(found_columns))
table_header = test_columns
table_type = 'class'
break
# end if
# end for
# end if
# TABLE is none of the above
if not table_header: # we don't have a result yet
raise AssertionError("Unknown table, {!r}".format(found_columns))
# end if
# TABLE BODY
for tds in tbodys:
string = ''
for col in tds:
string += "\t"
had_something = False
for sub_col in col:
if isinstance(sub_col, NavigableString):
string += sub_col
had_something = True
elif sub_col.name == 'img':
# emojis are images: <img alt="🎲" class="emoji" height="20" src="//telegram.org/img/emoji/40/F09F8EB2.png" width="20"/>
string += sub_col.attrs.get('alt', '')
had_something = True
else:
string += sub_col.text
had_something = True
# end if
# end for
if not had_something:
string += col.text
# end if
# end for
string = string.lstrip("\t")
logger.debug("t: " + string)
param_strings.append(string)
# end for row
return table_type, param_strings
# end def
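def _demo_parse_table():
    """
    Minimal, hypothetical sketch (illustration only, never called) of what
    parse_table() consumes and produces; the markup below mirrors the docs'
    "Field | Type | Description" layout that class_fields matches.
    """
    demo_html = (
        '<table class="table">'
        '<thead><tr><th>Field</th><th>Type</th><th>Description</th></tr></thead>'
        '<tbody><tr><td>chat_id</td><td>Integer</td><td>Unique identifier</td></tr></tbody>'
        '</table>'
    )
    demo_table = BeautifulSoup(demo_html, "html.parser").find("table")
    table_type, param_strings = parse_table(demo_table)
    assert table_type == 'class'
    assert param_strings == ['chat_id\tInteger\tUnique identifier']
# end def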
def load_from_html(folder):
filter = get_filter()
document = requests.get(BASE_URL)
    bs = BeautifulSoup(document.content, features="html.parser")  # explicit parser keeps bs4 from guessing (and warning)
results = []
for h in bs.select("#dev_page_content > h4"):
logger.info("------")
anchor = h.find(lol1)
if not anchor or not anchor.has_attr("name"):
continue
link = "{base_url}#{anchor}".format(base_url=BASE_URL, anchor=anchor["name"])
title = h.text
descr = []
table_type, param_strings = None, None
logger.info("title: " + title)
logger.info("link: " + link)
if filter and title not in filter:
logger.info("Skipping {title}, filtered.".format(title=title))
continue
# logger.debug(h)
type_strings = []
default_returns = []
for sibling in h.next_siblings:
if sibling == "\n":
continue
if sibling.name in ["p", "blockquote"]:
if "return" in sibling.text.lower():
parts_splitted = []
                    is_first_element = True  # True while we are at the start of a new sentence in the string.
for x in sibling.children:
if isinstance(x, NavigableString):
if is_first_element: # Start of a new sentence => new list
parts_splitted.extend([[foo.lstrip()] for foo in x.split(".")])
is_first_element = False
                            else:  # not first = we are in the middle of a sentence => append
parts_splitted[len(parts_splitted)-1].append(x.split(".", maxsplit=1)[0])
parts_splitted.extend([[foo] for foo in x.split(".")[1:]])
is_first_element = False
is_first_element = x.strip().endswith(".")
else:
obj = None
if x.name in ["a", "em"]:
obj = x
else:
obj = x.text
# end if
if is_first_element: # if it is at the beginning of the sentence.
parts_splitted.append([obj])
is_first_element = False
else:
parts_splitted[len(parts_splitted)-1].append(obj)
# end if
# end for
# end for
returns__ = [] # array of strings
                    return_text__ = []  # array of strings. one item = one sentence. Not ending with a dot.
is_array = False
for lol_part in parts_splitted:
has_return = False
returns_ = []
return_text_ = ""
for lol_part_part in lol_part:
if isinstance(lol_part_part, str):
return_text_ += lol_part_part
if lol_part_part.strip().lower().endswith("array of"):
is_array = True
if "return" in lol_part_part.lower():
has_return = True
# end if
else: # not str
return_text_ += lol_part_part.text
if is_array:
returns_.append("list of " + lol_part_part.text)
is_array = False
else:
returns_.append(lol_part_part.text)
# end for
                        if has_return:  # append, so we can have multiple sentences.
return_text__.append(return_text_.strip())
returns__.extend(returns_)
# end if
# end for
if return_text__ or returns__: # finally set it.
default_returns = [". ".join(return_text__).strip(), " or ".join(returns__).strip()]
# end if
# end if
descr.append(sibling.text.replace('“', '"').replace('”', '"'))
elif sibling.name == "table":
assert sibling.has_attr("class") and "table" in sibling["class"]
table_type, param_strings = parse_table(sibling)
elif sibling.name == "h4":
break
elif sibling.name == "h3":
break
elif sibling.name == "hr": # end of page
break
elif sibling.name == "img": # end of page
break
else:
logger.info("unknown: " + sibling.name)
# end if
# end for
if not all([link, title, descr]):
logger.warning("Skipped: Missing link, title or description")
continue
if not all([table_type, param_strings]):
if title in WHITELISTED_FUNCS:
table_type = 'func'
elif title in WHITELISTED_CLASSES:
table_type = 'class'
            elif title in [key[key.rindex('.')+1:] for key in CUSTOM_CLASSES.keys()]:  # title matches a class name from CUSTOM_CLASSES
logger.info(
"Skipped. Has no table with Parameters or Fields.\n"
"Has a `code_generator_settings.CUSTOM_CLASSES` which seems to fit though."
)
continue
else:
logger.warning(
"Skipped. Has no table with Parameters or Fields.\n"
"Also isn't a whitelisted function in `code_generator_settings.WHITELISTED_CLASSES` or class in `code_generator_settings.WHITELISTED_CLASSES`."
)
continue
# -> else: is in WHITELISTED_FUNCS:
# end if
descr = "\n".join(descr)
logger.info("descr: " + repr(descr))
params_string = "\n".join(param_strings) if param_strings else None # WHITELISTED_FUNCS/WHITELISTED_CLASSES have no params
if table_type == "func":
seems_valid = False
if len(default_returns) != 2:
if "return" in descr.lower():
default_returns = ["", "Message"]
default_returns[0] = [x for x in descr.split(".") if "return" in x.lower()][0].strip()
seems_valid = len(default_returns[0].split(".")) == 1
default_returns[1] = " or ".join(type_strings) if type_strings else "Message"
default_returns[1] = as_types(default_returns[1], "returns")
else:
default_returns = ["On success, True is returned", "True"]
# end if "return" in description
else:
seems_valid = len(default_returns[0].split(".")) == 1
# end if default set
replaced_valid = None # load replacements from WHITELISTED_FUNCS.
if title in WHITELISTED_FUNCS:
# "func": {'return': {'expected': '', 'replace': ''}, 'rtype': {'expected': '', 'replace': ''}},
wlist_func = WHITELISTED_FUNCS[title]
wlist_func_return = wlist_func['return'] if 'return' in wlist_func else None
wlist_func_r_type = wlist_func['r_type'] if 'r_type' in wlist_func else None
if wlist_func_return and default_returns[0] != wlist_func_return['expected']:
logger.warning(f"whitelist: Mismatch in return.\nExpected {wlist_func_return['expected']!r},\ninstead got {default_returns[0]!r}.")
replaced_valid = False
if wlist_func_r_type and default_returns[1] != wlist_func_r_type['expected']:
logger.warning(f"whitelist: Mismatch in r_type.\nExpected {wlist_func_r_type['expected']!r},\ninstead got {default_returns[1]!r}")
replaced_valid = False
if replaced_valid is None: # whitelist didn't fail
replaced_valid = True
logger.info("the found return: " + repr(default_returns[0]) + '.')
logger.info("the found r_type: " + repr(default_returns[1]) + '.')
logger.info("whitelist return: " + repr(wlist_func_return['replace']) + '.')
logger.info("whitelist r_type: " + repr(wlist_func_r_type['replace']) + '.')
default_returns[0] = wlist_func_return['replace']
default_returns[1] = wlist_func_r_type['replace']
if not seems_valid and not replaced_valid:
returns = answer("Textual description what the function returns", default_returns[0])
return_type = answer("Return type", default_returns[1])
if isinstance(return_type, str):
return_type = as_types(return_type, "return type")
# end if
else:
returns = default_returns[0]
return_type = default_returns[1]
# end if
logger.debug("\n")
result = func(title, descr, link, params_string, returns=returns, return_type=return_type)
results.append(result)
elif table_type == "class":
if title in CLASS_TYPE_PATHS:
parent_clazz = CLASS_TYPE_PATHS[title][CLASS_TYPE_PATHS__PARENT]
logger.info("superclass: " + parent_clazz)
else:
parent_clazz = answer("Parent class name", "TgBotApiObject")
# end if
if title in WHITELISTED_CLASSES:
pass
            # end if
result = clazz(
clazz=title, parent_clazz=parent_clazz, description=descr, link=link, params_string=params_string
)
results.append(result)
# end if
# end for
return results, document.content
# end def load_from_html
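# For orientation, a hypothetical excerpt of the docs page structure that
# load_from_html() above walks: each <h4> with an anchor opens a section, and
# all siblings up to the next <h4>/<h3>/<hr> (paragraphs, blockquotes and at
# most one parameter <table>) belong to that section.
#   <h4><a class="anchor" name="sendmessage"></a>sendMessage</h4>
#   <p>Use this method to send text messages. On success, the sent Message is returned.</p>
#   <table class="table"><!-- Parameter | Type | Required | Description rows --></table>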
def main():
folder, html_document, results = load_api_definitions()
output(folder, results, html_content=html_document)
def load_api_definitions():
folder = get_folder_path()
mode = confirm("Offline Mode: Load from a dump instead of the API Docs?")
if not mode: # API
results, html_document = load_from_html(folder)
else: # Dump
results, html_document = load_from_dump(folder)
# end def
results = preprocess_results(results, additional_items=list(CUSTOM_CLASSES.values()))
return folder, html_document, results
# end def
def load_from_dump(folder):
# read dump
dump = ""
with open(path_join(folder, "api.py"), "r") as f:
dump = "".join(f.readlines())
# end with
# existing old api.html
html_document = None
if exists(path_join(folder, "api.html")):
with open(path_join(folder, "api.html"), "rb") as f:
html_document = f.read()
# end with
# end if
results = safe_eval(dump, SAVE_VALUES)
return results, html_document
# end def
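def _demo_dump_round_trip():
    """
    Sketch (illustration only, never called) of the dump format that
    load_from_dump() reads back: output() writes the repr() of every result
    object, and safe_eval() rebuilds them using only the constructors
    whitelisted in SAVE_VALUES. The Import value below is made up; this also
    assumes Import.__repr__ is eval-able, which is what the dump relies on.
    """
    dump = repr([Import(path='pytgbot.api_types', name='TgBotApiObject')])
    results = safe_eval(dump, SAVE_VALUES)
    assert isinstance(results[0], Import)
# end def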
# noinspection PyCompatibility
def preprocess_results(results: List[Union[Clazz, Function]], additional_items: Union[None, List[Clazz]] = None):
"""
Sets `variable.duplicate_of_parent` appropriately for all variables of all classes in the results list.
:param results:
:param additional_items: e.g. CUSTOM_CLASSES.values()
:return:
"""
if additional_items is None:
additional_items = []
# end if
logger.info('Calculating duplicate_of_parent.')
clazzes_by_name: Dict[str, Clazz] = {} # "Class": Class
for other in additional_items:
if isinstance(other, Clazz):
clazzes_by_name[other.clazz] = other
# end if
# end for
for result in results:
if isinstance(result, Clazz):
clazzes_by_name[result.clazz] = result
# end if
# end for
for result in results:
if not isinstance(result, Clazz):
continue
# end if
# fill in clazz._parent_clazz_clazz, so we can check our parents
if result.parent_clazz is None or result.parent_clazz.string == 'object':
continue
# end if
if result.parent_clazz.string in clazzes_by_name:
parent_clazz: Clazz = clazzes_by_name[result.parent_clazz.string]
for variable in result.variables:
variable: Variable
parent_variable = parent_clazz.get_same_variable(
variable,
ignore_pytg_name=True,
ignore_description=True,
ignore_optional=True,
ignore_type_always_is_value=True,
allow_additional_allowed_type_matchings=True,
)
variable.duplicate_of_parent = parent_variable is not None
# if we fit parent's class 'additional_allowed_type_matchings', we should upgrade our own types.
if variable.duplicate_of_parent and parent_variable.additional_allowed_type_matchings:
variable.types = parent_variable.types[:]
# end if
# end for
else:
logger.warning(f'Could not resolve parent class: {result.parent_clazz}')
# end if
# end for
return results
# end def
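def _demo_duplicate_of_parent_idea():
    """
    Plain-dict sketch (illustration only, never called) of the idea behind
    preprocess_results(): resolve the parent class by name and flag every
    field the child shares with it. Toy dicts stand in for Clazz/Variable.
    """
    classes = {
        'Message': {'parent': None, 'fields': {'message_id', 'chat'}},
        'ServiceMessage': {'parent': 'Message', 'fields': {'message_id', 'action'}},
    }
    child = classes['ServiceMessage']
    parent = classes[child['parent']]
    duplicate_of_parent = {field: field in parent['fields'] for field in child['fields']}
    assert duplicate_of_parent == {'message_id': True, 'action': False}
# end def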
def output(folder, results, html_content=None):
can_quit = False
do_delete_first = confirm("Can the folder {path} be deleted before writing?".format(path=folder))
logger.info("vvvvvvvvv")
while not can_quit:
if do_delete_first:
try:
                import send2trash  # the PyPI package is "Send2Trash", but the importable module is lowercase
                send2trash.send2trash(folder)
except ImportError:
import shutil
shutil.rmtree(folder)
# end try
# end if
# write crawled data
mkdir_p(folder)
with open(path_join(folder, "api.py"), "w") as f:
f.write("[\n ")
f.write(",\n ".join([repr(result) for result in results]))
f.write("\n]")
# end with
if html_content:
with open(path_join(folder, "api.html"), "wb") as f:
f.write(html_content)
# end with
# end if
# write templates
try:
safe_to_file(folder, results)
except TemplateError as e:
if isinstance(e, TemplateSyntaxError):
logger.exception("Template error at {file}:{line}".format(file=e.filename, line=e.lineno))
else:
logger.exception("Template error.")
# end if
# end try
logger.info("Written to file.")
can_quit = not confirm("Write again after reloading templates?", default=True)
logger.info("#########")
logger.info("Exit.")
# end def
def get_filter():
filter = answer(
"Only generate the doc for specific functions/classes. Comma seperated list. Leave empty to generate all.",
default=""
        # getChat, leaveChat, getChatAdministrators, getChatMember, getChatMembersCount, Message, MessageEntity
)
if filter.strip():
filter = [x.strip() for x in filter.split(",")]
else:
filter = None
# end if
return filter
# end def
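# The two shapes get_filter() can return:
#   "getChat, Message" -> ['getChat', 'Message']
#   ""                 -> None  (meaning: generate everything)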
def get_folder_path():
default = "/tmp/pytgbotapi/"
candidate = abspath(path_join(dirname(abspath(__file__)), 'output'))
    logger.info(f'candidate: {candidate}')
if exists(candidate) and isdir(candidate):
default = candidate
# end if
file = answer("Folder path to store the results.", default=default)
if file:
try:
file = abspath(file)
mkdir_p(file)
with open(path_join(file, "__init__.py"), "w") as f:
f.write(FILE_HEADER)
# end with
except IOError:
pass
# end try
# end if file
return file
# end def
# noinspection PyCompatibility
def safe_to_file(folder, results):
"""
Receives a list of results (type :class:`Clazz` or :class:`Function`), and put them into the right files in :var:`folder`
:param folder: Where the files should be in.
:type folder: str
:param results: A list of :class:`Clazz` or :class:`Function` objects, which will be used to calculate the source code.
:type results: Union(Clazz, Function)
"""
functions = []
message_send_clazzes = []
clazzes: Dict[str, List[Clazz]] = {} # "filepath": [Class, Class, ...]
all_the_clazzes = []
custom_classes = {} # "filepath": [Class, Class, ...]
all_the_custom_clazzes = [] # actually only used to call preprocess_results with all the items. They will be modified in place anyway.
for import_path, result in CUSTOM_CLASSES.items():
# result.import_path = result.calculate_import_path()
result.filepath = result.calculate_filepath(folder)
file_path = result.filepath
if file_path not in custom_classes:
custom_classes[file_path] = []
# end if
custom_classes[file_path].append(result)
if file_path not in clazzes:
clazzes[file_path] = []
# end if
clazzes[file_path].append(result)
all_the_clazzes.append(result)
all_the_custom_clazzes.append(result)
    # end for
_ = preprocess_results(all_the_custom_clazzes, additional_items=results)
# split results into functions and classes
for result in results:
assert isinstance(result, (Clazz, Function))
if isinstance(result, Clazz):
result.import_path = result.calculate_import_path()
result.filepath = result.calculate_filepath(folder)
file_path = result.filepath
if file_path not in clazzes:
clazzes[file_path] = []
clazzes[file_path].append(result)
all_the_clazzes.append(result)
else:
assert isinstance(result, Function)
pytgbot_dir = Path(__file__).parent.parent
# import_path = "pytgbot.bot.asynchronous."
# file_path = calc_path_and_create_folders(pytgbot_dir.absolute(), import_path)
result.filepath = str(pytgbot_dir.joinpath('pytgbot').joinpath('bot').joinpath('asynchronous.py').absolute())
functions.append(result)
if result.name.startswith('send_'):
import_path = "teleflask_messages."
file_path = calc_path_and_create_folders(folder, import_path)
args, special_kwargs, kwargs = result.class_variables_separated
result2 = FunctionClazz(
clazz=result.class_name_teleflask_message,
import_path=Import(path=import_path.rstrip('.'), name=result.class_name_teleflask_message),
imports=result.imports,
parent_clazz=Type(string='ReturnableMessageBase', is_builtin=False, is_list=0, import_path=None, description="Base class"),
link=result.link,
description=result.description,
parameters=args,
keywords=special_kwargs + kwargs,
function=result,
)
result2.filepath = file_path
message_send_clazzes.append(result2)
# end if
# end if
# end for
bot_template = get_template("bot.template")
bot_base_template = get_template("bot_base.template")
clazzfile_template = get_template("classfile.template")
teleflask_messages_template = get_template("teleflask_messages_file.template")
typehints_template = get_template("typehintsfile.template")
telegram_bot_api_server_funcs_template = get_template("telegram_bot_api_server/funcs.template")
telegram_bot_api_server_class_template = get_template("telegram_bot_api_server/classes.template")
mkdir_p(path_join(folder, 'telegram_bot_api_server', 'generated'))
if all_the_clazzes:
txt = telegram_bot_api_server_class_template.render(clazzes=all_the_clazzes)
render_file_to_disk(path_join(folder, 'telegram_bot_api_server', 'generated', 'models.py'), txt)
# end if
for path, clazz_list in clazzes.items():
clazz_imports = set()
for clazz_ in clazz_list:
assert isinstance(clazz_, Clazz)
assert isinstance(clazz_.parent_clazz, Type)
if not clazz_.parent_clazz.is_builtin:
clazz_imports.add(clazz_.parent_clazz.as_import)
# end if
# end for
clazz_imports = list(clazz_imports)
clazz_imports.sort()
is_sendable = ("sendable" in path)
try:
txt = clazzfile_template.render(clazzes=clazz_list, manual_clazzes=[], imports=clazz_imports, is_sendable=is_sendable)
txt = txt.replace("\t", " ")
render_file_to_disk(path, txt)
except IOError:
            raise  # re-raise; a failed write is fatal here
# end try
try:
txt = typehints_template.render(clazzes=clazz_list, imports=clazz_imports, is_sendable=is_sendable)
txt = txt.replace("\t", " ")
render_file_to_disk(path + "i", txt) # "ponies.py" + "i" => "ponies.pyi"
except IOError:
            raise  # re-raise; a failed write is fatal here
# end try
# end for classes
if functions:
func_imports = set()
for func_ in functions:
assert isinstance(func_, Function)
for var_ in func_.variables:
assert isinstance(var_, Variable)
for type_ in var_.types:
assert isinstance(type_, Type)
func_imports.add(type_.as_import)
# end for
# end for
if func_.returns is not None:
assert isinstance(func_.returns, Variable)
for type_ in func_.returns.types:
assert isinstance(type_, Type)
func_imports.add(type_.as_import)
# end for
# end if
# end for
func_imports = list(func_imports)
func_imports.sort()
txt_sync = bot_template.render(functions=functions, is_asyncio=False, imports=func_imports, file_import_path='pytgbot.bot.synchronous')
render_file_to_disk(functions[0].filepath.replace('asynchronous', 'synchronous'), txt_sync)
txt_async = bot_template.render(functions=functions, is_asyncio=True, imports=func_imports, file_import_path='pytgbot.bot.asynchronous')
render_file_to_disk(functions[0].filepath, txt_async)
txt_base = bot_base_template.render(functions=functions, imports=func_imports, file_import_path='pytgbot.bot.base')
render_file_to_disk(functions[0].filepath.replace('asynchronous', 'base'), txt_base)
imports = set()
imports.add(('enum', 'Enum'))
imports.add(('typing', 'Union, List, Optional'))
imports.add(('fastapi', 'APIRouter, HTTPException'))
imports.add(('telethon', 'TelegramClient'))
imports.add(('serializer', 'to_web_api, get_entity'))
imports.add(('fastapi.params', 'Query'))
imports.add(('telethon.errors', 'BotMethodInvalidError'))
imports.add(('telethon.tl.types', 'TypeSendMessageAction'))
imports.add(('telethon.client.chats', '_ChatAction'))
imports.add(('luckydonaldUtils.logger', 'logging'))
imports.add(('telethon.tl.functions.messages', 'SetTypingRequest'))
for function in functions:
function: Function
for the_import in function.imports:
the_import: Import
imports.add((the_import.path, the_import.name))
# end for
# end for
# https://stackoverflow.com/a/613218/3423324#how-do-i-sort-a-dictionary-by-value
# https://stackoverflow.com/a/4659539/3423324#how-to-sort-by-length-of-string-followed-by-alphabetical-order
imports_sorted = ["from " + path + ' import ' + name for path, name in sorted(imports, key=lambda item: (-len(item[0]), item[0], -len(item[1]), item[1]))]
# imports_sorted.sort(key=lambda item: (-len(item), item))
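        # e.g. ('telethon.tl.functions.messages', 'SetTypingRequest') sorts before
        # ('enum', 'Enum'): longest module path first, then alphabetically, then the
        # same two rules again for the imported names.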
txt = telegram_bot_api_server_funcs_template.render(functions=functions, imports=imports_sorted)
render_file_to_disk(path_join(folder, 'telegram_bot_api_server', 'generated', 'funcs.py'), txt)
# end if
if message_send_clazzes:
txt = teleflask_messages_template.render(clazzes=message_send_clazzes)
render_file_to_disk(message_send_clazzes[0].filepath, txt)
# end if
# end def
# noinspection PyCompatibility
def render_file_to_disk(file, txt):
# remove whitespaces at the end of a line
txt = "\n".join(line.rstrip() for line in txt.splitlines())
# add blank line at end of file.
if not txt.endswith("\n"):
txt += "\n"
# end if
with open(file, "w") as f:
f.write(txt)
# end with
logger.info(f'Written {file!r} to disk, {len(txt)} chars.')
if use_back:
black.reformat_one(
src=black.Path(file),
write_back=black_settings['write_back'],
fast=False,
mode=black_settings['mode'],
report=black_settings['report'],
)
# end if
if use_yapf:
try:
FormatFile(file, in_place=True, style_config=yapf_settings['style'])
        except Exception:  # don't swallow KeyboardInterrupt/SystemExit
logger.exception("Formatting file {file} failed.".format(file=file))
# end try
# end if
# end def
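# What the normalization in render_file_to_disk() does to a rendered template:
#   "def f():  \n    pass\t"  ->  "def f():\n    pass\n"
# (trailing spaces/tabs stripped per line, exactly one trailing newline kept).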
def calc_path_and_create_folders(folder, import_path, create_folder=True):
"""
calculate the path and create the needed folders
>>> calc_path_and_create_folders(folder='/somewhere/', import_path='foo.bar.BarClass', create_folder=False)
'/somewhere/foo/bar/BarClass'
:param import_path: 'foo.bar.BarClass'
:param folder: base folder where we wanna place 'foo.bar.BarClass' in.
"""
file_path = abspath(path_join(folder, import_path[:import_path.rfind(".")].replace(".", folder_seperator) + ".py"))
if create_folder:
mkdir_p(dirname(file_path))
# end if
return file_path
# end def
if __name__ == '__main__':
main()
# end if
| 42.496536
| 266
| 0.597386
|
from pathlib import Path
from typing import Dict, List, Union
from code_generator import get_type_path
from code_generator_template import clazz, func, get_template, as_types
from code_generator_classes import Clazz, Function, Variable, Type, Import, FunctionClazz
from luckydonaldUtils.files.basics import mkdir_p
from luckydonaldUtils.interactions import answer, confirm
from luckydonaldUtils.logger import logging
from code_generator_settings import CLASS_TYPE_PATHS, CLASS_TYPE_PATHS__PARENT, WHITELISTED_FUNCS, WHITELISTED_CLASSES, CUSTOM_CLASSES
from code_generator_template import path_to_import_text, split_path
from jinja2.exceptions import TemplateError, TemplateSyntaxError
import requests
import black
from yapf.yapflib.yapf_api import FormatFile
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from os.path import abspath, dirname, join as path_join, sep as folder_seperator, isfile, exists, isdir
from luckydonaldUtils.interactions import safe_eval, NoBuiltins
__author__ = "luckydonald"
logger = logging.getLogger(__name__)
from logging import LogRecord
def log_filter(record: LogRecord):
if f'{record.name}.{record.funcName}' == 'luckydonaldUtils.functions.wrapper':
return False
return True
root_logger = logging.add_colored_handler(level=logging.DEBUG, filter=log_filter)
FILE_HEADER = "# -*- coding: utf-8 -*-\n"
MAIN_FILE_CLASS_HEADER = "class Bot(object):\n _base_url = \"https://api.telegram.org/bot{api_key}/{command}\"\n"
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)
BASE_URL = "https://core.telegram.org/bots/api"
SAVE_VALUES = NoBuiltins([], {}, {"Function": Function, "Clazz": Clazz, "Import": Import, "Type": Type, "Variable": Variable})
def lol1(tag):
return tag.has_attr("class") and "anchor" in tag["class"]
class_fields = [
["Field", "Type", "Description"],
["Parameters", "Type", "Description"],
["Parameter", "Type", "Description"]
]
func_fields = [
["Parameters", "Type", "Required", "Description"],
["Parameter", "Type", "Required", "Description"],
]
use_back = False
use_yapf = False
black_settings = dict(
write_back=black.WriteBack.from_configuration(check=False, diff=False),
report=black.Report(check=False, quiet=False, verbose=False),
mode=black.FileMode(
target_versions=set(),
line_length=black.DEFAULT_LINE_LENGTH,
is_pyi=False,
string_normalization=True,
),
)
yapf_settings = dict(
style={
'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT': True,
'ALLOW_MULTILINE_LAMBDAS': True,
'ALLOW_MULTILINE_DICTIONARY_KEYS': False,
'ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS': True,
'ALLOW_SPLIT_BEFORE_DICT_VALUE': False,
'ARITHMETIC_PRECEDENCE_INDICATION': False,
'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF': False,
'BLANK_LINE_BEFORE_MODULE_DOCSTRING': True,
'BLANK_LINE_BEFORE_CLASS_DOCSTRING': False,
'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION': 2,
'COALESCE_BRACKETS': True,
'COLUMN_LIMIT': black.DEFAULT_LINE_LENGTH,
'CONTINUATION_ALIGN_STYLE': "space",
'CONTINUATION_INDENT_WIDTH': 2,
'DEDENT_CLOSING_BRACKETS': True,
'DISABLE_ENDING_COMMA_HEURISTIC': True,
'EACH_DICT_ENTRY_ON_SEPARATE_LINE': False,
'INDENT_DICTIONARY_VALUE': False,
'INDENT_WIDTH': 2,
'INDENT_BLANK_LINES': False,
'JOIN_MULTIPLE_LINES': False,
'NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS': False,
'SPACES_AROUND_POWER_OPERATOR': True,
'SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED': True,
'SPLIT_ALL_COMMA_SEPARATED_VALUES': True,
'SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES': True,
'SPLIT_BEFORE_BITWISE_OPERATOR': False,
'SPLIT_BEFORE_ARITHMETIC_OPERATOR': False,
'SPLIT_BEFORE_CLOSING_BRACKET': True,
'SPLIT_BEFORE_DICT_SET_GENERATOR': True, # Split before a dictionary or set generator (comp_for). For example, note the split before the for:
'SPLIT_BEFORE_DOT': False, # Split before the . if we need to split a longer expression:
# 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN': False, # Split after the opening paren which surrounds an expression if it doesn't fit on a single line.
'SPLIT_BEFORE_FIRST_ARGUMENT': True,
'SPLIT_BEFORE_LOGICAL_OPERATOR': True,
'USE_TABS': False,
},
)
def parse_table(tag):
first = True
table_header = None
table_type = 'unknown'
param_strings = []
thead = tag.find('thead', recursive=False)
theads = None
if thead:
theads = thead.find_all(["th", "td"])
tbody = tag.find('tbody', recursive=False)
if tbody:
tbody_rows = tbody.find_all("tr")
else:
tbody_rows = tag.find_all("tr")
tbodys = [
        row.find_all(["td", "th"]) for row in tbody_rows
]
if not thead:
theads = tbody_rows[0]
tbodys = tbody_rows[1:]
found_columns = []
for column in theads:
col = column.find("strong")
if col:
col_text = col.text
else:
col_text = column.text
found_columns.append(col_text)
for test_columns in func_fields:
if found_columns == test_columns:
table_header = test_columns
table_type = 'func'
break
if not table_header:
# search class now
for test_columns in class_fields:
if found_columns == test_columns:
if table_header is not None:
raise AssertionError("Table detected as func and class: {!r}".format(found_columns))
table_header = test_columns
table_type = 'class'
break
# end if
# end for
# end if
# TABLE is none of the above
if not table_header: # we don't have a result yet
raise AssertionError("Unknown table, {!r}".format(found_columns))
for tds in tbodys:
string = ''
for col in tds:
string += "\t"
had_something = False
for sub_col in col:
if isinstance(sub_col, NavigableString):
string += sub_col
had_something = True
elif sub_col.name == 'img':
string += sub_col.attrs.get('alt', '')
had_something = True
else:
string += sub_col.text
had_something = True
if not had_something:
string += col.text
string = string.lstrip("\t")
logger.debug("t: " + string)
param_strings.append(string)
return table_type, param_strings
def load_from_html(folder):
filter = get_filter()
document = requests.get(BASE_URL)
    bs = BeautifulSoup(document.content, features="html.parser")
results = []
for h in bs.select("#dev_page_content > h4"):
logger.info("------")
anchor = h.find(lol1)
if not anchor or not anchor.has_attr("name"):
continue
link = "{base_url}#{anchor}".format(base_url=BASE_URL, anchor=anchor["name"])
title = h.text
descr = []
table_type, param_strings = None, None
logger.info("title: " + title)
logger.info("link: " + link)
if filter and title not in filter:
logger.info("Skipping {title}, filtered.".format(title=title))
continue
type_strings = []
default_returns = []
for sibling in h.next_siblings:
if sibling == "\n":
continue
if sibling.name in ["p", "blockquote"]:
if "return" in sibling.text.lower():
parts_splitted = []
is_first_element = True
for x in sibling.children:
if isinstance(x, NavigableString):
if is_first_element:
parts_splitted.extend([[foo.lstrip()] for foo in x.split(".")])
is_first_element = False
else:
parts_splitted[len(parts_splitted)-1].append(x.split(".", maxsplit=1)[0])
parts_splitted.extend([[foo] for foo in x.split(".")[1:]])
is_first_element = False
is_first_element = x.strip().endswith(".")
else:
obj = None
if x.name in ["a", "em"]:
obj = x
else:
obj = x.text
if is_first_element:
parts_splitted.append([obj])
is_first_element = False
else:
parts_splitted[len(parts_splitted)-1].append(obj)
returns__ = []
return_text__ = []
is_array = False
for lol_part in parts_splitted:
has_return = False
returns_ = []
return_text_ = ""
for lol_part_part in lol_part:
if isinstance(lol_part_part, str):
return_text_ += lol_part_part
if lol_part_part.strip().lower().endswith("array of"):
is_array = True
if "return" in lol_part_part.lower():
has_return = True
else:
return_text_ += lol_part_part.text
if is_array:
returns_.append("list of " + lol_part_part.text)
is_array = False
else:
returns_.append(lol_part_part.text)
if has_return:
return_text__.append(return_text_.strip())
returns__.extend(returns_)
if return_text__ or returns__:
default_returns = [". ".join(return_text__).strip(), " or ".join(returns__).strip()]
descr.append(sibling.text.replace('“', '"').replace('”', '"'))
elif sibling.name == "table":
assert sibling.has_attr("class") and "table" in sibling["class"]
table_type, param_strings = parse_table(sibling)
elif sibling.name == "h4":
break
elif sibling.name == "h3":
break
elif sibling.name == "hr":
break
elif sibling.name == "img":
break
else:
logger.info("unknown: " + sibling.name)
if not all([link, title, descr]):
logger.warning("Skipped: Missing link, title or description")
continue
if not all([table_type, param_strings]):
if title in WHITELISTED_FUNCS:
table_type = 'func'
elif title in WHITELISTED_CLASSES:
table_type = 'class'
            elif title in [key[key.rindex('.')+1:] for key in CUSTOM_CLASSES.keys()]:
logger.info(
"Skipped. Has no table with Parameters or Fields.\n"
"Has a `code_generator_settings.CUSTOM_CLASSES` which seems to fit though."
)
continue
else:
logger.warning(
"Skipped. Has no table with Parameters or Fields.\n"
"Also isn't a whitelisted function in `code_generator_settings.WHITELISTED_CLASSES` or class in `code_generator_settings.WHITELISTED_CLASSES`."
)
continue
# -> else: is in WHITELISTED_FUNCS:
# end if
descr = "\n".join(descr)
logger.info("descr: " + repr(descr))
params_string = "\n".join(param_strings) if param_strings else None # WHITELISTED_FUNCS/WHITELISTED_CLASSES have no params
if table_type == "func":
seems_valid = False
if len(default_returns) != 2:
if "return" in descr.lower():
default_returns = ["", "Message"]
default_returns[0] = [x for x in descr.split(".") if "return" in x.lower()][0].strip()
seems_valid = len(default_returns[0].split(".")) == 1
default_returns[1] = " or ".join(type_strings) if type_strings else "Message"
default_returns[1] = as_types(default_returns[1], "returns")
else:
default_returns = ["On success, True is returned", "True"]
# end if "return" in description
else:
seems_valid = len(default_returns[0].split(".")) == 1
# end if default set
replaced_valid = None # load replacements from WHITELISTED_FUNCS.
if title in WHITELISTED_FUNCS:
# "func": {'return': {'expected': '', 'replace': ''}, 'rtype': {'expected': '', 'replace': ''}},
wlist_func = WHITELISTED_FUNCS[title]
wlist_func_return = wlist_func['return'] if 'return' in wlist_func else None
wlist_func_r_type = wlist_func['r_type'] if 'r_type' in wlist_func else None
if wlist_func_return and default_returns[0] != wlist_func_return['expected']:
logger.warning(f"whitelist: Mismatch in return.\nExpected {wlist_func_return['expected']!r},\ninstead got {default_returns[0]!r}.")
replaced_valid = False
if wlist_func_r_type and default_returns[1] != wlist_func_r_type['expected']:
logger.warning(f"whitelist: Mismatch in r_type.\nExpected {wlist_func_r_type['expected']!r},\ninstead got {default_returns[1]!r}")
replaced_valid = False
if replaced_valid is None: # whitelist didn't fail
replaced_valid = True
logger.info("the found return: " + repr(default_returns[0]) + '.')
logger.info("the found r_type: " + repr(default_returns[1]) + '.')
logger.info("whitelist return: " + repr(wlist_func_return['replace']) + '.')
logger.info("whitelist r_type: " + repr(wlist_func_r_type['replace']) + '.')
default_returns[0] = wlist_func_return['replace']
default_returns[1] = wlist_func_r_type['replace']
if not seems_valid and not replaced_valid:
returns = answer("Textual description what the function returns", default_returns[0])
return_type = answer("Return type", default_returns[1])
if isinstance(return_type, str):
return_type = as_types(return_type, "return type")
else:
returns = default_returns[0]
return_type = default_returns[1]
logger.debug("\n")
result = func(title, descr, link, params_string, returns=returns, return_type=return_type)
results.append(result)
elif table_type == "class":
if title in CLASS_TYPE_PATHS:
parent_clazz = CLASS_TYPE_PATHS[title][CLASS_TYPE_PATHS__PARENT]
logger.info("superclass: " + parent_clazz)
else:
parent_clazz = answer("Parent class name", "TgBotApiObject")
if title in WHITELISTED_CLASSES:
pass
result = clazz(
clazz=title, parent_clazz=parent_clazz, description=descr, link=link, params_string=params_string
)
results.append(result)
return results, document.content
def main():
folder, html_document, results = load_api_definitions()
output(folder, results, html_content=html_document)
def load_api_definitions():
folder = get_folder_path()
mode = confirm("Offline Mode: Load from a dump instead of the API Docs?")
if not mode:
results, html_document = load_from_html(folder)
else:
results, html_document = load_from_dump(folder)
results = preprocess_results(results, additional_items=list(CUSTOM_CLASSES.values()))
return folder, html_document, results
def load_from_dump(folder):
dump = ""
with open(path_join(folder, "api.py"), "r") as f:
dump = "".join(f.readlines())
html_document = None
if exists(path_join(folder, "api.html")):
with open(path_join(folder, "api.html"), "rb") as f:
html_document = f.read()
results = safe_eval(dump, SAVE_VALUES)
return results, html_document
def preprocess_results(results: List[Union[Clazz, Function]], additional_items: Union[None, List[Clazz]] = None):
if additional_items is None:
additional_items = []
logger.info('Calculating duplicate_of_parent.')
clazzes_by_name: Dict[str, Clazz] = {}
for other in additional_items:
if isinstance(other, Clazz):
clazzes_by_name[other.clazz] = other
for result in results:
if isinstance(result, Clazz):
clazzes_by_name[result.clazz] = result
for result in results:
if not isinstance(result, Clazz):
continue
if result.parent_clazz is None or result.parent_clazz.string == 'object':
continue
if result.parent_clazz.string in clazzes_by_name:
parent_clazz: Clazz = clazzes_by_name[result.parent_clazz.string]
for variable in result.variables:
variable: Variable
parent_variable = parent_clazz.get_same_variable(
variable,
ignore_pytg_name=True,
ignore_description=True,
ignore_optional=True,
ignore_type_always_is_value=True,
allow_additional_allowed_type_matchings=True,
)
variable.duplicate_of_parent = parent_variable is not None
if variable.duplicate_of_parent and parent_variable.additional_allowed_type_matchings:
variable.types = parent_variable.types[:]
# end if
# end for
else:
logger.warning(f'Could not resolve parent class: {result.parent_clazz}')
# end if
# end for
return results
# end def
def output(folder, results, html_content=None):
can_quit = False
do_delete_first = confirm("Can the folder {path} be deleted before writing?".format(path=folder))
logger.info("vvvvvvvvv")
while not can_quit:
if do_delete_first:
try:
                import send2trash
                send2trash.send2trash(folder)
except ImportError:
import shutil
shutil.rmtree(folder)
# end try
# end if
# write crawled data
mkdir_p(folder)
with open(path_join(folder, "api.py"), "w") as f:
f.write("[\n ")
f.write(",\n ".join([repr(result) for result in results]))
f.write("\n]")
# end with
if html_content:
with open(path_join(folder, "api.html"), "wb") as f:
f.write(html_content)
# end with
# end if
# write templates
try:
safe_to_file(folder, results)
except TemplateError as e:
if isinstance(e, TemplateSyntaxError):
logger.exception("Template error at {file}:{line}".format(file=e.filename, line=e.lineno))
else:
logger.exception("Template error.")
# end if
# end try
logger.info("Written to file.")
can_quit = not confirm("Write again after reloading templates?", default=True)
logger.info("#########")
logger.info("Exit.")
# end def
def get_filter():
filter = answer(
"Only generate the doc for specific functions/classes. Comma seperated list. Leave empty to generate all.",
default=""
        # getChat, leaveChat, getChatAdministrators, getChatMember, getChatMembersCount, Message, MessageEntity
)
if filter.strip():
filter = [x.strip() for x in filter.split(",")]
else:
filter = None
# end if
return filter
# end def
def get_folder_path():
default = "/tmp/pytgbotapi/"
candidate = abspath(path_join(dirname(abspath(__file__)), 'output'))
    logger.info(f'candidate: {candidate}')
if exists(candidate) and isdir(candidate):
default = candidate
# end if
file = answer("Folder path to store the results.", default=default)
if file:
try:
file = abspath(file)
mkdir_p(file)
with open(path_join(file, "__init__.py"), "w") as f:
f.write(FILE_HEADER)
# end with
except IOError:
pass
# end try
# end if file
return file
# end def
# noinspection PyCompatibility
def safe_to_file(folder, results):
functions = []
message_send_clazzes = []
clazzes: Dict[str, List[Clazz]] = {} # "filepath": [Class, Class, ...]
all_the_clazzes = []
custom_classes = {} # "filepath": [Class, Class, ...]
all_the_custom_clazzes = [] # actually only used to call preprocess_results with all the items. They will be modified in place anyway.
for import_path, result in CUSTOM_CLASSES.items():
# result.import_path = result.calculate_import_path()
result.filepath = result.calculate_filepath(folder)
file_path = result.filepath
if file_path not in custom_classes:
custom_classes[file_path] = []
# end if
custom_classes[file_path].append(result)
if file_path not in clazzes:
clazzes[file_path] = []
# end if
clazzes[file_path].append(result)
all_the_clazzes.append(result)
all_the_custom_clazzes.append(result)
    # end for
_ = preprocess_results(all_the_custom_clazzes, additional_items=results)
# split results into functions and classes
for result in results:
assert isinstance(result, (Clazz, Function))
if isinstance(result, Clazz):
result.import_path = result.calculate_import_path()
result.filepath = result.calculate_filepath(folder)
file_path = result.filepath
if file_path not in clazzes:
clazzes[file_path] = []
clazzes[file_path].append(result)
all_the_clazzes.append(result)
else:
assert isinstance(result, Function)
pytgbot_dir = Path(__file__).parent.parent
# import_path = "pytgbot.bot.asynchronous."
# file_path = calc_path_and_create_folders(pytgbot_dir.absolute(), import_path)
result.filepath = str(pytgbot_dir.joinpath('pytgbot').joinpath('bot').joinpath('asynchronous.py').absolute())
functions.append(result)
if result.name.startswith('send_'):
import_path = "teleflask_messages."
file_path = calc_path_and_create_folders(folder, import_path)
args, special_kwargs, kwargs = result.class_variables_separated
result2 = FunctionClazz(
clazz=result.class_name_teleflask_message,
import_path=Import(path=import_path.rstrip('.'), name=result.class_name_teleflask_message),
imports=result.imports,
parent_clazz=Type(string='ReturnableMessageBase', is_builtin=False, is_list=0, import_path=None, description="Base class"),
link=result.link,
description=result.description,
parameters=args,
keywords=special_kwargs + kwargs,
function=result,
)
result2.filepath = file_path
message_send_clazzes.append(result2)
# end if
# end if
# end for
bot_template = get_template("bot.template")
bot_base_template = get_template("bot_base.template")
clazzfile_template = get_template("classfile.template")
teleflask_messages_template = get_template("teleflask_messages_file.template")
typehints_template = get_template("typehintsfile.template")
telegram_bot_api_server_funcs_template = get_template("telegram_bot_api_server/funcs.template")
telegram_bot_api_server_class_template = get_template("telegram_bot_api_server/classes.template")
mkdir_p(path_join(folder, 'telegram_bot_api_server', 'generated'))
if all_the_clazzes:
txt = telegram_bot_api_server_class_template.render(clazzes=all_the_clazzes)
render_file_to_disk(path_join(folder, 'telegram_bot_api_server', 'generated', 'models.py'), txt)
# end if
for path, clazz_list in clazzes.items():
clazz_imports = set()
for clazz_ in clazz_list:
assert isinstance(clazz_, Clazz)
assert isinstance(clazz_.parent_clazz, Type)
if not clazz_.parent_clazz.is_builtin:
clazz_imports.add(clazz_.parent_clazz.as_import)
# end if
# end for
clazz_imports = list(clazz_imports)
clazz_imports.sort()
is_sendable = ("sendable" in path)
try:
txt = clazzfile_template.render(clazzes=clazz_list, manual_clazzes=[], imports=clazz_imports, is_sendable=is_sendable)
txt = txt.replace("\t", " ")
render_file_to_disk(path, txt)
except IOError:
            raise  # re-raise; a failed write is fatal here
# end try
try:
txt = typehints_template.render(clazzes=clazz_list, imports=clazz_imports, is_sendable=is_sendable)
txt = txt.replace("\t", " ")
render_file_to_disk(path + "i", txt) # "ponies.py" + "i" => "ponies.pyi"
except IOError:
            raise  # re-raise; a failed write is fatal here
# end try
# end for classes
if functions:
func_imports = set()
for func_ in functions:
assert isinstance(func_, Function)
for var_ in func_.variables:
assert isinstance(var_, Variable)
for type_ in var_.types:
assert isinstance(type_, Type)
func_imports.add(type_.as_import)
# end for
# end for
if func_.returns is not None:
assert isinstance(func_.returns, Variable)
for type_ in func_.returns.types:
assert isinstance(type_, Type)
func_imports.add(type_.as_import)
# end for
# end if
# end for
func_imports = list(func_imports)
func_imports.sort()
txt_sync = bot_template.render(functions=functions, is_asyncio=False, imports=func_imports, file_import_path='pytgbot.bot.synchronous')
render_file_to_disk(functions[0].filepath.replace('asynchronous', 'synchronous'), txt_sync)
txt_async = bot_template.render(functions=functions, is_asyncio=True, imports=func_imports, file_import_path='pytgbot.bot.asynchronous')
render_file_to_disk(functions[0].filepath, txt_async)
txt_base = bot_base_template.render(functions=functions, imports=func_imports, file_import_path='pytgbot.bot.base')
render_file_to_disk(functions[0].filepath.replace('asynchronous', 'base'), txt_base)
imports = set()
imports.add(('enum', 'Enum'))
imports.add(('typing', 'Union, List, Optional'))
imports.add(('fastapi', 'APIRouter, HTTPException'))
imports.add(('telethon', 'TelegramClient'))
imports.add(('serializer', 'to_web_api, get_entity'))
imports.add(('fastapi.params', 'Query'))
imports.add(('telethon.errors', 'BotMethodInvalidError'))
imports.add(('telethon.tl.types', 'TypeSendMessageAction'))
imports.add(('telethon.client.chats', '_ChatAction'))
imports.add(('luckydonaldUtils.logger', 'logging'))
imports.add(('telethon.tl.functions.messages', 'SetTypingRequest'))
for function in functions:
function: Function
for the_import in function.imports:
the_import: Import
imports.add((the_import.path, the_import.name))
# end for
# end for
# https://stackoverflow.com/a/613218/3423324#how-do-i-sort-a-dictionary-by-value
# https://stackoverflow.com/a/4659539/3423324#how-to-sort-by-length-of-string-followed-by-alphabetical-order
imports_sorted = ["from " + path + ' import ' + name for path, name in sorted(imports, key=lambda item: (-len(item[0]), item[0], -len(item[1]), item[1]))]
# imports_sorted.sort(key=lambda item: (-len(item), item))
txt = telegram_bot_api_server_funcs_template.render(functions=functions, imports=imports_sorted)
render_file_to_disk(path_join(folder, 'telegram_bot_api_server', 'generated', 'funcs.py'), txt)
# end if
if message_send_clazzes:
txt = teleflask_messages_template.render(clazzes=message_send_clazzes)
render_file_to_disk(message_send_clazzes[0].filepath, txt)
# end if
# end def
# noinspection PyCompatibility
def render_file_to_disk(file, txt):
# remove whitespaces at the end of a line
txt = "\n".join(line.rstrip() for line in txt.splitlines())
# add blank line at end of file.
if not txt.endswith("\n"):
txt += "\n"
# end if
with open(file, "w") as f:
f.write(txt)
# end with
logger.info(f'Written {file!r} to disk, {len(txt)} chars.')
if use_back:
black.reformat_one(
src=black.Path(file),
write_back=black_settings['write_back'],
fast=False,
mode=black_settings['mode'],
report=black_settings['report'],
)
# end if
if use_yapf:
try:
FormatFile(file, in_place=True, style_config=yapf_settings['style'])
        except Exception:
logger.exception("Formatting file {file} failed.".format(file=file))
# end try
# end if
# end def
def calc_path_and_create_folders(folder, import_path, create_folder=True):
file_path = abspath(path_join(folder, import_path[:import_path.rfind(".")].replace(".", folder_seperator) + ".py"))
if create_folder:
mkdir_p(dirname(file_path))
# end if
return file_path
# end def
if __name__ == '__main__':
main()
# end if
| true
| true
|
1c43ded31ce72ef1e9790a43940975d2e33defc8
| 42
|
py
|
Python
|
run.py
|
chrisbarr/bilious-rutabaga
|
b2d01f03c2cce8342f11139279870156c0ebc9c9
|
[
"MIT"
] | null | null | null |
run.py
|
chrisbarr/bilious-rutabaga
|
b2d01f03c2cce8342f11139279870156c0ebc9c9
|
[
"MIT"
] | null | null | null |
run.py
|
chrisbarr/bilious-rutabaga
|
b2d01f03c2cce8342f11139279870156c0ebc9c9
|
[
"MIT"
] | null | null | null |
import bucket_lister
bucket_lister.main()
| 14
| 20
| 0.857143
|
import bucket_lister
bucket_lister.main()
| true
| true
|
1c43df75a6f017476d2000ddd8cfa609911e7416
| 48,453
|
py
|
Python
|
pymatgen/core/tests/test_structure.py
|
MahdiDavari/pymatgen
|
eb6cd95230c11ac761a96ebf82b98f71177bb71f
|
[
"MIT"
] | null | null | null |
pymatgen/core/tests/test_structure.py
|
MahdiDavari/pymatgen
|
eb6cd95230c11ac761a96ebf82b98f71177bb71f
|
[
"MIT"
] | null | null | null |
pymatgen/core/tests/test_structure.py
|
MahdiDavari/pymatgen
|
eb6cd95230c11ac761a96ebf82b98f71177bb71f
|
[
"MIT"
] | 1
|
2018-04-09T21:49:14.000Z
|
2018-04-09T21:49:14.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import IStructure, Structure, IMolecule, \
StructureError, Molecule
from pymatgen.core.lattice import Lattice
import random
import os
import numpy as np
class IStructureTest(PymatgenTest):
def setUp(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
self.lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = IStructure(self.lattice, ["Si"] * 2, coords)
self.assertEqual(len(self.struct), 2,
"Wrong number of sites in structure!")
self.assertTrue(self.struct.is_ordered)
self.assertTrue(self.struct.ntypesp == 1)
coords = list()
coords.append([0, 0, 0])
coords.append([0., 0, 0.0000001])
self.assertRaises(StructureError, IStructure, self.lattice,
["Si"] * 2, coords, True)
self.propertied_structure = IStructure(
self.lattice, ["Si"] * 2, coords,
site_properties={'magmom': [5, -5]})
def test_matches(self):
ss = self.struct * 2
self.assertTrue(ss.matches(self.struct))
def test_bad_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.5, 0.75])
self.assertRaises(StructureError, IStructure, self.lattice,
["Si"] * 3, coords, validate_proximity=True)
        # these shouldn't raise an error
IStructure(self.lattice, ["Si"] * 2, coords[:2], True)
IStructure(self.lattice, ["Si"], coords[:1], True)
def test_volume_and_density(self):
self.assertAlmostEqual(self.struct.volume, 40.04, 2, "Volume wrong!")
self.assertAlmostEqual(self.struct.density, 2.33, 2,
"Incorrect density")
def test_specie_init(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{Specie('O', -2): 1.0},
{Specie('Mg', 2): 0.8}], coords)
self.assertEqual(s.composition.formula, 'Mg0.8 O1')
def test_get_sorted_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, ["O", "Li"], coords,
site_properties={'charge': [-2, 1]})
sorted_s = s.get_sorted_structure()
self.assertEqual(sorted_s[0].species_and_occu, Composition("Li"))
self.assertEqual(sorted_s[1].species_and_occu, Composition("O"))
self.assertEqual(sorted_s[0].charge, 1)
self.assertEqual(sorted_s[1].charge, -2)
s = IStructure(self.lattice, ["Se", "C", "Se", "C"],
[[0] * 3, [0.5] * 3, [0.25] * 3, [0.75] * 3])
self.assertEqual([site.specie.symbol
for site in s.get_sorted_structure()],
["C", "C", "Se", "Se"])
def test_get_space_group_data(self):
self.assertEqual(self.struct.get_space_group_info(), ('Fd-3m', 227))
def test_fractional_occupations(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{'O': 1.0}, {'Mg': 0.8}],
coords)
self.assertEqual(s.composition.formula, 'Mg0.8 O1')
self.assertFalse(s.is_ordered)
def test_get_distance(self):
self.assertAlmostEqual(self.struct.get_distance(0, 1), 2.35, 2,
"Distance calculated wrongly!")
pt = [0.9, 0.9, 0.8]
self.assertAlmostEqual(self.struct[0].distance_from_point(pt),
1.50332963784, 2,
"Distance calculated wrongly!")
def test_as_dict(self):
si = Specie("Si", 4)
mn = Element("Mn")
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, [{si: 0.5, mn: 0.5}, {si: 0.5}],
coords)
self.assertIn("lattice", struct.as_dict())
self.assertIn("sites", struct.as_dict())
d = self.propertied_structure.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 5)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{Specie('O', -2,
properties={"spin": 3}): 1.0},
{Specie('Mg', 2,
properties={"spin": 2}): 0.8}],
coords, site_properties={'magmom': [5, -5]})
d = s.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 5)
self.assertEqual(d['sites'][0]['species'][0]['properties']['spin'], 3)
d = s.as_dict(0)
self.assertNotIn("volume", d['lattice'])
self.assertNotIn("xyz", d['sites'][0])
def test_from_dict(self):
d = self.propertied_structure.as_dict()
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
d = self.propertied_structure.as_dict(0)
s2 = IStructure.from_dict(d)
self.assertEqual(s, s2)
d = {'lattice': {'a': 3.8401979337, 'volume': 40.044794644251596,
'c': 3.8401979337177736, 'b': 3.840198994344244,
'matrix': [[3.8401979337, 0.0, 0.0],
[1.9200989668, 3.3257101909, 0.0],
[0.0, -2.2171384943, 3.1355090603]],
'alpha': 119.9999908639842, 'beta': 90.0,
'gamma': 60.000009137322195},
'sites': [{'properties': {'magmom': 5}, 'abc': [0.0, 0.0, 0.0],
'occu': 1.0, 'species': [{'occu': 1.0,
'oxidation_state': -2,
'properties': {'spin': 3},
'element': 'O'}],
'label': 'O2-', 'xyz': [0.0, 0.0, 0.0]},
{'properties': {'magmom': -5},
'abc': [0.75, 0.5, 0.75],
'occu': 0.8, 'species': [{'occu': 0.8,
'oxidation_state': 2,
'properties': {'spin': 2},
'element': 'Mg'}],
'label': 'Mg2+:0.800',
'xyz': [3.8401979336749994, 1.2247250003039056e-06,
2.351631795225]}]}
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
self.assertEqual(s[0].specie.spin, 3)
self.assertEqual(type(s), IStructure)
def test_site_properties(self):
site_props = self.propertied_structure.site_properties
self.assertEqual(site_props['magmom'], [5, -5])
self.assertEqual(self.propertied_structure[0].magmom, 5)
self.assertEqual(self.propertied_structure[1].magmom, -5)
def test_copy(self):
new_struct = self.propertied_structure.copy(site_properties={'charge':
[2, 3]})
self.assertEqual(new_struct[0].magmom, 5)
self.assertEqual(new_struct[1].magmom, -5)
self.assertEqual(new_struct[0].charge, 2)
self.assertEqual(new_struct[1].charge, 3)
coords = list()
coords.append([0, 0, 0])
coords.append([0., 0, 0.0000001])
structure = IStructure(self.lattice, ["O", "Si"], coords,
site_properties={'magmom': [5, -5]})
new_struct = structure.copy(site_properties={'charge': [2, 3]},
sanitize=True)
self.assertEqual(new_struct[0].magmom, -5)
self.assertEqual(new_struct[1].magmom, 5)
self.assertEqual(new_struct[0].charge, 3)
self.assertEqual(new_struct[1].charge, 2)
self.assertAlmostEqual(new_struct.volume, structure.volume)
def test_interpolate(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 10)
for s in int_s:
self.assertIsNotNone(s, "Interpolation Failed!")
self.assertEqual(int_s[0].lattice, s.lattice)
self.assertArrayEqual(int_s[1][1].frac_coords, [0.725, 0.5, 0.725])
badlattice = [[1, 0.00, 0.00], [0, 1, 0.00], [0.00, 0, 1]]
struct2 = IStructure(badlattice, ["Si"] * 2, coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si", "Fe"], coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
# Test autosort feature.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s1.pop(0)
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2.pop(2)
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[0].frac_coords, s[0].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
# Make sure autosort has no effect on simpler interpolations,
# and with shuffled sites.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2[0] = "Fe", [0.01, 0.01, 0.01]
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[1].frac_coords, s[1].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
self.assertArrayAlmostEqual(s1[3].frac_coords, s[3].frac_coords)
def test_interpolate_lattice(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
l2 = Lattice.from_lengths_and_angles([3,4,4], [100,100,70])
struct2 = IStructure(l2, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 2, interpolate_lattices=True)
self.assertArrayAlmostEqual(struct.lattice.abc,
int_s[0].lattice.abc)
self.assertArrayAlmostEqual(struct.lattice.angles,
int_s[0].lattice.angles)
self.assertArrayAlmostEqual(struct2.lattice.abc,
int_s[2].lattice.abc)
self.assertArrayAlmostEqual(struct2.lattice.angles,
int_s[2].lattice.angles)
int_angles = [110.3976469, 94.5359731, 64.5165856]
self.assertArrayAlmostEqual(int_angles,
int_s[1].lattice.angles)
# Assert that volume is monotonic
self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)
self.assertTrue(int_s[1].lattice.volume >= struct.lattice.volume)
def test_interpolate_lattice_rotation(self):
l1 = Lattice([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
l2 = Lattice([[-1.01, 0, 0], [0, -1.01, 0], [0, 0, 1]])
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
struct1 = IStructure(l1, ["Si"] * 2, coords)
struct2 = IStructure(l2, ["Si"] * 2, coords)
int_s = struct1.interpolate(struct2, 2, interpolate_lattices=True)
# Assert that volume is monotonic
self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)
self.assertTrue(int_s[1].lattice.volume >= struct1.lattice.volume)
def test_get_primitive_structure(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(fcc_ag.get_primitive_structure()), 1)
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
bcc_li = IStructure(Lattice.cubic(4.09), ["Li"] * 2, coords)
bcc_prim = bcc_li.get_primitive_structure()
self.assertEqual(len(bcc_prim), 1)
self.assertAlmostEqual(bcc_prim.lattice.alpha, 109.47122, 3)
coords = [[0] * 3, [0.5] * 3, [0.25] * 3, [0.26] * 3]
s = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(s.get_primitive_structure()), 4)
def test_primitive_cell_site_merging(self):
l = Lattice.cubic(10)
coords = [[0, 0, 0], [0, 0, 0.5],
[0, 0, 0.26], [0, 0, 0.74]]
sp = ['Ag', 'Ag', 'Be', 'Be']
s = Structure(l, sp, coords)
dm = s.get_primitive_structure().distance_matrix
self.assertArrayAlmostEqual(dm, [[0, 2.5], [2.5, 0]])
def test_primitive_on_large_supercell(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = Structure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
fcc_ag.make_supercell([2, 2, 2])
fcc_ag_prim = fcc_ag.get_primitive_structure()
self.assertEqual(len(fcc_ag_prim), 1)
self.assertAlmostEqual(fcc_ag_prim.volume, 17.10448225)
def test_primitive_positions(self):
coords = [[0, 0, 0], [0.3, 0.35, 0.45]]
s = Structure(Lattice.from_parameters(1,2,3,50,66,88), ["Ag"] * 2, coords)
a = [[-1,2,-3], [3,2,-4], [1,0,-1]]
b = [[4, 0, 0], [1, 1, 0], [3, 0, 1]]
c = [[2, 0, 0], [1, 3, 0], [1, 1, 1]]
for sc_matrix in [c]:
sc = s.copy()
sc.make_supercell(sc_matrix)
prim = sc.get_primitive_structure(0.01)
self.assertEqual(len(prim), 2)
self.assertAlmostEqual(prim.distance_matrix[0,1], 1.0203432356739286)
def test_primitive_structure_volume_check(self):
l = Lattice.tetragonal(10, 30)
coords = [[0.5, 0.8, 0], [0.5, 0.2, 0],
[0.5, 0.8, 0.333], [0.5, 0.5, 0.333],
[0.5, 0.5, 0.666], [0.5, 0.2, 0.666]]
s = IStructure(l, ["Ag"] * 6, coords)
sprim = s.get_primitive_structure(tolerance=0.1)
self.assertEqual(len(sprim), 6)
def test_get_all_neighbors_and_get_neighbors(self):
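        # get_all_neighbors must agree with per-site get_neighbors, and every
        # reported neighbor must be a periodic image at the stated distance.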
s = self.struct
nn = s.get_neighbors_in_shell(s[0].frac_coords, 2, 4,
include_index=True)
self.assertEqual(len(nn), 47)
self.assertEqual(nn[0][-1], 0)
r = random.uniform(3, 6)
all_nn = s.get_all_neighbors(r, True)
for i in range(len(s)):
self.assertEqual(len(all_nn[i]), len(s.get_neighbors(s[i], r)))
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
        s = Structure(Lattice.cubic(1), ["Li"], [[0, 0, 0]])
        s.make_supercell([2, 2, 2])
self.assertEqual(sum(map(len, s.get_all_neighbors(3))), 976)
def test_get_all_neighbors_outside_cell(self):
s = Structure(Lattice.cubic(2), ['Li', 'Li', 'Li', 'Si'],
[[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3])
all_nn = s.get_all_neighbors(0.2, True)
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
self.assertEqual(list(map(len, all_nn)), [2, 2, 2, 0])
def test_get_dist_matrix(self):
ans = [[0., 2.3516318],
[2.3516318, 0.]]
self.assertArrayAlmostEqual(self.struct.distance_matrix, ans)
def test_to_from_file_string(self):
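        # Round-tripping through each text format should preserve the lattice
        # (to 5 decimals) and the fractional coordinates.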
for fmt in ["cif", "json", "poscar", "cssr"]:
s = self.struct.to(fmt=fmt)
self.assertIsNotNone(s)
ss = IStructure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(
ss.lattice.lengths_and_angles,
self.struct.lattice.lengths_and_angles, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords, self.struct.frac_coords)
self.assertIsInstance(ss, IStructure)
self.struct.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.struct.to(filename="Si_testing.yaml")
self.assertTrue(os.path.exists("Si_testing.yaml"))
s = Structure.from_file("Si_testing.yaml")
self.assertEqual(s, self.struct)
os.remove("Si_testing.yaml")
self.struct.to(filename="POSCAR.testing.gz")
s = Structure.from_file("POSCAR.testing.gz")
self.assertEqual(s, self.struct)
os.remove("POSCAR.testing.gz")
class StructureTest(PymatgenTest):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.structure = Structure(lattice, ["Si", "Si"], coords)
def test_mutable_sequence_methods(self):
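        # Structure supports list-style mutation: assigning a species, a
        # (species, coords) tuple or a (species, coords, properties) tuple,
        # plus reverse() and del.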
s = self.structure
s[0] = "Fe"
self.assertEqual(s.formula, "Fe1 Si1")
s[0] = "Fe", [0.5, 0.5, 0.5]
self.assertEqual(s.formula, "Fe1 Si1")
self.assertArrayAlmostEqual(s[0].frac_coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("Si"))
self.assertArrayAlmostEqual(s[0].frac_coords, [0.75, 0.5, 0.75])
s[0] = {"Mn": 0.5}
self.assertEqual(s.formula, "Mn0.5 Fe1")
del s[1]
self.assertEqual(s.formula, "Mn0.5")
s[0] = "Fe", [0.9, 0.9, 0.9], {"magmom": 5}
self.assertEqual(s.formula, "Fe1")
self.assertEqual(s[0].magmom, 5)
def test_non_hash(self):
self.assertRaises(TypeError, dict, [(self.structure, 1)])
def test_sort(self):
s = self.structure
s[0] = "F"
s.sort()
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
s.sort(key=lambda site: site.species_string)
self.assertEqual(s[0].species_string, "F")
self.assertEqual(s[1].species_string, "Si")
s.sort(key=lambda site: site.species_string, reverse=True)
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
def test_append_insert_remove_replace(self):
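        # Exercises insert/append/del together with replace_species, including
        # replacement by partially occupied species mixes.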
s = self.structure
s.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "Si2 O1")
self.assertTrue(s.ntypesp == 2)
self.assertTrue(s.symbol_set == ("Si", "O"))
self.assertTrue(s.indices_from_symbol("Si") == (0,2))
self.assertTrue(s.indices_from_symbol("O") == (1,))
del s[2]
self.assertEqual(s.formula, "Si1 O1")
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
s.append("N", [0.25, 0.25, 0.25])
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
self.assertTrue(s.symbol_set == ("Si", "O", "N"))
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
self.assertTrue(s.indices_from_symbol("N") == (2,))
s[0] = "Ge"
self.assertEqual(s.formula, "Ge1 N1 O1")
self.assertTrue(s.symbol_set == ("Ge", "O", "N"))
s.replace_species({"Ge": "Si"})
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
s.replace_species({"Si": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.5 Ge0.5 N1 O1")
        # This should change the 0.5 Si / 0.5 Ge sites to 0.75 Si / 0.25 Ge.
s.replace_species({"Ge": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.75 Ge0.25 N1 O1")
        # In this case s.ntypesp is ambiguous, so for the time being
        # accessing it raises an AttributeError.
with self.assertRaises(AttributeError):
s.ntypesp
s.remove_species(["Si"])
self.assertEqual(s.formula, "Ge0.25 N1 O1")
s.remove_sites([1, 2])
self.assertEqual(s.formula, "Ge0.25")
def test_add_site_property(self):
s = self.structure
s.add_site_property("charge", [4.1, -5])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[1].charge, -5)
s.add_site_property("magmom", [3, 2])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[0].magmom, 3)
def test_propertied_structure(self):
        # Make sure that site properties are set to None for missing values.
s = self.structure
s.add_site_property("charge", [4.1, -5])
        s.append("Li", [0.3, 0.3, 0.3])
self.assertEqual(len(s.site_properties["charge"]), 3)
def test_perturb(self):
d = 0.1
pre_perturbation_sites = self.structure.sites[:]
self.structure.perturb(distance=d)
post_perturbation_sites = self.structure.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,
3, "Bad perturbation distance")
def test_add_oxidation_states(self):
oxidation_states = {"Si": -4}
self.structure.add_oxidation_state_by_element(oxidation_states)
for site in self.structure:
for k in site.species_and_occu.keys():
self.assertEqual(k.oxi_state, oxidation_states[k.symbol],
"Wrong oxidation state assigned!")
oxidation_states = {"Fe": 2}
self.assertRaises(ValueError,
self.structure.add_oxidation_state_by_element,
oxidation_states)
self.structure.add_oxidation_state_by_site([2, -4])
self.assertEqual(self.structure[0].specie.oxi_state, 2)
self.assertRaises(ValueError,
self.structure.add_oxidation_state_by_site,
[1])
def test_remove_oxidation_states(self):
co_elem = Element("Co")
o_elem = Element("O")
co_specie = Specie("Co", 2)
o_specie = Specie("O", -2)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice.cubic(10)
s_elem = Structure(lattice, [co_elem, o_elem], coords)
s_specie = Structure(lattice, [co_specie, o_specie], coords)
s_specie.remove_oxidation_states()
self.assertEqual(s_elem, s_specie, "Oxidation state remover "
"failed")
def test_apply_operation(self):
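        # First a Cartesian 90-degree rotation about z, then an operation
        # applied in fractional space via fractional=True; each transforms the
        # lattice matrix as asserted below.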
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
s = self.structure.copy()
s.apply_operation(op)
self.assertArrayAlmostEqual(
s.lattice.matrix,
[[0.000000, 3.840198, 0.000000],
[-3.325710, 1.920099, 0.000000],
[2.217138, -0.000000, 3.135509]], 5)
op = SymmOp([[1, 1, 0, 0.5], [1, 0, 0, 0.5], [0, 0, 1, 0.5],
[0, 0, 0, 1]])
s = self.structure.copy()
s.apply_operation(op, fractional=True)
self.assertArrayAlmostEqual(
s.lattice.matrix,
[[5.760297, 3.325710, 0.000000],
[3.840198, 0.000000, 0.000000],
[0.000000, -2.217138, 3.135509]], 5)
def test_apply_strain(self):
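        # A scalar strain dilates all three axes uniformly; a 3-vector strain
        # scales a, b and c independently.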
s = self.structure
initial_coord = s[1].coords
s.apply_strain(0.01)
        # Use an array comparison; element-wise float equality is too fragile.
        self.assertArrayAlmostEqual(
s.lattice.abc,
(3.8785999130369997, 3.878600984287687, 3.8785999130549516))
self.assertArrayAlmostEqual(s[1].coords, initial_coord * 1.01)
a1, b1, c1 = s.lattice.abc
s.apply_strain([0.1, 0.2, 0.3])
a2, b2, c2 = s.lattice.abc
self.assertAlmostEqual(a2 / a1, 1.1)
self.assertAlmostEqual(b2 / b1, 1.2)
self.assertAlmostEqual(c2 / c1, 1.3)
def test_scale_lattice(self):
initial_coord = self.structure[1].coords
self.structure.scale_lattice(self.structure.volume * 1.01 ** 3)
self.assertArrayAlmostEqual(
self.structure.lattice.abc,
(3.8785999130369997, 3.878600984287687, 3.8785999130549516))
self.assertArrayAlmostEqual(self.structure[1].coords,
initial_coord * 1.01)
def test_translate_sites(self):
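        # Translations may be fractional or Cartesian; to_unit_cell=False must
        # leave the translated site outside the [0, 1) cell.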
self.structure.translate_sites([0, 1], [0.5, 0.5, 0.5],
frac_coords=True)
self.assertArrayEqual(self.structure.frac_coords[0],
[0.5, 0.5, 0.5])
self.structure.translate_sites([0], [0.5, 0.5, 0.5],
frac_coords=False)
self.assertArrayAlmostEqual(self.structure.cart_coords[0],
[3.38014845, 1.05428585, 2.06775453])
self.structure.translate_sites([0], [0.5, 0.5, 0.5],
frac_coords=True, to_unit_cell=False)
self.assertArrayAlmostEqual(self.structure.frac_coords[0],
[1.00187517, 1.25665291, 1.15946374])
def test_mul(self):
self.structure *= [2, 1, 1]
self.assertEqual(self.structure.formula, "Si4")
s = [2, 1, 1] * self.structure
self.assertEqual(s.formula, "Si8")
self.assertIsInstance(s, Structure)
s = self.structure * [[1, 0, 0], [2, 1, 0], [0, 0, 2]]
self.assertEqual(s.formula, "Si8")
self.assertArrayAlmostEqual(s.lattice.abc,
[7.6803959, 17.5979979, 7.6803959])
def test_make_supercell(self):
self.structure.make_supercell([2, 1, 1])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell([[1, 0, 0], [2, 1, 0], [0, 0, 1]])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell(2)
self.assertEqual(self.structure.formula, "Si32")
self.assertArrayAlmostEqual(self.structure.lattice.abc,
[15.360792, 35.195996, 7.680396], 5)
def test_disordered_supercell_primitive_cell(self):
l = Lattice.cubic(2)
f = [[0.5, 0.5, 0.5]]
sp = [{'Si': 0.54738}]
s = Structure(l, sp, f)
        # This supercell matrix has historically broken primitive-cell
        # detection, so it is kept as a regression test.
        s.make_supercell([[0, -1, 1], [-1, 1, 0], [1, 1, 1]])
self.assertEqual(len(s.get_primitive_structure()), 1)
def test_another_supercell(self):
        # Included because the old supercell algorithm failed on this matrix.
s = self.structure.copy()
s.make_supercell([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
self.assertEqual(s.formula, "Si32")
s = self.structure.copy()
s.make_supercell([[0, 2, 0], [1, 0, 0], [0, 0, 1]])
self.assertEqual(s.formula, "Si4")
def test_to_from_dict(self):
d = self.structure.as_dict()
s2 = Structure.from_dict(d)
self.assertEqual(type(s2), Structure)
def test_to_from_file_string(self):
for fmt in ["cif", "json", "poscar", "cssr", "yaml", "xsf"]:
s = self.structure.to(fmt=fmt)
self.assertIsNotNone(s)
ss = Structure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(
ss.lattice.lengths_and_angles,
self.structure.lattice.lengths_and_angles, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords,
self.structure.frac_coords)
self.assertIsInstance(ss, Structure)
self.structure.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.structure.to(filename="structure_testing.json")
self.assertTrue(os.path.exists("structure_testing.json"))
s = Structure.from_file("structure_testing.json")
self.assertEqual(s, self.structure)
os.remove("structure_testing.json")
def test_from_spacegroup(self):
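        # Fm-3m is space group no. 225, so the symbol and integer forms must
        # generate identical structures (8 Li + 4 O sites from the two
        # asymmetric-unit sites).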
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1.formula, "Li8 O4")
s2 = Structure.from_spacegroup(225, Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1, s2)
s2 = Structure.from_spacegroup(225, Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]],
site_properties={"charge": [1, -2]})
self.assertEqual(sum(s2.site_properties["charge"]), 0)
s = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
self.assertEqual(s.formula, "Cs1 Cl1")
self.assertRaises(ValueError, Structure.from_spacegroup,
"Pm-3m", Lattice.tetragonal(1, 3), ["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
self.assertRaises(ValueError, Structure.from_spacegroup,
"Pm-3m", Lattice.cubic(3), ["Cs"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
def test_merge_sites(self):
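        # mode="s" sums the occupancies of overlapping sites (Cl 0.25 + 0.1,
        # F 0.15 + 0.1); mode="d" simply deletes the duplicate sites.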
species = [{'Ag': 0.5}, {'Cl': 0.25}, {'Cl': 0.1},
{'Ag': 0.5}, {'F': 0.15}, {'F': 0.1}]
coords = [[0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5],
[0, 0, 0], [0.5, 0.5, 1.501], [0.5, 0.5, 1.501]]
s = Structure(Lattice.cubic(1), species, coords)
s.merge_sites(mode="s")
self.assertEqual(s[0].specie.symbol, 'Ag')
self.assertEqual(s[1].species_and_occu,
Composition({'Cl': 0.35, 'F': 0.25}))
self.assertArrayAlmostEqual(s[1].frac_coords, [.5, .5, .5005])
# Test for TaS2 with spacegroup 166 in 160 setting.
l = Lattice.from_lengths_and_angles([3.374351, 3.374351, 20.308941],
[90.000000, 90.000000, 120.000000])
species = ["Ta", "S", "S"]
coords = [[0.000000, 0.000000, 0.944333], [0.333333, 0.666667, 0.353424],
[0.666667, 0.333333, 0.535243]]
tas2 = Structure.from_spacegroup(160, l, species, coords)
assert len(tas2) == 13
tas2.merge_sites(mode="d")
assert len(tas2) == 9
l = Lattice.from_lengths_and_angles([3.587776, 3.587776, 19.622793],
[90.000000, 90.000000, 120.000000])
species = ["Na", "V", "S", "S"]
coords = [[0.333333, 0.666667, 0.165000], [0.000000, 0.000000, 0.998333],
[0.333333, 0.666667, 0.399394], [0.666667, 0.333333, 0.597273]]
navs2 = Structure.from_spacegroup(160, l, species, coords)
assert len(navs2) == 18
navs2.merge_sites(mode="d")
assert len(navs2) == 12
def test_properties(self):
self.assertEqual(self.structure.num_sites, len(self.structure))
self.structure.make_supercell(2)
self.structure[1] = "C"
sites = list(self.structure.group_by_types())
self.assertEqual(sites[-1].specie.symbol, "C")
self.structure.add_oxidation_state_by_element({"Si": 4, "C": 2})
self.assertEqual(self.structure.charge, 62)
def test_set_item(self):
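        # __setitem__ accepts an index, a tuple of indices, a slice, or a
        # species string that remaps every matching site.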
s = self.structure.copy()
s[0] = "C"
self.assertEqual(s.formula, "Si1 C1")
s[(0, 1)] = "Ge"
self.assertEqual(s.formula, "Ge2")
s[0:2] = "Sn"
self.assertEqual(s.formula, "Sn2")
s = self.structure.copy()
s["Si"] = "C"
self.assertEqual(s.formula, "C2")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si1 C0.5")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si1.25 C0.125")
def test_init_error(self):
self.assertRaises(StructureError, Structure, Lattice.cubic(3), ["Si"], [[0, 0, 0], [0.5, 0.5, 0.5]])
def test_from_sites(self):
self.structure.add_site_property("hello", [1, 2])
s = Structure.from_sites(self.structure, to_unit_cell=True)
self.assertEqual(s.site_properties["hello"][1], 2)
def test_magic(self):
s = Structure.from_sites(self.structure)
self.assertEqual(s, self.structure)
self.assertNotEqual(s, None)
s.apply_strain(0.5)
self.assertNotEqual(s, self.structure)
self.assertNotEqual(self.structure * 2, self.structure)
class IMoleculeTest(PymatgenTest):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.coords = coords
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_set_item(self):
s = self.mol.copy()
s[0] = "Si"
self.assertEqual(s.formula, "Si1 H4")
s[(0, 1)] = "Ge"
self.assertEqual(s.formula, "Ge2 H3")
s[0:2] = "Sn"
self.assertEqual(s.formula, "Sn2 H3")
s = self.mol.copy()
s["H"] = "F"
self.assertEqual(s.formula, "C1 F4")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si0.5 C0.25 F4")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si0.625 C0.0625 F4")
def test_bad_molecule(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
[-0.513360, 0.889165, -0.36301]]
self.assertRaises(StructureError, Molecule,
["C", "H", "H", "H", "H", "H"], coords,
validate_proximity=True)
def test_get_angle_dihedral(self):
self.assertAlmostEqual(self.mol.get_angle(1, 0, 2), 109.47122144618737)
self.assertAlmostEqual(self.mol.get_angle(3, 1, 2), 60.00001388659683)
        self.assertAlmostEqual(self.mol.get_dihedral(0, 1, 2, 3),
                               -35.26438851071765)
coords = list()
coords.append([0, 0, 0])
coords.append([0, 0, 1])
coords.append([0, 1, 1])
coords.append([1, 1, 1])
self.mol2 = Molecule(["C", "O", "N", "S"], coords)
self.assertAlmostEqual(self.mol2.get_dihedral(0, 1, 2, 3), -90)
def test_get_covalent_bonds(self):
self.assertEqual(len(self.mol.get_covalent_bonds()), 4)
def test_properties(self):
self.assertEqual(len(self.mol), 5)
self.assertTrue(self.mol.is_ordered)
self.assertEqual(self.mol.formula, "H4 C1")
def test_repr_str(self):
ans = """Full Formula (H4 C1)
Reduced Formula: H4C
Charge = 0, Spin Mult = 1
Sites (5)
0 C 0.000000 0.000000 0.000000
1 H 0.000000 0.000000 1.089000
2 H 1.026719 0.000000 -0.363000
3 H -0.513360 -0.889165 -0.363000
4 H -0.513360 0.889165 -0.363000"""
self.assertEqual(self.mol.__str__(), ans)
ans = """Molecule Summary
Site: C (0.0000, 0.0000, 0.0000)
Site: H (0.0000, 0.0000, 1.0890)
Site: H (1.0267, 0.0000, -0.3630)
Site: H (-0.5134, -0.8892, -0.3630)
Site: H (-0.5134, 0.8892, -0.3630)"""
self.assertEqual(repr(self.mol), ans)
def test_site_properties(self):
propertied_mol = Molecule(["C", "H", "H", "H", "H"], self.coords,
site_properties={'magmom':
[0.5, -0.5, 1, 2, 3]})
self.assertEqual(propertied_mol[0].magmom, 0.5)
self.assertEqual(propertied_mol[1].magmom, -0.5)
def test_get_boxed_structure(self):
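        # Boxing must center the molecule, reject boxes too small to hold it,
        # and honor the images, offset and no_cross options.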
s = self.mol.get_boxed_structure(9, 9, 9)
        # The C atom should sit at the center of the box.
self.assertArrayAlmostEqual(s[4].frac_coords,
[0.50000001, 0.5, 0.5])
self.assertArrayAlmostEqual(s[1].frac_coords,
[0.6140799, 0.5, 0.45966667])
self.assertRaises(ValueError, self.mol.get_boxed_structure, 1, 1, 1)
s2 = self.mol.get_boxed_structure(5, 5, 5, (2, 3, 4))
self.assertEqual(len(s2), 24 * 5)
self.assertEqual(s2.lattice.abc, (10, 15, 20))
# Test offset option
        s3 = self.mol.get_boxed_structure(9, 9, 9, offset=[0.5, 0.5, 0.5])
        self.assertArrayAlmostEqual(s3[4].coords, [5, 5, 5])
        # Test no_cross option
        self.assertRaises(ValueError, self.mol.get_boxed_structure,
                          5, 5, 5, offset=[10, 10, 10], no_cross=True)
def test_get_distance(self):
self.assertAlmostEqual(self.mol.get_distance(0, 1), 1.089)
def test_get_neighbors(self):
nn = self.mol.get_neighbors(self.mol[0], 1)
self.assertEqual(len(nn), 0)
nn = self.mol.get_neighbors(self.mol[0], 2)
self.assertEqual(len(nn), 4)
def test_get_neighbors_in_shell(self):
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 0, 1)
self.assertEqual(len(nn), 1)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 2, 0.1)
self.assertEqual(len(nn), 0)
def test_get_dist_matrix(self):
ans = [[0.0, 1.089, 1.08899995636, 1.08900040717, 1.08900040717],
[1.089, 0.0, 1.77832952654, 1.7783298026, 1.7783298026],
[1.08899995636, 1.77832952654, 0.0, 1.77833003783,
1.77833003783],
[1.08900040717, 1.7783298026, 1.77833003783, 0.0, 1.77833],
[1.08900040717, 1.7783298026, 1.77833003783, 1.77833, 0.0]]
self.assertArrayAlmostEqual(self.mol.distance_matrix, ans)
def test_break_bond(self):
(mol1, mol2) = self.mol.break_bond(0, 1)
self.assertEqual(mol1.formula, "H3 C1")
self.assertEqual(mol2.formula, "H1")
def test_prop(self):
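        # Charge and electron counts must stay consistent: CH4+ has 9
        # electrons, so its default spin multiplicity is 2 (a doublet).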
self.assertEqual(self.mol.charge, 0)
self.assertEqual(self.mol.spin_multiplicity, 1)
self.assertEqual(self.mol.nelectrons, 10)
self.assertArrayAlmostEqual(self.mol.center_of_mass, [0, 0, 0])
self.assertRaises(ValueError, Molecule, ["C", "H", "H", "H", "H"],
self.coords, charge=1, spin_multiplicity=1)
mol = Molecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertEqual(mol.spin_multiplicity, 2)
self.assertEqual(mol.nelectrons, 9)
        # Triplet O2.
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]],
spin_multiplicity=3)
self.assertEqual(mol.spin_multiplicity, 3)
def test_equal(self):
mol = IMolecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertNotEqual(mol, self.mol)
def test_get_centered_molecule(self):
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]],
spin_multiplicity=3)
centered = mol.get_centered_molecule()
self.assertArrayAlmostEqual(centered.center_of_mass, [0, 0, 0])
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = IMolecule.from_dict(d)
self.assertEqual(type(mol2), IMolecule)
propertied_mol = Molecule(["C", "H", "H", "H", "H"], self.coords,
charge=1,
site_properties={'magmom':
[0.5, -0.5, 1, 2, 3]})
d = propertied_mol.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 0.5)
mol = Molecule.from_dict(d)
self.assertEqual(propertied_mol, mol)
self.assertEqual(mol[0].magmom, 0.5)
self.assertEqual(mol.formula, "H4 C1")
self.assertEqual(mol.charge, 1)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03", "yaml"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = IMolecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, IMolecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
self.mol.to(filename="CH4_testing.yaml")
self.assertTrue(os.path.exists("CH4_testing.yaml"))
mol = Molecule.from_file("CH4_testing.yaml")
self.assertEqual(self.mol, mol)
os.remove("CH4_testing.yaml")
class MoleculeTest(PymatgenTest):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_mutable_sequence_methods(self):
s = self.mol
s[1] = ("F", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "H3 C1 F1")
self.assertArrayAlmostEqual(s[1].coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("H"))
self.assertArrayAlmostEqual(s[0].coords,
[-0.513360, 0.889165, -0.363000])
del s[1]
self.assertEqual(s.formula, "H2 C1 F1")
s[3] = "N", [0,0,0], {"charge": 4}
self.assertEqual(s.formula, "H2 N1 F1")
self.assertEqual(s[3].charge, 4)
def test_insert_remove_append(self):
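        # After an H is deleted the molecule has an odd electron count, so
        # set_charge_and_spin(0) should give a doublet.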
mol = self.mol
mol.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(mol.formula, "H4 C1 O1")
del mol[2]
self.assertEqual(mol.formula, "H3 C1 O1")
mol.set_charge_and_spin(0)
self.assertEqual(mol.spin_multiplicity, 2)
mol.append("N", [1, 1, 1])
self.assertEqual(mol.formula, "H3 C1 N1 O1")
self.assertRaises(TypeError, dict, [(mol, 1)])
mol.remove_sites([0, 1])
self.assertEqual(mol.formula, "H3 N1")
def test_translate_sites(self):
self.mol.translate_sites([0, 1], [0.5, 0.5, 0.5])
self.assertArrayEqual(self.mol.cart_coords[0],
[0.5, 0.5, 0.5])
def test_rotate_sites(self):
        self.mol.rotate_sites(theta=np.radians(30))
        self.assertArrayAlmostEqual(self.mol.cart_coords[2],
                                    [0.889164737, 0.513359500, -0.363000000])
def test_replace(self):
self.mol[0] = "Ge"
self.assertEqual(self.mol.formula, "Ge1 H4")
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.5 Ge0.5 H4")
#this should change the .5Si .5Ge sites to .75Si .25Ge
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.75 Ge0.25 H4")
d = 0.1
pre_perturbation_sites = self.mol.sites[:]
self.mol.perturb(distance=d)
post_perturbation_sites = self.mol.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,
3, "Bad perturbation distance")
def test_add_site_property(self):
self.mol.add_site_property("charge", [4.1, -2, -2, -2, -2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[1].charge, -2)
self.mol.add_site_property("magmom", [3, 2, 2, 2, 2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[0].magmom, 3)
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = Molecule.from_dict(d)
self.assertEqual(type(mol2), Molecule)
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
self.mol.apply_operation(op)
self.assertArrayAlmostEqual(self.mol[2].coords,
[0.000000, 1.026719, -0.363000])
def test_substitute(self):
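        # substitute() replaces a terminal atom with a functional group; the
        # dummy "X" site in the group marks the attachment point.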
coords = [[0.000000, 0.000000, 1.08],
[0.000000, 0.000000, 0.000000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
sub = Molecule(["X", "C", "H", "H", "H"], coords)
self.mol.substitute(1, sub)
self.assertAlmostEqual(self.mol.get_distance(0, 4), 1.54)
f = Molecule(["X", "F"], [[0, 0, 0], [0, 0, 1.11]])
self.mol.substitute(2, f)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.35)
oh = Molecule(["X", "O", "H"],
[[0, 0.780362, -.456316], [0, 0, .114079],
[0, -.780362, -.456316]])
self.mol.substitute(1, oh)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.43)
self.mol.substitute(3, "methyl")
self.assertEqual(self.mol.formula, "H7 C3 O1 F1")
coords = [[0.00000, 1.40272, 0.00000],
[0.00000, 2.49029, 0.00000],
[-1.21479, 0.70136, 0.00000],
[-2.15666, 1.24515, 0.00000],
[-1.21479, -0.70136, 0.00000],
[-2.15666, -1.24515, 0.00000],
[0.00000, -1.40272, 0.00000],
[0.00000, -2.49029, 0.00000],
[1.21479, -0.70136, 0.00000],
[2.15666, -1.24515, 0.00000],
[1.21479, 0.70136, 0.00000],
[2.15666, 1.24515, 0.00000]]
benzene = Molecule(["C", "H", "C", "H", "C", "H", "C", "H", "C", "H",
"C", "H"], coords)
benzene.substitute(1, sub)
self.assertEqual(benzene.formula, "H8 C7")
        # The attached carbon should lie in the benzene plane.
self.assertAlmostEqual(benzene[11].coords[2], 0)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = Molecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, Molecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
if __name__ == '__main__':
    try:
        import unittest2 as unittest  # Python 2 backport, if available.
    except ImportError:
        import unittest
    unittest.main()
avg_line_length: 42.878761
max_line_length: 108
alphanum_fraction: 0.546137
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.5 Ge0.5 H4")
        # This should change the 0.5 Si / 0.5 Ge sites to 0.75 Si / 0.25 Ge
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.75 Ge0.25 H4")
d = 0.1
pre_perturbation_sites = self.mol.sites[:]
self.mol.perturb(distance=d)
post_perturbation_sites = self.mol.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,
3, "Bad perturbation distance")
def test_add_site_property(self):
self.mol.add_site_property("charge", [4.1, -2, -2, -2, -2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[1].charge, -2)
self.mol.add_site_property("magmom", [3, 2, 2, 2, 2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[0].magmom, 3)
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = Molecule.from_dict(d)
self.assertEqual(type(mol2), Molecule)
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
self.mol.apply_operation(op)
self.assertArrayAlmostEqual(self.mol[2].coords,
[0.000000, 1.026719, -0.363000])
def test_substitute(self):
coords = [[0.000000, 0.000000, 1.08],
[0.000000, 0.000000, 0.000000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
sub = Molecule(["X", "C", "H", "H", "H"], coords)
self.mol.substitute(1, sub)
self.assertAlmostEqual(self.mol.get_distance(0, 4), 1.54)
f = Molecule(["X", "F"], [[0, 0, 0], [0, 0, 1.11]])
self.mol.substitute(2, f)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.35)
oh = Molecule(["X", "O", "H"],
[[0, 0.780362, -.456316], [0, 0, .114079],
[0, -.780362, -.456316]])
self.mol.substitute(1, oh)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.43)
self.mol.substitute(3, "methyl")
self.assertEqual(self.mol.formula, "H7 C3 O1 F1")
coords = [[0.00000, 1.40272, 0.00000],
[0.00000, 2.49029, 0.00000],
[-1.21479, 0.70136, 0.00000],
[-2.15666, 1.24515, 0.00000],
[-1.21479, -0.70136, 0.00000],
[-2.15666, -1.24515, 0.00000],
[0.00000, -1.40272, 0.00000],
[0.00000, -2.49029, 0.00000],
[1.21479, -0.70136, 0.00000],
[2.15666, -1.24515, 0.00000],
[1.21479, 0.70136, 0.00000],
[2.15666, 1.24515, 0.00000]]
benzene = Molecule(["C", "H", "C", "H", "C", "H", "C", "H", "C", "H",
"C", "H"], coords)
benzene.substitute(1, sub)
self.assertEqual(benzene.formula, "H8 C7")
#Carbon attached should be in plane.
self.assertAlmostEqual(benzene[11].coords[2], 0)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = Molecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, Molecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
if __name__ == '__main__':
import unittest2 as unittest
unittest.main()
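# Illustrative sketch (not part of the test suite above; it simply assumes
# the pymatgen Molecule API that these tests exercise): build methane and
# query one of the geometric properties the assertions check.
# >>> mol = Molecule(["C", "H", "H", "H", "H"],
# ...                [[0, 0, 0], [0, 0, 1.089],
# ...                 [1.026719, 0, -0.363],
# ...                 [-0.513360, -0.889165, -0.363],
# ...                 [-0.513360, 0.889165, -0.363]])
# >>> round(mol.get_angle(1, 0, 2), 2)   # H-C-H angle at the carbon
# 109.47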
| true
| true
|
1c43e0cada49727bd7584ef88bc5aea8845fc86a
| 9,387
|
py
|
Python
|
src/bondora_api/models/api_result_event_log.py
|
parruc/bondora_api
|
f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d
|
[
"Apache-2.0"
] | 8
|
2019-03-09T20:38:27.000Z
|
2021-02-10T20:44:22.000Z
|
src/bondora_api/models/api_result_event_log.py
|
parruc/bondora_api
|
f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d
|
[
"Apache-2.0"
] | 1
|
2018-03-06T09:44:21.000Z
|
2018-03-06T09:44:21.000Z
|
src/bondora_api/models/api_result_event_log.py
|
parruc/bondora_api
|
f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d
|
[
"Apache-2.0"
] | 3
|
2019-06-03T13:44:05.000Z
|
2020-11-16T13:17:38.000Z
|
# coding: utf-8
"""
Bondora API V1
Bondora API version 1
OpenAPI spec version: v1
Contact: investor@bondora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ApiResultEventLog(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, page_size=None, page_nr=None, total_count=None, count=None, payload=None, success=None, errors=None):
"""
ApiResultEventLog - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'page_size': 'int',
'page_nr': 'int',
'total_count': 'int',
'count': 'int',
'payload': 'list[EventLogItem]',
'success': 'bool',
'errors': 'list[ApiError]'
}
self.attribute_map = {
'page_size': 'PageSize',
'page_nr': 'PageNr',
'total_count': 'TotalCount',
'count': 'Count',
'payload': 'Payload',
'success': 'Success',
'errors': 'Errors'
}
self._page_size = page_size
self._page_nr = page_nr
self._total_count = total_count
self._count = count
self._payload = payload
self._success = success
self._errors = errors
@property
def page_size(self):
"""
Gets the page_size of this ApiResultEventLog.
Requested Max items in result
:return: The page_size of this ApiResultEventLog.
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""
Sets the page_size of this ApiResultEventLog.
Requested Max items in result
:param page_size: The page_size of this ApiResultEventLog.
:type: int
"""
        if page_size is None:
raise ValueError("Invalid value for `page_size`, must not be `None`")
if page_size > 2.147483647E9:
raise ValueError("Invalid value for `page_size`, must be a value less than or equal to `2.147483647E9`")
if page_size < 0.0:
raise ValueError("Invalid value for `page_size`, must be a value greater than or equal to `0.0`")
self._page_size = page_size
@property
def page_nr(self):
"""
Gets the page_nr of this ApiResultEventLog.
Requested page nr
:return: The page_nr of this ApiResultEventLog.
:rtype: int
"""
return self._page_nr
@page_nr.setter
def page_nr(self, page_nr):
"""
Sets the page_nr of this ApiResultEventLog.
Requested page nr
:param page_nr: The page_nr of this ApiResultEventLog.
:type: int
"""
        if page_nr is None:
raise ValueError("Invalid value for `page_nr`, must not be `None`")
if page_nr > 2.147483647E9:
raise ValueError("Invalid value for `page_nr`, must be a value less than or equal to `2.147483647E9`")
if page_nr < 1.0:
raise ValueError("Invalid value for `page_nr`, must be a value greater than or equal to `1.0`")
self._page_nr = page_nr
@property
def total_count(self):
"""
Gets the total_count of this ApiResultEventLog.
Total number of items found
:return: The total_count of this ApiResultEventLog.
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""
Sets the total_count of this ApiResultEventLog.
Total number of items found
:param total_count: The total_count of this ApiResultEventLog.
:type: int
"""
if not total_count:
total_count = 0
# raise ValueError("Invalid value for `total_count`, must not be `None`")
if total_count > 2.147483647E9:
raise ValueError("Invalid value for `total_count`, must be a value less than or equal to `2.147483647E9`")
if total_count < 0.0:
raise ValueError("Invalid value for `total_count`, must be a value greater than or equal to `0.0`")
self._total_count = total_count
@property
def count(self):
"""
Gets the count of this ApiResultEventLog.
Number of items returned
:return: The count of this ApiResultEventLog.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""
Sets the count of this ApiResultEventLog.
Number of items returned
:param count: The count of this ApiResultEventLog.
:type: int
"""
if not count:
count = 0
# raise ValueError("Invalid value for `count`, must not be `None`")
if count > 2.147483647E9:
raise ValueError("Invalid value for `count`, must be a value less than or equal to `2.147483647E9`")
if count < 0.0:
raise ValueError("Invalid value for `count`, must be a value greater than or equal to `0.0`")
self._count = count
@property
def payload(self):
"""
Gets the payload of this ApiResultEventLog.
The payload of the response. Type depends on the API request.
:return: The payload of this ApiResultEventLog.
:rtype: list[EventLogItem]
"""
return self._payload
@payload.setter
def payload(self, payload):
"""
Sets the payload of this ApiResultEventLog.
The payload of the response. Type depends on the API request.
:param payload: The payload of this ApiResultEventLog.
:type: list[EventLogItem]
"""
self._payload = payload
@property
def success(self):
"""
Gets the success of this ApiResultEventLog.
        Indicates if the request was successful or not. true if the request was handled successfully, false otherwise.
:return: The success of this ApiResultEventLog.
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""
Sets the success of this ApiResultEventLog.
        Indicates if the request was successful or not. true if the request was handled successfully, false otherwise.
:param success: The success of this ApiResultEventLog.
:type: bool
"""
self._success = success
@property
def errors(self):
"""
Gets the errors of this ApiResultEventLog.
        Error(s) associated with the API request.
:return: The errors of this ApiResultEventLog.
:rtype: list[ApiError]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""
Sets the errors of this ApiResultEventLog.
        Error(s) associated with the API request.
:param errors: The errors of this ApiResultEventLog.
:type: list[ApiError]
"""
self._errors = errors
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
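# Illustrative usage sketch (hypothetical values, not part of the generated
# model): the setters enforce the documented bounds, while to_dict() walks
# swagger_types to serialize the object (and any nested models) to a dict.
# >>> result = ApiResultEventLog(page_size=50, page_nr=1, total_count=0,
# ...                            count=0, payload=[], success=True)
# >>> result.to_dict()['success']
# True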
| 30.086538
| 132
| 0.584425
|
from pprint import pformat
from six import iteritems
import re
class ApiResultEventLog(object):
def __init__(self, page_size=None, page_nr=None, total_count=None, count=None, payload=None, success=None, errors=None):
self.swagger_types = {
'page_size': 'int',
'page_nr': 'int',
'total_count': 'int',
'count': 'int',
'payload': 'list[EventLogItem]',
'success': 'bool',
'errors': 'list[ApiError]'
}
self.attribute_map = {
'page_size': 'PageSize',
'page_nr': 'PageNr',
'total_count': 'TotalCount',
'count': 'Count',
'payload': 'Payload',
'success': 'Success',
'errors': 'Errors'
}
self._page_size = page_size
self._page_nr = page_nr
self._total_count = total_count
self._count = count
self._payload = payload
self._success = success
self._errors = errors
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, page_size):
        if page_size is None:
raise ValueError("Invalid value for `page_size`, must not be `None`")
if page_size > 2.147483647E9:
raise ValueError("Invalid value for `page_size`, must be a value less than or equal to `2.147483647E9`")
if page_size < 0.0:
raise ValueError("Invalid value for `page_size`, must be a value greater than or equal to `0.0`")
self._page_size = page_size
@property
def page_nr(self):
return self._page_nr
@page_nr.setter
def page_nr(self, page_nr):
        if page_nr is None:
raise ValueError("Invalid value for `page_nr`, must not be `None`")
if page_nr > 2.147483647E9:
raise ValueError("Invalid value for `page_nr`, must be a value less than or equal to `2.147483647E9`")
if page_nr < 1.0:
raise ValueError("Invalid value for `page_nr`, must be a value greater than or equal to `1.0`")
self._page_nr = page_nr
@property
def total_count(self):
return self._total_count
@total_count.setter
def total_count(self, total_count):
if not total_count:
total_count = 0
if total_count > 2.147483647E9:
raise ValueError("Invalid value for `total_count`, must be a value less than or equal to `2.147483647E9`")
if total_count < 0.0:
raise ValueError("Invalid value for `total_count`, must be a value greater than or equal to `0.0`")
self._total_count = total_count
@property
def count(self):
return self._count
@count.setter
def count(self, count):
if not count:
count = 0
if count > 2.147483647E9:
raise ValueError("Invalid value for `count`, must be a value less than or equal to `2.147483647E9`")
if count < 0.0:
raise ValueError("Invalid value for `count`, must be a value greater than or equal to `0.0`")
self._count = count
@property
def payload(self):
return self._payload
@payload.setter
def payload(self, payload):
self._payload = payload
@property
def success(self):
return self._success
@success.setter
def success(self, success):
self._success = success
@property
def errors(self):
return self._errors
@errors.setter
def errors(self, errors):
self._errors = errors
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c43e0f431cee83160cd2cca89590f3101053b77
| 11,888
|
py
|
Python
|
geoflow1D/GeoModule.py
|
HerminioTH/GeoFlow1D
|
44a5c11e3297827b265c1ea44bb18256b074fa66
|
[
"MIT"
] | 2
|
2020-02-10T11:23:16.000Z
|
2020-07-01T20:28:57.000Z
|
geoflow1D/GeoModule.py
|
HerminioTH/GeoFlow1D
|
44a5c11e3297827b265c1ea44bb18256b074fa66
|
[
"MIT"
] | null | null | null |
geoflow1D/GeoModule.py
|
HerminioTH/GeoFlow1D
|
44a5c11e3297827b265c1ea44bb18256b074fa66
|
[
"MIT"
] | null | null | null |
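# Module-level note (hedged, inferred from the assembly routines below):
# these helpers build 1D finite-volume blocks of a coupled
# displacement/pressure (poroelastic) system. `uShift` offsets row indices
# by uShift * nVertices, so the same routine can target either the
# displacement block (uShift=0) or the pressure block (uShift=1) of one
# monolithic matrix; column indices built with (1 - uShift) address the
# complementary field.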
def AssemblyStiffnessMatrix(linearSystem, grid, props, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
for e in region.getElements():
dx = e.getLength()
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
forceOperator = [-M/dx, M/dx]
localIndex = 0
for v in e.getVertices():
flux = forceOperator[localIndex]
vIndex = v.getIndex() + uShift*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, vIndex, flux )
linearSystem.addValueToMatrix( fIndex, vIndex, -flux )
localIndex += 1
def AssemblyGravityToVector(linearSystem, grid, props, gravity, uShift=0):
n = grid.getNumberOfVertices()
for region in grid.getRegions():
rho = props.rho.getValue(region)
for elem in region.getElements():
face = elem.getFace()
bVertex = face.getBackwardVertex()
fVertex = face.getForwardVertex()
value = -rho*gravity*elem.getSubVolume()
linearSystem.addValueToVector(bVertex.getIndex() + uShift*n, value)
linearSystem.addValueToVector(fVertex.getIndex() + uShift*n, value)
def AssemblyPorePressureToMatrix(linearSystem, grid, props, uShift=0):
for region in grid.getRegions():
alpha = props.biot.getValue(region)
for e in region.getElements():
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
for i,v in enumerate(e.getVertices()):
col = v.getIndex() + (1-uShift)*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, col, -alpha/2 )
linearSystem.addValueToMatrix( fIndex, col, +alpha/2 )
def AssemblyPorePressureToVector(linearSystem, grid, props, pField, uShift=0):
for region in grid.getRegions():
alpha = props.biot.getValue(region)
for e in region.getElements():
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
pBack = pField.getValue(f.getBackwardVertex())
pFron = pField.getValue(f.getForwardVertex())
value = alpha/2.
linearSystem.addValueToVector(bIndex, value*pBack)
linearSystem.addValueToVector(bIndex, value*pFron)
linearSystem.addValueToVector(fIndex, -value*pBack)
linearSystem.addValueToVector(fIndex, -value*pFron)
def AssemblyUDNMatrix(cooMatrix, grid, props):
for region in grid.getRegions():
Q = props.Q.getValue(region)
alpha = props.biot.getValue(region)
coef = alpha*alpha*Q
for e in region.getElements():
dx = e.getLength()
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex()
fIndex = f.getForwardVertex().getIndex()
forceOperator = [-coef/dx, coef/dx]
localIndex = 0
for v in e.getVertices():
flux = forceOperator[localIndex]
vIndex = v.getIndex()
cooMatrix.addValueToMatrix(bIndex, vIndex, flux)
cooMatrix.addValueToMatrix(fIndex, vIndex, -flux)
localIndex += 1
# ------------------------- PHYSICAL INFLUENCE SCHEME - FULL -------------------------------
def AssemblyPisFullToGeoMatrix(linearSystem, grid, props, timeStep, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
dx = e.getLength()
B = props.mu*dx*alpha/(8*k*timeStep)
pisOperator = [-alpha*B, alpha*B]
for localIndex, v in enumerate(e.getVertices()):
coef = pisOperator[localIndex]
vIndex = v.getIndex() + uShift*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, vIndex, coef )
linearSystem.addValueToMatrix( fIndex, vIndex, -coef )
def AssemblyPisFullToGeoVector(linearSystem, grid, props, timeStep, uOldField, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
f = e.getFace()
dx = e.getLength()
B = props.mu*dx*alpha/(8*k*timeStep)
bVertex = f.getBackwardVertex()
fVertex = f.getForwardVertex()
ub = uOldField.getValue(bVertex)
uf = uOldField.getValue(fVertex)
linearSystem.addValueToVector(bVertex.getIndex() + uShift*grid.getNumberOfVertices(), alpha*B*(uf - ub))
linearSystem.addValueToVector(fVertex.getIndex() + uShift*grid.getNumberOfVertices(), -alpha*B*(uf - ub))
def AssemblyPisFullToPorePressureMatrix(linearSystem, grid, props, timeStep, uShift=0):
for region in grid.getRegions():
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
dx = e.getLength()
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
A = props.mu*dx*dx/(16*k*Q*timeStep)
for i,v in enumerate(e.getVertices()):
col = v.getIndex() + (1-uShift)*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, col, +alpha*A )
linearSystem.addValueToMatrix( fIndex, col, -alpha*A )
def AssemblyPisFullToPorePressureVector(linearSystem, grid, props, timeStep, pOldField, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
f = e.getFace()
dx = e.getLength()
A = props.mu*dx*dx/(16*k*Q*timeStep)
bVertex = f.getBackwardVertex()
fVertex = f.getForwardVertex()
pb = pOldField.getValue(bVertex)
pf = pOldField.getValue(fVertex)
linearSystem.addValueToVector(bVertex.getIndex() + uShift*grid.getNumberOfVertices(), alpha*A*(pf + pb))
linearSystem.addValueToVector(fVertex.getIndex() + uShift*grid.getNumberOfVertices(), -alpha*A*(pf + pb))
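# Hedged reading of the PIS coefficients used above:
# B = mu*dx*alpha/(8*k*timeStep) and A = mu*dx*dx/(16*k*Q*timeStep) act as
# element-local stabilization weights, applied both to the current unknowns
# (the *Matrix routines) and to the old-time displacement/pressure fields
# (the *Vector routines); both grow with the element size dx and shrink as
# the time step grows.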
# ------------------------------------------------------------------------------------------
# ---------------------------- LOOP BY ELEMENTS ----------------------------------
def AssemblyStiffnessMatrix_e(linearSystem, grid, modulus, uShift=0):
for element in grid.getElements():
value = modulus.getValue(element)
dx = element.getLength()
f = element.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
forceOperator = [-value/dx, value/dx]
localIndex = 0
for v in element.getVertices():
flux = forceOperator[localIndex]
vIndex = v.getIndex() + uShift*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, vIndex, flux )
linearSystem.addValueToMatrix( fIndex, vIndex, -flux )
localIndex += 1
def AssemblyGravityToVector_e(linearSystem, grid, densityOnElements, gravity, uShift=0):
for element in grid.getElements():
rho = densityOnElements.getValue(element)
face = element.getFace()
bVertex = face.getBackwardVertex()
fVertex = face.getForwardVertex()
value = -rho*gravity*element.getSubVolume()
linearSystem.addValueToVector(bVertex.getIndex() + uShift*grid.getNumberOfVertices(), value)
linearSystem.addValueToVector(fVertex.getIndex() + uShift*grid.getNumberOfVertices(), value)
def AssemblyPorePressureToVector_e(linearSystem, grid, biotOnElements, pField, uShift=0):
for element in grid.getElements():
alpha = biotOnElements.getValue(element)
f = element.getFace()
backVertex = f.getBackwardVertex()
forVertex = f.getForwardVertex()
bIndex = backVertex.getIndex() + uShift*grid.getNumberOfVertices()
fIndex = forVertex.getIndex() + uShift*grid.getNumberOfVertices()
pBack = pField.getValue(backVertex)
pFron = pField.getValue(forVertex)
value = alpha/2.
linearSystem.addValueToVector(bIndex, value*pBack)
linearSystem.addValueToVector(bIndex, value*pFron)
linearSystem.addValueToVector(fIndex, -value*pBack)
linearSystem.addValueToVector(fIndex, -value*pFron)
# if __name__ == '__main__':
# from GridLib import *
# from FieldsLib import *
# from LinearSystemLib import *
# L_0 = 4.
# L_1 = 6.
# L = L_0 + L_1
# nVertices = 10
# nodesCoord, elemConn = createGridData( L, nVertices )
# # -------------- GRID DATA ----------------------------
# gridData = GridData()
# gridData.setElementConnectivity( elemConn )
# gridData.setNodeCoordinates( nodesCoord )
# centroidCoord = []
# for e in elemConn:
# x_0 = gridData.nodeCoordinates[e[0]]
# x_1 = gridData.nodeCoordinates[e[1]]
# centroidCoord.append((x_0 + x_1)/2.)
# region_1 = []
# region_2 = []
# namesOfRegions = ['bottom', 'top']
# for e, x in enumerate(centroidCoord):
# if x <= L_0:
# region_1.append(e)
# elif x > L_0:
# region_2.append(e)
# gridData.setElementsToRegion(region_1, 'lower_layer')
# gridData.setElementsToRegion(region_2, 'upper_layer')
# g = Grid_1D( gridData )
# for region in g.getRegions():
# print(region.getName())
# for element in region.getElements():
# vec = [element.getIndex()]
# for v in element.getVertices():
# vec.append(v.getIndex())
# print(vec)
# print('\n')
# # -----------------------------------------------------
# # -------------- PROPERTIES ----------------------------
# M = ScalarField(g.getNumberOfRegions())
# M.setValue(g.getRegions()[0], 1000.)
# M.setValue(g.getRegions()[1], 2000.)
# # -----------------------------------------------------
# # -------------- LINEAR SYSTEM ------------------------
# ls = LinearSystem(g.getNumberOfVertices())
# AssemblyStiffnessMatrix(ls, g, M, 0)
# ls.applyDirichlet(0, 0)
# ls.applyNeumann(-1, -1000)
# print(g.getNumberOfVertices())
# print(ls.getMatrix())
# print(ls.getVector())
# ls.solve()
# print(ls.getSolution())
# # -----------------------------------------------------
| 46.619608
| 117
| 0.597662
|
def AssemblyStiffnessMatrix(linearSystem, grid, props, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
for e in region.getElements():
dx = e.getLength()
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
forceOperator = [-M/dx, M/dx]
localIndex = 0
for v in e.getVertices():
flux = forceOperator[localIndex]
vIndex = v.getIndex() + uShift*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, vIndex, flux )
linearSystem.addValueToMatrix( fIndex, vIndex, -flux )
localIndex += 1
def AssemblyGravityToVector(linearSystem, grid, props, gravity, uShift=0):
n = grid.getNumberOfVertices()
for region in grid.getRegions():
rho = props.rho.getValue(region)
for elem in region.getElements():
face = elem.getFace()
bVertex = face.getBackwardVertex()
fVertex = face.getForwardVertex()
value = -rho*gravity*elem.getSubVolume()
linearSystem.addValueToVector(bVertex.getIndex() + uShift*n, value)
linearSystem.addValueToVector(fVertex.getIndex() + uShift*n, value)
def AssemblyPorePressureToMatrix(linearSystem, grid, props, uShift=0):
for region in grid.getRegions():
alpha = props.biot.getValue(region)
for e in region.getElements():
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
for i,v in enumerate(e.getVertices()):
col = v.getIndex() + (1-uShift)*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, col, -alpha/2 )
linearSystem.addValueToMatrix( fIndex, col, +alpha/2 )
def AssemblyPorePressureToVector(linearSystem, grid, props, pField, uShift=0):
for region in grid.getRegions():
alpha = props.biot.getValue(region)
for e in region.getElements():
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
pBack = pField.getValue(f.getBackwardVertex())
pFron = pField.getValue(f.getForwardVertex())
value = alpha/2.
linearSystem.addValueToVector(bIndex, value*pBack)
linearSystem.addValueToVector(bIndex, value*pFron)
linearSystem.addValueToVector(fIndex, -value*pBack)
linearSystem.addValueToVector(fIndex, -value*pFron)
def AssemblyUDNMatrix(cooMatrix, grid, props):
for region in grid.getRegions():
Q = props.Q.getValue(region)
alpha = props.biot.getValue(region)
coef = alpha*alpha*Q
for e in region.getElements():
dx = e.getLength()
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex()
fIndex = f.getForwardVertex().getIndex()
forceOperator = [-coef/dx, coef/dx]
localIndex = 0
for v in e.getVertices():
flux = forceOperator[localIndex]
vIndex = v.getIndex()
cooMatrix.addValueToMatrix(bIndex, vIndex, flux)
cooMatrix.addValueToMatrix(fIndex, vIndex, -flux)
localIndex += 1
def AssemblyPisFullToGeoMatrix(linearSystem, grid, props, timeStep, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
dx = e.getLength()
B = props.mu*dx*alpha/(8*k*timeStep)
pisOperator = [-alpha*B, alpha*B]
for localIndex, v in enumerate(e.getVertices()):
coef = pisOperator[localIndex]
vIndex = v.getIndex() + uShift*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, vIndex, coef )
linearSystem.addValueToMatrix( fIndex, vIndex, -coef )
def AssemblyPisFullToGeoVector(linearSystem, grid, props, timeStep, uOldField, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
f = e.getFace()
dx = e.getLength()
B = props.mu*dx*alpha/(8*k*timeStep)
bVertex = f.getBackwardVertex()
fVertex = f.getForwardVertex()
ub = uOldField.getValue(bVertex)
uf = uOldField.getValue(fVertex)
linearSystem.addValueToVector(bVertex.getIndex() + uShift*grid.getNumberOfVertices(), alpha*B*(uf - ub))
linearSystem.addValueToVector(fVertex.getIndex() + uShift*grid.getNumberOfVertices(), -alpha*B*(uf - ub))
def AssemblyPisFullToPorePressureMatrix(linearSystem, grid, props, timeStep, uShift=0):
for region in grid.getRegions():
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
dx = e.getLength()
f = e.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
A = props.mu*dx*dx/(16*k*Q*timeStep)
for i,v in enumerate(e.getVertices()):
col = v.getIndex() + (1-uShift)*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, col, +alpha*A )
linearSystem.addValueToMatrix( fIndex, col, -alpha*A )
def AssemblyPisFullToPorePressureVector(linearSystem, grid, props, timeStep, pOldField, uShift=0):
for region in grid.getRegions():
M = props.M.getValue(region)
alpha = props.biot.getValue(region)
k = props.k.getValue(region)
Q = 1/(props.c_f*props.phi.getValue(region) + props.c_s.getValue(region)*(1 - props.phi.getValue(region)))
for e in region.getElements():
f = e.getFace()
dx = e.getLength()
A = props.mu*dx*dx/(16*k*Q*timeStep)
bVertex = f.getBackwardVertex()
fVertex = f.getForwardVertex()
pb = pOldField.getValue(bVertex)
pf = pOldField.getValue(fVertex)
linearSystem.addValueToVector(bVertex.getIndex() + uShift*grid.getNumberOfVertices(), alpha*A*(pf + pb))
linearSystem.addValueToVector(fVertex.getIndex() + uShift*grid.getNumberOfVertices(), -alpha*A*(pf + pb))
def AssemblyStiffnessMatrix_e(linearSystem, grid, modulus, uShift=0):
for element in grid.getElements():
value = modulus.getValue(element)
dx = element.getLength()
f = element.getFace()
bIndex = f.getBackwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
fIndex = f.getForwardVertex().getIndex() + uShift*grid.getNumberOfVertices()
forceOperator = [-value/dx, value/dx]
localIndex = 0
for v in element.getVertices():
flux = forceOperator[localIndex]
vIndex = v.getIndex() + uShift*grid.getNumberOfVertices()
linearSystem.addValueToMatrix( bIndex, vIndex, flux )
linearSystem.addValueToMatrix( fIndex, vIndex, -flux )
localIndex += 1
def AssemblyGravityToVector_e(linearSystem, grid, densityOnElements, gravity, uShift=0):
for element in grid.getElements():
rho = densityOnElements.getValue(element)
face = element.getFace()
bVertex = face.getBackwardVertex()
fVertex = face.getForwardVertex()
value = -rho*gravity*element.getSubVolume()
linearSystem.addValueToVector(bVertex.getIndex() + uShift*grid.getNumberOfVertices(), value)
linearSystem.addValueToVector(fVertex.getIndex() + uShift*grid.getNumberOfVertices(), value)
def AssemblyPorePressureToVector_e(linearSystem, grid, biotOnElements, pField, uShift=0):
for element in grid.getElements():
alpha = biotOnElements.getValue(element)
f = element.getFace()
backVertex = f.getBackwardVertex()
forVertex = f.getForwardVertex()
bIndex = backVertex.getIndex() + uShift*grid.getNumberOfVertices()
fIndex = forVertex.getIndex() + uShift*grid.getNumberOfVertices()
pBack = pField.getValue(backVertex)
pFron = pField.getValue(forVertex)
value = alpha/2.
linearSystem.addValueToVector(bIndex, value*pBack)
linearSystem.addValueToVector(bIndex, value*pFron)
linearSystem.addValueToVector(fIndex, -value*pBack)
linearSystem.addValueToVector(fIndex, -value*pFron)
| true
| true
|
1c43e105b918ef473175c9aefbc7dbf6367f1764
| 22,146
|
py
|
Python
|
lib/utils/paf_to_pose.py
|
kacel33/ActionAI_PC
|
a0528f49ea61cc07d7c1e9a3cd6846e5f50cfae7
|
[
"MIT"
] | 1,311
|
2017-03-28T09:24:20.000Z
|
2022-03-30T02:43:11.000Z
|
lib/utils/paf_to_pose.py
|
kacel33/ActionAI_PC
|
a0528f49ea61cc07d7c1e9a3cd6846e5f50cfae7
|
[
"MIT"
] | 144
|
2017-05-09T16:35:40.000Z
|
2022-03-25T03:14:42.000Z
|
lib/utils/paf_to_pose.py
|
kacel33/ActionAI_PC
|
a0528f49ea61cc07d7c1e9a3cd6846e5f50cfae7
|
[
"MIT"
] | 437
|
2017-03-30T15:23:14.000Z
|
2022-03-25T09:18:50.000Z
|
import cv2
import numpy as np
import time
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from scipy.ndimage.morphology import generate_binary_structure
from lib.pafprocess import pafprocess
from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender
# Heatmap indices for each limb (joint connection): each pair gives the two
# joint-heatmap indices that the limb connects (eg: limb_type=1 connects
# joint heatmaps 2 and 3 in this trimmed keypoint layout).
joint_to_limb_heatmap_relationship = [[1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 0]]
# PAF indices containing the x and y coordinates of the PAF for a given limb:
# paf_xy_coords_per_limb[limb_type] = [x_channel, y_channel] (eg: limb_type=1
# reads PAF channels 2 for x and 3 for y).
paf_xy_coords_per_limb = np.arange(14).reshape(7, 2)
NUM_LIMBS = len(joint_to_limb_heatmap_relationship)
def find_peaks(param, img):
"""
Given a (grayscale) image, find local maxima whose value is above a given
threshold (param['thre1'])
:param img: Input image (2d array) where we want to find peaks
:return: 2d np.array containing the [x,y] coordinates of each peak found
in the image
"""
peaks_binary = (maximum_filter(img, footprint=generate_binary_structure(
2, 1)) == img) * (img > param)
# Note reverse ([::-1]): we return [[x y], [x y]...] instead of [[y x], [y
# x]...]
return np.array(np.nonzero(peaks_binary)[::-1]).T
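# Quick illustrative check (matches the implementation above; the toy image
# is hypothetical): a single bright pixel at row 1, col 2 is reported in
# [x, y] order.
# >>> _img = np.zeros((3, 3)); _img[1, 2] = 1.0
# >>> find_peaks(0.5, _img)
# array([[2, 1]])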
def compute_resized_coords(coords, resizeFactor):
"""
Given the index/coordinates of a cell in some input array (e.g. image),
provides the new coordinates if that array was resized by making it
resizeFactor times bigger.
E.g.: image of size 3x3 is resized to 6x6 (resizeFactor=2), we'd like to
know the new coordinates of cell [1,2] -> Function would return [2.5,4.5]
:param coords: Coordinates (indices) of a cell in some input array
:param resizeFactor: Resize coefficient = shape_dest/shape_source. E.g.:
resizeFactor=2 means the destination array is twice as big as the
original one
:return: Coordinates in an array of size
shape_dest=resizeFactor*shape_source, expressing the array indices of the
closest point to 'coords' if an image of size shape_source was resized to
shape_dest
"""
# 1) Add 0.5 to coords to get coordinates of center of the pixel (e.g.
# index [0,0] represents the pixel at location [0.5,0.5])
# 2) Transform those coordinates to shape_dest, by multiplying by resizeFactor
# 3) That number represents the location of the pixel center in the new array,
# so subtract 0.5 to get coordinates of the array index/indices (revert
# step 1)
return (np.array(coords, dtype=float) + 0.5) * resizeFactor - 0.5
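# Worked instance of the docstring example (illustrative only): cell [1, 2]
# of a 3x3 array maps to [2.5, 4.5] when the array is resized to 6x6.
# >>> compute_resized_coords([1, 2], 2)
# array([2.5, 4.5])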
def NMS(heatmaps, upsampFactor=1., bool_refine_center=True, bool_gaussian_filt=False, config=None):
"""
NonMaximaSuppression: find peaks (local maxima) in a set of grayscale images
:param heatmaps: set of grayscale images on which to find local maxima (3d np.array,
with dimensions image_height x image_width x num_heatmaps)
:param upsampFactor: Size ratio between CPM heatmap output and the input image size.
Eg: upsampFactor=16 if original image was 480x640 and heatmaps are 30x40xN
:param bool_refine_center: Flag indicating whether:
- False: Simply return the low-res peak found upscaled by upsampFactor (subject to grid-snap)
- True: (Recommended, very accurate) Upsample a small patch around each low-res peak and
fine-tune the location of the peak at the resolution of the original input image
:param bool_gaussian_filt: Flag indicating whether to apply a 1d-GaussianFilter (smoothing)
to each upsampled patch before fine-tuning the location of each peak.
:return: a NUM_JOINTS x 4 np.array where each row represents a joint type (0=nose, 1=neck...)
and the columns indicate the {x,y} position, the score (probability) and a unique id (counter)
"""
# MODIFIED BY CARLOS: Instead of upsampling the heatmaps to heatmap_avg and
# then performing NMS to find peaks, this step can be sped up by ~25-50x by:
    # (9-10ms [with GaussFilt] or 5-6ms [without GaussFilt] vs 250-280ms on RoG)
# 1. Perform NMS at (low-res) CPM's output resolution
# 1.1. Find peaks using scipy.ndimage.filters.maximum_filter
# 2. Once a peak is found, take a patch of 5x5 centered around the peak, upsample it, and
# fine-tune the position of the actual maximum.
# '-> That's equivalent to having found the peak on heatmap_avg, but much faster because we only
# upsample and scan the 5x5 patch instead of the full (e.g.) 480x640
joint_list_per_joint_type = []
cnt_total_joints = 0
# For every peak found, win_size specifies how many pixels in each
# direction from the peak we take to obtain the patch that will be
# upsampled. Eg: win_size=1 -> patch is 3x3; win_size=2 -> 5x5
# (for BICUBIC interpolation to be accurate, win_size needs to be >=2!)
win_size = 2
for joint in range(config.MODEL.NUM_KEYPOINTS):
map_orig = heatmaps[:, :, joint]
peak_coords = find_peaks(config.TEST.THRESH_HEATMAP, map_orig)
peaks = np.zeros((len(peak_coords), 4))
for i, peak in enumerate(peak_coords):
if bool_refine_center:
x_min, y_min = np.maximum(0, peak - win_size)
x_max, y_max = np.minimum(
np.array(map_orig.T.shape) - 1, peak + win_size)
# Take a small patch around each peak and only upsample that
# tiny region
patch = map_orig[y_min:y_max + 1, x_min:x_max + 1]
map_upsamp = cv2.resize(
patch, None, fx=upsampFactor, fy=upsampFactor, interpolation=cv2.INTER_CUBIC)
# Gaussian filtering takes an average of 0.8ms/peak (and there might be
# more than one peak per joint!) -> For now, skip it (it's
# accurate enough)
map_upsamp = gaussian_filter(
map_upsamp, sigma=3) if bool_gaussian_filt else map_upsamp
# Obtain the coordinates of the maximum value in the patch
location_of_max = np.unravel_index(
map_upsamp.argmax(), map_upsamp.shape)
# Remember that peaks indicates [x,y] -> need to reverse it for
# [y,x]
location_of_patch_center = compute_resized_coords(
peak[::-1] - [y_min, x_min], upsampFactor)
# Calculate the offset wrt to the patch center where the actual
# maximum is
refined_center = (location_of_max - location_of_patch_center)
peak_score = map_upsamp[location_of_max]
else:
refined_center = [0, 0]
# Flip peak coordinates since they are [x,y] instead of [y,x]
peak_score = map_orig[tuple(peak[::-1])]
peaks[i, :] = tuple(
x for x in compute_resized_coords(peak_coords[i], upsampFactor) + refined_center[::-1]) + (
peak_score, cnt_total_joints)
cnt_total_joints += 1
joint_list_per_joint_type.append(peaks)
return joint_list_per_joint_type
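# Usage note (illustrative; `config` must expose MODEL.NUM_KEYPOINTS and
# TEST.THRESH_HEATMAP as used above): the return value is a list of
# NUM_KEYPOINTS arrays, each of shape (num_peaks_of_that_type, 4) holding
# x, y, peak score and a globally unique peak id.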
def find_connected_joints(paf_upsamp, joint_list_per_joint_type, num_intermed_pts=10, config=None):
"""
For every type of limb (eg: forearm, shin, etc.), look for every potential
pair of joints (eg: every wrist-elbow combination) and evaluate the PAFs to
determine which pairs are indeed body limbs.
:param paf_upsamp: PAFs upsampled to the original input image resolution
:param joint_list_per_joint_type: See 'return' doc of NMS()
:param num_intermed_pts: Int indicating how many intermediate points to take
between joint_src and joint_dst, at which the PAFs will be evaluated
:return: List of NUM_LIMBS rows. For every limb_type (a row) we store
a list of all limbs of that type found (eg: all the right forearms).
For each limb (each item in connected_limbs[limb_type]), we store 5 cells:
# {joint_src_id,joint_dst_id}: a unique number associated with each joint,
# limb_score_penalizing_long_dist: a score of how good a connection
of the joints is, penalized if the limb length is too long
# {joint_src_index,joint_dst_index}: the index of the joint within
all the joints of that type found (eg: the 3rd right elbow found)
"""
connected_limbs = []
# Auxiliary array to access paf_upsamp quickly
limb_intermed_coords = np.empty((4, num_intermed_pts), dtype=np.intp)
for limb_type in range(NUM_LIMBS):
# List of all joints of type A found, where A is specified by limb_type
# (eg: a right forearm starts in a right elbow)
joints_src = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][0]]
# List of all joints of type B found, where B is specified by limb_type
# (eg: a right forearm ends in a right wrist)
joints_dst = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][1]]
# print(joint_to_limb_heatmap_relationship[limb_type][0])
# print(joint_to_limb_heatmap_relationship[limb_type][1])
# print(paf_xy_coords_per_limb[limb_type][0])
# print(paf_xy_coords_per_limb[limb_type][1])
if len(joints_src) == 0 or len(joints_dst) == 0:
# No limbs of this type found (eg: no right forearms found because
# we didn't find any right wrists or right elbows)
connected_limbs.append([])
else:
connection_candidates = []
# Specify the paf index that contains the x-coord of the paf for
# this limb
limb_intermed_coords[2, :] = paf_xy_coords_per_limb[limb_type][0]
# And the y-coord paf index
limb_intermed_coords[3, :] = paf_xy_coords_per_limb[limb_type][1]
for i, joint_src in enumerate(joints_src):
# Try every possible joints_src[i]-joints_dst[j] pair and see
# if it's a feasible limb
for j, joint_dst in enumerate(joints_dst):
# Subtract the position of both joints to obtain the
# direction of the potential limb
limb_dir = joint_dst[:2] - joint_src[:2]
# Compute the distance/length of the potential limb (norm
# of limb_dir)
limb_dist = np.sqrt(np.sum(limb_dir ** 2)) + 1e-8
limb_dir = limb_dir / limb_dist # Normalize limb_dir to be a unit vector
# Linearly distribute num_intermed_pts points from the x
# coordinate of joint_src to the x coordinate of joint_dst
limb_intermed_coords[1, :] = np.round(np.linspace(
joint_src[0], joint_dst[0], num=num_intermed_pts))
limb_intermed_coords[0, :] = np.round(np.linspace(
joint_src[1], joint_dst[1], num=num_intermed_pts)) # Same for the y coordinate
intermed_paf = paf_upsamp[limb_intermed_coords[0, :],
limb_intermed_coords[1, :], limb_intermed_coords[2:4, :]].T
score_intermed_pts = intermed_paf.dot(limb_dir)
score_penalizing_long_dist = score_intermed_pts.mean(
) + min(0.5 * paf_upsamp.shape[0] / limb_dist - 1, 0)
# Criterion 1: At least 80% of the intermediate points have
# a score higher than thre2
criterion1 = (np.count_nonzero(
score_intermed_pts > config.TEST.THRESH_PAF) > 0.8 * num_intermed_pts)
# Criterion 2: Mean score, penalized for large limb
# distances (larger than half the image height), is
# positive
criterion2 = (score_penalizing_long_dist > 0)
if criterion1 and criterion2:
# Last value is the combined paf(+limb_dist) + heatmap
# scores of both joints
connection_candidates.append(
[i, j, score_penalizing_long_dist,
score_penalizing_long_dist + joint_src[2] + joint_dst[2]])
# Sort connection candidates based on their
# score_penalizing_long_dist
connection_candidates = sorted(
connection_candidates, key=lambda x: x[2], reverse=True)
connections = np.empty((0, 5))
# There can only be as many limbs as the smallest number of source
# or destination joints (eg: only 2 forearms if there's 5 wrists
# but 2 elbows)
max_connections = min(len(joints_src), len(joints_dst))
# Traverse all potential joint connections (sorted by their score)
for potential_connection in connection_candidates:
i, j, s = potential_connection[0:3]
# Make sure joints_src[i] or joints_dst[j] haven't already been
# connected to other joints_dst or joints_src
if i not in connections[:, 3] and j not in connections[:, 4]:
# [joint_src_id, joint_dst_id, limb_score_penalizing_long_dist, joint_src_index, joint_dst_index]
connections = np.vstack(
[connections, [joints_src[i][3], joints_dst[j][3], s, i, j]])
# Exit if we've already established max_connections
# connections (each joint can't be connected to more than
# one joint)
if len(connections) >= max_connections:
break
connected_limbs.append(connections)
return connected_limbs
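# Recap of the scoring applied above (descriptive, mirrors the code): each
# candidate limb gets
#   score = mean(PAF . unit_dir over num_intermed_pts samples)
#           + min(0.5 * image_height / limb_dist - 1, 0)
# i.e. average projected PAF strength, penalized once the limb is longer
# than half the image height.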
def group_limbs_of_same_person(connected_limbs, joint_list, config):
"""
Associate limbs belonging to the same person together.
:param connected_limbs: See 'return' doc of find_connected_joints()
:param joint_list: unravel'd version of joint_list_per_joint [See 'return' doc of NMS()]
:return: 2d np.array of size num_people x (NUM_JOINTS+2). For each person found:
# First NUM_JOINTS columns contain the index (in joint_list) of the joints associated
with that person (or -1 if their i-th joint wasn't found)
# 2nd-to-last column: Overall score of the joints+limbs that belong to this person
# Last column: Total count of joints found for this person
"""
person_to_joint_assoc = []
for limb_type in range(NUM_LIMBS):
joint_src_type, joint_dst_type = joint_to_limb_heatmap_relationship[limb_type]
for limb_info in connected_limbs[limb_type]:
person_assoc_idx = []
for person, person_limbs in enumerate(person_to_joint_assoc):
if person_limbs[joint_src_type] == limb_info[0] or person_limbs[joint_dst_type] == limb_info[1]:
person_assoc_idx.append(person)
# If one of the joints has been associated to a person, and either
# the other joint is also associated with the same person or not
# associated to anyone yet:
if len(person_assoc_idx) == 1:
person_limbs = person_to_joint_assoc[person_assoc_idx[0]]
# If the other joint is not associated to anyone yet,
if person_limbs[joint_dst_type] != limb_info[1]:
# Associate it with the current person
person_limbs[joint_dst_type] = limb_info[1]
# Increase the number of limbs associated to this person
person_limbs[-1] += 1
# And update the total score (+= heatmap score of joint_dst
# + score of connecting joint_src with joint_dst)
person_limbs[-2] += joint_list[limb_info[1]
.astype(int), 2] + limb_info[2]
elif len(person_assoc_idx) == 2: # if found 2 and disjoint, merge them
person1_limbs = person_to_joint_assoc[person_assoc_idx[0]]
person2_limbs = person_to_joint_assoc[person_assoc_idx[1]]
membership = ((person1_limbs >= 0) & (person2_limbs >= 0))[:-2]
if not membership.any(): # If both people have no same joints connected, merge into a single person
# Update which joints are connected
person1_limbs[:-2] += (person2_limbs[:-2] + 1)
# Update the overall score and total count of joints
# connected by summing their counters
person1_limbs[-2:] += person2_limbs[-2:]
# Add the score of the current joint connection to the
# overall score
person1_limbs[-2] += limb_info[2]
person_to_joint_assoc.pop(person_assoc_idx[1])
else: # Same case as len(person_assoc_idx)==1 above
person1_limbs[joint_dst_type] = limb_info[1]
person1_limbs[-1] += 1
person1_limbs[-2] += joint_list[limb_info[1]
.astype(int), 2] + limb_info[2]
else: # No person has claimed any of these joints, create a new person
# Initialize person info to all -1 (no joint associations)
row = -1 * np.ones(config.MODEL.NUM_KEYPOINTS + 2)
# Store the joint info of the new connection
row[joint_src_type] = limb_info[0]
row[joint_dst_type] = limb_info[1]
# Total count of connected joints for this person: 2
row[-1] = 2
# Compute overall score: score joint_src + score joint_dst + score connection
# {joint_src,joint_dst}
row[-2] = sum(joint_list[limb_info[:2].astype(int), 2]
) + limb_info[2]
person_to_joint_assoc.append(row)
# Delete people who have very few parts connected
people_to_delete = []
for person_id, person_info in enumerate(person_to_joint_assoc):
if person_info[-1] < 3 or person_info[-2] / person_info[-1] < 0.2:
people_to_delete.append(person_id)
# Traverse the list in reverse order so we delete indices starting from the
# last one (otherwise, removing item for example 0 would modify the indices of
# the remaining people to be deleted!)
for index in people_to_delete[::-1]:
person_to_joint_assoc.pop(index)
# Appending items to a np.array can be costly (allocating new memory, copying over the array, then adding new row)
# Instead, we treat the set of people as a list (fast to append items) and
# only convert to np.array at the end
return np.array(person_to_joint_assoc)
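# Shape note (descriptive): each returned row reads
# [joint_id_0, ..., joint_id_{NUM_KEYPOINTS-1}, overall_score, num_joints],
# with -1 marking joint types that were never associated to this person.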
def paf_to_pose(heatmaps, pafs, config):
# Bottom-up approach:
# Step 1: find all joints in the image (organized by joint type: [0]=nose,
# [1]=neck...)
joint_list_per_joint_type = NMS(heatmaps, upsampFactor=config.MODEL.DOWNSAMPLE, config=config)
# joint_list is an unravel'd version of joint_list_per_joint, where we add
# a 5th column to indicate the joint_type (0=nose, 1=neck...)
joint_list = np.array([tuple(peak) + (joint_type,) for joint_type,
joint_peaks in enumerate(joint_list_per_joint_type) for peak in joint_peaks])
# import ipdb
# ipdb.set_trace()
# Step 2: find which joints go together to form limbs (which wrists go
# with which elbows)
paf_upsamp = cv2.resize(
pafs, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_CUBIC)
connected_limbs = find_connected_joints(paf_upsamp, joint_list_per_joint_type,
config.TEST.NUM_INTERMED_PTS_BETWEEN_KEYPOINTS, config)
# Step 3: associate limbs that belong to the same person
person_to_joint_assoc = group_limbs_of_same_person(
connected_limbs, joint_list, config)
return joint_list, person_to_joint_assoc
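# Illustrative driver (hedged: `cfg` is a stand-in for a config object
# exposing MODEL.DOWNSAMPLE, TEST.THRESH_HEATMAP, TEST.THRESH_PAF,
# MODEL.NUM_KEYPOINTS and TEST.NUM_INTERMED_PTS_BETWEEN_KEYPOINTS, exactly
# the fields referenced above):
# >>> joint_list, persons = paf_to_pose(heatmaps, pafs, cfg)
# >>> persons.shape   # -> (num_people, NUM_KEYPOINTS + 2)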
def paf_to_pose_cpp(heatmaps, pafs, config):
humans = []
joint_list_per_joint_type = NMS(heatmaps, upsampFactor=config.MODEL.DOWNSAMPLE, config=config)
joint_list = np.array(
[tuple(peak) + (joint_type,) for joint_type, joint_peaks in enumerate(joint_list_per_joint_type) for peak in
joint_peaks]).astype(np.float32)
if joint_list.shape[0] > 0:
joint_list = np.expand_dims(joint_list, 0)
paf_upsamp = cv2.resize(
pafs, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_NEAREST)
heatmap_upsamp = cv2.resize(
heatmaps, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_NEAREST)
pafprocess.process_paf(joint_list, heatmap_upsamp, paf_upsamp)
for human_id in range(pafprocess.get_num_humans()):
human = Human([])
is_added = False
for part_idx in range(config.MODEL.NUM_KEYPOINTS):
c_idx = int(pafprocess.get_part_cid(human_id, part_idx))
if c_idx < 0:
continue
is_added = True
human.body_parts[part_idx] = BodyPart(
'%d-%d' % (human_id, part_idx), part_idx,
float(pafprocess.get_part_x(c_idx)) / heatmap_upsamp.shape[1],
float(pafprocess.get_part_y(c_idx)) / heatmap_upsamp.shape[0],
pafprocess.get_part_score(c_idx)
)
if is_added:
score = pafprocess.get_score(human_id)
human.score = score
humans.append(human)
return humans
| 54.412776
| 136
| 0.635916
|
import cv2
import numpy as np
import time
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from scipy.ndimage.morphology import generate_binary_structure
from lib.pafprocess import pafprocess
from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender
joint_to_limb_heatmap_relationship = [[1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 0]]
paf_xy_coords_per_limb = np.arange(14).reshape(7, 2)
NUM_LIMBS = len(joint_to_limb_heatmap_relationship)
def find_peaks(param, img):
peaks_binary = (maximum_filter(img, footprint=generate_binary_structure(
2, 1)) == img) * (img > param)
return np.array(np.nonzero(peaks_binary)[::-1]).T
def compute_resized_coords(coords, resizeFactor):
return (np.array(coords, dtype=float) + 0.5) * resizeFactor - 0.5
def NMS(heatmaps, upsampFactor=1., bool_refine_center=True, bool_gaussian_filt=False, config=None):
# 1.1. Find peaks using scipy.ndimage.filters.maximum_filter
# 2. Once a peak is found, take a patch of 5x5 centered around the peak, upsample it, and
# fine-tune the position of the actual maximum.
# '-> That's equivalent to having found the peak on heatmap_avg, but much faster because we only
# upsample and scan the 5x5 patch instead of the full (e.g.) 480x640
joint_list_per_joint_type = []
cnt_total_joints = 0
# For every peak found, win_size specifies how many pixels in each
# direction from the peak we take to obtain the patch that will be
# upsampled. Eg: win_size=1 -> patch is 3x3; win_size=2 -> 5x5
# (for BICUBIC interpolation to be accurate, win_size needs to be >=2!)
win_size = 2
for joint in range(config.MODEL.NUM_KEYPOINTS):
map_orig = heatmaps[:, :, joint]
peak_coords = find_peaks(config.TEST.THRESH_HEATMAP, map_orig)
peaks = np.zeros((len(peak_coords), 4))
for i, peak in enumerate(peak_coords):
if bool_refine_center:
x_min, y_min = np.maximum(0, peak - win_size)
x_max, y_max = np.minimum(
np.array(map_orig.T.shape) - 1, peak + win_size)
# Take a small patch around each peak and only upsample that
# tiny region
patch = map_orig[y_min:y_max + 1, x_min:x_max + 1]
map_upsamp = cv2.resize(
patch, None, fx=upsampFactor, fy=upsampFactor, interpolation=cv2.INTER_CUBIC)
                # Gaussian filtering takes an average of 0.8ms/peak (and there might be
                # more than one peak per joint!) -> For now, skip it unless explicitly requested
map_upsamp = gaussian_filter(
map_upsamp, sigma=3) if bool_gaussian_filt else map_upsamp
location_of_max = np.unravel_index(
map_upsamp.argmax(), map_upsamp.shape)
location_of_patch_center = compute_resized_coords(
peak[::-1] - [y_min, x_min], upsampFactor)
refined_center = (location_of_max - location_of_patch_center)
peak_score = map_upsamp[location_of_max]
else:
refined_center = [0, 0]
peak_score = map_orig[tuple(peak[::-1])]
peaks[i, :] = tuple(
x for x in compute_resized_coords(peak_coords[i], upsampFactor) + refined_center[::-1]) + (
peak_score, cnt_total_joints)
cnt_total_joints += 1
joint_list_per_joint_type.append(peaks)
return joint_list_per_joint_type
def find_connected_joints(paf_upsamp, joint_list_per_joint_type, num_intermed_pts=10, config=None):
connected_limbs = []
limb_intermed_coords = np.empty((4, num_intermed_pts), dtype=np.intp)
for limb_type in range(NUM_LIMBS):
joints_src = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][0]]
joints_dst = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][1]]
if len(joints_src) == 0 or len(joints_dst) == 0:
connected_limbs.append([])
else:
connection_candidates = []
# Specify the paf index that contains the x-coord of the paf for
# this limb
limb_intermed_coords[2, :] = paf_xy_coords_per_limb[limb_type][0]
# And the y-coord paf index
limb_intermed_coords[3, :] = paf_xy_coords_per_limb[limb_type][1]
for i, joint_src in enumerate(joints_src):
# Try every possible joints_src[i]-joints_dst[j] pair and see
# if it's a feasible limb
for j, joint_dst in enumerate(joints_dst):
limb_dir = joint_dst[:2] - joint_src[:2]
limb_dist = np.sqrt(np.sum(limb_dir ** 2)) + 1e-8
limb_dir = limb_dir / limb_dist
limb_intermed_coords[1, :] = np.round(np.linspace(
joint_src[0], joint_dst[0], num=num_intermed_pts))
limb_intermed_coords[0, :] = np.round(np.linspace(
joint_src[1], joint_dst[1], num=num_intermed_pts))
intermed_paf = paf_upsamp[limb_intermed_coords[0, :],
limb_intermed_coords[1, :], limb_intermed_coords[2:4, :]].T
score_intermed_pts = intermed_paf.dot(limb_dir)
score_penalizing_long_dist = score_intermed_pts.mean(
) + min(0.5 * paf_upsamp.shape[0] / limb_dist - 1, 0)
criterion1 = (np.count_nonzero(
score_intermed_pts > config.TEST.THRESH_PAF) > 0.8 * num_intermed_pts)
criterion2 = (score_penalizing_long_dist > 0)
if criterion1 and criterion2:
connection_candidates.append(
[i, j, score_penalizing_long_dist,
score_penalizing_long_dist + joint_src[2] + joint_dst[2]])
connection_candidates = sorted(
connection_candidates, key=lambda x: x[2], reverse=True)
connections = np.empty((0, 5))
            # At most one connection is possible per joint on the scarcer side
            # (e.g. 3 wrists but 2 elbows -> at most 2 connections)
max_connections = min(len(joints_src), len(joints_dst))
# Traverse all potential joint connections (sorted by their score)
for potential_connection in connection_candidates:
i, j, s = potential_connection[0:3]
                # Make sure joints_src[i] or joints_dst[j] haven't already been
                # connected to another joint of this limb type
if i not in connections[:, 3] and j not in connections[:, 4]:
connections = np.vstack(
[connections, [joints_src[i][3], joints_dst[j][3], s, i, j]])
                    # Stop once we've reached the maximum number of possible
                    # connections (each joint can't be connected to more than
                    # one joint of the other type)
if len(connections) >= max_connections:
break
connected_limbs.append(connections)
return connected_limbs
def group_limbs_of_same_person(connected_limbs, joint_list, config):
person_to_joint_assoc = []
for limb_type in range(NUM_LIMBS):
joint_src_type, joint_dst_type = joint_to_limb_heatmap_relationship[limb_type]
for limb_info in connected_limbs[limb_type]:
person_assoc_idx = []
for person, person_limbs in enumerate(person_to_joint_assoc):
if person_limbs[joint_src_type] == limb_info[0] or person_limbs[joint_dst_type] == limb_info[1]:
person_assoc_idx.append(person)
if len(person_assoc_idx) == 1:
person_limbs = person_to_joint_assoc[person_assoc_idx[0]]
if person_limbs[joint_dst_type] != limb_info[1]:
person_limbs[joint_dst_type] = limb_info[1]
person_limbs[-1] += 1
person_limbs[-2] += joint_list[limb_info[1]
.astype(int), 2] + limb_info[2]
elif len(person_assoc_idx) == 2:
person1_limbs = person_to_joint_assoc[person_assoc_idx[0]]
person2_limbs = person_to_joint_assoc[person_assoc_idx[1]]
membership = ((person1_limbs >= 0) & (person2_limbs >= 0))[:-2]
if not membership.any():
person1_limbs[:-2] += (person2_limbs[:-2] + 1)
person1_limbs[-2:] += person2_limbs[-2:]
person1_limbs[-2] += limb_info[2]
person_to_joint_assoc.pop(person_assoc_idx[1])
else:
person1_limbs[joint_dst_type] = limb_info[1]
person1_limbs[-1] += 1
person1_limbs[-2] += joint_list[limb_info[1]
.astype(int), 2] + limb_info[2]
else:
row = -1 * np.ones(config.MODEL.NUM_KEYPOINTS + 2)
row[joint_src_type] = limb_info[0]
row[joint_dst_type] = limb_info[1]
row[-1] = 2
row[-2] = sum(joint_list[limb_info[:2].astype(int), 2]
) + limb_info[2]
person_to_joint_assoc.append(row)
people_to_delete = []
for person_id, person_info in enumerate(person_to_joint_assoc):
if person_info[-1] < 3 or person_info[-2] / person_info[-1] < 0.2:
people_to_delete.append(person_id)
for index in people_to_delete[::-1]:
person_to_joint_assoc.pop(index)
return np.array(person_to_joint_assoc)
def paf_to_pose(heatmaps, pafs, config):
joint_list_per_joint_type = NMS(heatmaps, upsampFactor=config.MODEL.DOWNSAMPLE, config=config)
    # Step 1: unravel the per-type peak lists into a single joint list, adding
    # a 5th column to indicate the joint_type (0=nose, 1=neck...)
joint_list = np.array([tuple(peak) + (joint_type,) for joint_type,
joint_peaks in enumerate(joint_list_per_joint_type) for peak in joint_peaks])
# import ipdb
# ipdb.set_trace()
# Step 2: find which joints go together to form limbs (which wrists go
# with which elbows)
paf_upsamp = cv2.resize(
pafs, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_CUBIC)
connected_limbs = find_connected_joints(paf_upsamp, joint_list_per_joint_type,
config.TEST.NUM_INTERMED_PTS_BETWEEN_KEYPOINTS, config)
# Step 3: associate limbs that belong to the same person
person_to_joint_assoc = group_limbs_of_same_person(
connected_limbs, joint_list, config)
return joint_list, person_to_joint_assoc
def paf_to_pose_cpp(heatmaps, pafs, config):
humans = []
joint_list_per_joint_type = NMS(heatmaps, upsampFactor=config.MODEL.DOWNSAMPLE, config=config)
joint_list = np.array(
[tuple(peak) + (joint_type,) for joint_type, joint_peaks in enumerate(joint_list_per_joint_type) for peak in
joint_peaks]).astype(np.float32)
if joint_list.shape[0] > 0:
joint_list = np.expand_dims(joint_list, 0)
paf_upsamp = cv2.resize(
pafs, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_NEAREST)
heatmap_upsamp = cv2.resize(
heatmaps, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_NEAREST)
pafprocess.process_paf(joint_list, heatmap_upsamp, paf_upsamp)
for human_id in range(pafprocess.get_num_humans()):
human = Human([])
is_added = False
for part_idx in range(config.MODEL.NUM_KEYPOINTS):
c_idx = int(pafprocess.get_part_cid(human_id, part_idx))
if c_idx < 0:
continue
is_added = True
human.body_parts[part_idx] = BodyPart(
'%d-%d' % (human_id, part_idx), part_idx,
float(pafprocess.get_part_x(c_idx)) / heatmap_upsamp.shape[1],
float(pafprocess.get_part_y(c_idx)) / heatmap_upsamp.shape[0],
pafprocess.get_part_score(c_idx)
)
if is_added:
score = pafprocess.get_score(human_id)
human.score = score
humans.append(human)
return humans
| true
| true
|
1c43e13d8418e3f82d49ce71c1285cf8469339e2
| 387
|
py
|
Python
|
scp_epub/download/utils.py
|
elfakyn/scp_epub
|
5d0e95d8fa0e11d9ab388c5a4083212c1c857a2f
|
[
"MIT"
] | 5
|
2020-05-27T15:57:15.000Z
|
2021-06-11T01:08:50.000Z
|
scp_epub/download/utils.py
|
elfakyn/scp_epub
|
5d0e95d8fa0e11d9ab388c5a4083212c1c857a2f
|
[
"MIT"
] | null | null | null |
scp_epub/download/utils.py
|
elfakyn/scp_epub
|
5d0e95d8fa0e11d9ab388c5a4083212c1c857a2f
|
[
"MIT"
] | 2
|
2020-11-14T04:53:51.000Z
|
2021-06-12T19:28:32.000Z
|
import re
def filter_tags(pages, include_tags=None):
if include_tags is not None:
pages = [
page for page in pages
if 'tags' in page and any(
included_tag in page['tags'] for included_tag in include_tags
)
]
return pages
def normalize_string(raw_string):
return re.sub('[^a-z0-9\\-]', '_', raw_string)
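# --- Hedged usage sketch (not part of the original module) --------------------
# The page dicts below are hypothetical stand-ins for wikidot API output; only
# the 'tags' key matters to filter_tags.
if __name__ == '__main__':
    pages = [
        {'fullname': 'scp-173', 'tags': ['scp', 'euclid']},
        {'fullname': 'main', 'tags': ['admin']},
        {'fullname': 'untagged-page'},
    ]
    print(filter_tags(pages, include_tags=['scp']))  # keeps only scp-173
    print(filter_tags(pages))                        # include_tags=None passes everything through
    print(normalize_string('SCP-173 Part One'))      # anything outside [a-z0-9-] becomes '_'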
| 21.5
| 77
| 0.578811
|
import re
def filter_tags(pages, include_tags=None):
if include_tags is not None:
pages = [
page for page in pages
if 'tags' in page and any(
included_tag in page['tags'] for included_tag in include_tags
)
]
return pages
def normalize_string(raw_string):
return re.sub('[^a-z0-9\\-]', '_', raw_string)
| true
| true
|
1c43e2186ae5b7bd32f050d7f5b624c8bb3e6dc6
| 12,265
|
py
|
Python
|
offb_posctl/scripts/MinimumSnapTimeNode.py
|
SensenLiu/aggrecup
|
0c381ee259b388684205c1fa5fc41265a7e849b3
|
[
"MIT"
] | null | null | null |
offb_posctl/scripts/MinimumSnapTimeNode.py
|
SensenLiu/aggrecup
|
0c381ee259b388684205c1fa5fc41265a7e849b3
|
[
"MIT"
] | null | null | null |
offb_posctl/scripts/MinimumSnapTimeNode.py
|
SensenLiu/aggrecup
|
0c381ee259b388684205c1fa5fc41265a7e849b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
import socket
import numpy as np
from scipy.optimize import minimize
import time
import datetime
import math
import matplotlib.pyplot as plt
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import TwistStamped
from offb_posctl.msg import controlstate # custom control-state message to publish
phi=1.57
ay0=0
vy0=0
y0=0
az0=0
vz0=0
z0=0.5
aytf=-math.sin(phi)*9.8
vytf=0.2*math.sin(phi)
ytf=7.0
aztf=math.cos(phi)*9.8-9.8
vztf=-0.2*math.cos(phi)
ztf=2.0
meshpoint=np.linspace(1, 0.01, 5)
thrustmax=2*9.8
angleaccdmax=20
lbz=0.3
ubz=2.5
lbv=-5
ubv=5
currentupdateflag = False
# Objective
def J(x):
return x[-1]
def fast_jac(x):
jac = np.zeros_like(x)
jac[-1]=1
return jac
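# --- Hedged sanity check (not part of the original script) --------------------
# The cost J(x) = x[-1] is just the trajectory end time, so its gradient is the
# last unit vector.  This helper verifies fast_jac against a central finite
# difference at an arbitrary point.
def _check_jacobian(x=np.array([0., 0., 0., 0., 0., 0., 5.]), eps=1e-6):
    fd = np.zeros_like(x)
    for k in range(len(x)):
        step = np.zeros_like(x)
        step[k] = eps
        fd[k] = (J(x + step) - J(x - step)) / (2 * eps)
    assert np.allclose(fd, fast_jac(x)), 'analytic jacobian mismatch'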
# Constraint
def eqmycon(x):
global ay0, vy0, y0, az0, vz0, z0, aytf, vytf, ytf, aztf, vztf, ztf, meshpoint, thrustmax, angleaccdmax, lbz, lbv, ubv
alpha_y=x[0]
beta_y=x[1]
gamma_y=x[2]
alpha_z=x[3]
beta_z=x[4]
gamma_z=x[5]
t=x[6]
ceq1=alpha_y/6*t**3+beta_y/2*t**2+gamma_y*t+ay0-aytf
ceq2=alpha_y/24*t**4+beta_y/6*t**3+gamma_y/2*t**2+ay0*t+vy0-vytf
ceq3=alpha_y/120*t**5+beta_y/24*t**4+gamma_y/6*t**3+ay0/2*t**2+vy0*t+y0-ytf
ceq4=alpha_z/6*t**3+beta_z/2*t**2+gamma_z*t+az0-aztf
ceq5=alpha_z/24*t**4+beta_z/6*t**3+gamma_z/2*t**2+az0*t+vz0-vztf
ceq6=alpha_z/120*t**5+beta_z/24*t**4+gamma_z/6*t**3+az0/2*t**2+vz0*t+z0-ztf
return np.hstack((ceq1,ceq2,ceq3,ceq4,ceq5,ceq6)).ravel()
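# --- Hedged sanity check (not part of the original script) --------------------
# With all six polynomial coefficients zero, the quintic integrals in eqmycon
# collapse to constant-acceleration kinematics, so the six terminal residuals
# (acceleration, velocity, position in y then z) can be cross-checked in
# closed form:
def _check_boundary_residuals(t=5.0):
    x = np.array([0., 0., 0., 0., 0., 0., t])
    expected = np.array([
        ay0 - aytf,                           # terminal y-acceleration error
        ay0 * t + vy0 - vytf,                 # terminal y-velocity error
        ay0 / 2 * t**2 + vy0 * t + y0 - ytf,  # terminal y-position error
        az0 - aztf,
        az0 * t + vz0 - vztf,
        az0 / 2 * t**2 + vz0 * t + z0 - ztf,
    ])
    assert np.allclose(eqmycon(x), expected)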
# Constraint
def ineqmycon(x):
global ay0, vy0, y0, az0, vz0, z0, aytf, vytf, ytf, aztf, vztf, ztf, meshpoint, thrustmax, angleaccdmax, lbz, ubz, lbv, ubv
alpha_y=x[0]
beta_y=x[1]
gamma_y=x[2]
alpha_z=x[3]
beta_z=x[4]
gamma_z=x[5]
t=x[6]
tmesh=t*(np.array(meshpoint))
angleacc=np.zeros_like(tmesh)
for i in range(len(tmesh)):
# print("i===",tmesh)
t=tmesh[i]
angleacc[i]=((((alpha_y*t**2)/2 + beta_y*t + gamma_y)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - (((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2)*((2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3))/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)**2 - ((beta_y + alpha_y*t)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - ((beta_z + alpha_z*t)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_z*t**2)/2 + beta_z*t + gamma_z))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)**2*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3)/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)
t=x[6]
thrust=np.sqrt(((alpha_y*tmesh**3)/6 + (beta_y*tmesh**2)/2 + gamma_y*tmesh + ay0)**2 + ((alpha_z*tmesh**3)/6 + (beta_z*tmesh**2)/2 + gamma_z*tmesh + az0 + 49/5)**2)
c0=t
# thrust constraints
c1=2*9.8-thrust
# print("c1----",c1.shape)
# z's lower bound constraints
c2=-lbz+(alpha_z/120*tmesh**5+beta_z/24*tmesh**4+gamma_z/6*tmesh**3+az0/2*tmesh**2+vz0*tmesh+z0)
c14=ubz-(alpha_z/120*tmesh**5+beta_z/24*tmesh**4+gamma_z/6*tmesh**3+az0/2*tmesh**2+vz0*tmesh+z0)
# actuator constraints
c3=angleacc*thrustmax/(4*angleaccdmax)-thrust/2+9.8
c4=-angleacc*thrustmax/(4*angleaccdmax)+thrust/2
c5=-angleacc*thrustmax/(4*angleaccdmax)-thrust/2+9.8
c6=angleacc*thrustmax/(4*angleaccdmax)+thrust/2
# phi belongs to [-1.57,1.57] constraints
c7=((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 +9.8)
c8=1
c9=1
    # Interior extrema of the vertical acceleration occur where
    # alpha_z/2*t**2 + beta_z*t + gamma_z = 0
    if beta_z*beta_z>=2*alpha_z*gamma_z and alpha_z!=0 :
        t1=(-beta_z+math.sqrt(beta_z*beta_z-2*alpha_z*gamma_z))/alpha_z
        t2=(-beta_z-math.sqrt(beta_z*beta_z-2*alpha_z*gamma_z))/alpha_z
if t1>=0 and t1<=t :
c8=((alpha_z*t1**3)/6 + (beta_z*t1**2)/2 + gamma_z*t1 + az0 +9.8)
if t2>=0 and t2<=t :
c9=((alpha_z*t2**3)/6 + (beta_z*t2**2)/2 + gamma_z*t2 + az0 +9.8)
#print('the value of t1 and t2 is',t1,t2)
c10=-(alpha_y/24*tmesh**4+beta_y/6*tmesh**3+gamma_y/2*tmesh**2+ay0*tmesh+vy0-ubv)
c11=-(lbv-(alpha_y/24*tmesh**4+beta_y/6*tmesh**3+gamma_y/2*tmesh**2+ay0*tmesh+vy0))
c12=-(alpha_z/24*tmesh**4+beta_z/6*tmesh**3+gamma_z/2*tmesh**2+az0*tmesh+vz0-ubv)
c13=-(lbv-(alpha_z/24*tmesh**4+beta_z/6*tmesh**3+gamma_z/2*tmesh**2+az0*tmesh+vz0))
# print("--------", np.vstack((c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13)).shape)
return np.hstack((c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14))
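# --- Hedged illustration (not part of the original script) --------------------
# SLSQP treats each 'ineq' constraint as satisfied when its value is >= 0, so
# every element returned by ineqmycon must be non-negative at a feasible point.
# This helper reports the most violated constraint for a candidate x:
def _worst_inequality(x):
    c = ineqmycon(x)
    return float(c.min())  # negative => at least one constraint is violated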
def pos_twist_callback(data):
global vy0, y0, vz0, z0, currentupdateflag
y0 = data.pose.pose.position.y # relative pos
z0 = data.pose.pose.position.z
vy0 = data.twist.twist.linear.y
vz0 = data.twist.twist.linear.z
currentupdateflag = True
def plane_vel_callback(data):
global va_ini
va_ini = data.twist.linear.x
def droneImu_callback(data):
global ay0, az0
ay0 = data.twist.linear.y
az0 = data.twist.linear.z
def main():
global currentupdateflag
constraint = [dict(type='eq', fun=eqmycon), dict(type='ineq', fun=ineqmycon)]
Initial_guess=np.array([0,0,0,0,0,0,5])
lb=-1000
ub=1000
mybounds=[(lb,ub),(lb,ub),(lb,ub),(lb,ub),(lb,ub),(lb,ub),(0,10)]
controlfreq=50
    controlstate_msg = controlstate() # the control-state message to publish
rospy.init_node('minimumsnap_control', anonymous=True)
uav_id = rospy.get_param("~id", "")
rate = rospy.Rate(100)
rospy.Subscriber(uav_id + "current_relative_postwist",
Odometry, pos_twist_callback)
# rospy.Subscriber(uav_id + "mavros/local_position/velocity_local",
    # TwistStamped, plane_vel_callback) # plane velocity
rospy.Subscriber(uav_id + "/mavros/imu/data",
                     TwistStamped, droneImu_callback) # drone IMU data
pub = rospy.Publisher( uav_id + "bvp_controlstate", controlstate, queue_size=10)
currentupdateflag=True
while not (rospy.is_shutdown()):
if currentupdateflag:
start = time.time()
result = minimize(J, Initial_guess, method='SLSQP', jac=fast_jac,tol=1e-4, bounds=mybounds,constraints=constraint)
end = time.time()
running_time = end - start
print('time cost : %.5f sec' % running_time)
if result.success:
Initial_guess=result.x
controlstate_msg.inicounter = 1
controlstate_msg.discrepointpersecond = controlfreq
controlstate_msg.arraylength = round(result.x[-1]*50)
times=np.linspace(0, 1, controlstate_msg.arraylength)*result.x[-1]
alpha_y=result.x[0]
beta_y=result.x[1]
gamma_y=result.x[2]
alpha_z=result.x[3]
beta_z=result.x[4]
gamma_z=result.x[5]
y=alpha_y/120*times**5+beta_y/24*times**4+gamma_y/6*times**3+ay0/2*times**2+vy0*times+y0
vy=alpha_y/24*times**4+beta_y/6*times**3+gamma_y/2*times**2+ay0*times+vy0
ay=alpha_y/6*times**3+beta_y/2*times**2+gamma_y*times+ay0
z=alpha_z/120*times**5+beta_z/24*times**4+gamma_z/6*times**3+az0/2*times**2+vz0*times+z0
vz=alpha_z/24*times**4+beta_z/6*times**3+gamma_z/2*times**2+az0*times+vz0
az=alpha_z/6*times**3+beta_z/2*times**2+gamma_z*times+az0
controlstate_msg.stateXarray = np.zeros_like(times)
controlstate_msg.stateYarray = y
controlstate_msg.stateZarray = z
controlstate_msg.stateVXarray = np.zeros_like(times)
controlstate_msg.stateVYarray = vy
controlstate_msg.stateVZarray = vz
controlstate_msg.stateAXarray = np.zeros_like(times)
controlstate_msg.stateAYarray = ay
controlstate_msg.stateAZarray = az
pub.publish(controlstate_msg)
currentupdateflag = False
rate.sleep()
# times=np.linspace(0,1,100)*result.x[-1]
#
# alpha_y=result.x[0]
# beta_y=result.x[1]
# gamma_y=result.x[2]
#
# alpha_z=result.x[3]
# beta_z=result.x[4]
# gamma_z=result.x[5]
#
# y=alpha_y/120*times**5+beta_y/24*times**4+gamma_y/6*times**3+ay0/2*times**2+vy0*times+y0
# vy=alpha_y/24*times**4+beta_y/6*times**3+gamma_y/2*times**2+ay0*times+vy0
# ay=alpha_y/6*times**3+beta_y/2*times**2+gamma_y*times+ay0
#
#
# z=alpha_z/120*times**5+beta_z/24*times**4+gamma_z/6*times**3+az0/2*times**2+vz0*times+z0
# vz=alpha_z/24*times**4+beta_z/6*times**3+gamma_z/2*times**2+az0*times+vz0
# az=alpha_z/6*times**3+beta_z/2*times**2+gamma_z*times+az0
# a=np.sqrt(az**2+ay**2)
# thurst=np.sqrt((az+9.8)**2+ay**2)
# phiseries=-np.arctan(ay/(az+9.8))
# # print("az--------", az)
# angleacc=np.zeros_like(times)
# for i in range(len(times)):
# t=times[i]
# angleacc[i]=((((alpha_y*t**2)/2 + beta_y*t + gamma_y)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - (((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2)*((2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3))/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)**2 - ((beta_y + alpha_y*t)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - ((beta_z + alpha_z*t)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_z*t**2)/2 + beta_z*t + gamma_z))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)**2*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3)/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)
#
# F1=angleacc*thrustmax/(4*angleaccdmax)+thurst/2
# F2=-angleacc*thrustmax/(4*angleaccdmax)+thurst/2
#
# plotlinewidth = 2
# plotfontsize = 16
# plt.subplot(2,2,1)
# plt.plot(times,y, color='blue',LineWidth=plotlinewidth,label="y")
# plt.plot(times,vy, color='green',LineWidth=plotlinewidth,label="vy")
# plt.plot(times,ay, color='black', LineWidth=plotlinewidth,label="ay")
# plt.plot(times,phiseries, color='yellow',LineWidth=plotlinewidth,label="phi")
# plt.legend(loc="best")
#
# plt.subplot(2,2,2)
# plt.plot(times,z, color='blue',LineWidth=plotlinewidth,label="z")
# plt.plot(times,vz, color='green',LineWidth=plotlinewidth,label="vz")
# plt.plot(times,az, color='black', LineWidth=plotlinewidth,label="az")
# plt.plot(times,thurst, color='yellow',LineWidth=plotlinewidth,label="thurst")
# plt.legend(loc="best")
#
# plt.subplot(2,2,3)
# plt.plot(-y,z, color='blue',LineWidth=plotlinewidth,label="y-z")
# plt.legend(loc="best")
#
# plt.subplot(2,2,4)
# plt.plot(times,F1, color='blue',LineWidth=plotlinewidth,label="F1")
# plt.plot(times,F2, color='black', LineWidth=plotlinewidth,label="F2")
# plt.legend(loc="best")
#
# # print(res)
# # core calculate code
# plt.show()
if __name__ == '__main__': # main entry point
main()
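# --- Hedged offline usage sketch (not part of the original script) ------------
# The optimisation itself has no ROS dependency, so the minimum-time solve can
# be exercised standalone, mirroring the setup inside main():
#
#   constraint = [dict(type='eq', fun=eqmycon), dict(type='ineq', fun=ineqmycon)]
#   bounds = [(-1000, 1000)] * 6 + [(0, 10)]
#   res = minimize(J, np.array([0., 0., 0., 0., 0., 0., 5.]), method='SLSQP',
#                  jac=fast_jac, tol=1e-4, bounds=bounds, constraints=constraint)
#   print(res.success, res.x[-1])   # res.x[-1] is the minimum terminal time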
| 45.594796
| 1,439
| 0.609458
|
import socket
import numpy as np
from scipy.optimize import minimize
import time
import datetime
import math
import matplotlib.pyplot as plt
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import TwistStamped
from offb_posctl.msg import controlstate
phi=1.57
ay0=0
vy0=0
y0=0
az0=0
vz0=0
z0=0.5
aytf=-math.sin(phi)*9.8
vytf=0.2*math.sin(phi)
ytf=7.0
aztf=math.cos(phi)*9.8-9.8
vztf=-0.2*math.cos(phi)
ztf=2.0
meshpoint=np.linspace(1, 0.01, 5)
thrustmax=2*9.8
angleaccdmax=20
lbz=0.3
ubz=2.5
lbv=-5
ubv=5
currentupdateflag = False
def J(x):
return x[-1]
def fast_jac(x):
jac = np.zeros_like(x)
jac[-1]=1
return jac
def eqmycon(x):
global ay0, vy0, y0, az0, vz0, z0, aytf, vytf, ytf, aztf, vztf, ztf, meshpoint, thrustmax, angleaccdmax, lbz, lbv, ubv
alpha_y=x[0]
beta_y=x[1]
gamma_y=x[2]
alpha_z=x[3]
beta_z=x[4]
gamma_z=x[5]
t=x[6]
ceq1=alpha_y/6*t**3+beta_y/2*t**2+gamma_y*t+ay0-aytf
ceq2=alpha_y/24*t**4+beta_y/6*t**3+gamma_y/2*t**2+ay0*t+vy0-vytf
ceq3=alpha_y/120*t**5+beta_y/24*t**4+gamma_y/6*t**3+ay0/2*t**2+vy0*t+y0-ytf
ceq4=alpha_z/6*t**3+beta_z/2*t**2+gamma_z*t+az0-aztf
ceq5=alpha_z/24*t**4+beta_z/6*t**3+gamma_z/2*t**2+az0*t+vz0-vztf
ceq6=alpha_z/120*t**5+beta_z/24*t**4+gamma_z/6*t**3+az0/2*t**2+vz0*t+z0-ztf
return np.hstack((ceq1,ceq2,ceq3,ceq4,ceq5,ceq6)).ravel()
def ineqmycon(x):
global ay0, vy0, y0, az0, vz0, z0, aytf, vytf, ytf, aztf, vztf, ztf, meshpoint, thrustmax, angleaccdmax, lbz, ubz, lbv, ubv
alpha_y=x[0]
beta_y=x[1]
gamma_y=x[2]
alpha_z=x[3]
beta_z=x[4]
gamma_z=x[5]
t=x[6]
tmesh=t*(np.array(meshpoint))
angleacc=np.zeros_like(tmesh)
for i in range(len(tmesh)):
t=tmesh[i]
angleacc[i]=((((alpha_y*t**2)/2 + beta_y*t + gamma_y)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - (((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2)*((2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3))/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)**2 - ((beta_y + alpha_y*t)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - ((beta_z + alpha_z*t)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_z*t**2)/2 + beta_z*t + gamma_z))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)**2*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3)/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)
t=x[6]
thrust=np.sqrt(((alpha_y*tmesh**3)/6 + (beta_y*tmesh**2)/2 + gamma_y*tmesh + ay0)**2 + ((alpha_z*tmesh**3)/6 + (beta_z*tmesh**2)/2 + gamma_z*tmesh + az0 + 49/5)**2)
c0=t
c1=2*9.8-thrust
c2=-lbz+(alpha_z/120*tmesh**5+beta_z/24*tmesh**4+gamma_z/6*tmesh**3+az0/2*tmesh**2+vz0*tmesh+z0)
c14=ubz-(alpha_z/120*tmesh**5+beta_z/24*tmesh**4+gamma_z/6*tmesh**3+az0/2*tmesh**2+vz0*tmesh+z0)
# actuator constraints
c3=angleacc*thrustmax/(4*angleaccdmax)-thrust/2+9.8
c4=-angleacc*thrustmax/(4*angleaccdmax)+thrust/2
c5=-angleacc*thrustmax/(4*angleaccdmax)-thrust/2+9.8
c6=angleacc*thrustmax/(4*angleaccdmax)+thrust/2
# phi belongs to [-1.57,1.57] constraints
c7=((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 +9.8)
c8=1
c9=1
    # Interior extrema of the vertical acceleration occur where
    # alpha_z/2*t**2 + beta_z*t + gamma_z = 0
    if beta_z*beta_z>=2*alpha_z*gamma_z and alpha_z!=0 :
        t1=(-beta_z+math.sqrt(beta_z*beta_z-2*alpha_z*gamma_z))/alpha_z
        t2=(-beta_z-math.sqrt(beta_z*beta_z-2*alpha_z*gamma_z))/alpha_z
if t1>=0 and t1<=t :
c8=((alpha_z*t1**3)/6 + (beta_z*t1**2)/2 + gamma_z*t1 + az0 +9.8)
if t2>=0 and t2<=t :
c9=((alpha_z*t2**3)/6 + (beta_z*t2**2)/2 + gamma_z*t2 + az0 +9.8)
#print('the value of t1 and t2 is',t1,t2)
c10=-(alpha_y/24*tmesh**4+beta_y/6*tmesh**3+gamma_y/2*tmesh**2+ay0*tmesh+vy0-ubv)
c11=-(lbv-(alpha_y/24*tmesh**4+beta_y/6*tmesh**3+gamma_y/2*tmesh**2+ay0*tmesh+vy0))
c12=-(alpha_z/24*tmesh**4+beta_z/6*tmesh**3+gamma_z/2*tmesh**2+az0*tmesh+vz0-ubv)
c13=-(lbv-(alpha_z/24*tmesh**4+beta_z/6*tmesh**3+gamma_z/2*tmesh**2+az0*tmesh+vz0))
# print("--------", np.vstack((c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13)).shape)
return np.hstack((c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14))
def pos_twist_callback(data):
global vy0, y0, vz0, z0, currentupdateflag
y0 = data.pose.pose.position.y # relative pos
z0 = data.pose.pose.position.z
vy0 = data.twist.twist.linear.y
vz0 = data.twist.twist.linear.z
currentupdateflag = True
def plane_vel_callback(data):
global va_ini
va_ini = data.twist.linear.x
def droneImu_callback(data):
global ay0, az0
ay0 = data.twist.linear.y
az0 = data.twist.linear.z
def main():
global currentupdateflag
constraint = [dict(type='eq', fun=eqmycon), dict(type='ineq', fun=ineqmycon)]
Initial_guess=np.array([0,0,0,0,0,0,5])
lb=-1000
ub=1000
mybounds=[(lb,ub),(lb,ub),(lb,ub),(lb,ub),(lb,ub),(lb,ub),(0,10)]
controlfreq=50
    controlstate_msg = controlstate() # the control-state message to publish
rospy.init_node('minimumsnap_control', anonymous=True)
uav_id = rospy.get_param("~id", "")
rate = rospy.Rate(100)
rospy.Subscriber(uav_id + "current_relative_postwist",
Odometry, pos_twist_callback)
# rospy.Subscriber(uav_id + "mavros/local_position/velocity_local",
    # TwistStamped, plane_vel_callback) # plane velocity
rospy.Subscriber(uav_id + "/mavros/imu/data",
                     TwistStamped, droneImu_callback) # drone IMU data
pub = rospy.Publisher( uav_id + "bvp_controlstate", controlstate, queue_size=10)
currentupdateflag=True
while not (rospy.is_shutdown()):
if currentupdateflag:
start = time.time()
result = minimize(J, Initial_guess, method='SLSQP', jac=fast_jac,tol=1e-4, bounds=mybounds,constraints=constraint)
end = time.time()
running_time = end - start
print('time cost : %.5f sec' % running_time)
if result.success:
Initial_guess=result.x
controlstate_msg.inicounter = 1
controlstate_msg.discrepointpersecond = controlfreq
controlstate_msg.arraylength = round(result.x[-1]*50)
times=np.linspace(0, 1, controlstate_msg.arraylength)*result.x[-1]
alpha_y=result.x[0]
beta_y=result.x[1]
gamma_y=result.x[2]
alpha_z=result.x[3]
beta_z=result.x[4]
gamma_z=result.x[5]
y=alpha_y/120*times**5+beta_y/24*times**4+gamma_y/6*times**3+ay0/2*times**2+vy0*times+y0
vy=alpha_y/24*times**4+beta_y/6*times**3+gamma_y/2*times**2+ay0*times+vy0
ay=alpha_y/6*times**3+beta_y/2*times**2+gamma_y*times+ay0
z=alpha_z/120*times**5+beta_z/24*times**4+gamma_z/6*times**3+az0/2*times**2+vz0*times+z0
vz=alpha_z/24*times**4+beta_z/6*times**3+gamma_z/2*times**2+az0*times+vz0
az=alpha_z/6*times**3+beta_z/2*times**2+gamma_z*times+az0
controlstate_msg.stateXarray = np.zeros_like(times)
controlstate_msg.stateYarray = y
controlstate_msg.stateZarray = z
controlstate_msg.stateVXarray = np.zeros_like(times)
controlstate_msg.stateVYarray = vy
controlstate_msg.stateVZarray = vz
controlstate_msg.stateAXarray = np.zeros_like(times)
controlstate_msg.stateAYarray = ay
controlstate_msg.stateAZarray = az
pub.publish(controlstate_msg)
currentupdateflag = False
rate.sleep()
# times=np.linspace(0,1,100)*result.x[-1]
#
# alpha_y=result.x[0]
# beta_y=result.x[1]
# gamma_y=result.x[2]
#
# alpha_z=result.x[3]
# beta_z=result.x[4]
# gamma_z=result.x[5]
#
# y=alpha_y/120*times**5+beta_y/24*times**4+gamma_y/6*times**3+ay0/2*times**2+vy0*times+y0
# vy=alpha_y/24*times**4+beta_y/6*times**3+gamma_y/2*times**2+ay0*times+vy0
# ay=alpha_y/6*times**3+beta_y/2*times**2+gamma_y*times+ay0
#
#
# z=alpha_z/120*times**5+beta_z/24*times**4+gamma_z/6*times**3+az0/2*times**2+vz0*times+z0
# vz=alpha_z/24*times**4+beta_z/6*times**3+gamma_z/2*times**2+az0*times+vz0
# az=alpha_z/6*times**3+beta_z/2*times**2+gamma_z*times+az0
# a=np.sqrt(az**2+ay**2)
# thurst=np.sqrt((az+9.8)**2+ay**2)
# phiseries=-np.arctan(ay/(az+9.8))
# # print("az--------", az)
# angleacc=np.zeros_like(times)
# for i in range(len(times)):
# t=times[i]
# angleacc[i]=((((alpha_y*t**2)/2 + beta_y*t + gamma_y)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - (((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2)*((2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3))/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)**2 - ((beta_y + alpha_y*t)/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5) - ((beta_z + alpha_z*t)*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 - (2*((alpha_y*t**2)/2 + beta_y*t + gamma_y)*((alpha_z*t**2)/2 + beta_z*t + gamma_z))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + (2*((alpha_z*t**2)/2 + beta_z*t + gamma_z)**2*((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0))/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**3)/(((alpha_y*t**3)/6 + (beta_y*t**2)/2 + gamma_y*t + ay0)**2/((alpha_z*t**3)/6 + (beta_z*t**2)/2 + gamma_z*t + az0 + 49/5)**2 + 1)
#
# F1=angleacc*thrustmax/(4*angleaccdmax)+thurst/2
# F2=-angleacc*thrustmax/(4*angleaccdmax)+thurst/2
#
# plotlinewidth = 2
# plotfontsize = 16
# plt.subplot(2,2,1)
# plt.plot(times,y, color='blue',LineWidth=plotlinewidth,label="y")
# plt.plot(times,vy, color='green',LineWidth=plotlinewidth,label="vy")
# plt.plot(times,ay, color='black', LineWidth=plotlinewidth,label="ay")
# plt.plot(times,phiseries, color='yellow',LineWidth=plotlinewidth,label="phi")
# plt.legend(loc="best")
#
# plt.subplot(2,2,2)
# plt.plot(times,z, color='blue',LineWidth=plotlinewidth,label="z")
# plt.plot(times,vz, color='green',LineWidth=plotlinewidth,label="vz")
# plt.plot(times,az, color='black', LineWidth=plotlinewidth,label="az")
# plt.plot(times,thurst, color='yellow',LineWidth=plotlinewidth,label="thurst")
# plt.legend(loc="best")
#
# plt.subplot(2,2,3)
# plt.plot(-y,z, color='blue',LineWidth=plotlinewidth,label="y-z")
# plt.legend(loc="best")
#
# plt.subplot(2,2,4)
# plt.plot(times,F1, color='blue',LineWidth=plotlinewidth,label="F1")
# plt.plot(times,F2, color='black', LineWidth=plotlinewidth,label="F2")
# plt.legend(loc="best")
#
# # print(res)
# # core calculate code
# plt.show()
if __name__ == '__main__': # main entry point
main()
| true
| true
|
1c43e31b2a6419a99dbce0307a341681e94bc888
| 27,753
|
py
|
Python
|
toontown/pickatoon/PickAToonOptions.py
|
cmarshall108/Project-Altis
|
7ead614abdb5072ca06323982de461f4e775d1b3
|
[
"Apache-2.0"
] | 1
|
2021-02-25T06:02:04.000Z
|
2021-02-25T06:02:04.000Z
|
toontown/pickatoon/PickAToonOptions.py
|
AnythingTechPro/Project-Altis
|
7ead614abdb5072ca06323982de461f4e775d1b3
|
[
"Apache-2.0"
] | null | null | null |
toontown/pickatoon/PickAToonOptions.py
|
AnythingTechPro/Project-Altis
|
7ead614abdb5072ca06323982de461f4e775d1b3
|
[
"Apache-2.0"
] | 2
|
2021-02-25T06:02:05.000Z
|
2021-06-19T03:11:22.000Z
|
'''
Created on Apr 2, 2016
@author: Drew
'''
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import Wait, Func, Sequence, LerpColorScaleInterval, Parallel, LerpScaleInterval
from direct.showbase.DirectObject import DirectObject
from panda3d.core import TransparencyAttrib, Point3, Vec4, TextNode, Vec3
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.toontowngui.TTGui import btnDn, btnRlvr, btnUp
from toontown.toontowngui import TTDialog
from toontown.options import GraphicsOptions
from toontown.shtiker import ControlRemapDialog, DisplaySettingsDialog
from decimal import Decimal
resolution_table = [
(800, 600),
(1024, 768),
(1280, 1024),
(1600, 1200),
(1280, 720),
(1920, 1080)]
class PickAToonOptions:
def __init__(self):
self.optionsOpenSfx = None #base.loadSfx(DMenuResources.Settings_Open) # ALTIS: TODO: Add sound effects
self.optionsCloseSfx = None #base.loadSfx(DMenuResources.Settings_Close) # ALTIS: TODO: Add sound effects
def showOptions(self):
#base.playSfx(self.optionsOpenSfx) # ALTIS: TODO: Add sound effects
self.displayOptions()
zoomIn = (LerpScaleInterval(self.optionsNode, .4, Vec3(1, 1, 1), Vec3(0, 0, 0), blendType = 'easeInOut')).start()
def hideOptions(self):
#base.playSfx(self.optionsCloseSfx) # ALTIS: TODO: Add sound effects
zoomOut = (LerpScaleInterval(self.optionsNode, .4, Vec3(0, 0, 0), Vec3(1, 1, 1), blendType = 'easeInOut')).start()
Sequence (
Wait(.4),
Func(self.delOptions)).start()
def displayOptions(self):
self.optionsNode = aspect2d.attachNewNode('optionsNode')
self.optionsNode.reparentTo(aspect2d)
gui = loader.loadModel('phase_3/models/gui/pick_a_toon_gui')
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
quitHover = gui.find('**/QuitBtn_RLVR')
self.optionsBox = OnscreenImage(image = 'phase_3/maps/stat_board.png')
self.optionsBox.setTransparency(TransparencyAttrib.MAlpha)
self.optionsBox.setPos(0, 0, 0)
self.optionsBox.setScale(0.7)
self.optionsBox.reparentTo(self.optionsNode)
# Music Label
self.Music_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Music Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.5))
# Music Slider
self.Music_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0, 0.4),
value = settings['musicVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doMusicLevel,)
self.Music_toggleSlider.setScale(0.4, 0.4, 0.4)
self.Music_toggleSlider.show()
# SFX Slider
self.SoundFX_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0.0, 0.2),
value = settings['sfxVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doSfxLevel)
self.SoundFX_toggleSlider.setScale(0.4, 0.4, 0.4)
# SFX Label
self.SoundFX_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'SFX Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.3))
# Toon Chat Sound Effects
self.ToonChatSounds_toggleButton = DirectButton(parent = self.optionsNode, relief = None, image = (guiButton.find('**/QuitBtn_UP'),
guiButton.find('**/QuitBtn_DN'),
guiButton.find('**/QuitBtn_RLVR'),
guiButton.find('**/QuitBtn_UP')), image3_color = Vec4(0.5, 0.5, 0.5, 0.5), image_scale = (0.7, 1, 1), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_scale = 0.052, text_pos = (0, -.02), pos = (0, 0, 0), command = self.__doToggleToonChatSounds)
self.ToonChatSounds_toggleButton.setScale(0.8)
self.ToonChatSounds_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Toon Chat Sounds', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, .1))
# Key Remapping
self.WASD_Label = DirectLabel(parent=self.optionsNode, relief=None, text='', text_align=TextNode.ACenter, text_scale=0.052, text_wordwrap=16, pos=(0, 0, -0.1))
self.WASD_toggleButton = DirectButton(parent=self.optionsNode, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='', text_scale = 0.052, text_pos=(0, -.02), pos=(0, 0, -0.2), command=self.__doToggleWASD)
self.keymapDialogButton = DirectButton(parent=self.optionsNode, relief = None, image = (guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='Change Keybinds', text_scale=(0.03, 0.05, 1), text_pos = (0, -.02), pos = (0, 0, -0.3), command = self.__openKeyRemapDialog)
self.keymapDialogButton.setScale(1.55, 1.0, 1.0)
# Aspect Ratio Options
self.AspectRatioList = DirectOptionMenu(relief = None, parent = self.optionsNode, text_align = TextNode.ACenter, items = GraphicsOptions.AspectRatioLabels, command = self.__doWidescreen, text_scale = .6,
popupMarker_pos = (-1, 0, 0),
popupMarker_relief = None,
highlightScale = (1.1, 1.1),
image = (guiButton.find('**/QuitBtn_UP'),
guiButton.find('**/QuitBtn_DN'),
guiButton.find('**/QuitBtn_RLVR'),
guiButton.find('**/QuitBtn_UP')), image_scale = 8, image3_color = Vec4(0.5, 0.5, 0.5, 0.5), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_pos = (0, -.02), pos = (0, 0, -0.5), image_pos = (0, 0, 0), item_text_align = TextNode.ACenter, popupMenu_text_scale = .5, item_relief = None, item_pressEffect = 1)
self.AspectRatioList.setScale(0.1)
self.AspectRatioList.set(base.Widescreen)
self.Widescreen_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Aspect Ratio', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, -0.4))
# TODO: Add more graphics options like Resolution, and more graphics options like in POTCO to allow changing quality of textures, etc.
# Set Button Text
self.__setToonChatSoundsButton()
self.__setWASDButton()
def delOptions(self):
self.optionsBox.destroy()
del self.optionsBox
self.Music_Label.destroy()
del self.Music_Label
self.Music_toggleSlider.destroy()
del self.Music_toggleSlider
self.SoundFX_Label.destroy()
del self.SoundFX_Label
self.SoundFX_toggleSlider.destroy()
del self.SoundFX_toggleSlider
self.ToonChatSounds_Label.destroy()
del self.ToonChatSounds_Label
self.ToonChatSounds_toggleButton.destroy()
del self.ToonChatSounds_toggleButton
self.Widescreen_Label.destroy()
del self.Widescreen_Label
self.AspectRatioList.destroy()
del self.AspectRatioList
self.WASD_Label.destroy()
del self.WASD_Label
self.WASD_toggleButton.destroy()
del self.WASD_toggleButton
self.keymapDialogButton.destroy()
del self.keymapDialogButton
self.optionsNode.removeNode()
del self.optionsNode
# EZ copy from optionspage.py
def __doMusicLevel(self):
vol = self.Music_toggleSlider['value']
vol = float(vol) / 100
settings['musicVol'] = vol
base.musicManager.setVolume(vol)
base.musicActive = vol > 0.0
def __doSfxLevel(self):
vol = self.SoundFX_toggleSlider['value']
vol = float(vol) / 100
settings['sfxVol'] = vol
for sfm in base.sfxManagerList:
sfm.setVolume(vol)
base.sfxActive = vol > 0.0
def __doToggleToonChatSounds(self):
messenger.send('wakeup')
if base.toonChatSounds:
base.toonChatSounds = 0
settings['toonChatSounds'] = False
else:
base.toonChatSounds = 1
settings['toonChatSounds'] = True
self.settingsChanged = 1
self.__setToonChatSoundsButton()
def __setToonChatSoundsButton(self):
if base.toonChatSounds:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOnLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
else:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOffLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
if base.sfxActive:
self.ToonChatSounds_Label.setColorScale(1.0, 1.0, 1.0, 1.0)
self.ToonChatSounds_toggleButton['state'] = DGG.NORMAL
else:
self.ToonChatSounds_Label.setColorScale(0.5, 0.5, 0.5, 0.5)
self.ToonChatSounds_toggleButton['state'] = DGG.DISABLED
def __doWidescreen(self, ratio):
messenger.send('wakeup')
ratio = self.AspectRatioList.selectedIndex
if base.Widescreen != ratio:
base.Widescreen = ratio
settings['Widescreen'] = ratio
self.settingsChanged = 1
base.updateAspectRatio()
def __doToggleWASD(self):
messenger.send('wakeup')
if base.wantCustomControls:
base.wantCustomControls = False
settings['want-Custom-Controls'] = False
else:
base.wantCustomControls = True
settings['want-Custom-Controls'] = True
base.reloadControls()
self.settingsChanged = 1
self.__setWASDButton()
def __setWASDButton(self):
if base.wantCustomControls:
self.WASD_Label['text'] = 'Custom Keymapping is enabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
self.keymapDialogButton.show()
else:
self.WASD_Label['text'] = 'Custom Keymapping is disabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
self.keymapDialogButton.hide()
def __openKeyRemapDialog(self):
if base.wantCustomControls:
self.controlDialog = ControlRemapDialog.ControlRemap()
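# --- Hedged usage sketch (not part of the original file) ----------------------
# PickAToonOptions assumes a running Panda3D ShowBase: aspect2d, loader, base
# and the global `settings` dict all come from the game environment.  Inside
# that environment the panel would be driven roughly like this:
#
#   options = PickAToonOptions()
#   options.showOptions()   # builds the GUI and lerps the panel in over 0.4s
#   ...
#   options.hideOptions()   # lerps out, then delOptions() tears the GUI down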
# I will be revamping the options screen, here is the class for it
class NewPickAToonOptions:
def __init__(self):
self.optionsOpenSfx = None #base.loadSfx(DMenuResources.Settings_Open) # ALTIS: TODO: Add sound effects
self.optionsCloseSfx = None #base.loadSfx(DMenuResources.Settings_Close) # ALTIS: TODO: Add sound effects
self.Music_Label = None
self.Music_toggleSlider = None
self.SoundFX_Label = None
self.SoundFX_toggleSlider = None
self.ToonChatSounds_Label = None
self.ToonChatSounds_toggleButton = None
self.WASD_Label = None
self.WASD_toggleButton = None
self.keymapDialogButton = None
self.Widescreen_Label = None
self.AspectRatioList = None
self.DisplaySettings_Label = None
self.DisplaySettingsButton = None
self.fov_toggleSlider = None
self.fov_Label = None
self.fov_resetButton = None
self.displaySettings = None
self.displaySettingsChanged = 0
self.displaySettingsSize = (None, None)
self.displaySettingsFullscreen = None
self.displaySettingsBorderless = None
self.displaySettingsApi = None
self.displaySettingsApiChanged = 0
def showOptions(self):
#base.playSfx(self.optionsOpenSfx) # ALTIS: TODO: Add sound effects
self.displayOptions()
zoomIn = (LerpScaleInterval(self.optionsNode, .1, Vec3(1, 1, 1), Vec3(0, 0, 0), blendType = 'easeOut')).start()
def hideOptions(self):
#base.playSfx(self.optionsCloseSfx) # ALTIS: TODO: Add sound effects
zoomOut = (LerpScaleInterval(self.optionsNode, .1, Vec3(.5, .5, .5), Vec3(1, 1, 1), blendType = 'easeIn')).start()
Sequence (
Wait(.1),
Func(self.delAllOptions)).start()
def displayOptions(self):
self.optionsNode = aspect2d.attachNewNode('optionsNode')
self.optionsNode.reparentTo(aspect2d)
self.guimodel = loader.loadModel('phase_3/models/gui/pick_a_toon_gui')
self.guiButton = loader.loadModel('phase_3/models/gui/quit_button')
self.quitHover = self.guimodel.find('**/QuitBtn_RLVR')
self.optionsBox = OnscreenImage(image = 'phase_3/maps/stat_board.png')
self.optionsBox.setTransparency(TransparencyAttrib.MAlpha)
self.optionsBox.setPos(0, 0, 0)
self.optionsBox.setScale(1.3, 1, 1)
self.optionsBox.reparentTo(self.optionsNode)
self.soundOptionsButton = DirectButton(relief = None, text_style = 3, text_fg = (1, 1, 1, 1), text = 'Sound', text_scale = .1, scale = 0.95, command = self.displaySoundOptions)
self.soundOptionsButton.reparentTo(self.optionsNode)
self.soundOptionsButton.setPos(-.6, 0, .7)
self.soundOptionsButton.show()
self.controlOptionsButton = DirectButton(relief = None, text_style = 3, text_fg = (1, 1, 1, 1), text = 'Controls', text_scale = .1, scale = 0.95, command = self.displayControlOptions)
self.controlOptionsButton.reparentTo(self.optionsNode)
self.controlOptionsButton.setPos(0, 0, .7)
self.controlOptionsButton.show()
self.videoOptionsButton = DirectButton(relief = None, text_style = 3, text_fg = (1, 1, 1, 1), text = 'Video', text_scale = .1, scale = 0.95, command = self.displayVideoOptions)
self.videoOptionsButton.reparentTo(self.optionsNode)
self.videoOptionsButton.setPos(.6, 0, .7)
self.videoOptionsButton.show()
self.displaySoundOptions()
def displaySoundOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
# Music Label
self.Music_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Music Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.4))
# Music Slider
self.Music_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0, 0.3),
value = settings['musicVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doMusicLevel, thumb_geom=(self.guiButton.find('**/QuitBtn_UP')), thumb_relief=None, thumb_geom_scale=1)
self.Music_toggleSlider.setScale(0.4, 0.4, 0.4)
self.Music_toggleSlider.show()
# SFX Slider
self.SoundFX_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0.0, 0.1),
value = settings['sfxVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doSfxLevel, thumb_geom=(self.guiButton.find('**/QuitBtn_UP')), thumb_relief=None, thumb_geom_scale=1)
self.SoundFX_toggleSlider.setScale(0.4, 0.4, 0.4)
# SFX Label
self.SoundFX_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'SFX Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.2))
# Toon Chat Sound Effects
self.ToonChatSounds_toggleButton = DirectButton(parent = self.optionsNode, relief = None, image = (self.guiButton.find('**/QuitBtn_UP'),
self.guiButton.find('**/QuitBtn_DN'),
self.guiButton.find('**/QuitBtn_RLVR'),
self.guiButton.find('**/QuitBtn_UP')), image3_color = Vec4(0.5, 0.5, 0.5, 0.5), image_scale = (0.7, 1, 1), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_scale = 0.052, text_pos = (0, -.02), pos = (0, 0, -.1), command = self.__doToggleToonChatSounds)
self.ToonChatSounds_toggleButton.setScale(0.8)
self.ToonChatSounds_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Toon Chat Sounds', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0))
# Set Button Text
self.__setToonChatSoundsButton()
def displayControlOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
# Key Remapping
self.WASD_Label = DirectLabel(parent=self.optionsNode, relief=None, text='', text_align=TextNode.ACenter, text_scale=0.052, text_wordwrap=16, pos=(0, 0, .4))
self.WASD_toggleButton = DirectButton(parent=self.optionsNode, relief=None, image=(self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='', text_scale = 0.052, text_pos=(0, -.02), pos=(0, 0, .3), command=self.__doToggleWASD)
self.keymapDialogButton = DirectButton(parent=self.optionsNode, relief = None, image = (self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='Change Keybinds', text_scale=(0.03, 0.05, 1), text_pos = (0, -.02), pos = (0, 0, .2), command = self.__openKeyRemapDialog)
self.keymapDialogButton.setScale(1.55, 1.0, 1.0)
# Set Button Text
self.__setWASDButton()
def displayVideoOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
# Aspect Ratio Options
self.AspectRatioList = DirectOptionMenu(relief = None, parent = self.optionsNode, text_align = TextNode.ACenter, items = GraphicsOptions.AspectRatioLabels, command = self.__doWidescreen, text_scale = .6,
popupMarker_pos = (-1, 0, 0),
popupMarker_relief = None,
highlightScale = (1.1, 1.1),
image = (self.guiButton.find('**/QuitBtn_UP'),
self.guiButton.find('**/QuitBtn_DN'),
self.guiButton.find('**/QuitBtn_RLVR'),
self.guiButton.find('**/QuitBtn_UP')), image_scale = 8, image3_color = Vec4(0.5, 0.5, 0.5, 0.5), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_pos = (0, -.02), pos = (0, 0, .3), image_pos = (0, 0, 0), item_text_align = TextNode.ACenter, popupMenu_text_scale = .5, item_relief = None, item_pressEffect = 1)
self.AspectRatioList.setScale(0.1)
self.AspectRatioList.set(base.Widescreen)
self.Widescreen_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Aspect Ratio', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, .4))
self.DisplaySettings_Label = DirectLabel(parent=self.optionsNode, relief=None, text='', text_align=TextNode.ACenter, text_scale=0.052, text_wordwrap=16, pos=(0, 0, .2))
self.DisplaySettingsButton = DirectButton(parent=self.optionsNode, relief=None, image=(self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image3_color=Vec4(0.5, 0.5, 0.5, 0.5), image_scale=(0.7, 1, 1), text=TTLocalizer.OptionsPageChange, text3_fg=(0.5, 0.5, 0.5, 0.75), text_scale=0.052, text_pos=(0, -.02), pos=(0, 0, .1), command=self.__doDisplaySettings)
self.fov_Label = DirectLabel(parent=self.optionsNode, relief=None, text='Field of view', text_align=TextNode.ACenter, text_scale = 0.052, text_wordwrap=16, pos=(0, 0, 0))
self.fov_toggleSlider = DirectSlider(parent=self.optionsNode, pos=(0, 0, -.1),
value=settings['fieldofview'], pageSize=5, range=(30, 120), command=self.__doFovLevel, thumb_geom=(self.guiButton.find('**/QuitBtn_UP')), thumb_relief=None, thumb_geom_scale=1)
self.fov_toggleSlider.setScale(0.25)
self.fov_resetButton = DirectButton(parent=self.optionsNode, relief=None, image=(self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image_scale=(0.7, 1, 1), text='Reset FOV', text_scale=0.052, text_pos = (0, -.02), pos=(0, 0, -.2), command=self.__resetFov)
self.fovsliderText = OnscreenText('0.0', scale=.3, pos=(0, .1), fg=(1, 1, 1, 1), style = 3)
self.fovsliderText.reparentTo(self.fov_toggleSlider.thumb)
self.__doFovLevel()
self.__setDisplaySettings()
# TODO: Add more graphics options like Resolution, and more graphics options like in POTCO to allow changing quality of textures, etc.
def delSoundOptions(self):
if self.Music_Label:
self.Music_Label.destroy()
self.Music_Label = None
if self.Music_toggleSlider:
self.Music_toggleSlider.destroy()
self.Music_toggleSlider = None
if self.SoundFX_Label:
self.SoundFX_Label.destroy()
self.SoundFX_Label = None
if self.SoundFX_toggleSlider:
self.SoundFX_toggleSlider.destroy()
self.SoundFX_toggleSlider = None
if self.ToonChatSounds_Label:
self.ToonChatSounds_Label.destroy()
self.ToonChatSounds_Label = None
if self.ToonChatSounds_toggleButton:
self.ToonChatSounds_toggleButton.destroy()
self.ToonChatSounds_toggleButton = None
def delControlOptions(self):
if self.WASD_Label:
self.WASD_Label.destroy()
self.WASD_Label = None
if self.WASD_toggleButton:
self.WASD_toggleButton.destroy()
self.WASD_toggleButton = None
if self.keymapDialogButton:
self.keymapDialogButton.destroy()
self.keymapDialogButton = None
def delVideoOptions(self):
if self.Widescreen_Label:
self.Widescreen_Label.destroy()
self.Widescreen_Label = None
if self.AspectRatioList:
self.AspectRatioList.destroy()
self.AspectRatioList = None
if self.DisplaySettings_Label:
self.DisplaySettings_Label.destroy()
self.DisplaySettings_Label = None
if self.DisplaySettingsButton:
self.DisplaySettingsButton.destroy()
self.DisplaySettingsButton = None
if self.fov_toggleSlider:
self.fov_toggleSlider.destroy()
self.fov_toggleSlider = None
self.fov_Label.destroy()
self.fov_Label = None
self.fov_resetButton.destroy()
self.fov_resetButton = None
def delAllOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
self.optionsBox.destroy()
del self.optionsBox
self.optionsNode.removeNode()
del self.optionsNode
# EZ copy from optionspage.py
def __doMusicLevel(self):
vol = self.Music_toggleSlider['value']
vol = float(vol) / 100
settings['musicVol'] = vol
base.musicManager.setVolume(vol)
base.musicActive = vol > 0.0
def __doSfxLevel(self):
vol = self.SoundFX_toggleSlider['value']
vol = float(vol) / 100
settings['sfxVol'] = vol
for sfm in base.sfxManagerList:
sfm.setVolume(vol)
base.sfxActive = vol > 0.0
def __doToggleToonChatSounds(self):
messenger.send('wakeup')
if base.toonChatSounds:
base.toonChatSounds = 0
settings['toonChatSounds'] = False
else:
base.toonChatSounds = 1
settings['toonChatSounds'] = True
self.settingsChanged = 1
self.__setToonChatSoundsButton()
def __setToonChatSoundsButton(self):
if base.toonChatSounds:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOnLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
else:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOffLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
if base.sfxActive:
self.ToonChatSounds_Label.setColorScale(1.0, 1.0, 1.0, 1.0)
self.ToonChatSounds_toggleButton['state'] = DGG.NORMAL
else:
self.ToonChatSounds_Label.setColorScale(0.5, 0.5, 0.5, 0.5)
self.ToonChatSounds_toggleButton['state'] = DGG.DISABLED
def __doWidescreen(self, ratio):
messenger.send('wakeup')
ratio = self.AspectRatioList.selectedIndex
if base.Widescreen != ratio:
base.Widescreen = ratio
settings['Widescreen'] = ratio
self.settingsChanged = 1
base.updateAspectRatio()
def __doToggleWASD(self):
messenger.send('wakeup')
if base.wantCustomControls:
base.wantCustomControls = False
settings['want-Custom-Controls'] = False
else:
base.wantCustomControls = True
settings['want-Custom-Controls'] = True
base.reloadControls()
self.settingsChanged = 1
self.__setWASDButton()
def __setWASDButton(self):
if base.wantCustomControls:
self.WASD_Label['text'] = 'Custom Keymapping is enabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
self.keymapDialogButton.show()
else:
self.WASD_Label['text'] = 'Custom Keymapping is disabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
self.keymapDialogButton.hide()
def __openKeyRemapDialog(self):
if base.wantCustomControls:
self.controlDialog = ControlRemapDialog.ControlRemap()
def __doDisplaySettings(self):
if self.displaySettings == None:
self.displaySettings = DisplaySettingsDialog.DisplaySettingsDialog()
self.displaySettings.load()
base.accept(self.displaySettings.doneEvent, self.__doneDisplaySettings)
self.displaySettings.enter(True, False)
def __doneDisplaySettings(self, anyChanged, apiChanged):
if anyChanged:
self.__setDisplaySettings()
properties = base.win.getProperties()
self.displaySettingsChanged = 1
self.displaySettingsSize = (properties.getXSize(), properties.getYSize())
self.displaySettingsFullscreen = properties.getFullscreen()
self.displaySettingsBorderless = properties.getUndecorated()
self.displaySettingsApi = base.pipe.getInterfaceName()
self.displaySettingsApiChanged = apiChanged
def __setDisplaySettings(self):
properties = base.win.getProperties()
if properties.getFullscreen():
screensize = 'Fullscreen | %s x %s' % (properties.getXSize(), properties.getYSize())
elif properties.getUndecorated():
screensize = 'Borderless Windowed | %s x %s' % (properties.getXSize(), properties.getYSize())
else:
screensize = 'Windowed'
api = base.pipe.getInterfaceName()
settings = {'screensize': screensize, 'api': api}
text = TTLocalizer.OptionsPageDisplaySettings % settings
self.DisplaySettings_Label['text'] = text
def __doFovLevel(self):
fov = self.fov_toggleSlider['value']
settings['fieldofview'] = fov
base.camLens.setMinFov(fov/(4./3.))
dec = Decimal(fov)
self.fovsliderText['text'] = str(round(fov, 1))
def __resetFov(self):
self.fov_toggleSlider['value'] = 52
settings['fieldofview'] = 52
base.camLens.setMinFov(52/(4./3.))
self.fovsliderText['text'] = str(52)
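# --- Hedged note on the FOV conversion above (not part of the original file) --
# Lens.setMinFov keys on the window's smaller dimension, while the slider
# stores a 4:3 horizontal FOV, hence the division by 4/3 to recover the
# vertical angle.  A quick standalone check of the default value:
#
#   >>> 52 / (4. / 3.)
#   39.0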
| 49.033569
| 431
| 0.644723
|
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import Wait, Func, Sequence, LerpColorScaleInterval, Parallel, LerpScaleInterval
from direct.showbase.DirectObject import DirectObject
from panda3d.core import TransparencyAttrib, Point3, Vec4, TextNode, Vec3
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.toontowngui.TTGui import btnDn, btnRlvr, btnUp
from toontown.toontowngui import TTDialog
from toontown.options import GraphicsOptions
from toontown.shtiker import ControlRemapDialog, DisplaySettingsDialog
from decimal import Decimal
resolution_table = [
(800, 600),
(1024, 768),
(1280, 1024),
(1600, 1200),
(1280, 720),
(1920, 1080)]
class PickAToonOptions:
def __init__(self):
        self.optionsOpenSfx = None
        self.optionsCloseSfx = None
    def showOptions(self):
        self.displayOptions()
        zoomIn = (LerpScaleInterval(self.optionsNode, .4, Vec3(1, 1, 1), Vec3(0, 0, 0), blendType = 'easeInOut')).start()
    def hideOptions(self):
        zoomOut = (LerpScaleInterval(self.optionsNode, .4, Vec3(0, 0, 0), Vec3(1, 1, 1), blendType = 'easeInOut')).start()
Sequence (
Wait(.4),
Func(self.delOptions)).start()
def displayOptions(self):
self.optionsNode = aspect2d.attachNewNode('optionsNode')
self.optionsNode.reparentTo(aspect2d)
gui = loader.loadModel('phase_3/models/gui/pick_a_toon_gui')
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
quitHover = gui.find('**/QuitBtn_RLVR')
self.optionsBox = OnscreenImage(image = 'phase_3/maps/stat_board.png')
self.optionsBox.setTransparency(TransparencyAttrib.MAlpha)
self.optionsBox.setPos(0, 0, 0)
self.optionsBox.setScale(0.7)
self.optionsBox.reparentTo(self.optionsNode)
self.Music_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Music Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.5))
self.Music_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0, 0.4),
value = settings['musicVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doMusicLevel,)
self.Music_toggleSlider.setScale(0.4, 0.4, 0.4)
self.Music_toggleSlider.show()
self.SoundFX_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0.0, 0.2),
value = settings['sfxVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doSfxLevel)
self.SoundFX_toggleSlider.setScale(0.4, 0.4, 0.4)
self.SoundFX_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'SFX Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.3))
self.ToonChatSounds_toggleButton = DirectButton(parent = self.optionsNode, relief = None, image = (guiButton.find('**/QuitBtn_UP'),
guiButton.find('**/QuitBtn_DN'),
guiButton.find('**/QuitBtn_RLVR'),
guiButton.find('**/QuitBtn_UP')), image3_color = Vec4(0.5, 0.5, 0.5, 0.5), image_scale = (0.7, 1, 1), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_scale = 0.052, text_pos = (0, -.02), pos = (0, 0, 0), command = self.__doToggleToonChatSounds)
self.ToonChatSounds_toggleButton.setScale(0.8)
self.ToonChatSounds_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Toon Chat Sounds', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, .1))
self.WASD_Label = DirectLabel(parent=self.optionsNode, relief=None, text='', text_align=TextNode.ACenter, text_scale=0.052, text_wordwrap=16, pos=(0, 0, -0.1))
self.WASD_toggleButton = DirectButton(parent=self.optionsNode, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='', text_scale = 0.052, text_pos=(0, -.02), pos=(0, 0, -0.2), command=self.__doToggleWASD)
self.keymapDialogButton = DirectButton(parent=self.optionsNode, relief = None, image = (guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='Change Keybinds', text_scale=(0.03, 0.05, 1), text_pos = (0, -.02), pos = (0, 0, -0.3), command = self.__openKeyRemapDialog)
self.keymapDialogButton.setScale(1.55, 1.0, 1.0)
self.AspectRatioList = DirectOptionMenu(relief = None, parent = self.optionsNode, text_align = TextNode.ACenter, items = GraphicsOptions.AspectRatioLabels, command = self.__doWidescreen, text_scale = .6,
popupMarker_pos = (-1, 0, 0),
popupMarker_relief = None,
highlightScale = (1.1, 1.1),
image = (guiButton.find('**/QuitBtn_UP'),
guiButton.find('**/QuitBtn_DN'),
guiButton.find('**/QuitBtn_RLVR'),
guiButton.find('**/QuitBtn_UP')), image_scale = 8, image3_color = Vec4(0.5, 0.5, 0.5, 0.5), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_pos = (0, -.02), pos = (0, 0, -0.5), image_pos = (0, 0, 0), item_text_align = TextNode.ACenter, popupMenu_text_scale = .5, item_relief = None, item_pressEffect = 1)
self.AspectRatioList.setScale(0.1)
self.AspectRatioList.set(base.Widescreen)
self.Widescreen_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Aspect Ratio', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, -0.4))
self.__setToonChatSoundsButton()
self.__setWASDButton()
def delOptions(self):
self.optionsBox.destroy()
del self.optionsBox
self.Music_Label.destroy()
del self.Music_Label
self.Music_toggleSlider.destroy()
del self.Music_toggleSlider
self.SoundFX_Label.destroy()
del self.SoundFX_Label
self.SoundFX_toggleSlider.destroy()
del self.SoundFX_toggleSlider
self.ToonChatSounds_Label.destroy()
del self.ToonChatSounds_Label
self.ToonChatSounds_toggleButton.destroy()
del self.ToonChatSounds_toggleButton
self.Widescreen_Label.destroy()
del self.Widescreen_Label
self.AspectRatioList.destroy()
del self.AspectRatioList
self.WASD_Label.destroy()
del self.WASD_Label
self.WASD_toggleButton.destroy()
del self.WASD_toggleButton
self.keymapDialogButton.destroy()
del self.keymapDialogButton
self.optionsNode.removeNode()
del self.optionsNode
def __doMusicLevel(self):
vol = self.Music_toggleSlider['value']
vol = float(vol) / 100
settings['musicVol'] = vol
base.musicManager.setVolume(vol)
base.musicActive = vol > 0.0
def __doSfxLevel(self):
vol = self.SoundFX_toggleSlider['value']
vol = float(vol) / 100
settings['sfxVol'] = vol
for sfm in base.sfxManagerList:
sfm.setVolume(vol)
base.sfxActive = vol > 0.0
def __doToggleToonChatSounds(self):
messenger.send('wakeup')
if base.toonChatSounds:
base.toonChatSounds = 0
settings['toonChatSounds'] = False
else:
base.toonChatSounds = 1
settings['toonChatSounds'] = True
self.settingsChanged = 1
self.__setToonChatSoundsButton()
def __setToonChatSoundsButton(self):
if base.toonChatSounds:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOnLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
else:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOffLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
if base.sfxActive:
self.ToonChatSounds_Label.setColorScale(1.0, 1.0, 1.0, 1.0)
self.ToonChatSounds_toggleButton['state'] = DGG.NORMAL
else:
self.ToonChatSounds_Label.setColorScale(0.5, 0.5, 0.5, 0.5)
self.ToonChatSounds_toggleButton['state'] = DGG.DISABLED
def __doWidescreen(self, ratio):
messenger.send('wakeup')
ratio = self.AspectRatioList.selectedIndex
if base.Widescreen != ratio:
base.Widescreen = ratio
settings['Widescreen'] = ratio
self.settingsChanged = 1
base.updateAspectRatio()
def __doToggleWASD(self):
messenger.send('wakeup')
if base.wantCustomControls:
base.wantCustomControls = False
settings['want-Custom-Controls'] = False
else:
base.wantCustomControls = True
settings['want-Custom-Controls'] = True
base.reloadControls()
self.settingsChanged = 1
self.__setWASDButton()
def __setWASDButton(self):
if base.wantCustomControls:
self.WASD_Label['text'] = 'Custom Keymapping is enabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
self.keymapDialogButton.show()
else:
self.WASD_Label['text'] = 'Custom Keymapping is disabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
self.keymapDialogButton.hide()
def __openKeyRemapDialog(self):
if base.wantCustomControls:
self.controlDialog = ControlRemapDialog.ControlRemap()
class NewPickAToonOptions:
def __init__(self):
self.optionsOpenSfx = None
self.Music_Label = None
self.Music_toggleSlider = None
self.SoundFX_Label = None
self.SoundFX_toggleSlider = None
self.ToonChatSounds_Label = None
self.ToonChatSounds_toggleButton = None
self.WASD_Label = None
self.WASD_toggleButton = None
self.keymapDialogButton = None
self.Widescreen_Label = None
self.AspectRatioList = None
self.DisplaySettings_Label = None
self.DisplaySettingsButton = None
self.fov_toggleSlider = None
self.fov_Label = None
self.fov_resetButton = None
self.displaySettings = None
self.displaySettingsChanged = 0
self.displaySettingsSize = (None, None)
self.displaySettingsFullscreen = None
self.displaySettingsBorderless = None
self.displaySettingsApi = None
self.displaySettingsApiChanged = 0
def showOptions(self):
zoomIn = (LerpScaleInterval(self.optionsNode, .1, Vec3(1, 1, 1), Vec3(0, 0, 0), blendType = 'easeOut')).start()
def hideOptions(self):
zoomOut = (LerpScaleInterval(self.optionsNode, .1, Vec3(.5, .5, .5), Vec3(1, 1, 1), blendType = 'easeIn')).start()
Sequence (
Wait(.1),
Func(self.delAllOptions)).start()
def displayOptions(self):
self.optionsNode = aspect2d.attachNewNode('optionsNode')
self.optionsNode.reparentTo(aspect2d)
self.guimodel = loader.loadModel('phase_3/models/gui/pick_a_toon_gui')
self.guiButton = loader.loadModel('phase_3/models/gui/quit_button')
self.quitHover = self.guimodel.find('**/QuitBtn_RLVR')
self.optionsBox = OnscreenImage(image = 'phase_3/maps/stat_board.png')
self.optionsBox.setTransparency(TransparencyAttrib.MAlpha)
self.optionsBox.setPos(0, 0, 0)
self.optionsBox.setScale(1.3, 1, 1)
self.optionsBox.reparentTo(self.optionsNode)
self.soundOptionsButton = DirectButton(relief = None, text_style = 3, text_fg = (1, 1, 1, 1), text = 'Sound', text_scale = .1, scale = 0.95, command = self.displaySoundOptions)
self.soundOptionsButton.reparentTo(self.optionsNode)
self.soundOptionsButton.setPos(-.6, 0, .7)
self.soundOptionsButton.show()
self.controlOptionsButton = DirectButton(relief = None, text_style = 3, text_fg = (1, 1, 1, 1), text = 'Controls', text_scale = .1, scale = 0.95, command = self.displayControlOptions)
self.controlOptionsButton.reparentTo(self.optionsNode)
self.controlOptionsButton.setPos(0, 0, .7)
self.controlOptionsButton.show()
self.videoOptionsButton = DirectButton(relief = None, text_style = 3, text_fg = (1, 1, 1, 1), text = 'Video', text_scale = .1, scale = 0.95, command = self.displayVideoOptions)
self.videoOptionsButton.reparentTo(self.optionsNode)
self.videoOptionsButton.setPos(.6, 0, .7)
self.videoOptionsButton.show()
self.displaySoundOptions()
def displaySoundOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
self.Music_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Music Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.4))
self.Music_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0, 0.3),
value = settings['musicVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doMusicLevel, thumb_geom=(self.guiButton.find('**/QuitBtn_UP')), thumb_relief=None, thumb_geom_scale=1)
self.Music_toggleSlider.setScale(0.4, 0.4, 0.4)
self.Music_toggleSlider.show()
self.SoundFX_toggleSlider = DirectSlider(parent = self.optionsNode, pos = (0, 0.0, 0.1),
value = settings['sfxVol'] * 100, pageSize = 5, range = (0, 100), command = self.__doSfxLevel, thumb_geom=(self.guiButton.find('**/QuitBtn_UP')), thumb_relief=None, thumb_geom_scale=1)
self.SoundFX_toggleSlider.setScale(0.4, 0.4, 0.4)
self.SoundFX_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'SFX Volume', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0.2))
self.ToonChatSounds_toggleButton = DirectButton(parent = self.optionsNode, relief = None, image = (self.guiButton.find('**/QuitBtn_UP'),
self.guiButton.find('**/QuitBtn_DN'),
self.guiButton.find('**/QuitBtn_RLVR'),
self.guiButton.find('**/QuitBtn_UP')), image3_color = Vec4(0.5, 0.5, 0.5, 0.5), image_scale = (0.7, 1, 1), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_scale = 0.052, text_pos = (0, -.02), pos = (0, 0, -.1), command = self.__doToggleToonChatSounds)
self.ToonChatSounds_toggleButton.setScale(0.8)
self.ToonChatSounds_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Toon Chat Sounds', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, 0))
self.__setToonChatSoundsButton()
def displayControlOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
self.WASD_Label = DirectLabel(parent=self.optionsNode, relief=None, text='', text_align=TextNode.ACenter, text_scale=0.052, text_wordwrap=16, pos=(0, 0, .4))
self.WASD_toggleButton = DirectButton(parent=self.optionsNode, relief=None, image=(self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='', text_scale = 0.052, text_pos=(0, -.02), pos=(0, 0, .3), command=self.__doToggleWASD)
self.keymapDialogButton = DirectButton(parent=self.optionsNode, relief = None, image = (self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image_scale = (0.7, 1, 1), text='Change Keybinds', text_scale=(0.03, 0.05, 1), text_pos = (0, -.02), pos = (0, 0, .2), command = self.__openKeyRemapDialog)
self.keymapDialogButton.setScale(1.55, 1.0, 1.0)
self.__setWASDButton()
def displayVideoOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
self.AspectRatioList = DirectOptionMenu(relief = None, parent = self.optionsNode, text_align = TextNode.ACenter, items = GraphicsOptions.AspectRatioLabels, command = self.__doWidescreen, text_scale = .6,
popupMarker_pos = (-1, 0, 0),
popupMarker_relief = None,
highlightScale = (1.1, 1.1),
image = (self.guiButton.find('**/QuitBtn_UP'),
self.guiButton.find('**/QuitBtn_DN'),
self.guiButton.find('**/QuitBtn_RLVR'),
self.guiButton.find('**/QuitBtn_UP')), image_scale = 8, image3_color = Vec4(0.5, 0.5, 0.5, 0.5), text = '', text3_fg = (0.5, 0.5, 0.5, 0.75), text_pos = (0, -.02), pos = (0, 0, .3), image_pos = (0, 0, 0), item_text_align = TextNode.ACenter, popupMenu_text_scale = .5, item_relief = None, item_pressEffect = 1)
self.AspectRatioList.setScale(0.1)
self.AspectRatioList.set(base.Widescreen)
self.Widescreen_Label = DirectLabel(parent = self.optionsNode, relief = None, text = 'Aspect Ratio', text_align = TextNode.ACenter, text_scale = 0.052, pos = (0, 0, .4))
self.DisplaySettings_Label = DirectLabel(parent=self.optionsNode, relief=None, text='', text_align=TextNode.ACenter, text_scale=0.052, text_wordwrap=16, pos=(0, 0, .2))
self.DisplaySettingsButton = DirectButton(parent=self.optionsNode, relief=None, image=(self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image3_color=Vec4(0.5, 0.5, 0.5, 0.5), image_scale=(0.7, 1, 1), text=TTLocalizer.OptionsPageChange, text3_fg=(0.5, 0.5, 0.5, 0.75), text_scale=0.052, text_pos=(0, -.02), pos=(0, 0, .1), command=self.__doDisplaySettings)
self.fov_Label = DirectLabel(parent=self.optionsNode, relief=None, text='Field of view', text_align=TextNode.ACenter, text_scale = 0.052, text_wordwrap=16, pos=(0, 0, 0))
self.fov_toggleSlider = DirectSlider(parent=self.optionsNode, pos=(0, 0, -.1),
value=settings['fieldofview'], pageSize=5, range=(30, 120), command=self.__doFovLevel, thumb_geom=(self.guiButton.find('**/QuitBtn_UP')), thumb_relief=None, thumb_geom_scale=1)
self.fov_toggleSlider.setScale(0.25)
self.fov_resetButton = DirectButton(parent=self.optionsNode, relief=None, image=(self.guiButton.find('**/QuitBtn_UP'), self.guiButton.find('**/QuitBtn_DN'), self.guiButton.find('**/QuitBtn_RLVR')), image_scale=(0.7, 1, 1), text='Reset FOV', text_scale=0.052, text_pos = (0, -.02), pos=(0, 0, -.2), command=self.__resetFov)
self.fovsliderText = OnscreenText('0.0', scale=.3, pos=(0, .1), fg=(1, 1, 1, 1), style = 3)
self.fovsliderText.reparentTo(self.fov_toggleSlider.thumb)
self.__doFovLevel()
self.__setDisplaySettings()
def delSoundOptions(self):
if self.Music_Label:
self.Music_Label.destroy()
self.Music_Label = None
if self.Music_toggleSlider:
self.Music_toggleSlider.destroy()
self.Music_toggleSlider = None
if self.SoundFX_Label:
self.SoundFX_Label.destroy()
self.SoundFX_Label = None
if self.SoundFX_toggleSlider:
self.SoundFX_toggleSlider.destroy()
self.SoundFX_toggleSlider = None
if self.ToonChatSounds_Label:
self.ToonChatSounds_Label.destroy()
self.ToonChatSounds_Label = None
if self.ToonChatSounds_toggleButton:
self.ToonChatSounds_toggleButton.destroy()
self.ToonChatSounds_toggleButton = None
def delControlOptions(self):
if self.WASD_Label:
self.WASD_Label.destroy()
self.WASD_Label = None
if self.WASD_toggleButton:
self.WASD_toggleButton.destroy()
self.WASD_toggleButton = None
if self.keymapDialogButton:
self.keymapDialogButton.destroy()
self.keymapDialogButton = None
def delVideoOptions(self):
if self.Widescreen_Label:
self.Widescreen_Label.destroy()
self.Widescreen_Label = None
if self.AspectRatioList:
self.AspectRatioList.destroy()
self.AspectRatioList = None
if self.DisplaySettings_Label:
self.DisplaySettings_Label.destroy()
self.DisplaySettings_Label = None
if self.DisplaySettingsButton:
self.DisplaySettingsButton.destroy()
self.DisplaySettingsButton = None
if self.fov_toggleSlider:
self.fov_toggleSlider.destroy()
self.fov_toggleSlider = None
self.fov_Label.destroy()
self.fov_Label = None
self.fov_resetButton.destroy()
self.fov_resetButton = None
def delAllOptions(self):
self.delSoundOptions()
self.delControlOptions()
self.delVideoOptions()
self.optionsBox.destroy()
del self.optionsBox
self.optionsNode.removeNode()
del self.optionsNode
def __doMusicLevel(self):
vol = self.Music_toggleSlider['value']
vol = float(vol) / 100
settings['musicVol'] = vol
base.musicManager.setVolume(vol)
base.musicActive = vol > 0.0
def __doSfxLevel(self):
vol = self.SoundFX_toggleSlider['value']
vol = float(vol) / 100
settings['sfxVol'] = vol
for sfm in base.sfxManagerList:
sfm.setVolume(vol)
base.sfxActive = vol > 0.0
def __doToggleToonChatSounds(self):
messenger.send('wakeup')
if base.toonChatSounds:
base.toonChatSounds = 0
settings['toonChatSounds'] = False
else:
base.toonChatSounds = 1
settings['toonChatSounds'] = True
self.settingsChanged = 1
self.__setToonChatSoundsButton()
def __setToonChatSoundsButton(self):
if base.toonChatSounds:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOnLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
else:
self.ToonChatSounds_Label['text'] = TTLocalizer.OptionsPageToonChatSoundsOffLabel
self.ToonChatSounds_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
if base.sfxActive:
self.ToonChatSounds_Label.setColorScale(1.0, 1.0, 1.0, 1.0)
self.ToonChatSounds_toggleButton['state'] = DGG.NORMAL
else:
self.ToonChatSounds_Label.setColorScale(0.5, 0.5, 0.5, 0.5)
self.ToonChatSounds_toggleButton['state'] = DGG.DISABLED
def __doWidescreen(self, ratio):
messenger.send('wakeup')
ratio = self.AspectRatioList.selectedIndex
if base.Widescreen != ratio:
base.Widescreen = ratio
settings['Widescreen'] = ratio
self.settingsChanged = 1
base.updateAspectRatio()
def __doToggleWASD(self):
messenger.send('wakeup')
if base.wantCustomControls:
base.wantCustomControls = False
settings['want-Custom-Controls'] = False
else:
base.wantCustomControls = True
settings['want-Custom-Controls'] = True
base.reloadControls()
self.settingsChanged = 1
self.__setWASDButton()
def __setWASDButton(self):
if base.wantCustomControls:
self.WASD_Label['text'] = 'Custom Keymapping is enabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOff
self.keymapDialogButton.show()
else:
self.WASD_Label['text'] = 'Custom Keymapping is disabled.'
self.WASD_toggleButton['text'] = TTLocalizer.OptionsPageToggleOn
self.keymapDialogButton.hide()
def __openKeyRemapDialog(self):
if base.wantCustomControls:
self.controlDialog = ControlRemapDialog.ControlRemap()
def __doDisplaySettings(self):
if self.displaySettings == None:
self.displaySettings = DisplaySettingsDialog.DisplaySettingsDialog()
self.displaySettings.load()
base.accept(self.displaySettings.doneEvent, self.__doneDisplaySettings)
self.displaySettings.enter(True, False)
def __doneDisplaySettings(self, anyChanged, apiChanged):
if anyChanged:
self.__setDisplaySettings()
properties = base.win.getProperties()
self.displaySettingsChanged = 1
self.displaySettingsSize = (properties.getXSize(), properties.getYSize())
self.displaySettingsFullscreen = properties.getFullscreen()
self.displaySettingsBorderless = properties.getUndecorated()
self.displaySettingsApi = base.pipe.getInterfaceName()
self.displaySettingsApiChanged = apiChanged
def __setDisplaySettings(self):
properties = base.win.getProperties()
if properties.getFullscreen():
screensize = 'Fullscreen | %s x %s' % (properties.getXSize(), properties.getYSize())
elif properties.getUndecorated():
screensize = 'Borderless Windowed | %s x %s' % (properties.getXSize(), properties.getYSize())
else:
screensize = 'Windowed'
api = base.pipe.getInterfaceName()
settings = {'screensize': screensize, 'api': api}
text = TTLocalizer.OptionsPageDisplaySettings % settings
self.DisplaySettings_Label['text'] = text
def __doFovLevel(self):
fov = self.fov_toggleSlider['value']
settings['fieldofview'] = fov
base.camLens.setMinFov(fov/(4./3.))
dec = Decimal(fov)
self.fovsliderText['text'] = str(round(fov, 1))
def __resetFov(self):
self.fov_toggleSlider['value'] = 52
settings['fieldofview'] = 52
base.camLens.setMinFov(52/(4./3.))
self.fovsliderText['text'] = str(52)
| true
| true
|
1c43e36be51bc3b9156c0575a20c0ca5254421ba
| 1,708
|
py
|
Python
|
thirdParty/lxml/__init__.py
|
knittledan/Location_Search_Prediction
|
c96e3bfc0c73b646b9a7620bb1655285458fb20d
|
[
"MIT"
] | null | null | null |
thirdParty/lxml/__init__.py
|
knittledan/Location_Search_Prediction
|
c96e3bfc0c73b646b9a7620bb1655285458fb20d
|
[
"MIT"
] | null | null | null |
thirdParty/lxml/__init__.py
|
knittledan/Location_Search_Prediction
|
c96e3bfc0c73b646b9a7620bb1655285458fb20d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------------------
# __init__.py initialization file for lxml
#----------------------------------------------------------------------------------------
import os
import sys
import platform
#----------------------------------------------------------------------------------------
# Defines
#----------------------------------------------------------------------------------------
kMac = 0
kLinux = 1
kWindows = 2
currentDir = os.path.dirname(os.path.realpath(__file__))
version = sys.version_info[:2]
#----------------------------------------------------------------------------------------
# Methods
#----------------------------------------------------------------------------------------
if version == (2, 7):
lxmlVersion = "lxml_py27"
if version == (3, 2):
lxmlVersion = "lxml_py32"
def getOs():
name = platform.system()
if name == 'Darwin':
return kMac
if name == 'Linux':
return kLinux
if name == 'Windows':
return kWindows
if getOs() == kMac:
module = os.path.join(currentDir, 'mac', lxmlVersion)
if getOs() == kLinux:
module = os.path.join(currentDir, 'linux', lxmlVersion)
if getOs() == kWindows:
module = os.path.join(currentDir, 'windows', lxmlVersion)
#----------------------------------------------------------------------------------------
# Package handler
#----------------------------------------------------------------------------------------
# insert os specific lxml package path into sys.path
sys.path.insert(0, module)
# delete empty lxml package
del sys.modules[__name__]
# import os specific lxml package
import lxml
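# note on the package handler above (a sketch of the mechanism, assuming
# standard CPython import semantics rather than anything documented here):
# deleting this placeholder package from sys.modules while it is still
# executing forces the trailing `import lxml` to resolve through sys.path
# again, so it picks up the platform-specific build inserted at sys.path[0]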
| 30.5
| 89
| 0.384075
|
import os
import sys
import platform
kMac = 0
kLinux = 1
kWindows = 2
currentDir = os.path.dirname(os.path.realpath(__file__))
version = sys.version_info[:2]
if version == (2, 7):
lxmlVersion = "lxml_py27"
if version == (3, 2):
lxmlVersion = "lxml_py32"
def getOs():
name = platform.system()
if name == 'Darwin':
return kMac
if name == 'Linux':
return kLinux
if name == 'Windows':
return kWindows
if getOs() == kMac:
module = os.path.join(currentDir, 'mac', lxmlVersion)
if getOs() == kLinux:
module = os.path.join(currentDir, 'linux', lxmlVersion)
if getOs() == kWindows:
module = os.path.join(currentDir, 'windows', lxmlVersion)
sys.path.insert(0, module)
del sys.modules[__name__]
import lxml
| true
| true
|
1c43e4d4ab4dc301594d8265fd0f7be7719b47ae
| 2,611
|
py
|
Python
|
examples/plot_samples.py
|
AWehrhahn/exoplanet_transit_snr
|
f1bdaddb89e1c8b819651bcd2d80ed95d2a1fc0f
|
[
"MIT"
] | null | null | null |
examples/plot_samples.py
|
AWehrhahn/exoplanet_transit_snr
|
f1bdaddb89e1c8b819651bcd2d80ed95d2a1fc0f
|
[
"MIT"
] | null | null | null |
examples/plot_samples.py
|
AWehrhahn/exoplanet_transit_snr
|
f1bdaddb89e1c8b819651bcd2d80ed95d2a1fc0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import corner
import emcee
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from exoorbit.orbit import Orbit
from exoplanet_transit_snr.stellardb import StellarDb
star, planet = "WASP-107", "b"
datasets = {50: "WASP-107b_SNR50", 100: "WASP-107b_SNR100", 200: "WASP-107b_SNR200"}
# Load the nominal data for this star and planet from simbad/nasa exoplanet archive
sdb = StellarDb()
star = sdb.get(star)
planet = star.planets[planet]
orbit = Orbit(star, planet)
rv = orbit.radial_velocity_semiamplitude_planet()
snr = 200
nsysrem = 5
fname = f"MCMC_{star.name}_{planet.name}_SNR{snr}_sysrem{nsysrem}.h5"
# fname = "mcmc_samples.npy"
ndim = 10
nwalkers = 32
labels = ["a", "v_sys", "mass", "radius", "sma", "per", "inc", "ecc", "w", "t0"]
truths = np.array(
[
1,
star.radial_velocity.to_value(u.km / u.s),
planet.mass.to_value(u.M_jup),
planet.radius.to_value(u.R_jup),
planet.sma.to_value(u.AU),
planet.period.to_value(u.day),
planet.inc.to_value(u.deg),
planet.ecc.to_value(u.one),
planet.omega.to_value(u.deg),
planet.t0.mjd,
]
)
sampler = emcee.backends.HDFBackend(fname)
samples = sampler.get_chain()
# samples = np.load(fname)
tau = emcee.autocorr.integrated_time(samples, quiet=True)
# tau = sampler.get_autocorr_time()
burnin = int(2 * np.max(tau))
thin = int(0.5 * np.min(tau))
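# Rule of thumb used here (standard emcee practice): discard roughly twice the
# longest integrated autocorrelation time as burn-in and thin by about half the
# shortest one, so the flattened chain behaves like near-independent draws.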
# Plot the chains for each parameter
fig, axes = plt.subplots(ndim, figsize=(10, 7), sharex=True)
for i in range(ndim):
ax = axes[i]
ax.plot(samples[:, :, i], "k", alpha=0.3)
ax.set_xlim(0, len(samples))
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number")
plt.show()
# sampler.get_chain(discard=2000, flat=True)
# ranges=[(1.0, 1.015), (-150, 150), (0, 1), (0, 2), (0, 5), (0, 10), (70, 110), (0, 1), (40, 160), 0.99]
ranges = [0.9] * len(labels)
flat_samples = samples[burnin::thin].reshape((-1, ndim))
fig = corner.corner(flat_samples, labels=labels, truths=truths, range=ranges)
plt.show()
for i in range(ndim):
low, mid, upp = np.percentile(flat_samples[:, i], [16, 50, 84], axis=0)
sigma = (upp - low) / 2
print(f"{labels[i]}: {mid:.5g} + {upp-mid:.5g} - {mid-low:.5g} ; {truths[i]:.5g}")
# a: 1.0065 + 0.00016413 - 0.00014236 ; 1
# v_sys: 12.922 + 31.738 - 30.919 ; 13.74
# mass: 4.1251 + 4.2402 - 3.6097 ; 0.096
# per: 12.947 + 20.586 - 7.4242 ; 5.7215
# inc: 91.563 + 41.107 - 65.425 ; 89.56
# ecc: 0.49631 + 0.32373 - 0.37054 ; 0.06
# w: 96.66 + 29.538 - 38.358 ; 90
# t0: 57578 + 61.426 - 164.84 ; 57584
| 30.717647
| 105
| 0.640368
|
import corner
import emcee
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from exoorbit.orbit import Orbit
from exoplanet_transit_snr.stellardb import StellarDb
star, planet = "WASP-107", "b"
datasets = {50: "WASP-107b_SNR50", 100: "WASP-107b_SNR100", 200: "WASP-107b_SNR200"}
sdb = StellarDb()
star = sdb.get(star)
planet = star.planets[planet]
orbit = Orbit(star, planet)
rv = orbit.radial_velocity_semiamplitude_planet()
snr = 200
nsysrem = 5
fname = f"MCMC_{star.name}_{planet.name}_SNR{snr}_sysrem{nsysrem}.h5"
ndim = 10
nwalkers = 32
labels = ["a", "v_sys", "mass", "radius", "sma", "per", "inc", "ecc", "w", "t0"]
truths = np.array(
[
1,
star.radial_velocity.to_value(u.km / u.s),
planet.mass.to_value(u.M_jup),
planet.radius.to_value(u.R_jup),
planet.sma.to_value(u.AU),
planet.period.to_value(u.day),
planet.inc.to_value(u.deg),
planet.ecc.to_value(u.one),
planet.omega.to_value(u.deg),
planet.t0.mjd,
]
)
sampler = emcee.backends.HDFBackend(fname)
samples = sampler.get_chain()
tau = emcee.autocorr.integrated_time(samples, quiet=True)
burnin = int(2 * np.max(tau))
thin = int(0.5 * np.min(tau))
fig, axes = plt.subplots(ndim, figsize=(10, 7), sharex=True)
for i in range(ndim):
ax = axes[i]
ax.plot(samples[:, :, i], "k", alpha=0.3)
ax.set_xlim(0, len(samples))
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number")
plt.show()
ranges = [0.9] * len(labels)
flat_samples = samples[burnin::thin].reshape((-1, ndim))
fig = corner.corner(flat_samples, labels=labels, truths=truths, range=ranges)
plt.show()
for i in range(ndim):
low, mid, upp = np.percentile(flat_samples[:, i], [16, 50, 84], axis=0)
sigma = (upp - low) / 2
print(f"{labels[i]}: {mid:.5g} + {upp-mid:.5g} - {mid-low:.5g} ; {truths[i]:.5g}")
| true
| true
|
1c43e6f20ef928918c97de09f2b91fbfbcc389dc
| 669
|
py
|
Python
|
app/core/management/commands/wait_for_db.py
|
amirhosseyn/Django-REST
|
e8c031c8e5d00ae5a9a8732b7c298bb9c2afa8f9
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
amirhosseyn/Django-REST
|
e8c031c8e5d00ae5a9a8732b7c298bb9c2afa8f9
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
amirhosseyn/Django-REST
|
e8c031c8e5d00ae5a9a8732b7c298bb9c2afa8f9
|
[
"MIT"
] | null | null | null |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until db is up 'n running"""
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
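# Usage sketch (follows Django's management-command convention, where the
# command name comes from this module's filename, wait_for_db.py):
#   python manage.py wait_for_db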
| 30.409091
| 77
| 0.647235
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
| true
| true
|
1c43e74182739e0186666a6172b8f37cc901b2d5
| 11,035
|
py
|
Python
|
mistral/services/scheduler.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 3
|
2015-08-28T04:57:56.000Z
|
2017-03-27T10:59:56.000Z
|
mistral/services/scheduler.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 21
|
2015-04-14T22:41:53.000Z
|
2019-02-20T09:30:10.000Z
|
mistral/services/scheduler.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 12
|
2015-08-14T02:27:37.000Z
|
2020-12-31T10:09:21.000Z
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import eventlet
import random
import sys
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from mistral import context
from mistral.db import utils as db_utils
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# All schedulers.
_schedulers = set()
def schedule_call(factory_method_path, target_method_name,
run_after, serializers=None, key=None, **method_args):
"""Schedules call and lately invokes target_method.
Add this call specification to DB, and then after run_after
seconds service CallScheduler invokes the target_method.
:param factory_method_path: Full python-specific path to
factory method that creates a target object that the call will be
made against.
:param target_method_name: Name of a method which will be invoked.
:param run_after: Value in seconds.
:param serializers: map of argument names and their serializer class
paths. Use when an argument is an object of specific type, and needs
to be serialized. Example:
{ "result": "mistral.utils.serializer.ResultSerializer"}
Serializer for the object type must implement serializer interface
in mistral/utils/serializer.py
:param key: Key which can potentially be used for squashing similar
delayed calls.
:param method_args: Target method keyword arguments.
"""
ctx_serializer = context.RpcContextSerializer()
ctx = (
ctx_serializer.serialize_context(context.ctx())
if context.has_ctx() else {}
)
execution_time = (datetime.datetime.now() +
datetime.timedelta(seconds=run_after))
if serializers:
for arg_name, serializer_path in serializers.items():
if arg_name not in method_args:
raise exc.MistralException(
"Serializable method argument %s"
" not found in method_args=%s"
% (arg_name, method_args))
try:
serializer = importutils.import_class(serializer_path)()
except ImportError as e:
raise ImportError(
"Cannot import class %s: %s" % (serializer_path, e)
)
method_args[arg_name] = serializer.serialize(method_args[arg_name])
values = {
'factory_method_path': factory_method_path,
'target_method_name': target_method_name,
'execution_time': execution_time,
'auth_context': ctx,
'serializers': serializers,
'key': key,
'method_arguments': method_args,
'processing': False
}
db_api.create_delayed_call(values)
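# Illustrative usage sketch (the factory path, method name and arguments below
# are hypothetical placeholders, not real Mistral entry points):
#     schedule_call(
#         'mistral.example.HandlerFactory',  # assumed factory class path
#         'run_task',                        # assumed target method name
#         run_after=2,
#         key='task-42',
#         task_id='42'
#     )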
class Scheduler(object):
def __init__(self, fixed_delay, random_delay, batch_size):
self._stopped = False
self._thread = threading.Thread(target=self._loop)
self._thread.daemon = True
self._fixed_delay = fixed_delay
self._random_delay = random_delay
self._batch_size = batch_size
def start(self):
self._thread.start()
def stop(self, graceful=False):
self._stopped = True
if graceful:
self._thread.join()
def _loop(self):
while not self._stopped:
LOG.debug("Starting Scheduler loop [scheduler=%s]...", self)
try:
self._process_delayed_calls()
except Exception:
LOG.exception(
"Scheduler failed to process delayed calls"
" due to unexpected exception."
)
# For some mysterious reason (probably eventlet related)
# the exception is not cleared from the context automatically.
# This causes subsequent log.warning calls to show invalid
# info.
if sys.version_info < (3,):
sys.exc_clear()
eventlet.sleep(
self._fixed_delay +
random.Random().randint(0, self._random_delay * 1000) * 0.001
)
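# The delay above is fixed_delay plus a uniform jitter of up to random_delay
# seconds (randint over milliseconds, scaled by 0.001), which staggers
# concurrent schedulers so they do not poll the database in lockstep.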
def _process_delayed_calls(self, ctx=None):
"""Run delayed required calls.
This algorithm should work with transactions having at least
'READ-COMMITTED' isolation mode.
:param ctx: Auth context.
"""
# Select and capture calls matching time criteria.
db_calls = self._capture_calls(self._batch_size)
if not db_calls:
return
# Determine target methods, deserialize arguments etc.
prepared_calls = self._prepare_calls(db_calls)
# Invoke prepared calls.
self._invoke_calls(prepared_calls)
# Delete invoked calls from DB.
self.delete_calls(db_calls)
@staticmethod
@db_utils.retry_on_db_error
def _capture_calls(batch_size):
"""Captures delayed calls eligible for processing (based on time).
The intention of this method is to select delayed calls based on time
criteria and mark them in DB as being processed so that no other
threads could process them in parallel.
:return: A list of delayed calls captured for further processing.
"""
result = []
time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
with db_api.transaction():
candidates = db_api.get_delayed_calls_to_start(
time_filter,
batch_size
)
for call in candidates:
# Mark this delayed call as being processed in order to
# prevent it from being picked up by a parallel transaction.
db_call, updated_cnt = db_api.update_delayed_call(
id=call.id,
values={'processing': True},
query_filter={'processing': False}
)
# If updated_cnt != 1 then another scheduler
# has already updated it.
if updated_cnt == 1:
result.append(db_call)
LOG.debug("Scheduler captured %s delayed calls.", len(result))
return result
@staticmethod
def _prepare_calls(raw_calls):
"""Prepares delayed calls for invocation.
After delayed calls are selected from the DB, they still need to be
prepared for further usage: we need to build the final target methods
and deserialize arguments, if needed.
:param raw_calls: Delayed calls fetched from DB (DB models).
:return: A list of tuples (target_auth_context, target_method,
method_args) where all data is properly deserialized.
"""
result = []
for call in raw_calls:
LOG.debug(
'Preparing next delayed call. '
'[ID=%s, factory_method_path=%s, target_method_name=%s, '
'method_arguments=%s]', call.id, call.factory_method_path,
call.target_method_name, call.method_arguments
)
target_auth_context = copy.deepcopy(call.auth_context)
if call.factory_method_path:
factory = importutils.import_class(call.factory_method_path)
target_method = getattr(factory(), call.target_method_name)
else:
target_method = importutils.import_class(
call.target_method_name
)
method_args = copy.deepcopy(call.method_arguments)
if call.serializers:
# Deserialize arguments.
for arg_name, ser_path in call.serializers.items():
serializer = importutils.import_class(ser_path)()
deserialized = serializer.deserialize(
method_args[arg_name]
)
method_args[arg_name] = deserialized
result.append((target_auth_context, target_method, method_args))
return result
@staticmethod
def _invoke_calls(delayed_calls):
"""Invokes prepared delayed calls.
:param delayed_calls: Prepared delayed calls represented as tuples
(target_auth_context, target_method, method_args).
"""
ctx_serializer = context.RpcContextSerializer()
for (target_auth_context, target_method, method_args) in delayed_calls:
try:
# Set the correct context for the method.
ctx_serializer.deserialize_context(target_auth_context)
# Invoke the method.
target_method(**method_args)
except Exception as e:
LOG.exception(
"Delayed call failed, method: %s, exception: %s",
target_method,
e
)
finally:
# Remove context.
context.set_ctx(None)
@staticmethod
@db_utils.retry_on_db_error
def delete_calls(db_calls):
"""Deletes delayed calls.
:param db_calls: Delayed calls to delete from DB.
"""
with db_api.transaction():
for call in db_calls:
try:
db_api.delete_delayed_call(call.id)
except Exception as e:
LOG.error(
"Failed to delete delayed call [call=%s, "
"exception=%s]", call, e
)
# We have to re-raise any exception because the transaction
# would be already invalid anyway. If it's a deadlock then
# it will be handled.
raise e
LOG.debug("Scheduler deleted %s delayed calls.", len(db_calls))
def start():
sched = Scheduler(
CONF.scheduler.fixed_delay,
CONF.scheduler.random_delay,
CONF.scheduler.batch_size
)
_schedulers.add(sched)
sched.start()
return sched
def stop_scheduler(sched, graceful=False):
if not sched:
return
sched.stop(graceful)
_schedulers.remove(sched)
def stop_all_schedulers():
for sched in _schedulers:
sched.stop(graceful=True)
_schedulers.clear()
| 32.360704
| 79
| 0.610603
|
import copy
import datetime
import eventlet
import random
import sys
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from mistral import context
from mistral.db import utils as db_utils
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_schedulers = set()
def schedule_call(factory_method_path, target_method_name,
run_after, serializers=None, key=None, **method_args):
ctx_serializer = context.RpcContextSerializer()
ctx = (
ctx_serializer.serialize_context(context.ctx())
if context.has_ctx() else {}
)
execution_time = (datetime.datetime.now() +
datetime.timedelta(seconds=run_after))
if serializers:
for arg_name, serializer_path in serializers.items():
if arg_name not in method_args:
raise exc.MistralException(
"Serializable method argument %s"
" not found in method_args=%s"
% (arg_name, method_args))
try:
serializer = importutils.import_class(serializer_path)()
except ImportError as e:
raise ImportError(
"Cannot import class %s: %s" % (serializer_path, e)
)
method_args[arg_name] = serializer.serialize(method_args[arg_name])
values = {
'factory_method_path': factory_method_path,
'target_method_name': target_method_name,
'execution_time': execution_time,
'auth_context': ctx,
'serializers': serializers,
'key': key,
'method_arguments': method_args,
'processing': False
}
db_api.create_delayed_call(values)
class Scheduler(object):
def __init__(self, fixed_delay, random_delay, batch_size):
self._stopped = False
self._thread = threading.Thread(target=self._loop)
self._thread.daemon = True
self._fixed_delay = fixed_delay
self._random_delay = random_delay
self._batch_size = batch_size
def start(self):
self._thread.start()
def stop(self, graceful=False):
self._stopped = True
if graceful:
self._thread.join()
def _loop(self):
while not self._stopped:
LOG.debug("Starting Scheduler loop [scheduler=%s]...", self)
try:
self._process_delayed_calls()
except Exception:
LOG.exception(
"Scheduler failed to process delayed calls"
" due to unexpected exception."
)
if sys.version_info < (3,):
sys.exc_clear()
eventlet.sleep(
self._fixed_delay +
random.Random().randint(0, self._random_delay * 1000) * 0.001
)
def _process_delayed_calls(self, ctx=None):
db_calls = self._capture_calls(self._batch_size)
if not db_calls:
return
prepared_calls = self._prepare_calls(db_calls)
self._invoke_calls(prepared_calls)
self.delete_calls(db_calls)
@staticmethod
@db_utils.retry_on_db_error
def _capture_calls(batch_size):
result = []
time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)
with db_api.transaction():
candidates = db_api.get_delayed_calls_to_start(
time_filter,
batch_size
)
for call in candidates:
db_call, updated_cnt = db_api.update_delayed_call(
id=call.id,
values={'processing': True},
query_filter={'processing': False}
)
if updated_cnt == 1:
result.append(db_call)
LOG.debug("Scheduler captured %s delayed calls.", len(result))
return result
@staticmethod
def _prepare_calls(raw_calls):
result = []
for call in raw_calls:
LOG.debug(
'Preparing next delayed call. '
'[ID=%s, factory_method_path=%s, target_method_name=%s, '
'method_arguments=%s]', call.id, call.factory_method_path,
call.target_method_name, call.method_arguments
)
target_auth_context = copy.deepcopy(call.auth_context)
if call.factory_method_path:
factory = importutils.import_class(call.factory_method_path)
target_method = getattr(factory(), call.target_method_name)
else:
target_method = importutils.import_class(
call.target_method_name
)
method_args = copy.deepcopy(call.method_arguments)
if call.serializers:
for arg_name, ser_path in call.serializers.items():
serializer = importutils.import_class(ser_path)()
deserialized = serializer.deserialize(
method_args[arg_name]
)
method_args[arg_name] = deserialized
result.append((target_auth_context, target_method, method_args))
return result
@staticmethod
def _invoke_calls(delayed_calls):
ctx_serializer = context.RpcContextSerializer()
for (target_auth_context, target_method, method_args) in delayed_calls:
try:
ctx_serializer.deserialize_context(target_auth_context)
target_method(**method_args)
except Exception as e:
LOG.exception(
"Delayed call failed, method: %s, exception: %s",
target_method,
e
)
finally:
context.set_ctx(None)
@staticmethod
@db_utils.retry_on_db_error
def delete_calls(db_calls):
with db_api.transaction():
for call in db_calls:
try:
db_api.delete_delayed_call(call.id)
except Exception as e:
LOG.error(
"Failed to delete delayed call [call=%s, "
"exception=%s]", call, e
)
raise e
LOG.debug("Scheduler deleted %s delayed calls.", len(db_calls))
def start():
sched = Scheduler(
CONF.scheduler.fixed_delay,
CONF.scheduler.random_delay,
CONF.scheduler.batch_size
)
_schedulers.add(sched)
sched.start()
return sched
def stop_scheduler(sched, graceful=False):
if not sched:
return
sched.stop(graceful)
_schedulers.remove(sched)
def stop_all_schedulers():
for sched in _schedulers:
sched.stop(graceful=True)
_schedulers.clear()
| true
| true
|
1c43e8343e5b9fcf66ef7bb70fa6262538d43d26
| 1,366
|
py
|
Python
|
src/room.py
|
ThaDeveloper/docopt_dojo
|
adc09fda16a84f81776a284249615aa69ebc6861
|
[
"MIT"
] | null | null | null |
src/room.py
|
ThaDeveloper/docopt_dojo
|
adc09fda16a84f81776a284249615aa69ebc6861
|
[
"MIT"
] | 14
|
2017-11-04T09:26:08.000Z
|
2017-11-13T19:24:30.000Z
|
src/room.py
|
ThaDeveloper/docopt_dojo
|
adc09fda16a84f81776a284249615aa69ebc6861
|
[
"MIT"
] | null | null | null |
class Room(object):
'''
The Room class models the rooms in Dojo and
is used as the blueprint from which the LivingSpace
and Office classes inherit properties such
as room_name, room_type and capacity.
'''
def __init__(self, room_name, room_type, capacity):
self.room_type = room_type.strip().title()
self.capacity = capacity
self.room_name = room_name.title()
self.occupants = []
def add_person(self, person):
'''
Adds a person to the room's occupants and
reduces the remaining capacity by one.
'''
self.occupants.append(person)
self.capacity = self.capacity - 1
return self.capacity
class LivingSpace(Room):
'''
The LivingSpace class inherits its properties and
methods from the Room class and overrides properties
such as capacity using the super function call.
'''
def __init__(self, room_name):
super(LivingSpace, self).__init__(
room_name, room_type='Living Space', capacity=4)
class Office(Room):
'''
The Office class inherits its properties and
methods from the Room class and overrides properties
such as capacity using the super function call.
'''
def __init__(self, room_name):
super(Office, self).__init__(room_name, room_type='Office', capacity=6)
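# Illustrative usage (not part of the original module):
#   office = Office('blue')
#   office.add_person('Jane')  # occupants grows; remaining capacity 6 -> 5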
| 31.767442
| 79
| 0.664714
|
class Room(object):
def __init__(self, room_name, room_type, capacity):
self.room_type = room_type.strip().title()
self.capacity = capacity
self.room_name = room_name.title()
self.occupants = []
def add_person(self, person):
self.occupants.append(person)
self.capacity = self.capacity - 1
return self.capacity
class LivingSpace(Room):
def __init__(self, room_name):
super(LivingSpace, self).__init__(
room_name, room_type='Living Space', capacity=4)
class Office(Room):
def __init__(self, room_name):
super(Office, self).__init__(room_name, room_type='Office', capacity=6)
| true
| true
|
1c43e8deb31d64389ccc2664be037b8b793fb6b7
| 1,425
|
py
|
Python
|
app/settings/migrations/0007_default_statuses.py
|
mandarhan/mandarhan
|
9ce38d10e536e0d3e2f907c3b5c560d66ccf8e40
|
[
"MIT"
] | null | null | null |
app/settings/migrations/0007_default_statuses.py
|
mandarhan/mandarhan
|
9ce38d10e536e0d3e2f907c3b5c560d66ccf8e40
|
[
"MIT"
] | 6
|
2020-02-18T03:49:09.000Z
|
2022-03-12T00:10:05.000Z
|
app/settings/migrations/0007_default_statuses.py
|
mandarhan/mandarhan
|
9ce38d10e536e0d3e2f907c3b5c560d66ccf8e40
|
[
"MIT"
] | 1
|
2020-03-25T10:25:43.000Z
|
2020-03-25T10:25:43.000Z
|
from django.db import migrations
DEFAULT_STATUSES = [
{
'name': 'Не подтверждено',
'color': '#ffffff',
},
{
'name': 'Отменено',
'color': '#ff0000',
},
{
'name': 'Подтверждено',
'color': '#daf9d3',
},
{
'name': 'Выезд',
'color': '#000000',
},
{
'name': 'Незаезд',
'color': '#1decf6',
},
{
'name': 'Проживание',
'color': '#048e08',
},
{
'name': 'Резерв',
'color': '#fff900',
},
]
def create_default_statuses(apps, schema_editor):
Status = apps.get_model('app_settings', 'Status')
db_alias = schema_editor.connection.alias
default_statuses = []
order = 0
for default_status in DEFAULT_STATUSES:
order += 1
default_statuses.append(Status(**default_status, my_order=order))
Status.objects.using(db_alias).bulk_create(default_statuses)
def delete_default_statuses(apps, schema_editor):
Status = apps.get_model('app_settings', 'Status')
db_alias = schema_editor.connection.alias
for default_status in DEFAULT_STATUSES:
Status.objects.using(db_alias).filter(**default_status).delete()
class Migration(migrations.Migration):
dependencies = [
('app_settings', '0006_status'),
]
operations = [
migrations.RunPython(create_default_statuses, delete_default_statuses),
]
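# Note (standard Django behaviour): because RunPython is given both a forward
# and a reverse callable, this data migration is reversible; migrating back to
# app_settings 0006 runs delete_default_statuses.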
| 23.360656
| 79
| 0.592281
|
from django.db import migrations
DEFAULT_STATUSES = [
{
'name': 'Не подтверждено',
'color': '#ffffff',
},
{
'name': 'Отменено',
'color': '#ff0000',
},
{
'name': 'Подтверждено',
'color': '#daf9d3',
},
{
'name': 'Выезд',
'color': '#000000',
},
{
'name': 'Незаезд',
'color': '#1decf6',
},
{
'name': 'Проживание',
'color': '#048e08',
},
{
'name': 'Резерв',
'color': '#fff900',
},
]
def create_default_statuses(apps, schema_editor):
Status = apps.get_model('app_settings', 'Status')
db_alias = schema_editor.connection.alias
default_statuses = []
order = 0
for default_status in DEFAULT_STATUSES:
order += 1
default_statuses.append(Status(**default_status, my_order=order))
Status.objects.using(db_alias).bulk_create(default_statuses)
def delete_default_statuses(apps, schema_editor):
Status = apps.get_model('app_settings', 'Status')
db_alias = schema_editor.connection.alias
for default_status in DEFAULT_STATUSES:
Status.objects.using(db_alias).filter(**default_status).delete()
class Migration(migrations.Migration):
dependencies = [
('app_settings', '0006_status'),
]
operations = [
migrations.RunPython(create_default_statuses, delete_default_statuses),
]
| true
| true
|
1c43e93aec0b6c7b8788a78435e14115b15fa430
| 883
|
py
|
Python
|
arjuna/tpi/guiauto/source/page.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | 13
|
2020-05-12T06:32:51.000Z
|
2022-01-24T18:21:19.000Z
|
arjuna/tpi/guiauto/source/page.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | 5
|
2020-02-14T12:51:07.000Z
|
2021-12-01T10:39:51.000Z
|
arjuna/tpi/guiauto/source/page.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | 25
|
2020-01-16T10:44:25.000Z
|
2022-02-24T13:22:22.000Z
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna.tpi.tracker import track
from .base import SingleGuiEntitySource
class GuiPageSource(SingleGuiEntitySource):
'''
Source of a **GuiPage**.
'''
def __init__(self, raw_source):
super().__init__(raw_source)
| 32.703704
| 74
| 0.745187
|
from arjuna.tpi.tracker import track
from .base import SingleGuiEntitySource
class GuiPageSource(SingleGuiEntitySource):
def __init__(self, raw_source):
super().__init__(raw_source)
| true
| true
|
1c43e9d6ffce0cb69156cdc14c9713e3fe44aeb5
| 1,048
|
py
|
Python
|
Crawler/crawlerpchome.py
|
2017HackNTU/J94FintechLa
|
85018fae23cff1d64f3c95c1e0f9c312dd47eade
|
[
"MIT"
] | null | null | null |
Crawler/crawlerpchome.py
|
2017HackNTU/J94FintechLa
|
85018fae23cff1d64f3c95c1e0f9c312dd47eade
|
[
"MIT"
] | null | null | null |
Crawler/crawlerpchome.py
|
2017HackNTU/J94FintechLa
|
85018fae23cff1d64f3c95c1e0f9c312dd47eade
|
[
"MIT"
] | null | null | null |
import requests
from selenium import webdriver
from lxml import etree
import re
import csv
import sys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# reference: http://tw.pyladies.com/~marsw/crawler02.slides.html#/3
# change to mobile website
url = sys.argv[1]  # e.g. http://24h.pchome.com.tw/prod/DGAD7P-A9007UFW1?q=/S/DGADAH
match = re.split(r'\?', url)
url = match[0]
url = url[:11] + 'm.' + url[11:]
driver = webdriver.PhantomJS(executable_path=r'path_to_phantomjs/bin/phantomjs')
driver.get(url)
pageSource = driver.page_source
driver.close()
page = etree.HTML(pageSource)
try:
tags = page.xpath('//*[@id="ProdNick"]/text()')
tagtotal = ''
for tag in tags:
tagtotal += tag
print(tagtotal.strip())
except:
print("name craw fail")
try:
tags2 = page.xpath('//*[@id="ProdInfo"]/ul[1]/li[1]/span/span/text()')[-1]
print(tags2)
except:
print("price craw fail")
try:
tags3 = page.xpath('//*[@id="ProdImg"]/img/@src')[-1]
print(tags3)
except:
print("img craw fail")
| 26.871795
| 87
| 0.678435
|
import requests
from selenium import webdriver
from lxml import etree
import re
import csv
import sys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
url = sys.argv[1]
match = re.split(r'\?', url)
url = match[0]
url = url[:11] + 'm.' + url[11:]
driver = webdriver.PhantomJS(executable_path=r'path_to_phantomjs/bin/phantomjs')
driver.get(url)
pageSource = driver.page_source
driver.close()
page = etree.HTML(pageSource)
try:
tags = page.xpath('//*[@id="ProdNick"]/text()')
tagtotal = ''
for tag in tags:
tagtotal += tag
print(tagtotal.strip())
except:
print("name craw fail")
try:
tags2 = page.xpath('//*[@id="ProdInfo"]/ul[1]/li[1]/span/span/text()')[-1]
print(tags2)
except:
print("price craw fail")
try:
tags3 = page.xpath('//*[@id="ProdImg"]/img/@src')[-1]
print(tags3)
except:
print("img craw fail")
| true
| true
|
1c43eae2870d470e535993d01691bd86de721d61
| 714
|
py
|
Python
|
example/mulit_sleep.py
|
zhzLuke96/Yoi
|
8f5a0b6881c540aab71b8a360002b4d1e9de869a
|
[
"MIT"
] | null | null | null |
example/mulit_sleep.py
|
zhzLuke96/Yoi
|
8f5a0b6881c540aab71b8a360002b4d1e9de869a
|
[
"MIT"
] | null | null | null |
example/mulit_sleep.py
|
zhzLuke96/Yoi
|
8f5a0b6881c540aab71b8a360002b4d1e9de869a
|
[
"MIT"
] | null | null | null |
from yoi.application import Application
import time
app = Application()
@app.router(r"^/sleep/(.+)/?$", methods=["GET"])
def index(request, timer):
time.sleep(int(timer))
return f"server sleep {timer}s"
@app.router(r"^/do/?$", methods=["GET"])
def index():
return f"server do something"
if __name__ == '__main__':
from yoi.server.sel_wsgiServer import WSGIServer
sev = WSGIServer("127.0.0.1",8000).set_application(app)
sev.run()
# from wsgiref.simple_server import make_server
# # httpd = make_server("127.0.0.1", 8000, app)
# httpd = make_server("localhost", 8000, app)
# try:
# httpd.serve_forever()
# except:
# httpd.shutdown()
# raise
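# Manual test sketch (endpoints taken from the routes above; responses assume
# the handlers run as written):
#   curl http://127.0.0.1:8000/sleep/3   -> "server sleep 3s" after ~3 seconds
#   curl http://127.0.0.1:8000/do        -> "server do something"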
| 23.032258
| 59
| 0.635854
|
from yoi.application import Application
import time
app = Application()
@app.router(r"^/sleep/(.+)/?$", methods=["GET"])
def index(request, timer):
time.sleep(int(timer))
return f"server sleep {timer}s"
@app.router(r"^/do/?$", methods=["GET"])
def index():
return f"server do something"
if __name__ == '__main__':
from yoi.server.sel_wsgiServer import WSGIServer
sev = WSGIServer("127.0.0.1",8000).set_application(app)
sev.run()
| true
| true
|
1c43eb9d91553b0505a12be5e48dae5a530b845e
| 55,590
|
py
|
Python
|
nitro/resource/config/ns/nsip.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | 2
|
2020-08-24T18:04:22.000Z
|
2020-08-24T18:04:47.000Z
|
nitro/resource/config/ns/nsip.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | null | null | null |
nitro/resource/config/ns/nsip.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class nsip(base_resource) :
"""Configuration for ip resource."""
def __init__(self) :
self._ipaddress = ""
self._netmask = ""
self._type = ""
self._arp = ""
self._icmp = ""
self._vserver = ""
self._telnet = ""
self._ftp = ""
self._gui = ""
self._ssh = ""
self._snmp = ""
self._mgmtaccess = ""
self._restrictaccess = ""
self._dynamicrouting = ""
self._ospf = ""
self._bgp = ""
self._rip = ""
self._hostroute = ""
self._hostrtgw = ""
self._metric = 0
self._vserverrhilevel = ""
self._vserverrhimode = ""
self._ospflsatype = ""
self._ospfarea = 0
self._state = ""
self._vrid = 0
self._icmpresponse = ""
self._ownernode = 0
self._arpresponse = ""
self._td = 0
self._flags = 0
self._hostrtgwact = ""
self._ospfareaval = 0
self._viprtadv2bsd = False
self._vipvsercount = 0
self._vipvserdowncount = 0
self._vipvsrvrrhiactivecount = 0
self._vipvsrvrrhiactiveupcount = 0
self._freeports = 0
self._riserhimsgcode = 0
self._iptype = []
self.___count = 0
@property
def ipaddress(self) :
"""IPv4 address to create on the NetScaler appliance. Cannot be changed after the IP address is created.<br/>Minimum length = 1."""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
"""IPv4 address to create on the NetScaler appliance. Cannot be changed after the IP address is created.<br/>Minimum length = 1
:param ipaddress:
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def netmask(self) :
"""Subnet mask associated with the IP address."""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
"""Subnet mask associated with the IP address.
:param netmask:
"""
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def type(self) :
"""Type of the IP address to create on the NetScaler appliance. Cannot be changed after the IP address is created. The following are the different types of NetScaler owned IP addresses:
* A Subnet IP (SNIP) address is used by the NetScaler ADC to communicate with the servers. The NetScaler also uses the subnet IP address when generating its own packets, such as packets related to dynamic routing protocols, or to send monitor probes to check the health of the servers.
* A Virtual IP (VIP) address is the IP address associated with a virtual server. It is the IP address to which clients connect. An appliance managing a wide range of traffic may have many VIPs configured. Some of the attributes of the VIP address are customized to meet the requirements of the virtual server.
* A GSLB site IP (GSLBIP) address is associated with a GSLB site. It is not mandatory to specify a GSLBIP address when you initially configure the NetScaler appliance. A GSLBIP address is used only when you create a GSLB site.
* A Cluster IP (CLIP) address is the management address of the cluster. All cluster configurations must be performed by accessing the cluster through this IP address.<br/>Default value: SNIP<br/>Possible values = SNIP, VIP, NSIP, GSLBsiteIP, CLIP.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
"""Type of the IP address to create on the NetScaler appliance. Cannot be changed after the IP address is created. The following are the different types of NetScaler owned IP addresses:
* A Subnet IP (SNIP) address is used by the NetScaler ADC to communicate with the servers. The NetScaler also uses the subnet IP address when generating its own packets, such as packets related to dynamic routing protocols, or to send monitor probes to check the health of the servers.
* A Virtual IP (VIP) address is the IP address associated with a virtual server. It is the IP address to which clients connect. An appliance managing a wide range of traffic may have many VIPs configured. Some of the attributes of the VIP address are customized to meet the requirements of the virtual server.
* A GSLB site IP (GSLBIP) address is associated with a GSLB site. It is not mandatory to specify a GSLBIP address when you initially configure the NetScaler appliance. A GSLBIP address is used only when you create a GSLB site.
* A Cluster IP (CLIP) address is the management address of the cluster. All cluster configurations must be performed by accessing the cluster through this IP address.<br/>Default value: SNIP<br/>Possible values = SNIP, VIP, NSIP, GSLBsiteIP, CLIP
:param type:
"""
try :
self._type = type
except Exception as e:
raise e
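# The SNIP/VIP/GSLBIP/CLIP types described above are usually the first thing
# set on a new nsip object. A minimal sketch of creating a SNIP (hypothetical
# host, credentials, and addresses; assumes nitro_service is importable from
# nitro.service.nitro_service, as in the standard NITRO client):
#   from nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   snip = nsip()
#   snip.ipaddress = "192.168.1.10"
#   snip.netmask = "255.255.255.0"
#   snip.type = nsip.Type.SNIP
#   nsip.add(client, snip)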
@property
def arp(self) :
"""Respond to ARP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._arp
except Exception as e:
raise e
@arp.setter
def arp(self, arp) :
"""Respond to ARP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param arp:
"""
try :
self._arp = arp
except Exception as e:
raise e
@property
def icmp(self) :
"""Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._icmp
except Exception as e:
raise e
@icmp.setter
def icmp(self, icmp) :
"""Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param icmp:
"""
try :
self._icmp = icmp
except Exception as e:
raise e
@property
def vserver(self) :
"""Use this option to set (enable or disable) the virtual server attribute for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._vserver
except Exception as e:
raise e
@vserver.setter
def vserver(self, vserver) :
"""Use this option to set (enable or disable) the virtual server attribute for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param vserver:
"""
try :
self._vserver = vserver
except Exception as e:
raise e
@property
def telnet(self) :
"""Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._telnet
except Exception as e:
raise e
@telnet.setter
def telnet(self, telnet) :
"""Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param telnet:
"""
try :
self._telnet = telnet
except Exception as e:
raise e
@property
def ftp(self) :
"""Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._ftp
except Exception as e:
raise e
@ftp.setter
def ftp(self, ftp) :
"""Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param ftp:
"""
try :
self._ftp = ftp
except Exception as e:
raise e
@property
def gui(self) :
"""Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED."""
try :
return self._gui
except Exception as e:
raise e
@gui.setter
def gui(self, gui) :
"""Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED
:param gui:
"""
try :
self._gui = gui
except Exception as e:
raise e
@property
def ssh(self) :
"""Allow secure shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._ssh
except Exception as e:
raise e
@ssh.setter
def ssh(self, ssh) :
"""Allow secure shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param ssh:
"""
try :
self._ssh = ssh
except Exception as e:
raise e
@property
def snmp(self) :
"""Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._snmp
except Exception as e:
raise e
@snmp.setter
def snmp(self, snmp) :
"""Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param snmp:
"""
try :
self._snmp = snmp
except Exception as e:
raise e
@property
def mgmtaccess(self) :
"""Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._mgmtaccess
except Exception as e:
raise e
@mgmtaccess.setter
def mgmtaccess(self, mgmtaccess) :
"""Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param mgmtaccess:
"""
try :
self._mgmtaccess = mgmtaccess
except Exception as e:
raise e
@property
def restrictaccess(self) :
"""Block access to nonmanagement applications on this IP. This option is applicable for MIPs, SNIPs, and NSIP, and is disabled by default. Nonmanagement applications can run on the underlying NetScaler Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._restrictaccess
except Exception as e:
raise e
@restrictaccess.setter
def restrictaccess(self, restrictaccess) :
"""Block access to nonmanagement applications on this IP. This option is applicable for MIPs, SNIPs, and NSIP, and is disabled by default. Nonmanagement applications can run on the underlying NetScaler Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param restrictaccess:
"""
try :
self._restrictaccess = restrictaccess
except Exception as e:
raise e
@property
def dynamicrouting(self) :
"""Allow dynamic routing on this IP address. Specific to Subnet IP (SNIP) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._dynamicrouting
except Exception as e:
raise e
@dynamicrouting.setter
def dynamicrouting(self, dynamicrouting) :
"""Allow dynamic routing on this IP address. Specific to Subnet IP (SNIP) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param dynamicrouting:
"""
try :
self._dynamicrouting = dynamicrouting
except Exception as e:
raise e
@property
def ospf(self) :
"""Use this option to enable or disable OSPF on this IP address for the entity.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._ospf
except Exception as e:
raise e
@ospf.setter
def ospf(self, ospf) :
"""Use this option to enable or disable OSPF on this IP address for the entity.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param ospf:
"""
try :
self._ospf = ospf
except Exception as e:
raise e
@property
def bgp(self) :
"""Use this option to enable or disable BGP on this IP address for the entity.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._bgp
except Exception as e:
raise e
@bgp.setter
def bgp(self, bgp) :
"""Use this option to enable or disable BGP on this IP address for the entity.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param bgp:
"""
try :
self._bgp = bgp
except Exception as e:
raise e
@property
def rip(self) :
"""Use this option to enable or disable RIP on this IP address for the entity.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._rip
except Exception as e:
raise e
@rip.setter
def rip(self, rip) :
"""Use this option to enable or disable RIP on this IP address for the entity.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param rip:
"""
try :
self._rip = rip
except Exception as e:
raise e
@property
def hostroute(self) :
"""Advertise a route for the VIP address using the dynamic routing protocols running on the NetScaler appliance.<br/>Possible values = ENABLED, DISABLED."""
try :
return self._hostroute
except Exception as e:
raise e
@hostroute.setter
def hostroute(self, hostroute) :
"""Advertise a route for the VIP address using the dynamic routing protocols running on the NetScaler appliance.<br/>Possible values = ENABLED, DISABLED
:param hostroute:
"""
try :
self._hostroute = hostroute
except Exception as e:
raise e
@property
def hostrtgw(self) :
"""IP address of the gateway of the route for this VIP address.<br/>Default value: -1."""
try :
return self._hostrtgw
except Exception as e:
raise e
@hostrtgw.setter
def hostrtgw(self, hostrtgw) :
"""IP address of the gateway of the route for this VIP address.<br/>Default value: -1
:param hostrtgw:
"""
try :
self._hostrtgw = hostrtgw
except Exception as e:
raise e
@property
def metric(self) :
"""Integer value to add to or subtract from the cost of the route advertised for the VIP address.<br/>Minimum length = -16777215."""
try :
return self._metric
except Exception as e:
raise e
@metric.setter
def metric(self, metric) :
"""Integer value to add to or subtract from the cost of the route advertised for the VIP address.<br/>Minimum length = -16777215
:param metric:
"""
try :
self._metric = metric
except Exception as e:
raise e
@property
def vserverrhilevel(self) :
"""Advertise the route for the Virtual IP (VIP) address on the basis of the state of the virtual servers associated with that VIP.
* NONE - Advertise the route for the VIP address, regardless of the state of the virtual servers associated with the address.
* ONE VSERVER - Advertise the route for the VIP address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - Advertise the route for the VIP address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD - Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states.
When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
* If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address.
* If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
* If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers whose RHI STATE is set to ACTIVE is in UP state.
<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD.
"""
try :
return self._vserverrhilevel
except Exception as e:
raise e
@vserverrhilevel.setter
def vserverrhilevel(self, vserverrhilevel) :
"""Advertise the route for the Virtual IP (VIP) address on the basis of the state of the virtual servers associated with that VIP.
* NONE - Advertise the route for the VIP address, regardless of the state of the virtual servers associated with the address.
* ONE VSERVER - Advertise the route for the VIP address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - Advertise the route for the VIP address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD - Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states.
When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
* If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address.
* If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
* If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers whose RHI STATE is set to ACTIVE is in UP state.
<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD
:param vserverrhilevel:
"""
try :
self._vserverrhilevel = vserverrhilevel
except Exception as e:
raise e
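# RHI, as documented above, is configured per VIP. A sketch of adding a VIP
# that is advertised only while all of its virtual servers are UP (addresses
# are hypothetical; client is an authenticated nitro_service session as in
# the earlier sketch):
#   vip = nsip()
#   vip.ipaddress = "10.0.0.100"
#   vip.type = nsip.Type.VIP
#   vip.hostroute = nsip.Hostroute.ENABLED
#   vip.vserverrhilevel = nsip.Vserverrhilevel.ALL_VSERVERS
#   nsip.add(client, vip)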
@property
def vserverrhimode(self) :
"""Advertise the route for the Virtual IP (VIP) address using dynamic routing protocols or using RISE
* DYNMAIC_ROUTING - Advertise the route for the VIP address using dynamic routing protocols (default)
* RISE - Advertise the route for the VIP address using RISE.<br/>Default value: DYNAMIC_ROUTING<br/>Possible values = DYNAMIC_ROUTING, RISE.
"""
try :
return self._vserverrhimode
except Exception as e:
raise e
@vserverrhimode.setter
def vserverrhimode(self, vserverrhimode) :
"""Advertise the route for the Virtual IP (VIP) address using dynamic routing protocols or using RISE
* DYNMAIC_ROUTING - Advertise the route for the VIP address using dynamic routing protocols (default)
* RISE - Advertise the route for the VIP address using RISE.<br/>Default value: DYNAMIC_ROUTING<br/>Possible values = DYNAMIC_ROUTING, RISE
:param vserverrhimode:
"""
try :
self._vserverrhimode = vserverrhimode
except Exception as e:
raise e
@property
def ospflsatype(self) :
"""Type of LSAs to be used by the OSPF protocol, running on the NetScaler appliance, for advertising the route for this VIP address.<br/>Default value: TYPE5<br/>Possible values = TYPE1, TYPE5."""
try :
return self._ospflsatype
except Exception as e:
raise e
@ospflsatype.setter
def ospflsatype(self, ospflsatype) :
"""Type of LSAs to be used by the OSPF protocol, running on the NetScaler appliance, for advertising the route for this VIP address.<br/>Default value: TYPE5<br/>Possible values = TYPE1, TYPE5
:param ospflsatype:
"""
try :
self._ospflsatype = ospflsatype
except Exception as e:
raise e
@property
def ospfarea(self) :
"""ID of the area in which the type1 link-state advertisements (LSAs) are to be advertised for this virtual IP (VIP) address by the OSPF protocol running on the NetScaler appliance. When this parameter is not set, the VIP is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU."""
try :
return self._ospfarea
except Exception as e:
raise e
@ospfarea.setter
def ospfarea(self, ospfarea) :
"""ID of the area in which the type1 link-state advertisements (LSAs) are to be advertised for this virtual IP (VIP) address by the OSPF protocol running on the NetScaler appliance. When this parameter is not set, the VIP is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU
:param ospfarea:
"""
try :
self._ospfarea = ospfarea
except Exception as e:
raise e
@property
def state(self) :
"""Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
"""Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
:param state:
"""
try :
self._state = state
except Exception as e:
raise e
@property
def vrid(self) :
"""A positive integer that uniquely identifies a VMAC address for binding to this VIP address. This binding is used to set up NetScaler appliances in an active-active configuration using VRRP.<br/>Minimum length = 1<br/>Maximum length = 255."""
try :
return self._vrid
except Exception as e:
raise e
@vrid.setter
def vrid(self, vrid) :
"""A positive integer that uniquely identifies a VMAC address for binding to this VIP address. This binding is used to set up NetScaler appliances in an active-active configuration using VRRP.<br/>Minimum length = 1<br/>Maximum length = 255
:param vrid:
"""
try :
self._vrid = vrid
except Exception as e:
raise e
@property
def icmpresponse(self) :
"""Respond to ICMP requests for a Virtual IP (VIP) address on the basis of the states of the virtual servers associated with that VIP. Available settings function as follows:
* NONE - The NetScaler appliance responds to any ICMP request for the VIP address, irrespective of the states of the virtual servers associated with the address.
* ONE VSERVER - The NetScaler appliance responds to any ICMP request for the VIP address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - The NetScaler appliance responds to any ICMP request for the VIP address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD - The behavior depends on the ICMP VSERVER RESPONSE setting on all the associated virtual servers.
The following settings can be made for the ICMP VSERVER RESPONSE parameter on a virtual server:
* If you set ICMP VSERVER RESPONSE to PASSIVE on all virtual servers, NetScaler always responds.
* If you set ICMP VSERVER RESPONSE to ACTIVE on all virtual servers, NetScaler responds if at least one virtual server is UP.
* When you set ICMP VSERVER RESPONSE to ACTIVE on some and PASSIVE on others, NetScaler responds if at least one virtual server set to ACTIVE is UP.<br/>Default value: 5<br/>Possible values = NONE, ONE_VSERVER, ALL_VSERVERS, VSVR_CNTRLD.
"""
try :
return self._icmpresponse
except Exception as e:
raise e
@icmpresponse.setter
def icmpresponse(self, icmpresponse) :
"""Respond to ICMP requests for a Virtual IP (VIP) address on the basis of the states of the virtual servers associated with that VIP. Available settings function as follows:
* NONE - The NetScaler appliance responds to any ICMP request for the VIP address, irrespective of the states of the virtual servers associated with the address.
* ONE VSERVER - The NetScaler appliance responds to any ICMP request for the VIP address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - The NetScaler appliance responds to any ICMP request for the VIP address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD - The behavior depends on the ICMP VSERVER RESPONSE setting on all the associated virtual servers.
The following settings can be made for the ICMP VSERVER RESPONSE parameter on a virtual server:
* If you set ICMP VSERVER RESPONSE to PASSIVE on all virtual servers, NetScaler always responds.
* If you set ICMP VSERVER RESPONSE to ACTIVE on all virtual servers, NetScaler responds if at least one virtual server is UP.
* When you set ICMP VSERVER RESPONSE to ACTIVE on some and PASSIVE on others, NetScaler responds if at least one virtual server set to ACTIVE is UP.<br/>Default value: 5<br/>Possible values = NONE, ONE_VSERVER, ALL_VSERVERS, VSVR_CNTRLD
:param icmpresponse:
"""
try :
self._icmpresponse = icmpresponse
except Exception as e:
raise e
@property
def ownernode(self) :
"""The owner node in a Cluster for this IP address. Owner node can vary from 0 to 31. If ownernode is not specified then the IP is treated as Striped IP.<br/>Default value: 255."""
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode) :
"""The owner node in a Cluster for this IP address. Owner node can vary from 0 to 31. If ownernode is not specified then the IP is treated as Striped IP.<br/>Default value: 255
:param ownernode:
"""
try :
self._ownernode = ownernode
except Exception as e:
raise e
@property
def arpresponse(self) :
"""Respond to ARP requests for a Virtual IP (VIP) address on the basis of the states of the virtual servers associated with that VIP. Available settings function as follows:
* NONE - The NetScaler appliance responds to any ARP request for the VIP address, irrespective of the states of the virtual servers associated with the address.
* ONE VSERVER - The NetScaler appliance responds to any ARP request for the VIP address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - The NetScaler appliance responds to any ARP request for the VIP address if all of the associated virtual servers are in UP state.<br/>Default value: 5<br/>Possible values = NONE, ONE_VSERVER, ALL_VSERVERS.
"""
try :
return self._arpresponse
except Exception as e:
raise e
@arpresponse.setter
def arpresponse(self, arpresponse) :
"""Respond to ARP requests for a Virtual IP (VIP) address on the basis of the states of the virtual servers associated with that VIP. Available settings function as follows:
* NONE - The NetScaler appliance responds to any ARP request for the VIP address, irrespective of the states of the virtual servers associated with the address.
* ONE VSERVER - The NetScaler appliance responds to any ARP request for the VIP address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - The NetScaler appliance responds to any ARP request for the VIP address if all of the associated virtual servers are in UP state.<br/>Default value: 5<br/>Possible values = NONE, ONE_VSERVER, ALL_VSERVERS
:param arpresponse:
"""
try :
self._arpresponse = arpresponse
except Exception as e:
raise e
@property
def td(self) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094."""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
:param td:
"""
try :
self._td = td
except Exception as e:
raise e
@property
def flags(self) :
"""The flags for this entry."""
try :
return self._flags
except Exception as e:
raise e
@property
def hostrtgwact(self) :
"""Actual Gateway used for advertising host route."""
try :
return self._hostrtgwact
except Exception as e:
raise e
@property
def ospfareaval(self) :
"""The area ID of the area in which OSPF Type1 LSAs are advertised."""
try :
return self._ospfareaval
except Exception as e:
raise e
@property
def viprtadv2bsd(self) :
"""Whether this route is advertised to FreeBSD."""
try :
return self._viprtadv2bsd
except Exception as e:
raise e
@property
def vipvsercount(self) :
"""Number of vservers bound to this VIP."""
try :
return self._vipvsercount
except Exception as e:
raise e
@property
def vipvserdowncount(self) :
"""Number of vservers bound to this VIP, which are down."""
try :
return self._vipvserdowncount
except Exception as e:
raise e
@property
def vipvsrvrrhiactivecount(self) :
"""Number of vservers that have RHI state ACTIVE."""
try :
return self._vipvsrvrrhiactivecount
except Exception as e:
raise e
@property
def vipvsrvrrhiactiveupcount(self) :
"""Number of vservers that have RHI state ACTIVE, which are UP."""
try :
return self._vipvsrvrrhiactiveupcount
except Exception as e:
raise e
@property
def freeports(self) :
"""Number of free Ports available on this IP."""
try :
return self._freeports
except Exception as e:
raise e
@property
def riserhimsgcode(self) :
"""The code indicating the rise rhi status."""
try :
return self._riserhimsgcode
except Exception as e:
raise e
@property
def iptype(self) :
""".<br/>Possible values = SNIP, VIP, NSIP, GSLBsiteIP, CLIP."""
try :
return self._iptype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(nsip_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsip
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.ipaddress is not None :
return str(self.ipaddress)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""Use this API to add nsip.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
addresource = nsip()
addresource.ipaddress = resource.ipaddress
addresource.netmask = resource.netmask
addresource.type = resource.type
addresource.arp = resource.arp
addresource.icmp = resource.icmp
addresource.vserver = resource.vserver
addresource.telnet = resource.telnet
addresource.ftp = resource.ftp
addresource.gui = resource.gui
addresource.ssh = resource.ssh
addresource.snmp = resource.snmp
addresource.mgmtaccess = resource.mgmtaccess
addresource.restrictaccess = resource.restrictaccess
addresource.dynamicrouting = resource.dynamicrouting
addresource.ospf = resource.ospf
addresource.bgp = resource.bgp
addresource.rip = resource.rip
addresource.hostroute = resource.hostroute
addresource.hostrtgw = resource.hostrtgw
addresource.metric = resource.metric
addresource.vserverrhilevel = resource.vserverrhilevel
addresource.vserverrhimode = resource.vserverrhimode
addresource.ospflsatype = resource.ospflsatype
addresource.ospfarea = resource.ospfarea
addresource.state = resource.state
addresource.vrid = resource.vrid
addresource.icmpresponse = resource.icmpresponse
addresource.ownernode = resource.ownernode
addresource.arpresponse = resource.arpresponse
addresource.td = resource.td
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].ipaddress = resource[i].ipaddress
addresources[i].netmask = resource[i].netmask
addresources[i].type = resource[i].type
addresources[i].arp = resource[i].arp
addresources[i].icmp = resource[i].icmp
addresources[i].vserver = resource[i].vserver
addresources[i].telnet = resource[i].telnet
addresources[i].ftp = resource[i].ftp
addresources[i].gui = resource[i].gui
addresources[i].ssh = resource[i].ssh
addresources[i].snmp = resource[i].snmp
addresources[i].mgmtaccess = resource[i].mgmtaccess
addresources[i].restrictaccess = resource[i].restrictaccess
addresources[i].dynamicrouting = resource[i].dynamicrouting
addresources[i].ospf = resource[i].ospf
addresources[i].bgp = resource[i].bgp
addresources[i].rip = resource[i].rip
addresources[i].hostroute = resource[i].hostroute
addresources[i].hostrtgw = resource[i].hostrtgw
addresources[i].metric = resource[i].metric
addresources[i].vserverrhilevel = resource[i].vserverrhilevel
addresources[i].vserverrhimode = resource[i].vserverrhimode
addresources[i].ospflsatype = resource[i].ospflsatype
addresources[i].ospfarea = resource[i].ospfarea
addresources[i].state = resource[i].state
addresources[i].vrid = resource[i].vrid
addresources[i].icmpresponse = resource[i].icmpresponse
addresources[i].ownernode = resource[i].ownernode
addresources[i].arpresponse = resource[i].arpresponse
addresources[i].td = resource[i].td
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
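# add() also accepts a list, in which case it goes through the bulk request
# path above. A sketch with two hypothetical SNIPs (client is an
# authenticated nitro_service session):
#   ips = [nsip(), nsip()]
#   ips[0].ipaddress, ips[0].netmask, ips[0].type = "192.168.1.11", "255.255.255.0", nsip.Type.SNIP
#   ips[1].ipaddress, ips[1].netmask, ips[1].type = "192.168.1.12", "255.255.255.0", nsip.Type.SNIP
#   nsip.add(client, ips)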
@classmethod
def delete(cls, client, resource) :
"""Use this API to delete nsip.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
deleteresource = nsip()
if type(resource) != type(deleteresource):
deleteresource.ipaddress = resource
else :
deleteresource.ipaddress = resource.ipaddress
deleteresource.td = resource.td
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i].ipaddress
deleteresources[i].td = resource[i].td
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
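# As the type checks above show, delete() accepts either a bare address
# string or an nsip object (the object form also carries the traffic
# domain). A sketch (hypothetical address; client is an authenticated
# nitro_service session):
#   nsip.delete(client, "192.168.1.10")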
@classmethod
def update(cls, client, resource) :
"""Use this API to update nsip.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = nsip()
updateresource.ipaddress = resource.ipaddress
updateresource.td = resource.td
updateresource.netmask = resource.netmask
updateresource.arp = resource.arp
updateresource.icmp = resource.icmp
updateresource.vserver = resource.vserver
updateresource.telnet = resource.telnet
updateresource.ftp = resource.ftp
updateresource.gui = resource.gui
updateresource.ssh = resource.ssh
updateresource.snmp = resource.snmp
updateresource.mgmtaccess = resource.mgmtaccess
updateresource.restrictaccess = resource.restrictaccess
updateresource.dynamicrouting = resource.dynamicrouting
updateresource.ospf = resource.ospf
updateresource.bgp = resource.bgp
updateresource.rip = resource.rip
updateresource.hostroute = resource.hostroute
updateresource.hostrtgw = resource.hostrtgw
updateresource.metric = resource.metric
updateresource.vserverrhilevel = resource.vserverrhilevel
updateresource.vserverrhimode = resource.vserverrhimode
updateresource.ospflsatype = resource.ospflsatype
updateresource.ospfarea = resource.ospfarea
updateresource.vrid = resource.vrid
updateresource.icmpresponse = resource.icmpresponse
updateresource.arpresponse = resource.arpresponse
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].ipaddress = resource[i].ipaddress
updateresources[i].td = resource[i].td
updateresources[i].netmask = resource[i].netmask
updateresources[i].arp = resource[i].arp
updateresources[i].icmp = resource[i].icmp
updateresources[i].vserver = resource[i].vserver
updateresources[i].telnet = resource[i].telnet
updateresources[i].ftp = resource[i].ftp
updateresources[i].gui = resource[i].gui
updateresources[i].ssh = resource[i].ssh
updateresources[i].snmp = resource[i].snmp
updateresources[i].mgmtaccess = resource[i].mgmtaccess
updateresources[i].restrictaccess = resource[i].restrictaccess
updateresources[i].dynamicrouting = resource[i].dynamicrouting
updateresources[i].ospf = resource[i].ospf
updateresources[i].bgp = resource[i].bgp
updateresources[i].rip = resource[i].rip
updateresources[i].hostroute = resource[i].hostroute
updateresources[i].hostrtgw = resource[i].hostrtgw
updateresources[i].metric = resource[i].metric
updateresources[i].vserverrhilevel = resource[i].vserverrhilevel
updateresources[i].vserverrhimode = resource[i].vserverrhimode
updateresources[i].ospflsatype = resource[i].ospflsatype
updateresources[i].ospfarea = resource[i].ospfarea
updateresources[i].vrid = resource[i].vrid
updateresources[i].icmpresponse = resource[i].icmpresponse
updateresources[i].arpresponse = resource[i].arpresponse
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
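# For update(), only ipaddress (and td) identify the resource; set just the
# properties you want changed on a fresh object. A sketch enabling
# management access on a hypothetical SNIP (client is an authenticated
# nitro_service session):
#   ip = nsip()
#   ip.ipaddress = "192.168.1.10"
#   ip.mgmtaccess = nsip.Mgmtaccess.ENABLED
#   nsip.update(client, ip)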
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of nsip resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = nsip()
if type(resource) != type(unsetresource):
unsetresource.ipaddress = resource
else :
unsetresource.ipaddress = resource.ipaddress
unsetresource.td = resource.td
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipaddress = resource[i].ipaddress
unsetresources[i].td = resource[i].td
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
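# unset() takes the list of property names to reset to their defaults. A
# sketch reverting mgmtaccess on a hypothetical address (client is an
# authenticated nitro_service session; the args format follows the usual
# NITRO convention of property-name strings):
#   nsip.unset(client, "192.168.1.10", ["mgmtaccess"])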
@classmethod
def enable(cls, client, resource) :
"""Use this API to enable nsip.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
enableresource = nsip()
if type(resource) != type(enableresource):
enableresource.ipaddress = resource
else :
enableresource.ipaddress = resource.ipaddress
enableresource.td = resource.td
return enableresource.perform_operation(client,"enable")
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
enableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
enableresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
enableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
enableresources[i].ipaddress = resource[i].ipaddress
enableresources[i].td = resource[i].td
result = cls.perform_operation_bulk_request(client, enableresources,"enable")
return result
except Exception as e :
raise e
@classmethod
def disable(cls, client, resource) :
"""Use this API to disable nsip.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
disableresource = nsip()
if type(resource) != type(disableresource):
disableresource.ipaddress = resource
else :
disableresource.ipaddress = resource.ipaddress
disableresource.td = resource.td
return disableresource.perform_operation(client,"disable")
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
disableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
disableresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
disableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
disableresources[i].ipaddress = resource[i].ipaddress
disableresources[i].td = resource[i].td
result = cls.perform_operation_bulk_request(client, disableresources,"disable")
return result
except Exception as e :
raise e
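# enable() and disable() mirror each other and, like delete(), accept a bare
# address or an nsip object. A sketch (hypothetical address; client is an
# authenticated nitro_service session):
#   nsip.disable(client, "10.0.0.100")
#   nsip.enable(client, "10.0.0.100")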
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the nsip resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = nsip()
response = obj.get_resources(client, option_)
else :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nsip() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
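# Note that the name argument above is matched against this class, so a
# single lookup is done with an nsip object rather than a string. A sketch
# (hypothetical address; client is an authenticated nitro_service session):
#   all_ips = nsip.get(client)
#   probe = nsip()
#   probe.ipaddress = "192.168.1.10"
#   one_ip = nsip.get(client, probe)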
@classmethod
def get_args(cls, client, args) :
"""Use this API to fetch all the nsip resources that are configured on netscaler.
# This uses nsip_args which is a way to provide additional arguments while fetching the resources.
:param client:
:param args:
"""
try :
obj = nsip()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
"""Use this API to fetch filtered set of nsip resources.
Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = nsip()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
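# A sketch of a filtered fetch, e.g. only VIP-type addresses (client is an
# authenticated nitro_service session; the filter grammar is the
# "property:value" form described above):
#   vips = nsip.get_filtered(client, "type:VIP")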
@classmethod
def count(cls, client) :
"""Use this API to count the nsip resources configured on NetScaler.
:param client:
"""
try :
obj = nsip()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
"""Use this API to count filtered the set of nsip resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = nsip()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
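# A sketch of the two counting calls above (client is an authenticated
# nitro_service session; the filter string is hypothetical):
#   total = nsip.count(client)
#   vip_total = nsip.count_filtered(client, "type:VIP")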
class Arpresponse:
""" """
NONE = "NONE"
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
class Iptype:
""" """
SNIP = "SNIP"
VIP = "VIP"
NSIP = "NSIP"
GSLBsiteIP = "GSLBsiteIP"
CLIP = "CLIP"
class Ssh:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class State:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Rip:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Gui:
""" """
ENABLED = "ENABLED"
SECUREONLY = "SECUREONLY"
DISABLED = "DISABLED"
class Ospf:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Dynamicrouting:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Type:
""" """
SNIP = "SNIP"
VIP = "VIP"
NSIP = "NSIP"
GSLBsiteIP = "GSLBsiteIP"
CLIP = "CLIP"
class Ospflsatype:
""" """
TYPE1 = "TYPE1"
TYPE5 = "TYPE5"
class Bgp:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Arp:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Mgmtaccess:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Hostroute:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ftp:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Vserverrhilevel:
""" """
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
NONE = "NONE"
VSVR_CNTRLD = "VSVR_CNTRLD"
class Icmp:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Icmpresponse:
""" """
NONE = "NONE"
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
VSVR_CNTRLD = "VSVR_CNTRLD"
class Vserver:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Snmp:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Restrictaccess:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Vserverrhimode:
""" """
DYNAMIC_ROUTING = "DYNAMIC_ROUTING"
RISE = "RISE"
class Telnet:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class nsip_response(base_response) :
""" """
def __init__(self, length=1) :
self.nsip = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsip = [nsip() for _ in range(length)]
| 39.792412
| 320
| 0.595215
|
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class nsip(base_resource) :
def __init__(self) :
self._ipaddress = ""
self._netmask = ""
self._type = ""
self._arp = ""
self._icmp = ""
self._vserver = ""
self._telnet = ""
self._ftp = ""
self._gui = ""
self._ssh = ""
self._snmp = ""
self._mgmtaccess = ""
self._restrictaccess = ""
self._dynamicrouting = ""
self._ospf = ""
self._bgp = ""
self._rip = ""
self._hostroute = ""
self._hostrtgw = ""
self._metric = 0
self._vserverrhilevel = ""
self._vserverrhimode = ""
self._ospflsatype = ""
self._ospfarea = 0
self._state = ""
self._vrid = 0
self._icmpresponse = ""
self._ownernode = 0
self._arpresponse = ""
self._td = 0
self._flags = 0
self._hostrtgwact = ""
self._ospfareaval = 0
self._viprtadv2bsd = False
self._vipvsercount = 0
self._vipvserdowncount = 0
self._vipvsrvrrhiactivecount = 0
self._vipvsrvrrhiactiveupcount = 0
self._freeports = 0
self._riserhimsgcode = 0
self._iptype = []
self.___count = 0
@property
def ipaddress(self) :
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def netmask(self) :
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def type(self) :
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
try :
self._type = type
except Exception as e:
raise e
@property
def arp(self) :
try :
return self._arp
except Exception as e:
raise e
@arp.setter
def arp(self, arp) :
try :
self._arp = arp
except Exception as e:
raise e
@property
def icmp(self) :
try :
return self._icmp
except Exception as e:
raise e
@icmp.setter
def icmp(self, icmp) :
try :
self._icmp = icmp
except Exception as e:
raise e
@property
def vserver(self) :
try :
return self._vserver
except Exception as e:
raise e
@vserver.setter
def vserver(self, vserver) :
try :
self._vserver = vserver
except Exception as e:
raise e
@property
def telnet(self) :
try :
return self._telnet
except Exception as e:
raise e
@telnet.setter
def telnet(self, telnet) :
try :
self._telnet = telnet
except Exception as e:
raise e
@property
def ftp(self) :
try :
return self._ftp
except Exception as e:
raise e
@ftp.setter
def ftp(self, ftp) :
try :
self._ftp = ftp
except Exception as e:
raise e
@property
def gui(self) :
try :
return self._gui
except Exception as e:
raise e
@gui.setter
def gui(self, gui) :
try :
self._gui = gui
except Exception as e:
raise e
@property
def ssh(self) :
try :
return self._ssh
except Exception as e:
raise e
@ssh.setter
def ssh(self, ssh) :
try :
self._ssh = ssh
except Exception as e:
raise e
@property
def snmp(self) :
try :
return self._snmp
except Exception as e:
raise e
@snmp.setter
def snmp(self, snmp) :
try :
self._snmp = snmp
except Exception as e:
raise e
@property
def mgmtaccess(self) :
try :
return self._mgmtaccess
except Exception as e:
raise e
@mgmtaccess.setter
def mgmtaccess(self, mgmtaccess) :
try :
self._mgmtaccess = mgmtaccess
except Exception as e:
raise e
@property
def restrictaccess(self) :
try :
return self._restrictaccess
except Exception as e:
raise e
@restrictaccess.setter
def restrictaccess(self, restrictaccess) :
try :
self._restrictaccess = restrictaccess
except Exception as e:
raise e
@property
def dynamicrouting(self) :
try :
return self._dynamicrouting
except Exception as e:
raise e
@dynamicrouting.setter
def dynamicrouting(self, dynamicrouting) :
try :
self._dynamicrouting = dynamicrouting
except Exception as e:
raise e
@property
def ospf(self) :
try :
return self._ospf
except Exception as e:
raise e
@ospf.setter
def ospf(self, ospf) :
try :
self._ospf = ospf
except Exception as e:
raise e
@property
def bgp(self) :
try :
return self._bgp
except Exception as e:
raise e
@bgp.setter
def bgp(self, bgp) :
try :
self._bgp = bgp
except Exception as e:
raise e
@property
def rip(self) :
try :
return self._rip
except Exception as e:
raise e
@rip.setter
def rip(self, rip) :
try :
self._rip = rip
except Exception as e:
raise e
@property
def hostroute(self) :
try :
return self._hostroute
except Exception as e:
raise e
@hostroute.setter
def hostroute(self, hostroute) :
try :
self._hostroute = hostroute
except Exception as e:
raise e
@property
def hostrtgw(self) :
try :
return self._hostrtgw
except Exception as e:
raise e
@hostrtgw.setter
def hostrtgw(self, hostrtgw) :
try :
self._hostrtgw = hostrtgw
except Exception as e:
raise e
@property
def metric(self) :
try :
return self._metric
except Exception as e:
raise e
@metric.setter
def metric(self, metric) :
try :
self._metric = metric
except Exception as e:
raise e
@property
def vserverrhilevel(self) :
try :
return self._vserverrhilevel
except Exception as e:
raise e
@vserverrhilevel.setter
def vserverrhilevel(self, vserverrhilevel) :
try :
self._vserverrhilevel = vserverrhilevel
except Exception as e:
raise e
@property
def vserverrhimode(self) :
try :
return self._vserverrhimode
except Exception as e:
raise e
@vserverrhimode.setter
def vserverrhimode(self, vserverrhimode) :
try :
self._vserverrhimode = vserverrhimode
except Exception as e:
raise e
@property
def ospflsatype(self) :
try :
return self._ospflsatype
except Exception as e:
raise e
@ospflsatype.setter
def ospflsatype(self, ospflsatype) :
try :
self._ospflsatype = ospflsatype
except Exception as e:
raise e
@property
def ospfarea(self) :
try :
return self._ospfarea
except Exception as e:
raise e
@ospfarea.setter
def ospfarea(self, ospfarea) :
try :
self._ospfarea = ospfarea
except Exception as e:
raise e
@property
def state(self) :
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
try :
self._state = state
except Exception as e:
raise e
@property
def vrid(self) :
try :
return self._vrid
except Exception as e:
raise e
@vrid.setter
def vrid(self, vrid) :
try :
self._vrid = vrid
except Exception as e:
raise e
@property
def icmpresponse(self) :
try :
return self._icmpresponse
except Exception as e:
raise e
@icmpresponse.setter
def icmpresponse(self, icmpresponse) :
try :
self._icmpresponse = icmpresponse
except Exception as e:
raise e
@property
def ownernode(self) :
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode) :
try :
self._ownernode = ownernode
except Exception as e:
raise e
@property
def arpresponse(self) :
try :
return self._arpresponse
except Exception as e:
raise e
@arpresponse.setter
def arpresponse(self, arpresponse) :
try :
self._arpresponse = arpresponse
except Exception as e:
raise e
@property
def td(self) :
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
try :
self._td = td
except Exception as e:
raise e
@property
def flags(self) :
try :
return self._flags
except Exception as e:
raise e
@property
def hostrtgwact(self) :
try :
return self._hostrtgwact
except Exception as e:
raise e
@property
def ospfareaval(self) :
try :
return self._ospfareaval
except Exception as e:
raise e
@property
def viprtadv2bsd(self) :
try :
return self._viprtadv2bsd
except Exception as e:
raise e
@property
def vipvsercount(self) :
try :
return self._vipvsercount
except Exception as e:
raise e
@property
def vipvserdowncount(self) :
try :
return self._vipvserdowncount
except Exception as e:
raise e
@property
def vipvsrvrrhiactivecount(self) :
try :
return self._vipvsrvrrhiactivecount
except Exception as e:
raise e
@property
def vipvsrvrrhiactiveupcount(self) :
try :
return self._vipvsrvrrhiactiveupcount
except Exception as e:
raise e
@property
def freeports(self) :
try :
return self._freeports
except Exception as e:
raise e
@property
def riserhimsgcode(self) :
try :
return self._riserhimsgcode
except Exception as e:
raise e
@property
def iptype(self) :
try :
return self._iptype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
try :
result = service.payload_formatter.string_to_resource(nsip_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsip
except Exception as e :
raise e
def _get_object_name(self) :
try :
if self.ipaddress is not None :
return str(self.ipaddress)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if type(resource) is not list :
addresource = nsip()
addresource.ipaddress = resource.ipaddress
addresource.netmask = resource.netmask
addresource.type = resource.type
addresource.arp = resource.arp
addresource.icmp = resource.icmp
addresource.vserver = resource.vserver
addresource.telnet = resource.telnet
addresource.ftp = resource.ftp
addresource.gui = resource.gui
addresource.ssh = resource.ssh
addresource.snmp = resource.snmp
addresource.mgmtaccess = resource.mgmtaccess
addresource.restrictaccess = resource.restrictaccess
addresource.dynamicrouting = resource.dynamicrouting
addresource.ospf = resource.ospf
addresource.bgp = resource.bgp
addresource.rip = resource.rip
addresource.hostroute = resource.hostroute
addresource.hostrtgw = resource.hostrtgw
addresource.metric = resource.metric
addresource.vserverrhilevel = resource.vserverrhilevel
addresource.vserverrhimode = resource.vserverrhimode
addresource.ospflsatype = resource.ospflsatype
addresource.ospfarea = resource.ospfarea
addresource.state = resource.state
addresource.vrid = resource.vrid
addresource.icmpresponse = resource.icmpresponse
addresource.ownernode = resource.ownernode
addresource.arpresponse = resource.arpresponse
addresource.td = resource.td
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].ipaddress = resource[i].ipaddress
addresources[i].netmask = resource[i].netmask
addresources[i].type = resource[i].type
addresources[i].arp = resource[i].arp
addresources[i].icmp = resource[i].icmp
addresources[i].vserver = resource[i].vserver
addresources[i].telnet = resource[i].telnet
addresources[i].ftp = resource[i].ftp
addresources[i].gui = resource[i].gui
addresources[i].ssh = resource[i].ssh
addresources[i].snmp = resource[i].snmp
addresources[i].mgmtaccess = resource[i].mgmtaccess
addresources[i].restrictaccess = resource[i].restrictaccess
addresources[i].dynamicrouting = resource[i].dynamicrouting
addresources[i].ospf = resource[i].ospf
addresources[i].bgp = resource[i].bgp
addresources[i].rip = resource[i].rip
addresources[i].hostroute = resource[i].hostroute
addresources[i].hostrtgw = resource[i].hostrtgw
addresources[i].metric = resource[i].metric
addresources[i].vserverrhilevel = resource[i].vserverrhilevel
addresources[i].vserverrhimode = resource[i].vserverrhimode
addresources[i].ospflsatype = resource[i].ospflsatype
addresources[i].ospfarea = resource[i].ospfarea
addresources[i].state = resource[i].state
addresources[i].vrid = resource[i].vrid
addresources[i].icmpresponse = resource[i].icmpresponse
addresources[i].ownernode = resource[i].ownernode
addresources[i].arpresponse = resource[i].arpresponse
addresources[i].td = resource[i].td
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if type(resource) is not list :
deleteresource = nsip()
if type(resource) != type(deleteresource):
deleteresource.ipaddress = resource
else :
deleteresource.ipaddress = resource.ipaddress
deleteresource.td = resource.td
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i].ipaddress
deleteresources[i].td = resource[i].td
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
try :
if type(resource) is not list :
updateresource = nsip()
updateresource.ipaddress = resource.ipaddress
updateresource.td = resource.td
updateresource.netmask = resource.netmask
updateresource.arp = resource.arp
updateresource.icmp = resource.icmp
updateresource.vserver = resource.vserver
updateresource.telnet = resource.telnet
updateresource.ftp = resource.ftp
updateresource.gui = resource.gui
updateresource.ssh = resource.ssh
updateresource.snmp = resource.snmp
updateresource.mgmtaccess = resource.mgmtaccess
updateresource.restrictaccess = resource.restrictaccess
updateresource.dynamicrouting = resource.dynamicrouting
updateresource.ospf = resource.ospf
updateresource.bgp = resource.bgp
updateresource.rip = resource.rip
updateresource.hostroute = resource.hostroute
updateresource.hostrtgw = resource.hostrtgw
updateresource.metric = resource.metric
updateresource.vserverrhilevel = resource.vserverrhilevel
updateresource.vserverrhimode = resource.vserverrhimode
updateresource.ospflsatype = resource.ospflsatype
updateresource.ospfarea = resource.ospfarea
updateresource.vrid = resource.vrid
updateresource.icmpresponse = resource.icmpresponse
updateresource.arpresponse = resource.arpresponse
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].ipaddress = resource[i].ipaddress
updateresources[i].td = resource[i].td
updateresources[i].netmask = resource[i].netmask
updateresources[i].arp = resource[i].arp
updateresources[i].icmp = resource[i].icmp
updateresources[i].vserver = resource[i].vserver
updateresources[i].telnet = resource[i].telnet
updateresources[i].ftp = resource[i].ftp
updateresources[i].gui = resource[i].gui
updateresources[i].ssh = resource[i].ssh
updateresources[i].snmp = resource[i].snmp
updateresources[i].mgmtaccess = resource[i].mgmtaccess
updateresources[i].restrictaccess = resource[i].restrictaccess
updateresources[i].dynamicrouting = resource[i].dynamicrouting
updateresources[i].ospf = resource[i].ospf
updateresources[i].bgp = resource[i].bgp
updateresources[i].rip = resource[i].rip
updateresources[i].hostroute = resource[i].hostroute
updateresources[i].hostrtgw = resource[i].hostrtgw
updateresources[i].metric = resource[i].metric
updateresources[i].vserverrhilevel = resource[i].vserverrhilevel
updateresources[i].vserverrhimode = resource[i].vserverrhimode
updateresources[i].ospflsatype = resource[i].ospflsatype
updateresources[i].ospfarea = resource[i].ospfarea
updateresources[i].vrid = resource[i].vrid
updateresources[i].icmpresponse = resource[i].icmpresponse
updateresources[i].arpresponse = resource[i].arpresponse
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
try :
if type(resource) is not list :
unsetresource = nsip()
if type(resource) != type(unsetresource):
unsetresource.ipaddress = resource
else :
unsetresource.ipaddress = resource.ipaddress
unsetresource.td = resource.td
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipaddress = resource[i].ipaddress
unsetresources[i].td = resource[i].td
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def enable(cls, client, resource) :
try :
if type(resource) is not list :
enableresource = nsip()
if type(resource) != type(enableresource):
enableresource.ipaddress = resource
else :
enableresource.ipaddress = resource.ipaddress
enableresource.td = resource.td
return enableresource.perform_operation(client,"enable")
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
enableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
enableresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
enableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
enableresources[i].ipaddress = resource[i].ipaddress
enableresources[i].td = resource[i].td
result = cls.perform_operation_bulk_request(client, enableresources,"enable")
return result
except Exception as e :
raise e
@classmethod
def disable(cls, client, resource) :
try :
if type(resource) is not list :
disableresource = nsip()
if type(resource) != type(disableresource):
disableresource.ipaddress = resource
else :
disableresource.ipaddress = resource.ipaddress
disableresource.td = resource.td
return disableresource.perform_operation(client,"disable")
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
disableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
disableresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
disableresources = [ nsip() for _ in range(len(resource))]
for i in range(len(resource)) :
disableresources[i].ipaddress = resource[i].ipaddress
disableresources[i].td = resource[i].td
result = cls.perform_operation_bulk_request(client, disableresources,"disable")
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
try :
if not name :
obj = nsip()
response = obj.get_resources(client, option_)
else :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nsip() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
try :
obj = nsip()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
try :
obj = nsip()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
try :
obj = nsip()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
try :
obj = nsip()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Arpresponse:
NONE = "NONE"
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
class Iptype:
SNIP = "SNIP"
VIP = "VIP"
NSIP = "NSIP"
GSLBsiteIP = "GSLBsiteIP"
CLIP = "CLIP"
class Ssh:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Rip:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Gui:
ENABLED = "ENABLED"
SECUREONLY = "SECUREONLY"
DISABLED = "DISABLED"
class Ospf:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Dynamicrouting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Type:
SNIP = "SNIP"
VIP = "VIP"
NSIP = "NSIP"
GSLBsiteIP = "GSLBsiteIP"
CLIP = "CLIP"
class Ospflsatype:
TYPE1 = "TYPE1"
TYPE5 = "TYPE5"
class Bgp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Arp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Mgmtaccess:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Hostroute:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ftp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Vserverrhilevel:
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
NONE = "NONE"
VSVR_CNTRLD = "VSVR_CNTRLD"
class Icmp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Icmpresponse:
NONE = "NONE"
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
VSVR_CNTRLD = "VSVR_CNTRLD"
class Vserver:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Snmp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Restrictaccess:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Vserverrhimode:
DYNAMIC_ROUTING = "DYNAMIC_ROUTING"
RISE = "RISE"
class Telnet:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class nsip_response(base_response) :
def __init__(self, length=1) :
self.nsip = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsip = [nsip() for _ in range(length)]
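# --- Illustrative usage (not part of the generated SDK file) ---
# A minimal sketch of driving the nsip config class above through a NITRO
# session. The nitro_service import path matches the public Python NITRO SDK;
# the appliance address, credentials and SNIP below are assumptions.
from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

def _nsip_toggle_demo():
    client = nitro_service("10.0.0.1", "http")  # hypothetical appliance NSIP
    client.login("nsroot", "nsroot")
    ip = nsip()
    ip.ipaddress = "10.0.0.42"                  # hypothetical SNIP to manage
    nsip.disable(client, ip)                    # maps to perform_operation(..., "disable")
    nsip.enable(client, ip)                     # maps to perform_operation(..., "enable")
    client.logout()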
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c43ec75bb5086504e75a9f3c53c197ac7943cec | size: 27,444 | ext: py | lang: Python
repo_path: pytorch_lightning/metrics/functional/classification.py | repo_name: rwbfd/pytorch-lightning | head_hexsha: f518ee6e25d1499f73cec86ca8b3f584d0fa440d | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
from typing import Callable, Optional, Sequence, Tuple
import torch
from torch.nn import functional as F
from pytorch_lightning.metrics.utils import to_categorical, get_num_classes, reduce, class_reduce
from pytorch_lightning.utilities import rank_zero_warn
def stat_scores(
pred: torch.Tensor,
target: torch.Tensor,
class_index: int, argmax_dim: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Calculates the number of true positive, false positive, true negative
and false negative for a specific class
Args:
pred: prediction tensor
target: target tensor
class_index: class to calculate over
argmax_dim: if pred is a tensor of probabilities, this indicates the
axis the argmax transformation will be applied over
Return:
True Positive, False Positive, True Negative, False Negative, Support
Example:
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([0, 2, 3])
>>> tp, fp, tn, fn, sup = stat_scores(x, y, class_index=1)
>>> tp, fp, tn, fn, sup
(tensor(0), tensor(1), tensor(2), tensor(0), tensor(0))
"""
if pred.ndim == target.ndim + 1:
pred = to_categorical(pred, argmax_dim=argmax_dim)
tp = ((pred == class_index) * (target == class_index)).to(torch.long).sum()
fp = ((pred == class_index) * (target != class_index)).to(torch.long).sum()
tn = ((pred != class_index) * (target != class_index)).to(torch.long).sum()
fn = ((pred != class_index) * (target == class_index)).to(torch.long).sum()
sup = (target == class_index).to(torch.long).sum()
return tp, fp, tn, fn, sup
def stat_scores_multiple_classes(
pred: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
argmax_dim: int = 1,
reduction: str = 'none',
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Calculates the number of true positive, false positive, true negative
and false negative for each class
Args:
pred: prediction tensor
target: target tensor
num_classes: number of classes if known
argmax_dim: if pred is a tensor of probabilities, this indicates the
axis the argmax transformation will be applied over
reduction: a method to reduce metric score over labels (default: none)
Available reduction methods:
- elementwise_mean: takes the mean
- none: pass array
- sum: add elements
Return:
True Positive, False Positive, True Negative, False Negative, Support
Example:
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([0, 2, 3])
>>> tps, fps, tns, fns, sups = stat_scores_multiple_classes(x, y)
>>> tps
tensor([0., 0., 1., 1.])
>>> fps
tensor([0., 1., 0., 0.])
>>> tns
tensor([2., 2., 2., 2.])
>>> fns
tensor([1., 0., 0., 0.])
>>> sups
tensor([1., 0., 1., 1.])
"""
if pred.ndim == target.ndim + 1:
pred = to_categorical(pred, argmax_dim=argmax_dim)
num_classes = get_num_classes(pred=pred, target=target, num_classes=num_classes)
if pred.dtype != torch.bool:
pred = pred.clamp_max(max=num_classes)
if target.dtype != torch.bool:
target = target.clamp_max(max=num_classes)
possible_reductions = ('none', 'sum', 'elementwise_mean')
if reduction not in possible_reductions:
raise ValueError("reduction type %s not supported" % reduction)
if reduction == 'none':
pred = pred.view((-1, )).long()
target = target.view((-1, )).long()
tps = torch.zeros((num_classes + 1,), device=pred.device)
fps = torch.zeros((num_classes + 1,), device=pred.device)
tns = torch.zeros((num_classes + 1,), device=pred.device)
fns = torch.zeros((num_classes + 1,), device=pred.device)
sups = torch.zeros((num_classes + 1,), device=pred.device)
match_true = (pred == target).float()
match_false = 1 - match_true
tps.scatter_add_(0, pred, match_true)
fps.scatter_add_(0, pred, match_false)
fns.scatter_add_(0, target, match_false)
tns = pred.size(0) - (tps + fps + fns)
sups.scatter_add_(0, target, torch.ones_like(match_true))
tps = tps[:num_classes]
fps = fps[:num_classes]
tns = tns[:num_classes]
fns = fns[:num_classes]
sups = sups[:num_classes]
elif reduction == 'sum' or reduction == 'elementwise_mean':
count_match_true = (pred == target).sum().float()
oob_tp, oob_fp, oob_tn, oob_fn, oob_sup = stat_scores(pred, target, num_classes, argmax_dim)
tps = count_match_true - oob_tp
fps = pred.nelement() - count_match_true - oob_fp
fns = pred.nelement() - count_match_true - oob_fn
tns = pred.nelement() * (num_classes + 1) - (tps + fps + fns + oob_tn)
sups = pred.nelement() - oob_sup.float()
if reduction == 'elementwise_mean':
tps /= num_classes
fps /= num_classes
fns /= num_classes
tns /= num_classes
sups /= num_classes
return tps.float(), fps.float(), tns.float(), fns.float(), sups.float()
def accuracy(
pred: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
class_reduction: str = 'micro',
return_state: bool = False
) -> torch.Tensor:
"""
Computes the accuracy classification score
Args:
pred: predicted labels
target: ground truth labels
num_classes: number of classes
class_reduction: method to reduce metric score over labels
- ``'micro'``: calculate metrics globally (default)
- ``'macro'``: calculate metrics for each label, and find their unweighted mean.
- ``'weighted'``: calculate metrics for each label, and find their weighted mean.
- ``'none'``: returns calculated metric per class
        return_state: returns an internal state that can be DDP-reduced
            before doing the final calculation
Return:
A Tensor with the accuracy score.
Example:
>>> x = torch.tensor([0, 1, 2, 3])
>>> y = torch.tensor([0, 1, 2, 2])
>>> accuracy(x, y)
tensor(0.7500)
"""
tps, fps, tns, fns, sups = stat_scores_multiple_classes(
pred=pred, target=target, num_classes=num_classes)
if return_state:
return {'tps': tps, 'sups': sups}
return class_reduce(tps, sups, sups, class_reduction=class_reduction)
def _confmat_normalize(cm):
""" Normalization function for confusion matrix """
cm = cm / cm.sum(-1, keepdim=True)
nan_elements = cm[torch.isnan(cm)].nelement()
if nan_elements != 0:
cm[torch.isnan(cm)] = 0
rank_zero_warn(f'{nan_elements} nan values found in confusion matrix have been replaced with zeros.')
return cm
def precision_recall(
pred: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
class_reduction: str = 'micro',
return_support: bool = False,
return_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Computes precision and recall for different thresholds
Args:
pred: estimated probabilities
target: ground-truth labels
num_classes: number of classes
class_reduction: method to reduce metric score over labels
- ``'micro'``: calculate metrics globally (default)
- ``'macro'``: calculate metrics for each label, and find their unweighted mean.
- ``'weighted'``: calculate metrics for each label, and find their weighted mean.
- ``'none'``: returns calculated metric per class
        return_support: returns the support for each class, needed for fbeta/f1 calculations
        return_state: returns an internal state that can be DDP-reduced
            before doing the final calculation
Return:
Tensor with precision and recall
Example:
>>> x = torch.tensor([0, 1, 2, 3])
>>> y = torch.tensor([0, 2, 2, 2])
>>> precision_recall(x, y, class_reduction='macro')
(tensor(0.5000), tensor(0.3333))
"""
tps, fps, tns, fns, sups = stat_scores_multiple_classes(pred=pred, target=target, num_classes=num_classes)
precision = class_reduce(tps, tps + fps, sups, class_reduction=class_reduction)
recall = class_reduce(tps, tps + fns, sups, class_reduction=class_reduction)
if return_state:
return {'tps': tps, 'fps': fps, 'fns': fns, 'sups': sups}
if return_support:
return precision, recall, sups
return precision, recall
def precision(
pred: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
class_reduction: str = 'micro',
) -> torch.Tensor:
"""
Computes precision score.
Args:
pred: estimated probabilities
target: ground-truth labels
num_classes: number of classes
class_reduction: method to reduce metric score over labels
- ``'micro'``: calculate metrics globally (default)
- ``'macro'``: calculate metrics for each label, and find their unweighted mean.
- ``'weighted'``: calculate metrics for each label, and find their weighted mean.
- ``'none'``: returns calculated metric per class
Return:
Tensor with precision.
Example:
>>> x = torch.tensor([0, 1, 2, 3])
>>> y = torch.tensor([0, 1, 2, 2])
>>> precision(x, y)
tensor(0.7500)
"""
return precision_recall(pred=pred, target=target,
num_classes=num_classes, class_reduction=class_reduction)[0]
def recall(
pred: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
class_reduction: str = 'micro',
) -> torch.Tensor:
"""
Computes recall score.
Args:
pred: estimated probabilities
target: ground-truth labels
num_classes: number of classes
class_reduction: method to reduce metric score over labels
- ``'micro'``: calculate metrics globally (default)
- ``'macro'``: calculate metrics for each label, and find their unweighted mean.
- ``'weighted'``: calculate metrics for each label, and find their weighted mean.
- ``'none'``: returns calculated metric per class
Return:
Tensor with recall.
Example:
>>> x = torch.tensor([0, 1, 2, 3])
>>> y = torch.tensor([0, 1, 2, 2])
>>> recall(x, y)
tensor(0.7500)
"""
return precision_recall(pred=pred, target=target,
num_classes=num_classes, class_reduction=class_reduction)[1]
def _binary_clf_curve(
pred: torch.Tensor,
target: torch.Tensor,
sample_weight: Optional[Sequence] = None,
        pos_label: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/_ranking.py
"""
if sample_weight is not None and not isinstance(sample_weight, torch.Tensor):
sample_weight = torch.tensor(sample_weight, device=pred.device, dtype=torch.float)
# remove class dimension if necessary
if pred.ndim > target.ndim:
pred = pred[:, 0]
desc_score_indices = torch.argsort(pred, descending=True)
pred = pred[desc_score_indices]
target = target[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# pred typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = torch.where(pred[1:] - pred[:-1])[0]
threshold_idxs = F.pad(distinct_value_indices, (0, 1), value=target.size(0) - 1)
target = (target == pos_label).to(torch.long)
tps = torch.cumsum(target * weight, dim=0)[threshold_idxs]
if sample_weight is not None:
# express fps as a cumsum to ensure fps is increasing even in
# the presence of floating point errors
fps = torch.cumsum((1 - target) * weight, dim=0)[threshold_idxs]
else:
fps = 1 + threshold_idxs - tps
return fps, tps, pred[threshold_idxs]
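# Illustrative check (not part of the original module): with tied scores the
# curve keeps a single point per distinct threshold value.
def _binary_clf_curve_demo():
    fps, tps, thresholds = _binary_clf_curve(
        torch.tensor([0.9, 0.9, 0.4, 0.2]),  # two tied top scores collapse
        torch.tensor([1, 0, 1, 0]),
    )
    # fps -> [1., 1., 2.], tps -> [1., 2., 2.], thresholds -> [0.9, 0.4, 0.2]
    return fps, tps, thresholds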
# TODO: deprecated in favor of general ROC in pytorch_lightning/metrics/functional/roc.py
def __roc(
pred: torch.Tensor,
target: torch.Tensor,
sample_weight: Optional[Sequence] = None,
        pos_label: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
    Computes the Receiver Operating Characteristic (ROC). It assumes the classifier is binary.
.. warning:: Deprecated
Args:
pred: estimated probabilities
target: ground-truth labels
sample_weight: sample weights
pos_label: the label for the positive class
Return:
false-positive rate (fpr), true-positive rate (tpr), thresholds
Example:
>>> x = torch.tensor([0, 1, 2, 3])
>>> y = torch.tensor([0, 1, 1, 1])
>>> fpr, tpr, thresholds = __roc(x, y)
>>> fpr
tensor([0., 0., 0., 0., 1.])
>>> tpr
tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
>>> thresholds
tensor([4, 3, 2, 1, 0])
"""
fps, tps, thresholds = _binary_clf_curve(pred=pred, target=target,
sample_weight=sample_weight,
pos_label=pos_label)
# Add an extra threshold position
# to make sure that the curve starts at (0, 0)
tps = torch.cat([torch.zeros(1, dtype=tps.dtype, device=tps.device), tps])
fps = torch.cat([torch.zeros(1, dtype=fps.dtype, device=fps.device), fps])
thresholds = torch.cat([thresholds[0][None] + 1, thresholds])
if fps[-1] <= 0:
raise ValueError("No negative samples in targets, false positive value should be meaningless")
fpr = fps / fps[-1]
if tps[-1] <= 0:
raise ValueError("No positive samples in targets, true positive value should be meaningless")
tpr = tps / tps[-1]
return fpr, tpr, thresholds
# TODO: deprecated in favor of general ROC in pytorch_lightning/metrics/functional/roc.py
def __multiclass_roc(
pred: torch.Tensor,
target: torch.Tensor,
sample_weight: Optional[Sequence] = None,
num_classes: Optional[int] = None,
) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Computes the Receiver Operating Characteristic (ROC) for multiclass predictors.
.. warning:: Deprecated
Args:
pred: estimated probabilities
target: ground-truth labels
sample_weight: sample weights
num_classes: number of classes (default: None, computes automatically from data)
Return:
returns roc for each class.
Number of classes, false-positive rate (fpr), true-positive rate (tpr), thresholds
Example:
>>> pred = torch.tensor([[0.85, 0.05, 0.05, 0.05],
... [0.05, 0.85, 0.05, 0.05],
... [0.05, 0.05, 0.85, 0.05],
... [0.05, 0.05, 0.05, 0.85]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> __multiclass_roc(pred, target) # doctest: +NORMALIZE_WHITESPACE
((tensor([0., 0., 1.]), tensor([0., 1., 1.]), tensor([1.8500, 0.8500, 0.0500])),
(tensor([0., 0., 1.]), tensor([0., 1., 1.]), tensor([1.8500, 0.8500, 0.0500])),
(tensor([0.0000, 0.3333, 1.0000]), tensor([0., 0., 1.]), tensor([1.8500, 0.8500, 0.0500])),
(tensor([0.0000, 0.3333, 1.0000]), tensor([0., 0., 1.]), tensor([1.8500, 0.8500, 0.0500])))
"""
num_classes = get_num_classes(pred, target, num_classes)
class_roc_vals = []
for c in range(num_classes):
pred_c = pred[:, c]
class_roc_vals.append(__roc(pred=pred_c, target=target, sample_weight=sample_weight, pos_label=c))
return tuple(class_roc_vals)
def auc(
x: torch.Tensor,
y: torch.Tensor,
) -> torch.Tensor:
"""
Computes Area Under the Curve (AUC) using the trapezoidal rule
Args:
x: x-coordinates
y: y-coordinates
Return:
Tensor containing AUC score (float)
Example:
>>> x = torch.tensor([0, 1, 2, 3])
>>> y = torch.tensor([0, 1, 2, 2])
>>> auc(x, y)
tensor(4.)
"""
dx = x[1:] - x[:-1]
if (dx < 0).any():
if (dx <= 0).all():
direction = -1.
else:
            raise ValueError(f"The 'x' array is neither increasing nor decreasing: {x}. Reorder is not supported.")
else:
direction = 1.
return direction * torch.trapz(y, x)
def auc_decorator() -> Callable:
def wrapper(func_to_decorate: Callable) -> Callable:
@wraps(func_to_decorate)
def new_func(*args, **kwargs) -> torch.Tensor:
x, y = func_to_decorate(*args, **kwargs)[:2]
return auc(x, y)
return new_func
return wrapper
def multiclass_auc_decorator() -> Callable:
def wrapper(func_to_decorate: Callable) -> Callable:
@wraps(func_to_decorate)
def new_func(*args, **kwargs) -> torch.Tensor:
results = []
for class_result in func_to_decorate(*args, **kwargs):
x, y = class_result[:2]
results.append(auc(x, y))
return torch.stack(results)
return new_func
return wrapper
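# Illustrative sketch (not from the original module): `auc_decorator` turns any
# curve-returning function into one that yields a scalar AUC; `_roc_auc_demo`
# is a hypothetical name.
@auc_decorator()
def _roc_auc_demo(pred, target):
    # __roc returns (fpr, tpr, thresholds); the decorator keeps the first two
    # outputs and integrates tpr over fpr with auc()
    return __roc(pred, target)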
def auroc(
pred: torch.Tensor,
target: torch.Tensor,
sample_weight: Optional[Sequence] = None,
        pos_label: int = 1,
) -> torch.Tensor:
"""
Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) from prediction scores
Args:
pred: estimated probabilities
target: ground-truth labels
sample_weight: sample weights
pos_label: the label for the positive class
Return:
Tensor containing ROCAUC score
Example:
>>> x = torch.tensor([0, 1, 2, 3])
>>> y = torch.tensor([0, 1, 1, 0])
>>> auroc(x, y)
tensor(0.5000)
"""
if any(target > 1):
raise ValueError('AUROC metric is meant for binary classification, but'
' target tensor contains value different from 0 and 1.'
' Use `multiclass_auroc` for multi class classification.')
@auc_decorator()
def _auroc(pred, target, sample_weight, pos_label):
return __roc(pred, target, sample_weight, pos_label)
return _auroc(pred=pred, target=target, sample_weight=sample_weight, pos_label=pos_label)
def multiclass_auroc(
pred: torch.Tensor,
target: torch.Tensor,
sample_weight: Optional[Sequence] = None,
num_classes: Optional[int] = None,
) -> torch.Tensor:
"""
Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) from multiclass
prediction scores
Args:
pred: estimated probabilities, with shape [N, C]
target: ground-truth labels, with shape [N,]
sample_weight: sample weights
num_classes: number of classes (default: None, computes automatically from data)
Return:
Tensor containing ROCAUC score
Example:
>>> pred = torch.tensor([[0.85, 0.05, 0.05, 0.05],
... [0.05, 0.85, 0.05, 0.05],
... [0.05, 0.05, 0.85, 0.05],
... [0.05, 0.05, 0.05, 0.85]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> multiclass_auroc(pred, target, num_classes=4)
tensor(0.6667)
"""
if not torch.allclose(pred.sum(dim=1), torch.tensor(1.0)):
raise ValueError(
"Multiclass AUROC metric expects the target scores to be"
" probabilities, i.e. they should sum up to 1.0 over classes")
if torch.unique(target).size(0) != pred.size(1):
        raise ValueError(
            f"Number of classes found in 'target' ({torch.unique(target).size(0)})"
f" does not equal the number of columns in 'pred' ({pred.size(1)})."
" Multiclass AUROC is not defined when all of the classes do not"
" occur in the target labels.")
if num_classes is not None and num_classes != pred.size(1):
raise ValueError(
f"Number of classes deduced from 'pred' ({pred.size(1)}) does not equal"
f" the number of classes passed in 'num_classes' ({num_classes}).")
@multiclass_auc_decorator()
def _multiclass_auroc(pred, target, sample_weight, num_classes):
return __multiclass_roc(pred, target, sample_weight, num_classes)
class_aurocs = _multiclass_auroc(pred=pred, target=target,
sample_weight=sample_weight,
num_classes=num_classes)
return torch.mean(class_aurocs)
def dice_score(
pred: torch.Tensor,
target: torch.Tensor,
bg: bool = False,
nan_score: float = 0.0,
no_fg_score: float = 0.0,
reduction: str = 'elementwise_mean',
) -> torch.Tensor:
"""
Compute dice score from prediction scores
Args:
pred: estimated probabilities
target: ground-truth labels
bg: whether to also compute dice for the background
nan_score: score to return, if a NaN occurs during computation
no_fg_score: score to return, if no foreground pixel was found in target
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
Return:
Tensor containing dice score
Example:
>>> pred = torch.tensor([[0.85, 0.05, 0.05, 0.05],
... [0.05, 0.85, 0.05, 0.05],
... [0.05, 0.05, 0.85, 0.05],
... [0.05, 0.05, 0.05, 0.85]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> dice_score(pred, target)
tensor(0.3333)
"""
num_classes = pred.shape[1]
bg = (1 - int(bool(bg)))
scores = torch.zeros(num_classes - bg, device=pred.device, dtype=torch.float32)
for i in range(bg, num_classes):
if not (target == i).any():
# no foreground class
scores[i - bg] += no_fg_score
continue
tp, fp, tn, fn, sup = stat_scores(pred=pred, target=target, class_index=i)
denom = (2 * tp + fp + fn).to(torch.float)
# nan result
score_cls = (2 * tp).to(torch.float) / denom if torch.is_nonzero(denom) else nan_score
scores[i - bg] += score_cls
return reduce(scores, reduction=reduction)
def iou(
pred: torch.Tensor,
target: torch.Tensor,
ignore_index: Optional[int] = None,
absent_score: float = 0.0,
num_classes: Optional[int] = None,
reduction: str = 'elementwise_mean',
) -> torch.Tensor:
"""
Intersection over union, or Jaccard index calculation.
Args:
pred: Tensor containing integer predictions, with shape [N, d1, d2, ...]
target: Tensor containing integer targets, with shape [N, d1, d2, ...]
ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute
to the returned score, regardless of reduction method. Has no effect if given an int that is not in the
range [0, num_classes-1], where num_classes is either given or derived from pred and target. By default, no
index is ignored, and all classes are used.
absent_score: score to use for an individual class, if no instances of the class index were present in
`pred` AND no instances of the class index were present in `target`. For example, if we have 3 classes,
[0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be assigned the `absent_score`. Default is
0.0.
num_classes: Optionally specify the number of classes
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
Return:
IoU score : Tensor containing single value if reduction is
'elementwise_mean', or number of classes if reduction is 'none'
Example:
>>> target = torch.randint(0, 2, (10, 25, 25))
>>> pred = torch.tensor(target)
>>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
>>> iou(pred, target)
tensor(0.9660)
"""
if pred.size() != target.size():
raise ValueError(f"'pred' shape ({pred.size()}) must equal 'target' shape ({target.size()})")
if not torch.allclose(pred.float(), pred.int().float()):
raise ValueError("'pred' must contain integer targets.")
num_classes = get_num_classes(pred=pred, target=target, num_classes=num_classes)
tps, fps, tns, fns, sups = stat_scores_multiple_classes(pred, target, num_classes)
scores = torch.zeros(num_classes, device=pred.device, dtype=torch.float32)
for class_idx in range(num_classes):
if class_idx == ignore_index:
continue
tp = tps[class_idx]
fp = fps[class_idx]
fn = fns[class_idx]
sup = sups[class_idx]
# If this class is absent in the target (no support) AND absent in the pred (no true or false
# positives), then use the absent_score for this class.
if sup + tp + fp == 0:
scores[class_idx] = absent_score
continue
denom = tp + fp + fn
# Note that we do not need to worry about division-by-zero here since we know (sup + tp + fp != 0) from above,
# which means ((tp+fn) + tp + fp != 0), which means (2tp + fp + fn != 0). Since all vars are non-negative, we
# can conclude (tp + fp + fn > 0), meaning the denominator is non-zero for each class.
score = tp.to(torch.float) / denom
scores[class_idx] = score
# Remove the ignored class index from the scores.
if ignore_index is not None and ignore_index >= 0 and ignore_index < num_classes:
scores = torch.cat([
scores[:ignore_index],
scores[ignore_index + 1:],
])
return reduce(scores, reduction=reduction)
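# --- Illustrative usage (not part of the original module) ---
# A minimal sketch exercising a few of the functional metrics above on
# arbitrary toy tensors.
def _classification_metrics_demo():
    preds = torch.tensor([0, 1, 2, 2])
    labels = torch.tensor([0, 1, 2, 3])
    print(accuracy(preds, labels))                                   # tensor(0.7500)
    print(precision_recall(preds, labels, class_reduction='macro'))  # macro P and R
    print(iou(preds, labels, reduction='none'))                      # per-class IoU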
avg_line_length: 35.411613 | max_line_length: 119 | alphanum_fraction: 0.606872
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c43ed7d220ae5c354a0880adcfe135d8c75bc34 | size: 533 | ext: py | lang: Python
repo_path: apps/discovery_pyre/setup.py | repo_name: danieldUKIM/uniflex_wishrem | head_hexsha: 44ca1cfaafc33a83e856dbf9eaf9c1b83d0a477b | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(
name='uniflex_app_discovery_pyre',
version='0.1.0',
packages=find_packages(),
url='https://github.com/uniflex',
license='',
author='Piotr Gawlowicz',
author_email='gawlowicz@tu-berlin.de',
description='UniFlex PYRE Discovery Module',
long_description='Implementation of a Dynamic Discovery Module.',
keywords='wireless control',
install_requires=['pyre>=0.3'],
)
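# --- Illustrative usage (not part of the original setup script) ---
# A minimal sketch of installing this module in editable mode for development,
# assuming pip is available for the current interpreter. Kept behind a function
# so it is never run as a side effect of executing setup.py itself.
def _dev_install():
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."])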
avg_line_length: 24.227273 | max_line_length: 69 | alphanum_fraction: 0.679174
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c43edf8164ff697a3643279f919165a53782629 | size: 3,027 | ext: py | lang: Python
repo_path: analysis/ShowerLLH/reco-vs-true-containment.py | repo_name: jrbourbeau/composition | head_hexsha: f8debd81b0467a6094d5ba56a5f0fc6047369d30 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 7 (2017-08-29T16:20:04.000Z to 2018-06-12T16:58:36.000Z) | max_forks_count: 1 (2018-04-03T20:56:40.000Z to 2018-04-03T20:56:40.000Z)
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import argparse
import seaborn.apionly as sns
import composition.support_functions.paths as paths
from composition.support_functions.checkdir import checkdir
from composition.analysis.load_sim import load_sim
# from effective_area import getEff
from ShowerLLH_scripts.analysis.LLH_tools import *
# from LLH_tools import *
# from zfix import zfix
def histogram_2D(x, y, bins, log_counts=False, **opts):
h, xedges, yedges = np.histogram2d(x, y, bins=bins, normed=False)
h = np.rot90(h)
h = np.flipud(h)
h = np.ma.masked_where(h == 0, h)
if log_counts:
h = np.log10(h)
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
colormap = 'viridis'
plt.imshow(h, extent=extent, origin='lower',
interpolation='none', cmap=colormap)
# plt.xlabel('$\log_{10}(E_\mathrm{MC}/\mathrm{GeV})$')
# plt.ylabel('$\log_{10}(E_{\mathrm{ML}}/\mathrm{GeV})$')
# plt.title(r'ShowerLLH - IT73 - {} LLH bins'.format(opts['bintype']))
# plt.xlim([5, 9.5])
# plt.ylim([5, 9.5])
# cb = plt.colorbar(
# label='$\log_{10}{P(E_{\mathrm{ML}}|E_{\mathrm{MC}})}$')
# plt.plot([0, 10], [0, 10], linestyle='--', color='k')
# outfile = opts['outdir'] + '/' + \
# 'MLenergy_vs_MCenergy_{}.png'.format(opts['bintype'])
# plt.savefig(outfile)
# plt.close()
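# Illustrative usage sketch (not part of the original script): toy data for
# the 2D histogram helper above; figure handling follows matplotlib defaults.
def _histogram_2D_demo():
    x = np.random.normal(size=1000)
    y = x + np.random.normal(scale=0.5, size=1000)
    histogram_2D(x, y, bins=40, log_counts=True)
    plt.colorbar(label=r'$\log_{10}(\mathrm{counts})$')
    plt.show()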
if __name__ == "__main__":
# Global variables setup for path names
mypaths = paths.Paths()
p = argparse.ArgumentParser(
description='Creates performance plots for ShowerLLH')
p.add_argument('-c', '--config', dest='config',
default='IT73',
choices=['IT73', 'IT81'],
help='Detector configuration')
p.add_argument('-o', '--outdir', dest='outdir',
default='/home/jbourbeau/public_html/figures/composition/ShowerLLH',
help='Output directory')
p.add_argument('-b', '--bintype', dest='bintype',
default='logdist',
choices=['standard', 'nozenith', 'logdist'],
help='Option for a variety of preset bin values')
p.add_argument('-n', '--numbins', dest='numbins', type=float,
default=30, help='Number of energy bins')
args = p.parse_args()
checkdir(args.outdir + '/')
opts = vars(args).copy()
# df = load_sim()
df, cut_dict = load_sim(return_cut_dict=True)
selection_mask = np.array([True] * len(df))
standard_cut_keys = ['reco_exists', 'MC_zenith',
'IceTopMaxSignalInEdge', 'IceTopMaxSignal']
for key in standard_cut_keys:
selection_mask *= cut_dict[key]
print('n_events before cuts = {}'.format(len(df)))
df = df[selection_mask]
print('n_events after cuts = {}'.format(len(df)))
MC_IT_containment = df.IceTop_FractionContainment
reco_IT_containment = df.reco_IT_containment
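    # A possible continuation (illustrative, not in the original script): compare
    # reconstructed vs. true IceTop containment with the helper above. The
    # containment range used for the bins is an assumption.
    containment_bins = np.linspace(0, 1.5, int(args.numbins) + 1)
    histogram_2D(MC_IT_containment.values, reco_IT_containment.values,
                 bins=containment_bins, log_counts=True, **opts)
    plt.xlabel('MC IceTop containment')
    plt.ylabel('reco IceTop containment')
    plt.colorbar(label=r'$\log_{10}(\mathrm{counts})$')
    plt.savefig(args.outdir + '/reco-vs-true-containment.png')
    plt.close()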
avg_line_length: 36.914634 | max_line_length: 87 | alphanum_fraction: 0.620747
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c43ef84bcf1442ec9423ec76142e69ad5abe1c0 | size: 8,496 | ext: py | lang: Python
repo_path: moto/awslambda/responses.py | repo_name: kitagawa-hr/moto | head_hexsha: 97408552a323af27d9b755e5456888c496a3739d | licenses: ["Apache-2.0"]
max_stars_count: 1 (2019-07-09T17:53:48.000Z to 2019-07-09T17:53:48.000Z) | max_issues_count: null | max_forks_count: 1 (2019-03-22T16:06:53.000Z to 2019-03-22T16:06:53.000Z)
from __future__ import unicode_literals
import json
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from moto.core.utils import amz_crc32, amzn_request_id, path_url
from moto.core.responses import BaseResponse
from .models import lambda_backends
class LambdaResponse(BaseResponse):
@property
def json_body(self):
"""
:return: JSON
:rtype: dict
"""
return json.loads(self.body)
@property
def lambda_backend(self):
"""
Get backend
:return: Lambda Backend
:rtype: moto.awslambda.models.LambdaBackend
"""
return lambda_backends[self.region]
def root(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'GET':
return self._list_functions(request, full_url, headers)
elif request.method == 'POST':
return self._create_function(request, full_url, headers)
else:
raise ValueError("Cannot handle request")
def function(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'GET':
return self._get_function(request, full_url, headers)
elif request.method == 'DELETE':
return self._delete_function(request, full_url, headers)
else:
raise ValueError("Cannot handle request")
def versions(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'GET':
# This is ListVersionByFunction
path = request.path if hasattr(request, 'path') else path_url(request.url)
function_name = path.split('/')[-2]
return self._list_versions_by_function(function_name)
elif request.method == 'POST':
return self._publish_function(request, full_url, headers)
else:
raise ValueError("Cannot handle request")
@amz_crc32
@amzn_request_id
def invoke(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'POST':
return self._invoke(request, full_url)
else:
raise ValueError("Cannot handle request")
@amz_crc32
@amzn_request_id
def invoke_async(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'POST':
return self._invoke_async(request, full_url)
else:
raise ValueError("Cannot handle request")
def tag(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'GET':
return self._list_tags(request, full_url)
elif request.method == 'POST':
return self._tag_resource(request, full_url)
elif request.method == 'DELETE':
return self._untag_resource(request, full_url)
else:
raise ValueError("Cannot handle {0} request".format(request.method))
def policy(self, request, full_url, headers):
if request.method == 'GET':
return self._get_policy(request, full_url, headers)
if request.method == 'POST':
return self._add_policy(request, full_url, headers)
def _add_policy(self, request, full_url, headers):
path = request.path if hasattr(request, 'path') else path_url(request.url)
function_name = path.split('/')[-2]
if self.lambda_backend.get_function(function_name):
policy = request.body.decode('utf8')
self.lambda_backend.add_policy(function_name, policy)
return 200, {}, json.dumps(dict(Statement=policy))
else:
return 404, {}, "{}"
def _get_policy(self, request, full_url, headers):
path = request.path if hasattr(request, 'path') else path_url(request.url)
function_name = path.split('/')[-2]
if self.lambda_backend.get_function(function_name):
lambda_function = self.lambda_backend.get_function(function_name)
return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + lambda_function.policy + "]}"))
else:
return 404, {}, "{}"
def _invoke(self, request, full_url):
response_headers = {}
function_name = self.path.rsplit('/', 2)[-2]
qualifier = self._get_param('qualifier')
fn = self.lambda_backend.get_function(function_name, qualifier)
if fn:
payload = fn.invoke(self.body, self.headers, response_headers)
response_headers['Content-Length'] = str(len(payload))
return 202, response_headers, payload
else:
return 404, response_headers, "{}"
def _invoke_async(self, request, full_url):
response_headers = {}
function_name = self.path.rsplit('/', 3)[-3]
fn = self.lambda_backend.get_function(function_name, None)
if fn:
payload = fn.invoke(self.body, self.headers, response_headers)
response_headers['Content-Length'] = str(len(payload))
return 202, response_headers, payload
else:
return 404, response_headers, "{}"
def _list_functions(self, request, full_url, headers):
result = {
'Functions': []
}
for fn in self.lambda_backend.list_functions():
json_data = fn.get_configuration()
result['Functions'].append(json_data)
return 200, {}, json.dumps(result)
def _list_versions_by_function(self, function_name):
result = {
'Versions': []
}
functions = self.lambda_backend.list_versions_by_function(function_name)
if functions:
for fn in functions:
json_data = fn.get_configuration()
result['Versions'].append(json_data)
return 200, {}, json.dumps(result)
def _create_function(self, request, full_url, headers):
try:
fn = self.lambda_backend.create_function(self.json_body)
except ValueError as e:
return 400, {}, json.dumps({"Error": {"Code": e.args[0], "Message": e.args[1]}})
else:
config = fn.get_configuration()
return 201, {}, json.dumps(config)
def _publish_function(self, request, full_url, headers):
function_name = self.path.rsplit('/', 2)[-2]
fn = self.lambda_backend.publish_function(function_name)
if fn:
config = fn.get_configuration()
return 201, {}, json.dumps(config)
else:
return 404, {}, "{}"
def _delete_function(self, request, full_url, headers):
function_name = self.path.rsplit('/', 1)[-1]
qualifier = self._get_param('Qualifier', None)
if self.lambda_backend.delete_function(function_name, qualifier):
return 204, {}, ""
else:
return 404, {}, "{}"
def _get_function(self, request, full_url, headers):
function_name = self.path.rsplit('/', 1)[-1]
qualifier = self._get_param('Qualifier', None)
fn = self.lambda_backend.get_function(function_name, qualifier)
if fn:
code = fn.get_code()
return 200, {}, json.dumps(code)
else:
return 404, {}, "{}"
def _get_aws_region(self, full_url):
region = self.region_regex.search(full_url)
if region:
return region.group(1)
else:
return self.default_region
def _list_tags(self, request, full_url):
function_arn = unquote(self.path.rsplit('/', 1)[-1])
fn = self.lambda_backend.get_function_by_arn(function_arn)
if fn:
return 200, {}, json.dumps({'Tags': fn.tags})
else:
return 404, {}, "{}"
def _tag_resource(self, request, full_url):
function_arn = unquote(self.path.rsplit('/', 1)[-1])
if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']):
return 200, {}, "{}"
else:
return 404, {}, "{}"
def _untag_resource(self, request, full_url):
function_arn = unquote(self.path.rsplit('/', 1)[-1])
tag_keys = self.querystring['tagKeys']
if self.lambda_backend.untag_resource(function_arn, tag_keys):
return 204, {}, "{}"
else:
return 404, {}, "{}"
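# --- Illustrative usage (not part of this module) ---
# A minimal sketch of exercising these handlers through moto's public mock;
# `mock_lambda` and the boto3 calls are the standard public APIs, the demo
# body itself is hypothetical.
import boto3
from moto import mock_lambda

@mock_lambda
def _list_functions_demo():
    client = boto3.client("lambda", region_name="us-east-1")
    # routed through LambdaResponse.root -> _list_functions above
    return client.list_functions()["Functions"]  # -> [] on a fresh mock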
avg_line_length: 34.819672 | max_line_length: 103 | alphanum_fraction: 0.608639
region = self.region_regex.search(full_url)
if region:
return region.group(1)
else:
return self.default_region
def _list_tags(self, request, full_url):
function_arn = unquote(self.path.rsplit('/', 1)[-1])
fn = self.lambda_backend.get_function_by_arn(function_arn)
if fn:
return 200, {}, json.dumps({'Tags': fn.tags})
else:
return 404, {}, "{}"
def _tag_resource(self, request, full_url):
function_arn = unquote(self.path.rsplit('/', 1)[-1])
if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']):
return 200, {}, "{}"
else:
return 404, {}, "{}"
def _untag_resource(self, request, full_url):
function_arn = unquote(self.path.rsplit('/', 1)[-1])
tag_keys = self.querystring['tagKeys']
if self.lambda_backend.untag_resource(function_arn, tag_keys):
return 204, {}, "{}"
else:
return 404, {}, "{}"
| true
| true
|
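A quick way to exercise these handlers end to end is through boto3 with the mock active. The sketch below is illustrative only: it assumes a moto version that still exports mock_lambda (pre-5.0), and the function name and role ARN are placeholders.

import io
import zipfile

import boto3
from moto import mock_lambda


def _stub_zip():
    # CreateFunction requires a zip payload; this one carries a trivial handler.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("handler.py", "def handler(event, context):\n    return event\n")
    return buf.getvalue()


@mock_lambda
def demo():
    client = boto3.client("lambda", region_name="us-east-1")
    client.create_function(
        FunctionName="demo-fn",  # placeholder name
        Runtime="python3.8",
        Role="arn:aws:iam::123456789012:role/demo-role",  # placeholder ARN
        Handler="handler.handler",
        Code={"ZipFile": _stub_zip()},
    )
    # Served by LambdaResponse.root -> _list_functions above.
    print(client.list_functions()["Functions"][0]["FunctionName"])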
1c43efd4690d44c896278c222e4064eae7a1c463
| 766
|
py
|
Python
|
chalicelib/filter.py
|
uchimanajet7/reacjilator-chalice
|
338daf544432f669f9bd6e78cf91d4363d6b914f
|
[
"MIT"
] | null | null | null |
chalicelib/filter.py
|
uchimanajet7/reacjilator-chalice
|
338daf544432f669f9bd6e78cf91d4363d6b914f
|
[
"MIT"
] | 1
|
2017-12-17T09:35:24.000Z
|
2017-12-18T01:26:54.000Z
|
chalicelib/filter.py
|
uchimanajet7/reacjilator-chalice
|
338daf544432f669f9bd6e78cf91d4363d6b914f
|
[
"MIT"
] | null | null | null |
# List of channels you want to translate.
import os
import json
class Filter:
def __init__(self):
self.dict_filter = self.__open_json_file__('filter.json')
def __open_json_file__(self, file_name):
try:
dir_name = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(dir_name, file_name)
with open(path) as f:
return json.load(f)
        except (OSError, ValueError):
return None
def is_allowed(self, name):
if self.dict_filter is None:
# all allowed
return True
if len(self.dict_filter) == 0:
# all allowed
return True
if self.dict_filter.get(name) is not None:
return True
return False
| 23.9375
| 65
| 0.571802
|
import os
import json
class Filter:
def __init__(self):
self.dict_filter = self.__open_json_file__('filter.json')
def __open_json_file__(self, file_name):
try:
dir_name = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(dir_name, file_name)
with open(path) as f:
return json.load(f)
        except (OSError, ValueError):
return None
def is_allowed(self, name):
if self.dict_filter is None:
return True
if len(self.dict_filter) == 0:
return True
if self.dict_filter.get(name) is not None:
return True
return False
| true
| true
|
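For reference, a minimal usage sketch of the class above; it assumes a filter.json such as {"general": true} next to filter.py, and the channel names are made up. When filter.json is missing or empty, every channel is allowed.

from chalicelib.filter import Filter

f = Filter()
print(f.is_allowed("general"))  # True: listed in the filter
print(f.is_allowed("random"))   # False: a non-empty filter omits it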
1c43f070abcedc58bf17a00a3203fb43ef6b40c7
| 20
|
py
|
Python
|
sdk/python-sdk/verity_sdk/protocols/v0_7/__init__.py
|
tw-bc-group/verity-sdk
|
e932209ab849f04a389bdda0718cd6227187e5cf
|
[
"Apache-2.0"
] | 40
|
2020-07-09T01:52:31.000Z
|
2022-02-19T04:01:23.000Z
|
sdk/python-sdk/verity_sdk/protocols/v0_7/__init__.py
|
tw-bc-group/verity-sdk
|
e932209ab849f04a389bdda0718cd6227187e5cf
|
[
"Apache-2.0"
] | 45
|
2020-06-19T11:00:20.000Z
|
2022-03-02T14:48:12.000Z
|
sdk/python-sdk/verity_sdk/protocols/v0_7/__init__.py
|
tw-bc-group/verity-sdk
|
e932209ab849f04a389bdda0718cd6227187e5cf
|
[
"Apache-2.0"
] | 37
|
2020-06-19T10:37:04.000Z
|
2022-03-15T14:06:40.000Z
|
"""0.7 Protocols"""
| 10
| 19
| 0.55
| true
| true
|
|
1c43f09b4f119c9983b09c54d4a22142b88b1195
| 3,909
|
py
|
Python
|
pong/2_pong_singlecubebouncing.py
|
CrtomirJuren/pygame-projects
|
f710f36050bfe3ece866bbda7d570caa1e037d7a
|
[
"MIT"
] | null | null | null |
pong/2_pong_singlecubebouncing.py
|
CrtomirJuren/pygame-projects
|
f710f36050bfe3ece866bbda7d570caa1e037d7a
|
[
"MIT"
] | null | null | null |
pong/2_pong_singlecubebouncing.py
|
CrtomirJuren/pygame-projects
|
f710f36050bfe3ece866bbda7d570caa1e037d7a
|
[
"MIT"
] | null | null | null |
import sys
import math
import random
import pygame
from pygame.locals import *
import tkinter as tk
from tkinter import messagebox
clock = pygame.time.Clock()
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
#--------------------------draw grid fuction----------------------------
def drawGrid(w,rows,surface):
"""
    This function draws a square grid on the main display.
"""
#distance between grid lines
sizeBtwn = w // rows
x = 0
y = 0
#create grid by drawing lines
for l in range(rows):
x = x + sizeBtwn
y = y + sizeBtwn
#vertical lines
pygame.draw.line(surface, WHITE, (x,0), (x,w))
#horizontal lines
pygame.draw.line(surface, WHITE, (0,y), (w,y))
#-------------------------cube object-----------------------------------------
class cube(object):
"""
    Class for a single grid cube that has a position and movement.
"""
rows = 20 #set number of rows
w = 500 #set pixel screen width
def __init__(self, start, dirnx, dirny, color = WHITE):
        self.pos = start #tuple (x,y)
self.dirnx = 0
self.dirny = 0
        self.color = color #tuple (r,g,b)
def set_direction(self,dirnx,dirny):
self.dirnx = dirnx
self.dirny = dirny
def move(self):
"""
move cube, by adding new direction to previous position
"""
self.pos = (self.pos[0]+self.dirnx, self.pos[1]+self.dirny)
def draw(self,surface):
"""
drawing: convert x,y grid position to pixel position
"""
dis = self.w // self.rows #distance between x and y values
#variables for easy coding
i = self.pos[0] # row
j = self.pos[1] # column
        #draw slightly smaller so we stay inside the square and don't cover the grid
pygame.draw.rect(surface, self.color, (i*dis+1,j*dis+1,dis-2,dis-2 ))
#-----------------------------------------------------------------------------------
def redrawWindow(surface):
global rows, width
#background
surface.fill(BLACK)
#draw grid
drawGrid(width, rows, surface)
#draw ball
ball.draw(surface)
#update display
pygame.display.update()
#---------------------------------------------------------------------------------------
def key_events():
    for event in pygame.event.get():
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
            #pygame.quit()
            #sys.exit()
            return True
    #in pong we move in only y directions; steer the module-level ball
    keys = pygame.key.get_pressed()
    if keys[pygame.K_UP]:
        ball.set_direction(ball.dirnx, -1)
    elif keys[pygame.K_DOWN]:
        ball.set_direction(ball.dirnx, 1)
    return False
#---------------------------------------------------------------------------------------
def main():
global width, rows, ball
#---------------game initialization---------------------------
#create game display
width = 500
rows = 20
#create game objects
win = pygame.display.set_mode((width, width)) #square display
ball = cube((10,1),0,0,WHITE)
ball.set_direction(1,1) #set initial ball movement direction
FPScount = 0
#-----------------------continuous game loop-------------
GameOver = False
while not GameOver:
#pygame.time.delay(50)
clock.tick(10) #game max speed 10 FPS
GameOver = key_events()
#update ball position
ball.move()
#check next direction-------------------------------
#CONSTRAINTS X
if ball.pos[0] <= 0 or ball.pos[0] >= rows-1:
ball.dirnx = -ball.dirnx
#CONSTRAINTS Y
if ball.pos[1] <= 0 or ball.pos[1] >= rows-1:
ball.dirny = -ball.dirny
#-------------------------------------------------
#if we are moving in right direction
#print(f'rows:{rows} ball.pos[0]:{ball.pos[0]} ball.pos[1]:{ball.pos[1]}')
#FPScount += 1
#print(FPScount)
redrawWindow(win)
#---------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------
main()
pygame.quit()
sys.exit()
| 27.723404
| 88
| 0.553083
|
import sys
import math
import random
import pygame
from pygame.locals import *
import tkinter as tk
from tkinter import messagebox
clock = pygame.time.Clock()
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
def drawGrid(w,rows,surface):
sizeBtwn = w // rows
x = 0
y = 0
for l in range(rows):
x = x + sizeBtwn
y = y + sizeBtwn
pygame.draw.line(surface, WHITE, (x,0), (x,w))
pygame.draw.line(surface, WHITE, (0,y), (w,y))
class cube(object):
rows = 20
w = 500
def __init__(self, start, dirnx, dirny, color = WHITE):
self.pos = start
self.dirnx = 0
self.dirny = 0
self.color = color
def set_direction(self,dirnx,dirny):
self.dirnx = dirnx
self.dirny = dirny
def move(self):
self.pos = (self.pos[0]+self.dirnx, self.pos[1]+self.dirny)
def draw(self,surface):
dis = self.w // self.rows
i = self.pos[0]
j = self.pos[1]
pygame.draw.rect(surface, self.color, (i*dis+1,j*dis+1,dis-2,dis-2 ))
def redrawWindow(surface):
global rows, width
surface.fill(BLACK)
drawGrid(width, rows, surface)
ball.draw(surface)
pygame.display.update()
def key_events():
    for event in pygame.event.get():
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
            return True
    keys = pygame.key.get_pressed()
    if keys[pygame.K_UP]:
        ball.set_direction(ball.dirnx, -1)
    elif keys[pygame.K_DOWN]:
        ball.set_direction(ball.dirnx, 1)
    return False
def main():
global width, rows, ball
width = 500
rows = 20
win = pygame.display.set_mode((width, width))
ball = cube((10,1),0,0,WHITE)
ball.set_direction(1,1)
FPScount = 0
GameOver = False
while not GameOver:
clock.tick(10)
GameOver = key_events()
ball.move()
if ball.pos[0] <= 0 or ball.pos[0] >= rows-1:
ball.dirnx = -ball.dirnx
if ball.pos[1] <= 0 or ball.pos[1] >= rows-1:
ball.dirny = -ball.dirny
redrawWindow(win)
main()
pygame.quit()
sys.exit()
| true
| true
|
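A tiny self-contained check of the wall-bounce rule used in the main loop above; the default grid size matches the script's 20 rows.

def bounce(pos, direction, rows=20):
    # Flip the direction component when the cube sits on either border cell.
    if pos <= 0 or pos >= rows - 1:
        direction = -direction
    return direction

assert bounce(0, -1) == 1
assert bounce(19, 1) == -1
assert bounce(10, 1) == 1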
1c43f0cb68057fe546f78196c9cc49dd1da135d3
| 6,696
|
py
|
Python
|
source/minefield.py
|
BastiHz/Minefields
|
46bb66cb3a809f6d21d7811e9a7df214be044fbd
|
[
"MIT"
] | 1
|
2021-02-22T15:32:31.000Z
|
2021-02-22T15:32:31.000Z
|
source/minefield.py
|
BastiHz/Minefields
|
46bb66cb3a809f6d21d7811e9a7df214be044fbd
|
[
"MIT"
] | null | null | null |
source/minefield.py
|
BastiHz/Minefields
|
46bb66cb3a809f6d21d7811e9a7df214be044fbd
|
[
"MIT"
] | null | null | null |
import random
import pygame as pg
import prepare
class Minefield:
def __init__(self, width, height, number_of_mines):
self.width = width
self.height = height
self.num_mines = number_of_mines
self.tiles = prepare.minefield_tiles
self.tile_size = prepare.MINEFIELD_TILE_SIZE
self.surface = pg.Surface((width * self.tile_size,
height * self.tile_size)).convert()
self.pos = (0, 0) # the pos of the minefield surface in the window
self.mouseover_tile = None
self.game_done = False
self.mines_remaining = self.num_mines
self.mines_remaining_changed = True
self.end_message = None
self.grid = set(((x, y) for x in range(self.width)
for y in range(self.height)))
self.covered = self.grid.copy()
        self.mines = set(random.sample(sorted(self.grid), self.num_mines))  # sample from a sequence; sets are not valid for random.sample on Python 3.11+
self.neighbors = {}
for pos in self.grid:
neighbor_list = []
for x, y in ((-1, -1), (-1, 0), (-1, 1), (0, -1),
(0, 1), (1, -1), (1, 0), (1, 1)):
neighbor = (pos[0] + x, pos[1] + y)
if neighbor in self.grid:
neighbor_list.append(neighbor)
self.neighbors[pos] = neighbor_list
self.hints = {}
for pos in self.grid:
if pos not in self.mines:
hint = 0
for neighbor in self.neighbors[pos]:
if neighbor in self.mines:
hint += 1
self.hints[pos] = hint
self.flags = set()
self.questionmarks = set()
self.exploded_mines = set()
self.wrong_flags = set()
self.refresh_surface()
def refresh_surface(self):
for pos in self.grid:
blit_pos = (pos[0] * self.tile_size,
pos[1] * self.tile_size)
if pos in self.covered:
if pos in self.flags:
self.surface.blit(self.tiles["flag"], blit_pos)
if pos in self.wrong_flags:
self.surface.blit(self.tiles["flag_wrong"], blit_pos)
elif pos in self.questionmarks:
self.surface.blit(self.tiles["questionmark"], blit_pos)
else:
self.surface.blit(self.tiles["covered"], blit_pos)
elif pos in self.mines:
if pos in self.exploded_mines:
self.surface.blit(self.tiles["mine_exploded"], blit_pos)
else:
self.surface.blit(self.tiles["mine"], blit_pos)
else:
self.surface.blit(self.tiles[self.hints[pos]], blit_pos)
def update(self, mouse_pos, left_click, right_click, double_click):
if self.game_done:
return
pos = ((mouse_pos[0] - self.pos[0]) // self.tile_size,
(mouse_pos[1] - self.pos[1]) // self.tile_size)
self.mouseover_tile = pos
if right_click and pos in self.covered:
self.set_mark(pos)
elif all((left_click,
pos in self.covered,
pos not in self.flags,
pos not in self.questionmarks)):
self.uncover(pos)
self.check_defeat()
self.check_win()
self.refresh_surface()
elif all((double_click,
pos not in self.covered,
self.hints.get(pos) != 0)):
num_flags = sum((1 for n in self.neighbors[pos] if n in self.flags))
if num_flags == self.hints[pos]:
for neighbor in self.neighbors[pos]:
if all((neighbor in self.covered,
neighbor not in self.flags,
neighbor not in self.questionmarks)):
self.uncover(neighbor)
self.check_defeat()
self.check_win()
self.refresh_surface()
def set_mark(self, pos):
if pos in self.flags:
self.flags.remove(pos)
self.questionmarks.add(pos)
elif pos in self.questionmarks:
self.questionmarks.remove(pos)
else:
self.flags.add(pos)
self.refresh_surface()
self.mines_remaining = self.num_mines - len(self.flags)
self.mines_remaining_changed = True
def uncover(self, pos):
"""Uncovers the tile at pos and all its neighbors which are not
mines, flags or questionmarks. Uses an iterative flood fill because
a recursive approach can exceed the maximum recursion depth.
"""
tiles_to_uncover = {pos}
while tiles_to_uncover:
pos = tiles_to_uncover.pop()
self.covered.remove(pos)
if (pos not in self.mines) and self.hints[pos] == 0:
for neighbor in self.neighbors[pos]:
if all((neighbor in self.covered,
neighbor not in self.flags,
neighbor not in self.questionmarks)):
tiles_to_uncover.add(neighbor)
def check_win(self):
if len(self.covered) == self.num_mines and not self.game_done:
self.game_done = True
self.end_message = "YOU WIN"
def check_defeat(self):
for pos in self.mines:
if pos not in self.covered:
self.exploded_mines.add(pos)
if self.exploded_mines:
self.game_done = True
self.end_message = "GAME OVER"
for pos in self.covered.copy():
if all((pos in self.mines,
pos not in self.flags,
pos not in self.questionmarks)):
self.covered.remove(pos)
elif (pos not in self.mines) and (pos in self.flags):
self.wrong_flags.add(pos)
def draw(self, surface):
surface.blit(self.surface, self.pos)
if self.mouseover_tile is not None:
blit_pos = (self.mouseover_tile[0] * self.tile_size + self.pos[0],
self.mouseover_tile[1] * self.tile_size + self.pos[1])
if self.mouseover_tile in self.flags:
surface.blit(self.tiles["flag_highlighted"], blit_pos)
elif self.mouseover_tile in self.questionmarks:
surface.blit(self.tiles["questionmark_highlighted"], blit_pos)
elif self.mouseover_tile in self.covered:
surface.blit(self.tiles["covered_highlighted"], blit_pos)
self.mouseover_tile = None
| 39.621302
| 80
| 0.538082
|
import random
import pygame as pg
import prepare
class Minefield:
def __init__(self, width, height, number_of_mines):
self.width = width
self.height = height
self.num_mines = number_of_mines
self.tiles = prepare.minefield_tiles
self.tile_size = prepare.MINEFIELD_TILE_SIZE
self.surface = pg.Surface((width * self.tile_size,
height * self.tile_size)).convert()
self.pos = (0, 0)
self.mouseover_tile = None
self.game_done = False
self.mines_remaining = self.num_mines
self.mines_remaining_changed = True
self.end_message = None
self.grid = set(((x, y) for x in range(self.width)
for y in range(self.height)))
self.covered = self.grid.copy()
        self.mines = set(random.sample(sorted(self.grid), self.num_mines))
self.neighbors = {}
for pos in self.grid:
neighbor_list = []
for x, y in ((-1, -1), (-1, 0), (-1, 1), (0, -1),
(0, 1), (1, -1), (1, 0), (1, 1)):
neighbor = (pos[0] + x, pos[1] + y)
if neighbor in self.grid:
neighbor_list.append(neighbor)
self.neighbors[pos] = neighbor_list
self.hints = {}
for pos in self.grid:
if pos not in self.mines:
hint = 0
for neighbor in self.neighbors[pos]:
if neighbor in self.mines:
hint += 1
self.hints[pos] = hint
self.flags = set()
self.questionmarks = set()
self.exploded_mines = set()
self.wrong_flags = set()
self.refresh_surface()
def refresh_surface(self):
for pos in self.grid:
blit_pos = (pos[0] * self.tile_size,
pos[1] * self.tile_size)
if pos in self.covered:
if pos in self.flags:
self.surface.blit(self.tiles["flag"], blit_pos)
if pos in self.wrong_flags:
self.surface.blit(self.tiles["flag_wrong"], blit_pos)
elif pos in self.questionmarks:
self.surface.blit(self.tiles["questionmark"], blit_pos)
else:
self.surface.blit(self.tiles["covered"], blit_pos)
elif pos in self.mines:
if pos in self.exploded_mines:
self.surface.blit(self.tiles["mine_exploded"], blit_pos)
else:
self.surface.blit(self.tiles["mine"], blit_pos)
else:
self.surface.blit(self.tiles[self.hints[pos]], blit_pos)
def update(self, mouse_pos, left_click, right_click, double_click):
if self.game_done:
return
pos = ((mouse_pos[0] - self.pos[0]) // self.tile_size,
(mouse_pos[1] - self.pos[1]) // self.tile_size)
self.mouseover_tile = pos
if right_click and pos in self.covered:
self.set_mark(pos)
elif all((left_click,
pos in self.covered,
pos not in self.flags,
pos not in self.questionmarks)):
self.uncover(pos)
self.check_defeat()
self.check_win()
self.refresh_surface()
elif all((double_click,
pos not in self.covered,
self.hints.get(pos) != 0)):
num_flags = sum((1 for n in self.neighbors[pos] if n in self.flags))
if num_flags == self.hints[pos]:
for neighbor in self.neighbors[pos]:
if all((neighbor in self.covered,
neighbor not in self.flags,
neighbor not in self.questionmarks)):
self.uncover(neighbor)
self.check_defeat()
self.check_win()
self.refresh_surface()
def set_mark(self, pos):
if pos in self.flags:
self.flags.remove(pos)
self.questionmarks.add(pos)
elif pos in self.questionmarks:
self.questionmarks.remove(pos)
else:
self.flags.add(pos)
self.refresh_surface()
self.mines_remaining = self.num_mines - len(self.flags)
self.mines_remaining_changed = True
def uncover(self, pos):
tiles_to_uncover = {pos}
while tiles_to_uncover:
pos = tiles_to_uncover.pop()
self.covered.remove(pos)
if (pos not in self.mines) and self.hints[pos] == 0:
for neighbor in self.neighbors[pos]:
if all((neighbor in self.covered,
neighbor not in self.flags,
neighbor not in self.questionmarks)):
tiles_to_uncover.add(neighbor)
def check_win(self):
if len(self.covered) == self.num_mines and not self.game_done:
self.game_done = True
self.end_message = "YOU WIN"
def check_defeat(self):
for pos in self.mines:
if pos not in self.covered:
self.exploded_mines.add(pos)
if self.exploded_mines:
self.game_done = True
self.end_message = "GAME OVER"
for pos in self.covered.copy():
if all((pos in self.mines,
pos not in self.flags,
pos not in self.questionmarks)):
self.covered.remove(pos)
elif (pos not in self.mines) and (pos in self.flags):
self.wrong_flags.add(pos)
def draw(self, surface):
surface.blit(self.surface, self.pos)
if self.mouseover_tile is not None:
blit_pos = (self.mouseover_tile[0] * self.tile_size + self.pos[0],
self.mouseover_tile[1] * self.tile_size + self.pos[1])
if self.mouseover_tile in self.flags:
surface.blit(self.tiles["flag_highlighted"], blit_pos)
elif self.mouseover_tile in self.questionmarks:
surface.blit(self.tiles["questionmark_highlighted"], blit_pos)
elif self.mouseover_tile in self.covered:
surface.blit(self.tiles["covered_highlighted"], blit_pos)
self.mouseover_tile = None
| true
| true
|
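Below is a stripped-down version of the iterative flood fill from Minefield.uncover, runnable without pygame; the 2x2 grid and hint values are made up for illustration.

def flood_uncover(start, hints, neighbors):
    covered = set(hints)
    to_visit = {start}
    while to_visit:
        pos = to_visit.pop()
        covered.discard(pos)
        if hints[pos] == 0:
            # Only zero-hint tiles keep expanding the frontier.
            to_visit.update(n for n in neighbors[pos] if n in covered)
    return covered

hints = {(0, 0): 0, (0, 1): 1, (1, 0): 1, (1, 1): 1}
neighbors = {p: [q for q in hints if q != p] for p in hints}
print(flood_uncover((0, 0), hints, neighbors))  # set(): the zero-hint start clears its whole neighborhood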
1c43f0d5386fd740efb998b838ef3980fb5d15bf
| 7,728
|
py
|
Python
|
torchreid/models/squeezenet.py
|
qw85639229/hardest
|
ef86536dbbe1089248e34afbbb7bb513f97f58f1
|
[
"MIT"
] | 21
|
2020-10-13T01:33:31.000Z
|
2022-01-04T15:58:31.000Z
|
torchreid/models/squeezenet.py
|
qw85639229/hardest
|
ef86536dbbe1089248e34afbbb7bb513f97f58f1
|
[
"MIT"
] | 10
|
2020-11-18T07:40:22.000Z
|
2021-10-05T07:58:25.000Z
|
torchreid/models/squeezenet.py
|
qw85639229/hardest
|
ef86536dbbe1089248e34afbbb7bb513f97f58f1
|
[
"MIT"
] | 7
|
2020-11-19T08:40:27.000Z
|
2022-02-05T06:24:08.000Z
|
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import absolute_import
from __future__ import division
__all__ = [
'squeezenet1_0',
'squeezenet1_1',
'squeezenet1_0_fc512'
]
from collections import OrderedDict
import math
import torch
import torch.nn as nn
from torch.utils import model_zoo
from torch.nn import functional as F
import torch.nn.init as init
import torchvision
import torch.utils.model_zoo as model_zoo
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1)
class SqueezeNet(nn.Module):
"""SqueezeNet.
Reference:
Iandola et al. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
        and <0.5MB model size. arXiv:1602.07360.
Public keys:
- ``squeezenet1_0``: SqueezeNet (version=1.0).
- ``squeezenet1_1``: SqueezeNet (version=1.1).
- ``squeezenet1_0_fc512``: SqueezeNet (version=1.0) + FC.
"""
def __init__(self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs):
super(SqueezeNet, self).__init__()
self.loss = loss
self.feature_dim = 512
if version not in [1.0, 1.1]:
raise ValueError('Unsupported SqueezeNet version {version}:'
'1.0 or 1.1 expected'.format(version=version))
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims))
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
f = self.features(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url, map_location=None)
model_dict = model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes,
loss,
version=1.0,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_0'])
return model
def squeezenet1_0_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes,
loss,
version=1.0,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_0'])
return model
def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes,
loss,
version=1.1,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_1'])
return model
| 33.454545
| 123
| 0.583075
|
from __future__ import absolute_import
from __future__ import division
__all__ = [
'squeezenet1_0',
'squeezenet1_1',
'squeezenet1_0_fc512'
]
from collections import OrderedDict
import math
import torch
import torch.nn as nn
from torch.utils import model_zoo
from torch.nn import functional as F
import torch.nn.init as init
import torchvision
import torch.utils.model_zoo as model_zoo
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1)
class SqueezeNet(nn.Module):
def __init__(self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs):
super(SqueezeNet, self).__init__()
self.loss = loss
self.feature_dim = 512
if version not in [1.0, 1.1]:
raise ValueError('Unsupported SqueezeNet version {version}:'
'1.0 or 1.1 expected'.format(version=version))
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self._init_params()
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims))
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
f = self.features(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
def init_pretrained_weights(model, model_url):
pretrain_dict = model_zoo.load_url(model_url, map_location=None)
model_dict = model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes,
loss,
version=1.0,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_0'])
return model
def squeezenet1_0_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes,
loss,
version=1.0,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_0'])
return model
def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = SqueezeNet(
num_classes,
loss,
version=1.1,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['squeezenet1_1'])
return model
| true
| true
|
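A minimal forward-pass sketch for the factories above, assuming the module is importable as torchreid.models.squeezenet; pretrained=False avoids the weight download, and the input size and class count are arbitrary examples.

import torch

from torchreid.models.squeezenet import squeezenet1_0

model = squeezenet1_0(num_classes=751, loss='softmax', pretrained=False)
model.eval()
with torch.no_grad():
    v = model(torch.randn(2, 3, 256, 128))  # eval mode returns features, not logits
print(v.shape)  # torch.Size([2, 512])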
1c43f0f37d381ccc50b76c9a1eb0cc96c6d62613
| 10,065
|
py
|
Python
|
DPDNet/image_to_patch_filter.py
|
Abdullah-Abuolaim/defocus-deblurring-dual-pixel
|
21a43e7d12350c62c4038485cdeebc27a078765b
|
[
"MIT"
] | 115
|
2020-05-01T22:51:14.000Z
|
2022-03-12T13:18:37.000Z
|
DPDNet/image_to_patch_filter.py
|
panpanfei/defocus-deblurring-dual-pixel
|
8c1b812236d2eb3293b670512ef35e20471e2e48
|
[
"MIT"
] | 14
|
2020-05-12T03:38:57.000Z
|
2021-06-01T15:02:04.000Z
|
DPDNet/image_to_patch_filter.py
|
Abdullah-Abuolaim/defocus-deblurring-dual-pixel
|
21a43e7d12350c62c4038485cdeebc27a078765b
|
[
"MIT"
] | 13
|
2020-06-28T08:25:09.000Z
|
2022-02-28T16:10:46.000Z
|
"""
This code is used to extract image patches from the training and validation
sets as described in the paper. For the training set patches, we discard 30%
of the patches that have the lowest sharpness energy. Recall that we don't
extract patches for test images because we process the full image at test time.
Copyright (c) 2020-present, Abdullah Abuolaim
This source code is licensed under the license found in the LICENSE file in
the root directory of this source tree.
Note: this code is the implementation of the "Defocus Deblurring Using Dual-
Pixel Data" paper accepted to ECCV 2020. Link to GitHub repository:
https://github.com/Abdullah-Abuolaim/defocus-deblurring-dual-pixel
Email: abuolaim@eecs.yorku.ca
"""
import numpy as np
import os
import cv2
import errno
from copy import deepcopy
def check_create_directory(path_to_check):
if not os.path.exists(path_to_check):
try:
os.makedirs(path_to_check)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def shapness_measure(img_temp,kernel_size):
conv_x = cv2.Sobel(img_temp,cv2.CV_64F,1,0,ksize=kernel_size)
conv_y = cv2.Sobel(img_temp,cv2.CV_64F,0,1,ksize=kernel_size)
temp_arr_x=deepcopy(conv_x*conv_x)
temp_arr_y=deepcopy(conv_y*conv_y)
temp_sum_x_y=temp_arr_x+temp_arr_y
temp_sum_x_y=np.sqrt(temp_sum_x_y)
return np.sum(temp_sum_x_y)
def filter_patch_sharpness(patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp):
global patches_src_c, patches_trg_c, patches_src_l, patches_src_r
fitnessVal_3=[]
fitnessVal_7=[]
fitnessVal_11=[]
fitnessVal_15=[]
num_of_img_patches=len(patches_trg_c_temp)
for i in range(num_of_img_patches):
fitnessVal_3.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),3))
fitnessVal_7.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),7))
fitnessVal_11.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),11))
fitnessVal_15.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),15))
fitnessVal_3=np.asarray(fitnessVal_3)
fitnessVal_7=np.asarray(fitnessVal_7)
fitnessVal_11=np.asarray(fitnessVal_11)
fitnessVal_15=np.asarray(fitnessVal_15)
fitnessVal_3=(fitnessVal_3-np.min(fitnessVal_3))/np.max((fitnessVal_3-np.min(fitnessVal_3)))
fitnessVal_7=(fitnessVal_7-np.min(fitnessVal_7))/np.max((fitnessVal_7-np.min(fitnessVal_7)))
fitnessVal_11=(fitnessVal_11-np.min(fitnessVal_11))/np.max((fitnessVal_11-np.min(fitnessVal_11)))
fitnessVal_15=(fitnessVal_15-np.min(fitnessVal_15))/np.max((fitnessVal_15-np.min(fitnessVal_15)))
fitnessVal_all=fitnessVal_3*fitnessVal_7*fitnessVal_11*fitnessVal_15
to_remove_patches_number=int(to_remove_ratio*num_of_img_patches)
for itr in range(to_remove_patches_number):
minArrInd=np.argmin(fitnessVal_all)
fitnessVal_all[minArrInd]=2
for itr in range(num_of_img_patches):
if fitnessVal_all[itr]!=2:
patches_src_c.append(patches_src_c_temp[itr])
patches_trg_c.append(patches_trg_c_temp[itr])
patches_src_l.append(patches_src_l_temp[itr])
patches_src_r.append(patches_src_r_temp[itr])
def slice_stride(_img_src_c, _img_trg_c, _img_src_l, _img_src_r):
global set_type, patch_size, stride, patches_src_c, patches_trg_c, patches_src_l, patches_src_r
coordinates_list=[]
coordinates_list.append([0,0,0,0])
patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp = [], [], [], []
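    # Slide a patch window over the image with the configured stride; the elif
    # branches below pin border patches flush to the right edge, bottom edge,
    # and corner, and coordinates_list prevents emitting a border patch twice.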
for r in range(0,_img_src_c.shape[0],stride[0]):
for c in range(0,_img_src_c.shape[1],stride[1]):
if (r+patch_size[0]) <= _img_src_c.shape[0] and (c+patch_size[1]) <= _img_src_c.shape[1]:
patches_src_c_temp.append(_img_src_c[r:r+patch_size[0],c:c+patch_size[1]])
patches_trg_c_temp.append(_img_trg_c[r:r+patch_size[0],c:c+patch_size[1]])
patches_src_l_temp.append(_img_src_l[r:r+patch_size[0],c:c+patch_size[1]])
patches_src_r_temp.append(_img_src_r[r:r+patch_size[0],c:c+patch_size[1]])
elif (r+patch_size[0]) <= _img_src_c.shape[0] and not ([r,r+patch_size[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]] in coordinates_list):
patches_src_c_temp.append(_img_src_c[r:r+patch_size[0],_img_src_c.shape[1]-patch_size[1]:_img_src_c.shape[1]])
patches_trg_c_temp.append(_img_trg_c[r:r+patch_size[0],_img_trg_c.shape[1]-patch_size[1]:_img_trg_c.shape[1]])
patches_src_l_temp.append(_img_src_l[r:r+patch_size[0],_img_src_l.shape[1]-patch_size[1]:_img_src_l.shape[1]])
patches_src_r_temp.append(_img_src_r[r:r+patch_size[0],_img_src_r.shape[1]-patch_size[1]:_img_src_r.shape[1]])
coordinates_list.append([r,r+patch_size[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]])
elif (c+patch_size[1]) <= _img_src_c.shape[1] and not ([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],c,c+patch_size[1]] in coordinates_list):
patches_src_c_temp.append(_img_src_c[_img_src_c.shape[0]-patch_size[0]:_img_src_c.shape[0],c:c+patch_size[1]])
patches_trg_c_temp.append(_img_trg_c[_img_trg_c.shape[0]-patch_size[0]:_img_trg_c.shape[0],c:c+patch_size[1]])
patches_src_l_temp.append(_img_src_l[_img_src_l.shape[0]-patch_size[0]:_img_src_l.shape[0],c:c+patch_size[1]])
patches_src_r_temp.append(_img_src_r[_img_src_r.shape[0]-patch_size[0]:_img_src_r.shape[0],c:c+patch_size[1]])
coordinates_list.append([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],c,c+patch_size[1]])
elif not ([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]] in coordinates_list):
patches_src_c_temp.append(_img_src_c[_img_src_c.shape[0]-patch_size[0]:_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1]:_img_src_c.shape[1]])
patches_trg_c_temp.append(_img_trg_c[_img_trg_c.shape[0]-patch_size[0]:_img_trg_c.shape[0],_img_trg_c.shape[1]-patch_size[1]:_img_trg_c.shape[1]])
patches_src_l_temp.append(_img_src_l[_img_src_l.shape[0]-patch_size[0]:_img_src_l.shape[0],_img_src_l.shape[1]-patch_size[1]:_img_src_l.shape[1]])
patches_src_r_temp.append(_img_src_r[_img_src_r.shape[0]-patch_size[0]:_img_src_r.shape[0],_img_src_r.shape[1]-patch_size[1]:_img_src_r.shape[1]])
coordinates_list.append([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]])
if set_type == 'train':
filter_patch_sharpness(patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp)
else:
patches_src_c, patches_trg_c, patches_src_l, patches_src_r = deepcopy(patches_src_c_temp), deepcopy(patches_trg_c_temp), deepcopy(patches_src_l_temp), deepcopy(patches_src_r_temp)
set_type_arr=['train','val']
img_ex='.png'
sub_folder=['source/','target/']
dataset='./dd_dp_dataset_canon/'
# color flag used to select the reading image mode in opencv. 0: grayscale,
# 1: rgb 8 bits, -1: read image as-is, including its bit depth
color_flag=-1
patch_size=[512, 512]
to_remove_ratio=0.3 # discard 30% of the patches
for set_type in set_type_arr:
print('Image to patch of ',set_type,'set has started...')
if set_type == 'train':
# patch settings
patch_overlap_ratio=0.6
stride=[int((1-patch_overlap_ratio)*patch_size[0]),int((1-patch_overlap_ratio)*patch_size[1])]
else:
# patch settings
patch_overlap_ratio=0.01
stride=[int((1-patch_overlap_ratio)*patch_size[0]),int((1-patch_overlap_ratio)*patch_size[1])]
# pathes to write extracted patches
path_write_c= './dd_dp_dataset_canon_patch/'+set_type+'_c/'
path_write_l= './dd_dp_dataset_canon_patch/'+set_type+'_l/'
path_write_r= './dd_dp_dataset_canon_patch/'+set_type+'_r/'
# to check if directory exist, otherwise create one
check_create_directory(path_write_c+sub_folder[0])
check_create_directory(path_write_c+sub_folder[1])
check_create_directory(path_write_l+sub_folder[0])
check_create_directory(path_write_r+sub_folder[0])
# load image filenames
images_src=np.load('./file_names/'+set_type+'_src.npy')
images_trg=np.load('./file_names/'+set_type+'_trg.npy')
# set counter
img_patch_count=0
data_ims_size=len(images_src)
for i in range(data_ims_size):
patches_src_c=[]
patches_trg_c=[]
patches_src_l=[]
patches_src_r=[]
img_src_c=cv2.imread(dataset+set_type+'_c/'+sub_folder[0]+images_src[i]+img_ex,color_flag)
img_trg_c=cv2.imread(dataset+set_type+'_c/'+sub_folder[1]+images_trg[i]+img_ex,color_flag)
print(dataset+set_type+'_c/'+sub_folder[0]+images_src[i]+img_ex)
img_src_l=cv2.imread(dataset+set_type+'_l/'+sub_folder[0]+images_src[i]+'_L'+img_ex,color_flag)
img_src_r=cv2.imread(dataset+set_type+'_r/'+sub_folder[0]+images_src[i]+'_R'+img_ex,color_flag)
slice_stride(img_src_c, img_trg_c, img_src_l, img_src_r)
for j in range(len(patches_src_c)):
cv2.imwrite(path_write_c+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_c[j]).astype(np.uint16))
cv2.imwrite(path_write_c+sub_folder[1]+str(img_patch_count).zfill(5)+img_ex,(patches_trg_c[j]).astype(np.uint16))
cv2.imwrite(path_write_l+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_l[j]).astype(np.uint16))
cv2.imwrite(path_write_r+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_r[j]).astype(np.uint16))
img_patch_count+=1
print(set_type+': ',i,j,img_patch_count)
| 56.544944
| 187
| 0.719424
|
import numpy as np
import os
import cv2
import errno
from copy import deepcopy
def check_create_directory(path_to_check):
if not os.path.exists(path_to_check):
try:
os.makedirs(path_to_check)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def shapness_measure(img_temp,kernel_size):
conv_x = cv2.Sobel(img_temp,cv2.CV_64F,1,0,ksize=kernel_size)
conv_y = cv2.Sobel(img_temp,cv2.CV_64F,0,1,ksize=kernel_size)
temp_arr_x=deepcopy(conv_x*conv_x)
temp_arr_y=deepcopy(conv_y*conv_y)
temp_sum_x_y=temp_arr_x+temp_arr_y
temp_sum_x_y=np.sqrt(temp_sum_x_y)
return np.sum(temp_sum_x_y)
def filter_patch_sharpness(patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp):
global patches_src_c, patches_trg_c, patches_src_l, patches_src_r
fitnessVal_3=[]
fitnessVal_7=[]
fitnessVal_11=[]
fitnessVal_15=[]
num_of_img_patches=len(patches_trg_c_temp)
for i in range(num_of_img_patches):
fitnessVal_3.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),3))
fitnessVal_7.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),7))
fitnessVal_11.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),11))
fitnessVal_15.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),15))
fitnessVal_3=np.asarray(fitnessVal_3)
fitnessVal_7=np.asarray(fitnessVal_7)
fitnessVal_11=np.asarray(fitnessVal_11)
fitnessVal_15=np.asarray(fitnessVal_15)
fitnessVal_3=(fitnessVal_3-np.min(fitnessVal_3))/np.max((fitnessVal_3-np.min(fitnessVal_3)))
fitnessVal_7=(fitnessVal_7-np.min(fitnessVal_7))/np.max((fitnessVal_7-np.min(fitnessVal_7)))
fitnessVal_11=(fitnessVal_11-np.min(fitnessVal_11))/np.max((fitnessVal_11-np.min(fitnessVal_11)))
fitnessVal_15=(fitnessVal_15-np.min(fitnessVal_15))/np.max((fitnessVal_15-np.min(fitnessVal_15)))
fitnessVal_all=fitnessVal_3*fitnessVal_7*fitnessVal_11*fitnessVal_15
to_remove_patches_number=int(to_remove_ratio*num_of_img_patches)
for itr in range(to_remove_patches_number):
minArrInd=np.argmin(fitnessVal_all)
fitnessVal_all[minArrInd]=2
for itr in range(num_of_img_patches):
if fitnessVal_all[itr]!=2:
patches_src_c.append(patches_src_c_temp[itr])
patches_trg_c.append(patches_trg_c_temp[itr])
patches_src_l.append(patches_src_l_temp[itr])
patches_src_r.append(patches_src_r_temp[itr])
def slice_stride(_img_src_c, _img_trg_c, _img_src_l, _img_src_r):
global set_type, patch_size, stride, patches_src_c, patches_trg_c, patches_src_l, patches_src_r
coordinates_list=[]
coordinates_list.append([0,0,0,0])
patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp = [], [], [], []
for r in range(0,_img_src_c.shape[0],stride[0]):
for c in range(0,_img_src_c.shape[1],stride[1]):
if (r+patch_size[0]) <= _img_src_c.shape[0] and (c+patch_size[1]) <= _img_src_c.shape[1]:
patches_src_c_temp.append(_img_src_c[r:r+patch_size[0],c:c+patch_size[1]])
patches_trg_c_temp.append(_img_trg_c[r:r+patch_size[0],c:c+patch_size[1]])
patches_src_l_temp.append(_img_src_l[r:r+patch_size[0],c:c+patch_size[1]])
patches_src_r_temp.append(_img_src_r[r:r+patch_size[0],c:c+patch_size[1]])
elif (r+patch_size[0]) <= _img_src_c.shape[0] and not ([r,r+patch_size[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]] in coordinates_list):
patches_src_c_temp.append(_img_src_c[r:r+patch_size[0],_img_src_c.shape[1]-patch_size[1]:_img_src_c.shape[1]])
patches_trg_c_temp.append(_img_trg_c[r:r+patch_size[0],_img_trg_c.shape[1]-patch_size[1]:_img_trg_c.shape[1]])
patches_src_l_temp.append(_img_src_l[r:r+patch_size[0],_img_src_l.shape[1]-patch_size[1]:_img_src_l.shape[1]])
patches_src_r_temp.append(_img_src_r[r:r+patch_size[0],_img_src_r.shape[1]-patch_size[1]:_img_src_r.shape[1]])
coordinates_list.append([r,r+patch_size[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]])
elif (c+patch_size[1]) <= _img_src_c.shape[1] and not ([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],c,c+patch_size[1]] in coordinates_list):
patches_src_c_temp.append(_img_src_c[_img_src_c.shape[0]-patch_size[0]:_img_src_c.shape[0],c:c+patch_size[1]])
patches_trg_c_temp.append(_img_trg_c[_img_trg_c.shape[0]-patch_size[0]:_img_trg_c.shape[0],c:c+patch_size[1]])
patches_src_l_temp.append(_img_src_l[_img_src_l.shape[0]-patch_size[0]:_img_src_l.shape[0],c:c+patch_size[1]])
patches_src_r_temp.append(_img_src_r[_img_src_r.shape[0]-patch_size[0]:_img_src_r.shape[0],c:c+patch_size[1]])
coordinates_list.append([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],c,c+patch_size[1]])
elif not ([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]] in coordinates_list):
patches_src_c_temp.append(_img_src_c[_img_src_c.shape[0]-patch_size[0]:_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1]:_img_src_c.shape[1]])
patches_trg_c_temp.append(_img_trg_c[_img_trg_c.shape[0]-patch_size[0]:_img_trg_c.shape[0],_img_trg_c.shape[1]-patch_size[1]:_img_trg_c.shape[1]])
patches_src_l_temp.append(_img_src_l[_img_src_l.shape[0]-patch_size[0]:_img_src_l.shape[0],_img_src_l.shape[1]-patch_size[1]:_img_src_l.shape[1]])
patches_src_r_temp.append(_img_src_r[_img_src_r.shape[0]-patch_size[0]:_img_src_r.shape[0],_img_src_r.shape[1]-patch_size[1]:_img_src_r.shape[1]])
coordinates_list.append([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]])
if set_type == 'train':
filter_patch_sharpness(patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp)
else:
patches_src_c, patches_trg_c, patches_src_l, patches_src_r = deepcopy(patches_src_c_temp), deepcopy(patches_trg_c_temp), deepcopy(patches_src_l_temp), deepcopy(patches_src_r_temp)
set_type_arr=['train','val']
img_ex='.png'
sub_folder=['source/','target/']
dataset='./dd_dp_dataset_canon/'
color_flag=-1
patch_size=[512, 512]
to_remove_ratio=0.3
for set_type in set_type_arr:
print('Image to patch of ',set_type,'set has started...')
if set_type == 'train':
patch_overlap_ratio=0.6
stride=[int((1-patch_overlap_ratio)*patch_size[0]),int((1-patch_overlap_ratio)*patch_size[1])]
else:
patch_overlap_ratio=0.01
stride=[int((1-patch_overlap_ratio)*patch_size[0]),int((1-patch_overlap_ratio)*patch_size[1])]
path_write_c= './dd_dp_dataset_canon_patch/'+set_type+'_c/'
path_write_l= './dd_dp_dataset_canon_patch/'+set_type+'_l/'
path_write_r= './dd_dp_dataset_canon_patch/'+set_type+'_r/'
check_create_directory(path_write_c+sub_folder[0])
check_create_directory(path_write_c+sub_folder[1])
check_create_directory(path_write_l+sub_folder[0])
check_create_directory(path_write_r+sub_folder[0])
images_src=np.load('./file_names/'+set_type+'_src.npy')
images_trg=np.load('./file_names/'+set_type+'_trg.npy')
img_patch_count=0
data_ims_size=len(images_src)
for i in range(data_ims_size):
patches_src_c=[]
patches_trg_c=[]
patches_src_l=[]
patches_src_r=[]
img_src_c=cv2.imread(dataset+set_type+'_c/'+sub_folder[0]+images_src[i]+img_ex,color_flag)
img_trg_c=cv2.imread(dataset+set_type+'_c/'+sub_folder[1]+images_trg[i]+img_ex,color_flag)
print(dataset+set_type+'_c/'+sub_folder[0]+images_src[i]+img_ex)
img_src_l=cv2.imread(dataset+set_type+'_l/'+sub_folder[0]+images_src[i]+'_L'+img_ex,color_flag)
img_src_r=cv2.imread(dataset+set_type+'_r/'+sub_folder[0]+images_src[i]+'_R'+img_ex,color_flag)
slice_stride(img_src_c, img_trg_c, img_src_l, img_src_r)
for j in range(len(patches_src_c)):
cv2.imwrite(path_write_c+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_c[j]).astype(np.uint16))
cv2.imwrite(path_write_c+sub_folder[1]+str(img_patch_count).zfill(5)+img_ex,(patches_trg_c[j]).astype(np.uint16))
cv2.imwrite(path_write_l+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_l[j]).astype(np.uint16))
cv2.imwrite(path_write_r+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_r[j]).astype(np.uint16))
img_patch_count+=1
print(set_type+': ',i,j,img_patch_count)
| true
| true
|
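The multi-scale Sobel score that drives the 30% filtering can be illustrated on a synthetic patch as below, without the per-kernel min-max normalisation that the script applies across a whole batch; the 64x64 random patch is made up.

import cv2
import numpy as np

patch = (np.random.rand(64, 64) * 255).astype(np.uint8)
score = 1.0
for k in (3, 7, 11, 15):
    gx = cv2.Sobel(patch, cv2.CV_64F, 1, 0, ksize=k)
    gy = cv2.Sobel(patch, cv2.CV_64F, 0, 1, ksize=k)
    score *= np.sum(np.sqrt(gx * gx + gy * gy))
print(score)  # larger values indicate sharper patches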
1c43f2f1279c5e8aee0def23c69b53b4bb131a33
| 556
|
py
|
Python
|
examples/server/asgi/simple.py
|
13g10n/python-engineio
|
e882f5949bdd1618d97b0cade18a7e8af8670b41
|
[
"MIT"
] | 208
|
2015-06-22T00:44:53.000Z
|
2022-02-13T16:39:14.000Z
|
examples/server/asgi/simple.py
|
13g10n/python-engineio
|
e882f5949bdd1618d97b0cade18a7e8af8670b41
|
[
"MIT"
] | 241
|
2015-08-12T06:15:40.000Z
|
2022-03-18T19:17:46.000Z
|
examples/server/asgi/simple.py
|
13g10n/python-engineio
|
e882f5949bdd1618d97b0cade18a7e8af8670b41
|
[
"MIT"
] | 153
|
2015-08-08T15:40:45.000Z
|
2022-03-29T14:26:32.000Z
|
import os
import uvicorn
import engineio
eio = engineio.AsyncServer(async_mode='asgi')
app = engineio.ASGIApp(eio, static_files={
'/': 'simple.html',
'/static': 'static',
})
@eio.on('connect')
def connect(sid, environ):
print("connect ", sid)
@eio.on('message')
async def message(sid, data):
print('message from', sid, data)
await eio.send(sid, 'Thank you for your message!')
@eio.on('disconnect')
def disconnect(sid):
print('disconnect ', sid)
if __name__ == '__main__':
uvicorn.run(app, host='127.0.0.1', port=5000)
| 17.935484
| 54
| 0.656475
|
import os
import uvicorn
import engineio
eio = engineio.AsyncServer(async_mode='asgi')
app = engineio.ASGIApp(eio, static_files={
'/': 'simple.html',
'/static': 'static',
})
@eio.on('connect')
def connect(sid, environ):
print("connect ", sid)
@eio.on('message')
async def message(sid, data):
print('message from', sid, data)
await eio.send(sid, 'Thank you for your message!')
@eio.on('disconnect')
def disconnect(sid):
print('disconnect ', sid)
if __name__ == '__main__':
uvicorn.run(app, host='127.0.0.1', port=5000)
| true
| true
|
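A matching client-side sketch, assuming the python-engineio client extras are installed; the URL points at the server above.

import engineio

client = engineio.Client()

@client.on('message')
def on_message(data):
    print('server says:', data)

client.connect('http://127.0.0.1:5000')
client.send('hello')
client.wait()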
1c43f3b270474db102bfa0c81625d0a5e1cecaa3
| 2,053
|
py
|
Python
|
budgetportal/tests/test_guides_pages.py
|
TomaszKolek/datamanager
|
d46dbab00e30a14fc26eb9368c32dcdbbda7309d
|
[
"MIT"
] | null | null | null |
budgetportal/tests/test_guides_pages.py
|
TomaszKolek/datamanager
|
d46dbab00e30a14fc26eb9368c32dcdbbda7309d
|
[
"MIT"
] | null | null | null |
budgetportal/tests/test_guides_pages.py
|
TomaszKolek/datamanager
|
d46dbab00e30a14fc26eb9368c32dcdbbda7309d
|
[
"MIT"
] | null | null | null |
from django.core.files.images import ImageFile
from django.test import Client, TestCase
from budgetportal.models import GuideIndexPage, GuidePage, CategoryGuide
class GuideIndexPageTestCase(TestCase):
fixtures = ["test-guides-pages"]
def setUp(self):
self.guide_index_page = GuideIndexPage.objects.get(id=4)
self.guide_page = GuidePage.objects.get(id=5)
self.category_guides = CategoryGuide.objects.all()
def test_guide_index_page(self):
"""Simple test of template response for guide index page"""
response = Client().get(self.guide_index_page.url_path)
self.assertContains(response, self.guide_index_page.title)
self.assertContains(response, self.guide_index_page.intro)
self.assertGreaterEqual(self.category_guides.count(), 1)
for category_guide in self.category_guides:
self.assertContains(response, category_guide.external_url_title)
self.assertContains(response, category_guide.external_url_description)
class GuidePagesTestCase(TestCase):
fixtures = ["test-guides-pages"]
def setUp(self):
self.guide_page = GuidePage.objects.get(id=5)
def test_guide_page(self):
"""Simple test of template response for guide page"""
with open("budgetportal/tests/test_data/photo.jpg", "rb") as file:
self.guide_page.image.file = ImageFile(file, "photo.jpg")
self.guide_page.image.save()
response = Client().get(self.guide_page.url_path)
self.assertContains(response, self.guide_page.title)
self.assertIsNotNone(self.guide_page.image)
## Verify the integration of our configuration, django-storages and wagtail
## - that is - the generated image URL templated in matches the configuration of the site
self.assertContains(
response,
"http://minio:9000/budgetportal-storage/images/photo.max-320x200.jpg",
)
for body_part in self.guide_page.body:
self.assertContains(response, body_part)
| 40.254902
| 97
| 0.702874
|
from django.core.files.images import ImageFile
from django.test import Client, TestCase
from budgetportal.models import GuideIndexPage, GuidePage, CategoryGuide
class GuideIndexPageTestCase(TestCase):
fixtures = ["test-guides-pages"]
def setUp(self):
self.guide_index_page = GuideIndexPage.objects.get(id=4)
self.guide_page = GuidePage.objects.get(id=5)
self.category_guides = CategoryGuide.objects.all()
def test_guide_index_page(self):
response = Client().get(self.guide_index_page.url_path)
self.assertContains(response, self.guide_index_page.title)
self.assertContains(response, self.guide_index_page.intro)
self.assertGreaterEqual(self.category_guides.count(), 1)
for category_guide in self.category_guides:
self.assertContains(response, category_guide.external_url_title)
self.assertContains(response, category_guide.external_url_description)
class GuidePagesTestCase(TestCase):
fixtures = ["test-guides-pages"]
def setUp(self):
self.guide_page = GuidePage.objects.get(id=5)
def test_guide_page(self):
with open("budgetportal/tests/test_data/photo.jpg", "rb") as file:
self.guide_page.image.file = ImageFile(file, "photo.jpg")
self.guide_page.image.save()
response = Client().get(self.guide_page.url_path)
self.assertContains(response, self.guide_page.title)
self.assertIsNotNone(self.guide_page.image)
for body_part in self.guide_page.body:
self.assertContains(response, body_part)
| true
| true
|
1c43f415944cc54adda6eed157b9ba14a13830c1
| 948
|
py
|
Python
|
lupdate_xml.py
|
Skycoder42/QtMvvmSettingsCore
|
4489151d3e7de940790c5a93041c7381799f695a
|
[
"BSD-3-Clause"
] | null | null | null |
lupdate_xml.py
|
Skycoder42/QtMvvmSettingsCore
|
4489151d3e7de940790c5a93041c7381799f695a
|
[
"BSD-3-Clause"
] | null | null | null |
lupdate_xml.py
|
Skycoder42/QtMvvmSettingsCore
|
4489151d3e7de940790c5a93041c7381799f695a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Usage: lupdate_xml.py bindir srcdir locales(space seperated) xml_sources...
import sys
import os
import subprocess
from xml.etree.ElementTree import Element, parse
bindir = sys.argv[1]
srcdir = sys.argv[2]
srces = sys.argv[3:]
os.chdir(srcdir)
tsmap = {}
for src in srces:
trstrings = set()
tree = parse(src)
root = Element("TS")
for elem in tree.iter():
if elem.tag == "SearchKey":
trstrings.add(elem.text)
else:
if "title" in elem.attrib:
trstrings.add(elem.attrib["title"])
if "tooltip" in elem.attrib:
trstrings.add(elem.attrib["tooltip"])
tsmap[os.path.basename(src)] = trstrings
outfile = open(".qtmvvm_settings_xml_ts.cppdummy", "w")
outfile.write("#include <QCoreApplication>\n\n")
outfile.write("void dummyfn() {\n")
for src in tsmap:
for str in tsmap[src]:
outfile.write("\tQCoreApplication::translate(\"{}\", \"{}\");\n".format(src, str))
outfile.write("}\n")
outfile.close()
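# Illustrative sketch (not part of the original script; "settings.xml" is a
# hypothetical input): for a settings XML whose basename is "settings.xml" and
# which contains <SearchKey>General</SearchKey>, the generated
# .qtmvvm_settings_xml_ts.cppdummy would contain a line like
#   QCoreApplication::translate("settings.xml", "General");
# which Qt's lupdate can then pick up as a translatable string in that context.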
| 23.7
| 84
| 0.690928
|
import sys
import os
import subprocess
from xml.etree.ElementTree import Element, parse
bindir = sys.argv[1]
srcdir = sys.argv[2]
srces = sys.argv[3:]
os.chdir(srcdir)
tsmap = {}
for src in srces:
trstrings = set()
tree = parse(src)
root = Element("TS")
for elem in tree.iter():
if elem.tag == "SearchKey":
trstrings.add(elem.text)
else:
if "title" in elem.attrib:
trstrings.add(elem.attrib["title"])
if "tooltip" in elem.attrib:
trstrings.add(elem.attrib["tooltip"])
tsmap[os.path.basename(src)] = trstrings
outfile = open(".qtmvvm_settings_xml_ts.cppdummy", "w")
outfile.write("#include <QCoreApplication>\n\n")
outfile.write("void dummyfn() {\n")
for src in tsmap:
for str in tsmap[src]:
outfile.write("\tQCoreApplication::translate(\"{}\", \"{}\");\n".format(src, str))
outfile.write("}\n")
outfile.close()
| true
| true
|
1c43f45217488b9d1f345b843dcd9e4b6f84640c
| 990
|
py
|
Python
|
6.py
|
andy0130tw/advent-of-code-2019
|
aeaeb50db3170e619aef41756ce0608793a64baa
|
[
"Unlicense"
] | null | null | null |
6.py
|
andy0130tw/advent-of-code-2019
|
aeaeb50db3170e619aef41756ce0608793a64baa
|
[
"Unlicense"
] | null | null | null |
6.py
|
andy0130tw/advent-of-code-2019
|
aeaeb50db3170e619aef41756ce0608793a64baa
|
[
"Unlicense"
] | null | null | null |
def rec_sum(root, depth):
ans = depth
for el in root.values():
ans += rec_sum(el, depth + 1)
return ans
def find_path(root, target):
for lab, sub in root.items():
if lab == target:
return [lab]
res = find_path(sub, target)
if res:
return [lab, *res]
return None
def task1(tree):
print(rec_sum(tree, 0))
def task2(tree):
pa = find_path(tree, 'SAN')
pb = find_path(tree, 'YOU')
lca_depth = 0
for ea, eb in zip(pa, pb):
if ea != eb:
break
lca_depth += 1
print(len(pa) + len(pb) - lca_depth * 2 - 2)
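    # Worked example (illustrative, not from the puzzle input): with orbits
    # COM)B, B)C, C)SAN, B)YOU we get pa = ['B', 'C', 'SAN'] and pb = ['B', 'YOU'].
    # The shared prefix is ['B'], so lca_depth = 1 and the answer is
    # 3 + 2 - 2*1 - 2 = 1 (a single transfer from B's orbit to C's orbit).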
if __name__ == '__main__':
tree = {}
while 1:
try:
par, sub = input().strip().split(')')
except EOFError:
break
if par not in tree:
tree[par] = {}
if sub not in tree:
tree[sub] = {}
tree[par][sub] = tree[sub]
# task1(tree['COM'])
task2(tree['COM'])
| 19.038462
| 49
| 0.489899
|
def rec_sum(root, depth):
ans = depth
for el in root.values():
ans += rec_sum(el, depth + 1)
return ans
def find_path(root, target):
for lab, sub in root.items():
if lab == target:
return [lab]
res = find_path(sub, target)
if res:
return [lab, *res]
return None
def task1(tree):
print(rec_sum(tree, 0))
def task2(tree):
pa = find_path(tree, 'SAN')
pb = find_path(tree, 'YOU')
lca_depth = 0
for ea, eb in zip(pa, pb):
if ea != eb:
break
lca_depth += 1
print(len(pa) + len(pb) - lca_depth * 2 - 2)
if __name__ == '__main__':
tree = {}
while 1:
try:
par, sub = input().strip().split(')')
except EOFError:
break
if par not in tree:
tree[par] = {}
if sub not in tree:
tree[sub] = {}
tree[par][sub] = tree[sub]
task2(tree['COM'])
| true
| true
|
1c43f58337d7879e5d18e9e1149c4866747fbd4d
| 926
|
py
|
Python
|
src/partition_set_into_equal_sum.py
|
redfast00/daily-algorithm-challenge
|
3507164d5ec58abe68a6e820120625e100dee96c
|
[
"MIT"
] | null | null | null |
src/partition_set_into_equal_sum.py
|
redfast00/daily-algorithm-challenge
|
3507164d5ec58abe68a6e820120625e100dee96c
|
[
"MIT"
] | null | null | null |
src/partition_set_into_equal_sum.py
|
redfast00/daily-algorithm-challenge
|
3507164d5ec58abe68a6e820120625e100dee96c
|
[
"MIT"
] | null | null | null |
from collections import Counter
from get_subset_sum import subset_sum
def partition_into_equal_parts(l):
    '''Partitions l into two subsets that have the same sum.
>>> problem = [15, 5, 20, 10, 35, 25, 10]
>>> first, second = partition_into_equal_parts(problem)
>>> valid_solution(first, second, problem)
True
'''
total = sum(l)
# If sum is odd, there is no way that total = sum(first) + sum(second) = 2 * sum(first)
if total % 2:
return
first = subset_sum(total // 2, l)
if first is None:
return
second = []
# Fill second with items from counter
second_counter = Counter(l) - Counter(first)
for number, amount in second_counter.items():
second.extend([number] * amount)
return first, second
def valid_solution(first, second, problem):
return sum(first) == sum(second) and Counter(first) + Counter(second) == Counter(problem)
| 30.866667
| 93
| 0.654428
|
from collections import Counter
from get_subset_sum import subset_sum
def partition_into_equal_parts(l):
total = sum(l)
if total % 2:
return
first = subset_sum(total // 2, l)
if first is None:
return
second = []
second_counter = Counter(l) - Counter(first)
for number, amount in second_counter.items():
second.extend([number] * amount)
return first, second
def valid_solution(first, second, problem):
return sum(first) == sum(second) and Counter(first) + Counter(second) == Counter(problem)
| true
| true
|
1c43f593ad0ffdb5320aa7b3fb8d314b549d0517
| 1,804
|
py
|
Python
|
colorize_sky.py
|
kcotar/Stellar_abudance_trees
|
1a4377ef53a4b4c8df1be860598a70be31626110
|
[
"MIT"
] | null | null | null |
colorize_sky.py
|
kcotar/Stellar_abudance_trees
|
1a4377ef53a4b4c8df1be860598a70be31626110
|
[
"MIT"
] | null | null | null |
colorize_sky.py
|
kcotar/Stellar_abudance_trees
|
1a4377ef53a4b4c8df1be860598a70be31626110
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
def _prepare_ra_dec(data):
ra = data['ra']
idx_trans = ra > 180
    if np.any(idx_trans):  # np.any tests the mask itself; len() would only test the array size
ra[idx_trans] -= 360
ra = np.deg2rad(ra)
dec = np.deg2rad(data['dec'])
return ra, dec
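# Illustrative example (an added sketch, not in the original module): for data with
# ra = [350.0] and dec = [45.0], the wrap-around maps 350 deg to -10 deg, so the
# function returns roughly (array([-0.1745]), array([0.7854])) in radians.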
def plot_ra_dec_locations(data, path='sky_pos.png'):
# plt.subplot(111, projection='mollweide')
# ra, dec = _prepare_ra_dec(data)
# plt.scatter(ra, dec, lw=0, c='black', s=0.4)
# plt.grid(True)
# plt.colorbar()
# plt.tight_layout()
# plt.savefig(path, dpi=500)
# plt.close()
plt.figure()
map = Basemap(projection='moll', lon_0=0)
map.drawparallels(np.arange(-90., 95., 5.))
map.drawmeridians(np.arange(0., 365., 5.))
ra, dec = _prepare_ra_dec(data)
map.scatter(ra, dec, lw=0, c='black', s=0.4)
ax = plt.gca()
ax.set_xlim((np.min(ra), np.max(ra)))
ax.set_ylim((np.min(dec), np.max(dec)))
plt.tight_layout()
plt.savefig(path, dpi=250)
plt.close()
def plot_ra_dec_attribute(data, attribute, path='sky_pos_attribute.png'):
# plt.subplot(111, projection='mollweide')
# ra, dec = _prepare_ra_dec(data)
# plt.scatter(ra, dec, lw=0, c=data[attribute], s=0.4)
# plt.grid(True)
# plt.colorbar()
# plt.tight_layout()
# plt.show()
# plt.savefig(path, dpi=500)
# plt.close()
plt.figure()
map = Basemap(projection='moll', lon_0=0)
map.drawparallels(np.arange(-90., 95., 5.))
map.drawmeridians(np.arange(0., 365., 5.))
ra, dec = _prepare_ra_dec(data)
map.scatter(ra, dec, lw=0, c=data[attribute], s=2)
ax = plt.gca()
ax.set_xlim((np.min(ra), np.max(ra)))
ax.set_ylim((np.min(dec), np.max(dec)))
plt.tight_layout()
plt.savefig(path, dpi=250)
plt.close()
| 30.066667
| 73
| 0.616962
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
def _prepare_ra_dec(data):
ra = data['ra']
idx_trans = ra > 180
    if np.any(idx_trans):
ra[idx_trans] -= 360
ra = np.deg2rad(ra)
dec = np.deg2rad(data['dec'])
return ra, dec
def plot_ra_dec_locations(data, path='sky_pos.png'):
plt.figure()
map = Basemap(projection='moll', lon_0=0)
map.drawparallels(np.arange(-90., 95., 5.))
map.drawmeridians(np.arange(0., 365., 5.))
ra, dec = _prepare_ra_dec(data)
map.scatter(ra, dec, lw=0, c='black', s=0.4)
ax = plt.gca()
ax.set_xlim((np.min(ra), np.max(ra)))
ax.set_ylim((np.min(dec), np.max(dec)))
plt.tight_layout()
plt.savefig(path, dpi=250)
plt.close()
def plot_ra_dec_attribute(data, attribute, path='sky_pos_attribute.png'):
plt.figure()
map = Basemap(projection='moll', lon_0=0)
map.drawparallels(np.arange(-90., 95., 5.))
map.drawmeridians(np.arange(0., 365., 5.))
ra, dec = _prepare_ra_dec(data)
map.scatter(ra, dec, lw=0, c=data[attribute], s=2)
ax = plt.gca()
ax.set_xlim((np.min(ra), np.max(ra)))
ax.set_ylim((np.min(dec), np.max(dec)))
plt.tight_layout()
plt.savefig(path, dpi=250)
plt.close()
| true
| true
|
1c43f730fff18adef3c514b8dfe4a98cff45a408
| 4,124
|
py
|
Python
|
test/coreneuron/test_spikes.py
|
ishandutta2007/nrn
|
418d42fb7afc0ebb06138b80e511c8ae716dcad0
|
[
"BSD-3-Clause"
] | null | null | null |
test/coreneuron/test_spikes.py
|
ishandutta2007/nrn
|
418d42fb7afc0ebb06138b80e511c8ae716dcad0
|
[
"BSD-3-Clause"
] | null | null | null |
test/coreneuron/test_spikes.py
|
ishandutta2007/nrn
|
418d42fb7afc0ebb06138b80e511c8ae716dcad0
|
[
"BSD-3-Clause"
] | null | null | null |
import distutils.util
import os
import sys
# Hacky, but it's non-trivial to pass commandline arguments to pytest tests.
enable_gpu = bool(
distutils.util.strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))
)
mpi4py_option = bool(
distutils.util.strtobool(os.environ.get("NRN_TEST_SPIKES_MPI4PY", "false"))
)
file_mode_option = bool(
distutils.util.strtobool(os.environ.get("NRN_TEST_SPIKES_FILE_MODE", "false"))
)
nrnmpi_init_option = bool(
distutils.util.strtobool(os.environ.get("NRN_TEST_SPIKES_NRNMPI_INIT", "false"))
)
# following at top level and early enough avoids...
# *** The MPI_Iprobe() function was called after MPI_FINALIZE was invoked.
# mpi4py needs to be imported before importing h
if mpi4py_option:
from mpi4py import MPI
from neuron import h, gui
# without mpi4py we need to call nrnmpi_init explicitly
elif nrnmpi_init_option:
from neuron import h, gui
h.nrnmpi_init()
# otherwise serial execution
else:
from neuron import h, gui
import pytest
import sys
import traceback
def test_spikes(
use_mpi4py=mpi4py_option,
use_nrnmpi_init=nrnmpi_init_option,
file_mode=file_mode_option,
):
print(
"test_spikes(use_mpi4py={}, use_nrnmpi_init={}, file_mode={})".format(
use_mpi4py, use_nrnmpi_init, file_mode
)
)
h("""create soma""")
h.soma.L = 5.6419
h.soma.diam = 5.6419
h.soma.insert("hh")
h.soma.nseg = 3
ic = h.IClamp(h.soma(0.25))
ic.delay = 0.1
ic.dur = 0.1
ic.amp = 0.3
ic2 = h.IClamp(h.soma(0.75))
ic2.delay = 5.5
ic2.dur = 1
ic2.amp = 0.3
h.tstop = 10
h.cvode.use_fast_imem(1)
h.cvode.cache_efficient(1)
pc = h.ParallelContext()
pc.set_gid2node(pc.id() + 1, pc.id())
myobj = h.NetCon(h.soma(0.5)._ref_v, None, sec=h.soma)
pc.cell(pc.id() + 1, myobj)
# NEURON run
nrn_spike_t = h.Vector()
nrn_spike_gids = h.Vector()
# rank 0 record spikes for all gid while others
# for specific gid. this is for better test coverage.
pc.spike_record(-1 if pc.id() == 0 else (pc.id() + 1), nrn_spike_t, nrn_spike_gids)
h.run()
nrn_spike_t = nrn_spike_t.to_python()
nrn_spike_gids = nrn_spike_gids.to_python()
# CORENEURON run
from neuron import coreneuron
coreneuron.enable = True
coreneuron.gpu = enable_gpu
coreneuron.file_mode = file_mode
coreneuron.verbose = 0
corenrn_all_spike_t = h.Vector()
corenrn_all_spike_gids = h.Vector()
pc.spike_record(-1, corenrn_all_spike_t, corenrn_all_spike_gids)
pc.set_maxstep(10)
def run(mode):
h.stdinit()
if mode == 0:
pc.psolve(h.tstop)
elif mode == 1:
while h.t < h.tstop:
pc.psolve(h.t + 1.0)
else:
while h.t < h.tstop:
h.continuerun(h.t + 0.5)
pc.psolve(h.t + 0.5)
corenrn_all_spike_t_py = corenrn_all_spike_t.to_python()
corenrn_all_spike_gids_py = corenrn_all_spike_gids.to_python()
# check spikes match
assert len(nrn_spike_t) # check we've actually got spikes
assert len(nrn_spike_t) == len(nrn_spike_gids) # matching no. of gids
if nrn_spike_t != corenrn_all_spike_t_py:
print(mode)
print(nrn_spike_t)
print(nrn_spike_gids)
print(corenrn_all_spike_t_py)
print(corenrn_all_spike_gids_py)
print(
[
corenrn_all_spike_t[i] - nrn_spike_t[i]
for i in range(len(nrn_spike_t))
]
)
assert nrn_spike_t == corenrn_all_spike_t_py
assert nrn_spike_gids == corenrn_all_spike_gids_py
if file_mode is False:
for mode in [0, 1, 2]:
run(mode)
else:
run(0)
return h
if __name__ == "__main__":
try:
h = test_spikes()
except:
traceback.print_exc()
# Make the CTest test fail
sys.exit(42)
if mpi4py_option or nrnmpi_init_option:
pc = h.ParallelContext()
pc.barrier()
h.quit()
| 26.606452
| 87
| 0.629243
|
import distutils.util
import os
import sys
enable_gpu = bool(
distutils.util.strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))
)
mpi4py_option = bool(
distutils.util.strtobool(os.environ.get("NRN_TEST_SPIKES_MPI4PY", "false"))
)
file_mode_option = bool(
distutils.util.strtobool(os.environ.get("NRN_TEST_SPIKES_FILE_MODE", "false"))
)
nrnmpi_init_option = bool(
distutils.util.strtobool(os.environ.get("NRN_TEST_SPIKES_NRNMPI_INIT", "false"))
)
# following at top level and early enough avoids...
# *** The MPI_Iprobe() function was called after MPI_FINALIZE was invoked.
# mpi4py needs to be imported before importing h
if mpi4py_option:
from mpi4py import MPI
from neuron import h, gui
# without mpi4py we need to call nrnmpi_init explicitly
elif nrnmpi_init_option:
from neuron import h, gui
h.nrnmpi_init()
# otherwise serial execution
else:
from neuron import h, gui
import pytest
import sys
import traceback
def test_spikes(
use_mpi4py=mpi4py_option,
use_nrnmpi_init=nrnmpi_init_option,
file_mode=file_mode_option,
):
print(
"test_spikes(use_mpi4py={}, use_nrnmpi_init={}, file_mode={})".format(
use_mpi4py, use_nrnmpi_init, file_mode
)
)
h("""create soma""")
h.soma.L = 5.6419
h.soma.diam = 5.6419
h.soma.insert("hh")
h.soma.nseg = 3
ic = h.IClamp(h.soma(0.25))
ic.delay = 0.1
ic.dur = 0.1
ic.amp = 0.3
ic2 = h.IClamp(h.soma(0.75))
ic2.delay = 5.5
ic2.dur = 1
ic2.amp = 0.3
h.tstop = 10
h.cvode.use_fast_imem(1)
h.cvode.cache_efficient(1)
pc = h.ParallelContext()
pc.set_gid2node(pc.id() + 1, pc.id())
myobj = h.NetCon(h.soma(0.5)._ref_v, None, sec=h.soma)
pc.cell(pc.id() + 1, myobj)
# NEURON run
nrn_spike_t = h.Vector()
nrn_spike_gids = h.Vector()
# rank 0 record spikes for all gid while others
# for specific gid. this is for better test coverage.
pc.spike_record(-1 if pc.id() == 0 else (pc.id() + 1), nrn_spike_t, nrn_spike_gids)
h.run()
nrn_spike_t = nrn_spike_t.to_python()
nrn_spike_gids = nrn_spike_gids.to_python()
# CORENEURON run
from neuron import coreneuron
coreneuron.enable = True
coreneuron.gpu = enable_gpu
coreneuron.file_mode = file_mode
coreneuron.verbose = 0
corenrn_all_spike_t = h.Vector()
corenrn_all_spike_gids = h.Vector()
pc.spike_record(-1, corenrn_all_spike_t, corenrn_all_spike_gids)
pc.set_maxstep(10)
def run(mode):
h.stdinit()
if mode == 0:
pc.psolve(h.tstop)
elif mode == 1:
while h.t < h.tstop:
pc.psolve(h.t + 1.0)
else:
while h.t < h.tstop:
h.continuerun(h.t + 0.5)
pc.psolve(h.t + 0.5)
corenrn_all_spike_t_py = corenrn_all_spike_t.to_python()
corenrn_all_spike_gids_py = corenrn_all_spike_gids.to_python()
# check spikes match
assert len(nrn_spike_t) # check we've actually got spikes
assert len(nrn_spike_t) == len(nrn_spike_gids)
if nrn_spike_t != corenrn_all_spike_t_py:
print(mode)
print(nrn_spike_t)
print(nrn_spike_gids)
print(corenrn_all_spike_t_py)
print(corenrn_all_spike_gids_py)
print(
[
corenrn_all_spike_t[i] - nrn_spike_t[i]
for i in range(len(nrn_spike_t))
]
)
assert nrn_spike_t == corenrn_all_spike_t_py
assert nrn_spike_gids == corenrn_all_spike_gids_py
if file_mode is False:
for mode in [0, 1, 2]:
run(mode)
else:
run(0)
return h
if __name__ == "__main__":
try:
h = test_spikes()
except:
traceback.print_exc()
sys.exit(42)
if mpi4py_option or nrnmpi_init_option:
pc = h.ParallelContext()
pc.barrier()
h.quit()
| true
| true
|
1c43f74e70b164c0121e3a9b4edda8f51bbb7dec
| 984
|
py
|
Python
|
python_Project/Day_16-20/Day_16-20_Sort&Search_Algorithms/Cocktail_sort.py
|
Zzz-ww/Python-prac
|
c97f2c16b74a2c1df117f377a072811cc596f98b
|
[
"MIT"
] | null | null | null |
python_Project/Day_16-20/Day_16-20_Sort&Search_Algorithms/Cocktail_sort.py
|
Zzz-ww/Python-prac
|
c97f2c16b74a2c1df117f377a072811cc596f98b
|
[
"MIT"
] | null | null | null |
python_Project/Day_16-20/Day_16-20_Sort&Search_Algorithms/Cocktail_sort.py
|
Zzz-ww/Python-prac
|
c97f2c16b74a2c1df117f377a072811cc596f98b
|
[
"MIT"
] | null | null | null |
"""
双向冒泡:
冒泡排序,每次都是从左往右,交换相邻的元素,从而达到循环一边可以把最大的元素放在右边。
而双向冒泡排序,在完成一次从左往右的冒泡排序后,再从右往左进行冒泡,从而把小的元素放在左边。
下面这张图可以很好地表达:
"""
def bubble_sort(origin_items):
"""高质量冒泡排序(搅拌排序)/双向冒泡排序"""
comp = lambda x, y: x > y
items = origin_items[:]
for i in range(len(items) - 1):
        swapped = False  # this flag also works in plain bubble sort: once no swaps occur, later passes can be skipped
        for j in range(i, len(items) - 1 - i):  # forward pass: move this round's largest element to the end
if comp(items[j], items[j + 1]):
items[j], items[j + 1] = items[j + 1], items[j]
swapped = True
if swapped:
swapped = False
            for j in range(len(items) - 2 - i, i, -1):  # backward pass: move this round's smallest element to the front
if comp(items[j - 1], items[j]):
items[j], items[j - 1] = items[j - 1], items[j]
swapped = True
if not swapped:
break
return items
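# Illustrative trace (a sketch, not part of the original file): for
# items = [5, 1, 4, 2, 3] the first forward sweep bubbles 5 to the end,
# giving [1, 4, 2, 3, 5]; the backward sweep then moves the smallest
# remaining out-of-place element toward the front, giving [1, 2, 4, 3, 5].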
def main():
s = [1, 10, 2, 8, 5]
print(bubble_sort(s))
if __name__ == '__main__':
main()
| 27.333333
| 73
| 0.530488
|
def bubble_sort(origin_items):
comp = lambda x, y: x > y
items = origin_items[:]
for i in range(len(items) - 1):
swapped = False
for j in range(i, len(items) - 1 - i):
if comp(items[j], items[j + 1]):
items[j], items[j + 1] = items[j + 1], items[j]
swapped = True
if swapped:
swapped = False
for j in range(len(items) - 2 - i, i, -1):
if comp(items[j - 1], items[j]):
items[j], items[j - 1] = items[j - 1], items[j]
swapped = True
if not swapped:
break
return items
def main():
s = [1, 10, 2, 8, 5]
print(bubble_sort(s))
if __name__ == '__main__':
main()
| true
| true
|
1c43f7cc88953082721b55d83771cbbd3042f65b
| 1,154
|
py
|
Python
|
check_generated_geometry.py
|
hyuanmech/MOPSO
|
f2cbe9151d9dbd21b562957b368f22e2648232b9
|
[
"MIT"
] | null | null | null |
check_generated_geometry.py
|
hyuanmech/MOPSO
|
f2cbe9151d9dbd21b562957b368f22e2648232b9
|
[
"MIT"
] | null | null | null |
check_generated_geometry.py
|
hyuanmech/MOPSO
|
f2cbe9151d9dbd21b562957b368f22e2648232b9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 16:05:17 2020
@author: yuanh
"""
import os
import shutil
from openpyxl import load_workbook
import numpy as np
it = 6
flag = 0
nPop = 100
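# Reading of the two modes (inferred from the code below, not documented upstream):
# flag == 1 copies the geometry PDFs for the particle indices listed in
# Positions.xlsx into a "<it>_MU_all" folder; otherwise it copies the PDFs of
# all nPop particles of iteration `it` into a "<it>_all" folder.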
if flag == 1:
n = 9
index = np.zeros((n, 1))
wb = load_workbook('Positions.xlsx')
sheet = wb['2_mu']
for i in range(n):
index[i,0] = sheet.cell(row=i+2,column=1).value
if flag == 1:
os.mkdir(str(it)+'_MU_all')
for hh in range(n):
source = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_"+str(int(index[hh,0]))+"_MU"+"/"+str(it)+"_"+str(int(index[hh,0]))+"_MU_geo.pdf"
destination = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_MU_all"+"/"+str(it)+"_"+str(int(index[hh,0]))+"_MU_geo.pdf"
shutil.copyfile(source, destination)
else:
os.mkdir(str(it)+'_all')
for hh in range(nPop):
source = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_"+str(hh)+"/"+str(it)+"_"+str(hh)+"_geo.pdf"
destination = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_all"+"/"+str(it)+"_"+str(hh)+"_geo.pdf"
shutil.copyfile(source, destination)
| 28.85
| 146
| 0.587522
|
import os
import shutil
from openpyxl import load_workbook
import numpy as np
it = 6
flag = 0
nPop = 100
if flag == 1:
n = 9
index = np.zeros((n, 1))
wb = load_workbook('Positions.xlsx')
sheet = wb['2_mu']
for i in range(n):
index[i,0] = sheet.cell(row=i+2,column=1).value
if flag == 1:
os.mkdir(str(it)+'_MU_all')
for hh in range(n):
source = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_"+str(int(index[hh,0]))+"_MU"+"/"+str(it)+"_"+str(int(index[hh,0]))+"_MU_geo.pdf"
destination = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_MU_all"+"/"+str(it)+"_"+str(int(index[hh,0]))+"_MU_geo.pdf"
shutil.copyfile(source, destination)
else:
os.mkdir(str(it)+'_all')
for hh in range(nPop):
source = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_"+str(hh)+"/"+str(it)+"_"+str(hh)+"_geo.pdf"
destination = "J:/Coupler_optimization/MOPSO_CAE/"+str(it)+"_all"+"/"+str(it)+"_"+str(hh)+"_geo.pdf"
shutil.copyfile(source, destination)
| true
| true
|
1c43fa14320229168e0e657e1dda3761504a32b4
| 992
|
py
|
Python
|
guillotina/tests/test_middlewares.py
|
psanlorenzo/guillotina
|
0840cf39914d23a9e26e35bd40939511d3ca78d7
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/tests/test_middlewares.py
|
psanlorenzo/guillotina
|
0840cf39914d23a9e26e35bd40939511d3ca78d7
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/tests/test_middlewares.py
|
psanlorenzo/guillotina
|
0840cf39914d23a9e26e35bd40939511d3ca78d7
|
[
"BSD-2-Clause"
] | null | null | null |
import asyncio
import pytest
import time
class AsgiMiddlewate:
def __init__(self, app):
self.next_app = app
async def __call__(self, scope, receive, send):
start = time.time()
await asyncio.sleep(0.1)
response = await self.next_app(scope, receive, send)
end = time.time()
response.headers["Measures"] = str(end - start)
return response
@pytest.mark.asyncio
@pytest.mark.app_settings({"middlewares": ["guillotina.tests.test_middlewares.AsgiMiddlewate"]})
async def test_asgi_middleware(container_requester):
async with container_requester as requester:
response, _, headers = await requester.make_request("GET", "/")
assert response == {
"@type": "Application",
"databases": ["db", "db-custom"],
"static_directory": ["static", "module_static", "jsapp_static"],
"static_file": ["favicon.ico"],
}
assert float(headers.get("measures")) > 0.1
| 31
| 96
| 0.633065
|
import asyncio
import pytest
import time
class AsgiMiddlewate:
def __init__(self, app):
self.next_app = app
async def __call__(self, scope, receive, send):
start = time.time()
await asyncio.sleep(0.1)
response = await self.next_app(scope, receive, send)
end = time.time()
response.headers["Measures"] = str(end - start)
return response
@pytest.mark.asyncio
@pytest.mark.app_settings({"middlewares": ["guillotina.tests.test_middlewares.AsgiMiddlewate"]})
async def test_asgi_middleware(container_requester):
async with container_requester as requester:
response, _, headers = await requester.make_request("GET", "/")
assert response == {
"@type": "Application",
"databases": ["db", "db-custom"],
"static_directory": ["static", "module_static", "jsapp_static"],
"static_file": ["favicon.ico"],
}
assert float(headers.get("measures")) > 0.1
| true
| true
|
1c43fa71bbb82846c555d0bca310adf074f93a62
| 2,024
|
py
|
Python
|
third_party/tests/Opentitan/util/tlgen/item.py
|
parzival3/Surelog
|
cf126533ebfb2af7df321057af9e3535feb30487
|
[
"Apache-2.0"
] | 156
|
2019-11-16T17:29:55.000Z
|
2022-01-21T05:41:13.000Z
|
third_party/tests/Opentitan/util/tlgen/item.py
|
parzival3/Surelog
|
cf126533ebfb2af7df321057af9e3535feb30487
|
[
"Apache-2.0"
] | 414
|
2021-06-11T07:22:01.000Z
|
2022-03-31T22:06:14.000Z
|
third_party/tests/Opentitan/util/tlgen/item.py
|
parzival3/Surelog
|
cf126533ebfb2af7df321057af9e3535feb30487
|
[
"Apache-2.0"
] | 30
|
2019-11-18T16:31:40.000Z
|
2021-12-26T01:22:51.000Z
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from enum import Enum
class Edge:
"""Edge class contains the connection from a node to a node.
a Node can be a host port, output of async_fifo, port in a socket,
or a device port.
"""
def __init__(self, us, ds):
self.us = us
self.ds = ds
def __repr__(self):
return "U(%s) D(%s)" % (self.us.name, self.ds.name)
#Edges = List[Edge]
#Clocks = List[str] # If length is more than one, should be exactly two
# [UpstreamClock, DownstreamClock]
class NodeType(Enum):
HOST = 1
DEVICE = 2
ASYNC_FIFO = 3
SOCKET_1N = 4
SOCKET_M1 = 5
class Node:
"""Node class is a port that communicates from/to other Node or TL-UL
input/output.
"""
name = "" # name: str
# node_type: NodeType
clocks = [] # Clocks # clock domains of the node
resets = [] # Resets # resets of the node
# e.g. async_fifo in : clk_core , out : clk_main
# If NodeType is Socket out from 1:N then address steering is used
# But this value is also propagated up to a Host from multiple Devices
# Device Node should have address_from, address_to
#address_from = 0 #: int
#address_to = 0 #: int
addr_range = []
us = [] # Edges # Number of Ports depends on the NodeType
# 1 for Host, Device, 2 for Async FIFO, N for Sockets
ds = [] # Edges
# Req/Rsp FIFO. default False
# when False, FIFO fully passthrough, no storage element
# when True, FIFO present with default depth, "pipeline_byp"
# controls passthrough option
pipeline = False
# FIFO passtru option. default True
pipeline_byp = True
def __init__(self, name, node_type, clock, reset):
self.name = name
self.node_type = node_type
self.clocks = [clock]
self.resets = [reset]
self.us = []
self.ds = []
self.addr_range = []
| 26.986667
| 74
| 0.630929
|
from enum import Enum
class Edge:
def __init__(self, us, ds):
self.us = us
self.ds = ds
def __repr__(self):
return "U(%s) D(%s)" % (self.us.name, self.ds.name)
class NodeType(Enum):
    HOST = 1
    DEVICE = 2
    ASYNC_FIFO = 3
    SOCKET_1N = 4
    SOCKET_M1 = 5
class Node:
    name = ""
    clocks = []
    resets = []
    addr_range = []
    us = []
    ds = []
    pipeline = False
pipeline_byp = True
def __init__(self, name, node_type, clock, reset):
self.name = name
self.node_type = node_type
self.clocks = [clock]
self.resets = [reset]
self.us = []
self.ds = []
self.addr_range = []
| true
| true
|
1c43fba068f52e1707fd9f7186978e03b366e299
| 1,960
|
py
|
Python
|
cli/tests/test_cli.py
|
SophieHerbst/mne-bids
|
0e9b5e261668b90efec28359772f321d999af7d7
|
[
"BSD-3-Clause"
] | null | null | null |
cli/tests/test_cli.py
|
SophieHerbst/mne-bids
|
0e9b5e261668b90efec28359772f321d999af7d7
|
[
"BSD-3-Clause"
] | null | null | null |
cli/tests/test_cli.py
|
SophieHerbst/mne-bids
|
0e9b5e261668b90efec28359772f321d999af7d7
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test command line."""
# Authors: Teon L Brooks <teon.brooks@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
from os import path as op
import pytest
import mne
from mne.datasets import testing
from mne.utils import run_tests_if_main, ArgvSetter
from cli import mne_bids_raw_to_bids, mne_bids_cp
base_path = op.join(op.dirname(mne.__file__), 'io')
subject_id = '01'
task = 'testing'
def check_usage(module, force_help=False):
"""Ensure we print usage."""
args = ('--help',) if force_help else ()
with ArgvSetter(args) as out:
try:
module.run()
except SystemExit:
pass
assert 'Usage: ' in out.stdout.getvalue()
def test_raw_to_bids(tmpdir):
"""Test mne_bids raw_to_bids."""
output_path = str(tmpdir)
data_path = testing.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
# Check that help is printed
check_usage(mne_bids_raw_to_bids)
# Should work
with ArgvSetter(('--subject_id', subject_id, '--task', task, '--raw',
raw_fname, '--output_path', output_path)):
mne_bids_raw_to_bids.run()
# Too few input args
with pytest.raises(SystemExit):
with ArgvSetter(('--subject_id', subject_id)):
mne_bids_cp.run()
def test_cp(tmpdir):
"""Test mne_bids cp."""
output_path = str(tmpdir)
data_path = op.join(base_path, 'brainvision', 'tests', 'data')
raw_fname = op.join(data_path, 'test.vhdr')
outname = op.join(output_path, 'test2.vhdr')
# Check that help is printed
check_usage(mne_bids_cp)
# Should work
with ArgvSetter(('--input', raw_fname, '--output', outname)):
mne_bids_cp.run()
# Too few input args
with pytest.raises(SystemExit):
with ArgvSetter(('--input', raw_fname)):
mne_bids_cp.run()
run_tests_if_main()
| 27.222222
| 73
| 0.642347
|
from os import path as op
import pytest
import mne
from mne.datasets import testing
from mne.utils import run_tests_if_main, ArgvSetter
from cli import mne_bids_raw_to_bids, mne_bids_cp
base_path = op.join(op.dirname(mne.__file__), 'io')
subject_id = '01'
task = 'testing'
def check_usage(module, force_help=False):
args = ('--help',) if force_help else ()
with ArgvSetter(args) as out:
try:
module.run()
except SystemExit:
pass
assert 'Usage: ' in out.stdout.getvalue()
def test_raw_to_bids(tmpdir):
output_path = str(tmpdir)
data_path = testing.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
check_usage(mne_bids_raw_to_bids)
with ArgvSetter(('--subject_id', subject_id, '--task', task, '--raw',
raw_fname, '--output_path', output_path)):
mne_bids_raw_to_bids.run()
with pytest.raises(SystemExit):
with ArgvSetter(('--subject_id', subject_id)):
            mne_bids_raw_to_bids.run()
def test_cp(tmpdir):
output_path = str(tmpdir)
data_path = op.join(base_path, 'brainvision', 'tests', 'data')
raw_fname = op.join(data_path, 'test.vhdr')
outname = op.join(output_path, 'test2.vhdr')
check_usage(mne_bids_cp)
with ArgvSetter(('--input', raw_fname, '--output', outname)):
mne_bids_cp.run()
with pytest.raises(SystemExit):
with ArgvSetter(('--input', raw_fname)):
mne_bids_cp.run()
run_tests_if_main()
| true
| true
|
1c43fc03ab33ea2e19164c0644663693552fe20d
| 17,011
|
py
|
Python
|
opics/utils.py
|
jaspreetj/opics
|
037ed93ad9f6c9ad9fec5feb214bb89de24635f0
|
[
"MIT"
] | null | null | null |
opics/utils.py
|
jaspreetj/opics
|
037ed93ad9f6c9ad9fec5feb214bb89de24635f0
|
[
"MIT"
] | null | null | null |
opics/utils.py
|
jaspreetj/opics
|
037ed93ad9f6c9ad9fec5feb214bb89de24635f0
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, List, Tuple
import cmath as cm
import time
import re
import itertools
import inspect
from copy import deepcopy
import numpy as np
from numpy import ndarray
from pathlib import PosixPath
from defusedxml.ElementTree import parse
def fromSI(value: str) -> float:
"""converts from SI unit values to metric
Args:
value (str): a value in SI units, e.g. 1.3u
Returns:
float: the value in metric units.
"""
return float(value.replace("u", "e-6"))
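# Example (illustrative): fromSI("1.3u") == 1.3e-06. Note that only the "u"
# (micro) suffix is handled here; other SI prefixes pass through unchanged and
# make float() raise a ValueError.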
def universal_sparam_filereader(
nports: int, sfilename: str, sfiledir: PosixPath, format_type: str = "auto"
) -> Tuple[ndarray, ndarray]:
"""
Function to automatically detect the sparameter file format and use appropriate method to delimit and format sparam data
This function is a unified version of sparameter reader function defined in https://github.com/BYUCamachoLab/simphony
Args:
nports: Number of ports
sfilename: XML look-up-table filename
sfiledir: Path to the directory containing the XML file
format_type: Format type. For more information: https://support.lumerical.com/hc/en-us/articles/360036618513-S-parameter-file-formats
"""
numports = nports
filename = sfiledir / sfilename
if format_type == "auto":
try:
# print("try A")
result = universal_sparam_filereader(nports, sfilename, sfiledir, "A")
return result
except Exception:
try:
# print("try B")
result = universal_sparam_filereader(nports, sfilename, sfiledir, "B")
return result
except Exception:
# print("try C")
result = universal_sparam_filereader(nports, sfilename, sfiledir, "C")
return result
elif format_type == "A":
"""
dc_halfring_te_1550
Returns the s-parameters across some frequency range for the Sparam fileformat A
input:
["port 1",""]
["port 2",""]
["port 3",""]
["port 4",""]
("port 1","mode 1",1,"port 1",1,"transmission")
(101, 3)
output:
[frequency, s-parameters]
"""
F = []
S = []
with open(filename, "r") as fid:
for i in range(5):
line = fid.readline()
line = fid.readline()
numrows = int(tuple(line[1:-2].split(","))[0])
S = np.zeros((numrows, numports, numports), dtype="complex128")
r = m = n = 0
for line in fid:
if line[0] == "(":
continue
data = line.split()
data = list(map(float, data))
if m == 0 and n == 0:
F.append(data[0])
S[r, m, n] = data[1] * np.exp(1j * data[2])
r += 1
if r == numrows:
r = 0
m += 1
if m == numports:
m = 0
n += 1
if n == numports:
break
return (np.array(F), S)
elif format_type == "B":
"""
ebeam_bdc_te1550, nanotaper, ebeam_y_1550
        Returns the s-parameters across some frequency range for the Sparam file format B
input:
('port 1','TE',1,'port 1',1,'transmission')
(51,3)
output:
[frequency, s-parameters]
"""
F = []
S = []
with open(filename, "r") as fid:
line = fid.readline()
line = fid.readline()
numrows = int(tuple(line[1:-2].split(","))[0])
S = np.zeros((numrows, numports, numports), dtype="complex128")
r = m = n = 0
for line in fid:
if line[0] == "(":
continue
data = line.split()
data = list(map(float, data))
if m == 0 and n == 0:
F.append(data[0])
S[r, m, n] = data[1] * np.exp(1j * data[2])
r += 1
if r == numrows:
r = 0
m += 1
if m == numports:
m = 0
n += 1
if n == numports:
break
return (np.array(F), S)
elif format_type == "C":
"""
ebeam_gc_te1550
        Returns the s-parameters across some frequency range for the Sparam file format C
input:
columns with space delimiter
output:
[frequency, s-parameters]
"""
with open(filename) as fid:
# grating coupler compact models have 100 points for each s-matrix index
arrlen = 100
lines = fid.readlines()
F = np.zeros(arrlen)
S = np.zeros((arrlen, 2, 2), "complex128")
for i in range(0, arrlen):
words = lines[i].split()
F[i] = float(words[0])
S[i, 0, 0] = cm.rect(float(words[1]), float(words[2]))
S[i, 0, 1] = cm.rect(float(words[3]), float(words[4]))
S[i, 1, 0] = cm.rect(float(words[5]), float(words[6]))
S[i, 1, 1] = cm.rect(float(words[7]), float(words[8]))
F = F[::-1]
S = S[::-1, :, :]
return (np.array(F), S)
def LUT_reader(filedir: PosixPath, lutfilename: str, lutdata: List[List[str]]):
"""
Reads look up table data.
Args:
filedir: Directory of the XML look-up-table file.
lutfilename: Look-up-table filename.
lutdata: Look-up-table arguments.
"""
xml = parse(filedir / lutfilename)
root = xml.getroot()
for node in root.iter("association"):
sample = [[each.attrib["name"], each.text] for each in node.iter("value")]
if sorted(sample[0:-1]) == sorted(lutdata):
break
sparam_file = sample[-1][1].split(";")
return (sparam_file, xml, node)
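# Usage sketch (hypothetical argument values, for illustration only):
#   sparam_file, xml, node = LUT_reader(filedir, "dc_map.xml", [["gap", "0.2"], ["radius", "5"]])
# matches the <association> whose <value> name/text pairs equal lutdata
# (order-insensitively, thanks to the sorted() comparison) and returns the
# associated s-parameter file list together with the XML tree and node.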
def LUT_processor(
filedir: PosixPath,
lutfilename: str,
lutdata: List[List[str]],
nports: int,
sparam_attr: str,
verbose: bool = False,
) -> Tuple[Tuple[ndarray, ndarray], str]:
"""process look up table data"""
start = time.time()
sparam_file, xml, node = LUT_reader(filedir, lutfilename, lutdata)
# read data
if ".npz" in sparam_file[0] or ".npz" in sparam_file[-1]:
npzfile = [each for each in sparam_file if ".npz" in each][0]
tempdata = np.load(filedir / npzfile)
sdata = (tempdata["f"], tempdata["s"])
npz_file = npzfile
else:
if verbose:
print("numpy datafile not found. reading sparam file instead..")
sdata = universal_sparam_filereader(nports, sparam_file[-1], filedir, "auto")
# create npz file name
npz_file = sparam_file[-1].split(".")[0]
# save as npz file
np.savez(filedir / npz_file, f=sdata[0], s=sdata[1])
# update xml file
sparam_file.append(npz_file + ".npz")
sparam_file = list(set(sparam_file))
for each in node.iter("value"):
if each.attrib["name"] == sparam_attr:
each.text = ";".join(sparam_file)
xml.write(filedir / lutfilename)
if verbose:
print("SParam data extracted in ", time.time() - start)
return (sdata, npz_file)
def NetlistProcessor(spice_filepath, Network, libraries, c_, circuitData, verbose=True):
"""
    Processes a spice netlist to set up a circuit ready for simulation.
    Args:
        spice_filepath: Path to the spice netlist file.
        Network: Network class used to instantiate the circuit, called as Network(network_id=..., f=freq).
        libraries: Module holding the component libraries referenced by the netlist.
        c_: Speed of light, used to convert the wavelength sweep into a frequency sweep.
        circuitData: Parsed netlist dictionary, as returned by netlistParser.readfile().
        verbose: If True, print the parsed circuit data.
"""
if verbose:
for key, value in circuitData.items():
print(key, str(value))
# define frequency range and resolution
freq = np.linspace(
c_ / circuitData["sim_params"][0],
c_ / circuitData["sim_params"][1],
circuitData["sim_params"][2],
)
# create a circuit
subckt = Network(network_id=circuitData["networkID"], f=freq)
# get library
all_libraries = dict(
[
each
for each in inspect.getmembers(libraries, inspect.ismodule)
if each[0][0] != "_"
]
)
libs_comps = {}
for each_lib in list(set(circuitData["compLibs"])):
# temp_comps = dict(inspect.getmembers(all_libraries[each_lib], inspect.isclass))
libs_comps[each_lib] = all_libraries[each_lib].component_factory
# add circuit components
for i in range(len(circuitData["compModels"])):
# get component model
comp_model = libs_comps[circuitData["compLibs"][i]][
circuitData["compModels"][i]
]
# clean attributes
cls_attrs = deepcopy(comp_model.cls_attrs) # class attributes
comp_attrs = circuitData["compAttrs"][i] # component attributes
# clean up attributes
for each_cls_attrs in cls_attrs.keys():
for each_comp_attrs in comp_attrs.keys():
if each_cls_attrs in each_comp_attrs:
cls_attrs[each_cls_attrs] = fromSI(comp_attrs[each_comp_attrs])
subckt.add_component(
libs_comps[circuitData["compLibs"][i]][circuitData["compModels"][i]],
params=cls_attrs,
component_id=circuitData["compLabels"][i],
)
# add circuit netlist
subckt.global_netlist[circuitData["compLabels"][i]] = circuitData[
"circuitNets"
][i]
# add unique net component connections
subckt.current_connections = circuitData["circuitConns"]
return subckt
class netlistParser:
"A netlist parser to read spi files generated by SiEPIC tools"
def __init__(self, mainfile_path: PosixPath) -> None:
self.circuitComponents = []
self.circuitConnections = []
self.mainfile_path = mainfile_path
def readfile(self) -> Dict[str, Any]:
filepath = self.mainfile_path
circuitID = ""
inp = ""
out = ""
inp_net = 0
out_net = []
circuitLabels = []
circuitModels = []
circuitConns = []
circuitNets = []
componentLibs = []
componentAttrs = []
component_locations = []
temp_file = open(filepath, "r")
temp_lines = temp_file.readlines()
free_node_idx = -1
freq_data = []
seek_component = 0
seek_ona = 0
orthogonal_ID = 0
# extract circuit connectivity
for each_line in temp_lines:
            each_line = re.sub(" +", " ", each_line.strip())  # collapse runs of spaces into one
if each_line.startswith("*"):
continue
else:
each_line = "".join(
[
"".join(filter(None, each_section.split(" ")))
if ('"' in each_section)
else each_section
for each_section in re.split(
r"""("[^"]*"|'[^']*')""", each_line
)
]
)
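                # The split/join above strips spaces *inside* quoted sections, so
                # the naive split(" ") below keeps each quoted attribute value
                # intact as a single token.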
temp_data = each_line.split(" ")
if len(temp_data) > 1: # if line is not an empty one
MC_location = []
if temp_data[0] == ".subckt":
circuitID = temp_data[1]
inp = temp_data[2]
out = [temp_data[x] for x in range(3, len(temp_data))]
seek_component = 1
elif temp_data[0] == ".param":
continue
elif temp_data[0] == ".ends":
seek_component = 0
elif temp_data[0] == ".ona":
seek_ona = 1
elif seek_ona == 1:
# ONA related data
if len(temp_data) < 3:
temp_data = [0] + temp_data[-1].split("=")
if temp_data[1] == "orthogonal_identifier":
orthogonal_ID = int(temp_data[-1])
elif temp_data[1] == "start":
freq_data.append(float(temp_data[-1]))
elif temp_data[1] == "stop":
freq_data.append(float(temp_data[-1]))
elif temp_data[1] == "number_of_points":
freq_data.append(int(temp_data[-1]))
elif seek_component == 1:
# otherwise its component data
circuitLabels.append(temp_data[0])
temp_ports = []
found_ports = 0
found_library = 0
for i in range(1, len(temp_data)):
# if its an optical port
if (
"N$" in temp_data[i]
and "N$None".lower() != temp_data[i].lower()
):
temp_ports.append(int(temp_data[i].replace("N$", "")))
found_ports = 1
elif "N$None".lower() == temp_data[i].lower():
temp_ports.append(free_node_idx)
free_node_idx -= 1
found_ports = 1
elif inp == temp_data[i]:
temp_ports.append(free_node_idx)
inp_net = free_node_idx
free_node_idx -= 1
found_ports = 1
elif out[0] == temp_data[i]:
temp_ports.append(free_node_idx)
out_net.append(free_node_idx)
free_node_idx -= 1
if len(out) > 1:
out.pop(0)
if len(out) == 0:
found_ports = 1
elif found_ports == 1 and "N$" not in temp_data[i]:
circuitModels.append(temp_data[i])
temp_cls_atrr = (
{}
) # deepcopy(lib[temp_data[i]].cls_attrs)
found_ports = -1
elif "lay" in temp_data[i] or "sch" in temp_data[i]:
if "lay" in temp_data[i]:
MC_location.append(
fromSI(temp_data[i].split("=")[-1]) * 1e6
)
# ignore layout and schematic position data for now.
# adapt opics models to accept this data
# they are component parameters
elif "library" in temp_data[i]:
# cprint(temp_data[i])
temp_lib = (
temp_data[i].replace('"', "").split("=")[1].split()
)
componentLibs.append(
temp_lib[-1].split("/")[-1].lower()
)
found_library = 1
elif "=" in temp_data[i] and found_library == 1:
# if its a components' attribute
temp_attr = temp_data[i].split("=")
# print(temp_attr[0])
# if(temp_attr[0] in temp_cls_atrr):
temp_cls_atrr[temp_attr[0]] = temp_attr[1].strip('"')
componentAttrs.append(temp_cls_atrr)
circuitNets.append(temp_ports)
if bool(MC_location):
component_locations.append(MC_location)
circuitConns = list(set(list(itertools.chain(*circuitNets))))
# remove IOs from component connections' list
circuitConns = [each for each in circuitConns if each >= 0]
# return all data
return {
"circuitNets": circuitNets,
"circuitConns": circuitConns,
"compLibs": componentLibs,
"compModels": circuitModels,
"compLabels": circuitLabels,
"compAttrs": componentAttrs,
"compLocs": component_locations,
"networkID": circuitID,
"inp_net": inp_net,
"out_net": out_net,
"sim_params": freq_data,
"OID": orthogonal_ID,
}
| 35.146694
| 141
| 0.479043
|
from typing import Any, Dict, List, Tuple
import cmath as cm
import time
import re
import itertools
import inspect
from copy import deepcopy
import numpy as np
from numpy import ndarray
from pathlib import PosixPath
from defusedxml.ElementTree import parse
def fromSI(value: str) -> float:
return float(value.replace("u", "e-6"))
def universal_sparam_filereader(
nports: int, sfilename: str, sfiledir: PosixPath, format_type: str = "auto"
) -> Tuple[ndarray, ndarray]:
numports = nports
filename = sfiledir / sfilename
if format_type == "auto":
try:
result = universal_sparam_filereader(nports, sfilename, sfiledir, "A")
return result
except Exception:
try:
result = universal_sparam_filereader(nports, sfilename, sfiledir, "B")
return result
except Exception:
result = universal_sparam_filereader(nports, sfilename, sfiledir, "C")
return result
elif format_type == "A":
"""
dc_halfring_te_1550
Returns the s-parameters across some frequency range for the Sparam fileformat A
input:
["port 1",""]
["port 2",""]
["port 3",""]
["port 4",""]
("port 1","mode 1",1,"port 1",1,"transmission")
(101, 3)
output:
[frequency, s-parameters]
"""
F = []
S = []
with open(filename, "r") as fid:
for i in range(5):
line = fid.readline()
line = fid.readline()
numrows = int(tuple(line[1:-2].split(","))[0])
S = np.zeros((numrows, numports, numports), dtype="complex128")
r = m = n = 0
for line in fid:
if line[0] == "(":
continue
data = line.split()
data = list(map(float, data))
if m == 0 and n == 0:
F.append(data[0])
S[r, m, n] = data[1] * np.exp(1j * data[2])
r += 1
if r == numrows:
r = 0
m += 1
if m == numports:
m = 0
n += 1
if n == numports:
break
return (np.array(F), S)
elif format_type == "B":
"""
ebeam_bdc_te1550, nanotaper, ebeam_y_1550
        Returns the s-parameters across some frequency range for the Sparam file format B
input:
('port 1','TE',1,'port 1',1,'transmission')
(51,3)
output:
[frequency, s-parameters]
"""
F = []
S = []
with open(filename, "r") as fid:
line = fid.readline()
line = fid.readline()
numrows = int(tuple(line[1:-2].split(","))[0])
S = np.zeros((numrows, numports, numports), dtype="complex128")
r = m = n = 0
for line in fid:
if line[0] == "(":
continue
data = line.split()
data = list(map(float, data))
if m == 0 and n == 0:
F.append(data[0])
S[r, m, n] = data[1] * np.exp(1j * data[2])
r += 1
if r == numrows:
r = 0
m += 1
if m == numports:
m = 0
n += 1
if n == numports:
break
return (np.array(F), S)
elif format_type == "C":
"""
ebeam_gc_te1550
        Returns the s-parameters across some frequency range for the Sparam file format C
input:
columns with space delimiter
output:
[frequency, s-parameters]
"""
with open(filename) as fid:
arrlen = 100
lines = fid.readlines()
F = np.zeros(arrlen)
S = np.zeros((arrlen, 2, 2), "complex128")
for i in range(0, arrlen):
words = lines[i].split()
F[i] = float(words[0])
S[i, 0, 0] = cm.rect(float(words[1]), float(words[2]))
S[i, 0, 1] = cm.rect(float(words[3]), float(words[4]))
S[i, 1, 0] = cm.rect(float(words[5]), float(words[6]))
S[i, 1, 1] = cm.rect(float(words[7]), float(words[8]))
F = F[::-1]
S = S[::-1, :, :]
return (np.array(F), S)
def LUT_reader(filedir: PosixPath, lutfilename: str, lutdata: List[List[str]]):
xml = parse(filedir / lutfilename)
root = xml.getroot()
for node in root.iter("association"):
sample = [[each.attrib["name"], each.text] for each in node.iter("value")]
if sorted(sample[0:-1]) == sorted(lutdata):
break
sparam_file = sample[-1][1].split(";")
return (sparam_file, xml, node)
def LUT_processor(
filedir: PosixPath,
lutfilename: str,
lutdata: List[List[str]],
nports: int,
sparam_attr: str,
verbose: bool = False,
) -> Tuple[Tuple[ndarray, ndarray], str]:
start = time.time()
sparam_file, xml, node = LUT_reader(filedir, lutfilename, lutdata)
if ".npz" in sparam_file[0] or ".npz" in sparam_file[-1]:
npzfile = [each for each in sparam_file if ".npz" in each][0]
tempdata = np.load(filedir / npzfile)
sdata = (tempdata["f"], tempdata["s"])
npz_file = npzfile
else:
if verbose:
print("numpy datafile not found. reading sparam file instead..")
sdata = universal_sparam_filereader(nports, sparam_file[-1], filedir, "auto")
npz_file = sparam_file[-1].split(".")[0]
np.savez(filedir / npz_file, f=sdata[0], s=sdata[1])
sparam_file.append(npz_file + ".npz")
sparam_file = list(set(sparam_file))
for each in node.iter("value"):
if each.attrib["name"] == sparam_attr:
each.text = ";".join(sparam_file)
xml.write(filedir / lutfilename)
if verbose:
print("SParam data extracted in ", time.time() - start)
return (sdata, npz_file)
def NetlistProcessor(spice_filepath, Network, libraries, c_, circuitData, verbose=True):
if verbose:
for key, value in circuitData.items():
print(key, str(value))
freq = np.linspace(
c_ / circuitData["sim_params"][0],
c_ / circuitData["sim_params"][1],
circuitData["sim_params"][2],
)
subckt = Network(network_id=circuitData["networkID"], f=freq)
all_libraries = dict(
[
each
for each in inspect.getmembers(libraries, inspect.ismodule)
if each[0][0] != "_"
]
)
libs_comps = {}
for each_lib in list(set(circuitData["compLibs"])):
libs_comps[each_lib] = all_libraries[each_lib].component_factory
for i in range(len(circuitData["compModels"])):
comp_model = libs_comps[circuitData["compLibs"][i]][
circuitData["compModels"][i]
]
cls_attrs = deepcopy(comp_model.cls_attrs)
comp_attrs = circuitData["compAttrs"][i]
for each_cls_attrs in cls_attrs.keys():
for each_comp_attrs in comp_attrs.keys():
if each_cls_attrs in each_comp_attrs:
cls_attrs[each_cls_attrs] = fromSI(comp_attrs[each_comp_attrs])
subckt.add_component(
libs_comps[circuitData["compLibs"][i]][circuitData["compModels"][i]],
params=cls_attrs,
component_id=circuitData["compLabels"][i],
)
subckt.global_netlist[circuitData["compLabels"][i]] = circuitData[
"circuitNets"
][i]
subckt.current_connections = circuitData["circuitConns"]
return subckt
class netlistParser:
def __init__(self, mainfile_path: PosixPath) -> None:
self.circuitComponents = []
self.circuitConnections = []
self.mainfile_path = mainfile_path
def readfile(self) -> Dict[str, Any]:
filepath = self.mainfile_path
circuitID = ""
inp = ""
out = ""
inp_net = 0
out_net = []
circuitLabels = []
circuitModels = []
circuitConns = []
circuitNets = []
componentLibs = []
componentAttrs = []
component_locations = []
temp_file = open(filepath, "r")
temp_lines = temp_file.readlines()
free_node_idx = -1
freq_data = []
seek_component = 0
seek_ona = 0
orthogonal_ID = 0
for each_line in temp_lines:
each_line = re.sub(" +", " ", each_line.strip())
if each_line.startswith("*"):
continue
else:
each_line = "".join(
[
"".join(filter(None, each_section.split(" ")))
if ('"' in each_section)
else each_section
for each_section in re.split(
r"""("[^"]*"|'[^']*')""", each_line
)
]
)
temp_data = each_line.split(" ")
if len(temp_data) > 1: # if line is not an empty one
MC_location = []
if temp_data[0] == ".subckt":
circuitID = temp_data[1]
inp = temp_data[2]
out = [temp_data[x] for x in range(3, len(temp_data))]
seek_component = 1
elif temp_data[0] == ".param":
continue
elif temp_data[0] == ".ends":
seek_component = 0
elif temp_data[0] == ".ona":
seek_ona = 1
elif seek_ona == 1:
# ONA related data
if len(temp_data) < 3:
temp_data = [0] + temp_data[-1].split("=")
if temp_data[1] == "orthogonal_identifier":
orthogonal_ID = int(temp_data[-1])
elif temp_data[1] == "start":
freq_data.append(float(temp_data[-1]))
elif temp_data[1] == "stop":
freq_data.append(float(temp_data[-1]))
elif temp_data[1] == "number_of_points":
freq_data.append(int(temp_data[-1]))
elif seek_component == 1:
# otherwise its component data
circuitLabels.append(temp_data[0])
temp_ports = []
found_ports = 0
found_library = 0
for i in range(1, len(temp_data)):
# if its an optical port
if (
"N$" in temp_data[i]
and "N$None".lower() != temp_data[i].lower()
):
temp_ports.append(int(temp_data[i].replace("N$", "")))
found_ports = 1
elif "N$None".lower() == temp_data[i].lower():
temp_ports.append(free_node_idx)
free_node_idx -= 1
found_ports = 1
elif inp == temp_data[i]:
temp_ports.append(free_node_idx)
inp_net = free_node_idx
free_node_idx -= 1
found_ports = 1
elif out[0] == temp_data[i]:
temp_ports.append(free_node_idx)
out_net.append(free_node_idx)
free_node_idx -= 1
if len(out) > 1:
out.pop(0)
if len(out) == 0:
found_ports = 1
elif found_ports == 1 and "N$" not in temp_data[i]:
circuitModels.append(temp_data[i])
temp_cls_atrr = (
{}
) # deepcopy(lib[temp_data[i]].cls_attrs)
found_ports = -1
elif "lay" in temp_data[i] or "sch" in temp_data[i]:
if "lay" in temp_data[i]:
MC_location.append(
fromSI(temp_data[i].split("=")[-1]) * 1e6
)
# ignore layout and schematic position data for now.
# adapt opics models to accept this data
# they are component parameters
elif "library" in temp_data[i]:
# cprint(temp_data[i])
temp_lib = (
temp_data[i].replace('"', "").split("=")[1].split()
)
componentLibs.append(
temp_lib[-1].split("/")[-1].lower()
)
found_library = 1
elif "=" in temp_data[i] and found_library == 1:
# if its a components' attribute
temp_attr = temp_data[i].split("=")
# print(temp_attr[0])
# if(temp_attr[0] in temp_cls_atrr):
temp_cls_atrr[temp_attr[0]] = temp_attr[1].strip('"')
componentAttrs.append(temp_cls_atrr)
circuitNets.append(temp_ports)
if bool(MC_location):
component_locations.append(MC_location)
circuitConns = list(set(list(itertools.chain(*circuitNets))))
circuitConns = [each for each in circuitConns if each >= 0]
# return all data
return {
"circuitNets": circuitNets,
"circuitConns": circuitConns,
"compLibs": componentLibs,
"compModels": circuitModels,
"compLabels": circuitLabels,
"compAttrs": componentAttrs,
"compLocs": component_locations,
"networkID": circuitID,
"inp_net": inp_net,
"out_net": out_net,
"sim_params": freq_data,
"OID": orthogonal_ID,
}
| true
| true
|
1c43fd68b8e8426feafb7efe2b494da8cef3208e
| 16,870
|
py
|
Python
|
django_extensions/management/modelviz.py
|
echirchir/django-extensions
|
ae38e33309b87bf7431bc5f1321699f5d00a0431
|
[
"MIT"
] | null | null | null |
django_extensions/management/modelviz.py
|
echirchir/django-extensions
|
ae38e33309b87bf7431bc5f1321699f5d00a0431
|
[
"MIT"
] | null | null | null |
django_extensions/management/modelviz.py
|
echirchir/django-extensions
|
ae38e33309b87bf7431bc5f1321699f5d00a0431
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
modelviz.py - DOT file generator for Django Models
Based on:
Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
Adapted to be used with django-extensions
"""
import datetime
import os
import re
import six
from django.apps import apps
from django.db.models.fields.related import (
ForeignKey, ManyToManyField, OneToOneField, RelatedField,
)
from django.contrib.contenttypes.fields import GenericRelation
from django.template import Context, Template, loader
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
from django.utils.translation import activate as activate_language
__version__ = "1.1"
__license__ = "Python"
__author__ = "Bas van Oostveen <v.oostveen@gmail.com>",
__contributors__ = [
"Antonio Cavedoni <http://cavedoni.com/>"
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Joern Hees <gitdev@joernhees.de>",
"Kevin Cherepski <cherepski@gmail.com>",
"Jose Tomas Tocino <theom3ga@gmail.com>",
"Adam Dobrawy <naczelnik@jawnosc.tk>",
"Mikkel Munch Mortensen <https://www.detfalskested.dk/>",
"Andrzej Bistram <andrzej.bistram@gmail.com>",
"Daniel Lipsitt <danlipsitt@gmail.com>",
]
def parse_file_or_list(arg):
if not arg:
return []
if isinstance(arg, (list, tuple, set)):
return arg
if ',' not in arg and os.path.isfile(arg):
return [e.strip() for e in open(arg).readlines()]
return [e.strip() for e in arg.split(',')]
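# Behaviour sketch (illustrative): parse_file_or_list("a, b") -> ['a', 'b'];
# parse_file_or_list(['a', 'b']) is returned unchanged; and a comma-free
# argument naming an existing file is read line by line, one entry per
# stripped line.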
class ModelGraph:
def __init__(self, app_labels, **kwargs):
self.graphs = []
self.cli_options = kwargs.get('cli_options', None)
self.disable_fields = kwargs.get('disable_fields', False)
self.disable_abstract_fields = kwargs.get('disable_abstract_fields', False)
self.include_models = parse_file_or_list(
kwargs.get('include_models', "")
)
self.all_applications = kwargs.get('all_applications', False)
self.use_subgraph = kwargs.get('group_models', False)
self.verbose_names = kwargs.get('verbose_names', False)
self.inheritance = kwargs.get('inheritance', True)
self.relations_as_fields = kwargs.get("relations_as_fields", True)
self.sort_fields = kwargs.get("sort_fields", True)
self.language = kwargs.get('language', None)
if self.language is not None:
activate_language(self.language)
self.exclude_columns = parse_file_or_list(
kwargs.get('exclude_columns', "")
)
self.exclude_models = parse_file_or_list(
kwargs.get('exclude_models', "")
)
self.hide_edge_labels = kwargs.get('hide_edge_labels', False)
self.arrow_shape = kwargs.get("arrow_shape")
if self.all_applications:
self.app_labels = [app.label for app in apps.get_app_configs()]
else:
self.app_labels = app_labels
def generate_graph_data(self):
self.process_apps()
nodes = []
for graph in self.graphs:
nodes.extend([e['name'] for e in graph['models']])
for graph in self.graphs:
for model in graph['models']:
for relation in model['relations']:
if relation is not None:
if relation['target'] in nodes:
relation['needs_node'] = False
def get_graph_data(self, as_json=False):
now = datetime.datetime.now()
graph_data = {
'created_at': now.strftime("%Y-%m-%d %H:%M"),
'cli_options': self.cli_options,
'disable_fields': self.disable_fields,
'disable_abstract_fields': self.disable_abstract_fields,
'use_subgraph': self.use_subgraph,
}
if as_json:
graph_data['graphs'] = [context.flatten() for context in self.graphs]
else:
graph_data['graphs'] = self.graphs
return graph_data
def add_attributes(self, field, abstract_fields):
if self.verbose_names and field.verbose_name:
label = force_str(field.verbose_name)
if label.islower():
label = label.capitalize()
else:
label = field.name
t = type(field).__name__
if isinstance(field, (OneToOneField, ForeignKey)):
t += " ({0})".format(field.remote_field.field_name)
# TODO: ManyToManyField, GenericRelation
return {
'name': field.name,
'label': label,
'type': t,
'blank': field.blank,
'abstract': field in abstract_fields,
'relation': isinstance(field, RelatedField),
'primary_key': field.primary_key,
}
def add_relation(self, field, model, extras=""):
if self.verbose_names and field.verbose_name:
label = force_str(field.verbose_name)
if label.islower():
label = label.capitalize()
else:
label = field.name
# show related field name
if hasattr(field, 'related_query_name'):
related_query_name = field.related_query_name()
if self.verbose_names and related_query_name.islower():
related_query_name = related_query_name.replace('_', ' ').capitalize()
label = u'{} ({})'.format(label, force_str(related_query_name))
if self.hide_edge_labels:
label = ''
# handle self-relationships and lazy-relationships
if isinstance(field.remote_field.model, six.string_types):
if field.remote_field.model == 'self':
target_model = field.model
else:
if '.' in field.remote_field.model:
app_label, model_name = field.remote_field.model.split('.', 1)
else:
app_label = field.model._meta.app_label
model_name = field.remote_field.model
target_model = apps.get_model(app_label, model_name)
else:
target_model = field.remote_field.model
_rel = self.get_relation_context(target_model, field, label, extras)
if _rel not in model['relations'] and self.use_model(_rel['target']):
return _rel
def get_abstract_models(self, appmodels):
abstract_models = []
for appmodel in appmodels:
abstract_models += [
abstract_model for abstract_model in appmodel.__bases__
if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract
]
abstract_models = list(set(abstract_models)) # remove duplicates
return abstract_models
def get_app_context(self, app):
return Context({
'name': '"%s"' % app.name,
'app_name': "%s" % app.name,
'cluster_app_name': "cluster_%s" % app.name.replace(".", "_"),
'models': []
})
def get_appmodel_attributes(self, appmodel):
if self.relations_as_fields:
attributes = [field for field in appmodel._meta.local_fields]
else:
# Find all the 'real' attributes. Relations are depicted as graph edges instead of attributes
attributes = [field for field in appmodel._meta.local_fields if not
isinstance(field, RelatedField)]
return attributes
def get_appmodel_abstracts(self, appmodel):
return [
abstract_model.__name__ for abstract_model in appmodel.__bases__
if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract
]
def get_appmodel_context(self, appmodel, appmodel_abstracts):
context = {
'app_name': appmodel.__module__.replace(".", "_"),
'name': appmodel.__name__,
'abstracts': appmodel_abstracts,
'fields': [],
'relations': []
}
if self.verbose_names and appmodel._meta.verbose_name:
context['label'] = force_str(appmodel._meta.verbose_name)
else:
context['label'] = context['name']
return context
def get_bases_abstract_fields(self, c):
_abstract_fields = []
for e in c.__bases__:
if hasattr(e, '_meta') and e._meta.abstract:
_abstract_fields.extend(e._meta.fields)
_abstract_fields.extend(self.get_bases_abstract_fields(e))
return _abstract_fields
def get_inheritance_context(self, appmodel, parent):
label = "multi-table"
if parent._meta.abstract:
label = "abstract"
if appmodel._meta.proxy:
label = "proxy"
label += r"\ninheritance"
if self.hide_edge_labels:
label = ''
return {
'target_app': parent.__module__.replace(".", "_"),
'target': parent.__name__,
'type': "inheritance",
'name': "inheritance",
'label': label,
'arrows': '[arrowhead=empty, arrowtail=none, dir=both]',
'needs_node': True,
}
def get_models(self, app):
appmodels = list(app.get_models())
return appmodels
def get_relation_context(self, target_model, field, label, extras):
return {
'target_app': target_model.__module__.replace('.', '_'),
'target': target_model.__name__,
'type': type(field).__name__,
'name': field.name,
'label': label,
'arrows': extras,
'needs_node': True
}
def process_attributes(self, field, model, pk, abstract_fields):
newmodel = model.copy()
        if self.skip_field(field) or (pk and field == pk):
return newmodel
newmodel['fields'].append(self.add_attributes(field, abstract_fields))
return newmodel
def process_apps(self):
for app_label in self.app_labels:
app = apps.get_app_config(app_label)
if not app:
continue
app_graph = self.get_app_context(app)
app_models = self.get_models(app)
abstract_models = self.get_abstract_models(app_models)
app_models = abstract_models + app_models
for appmodel in app_models:
if not self.use_model(appmodel._meta.object_name):
continue
appmodel_abstracts = self.get_appmodel_abstracts(appmodel)
abstract_fields = self.get_bases_abstract_fields(appmodel)
model = self.get_appmodel_context(appmodel, appmodel_abstracts)
attributes = self.get_appmodel_attributes(appmodel)
# find primary key and print it first, ignoring implicit id if other pk exists
pk = appmodel._meta.pk
if pk and not appmodel._meta.abstract and pk in attributes:
model['fields'].append(self.add_attributes(pk, abstract_fields))
for field in attributes:
model = self.process_attributes(field, model, pk, abstract_fields)
if self.sort_fields:
model = self.sort_model_fields(model)
for field in appmodel._meta.local_fields:
model = self.process_local_fields(field, model, abstract_fields)
for field in appmodel._meta.local_many_to_many:
model = self.process_local_many_to_many(field, model)
if self.inheritance:
# add inheritance arrows
for parent in appmodel.__bases__:
model = self.process_parent(parent, appmodel, model)
app_graph['models'].append(model)
if app_graph['models']:
self.graphs.append(app_graph)
def process_local_fields(self, field, model, abstract_fields):
newmodel = model.copy()
if field.attname.endswith('_ptr_id') or field in abstract_fields or self.skip_field(field):
# excluding field redundant with inheritance relation
# excluding fields inherited from abstract classes. they too show as local_fields
return newmodel
if isinstance(field, OneToOneField):
relation = self.add_relation(
field, newmodel, '[arrowhead=none, arrowtail=none, dir=both]'
)
elif isinstance(field, ForeignKey):
relation = self.add_relation(
field,
newmodel,
'[arrowhead=none, arrowtail={}, dir=both]'.format(
self.arrow_shape
),
)
else:
relation = None
if relation is not None:
newmodel['relations'].append(relation)
return newmodel
def process_local_many_to_many(self, field, model):
newmodel = model.copy()
if self.skip_field(field):
return newmodel
relation = None
if isinstance(field, ManyToManyField):
if hasattr(field.remote_field.through, '_meta') and field.remote_field.through._meta.auto_created:
relation = self.add_relation(
field,
newmodel,
'[arrowhead={} arrowtail={}, dir=both]'.format(
self.arrow_shape, self.arrow_shape
),
)
elif isinstance(field, GenericRelation):
relation = self.add_relation(field, newmodel, mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'))
if relation is not None:
newmodel['relations'].append(relation)
return newmodel
def process_parent(self, parent, appmodel, model):
newmodel = model.copy()
if hasattr(parent, "_meta"): # parent is a model
_rel = self.get_inheritance_context(appmodel, parent)
# TODO: seems as if abstract models aren't part of models.getModels, which is why they are printed by this without any attributes.
if _rel not in newmodel['relations'] and self.use_model(_rel['target']):
newmodel['relations'].append(_rel)
return newmodel
def sort_model_fields(self, model):
newmodel = model.copy()
newmodel['fields'] = sorted(newmodel['fields'], key=lambda field: (not field['primary_key'], not field['relation'], field['label']))
return newmodel
def use_model(self, model_name):
"""
Decide whether to use a model, based on the model name and the lists of
models to exclude and include.
"""
# Check against include list.
if self.include_models:
for model_pattern in self.include_models:
model_pattern = '^%s$' % model_pattern.replace('*', '.*')
if re.search(model_pattern, model_name):
return True
# Check against exclude list.
if self.exclude_models:
for model_pattern in self.exclude_models:
model_pattern = '^%s$' % model_pattern.replace('*', '.*')
if re.search(model_pattern, model_name):
return False
# Return `True` if `include_models` is falsey, otherwise return `False`.
return not self.include_models
def skip_field(self, field):
if self.exclude_columns:
if self.verbose_names and field.verbose_name:
if field.verbose_name in self.exclude_columns:
return True
if field.name in self.exclude_columns:
return True
return False
def generate_dot(graph_data, template='django_extensions/graph_models/digraph.dot'):
if isinstance(template, six.string_types):
template = loader.get_template(template)
if not isinstance(template, Template) and not (hasattr(template, 'template') and isinstance(template.template, Template)):
raise Exception("Default Django template loader isn't used. "
"This can lead to the incorrect template rendering. "
"Please, check the settings.")
c = Context(graph_data).flatten()
dot = template.render(c)
return dot
def generate_graph_data(*args, **kwargs):
generator = ModelGraph(*args, **kwargs)
generator.generate_graph_data()
return generator.get_graph_data()
def use_model(model, include_models, exclude_models):
generator = ModelGraph([], include_models=include_models, exclude_models=exclude_models)
return generator.use_model(model)
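A minimal usage sketch for the module above, assuming a configured Django project with django_extensions installed; the 'auth' app label, the option values, and the output path are illustrative assumptions, not part of the original module:
# Hedged usage sketch; the app label, option values, and output filename
# below are assumptions for illustration only.
import django

django.setup()  # assumes DJANGO_SETTINGS_MODULE is set in the environment

# Build the intermediate graph representation for one app, excluding a column.
graph_data = generate_graph_data(
    ['auth'],
    exclude_columns='password',
    verbose_names=True,
    arrow_shape='dot',
)

# Render it through the bundled digraph template; Graphviz can then
# produce an image with: dot -Tpng models.dot -o models.png
dot_source = generate_dot(graph_data)
with open('models.dot', 'w') as fh:
    fh.write(dot_source)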
| 38.960739
| 142
| 0.605809
|
import datetime
import os
import re
import six
from django.apps import apps
from django.db.models.fields.related import (
ForeignKey, ManyToManyField, OneToOneField, RelatedField,
)
from django.contrib.contenttypes.fields import GenericRelation
from django.template import Context, Template, loader
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
from django.utils.translation import activate as activate_language
__version__ = "1.1"
__license__ = "Python"
__author__ = "Bas van Oostveen <v.oostveen@gmail.com>",
__contributors__ = [
"Antonio Cavedoni <http://cavedoni.com/>"
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Joern Hees <gitdev@joernhees.de>",
"Kevin Cherepski <cherepski@gmail.com>",
"Jose Tomas Tocino <theom3ga@gmail.com>",
"Adam Dobrawy <naczelnik@jawnosc.tk>",
"Mikkel Munch Mortensen <https://www.detfalskested.dk/>",
"Andrzej Bistram <andrzej.bistram@gmail.com>",
"Daniel Lipsitt <danlipsitt@gmail.com>",
]
def parse_file_or_list(arg):
if not arg:
return []
if isinstance(arg, (list, tuple, set)):
return arg
if ',' not in arg and os.path.isfile(arg):
return [e.strip() for e in open(arg).readlines()]
return [e.strip() for e in arg.split(',')]
class ModelGraph:
def __init__(self, app_labels, **kwargs):
self.graphs = []
self.cli_options = kwargs.get('cli_options', None)
self.disable_fields = kwargs.get('disable_fields', False)
self.disable_abstract_fields = kwargs.get('disable_abstract_fields', False)
self.include_models = parse_file_or_list(
kwargs.get('include_models', "")
)
self.all_applications = kwargs.get('all_applications', False)
self.use_subgraph = kwargs.get('group_models', False)
self.verbose_names = kwargs.get('verbose_names', False)
self.inheritance = kwargs.get('inheritance', True)
self.relations_as_fields = kwargs.get("relations_as_fields", True)
self.sort_fields = kwargs.get("sort_fields", True)
self.language = kwargs.get('language', None)
if self.language is not None:
activate_language(self.language)
self.exclude_columns = parse_file_or_list(
kwargs.get('exclude_columns', "")
)
self.exclude_models = parse_file_or_list(
kwargs.get('exclude_models', "")
)
self.hide_edge_labels = kwargs.get('hide_edge_labels', False)
self.arrow_shape = kwargs.get("arrow_shape")
if self.all_applications:
self.app_labels = [app.label for app in apps.get_app_configs()]
else:
self.app_labels = app_labels
def generate_graph_data(self):
self.process_apps()
nodes = []
for graph in self.graphs:
nodes.extend([e['name'] for e in graph['models']])
for graph in self.graphs:
for model in graph['models']:
for relation in model['relations']:
if relation is not None:
if relation['target'] in nodes:
relation['needs_node'] = False
def get_graph_data(self, as_json=False):
now = datetime.datetime.now()
graph_data = {
'created_at': now.strftime("%Y-%m-%d %H:%M"),
'cli_options': self.cli_options,
'disable_fields': self.disable_fields,
'disable_abstract_fields': self.disable_abstract_fields,
'use_subgraph': self.use_subgraph,
}
if as_json:
graph_data['graphs'] = [context.flatten() for context in self.graphs]
else:
graph_data['graphs'] = self.graphs
return graph_data
def add_attributes(self, field, abstract_fields):
if self.verbose_names and field.verbose_name:
label = force_str(field.verbose_name)
if label.islower():
label = label.capitalize()
else:
label = field.name
t = type(field).__name__
if isinstance(field, (OneToOneField, ForeignKey)):
t += " ({0})".format(field.remote_field.field_name)
return {
'name': field.name,
'label': label,
'type': t,
'blank': field.blank,
'abstract': field in abstract_fields,
'relation': isinstance(field, RelatedField),
'primary_key': field.primary_key,
}
def add_relation(self, field, model, extras=""):
if self.verbose_names and field.verbose_name:
label = force_str(field.verbose_name)
if label.islower():
label = label.capitalize()
else:
label = field.name
if hasattr(field, 'related_query_name'):
related_query_name = field.related_query_name()
if self.verbose_names and related_query_name.islower():
related_query_name = related_query_name.replace('_', ' ').capitalize()
label = u'{} ({})'.format(label, force_str(related_query_name))
if self.hide_edge_labels:
label = ''
if isinstance(field.remote_field.model, six.string_types):
if field.remote_field.model == 'self':
target_model = field.model
else:
if '.' in field.remote_field.model:
app_label, model_name = field.remote_field.model.split('.', 1)
else:
app_label = field.model._meta.app_label
model_name = field.remote_field.model
target_model = apps.get_model(app_label, model_name)
else:
target_model = field.remote_field.model
_rel = self.get_relation_context(target_model, field, label, extras)
if _rel not in model['relations'] and self.use_model(_rel['target']):
return _rel
def get_abstract_models(self, appmodels):
abstract_models = []
for appmodel in appmodels:
abstract_models += [
abstract_model for abstract_model in appmodel.__bases__
if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract
]
abstract_models = list(set(abstract_models))
return abstract_models
def get_app_context(self, app):
return Context({
'name': '"%s"' % app.name,
'app_name': "%s" % app.name,
'cluster_app_name': "cluster_%s" % app.name.replace(".", "_"),
'models': []
})
def get_appmodel_attributes(self, appmodel):
if self.relations_as_fields:
attributes = [field for field in appmodel._meta.local_fields]
else:
attributes = [field for field in appmodel._meta.local_fields if not
isinstance(field, RelatedField)]
return attributes
def get_appmodel_abstracts(self, appmodel):
return [
abstract_model.__name__ for abstract_model in appmodel.__bases__
if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract
]
def get_appmodel_context(self, appmodel, appmodel_abstracts):
context = {
'app_name': appmodel.__module__.replace(".", "_"),
'name': appmodel.__name__,
'abstracts': appmodel_abstracts,
'fields': [],
'relations': []
}
if self.verbose_names and appmodel._meta.verbose_name:
context['label'] = force_str(appmodel._meta.verbose_name)
else:
context['label'] = context['name']
return context
def get_bases_abstract_fields(self, c):
_abstract_fields = []
for e in c.__bases__:
if hasattr(e, '_meta') and e._meta.abstract:
_abstract_fields.extend(e._meta.fields)
_abstract_fields.extend(self.get_bases_abstract_fields(e))
return _abstract_fields
def get_inheritance_context(self, appmodel, parent):
label = "multi-table"
if parent._meta.abstract:
label = "abstract"
if appmodel._meta.proxy:
label = "proxy"
label += r"\ninheritance"
if self.hide_edge_labels:
label = ''
return {
'target_app': parent.__module__.replace(".", "_"),
'target': parent.__name__,
'type': "inheritance",
'name': "inheritance",
'label': label,
'arrows': '[arrowhead=empty, arrowtail=none, dir=both]',
'needs_node': True,
}
def get_models(self, app):
appmodels = list(app.get_models())
return appmodels
def get_relation_context(self, target_model, field, label, extras):
return {
'target_app': target_model.__module__.replace('.', '_'),
'target': target_model.__name__,
'type': type(field).__name__,
'name': field.name,
'label': label,
'arrows': extras,
'needs_node': True
}
def process_attributes(self, field, model, pk, abstract_fields):
newmodel = model.copy()
        if self.skip_field(field) or (pk and field == pk):
return newmodel
newmodel['fields'].append(self.add_attributes(field, abstract_fields))
return newmodel
def process_apps(self):
for app_label in self.app_labels:
app = apps.get_app_config(app_label)
if not app:
continue
app_graph = self.get_app_context(app)
app_models = self.get_models(app)
abstract_models = self.get_abstract_models(app_models)
app_models = abstract_models + app_models
for appmodel in app_models:
if not self.use_model(appmodel._meta.object_name):
continue
appmodel_abstracts = self.get_appmodel_abstracts(appmodel)
abstract_fields = self.get_bases_abstract_fields(appmodel)
model = self.get_appmodel_context(appmodel, appmodel_abstracts)
attributes = self.get_appmodel_attributes(appmodel)
pk = appmodel._meta.pk
if pk and not appmodel._meta.abstract and pk in attributes:
model['fields'].append(self.add_attributes(pk, abstract_fields))
for field in attributes:
model = self.process_attributes(field, model, pk, abstract_fields)
if self.sort_fields:
model = self.sort_model_fields(model)
for field in appmodel._meta.local_fields:
model = self.process_local_fields(field, model, abstract_fields)
for field in appmodel._meta.local_many_to_many:
model = self.process_local_many_to_many(field, model)
if self.inheritance:
for parent in appmodel.__bases__:
model = self.process_parent(parent, appmodel, model)
app_graph['models'].append(model)
if app_graph['models']:
self.graphs.append(app_graph)
def process_local_fields(self, field, model, abstract_fields):
newmodel = model.copy()
if field.attname.endswith('_ptr_id') or field in abstract_fields or self.skip_field(field):
return newmodel
if isinstance(field, OneToOneField):
relation = self.add_relation(
field, newmodel, '[arrowhead=none, arrowtail=none, dir=both]'
)
elif isinstance(field, ForeignKey):
relation = self.add_relation(
field,
newmodel,
'[arrowhead=none, arrowtail={}, dir=both]'.format(
self.arrow_shape
),
)
else:
relation = None
if relation is not None:
newmodel['relations'].append(relation)
return newmodel
def process_local_many_to_many(self, field, model):
newmodel = model.copy()
if self.skip_field(field):
return newmodel
relation = None
if isinstance(field, ManyToManyField):
if hasattr(field.remote_field.through, '_meta') and field.remote_field.through._meta.auto_created:
relation = self.add_relation(
field,
newmodel,
'[arrowhead={} arrowtail={}, dir=both]'.format(
self.arrow_shape, self.arrow_shape
),
)
elif isinstance(field, GenericRelation):
relation = self.add_relation(field, newmodel, mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'))
if relation is not None:
newmodel['relations'].append(relation)
return newmodel
def process_parent(self, parent, appmodel, model):
newmodel = model.copy()
if hasattr(parent, "_meta"):
_rel = self.get_inheritance_context(appmodel, parent)
if _rel not in newmodel['relations'] and self.use_model(_rel['target']):
newmodel['relations'].append(_rel)
return newmodel
def sort_model_fields(self, model):
newmodel = model.copy()
newmodel['fields'] = sorted(newmodel['fields'], key=lambda field: (not field['primary_key'], not field['relation'], field['label']))
return newmodel
def use_model(self, model_name):
# Check against include list.
if self.include_models:
for model_pattern in self.include_models:
model_pattern = '^%s$' % model_pattern.replace('*', '.*')
if re.search(model_pattern, model_name):
return True
# Check against exclude list.
if self.exclude_models:
for model_pattern in self.exclude_models:
model_pattern = '^%s$' % model_pattern.replace('*', '.*')
if re.search(model_pattern, model_name):
return False
# Return `True` if `include_models` is falsey, otherwise return `False`.
return not self.include_models
def skip_field(self, field):
if self.exclude_columns:
if self.verbose_names and field.verbose_name:
if field.verbose_name in self.exclude_columns:
return True
if field.name in self.exclude_columns:
return True
return False
def generate_dot(graph_data, template='django_extensions/graph_models/digraph.dot'):
if isinstance(template, six.string_types):
template = loader.get_template(template)
if not isinstance(template, Template) and not (hasattr(template, 'template') and isinstance(template.template, Template)):
raise Exception("Default Django template loader isn't used. "
"This can lead to the incorrect template rendering. "
"Please, check the settings.")
c = Context(graph_data).flatten()
dot = template.render(c)
return dot
def generate_graph_data(*args, **kwargs):
generator = ModelGraph(*args, **kwargs)
generator.generate_graph_data()
return generator.get_graph_data()
def use_model(model, include_models, exclude_models):
generator = ModelGraph([], include_models=include_models, exclude_models=exclude_models)
return generator.use_model(model)
| true
| true
|
1c43fdcc16345073c0458921b47d255ef287bd2e
| 103
|
py
|
Python
|
edag/cli/__init__.py
|
sodre/edag-cli
|
f1f88fd749b3e8a94c93afa6ae78e8cb5fc84436
|
[
"BSD-3-Clause"
] | null | null | null |
edag/cli/__init__.py
|
sodre/edag-cli
|
f1f88fd749b3e8a94c93afa6ae78e8cb5fc84436
|
[
"BSD-3-Clause"
] | 4
|
2019-12-13T05:35:15.000Z
|
2019-12-30T21:07:14.000Z
|
edag/cli/__init__.py
|
sodre/edag-cli
|
f1f88fd749b3e8a94c93afa6ae78e8cb5fc84436
|
[
"BSD-3-Clause"
] | null | null | null |
"""Top-level package for ElasticDAG CLI."""
from ._version import version as __version__ # noqa: F401
| 34.333333
| 58
| 0.747573
|
from ._version import version as __version__
| true
| true
|
1c43fe5c5576e93119229d732edb22ae2f787b24
| 8,554
|
py
|
Python
|
applications/systems_of_equations_ex2/script/exodus_data_extraction.py
|
ElsevierSoftwareX/SOFTX_2019_102
|
123c4b3988ef2fb86b49a247b8431dc94a89eded
|
[
"MIT"
] | null | null | null |
applications/systems_of_equations_ex2/script/exodus_data_extraction.py
|
ElsevierSoftwareX/SOFTX_2019_102
|
123c4b3988ef2fb86b49a247b8431dc94a89eded
|
[
"MIT"
] | null | null | null |
applications/systems_of_equations_ex2/script/exodus_data_extraction.py
|
ElsevierSoftwareX/SOFTX_2019_102
|
123c4b3988ef2fb86b49a247b8431dc94a89eded
|
[
"MIT"
] | null | null | null |
import sys, os
#### import the simple module from the paraview
from paraview.simple import *
if __name__ == "__main__" and len(sys.argv) > 1:
time_step = int(sys.argv[1])
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# create a new 'ExodusIIReader'
oute = ExodusIIReader(FileName=['./out.e'])
timestep_values = oute.TimestepValues
oute.PointVariables = []
oute.SideSetArrayStatus = []
# get animation scene
animationScene1 = GetAnimationScene()
# update animation scene based on data timesteps
animationScene1.UpdateAnimationUsingDataTimeSteps()
# Properties modified on oute
oute.PointVariables = ['vel_', 'p']
oute.ElementBlocks = ['Unnamed block ID: 0 Type: QUAD9']
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
renderView1.ViewTime = timestep_values[time_step - 1]
# uncomment following to set a specific view size
# renderView1.ViewSize = [1611, 832]
# show data in view
outeDisplay = Show(oute, renderView1)
# trace defaults for the display properties.
outeDisplay.ColorArrayName = [None, '']
outeDisplay.OSPRayScaleArray = 'GlobalNodeId'
outeDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
outeDisplay.SelectOrientationVectors = 'GlobalNodeId'
outeDisplay.ScaleFactor = 0.1
outeDisplay.SelectScaleArray = 'GlobalNodeId'
outeDisplay.GlyphType = 'Arrow'
outeDisplay.ScalarOpacityUnitDistance = 0.19193831036664846
outeDisplay.GaussianRadius = 0.05
outeDisplay.SetScaleArray = ['POINTS', 'GlobalNodeId']
outeDisplay.ScaleTransferFunction = 'PiecewiseFunction'
outeDisplay.OpacityArray = ['POINTS', 'GlobalNodeId']
outeDisplay.OpacityTransferFunction = 'PiecewiseFunction'
# reset view to fit data
renderView1.ResetCamera()
#changing interaction mode based on data extents
renderView1.InteractionMode = '2D'
renderView1.CameraPosition = [0.5, 0.5, 10000.0]
renderView1.CameraFocalPoint = [0.5, 0.5, 0.0]
renderView1.CameraViewUp = [0.0, 1.0, 0.0]
# set scalar coloring
ColorBy(outeDisplay, ('FIELD', 'vtkBlockColors'))
# show color bar/color legend
outeDisplay.SetScalarBarVisibility(renderView1, True)
# get color transfer function/color map for 'vtkBlockColors'
vtkBlockColorsLUT = GetColorTransferFunction('vtkBlockColors')
vtkBlockColorsLUT.InterpretValuesAsCategories = 1
vtkBlockColorsLUT.Annotations = ['0', '0', '1', '1', '2', '2', '3', '3', '4', '4', '5', '5', '6', '6', '7', '7', '8', '8', '9', '9', '10', '10', '11', '11']
vtkBlockColorsLUT.ActiveAnnotatedValues = ['0']
vtkBlockColorsLUT.IndexedColors = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.63, 0.63, 1.0, 0.67, 0.5, 0.33, 1.0, 0.5, 0.75, 0.53, 0.35, 0.7, 1.0, 0.75, 0.5]
# get opacity transfer function/opacity map for 'vtkBlockColors'
vtkBlockColorsPWF = GetOpacityTransferFunction('vtkBlockColors')
# set scalar coloring
ColorBy(outeDisplay, ('POINTS', 'vel_'))
# Hide the scalar bar for this color map if no visible data is colored by it.
HideScalarBarIfNotNeeded(vtkBlockColorsLUT, renderView1)
# rescale color and/or opacity maps used to include current data range
outeDisplay.RescaleTransferFunctionToDataRange(True, False)
# show color bar/color legend
outeDisplay.SetScalarBarVisibility(renderView1, True)
# get color transfer function/color map for 'vel_'
vel_LUT = GetColorTransferFunction('vel_')
vel_LUT.RGBPoints = [0.0, 0.231373, 0.298039, 0.752941, 0.0, 0.865003, 0.865003, 0.865003, 0.0, 0.705882, 0.0156863, 0.14902]
vel_LUT.ScalarRangeInitialized = 1.0
# get opacity transfer function/opacity map for 'vel_'
vel_PWF = GetOpacityTransferFunction('vel_')
vel_PWF.Points = [0.0, 0.0, 0.5, 0.0, 0.0, 1.0, 0.5, 0.0]
vel_PWF.ScalarRangeInitialized = 1
# create a new 'Plot Over Line'
plotOverLine1 = PlotOverLine(Input=oute,
Source='High Resolution Line Source')
# init the 'High Resolution Line Source' selected for 'Source'
plotOverLine1.Source.Point2 = [1.0, 1.0, 0.0]
# Properties modified on plotOverLine1.Source
plotOverLine1.Source.Point1 = [0.5, 0.0, 0.0]
plotOverLine1.Source.Point2 = [0.5, 1.0, 0.0]
# Properties modified on plotOverLine1
plotOverLine1.Tolerance = 2.22044604925031e-16
# Properties modified on plotOverLine1.Source
plotOverLine1.Source.Point1 = [0.5, 0.0, 0.0]
plotOverLine1.Source.Point2 = [0.5, 1.0, 0.0]
# Create a new 'Line Chart View'
lineChartView1 = CreateView('XYChartView')
lineChartView1.ViewSize = [801, 832]
# get layout
layout1 = GetLayout()
# place view in the layout
layout1.AssignView(2, lineChartView1)
# show data in view
plotOverLine1Display = Show(plotOverLine1, lineChartView1)
# trace defaults for the display properties.
plotOverLine1Display.CompositeDataSetIndex = [0]
plotOverLine1Display.UseIndexForXAxis = 0
plotOverLine1Display.XArrayName = 'arc_length'
plotOverLine1Display.SeriesVisibility = ['p', 'vel__Magnitude']
plotOverLine1Display.SeriesLabel = ['arc_length', 'arc_length', 'ObjectId', 'ObjectId', 'p', 'p', 'vel__X', 'vel__X', 'vel__Y', 'vel__Y', 'vel__Z', 'vel__Z', 'vel__Magnitude', 'vel__Magnitude', 'vtkValidPointMask', 'vtkValidPointMask', 'Points_X', 'Points_X', 'Points_Y', 'Points_Y', 'Points_Z', 'Points_Z', 'Points_Magnitude', 'Points_Magnitude']
plotOverLine1Display.SeriesColor = ['arc_length', '0', '0', '0', 'ObjectId', '0.89', '0.1', '0.11', 'p', '0.22', '0.49', '0.72', 'vel__X', '0.3', '0.69', '0.29', 'vel__Y', '0.6', '0.31', '0.64', 'vel__Z', '1', '0.5', '0', 'vel__Magnitude', '0.65', '0.34', '0.16', 'vtkValidPointMask', '0', '0', '0', 'Points_X', '0.89', '0.1', '0.11', 'Points_Y', '0.22', '0.49', '0.72', 'Points_Z', '0.3', '0.69', '0.29', 'Points_Magnitude', '0.6', '0.31', '0.64']
plotOverLine1Display.SeriesPlotCorner = ['arc_length', '0', 'ObjectId', '0', 'p', '0', 'vel__X', '0', 'vel__Y', '0', 'vel__Z', '0', 'vel__Magnitude', '0', 'vtkValidPointMask', '0', 'Points_X', '0', 'Points_Y', '0', 'Points_Z', '0', 'Points_Magnitude', '0']
plotOverLine1Display.SeriesLineStyle = ['arc_length', '1', 'ObjectId', '1', 'p', '1', 'vel__X', '1', 'vel__Y', '1', 'vel__Z', '1', 'vel__Magnitude', '1', 'vtkValidPointMask', '1', 'Points_X', '1', 'Points_Y', '1', 'Points_Z', '1', 'Points_Magnitude', '1']
plotOverLine1Display.SeriesLineThickness = ['arc_length', '2', 'ObjectId', '2', 'p', '2', 'vel__X', '2', 'vel__Y', '2', 'vel__Z', '2', 'vel__Magnitude', '2', 'vtkValidPointMask', '2', 'Points_X', '2', 'Points_Y', '2', 'Points_Z', '2', 'Points_Magnitude', '2']
plotOverLine1Display.SeriesMarkerStyle = ['arc_length', '0', 'ObjectId', '0', 'p', '0', 'vel__X', '0', 'vel__Y', '0', 'vel__Z', '0', 'vel__Magnitude', '0', 'vtkValidPointMask', '0', 'Points_X', '0', 'Points_Y', '0', 'Points_Z', '0', 'Points_Magnitude', '0']
plotOverLine1Display.SeriesLabelPrefix = ''
# destroy lineChartView1
Delete(lineChartView1)
del lineChartView1
# close an empty frame
layout1.Collapse(2)
# set active view
SetActiveView(renderView1)
writer = CreateWriter("./rde/" + str(time_step) + "/original_data_from_extractor.csv", plotOverLine1)
writer.FieldAssociation = "Points" # or "Cells"
writer.UpdatePipeline()
# clean original extracted raw data from exodus file
with open("./rde/" + str(time_step) + "/original_data_from_extractor.csv", "r") as input_file, open("./rde/" + str(time_step) + "/extractor_" + str(time_step) + ".data", "w+") as output_file:
header = True
for line in input_file:
if(header):
output_file.write("filename;timestep;time;u;v;w;p;x;y;z")
header = False
else:
line = line.replace(",",";").replace("\n","")
splitted_line = line.split(";")
output_file.write("\n" + ";".join([
"\"" + os.getcwd() + "/rde/" + str(time_step) + "/extractor_" + str(time_step) + ".data\"", str(time_step), str(timestep_values[time_step - 1]),
splitted_line[0], splitted_line[1], splitted_line[2],
splitted_line[3], splitted_line[7], splitted_line[8], splitted_line[9]]))
output_file.flush()
output_file.close()
input_file.close()
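For orientation, a hedged sketch of the column remapping the cleanup loop above performs on each row; the indices follow the code, while the input row itself is an invented example value:
# Hedged illustration of the per-line rewrite performed above; the input
# row is invented, and the prepended filename/timestep/time fields are
# omitted to keep the sketch short.
line = "0.10,0,0.98,0.00,0.50,0.00,1,0.50,0.10,0.00"
cells = line.replace(",", ";").split(";")
row = ";".join([cells[0], cells[1], cells[2], cells[3],   # -> u;v;w;p
                cells[7], cells[8], cells[9]])            # -> x;y;z
print(row)  # 0.10;0;0.98;0.00;0.50;0.10;0.00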
| 49.445087
| 452
| 0.655483
|
import sys, os
from paraview.simple import *
if __name__ == "__main__" and len(sys.argv) > 1:
    time_step = int(sys.argv[1])
    paraview.simple._DisableFirstRenderCameraReset()
    oute = ExodusIIReader(FileName=['./out.e'])
    timestep_values = oute.TimestepValues
oute.PointVariables = []
oute.SideSetArrayStatus = []
animationScene1 = GetAnimationScene()
animationScene1.UpdateAnimationUsingDataTimeSteps()
oute.PointVariables = ['vel_', 'p']
oute.ElementBlocks = ['Unnamed block ID: 0 Type: QUAD9']
renderView1 = GetActiveViewOrCreate('RenderView')
renderView1.ViewTime = timestep_values[time_step - 1]
outeDisplay = Show(oute, renderView1)
outeDisplay.ColorArrayName = [None, '']
outeDisplay.OSPRayScaleArray = 'GlobalNodeId'
outeDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
outeDisplay.SelectOrientationVectors = 'GlobalNodeId'
outeDisplay.ScaleFactor = 0.1
outeDisplay.SelectScaleArray = 'GlobalNodeId'
outeDisplay.GlyphType = 'Arrow'
outeDisplay.ScalarOpacityUnitDistance = 0.19193831036664846
outeDisplay.GaussianRadius = 0.05
outeDisplay.SetScaleArray = ['POINTS', 'GlobalNodeId']
outeDisplay.ScaleTransferFunction = 'PiecewiseFunction'
outeDisplay.OpacityArray = ['POINTS', 'GlobalNodeId']
outeDisplay.OpacityTransferFunction = 'PiecewiseFunction'
renderView1.ResetCamera()
renderView1.InteractionMode = '2D'
renderView1.CameraPosition = [0.5, 0.5, 10000.0]
renderView1.CameraFocalPoint = [0.5, 0.5, 0.0]
renderView1.CameraViewUp = [0.0, 1.0, 0.0]
ColorBy(outeDisplay, ('FIELD', 'vtkBlockColors'))
outeDisplay.SetScalarBarVisibility(renderView1, True)
vtkBlockColorsLUT = GetColorTransferFunction('vtkBlockColors')
vtkBlockColorsLUT.InterpretValuesAsCategories = 1
vtkBlockColorsLUT.Annotations = ['0', '0', '1', '1', '2', '2', '3', '3', '4', '4', '5', '5', '6', '6', '7', '7', '8', '8', '9', '9', '10', '10', '11', '11']
vtkBlockColorsLUT.ActiveAnnotatedValues = ['0']
vtkBlockColorsLUT.IndexedColors = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.63, 0.63, 1.0, 0.67, 0.5, 0.33, 1.0, 0.5, 0.75, 0.53, 0.35, 0.7, 1.0, 0.75, 0.5]
vtkBlockColorsPWF = GetOpacityTransferFunction('vtkBlockColors')
ColorBy(outeDisplay, ('POINTS', 'vel_'))
HideScalarBarIfNotNeeded(vtkBlockColorsLUT, renderView1)
outeDisplay.RescaleTransferFunctionToDataRange(True, False)
outeDisplay.SetScalarBarVisibility(renderView1, True)
vel_LUT = GetColorTransferFunction('vel_')
vel_LUT.RGBPoints = [0.0, 0.231373, 0.298039, 0.752941, 0.0, 0.865003, 0.865003, 0.865003, 0.0, 0.705882, 0.0156863, 0.14902]
vel_LUT.ScalarRangeInitialized = 1.0
vel_PWF = GetOpacityTransferFunction('vel_')
vel_PWF.Points = [0.0, 0.0, 0.5, 0.0, 0.0, 1.0, 0.5, 0.0]
vel_PWF.ScalarRangeInitialized = 1
plotOverLine1 = PlotOverLine(Input=oute,
Source='High Resolution Line Source')
plotOverLine1.Source.Point2 = [1.0, 1.0, 0.0]
plotOverLine1.Source.Point1 = [0.5, 0.0, 0.0]
plotOverLine1.Source.Point2 = [0.5, 1.0, 0.0]
plotOverLine1.Tolerance = 2.22044604925031e-16
plotOverLine1.Source.Point1 = [0.5, 0.0, 0.0]
plotOverLine1.Source.Point2 = [0.5, 1.0, 0.0]
lineChartView1 = CreateView('XYChartView')
lineChartView1.ViewSize = [801, 832]
layout1 = GetLayout()
layout1.AssignView(2, lineChartView1)
plotOverLine1Display = Show(plotOverLine1, lineChartView1)
plotOverLine1Display.CompositeDataSetIndex = [0]
plotOverLine1Display.UseIndexForXAxis = 0
plotOverLine1Display.XArrayName = 'arc_length'
plotOverLine1Display.SeriesVisibility = ['p', 'vel__Magnitude']
plotOverLine1Display.SeriesLabel = ['arc_length', 'arc_length', 'ObjectId', 'ObjectId', 'p', 'p', 'vel__X', 'vel__X', 'vel__Y', 'vel__Y', 'vel__Z', 'vel__Z', 'vel__Magnitude', 'vel__Magnitude', 'vtkValidPointMask', 'vtkValidPointMask', 'Points_X', 'Points_X', 'Points_Y', 'Points_Y', 'Points_Z', 'Points_Z', 'Points_Magnitude', 'Points_Magnitude']
plotOverLine1Display.SeriesColor = ['arc_length', '0', '0', '0', 'ObjectId', '0.89', '0.1', '0.11', 'p', '0.22', '0.49', '0.72', 'vel__X', '0.3', '0.69', '0.29', 'vel__Y', '0.6', '0.31', '0.64', 'vel__Z', '1', '0.5', '0', 'vel__Magnitude', '0.65', '0.34', '0.16', 'vtkValidPointMask', '0', '0', '0', 'Points_X', '0.89', '0.1', '0.11', 'Points_Y', '0.22', '0.49', '0.72', 'Points_Z', '0.3', '0.69', '0.29', 'Points_Magnitude', '0.6', '0.31', '0.64']
plotOverLine1Display.SeriesPlotCorner = ['arc_length', '0', 'ObjectId', '0', 'p', '0', 'vel__X', '0', 'vel__Y', '0', 'vel__Z', '0', 'vel__Magnitude', '0', 'vtkValidPointMask', '0', 'Points_X', '0', 'Points_Y', '0', 'Points_Z', '0', 'Points_Magnitude', '0']
plotOverLine1Display.SeriesLineStyle = ['arc_length', '1', 'ObjectId', '1', 'p', '1', 'vel__X', '1', 'vel__Y', '1', 'vel__Z', '1', 'vel__Magnitude', '1', 'vtkValidPointMask', '1', 'Points_X', '1', 'Points_Y', '1', 'Points_Z', '1', 'Points_Magnitude', '1']
plotOverLine1Display.SeriesLineThickness = ['arc_length', '2', 'ObjectId', '2', 'p', '2', 'vel__X', '2', 'vel__Y', '2', 'vel__Z', '2', 'vel__Magnitude', '2', 'vtkValidPointMask', '2', 'Points_X', '2', 'Points_Y', '2', 'Points_Z', '2', 'Points_Magnitude', '2']
plotOverLine1Display.SeriesMarkerStyle = ['arc_length', '0', 'ObjectId', '0', 'p', '0', 'vel__X', '0', 'vel__Y', '0', 'vel__Z', '0', 'vel__Magnitude', '0', 'vtkValidPointMask', '0', 'Points_X', '0', 'Points_Y', '0', 'Points_Z', '0', 'Points_Magnitude', '0']
plotOverLine1Display.SeriesLabelPrefix = ''
Delete(lineChartView1)
del lineChartView1
layout1.Collapse(2)
SetActiveView(renderView1)
writer = CreateWriter("./rde/" + str(time_step) + "/original_data_from_extractor.csv", plotOverLine1)
writer.FieldAssociation = "Points"
writer.UpdatePipeline()
with open("./rde/" + str(time_step) + "/original_data_from_extractor.csv", "r") as input_file, open("./rde/" + str(time_step) + "/extractor_" + str(time_step) + ".data", "w+") as output_file:
header = True
for line in input_file:
if(header):
output_file.write("filename;timestep;time;u;v;w;p;x;y;z")
header = False
else:
line = line.replace(",",";").replace("\n","")
splitted_line = line.split(";")
output_file.write("\n" + ";".join([
"\"" + os.getcwd() + "/rde/" + str(time_step) + "/extractor_" + str(time_step) + ".data\"", str(time_step), str(timestep_values[time_step - 1]),
splitted_line[0], splitted_line[1], splitted_line[2],
splitted_line[3], splitted_line[7], splitted_line[8], splitted_line[9]]))
output_file.flush()
output_file.close()
input_file.close()
| true
| true
|
1c43fefda8a6cb0284260eadeb99ad911c49bee5
| 3,177
|
py
|
Python
|
gala/potential/potential/builtin/pybuiltin.py
|
akeemlh/gala
|
0fdaf9159bccc59af2a3525f2926e04501754f48
|
[
"MIT"
] | null | null | null |
gala/potential/potential/builtin/pybuiltin.py
|
akeemlh/gala
|
0fdaf9159bccc59af2a3525f2926e04501754f48
|
[
"MIT"
] | null | null | null |
gala/potential/potential/builtin/pybuiltin.py
|
akeemlh/gala
|
0fdaf9159bccc59af2a3525f2926e04501754f48
|
[
"MIT"
] | null | null | null |
# Third-party
import numpy as np
from gala.potential.potential.core import PotentialBase
from gala.potential.potential.util import sympy_wrap
from gala.potential.common import PotentialParameter
__all__ = ["HarmonicOscillatorPotential"]
class HarmonicOscillatorPotential(PotentialBase):
r"""
Represents an N-dimensional harmonic oscillator.
.. math::
\Phi = \frac{1}{2}\omega^2 x^2
Parameters
----------
omega : numeric
Frequency.
    units : iterable (optional)
        Unique list of non-reducible units that specify (at minimum) the
length, mass, time, and angle units.
"""
omega = PotentialParameter('omega', physical_type='frequency')
def _setup_potential(self, parameters, origin=None, R=None, units=None):
parameters['omega'] = np.atleast_1d(parameters['omega'])
super()._setup_potential(parameters, origin=origin, R=R, units=units)
self.ndim = len(self.parameters['omega'])
def _energy(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return np.sum(0.5 * om[None]**2 * q**2, axis=1)
def _gradient(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return om[None]**2 * q
def _hessian(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return np.tile(np.diag(om)[:, :, None], reps=(1, 1, q.shape[0]))
@classmethod
@sympy_wrap(var='x')
def to_sympy(cls, v, p):
expr = 1/2 * p['omega']**2 * v['x']**2
return expr, v, p
def action_angle(self, w):
"""
Transform the input cartesian position and velocity to action-angle
        coordinates in the Harmonic Oscillator potential. This transformation
is analytic and can be used as a "toy potential" in the
Sanders & Binney 2014 formalism for computing action-angle coordinates
in _any_ potential.
Adapted from Jason Sanders' code
`genfunc <https://github.com/jlsanders/genfunc>`_.
Parameters
----------
w : :class:`gala.dynamics.PhaseSpacePosition`, :class:`gala.dynamics.Orbit`
The positions or orbit to compute the actions, angles, and frequencies at.
"""
from gala.dynamics.actionangle import harmonic_oscillator_to_aa
return harmonic_oscillator_to_aa(w, self)
# def phase_space(self, actions, angles):
# """
# Transform the input action-angle coordinates to cartesian position and velocity
# assuming a Harmonic Oscillator potential. This transformation
# is analytic and can be used as a "toy potential" in the
# Sanders & Binney 2014 formalism for computing action-angle coordinates
# in _any_ potential.
# Adapted from Jason Sanders' code
# `genfunc <https://github.com/jlsanders/genfunc>`_.
# Parameters
# ----------
# x : array_like
# Positions.
# v : array_like
# Velocities.
# """
# from gala.dynamics.actionangle import harmonic_oscillator_aa_to_xv
# return harmonic_oscillator_aa_to_xv(actions, angles, self)
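A small self-contained check of the harmonic-oscillator formulas used in _energy and _gradient above, in plain NumPy rather than through the gala machinery; the frequencies and positions are illustrative assumptions:
# Hedged NumPy check of the _energy/_gradient formulas above; omega and q
# are invented test values, not from the original file.
import numpy as np

omega = np.array([1.0, 2.0])          # one frequency per dimension
q = np.array([[0.3, -0.1],            # two sample positions, shape (n, ndim)
              [1.0, 0.5]])

energy = np.sum(0.5 * omega[None]**2 * q**2, axis=1)  # Phi = 1/2 w^2 x^2
gradient = omega[None]**2 * q                         # dPhi/dx = w^2 x

print(energy)    # -> 0.065 and 1.0
print(gradient)  # -> [0.3, -0.4] and [1.0, 2.0]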
| 34.912088
| 89
| 0.639597
|
import numpy as np
from gala.potential.potential.core import PotentialBase
from gala.potential.potential.util import sympy_wrap
from gala.potential.common import PotentialParameter
__all__ = ["HarmonicOscillatorPotential"]
class HarmonicOscillatorPotential(PotentialBase):
omega = PotentialParameter('omega', physical_type='frequency')
def _setup_potential(self, parameters, origin=None, R=None, units=None):
parameters['omega'] = np.atleast_1d(parameters['omega'])
super()._setup_potential(parameters, origin=origin, R=R, units=units)
self.ndim = len(self.parameters['omega'])
def _energy(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return np.sum(0.5 * om[None]**2 * q**2, axis=1)
def _gradient(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return om[None]**2 * q
def _hessian(self, q, t=0.):
om = np.atleast_1d(self.parameters['omega'].value)
return np.tile(np.diag(om)[:, :, None], reps=(1, 1, q.shape[0]))
@classmethod
@sympy_wrap(var='x')
def to_sympy(cls, v, p):
expr = 1/2 * p['omega']**2 * v['x']**2
return expr, v, p
def action_angle(self, w):
from gala.dynamics.actionangle import harmonic_oscillator_to_aa
return harmonic_oscillator_to_aa(w, self)
# Transform the input action-angle coordinates to cartesian position and velocity
# assuming a Harmonic Oscillator potential. This transformation
# is analytic and can be used as a "toy potential" in the
# Sanders & Binney 2014 formalism for computing action-angle coordinates
# in _any_ potential.
# Adapted from Jason Sanders' code
# `genfunc <https://github.com/jlsanders/genfunc>`_.
# Parameters
# ----------
# x : array_like
# Positions.
# v : array_like
# Velocities.
# """
# from gala.dynamics.actionangle import harmonic_oscillator_aa_to_xv
# return harmonic_oscillator_aa_to_xv(actions, angles, self)
| true
| true
|
1c43ff06f66ece3c7d95b1983fde0993f787cb7e
| 2,428
|
py
|
Python
|
swaps/utils/channels.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | 1
|
2021-09-06T00:09:11.000Z
|
2021-09-06T00:09:11.000Z
|
swaps/utils/channels.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | null | null | null |
swaps/utils/channels.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | null | null | null |
import json
from swaps.utils.time_service import get_current_timestamp
from swaps.constant import DepthStep
def kline_channel(symbol, interval):
channel = dict()
channel["sub"] = "market." + symbol + ".kline." + interval
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def trade_detail_channel(symbol):
channel = dict()
channel["sub"] = "market." + symbol + ".trade.detail"
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def price_depth_channel(symbol, step_type=DepthStep.STEP0):
channel = dict()
channel["sub"] = "market." + symbol + ".depth." + step_type
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def price_depth_bbo_channel(symbol):
channel = dict()
channel["sub"] = "market." + symbol + ".bbo"
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def orders_update_channel(symbol):
channel = dict()
channel["action"] = "sub"
channel["ch"] = "orders#{symbol}".format(symbol=symbol)
return json.dumps(channel)
def market_detail_channel(symbol):
channel = dict()
channel["sub"] = "market." + symbol + ".detail"
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def accounts_update_channel(mode=0):
channel = dict()
channel["action"] = "sub"
if mode is None:
channel["ch"] = "accounts.update"
else:
channel["ch"] = "accounts.update#{mode}".format(mode=mode)
return json.dumps(channel)
def mbp_increase_channel(symbol, levels):
channel = dict()
channel["sub"] = "market.{symbol}.mbp.{levels}".format(symbol=symbol, levels=levels)
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def mbp_full_channel(symbol, levels):
channel = dict()
channel["sub"] = "market.{symbol}.mbp.refresh.{levels}".format(symbol=symbol, levels=levels)
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def request_mbp_channel(symbol, levels):
channel = dict()
channel["req"] = "market.{symbol}.mbp.{levels}".format(symbol=symbol, levels=levels)
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def trade_clearing_channel(symbol="*"):
channel = dict()
channel["action"] = "sub"
channel["ch"] = "trade.clearing#" + symbol
return json.dumps(channel)
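A quick usage sketch for the channel builders above; the symbol and interval are example inputs, and the id field varies per call because it is a timestamp:
# Hedged usage sketch; "BTC-USD" and "1min" are example inputs only.
sub_kline = kline_channel("BTC-USD", "1min")
# -> '{"sub": "market.BTC-USD.kline.1min", "id": "<timestamp>"}'

sub_orders = orders_update_channel("BTC-USD")
# -> '{"action": "sub", "ch": "orders#BTC-USD"}'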
| 28.904762
| 96
| 0.670511
|
import json
from swaps.utils.time_service import get_current_timestamp
from swaps.constant import DepthStep
def kline_channel(symbol, interval):
channel = dict()
channel["sub"] = "market." + symbol + ".kline." + interval
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def trade_detail_channel(symbol):
channel = dict()
channel["sub"] = "market." + symbol + ".trade.detail"
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def price_depth_channel(symbol, step_type=DepthStep.STEP0):
channel = dict()
channel["sub"] = "market." + symbol + ".depth." + step_type
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def price_depth_bbo_channel(symbol):
channel = dict()
channel["sub"] = "market." + symbol + ".bbo"
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def orders_update_channel(symbol):
channel = dict()
channel["action"] = "sub"
channel["ch"] = "orders#{symbol}".format(symbol=symbol)
return json.dumps(channel)
def market_detail_channel(symbol):
channel = dict()
channel["sub"] = "market." + symbol + ".detail"
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def accounts_update_channel(mode=0):
channel = dict()
channel["action"] = "sub"
if mode is None:
channel["ch"] = "accounts.update"
else:
channel["ch"] = "accounts.update#{mode}".format(mode=mode)
return json.dumps(channel)
def mbp_increase_channel(symbol, levels):
channel = dict()
channel["sub"] = "market.{symbol}.mbp.{levels}".format(symbol=symbol, levels=levels)
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def mbp_full_channel(symbol, levels):
channel = dict()
channel["sub"] = "market.{symbol}.mbp.refresh.{levels}".format(symbol=symbol, levels=levels)
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def request_mbp_channel(symbol, levels):
channel = dict()
channel["req"] = "market.{symbol}.mbp.{levels}".format(symbol=symbol, levels=levels)
channel["id"] = str(get_current_timestamp())
return json.dumps(channel)
def trade_clearing_channel(symbol="*"):
channel = dict()
channel["action"] = "sub"
channel["ch"] = "trade.clearing#" + symbol
return json.dumps(channel)
| true
| true
|
1c43ff8b50f9f4dccea00f66a1b714b913f672b2
| 4,157
|
py
|
Python
|
speech_activity_detection/sad.py
|
hlt-bme-hu/hunspeech
|
b8599e232ed2daa6ff6e07b92c6dca003b8c4bde
|
[
"MIT"
] | 17
|
2017-03-05T03:19:37.000Z
|
2020-07-28T03:05:55.000Z
|
speech_activity_detection/sad.py
|
hlt-bme-hu/hunspeech
|
b8599e232ed2daa6ff6e07b92c6dca003b8c4bde
|
[
"MIT"
] | 7
|
2016-07-05T08:40:15.000Z
|
2016-07-28T10:07:38.000Z
|
speech_activity_detection/sad.py
|
hlt-bme-hu/hunspeech
|
b8599e232ed2daa6ff6e07b92c6dca003b8c4bde
|
[
"MIT"
] | 6
|
2017-05-10T12:27:35.000Z
|
2018-09-14T20:13:43.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2016 Judit Acs <judit@sch.bme.hu>
#
# Distributed under terms of the GPL license.
from argparse import ArgumentParser
import os
import subprocess
class EMSpeechActicityDetection:
"""Speech activity detection and segmentation
This class is a wrapper for the SHOUT toolkit's SAD module.
Since SHOUT expects the input to be raw audio, it is first converted
into the correct raw format by sox (Sound eXchange),
then shout_segment is called.
SHOUT outputs a single segmentation file, which is saved to
segments.txt by default.
Each segment is labeled as SPEECH, SIL (silence) or SOUND.
    EMSpeechActicityDetection supports two additional ways of saving output:
1. segment the input according to SHOUT's segmentation into
individual audio files.
2. group segments by labels and concatenate them into a single file.
This produces at most three files: one containing all speech, one
containing all silence and another one containing all sound.
"""
def __init__(self, filename, model=None, segment_out='segments.txt',
segment_dir=None, shout_path=os.environ.get('SHOUT_DIR')):
self.filename = filename
if model is None:
self.model = os.path.join(os.environ.get('SHOUT_DIR'),
'models', 'shout.sad')
else:
self.model = model
self.segment_out = segment_out
self.binary_path = '{}/shout_segment'.format(shout_path)
def segment(self):
self.raw_filename = EMSpeechActicityDetection.convert_to_raw(
self.filename)
cmd = '{0} -a {1} --am-segment {2} -mo {3}'.format(
self.binary_path,
self.raw_filename,
self.model,
self.segment_out,
)
subprocess.call(cmd, shell=True)
@staticmethod
def convert_to_raw(filename):
""" accepts mp3, wav and raw files """
EMSpeechActicityDetection.__check_audio_file(filename)
basename, ext = os.path.splitext(filename)
if ext == '.mp3':
EMSpeechActicityDetection.convert_mp3_to_wav(filename)
fn = EMSpeechActicityDetection.convert_wav_to_raw('{0}.wav'.format(
basename))
return fn
@staticmethod
def convert_mp3_to_wav(filename):
basename, ext = os.path.splitext(filename)
out_fn = '{}.wav'.format(basename)
subprocess.call('sox {0} {1}'.format(filename, out_fn), shell=True)
return out_fn
@staticmethod
def convert_wav_to_raw(filename):
basename, ext = os.path.splitext(filename)
out_fn = '{}.raw'.format(basename)
params = '-r 16k -b 16 -L -c 1'
subprocess.call('sox {0} {1} {2}'.format(params, filename, out_fn),
shell=True)
return out_fn
@staticmethod
def __check_audio_file(filename):
if not os.path.exists(filename):
raise Exception('Source file does not exist: {}'.format(
filename))
ext = os.path.splitext(filename)[-1]
if ext not in ('.raw', '.mp3', '.wav'):
raise ValueError('Cannot handle [{0}] files'.format(
ext))
def parse_args():
p = ArgumentParser()
p.add_argument('-i', '--input', type=str,
help='Input file. Use this option if you want to segment'
' a single file'
)
p.add_argument('-m', '--model', type=str,
help='SHOUT acoustic model',
default='{}/shout_am.sad'.format(
os.environ.get('SHOUT_DIR')),
)
p.add_argument('-o', '--segment-out', type=str,
help='Write segments to file',
default='segments.txt'
)
return p.parse_args()
def main():
args = parse_args()
sad = EMSpeechActicityDetection(filename=args.input, model=args.model,
segment_out=args.segment_out)
sad.segment()
if __name__ == '__main__':
main()
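A minimal usage sketch for the wrapper above, assuming sox is on the PATH and SHOUT_DIR points at an installed SHOUT toolkit; the input filename is an invented example:
# Hedged sketch: requires sox on PATH and SHOUT_DIR in the environment;
# 'interview.mp3' is an invented example input file.
sad = EMSpeechActicityDetection('interview.mp3',
                                segment_out='interview_segments.txt')
sad.segment()  # mp3 -> wav -> raw, then shout_segment writes the labels
# interview_segments.txt now marks each segment as SPEECH, SIL or SOUND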
| 35.836207
| 76
| 0.600674
|
from argparse import ArgumentParser
import os
import subprocess
class EMSpeechActicityDetection:
def __init__(self, filename, model=None, segment_out='segments.txt',
segment_dir=None, shout_path=os.environ.get('SHOUT_DIR')):
self.filename = filename
if model is None:
self.model = os.path.join(os.environ.get('SHOUT_DIR'),
'models', 'shout.sad')
else:
self.model = model
self.segment_out = segment_out
self.binary_path = '{}/shout_segment'.format(shout_path)
def segment(self):
self.raw_filename = EMSpeechActicityDetection.convert_to_raw(
self.filename)
cmd = '{0} -a {1} --am-segment {2} -mo {3}'.format(
self.binary_path,
self.raw_filename,
self.model,
self.segment_out,
)
subprocess.call(cmd, shell=True)
@staticmethod
def convert_to_raw(filename):
EMSpeechActicityDetection.__check_audio_file(filename)
basename, ext = os.path.splitext(filename)
if ext == '.mp3':
EMSpeechActicityDetection.convert_mp3_to_wav(filename)
fn = EMSpeechActicityDetection.convert_wav_to_raw('{0}.wav'.format(
basename))
return fn
@staticmethod
def convert_mp3_to_wav(filename):
basename, ext = os.path.splitext(filename)
out_fn = '{}.wav'.format(basename)
subprocess.call('sox {0} {1}'.format(filename, out_fn), shell=True)
return out_fn
@staticmethod
def convert_wav_to_raw(filename):
basename, ext = os.path.splitext(filename)
out_fn = '{}.raw'.format(basename)
params = '-r 16k -b 16 -L -c 1'
subprocess.call('sox {0} {1} {2}'.format(params, filename, out_fn),
shell=True)
return out_fn
@staticmethod
def __check_audio_file(filename):
if not os.path.exists(filename):
raise Exception('Source file does not exist: {}'.format(
filename))
ext = os.path.splitext(filename)[-1]
if ext not in ('.raw', '.mp3', '.wav'):
raise ValueError('Cannot handle [{0}] files'.format(
ext))
def parse_args():
p = ArgumentParser()
p.add_argument('-i', '--input', type=str,
help='Input file. Use this option if you want to segment'
' a single file'
)
p.add_argument('-m', '--model', type=str,
help='SHOUT acoustic model',
default='{}/shout_am.sad'.format(
os.environ.get('SHOUT_DIR')),
)
p.add_argument('-o', '--segment-out', type=str,
help='Write segments to file',
default='segments.txt'
)
return p.parse_args()
def main():
args = parse_args()
sad = EMSpeechActicityDetection(filename=args.input, model=args.model,
segment_out=args.segment_out)
sad.segment()
if __name__ == '__main__':
main()
| true
| true
|
1c4400536ce84830b6a1ec7c250cf1e8cccf83e5
| 3,961
|
py
|
Python
|
tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py
|
NeelGhoshal/probability
|
45ed841e3cff6cdc7cd1b2d96dd874d9070318f7
|
[
"Apache-2.0"
] | 2
|
2019-10-30T04:45:07.000Z
|
2019-10-30T04:45:08.000Z
|
tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py
|
gregorystrubel/probability
|
df96f3d56eff92c6b06fbac68dc58e095e28fed6
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py
|
gregorystrubel/probability
|
df96f3d56eff92c6b06fbac68dc58e095e28fed6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `leapfrog_integrator.py`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.mcmc.internal import leapfrog_integrator as leapfrog_impl
@test_util.test_all_tf_execution_regimes
class LeapfrogIntegratorTest(test_util.TestCase):
def setUp(self):
self._shape_param = 5.
self._rate_param = 10.
def assertAllFinite(self, x):
self.assertAllEqual(np.ones_like(x).astype(bool), np.isfinite(x))
def _log_gamma_log_prob(self, x, event_dims=()):
"""Computes log-pdf of a log-gamma random variable.
Args:
x: Value of the random variable.
event_dims: Dimensions not to treat as independent.
Returns:
log_prob: The log-pdf up to a normalizing constant.
"""
return tf.reduce_sum(
self._shape_param * x - self._rate_param * tf.exp(x),
axis=event_dims)
def _integrator_conserves_energy(self, x, independent_chain_ndims, seed):
event_dims = tf.range(independent_chain_ndims, tf.rank(x))
target_fn = lambda x: self._log_gamma_log_prob(x, event_dims)
m = tf.random.normal(tf.shape(x), seed=seed)
log_prob_0 = target_fn(x)
old_energy = -log_prob_0 + 0.5 * tf.reduce_sum(m**2., axis=event_dims)
event_size = np.prod(
self.evaluate(x).shape[independent_chain_ndims:])
integrator = leapfrog_impl.SimpleLeapfrogIntegrator(
target_fn,
step_sizes=[0.09 / event_size],
num_steps=1000)
[[new_m], [_], log_prob_1, [_]] = integrator([m], [x])
new_energy = -log_prob_1 + 0.5 * tf.reduce_sum(new_m**2., axis=event_dims)
old_energy_, new_energy_ = self.evaluate([old_energy, new_energy])
tf1.logging.vlog(
1, 'average energy relative change: {}'.format(
(1. - new_energy_ / old_energy_).mean()))
self.assertAllClose(old_energy_, new_energy_, atol=0., rtol=0.02)
def _integrator_conserves_energy_wrapper(self, independent_chain_ndims):
"""Tests the long-term energy conservation of the leapfrog integrator.
The leapfrog integrator is symplectic, so for sufficiently small step
sizes it should be possible to run it more or less indefinitely without
the energy of the system blowing up or collapsing.
Args:
independent_chain_ndims: Python `int` scalar representing the number of
dims associated with independent chains.
"""
seed_stream = test_util.test_seed_stream()
x = self.evaluate(0.1 * tf.random.normal(
shape=(50, 10, 2), seed=seed_stream()))
x = tf.constant(x)
self._integrator_conserves_energy(
x, independent_chain_ndims, seed=seed_stream())
def testIntegratorEnergyConservationNullShape(self):
self._integrator_conserves_energy_wrapper(0)
def testIntegratorEnergyConservation1(self):
self._integrator_conserves_energy_wrapper(1)
def testIntegratorEnergyConservation2(self):
self._integrator_conserves_energy_wrapper(2)
def testIntegratorEnergyConservation3(self):
self._integrator_conserves_energy_wrapper(3)
if __name__ == '__main__':
test_util.main()
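The energy-conservation tests above rest on the leapfrog scheme being symplectic; a self-contained NumPy sketch of the same effect for a 1-D standard normal target, with the step size and step count as illustrative assumptions:
# Hedged NumPy illustration of leapfrog energy conservation; the 1-D
# target, step size, and step count are invented for this sketch.
import numpy as np

def grad_neg_log_prob(x):        # -d/dx log p(x) for p(x) ~ exp(-x**2 / 2)
    return x

x, m = 1.0, 0.5                  # position and momentum
eps, steps = 0.01, 10_000
energy0 = 0.5 * x**2 + 0.5 * m**2

for _ in range(steps):           # leapfrog: half kick, drift, half kick
    m -= 0.5 * eps * grad_neg_log_prob(x)
    x += eps * m
    m -= 0.5 * eps * grad_neg_log_prob(x)

energy1 = 0.5 * x**2 + 0.5 * m**2
print(abs(energy1 - energy0))    # stays small even after many steps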
| 35.053097
| 92
| 0.721283
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.mcmc.internal import leapfrog_integrator as leapfrog_impl
@test_util.test_all_tf_execution_regimes
class LeapfrogIntegratorTest(test_util.TestCase):
  def setUp(self):
    super(LeapfrogIntegratorTest, self).setUp()
    self._shape_param = 5.
    self._rate_param = 10.
def assertAllFinite(self, x):
self.assertAllEqual(np.ones_like(x).astype(bool), np.isfinite(x))
def _log_gamma_log_prob(self, x, event_dims=()):
return tf.reduce_sum(
self._shape_param * x - self._rate_param * tf.exp(x),
axis=event_dims)
def _integrator_conserves_energy(self, x, independent_chain_ndims, seed):
event_dims = tf.range(independent_chain_ndims, tf.rank(x))
target_fn = lambda x: self._log_gamma_log_prob(x, event_dims)
m = tf.random.normal(tf.shape(x), seed=seed)
log_prob_0 = target_fn(x)
old_energy = -log_prob_0 + 0.5 * tf.reduce_sum(m**2., axis=event_dims)
event_size = np.prod(
self.evaluate(x).shape[independent_chain_ndims:])
integrator = leapfrog_impl.SimpleLeapfrogIntegrator(
target_fn,
step_sizes=[0.09 / event_size],
num_steps=1000)
[[new_m], [_], log_prob_1, [_]] = integrator([m], [x])
new_energy = -log_prob_1 + 0.5 * tf.reduce_sum(new_m**2., axis=event_dims)
old_energy_, new_energy_ = self.evaluate([old_energy, new_energy])
tf1.logging.vlog(
1, 'average energy relative change: {}'.format(
(1. - new_energy_ / old_energy_).mean()))
self.assertAllClose(old_energy_, new_energy_, atol=0., rtol=0.02)
def _integrator_conserves_energy_wrapper(self, independent_chain_ndims):
seed_stream = test_util.test_seed_stream()
x = self.evaluate(0.1 * tf.random.normal(
shape=(50, 10, 2), seed=seed_stream()))
x = tf.constant(x)
self._integrator_conserves_energy(
x, independent_chain_ndims, seed=seed_stream())
def testIntegratorEnergyConservationNullShape(self):
self._integrator_conserves_energy_wrapper(0)
def testIntegratorEnergyConservation1(self):
self._integrator_conserves_energy_wrapper(1)
def testIntegratorEnergyConservation2(self):
self._integrator_conserves_energy_wrapper(2)
def testIntegratorEnergyConservation3(self):
self._integrator_conserves_energy_wrapper(3)
if __name__ == '__main__':
test_util.main()
| true
| true
|
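The test above exercises long-run energy conservation of TFP's leapfrog integrator. As a minimal standalone sketch of the underlying symplectic update (half momentum kick, full position drift, half momentum kick), here is a plain Python version with an illustrative standard-normal target and step size; this is not TFP's API:

def leapfrog_step(x, m, grad_log_prob, step_size):
    # Half momentum kick, full position drift, half momentum kick.
    m = m + 0.5 * step_size * grad_log_prob(x)
    x = x + step_size * m
    m = m + 0.5 * step_size * grad_log_prob(x)
    return x, m

grad_log_prob = lambda x: -x  # standard normal: log p(x) = -x**2 / 2 + const
x, m = 1.0, 0.5
energy_0 = 0.5 * x ** 2 + 0.5 * m ** 2  # -log p(x) plus kinetic term
for _ in range(1000):
    x, m = leapfrog_step(x, m, grad_log_prob, step_size=0.05)
energy_1 = 0.5 * x ** 2 + 0.5 * m ** 2
assert abs(energy_1 - energy_0) < 0.01  # drift stays bounded, as the test asserts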
1c4401781e653e88d9e6d6f9fbced6b590f8d769
| 243
|
py
|
Python
|
example/envless_mode/app.py
|
jhesketh/dynaconf
|
a8038b87763ae8e790ff7e745b9335f997d5bd16
|
[
"MIT"
] | 1
|
2021-07-21T17:06:16.000Z
|
2021-07-21T17:06:16.000Z
|
example/envless_mode/app.py
|
jhesketh/dynaconf
|
a8038b87763ae8e790ff7e745b9335f997d5bd16
|
[
"MIT"
] | null | null | null |
example/envless_mode/app.py
|
jhesketh/dynaconf
|
a8038b87763ae8e790ff7e745b9335f997d5bd16
|
[
"MIT"
] | null | null | null |
import os
from dynaconf import LazySettings
settings = LazySettings(ENVLESS_MODE=True)
assert settings.FOO == "bar"
assert settings.HELLO == "world"
assert settings.DATABASES.default.port == 8080
assert settings.LAZY == os.environ["HOME"]
| 20.25
| 46
| 0.769547
|
import os
from dynaconf import LazySettings
settings = LazySettings(ENVLESS_MODE=True)
assert settings.FOO == "bar"
assert settings.HELLO == "world"
assert settings.DATABASES.default.port == 8080
assert settings.LAZY == os.environ["HOME"]
| true
| true
|
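For context on the asserts above: in envless mode dynaconf reads flat top-level keys instead of layered [default]/[production] sections. A plausible companion settings.toml is sketched below as Python comments; it is an assumption, since the repo's actual settings file is not part of this record:

# Hypothetical settings.toml (assumed for illustration; not taken from the repo):
#
#     foo = "bar"
#     hello = "world"
#     lazy = "@format {env[HOME]}"   # dynaconf lazy token, resolved on access
#
#     [databases.default]
#     port = 8080
#
# With ENVLESS_MODE=True these keys load as-is (case-insensitive), which is
# what makes settings.FOO, settings.DATABASES.default.port, etc. resolve.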
1c4403bd35f001ff67a9f8496dba9393ab34b2fe
| 5,177
|
py
|
Python
|
pineboolib/kugar/mreportobject.py
|
Miguel-J/pineboo-buscar
|
41a2f3ee0425d163619b78f32544c4b4661d5fa7
|
[
"MIT"
] | null | null | null |
pineboolib/kugar/mreportobject.py
|
Miguel-J/pineboo-buscar
|
41a2f3ee0425d163619b78f32544c4b4661d5fa7
|
[
"MIT"
] | null | null | null |
pineboolib/kugar/mreportobject.py
|
Miguel-J/pineboo-buscar
|
41a2f3ee0425d163619b78f32544c4b4661d5fa7
|
[
"MIT"
] | null | null | null |
from enum import Enum
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5.Qt import QObject
from pineboolib import decorators
from pineboolib.flcontrols import ProjectClass
from pineboolib.fllegacy.FLStylePainter import FLStylePainter
class MReportObject(ProjectClass, QObject):
class BorderStyle(Enum):
NoPen = 0
SolidLine = 1
DashLine = 2
DotLine = 3
DashDotLine = 4
DashDotDotLine = 5
class ReportObjectType(Enum):
Invalid = 0
Label = 1
Field = 2
Calc = 3
Special = 4
@decorators.BetaImplementation
def __init__(self, *args):
if len(args) and isinstance(args[0], MReportObject):
self.copy(args[0])
else:
super(MReportObject, self).__init__()
self.xpos_ = 0
self.ypos_ = 0
self.width_ = 40
self.height_ = 23
            self.backgroundColor_ = QtGui.QColor(255, 255, 255)
            self.foregroundColor_ = QtGui.QColor(0, 0, 0)
            self.borderColor_ = QtGui.QColor(0, 0, 0)
            self.borderWidth_ = 1
            self.borderStyle_ = self.BorderStyle.SolidLine
            self.sectionIndex_ = -1
            self.transparent_ = False
            self.objectId_ = 0
@decorators.NotImplementedWarn
# def operator=(self, mro): #FIXME
def operator(self, mro):
return self
@decorators.BetaImplementation
def draw(self, p):
self.drawBase(p)
return 0
@decorators.BetaImplementation
def drawBase(self, p):
if p.drawRect(self):
return
restore = False
if p.errCode() == FLStylePainter.IdNotFound:
p.painter().save(self.name())
p.applyTransforms()
p.painter().translate(self.xpos_, self.ypos_)
restore = True
if self.borderStyle_ != self.BorderStyle.NoPen or self.transparent_:
if self.transparent_:
p.painter().setBrush(Qt.NoBrush)
else:
p.painter().setBrush(self.backgroundColor_)
            if self.borderStyle_ != self.BorderStyle.NoPen:
p.painter().setPen(QtGui.QPen(
self.borderColor_, self.borderWidth_, self.borderStyle_)
)
else:
p.painter().setPen(Qt.NoPen)
p.painter().drawRect(0, 0, self.width_, self.height_)
else:
p.painter().fillRect(
0, 0,
self.width_, self.height_,
self.backgroundColor_
)
if restore:
p.painter().restore()
@decorators.BetaImplementation
def setGeometry(self, x, y, w, h):
self.xpos_ = x
self.ypos_ = y
self.width_ = w
self.height_ = h
@decorators.BetaImplementation
def move(self, x, y):
self.xpos_ = x
self.ypos_ = y
@decorators.BetaImplementation
def setBackgroundColor(self, r, g, b):
self.backgroundColor_.setRgb(r, g, b)
@decorators.BetaImplementation
def setForegroundColor(self, r, g, b):
self.foregroundColor_.setRgb(r, g, b)
@decorators.BetaImplementation
def setBorderColor(self, r, g, b):
self.borderColor_.setRgb(r, g, b)
@decorators.BetaImplementation
def copy(self, mro):
self.xpos_ = mro.xpos_
self.ypos_ = mro.ypos_
self.width_ = mro.width_
self.height_ = mro.height_
self.backgroundColor_ = mro.backgroundColor_
self.foregroundColor_ = mro.foregroundColor_
self.borderColor_ = mro.borderColor_
self.borderWidth_ = mro.borderWidth_
self.borderStyle_ = mro.borderStyle_
self.sectionIndex_ = mro.sectionIndex_
self.transparent_ = mro.transparent_
self.objectId_ = mro.objectId_
@decorators.BetaImplementation
def RTTI(self):
return self.ReportObjectType.Invalid
@decorators.BetaImplementation
def getX(self):
return self.xpos_
@decorators.BetaImplementation
def getY(self):
return self.ypos_
@decorators.BetaImplementation
def getHeight(self):
return self.height_
@decorators.BetaImplementation
def getWidth(self):
return self.width_
@decorators.BetaImplementation
def getDrawAtBottom(self):
return self.drawAtBottom_
@decorators.BetaImplementation
def getSectionIndex(self):
return self.sectionIndex_
@decorators.BetaImplementation
def getObjectId(self):
return self.objectId_
@decorators.BetaImplementation
def setBorderWidth(self, width):
self.borderWidth_ = width
@decorators.BetaImplementation
def setBorderStyle(self, style):
self.borderStyle_ = style
@decorators.BetaImplementation
def setTransparent(self, t):
self.transparent_ = t
@decorators.BetaImplementation
def setDrawAtBottom(self, b):
self.drawAtBottom_ = b
@decorators.BetaImplementation
def setSectionIndex(self, idx):
self.sectionIndex_ = idx
@decorators.BetaImplementation
def setObjectId(self, id):
self.objectId_ = id
| 26.548718
| 76
| 0.615414
|
from enum import Enum
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5.Qt import QObject
from pineboolib import decorators
from pineboolib.flcontrols import ProjectClass
from pineboolib.fllegacy.FLStylePainter import FLStylePainter
class MReportObject(ProjectClass, QObject):
class BorderStyle(Enum):
NoPen = 0
SolidLine = 1
DashLine = 2
DotLine = 3
DashDotLine = 4
DashDotDotLine = 5
class ReportObjectType(Enum):
Invalid = 0
Label = 1
Field = 2
Calc = 3
Special = 4
@decorators.BetaImplementation
def __init__(self, *args):
if len(args) and isinstance(args[0], MReportObject):
self.copy(args[0])
else:
super(MReportObject, self).__init__()
self.xpos_ = 0
self.ypos_ = 0
self.width_ = 40
self.height_ = 23
            self.backgroundColor_ = QtGui.QColor(255, 255, 255)
            self.foregroundColor_ = QtGui.QColor(0, 0, 0)
            self.borderColor_ = QtGui.QColor(0, 0, 0)
            self.borderWidth_ = 1
            self.borderStyle_ = self.BorderStyle.SolidLine
            self.sectionIndex_ = -1
            self.transparent_ = False
            self.objectId_ = 0
@decorators.NotImplementedWarn
    def operator(self, mro):
return self
@decorators.BetaImplementation
def draw(self, p):
self.drawBase(p)
return 0
@decorators.BetaImplementation
def drawBase(self, p):
if p.drawRect(self):
return
restore = False
if p.errCode() == FLStylePainter.IdNotFound:
p.painter().save(self.name())
p.applyTransforms()
p.painter().translate(self.xpos_, self.ypos_)
restore = True
if self.borderStyle_ != self.BorderStyle.NoPen or self.transparent_:
if self.transparent_:
p.painter().setBrush(Qt.NoBrush)
else:
p.painter().setBrush(self.backgroundColor_)
            if self.borderStyle_ != self.BorderStyle.NoPen:
p.painter().setPen(QtGui.QPen(
self.borderColor_, self.borderWidth_, self.borderStyle_)
)
else:
p.painter().setPen(Qt.NoPen)
p.painter().drawRect(0, 0, self.width_, self.height_)
else:
p.painter().fillRect(
0, 0,
self.width_, self.height_,
self.backgroundColor_
)
if restore:
p.painter().restore()
@decorators.BetaImplementation
def setGeometry(self, x, y, w, h):
self.xpos_ = x
self.ypos_ = y
self.width_ = w
self.height_ = h
@decorators.BetaImplementation
def move(self, x, y):
self.xpos_ = x
self.ypos_ = y
@decorators.BetaImplementation
def setBackgroundColor(self, r, g, b):
self.backgroundColor_.setRgb(r, g, b)
@decorators.BetaImplementation
def setForegroundColor(self, r, g, b):
self.foregroundColor_.setRgb(r, g, b)
@decorators.BetaImplementation
def setBorderColor(self, r, g, b):
self.borderColor_.setRgb(r, g, b)
@decorators.BetaImplementation
def copy(self, mro):
self.xpos_ = mro.xpos_
self.ypos_ = mro.ypos_
self.width_ = mro.width_
self.height_ = mro.height_
self.backgroundColor_ = mro.backgroundColor_
self.foregroundColor_ = mro.foregroundColor_
self.borderColor_ = mro.borderColor_
self.borderWidth_ = mro.borderWidth_
self.borderStyle_ = mro.borderStyle_
self.sectionIndex_ = mro.sectionIndex_
self.transparent_ = mro.transparent_
self.objectId_ = mro.objectId_
@decorators.BetaImplementation
def RTTI(self):
return self.ReportObjectType.Invalid
@decorators.BetaImplementation
def getX(self):
return self.xpos_
@decorators.BetaImplementation
def getY(self):
return self.ypos_
@decorators.BetaImplementation
def getHeight(self):
return self.height_
@decorators.BetaImplementation
def getWidth(self):
return self.width_
@decorators.BetaImplementation
def getDrawAtBottom(self):
return self.drawAtBottom_
@decorators.BetaImplementation
def getSectionIndex(self):
return self.sectionIndex_
@decorators.BetaImplementation
def getObjectId(self):
return self.objectId_
@decorators.BetaImplementation
def setBorderWidth(self, width):
self.borderWidth_ = width
@decorators.BetaImplementation
def setBorderStyle(self, style):
self.borderStyle_ = style
@decorators.BetaImplementation
def setTransparent(self, t):
self.transparent_ = t
@decorators.BetaImplementation
def setDrawAtBottom(self, b):
self.drawAtBottom_ = b
@decorators.BetaImplementation
def setSectionIndex(self, idx):
self.sectionIndex_ = idx
@decorators.BetaImplementation
def setObjectId(self, id):
self.objectId_ = id
| true
| true
|
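A short usage sketch for the report object above; it assumes the pineboolib imports resolve, and the values are illustrative:

obj = MReportObject()
obj.setGeometry(10, 20, 120, 40)  # x, y, width, height
obj.setBorderStyle(MReportObject.BorderStyle.DashLine)
obj.setBackgroundColor(240, 240, 240)
assert (obj.getX(), obj.getY(), obj.getWidth(), obj.getHeight()) == (10, 20, 120, 40)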
1c440499d9570cd84e8b5504049bea924a674c85
| 2,985
|
py
|
Python
|
deepxde/geometry/geometry_3d.py
|
mitchelldaneker/deepxde
|
62e09b62ceaab6bda2ebbd02dc30ad99c2990302
|
[
"Apache-2.0"
] | 955
|
2019-06-21T21:56:02.000Z
|
2022-03-31T03:44:45.000Z
|
deepxde/geometry/geometry_3d.py
|
mitchelldaneker/deepxde
|
62e09b62ceaab6bda2ebbd02dc30ad99c2990302
|
[
"Apache-2.0"
] | 517
|
2019-07-25T16:47:44.000Z
|
2022-03-31T17:37:58.000Z
|
deepxde/geometry/geometry_3d.py
|
mitchelldaneker/deepxde
|
62e09b62ceaab6bda2ebbd02dc30ad99c2990302
|
[
"Apache-2.0"
] | 374
|
2019-06-24T00:44:16.000Z
|
2022-03-30T08:17:36.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from .geometry_2d import Rectangle
from .geometry_nd import Hypercube, Hypersphere
class Cuboid(Hypercube):
"""
Args:
xmin: Coordinate of bottom left corner.
xmax: Coordinate of top right corner.
"""
def __init__(self, xmin, xmax):
super(Cuboid, self).__init__(xmin, xmax)
dx = self.xmax - self.xmin
self.area = 2 * np.sum(dx * np.roll(dx, 2))
def random_boundary_points(self, n, random="pseudo"):
pts = []
density = n / self.area
rect = Rectangle(self.xmin[:-1], self.xmax[:-1])
for z in [self.xmin[-1], self.xmax[-1]]:
u = rect.random_points(int(np.ceil(density * rect.area)), random=random)
pts.append(np.hstack((u, np.full((len(u), 1), z))))
rect = Rectangle(self.xmin[::2], self.xmax[::2])
for y in [self.xmin[1], self.xmax[1]]:
u = rect.random_points(int(np.ceil(density * rect.area)), random=random)
pts.append(np.hstack((u[:, 0:1], np.full((len(u), 1), y), u[:, 1:])))
rect = Rectangle(self.xmin[1:], self.xmax[1:])
for x in [self.xmin[0], self.xmax[0]]:
u = rect.random_points(int(np.ceil(density * rect.area)), random=random)
pts.append(np.hstack((np.full((len(u), 1), x), u)))
pts = np.vstack(pts)
if len(pts) > n:
return pts[np.random.choice(len(pts), size=n, replace=False)]
return pts
def uniform_boundary_points(self, n):
h = (self.area / n) ** 0.5
nx, ny, nz = np.ceil((self.xmax - self.xmin) / h).astype(int) + 1
x = np.linspace(self.xmin[0], self.xmax[0], num=nx)
y = np.linspace(self.xmin[1], self.xmax[1], num=ny)
z = np.linspace(self.xmin[2], self.xmax[2], num=nz)
pts = []
for v in [self.xmin[-1], self.xmax[-1]]:
u = list(itertools.product(x, y))
pts.append(np.hstack((u, np.full((len(u), 1), v))))
if nz > 2:
for v in [self.xmin[1], self.xmax[1]]:
u = np.array(list(itertools.product(x, z[1:-1])))
pts.append(np.hstack((u[:, 0:1], np.full((len(u), 1), v), u[:, 1:])))
if ny > 2 and nz > 2:
for v in [self.xmin[0], self.xmax[0]]:
u = list(itertools.product(y[1:-1], z[1:-1]))
pts.append(np.hstack((np.full((len(u), 1), v), u)))
pts = np.vstack(pts)
if n != len(pts):
print(
"Warning: {} points required, but {} points sampled.".format(
n, len(pts)
)
)
return pts
class Sphere(Hypersphere):
"""
Args:
center: Center of the sphere.
radius: Radius of the sphere.
"""
def __init__(self, center, radius):
super(Sphere, self).__init__(center, radius)
| 35.963855
| 85
| 0.540369
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from .geometry_2d import Rectangle
from .geometry_nd import Hypercube, Hypersphere
class Cuboid(Hypercube):
def __init__(self, xmin, xmax):
super(Cuboid, self).__init__(xmin, xmax)
dx = self.xmax - self.xmin
self.area = 2 * np.sum(dx * np.roll(dx, 2))
def random_boundary_points(self, n, random="pseudo"):
pts = []
density = n / self.area
rect = Rectangle(self.xmin[:-1], self.xmax[:-1])
for z in [self.xmin[-1], self.xmax[-1]]:
u = rect.random_points(int(np.ceil(density * rect.area)), random=random)
pts.append(np.hstack((u, np.full((len(u), 1), z))))
rect = Rectangle(self.xmin[::2], self.xmax[::2])
for y in [self.xmin[1], self.xmax[1]]:
u = rect.random_points(int(np.ceil(density * rect.area)), random=random)
pts.append(np.hstack((u[:, 0:1], np.full((len(u), 1), y), u[:, 1:])))
rect = Rectangle(self.xmin[1:], self.xmax[1:])
for x in [self.xmin[0], self.xmax[0]]:
u = rect.random_points(int(np.ceil(density * rect.area)), random=random)
pts.append(np.hstack((np.full((len(u), 1), x), u)))
pts = np.vstack(pts)
if len(pts) > n:
return pts[np.random.choice(len(pts), size=n, replace=False)]
return pts
def uniform_boundary_points(self, n):
h = (self.area / n) ** 0.5
nx, ny, nz = np.ceil((self.xmax - self.xmin) / h).astype(int) + 1
x = np.linspace(self.xmin[0], self.xmax[0], num=nx)
y = np.linspace(self.xmin[1], self.xmax[1], num=ny)
z = np.linspace(self.xmin[2], self.xmax[2], num=nz)
pts = []
for v in [self.xmin[-1], self.xmax[-1]]:
u = list(itertools.product(x, y))
pts.append(np.hstack((u, np.full((len(u), 1), v))))
if nz > 2:
for v in [self.xmin[1], self.xmax[1]]:
u = np.array(list(itertools.product(x, z[1:-1])))
pts.append(np.hstack((u[:, 0:1], np.full((len(u), 1), v), u[:, 1:])))
if ny > 2 and nz > 2:
for v in [self.xmin[0], self.xmax[0]]:
u = list(itertools.product(y[1:-1], z[1:-1]))
pts.append(np.hstack((np.full((len(u), 1), v), u)))
pts = np.vstack(pts)
if n != len(pts):
print(
"Warning: {} points required, but {} points sampled.".format(
n, len(pts)
)
)
return pts
class Sphere(Hypersphere):
def __init__(self, center, radius):
super(Sphere, self).__init__(center, radius)
| true
| true
|
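A quick sanity check of the surface-area formula above: with dx = [a, b, c], dx * np.roll(dx, 2) gives [ab, bc, ca], so 2 * np.sum(...) is the familiar 2(ab + bc + ca). A hedged standalone sketch, assuming deepxde is installed:

import numpy as np
from deepxde.geometry import Cuboid

box = Cuboid(xmin=[0, 0, 0], xmax=[1, 2, 3])
assert np.isclose(box.area, 22.0)  # 2 * (1*2 + 2*3 + 3*1)
pts = box.random_boundary_points(100)
assert pts.shape[1] == 3  # sampled points lie on the box surface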
1c44054209fde45c023c2b56668fd3ef83696358
| 5,515
|
py
|
Python
|
src_py/rlpytorch/trainer/utils.py
|
r-woo/elfai
|
2c37625e608e7720b8bd7847419d7b53e87e260a
|
[
"BSD-3-Clause"
] | 3,305
|
2018-05-02T17:41:36.000Z
|
2022-03-28T05:57:56.000Z
|
src_py/rlpytorch/trainer/utils.py
|
r-woo/elfai
|
2c37625e608e7720b8bd7847419d7b53e87e260a
|
[
"BSD-3-Clause"
] | 135
|
2018-05-02T19:25:13.000Z
|
2020-08-20T02:39:14.000Z
|
src_py/rlpytorch/trainer/utils.py
|
r-woo/elfai
|
2c37625e608e7720b8bd7847419d7b53e87e260a
|
[
"BSD-3-Clause"
] | 604
|
2018-05-02T19:38:45.000Z
|
2022-03-18T10:01:57.000Z
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import defaultdict, deque, Counter
from datetime import datetime
from elf.options import auto_import_options, PyOptionSpec
class SymLink(object):
def __init__(self, sym_prefix, latest_k=5):
self.sym_prefix = sym_prefix
self.latest_k = latest_k
self.latest_files = deque()
def feed(self, filename):
self.latest_files.appendleft(filename)
if len(self.latest_files) > self.latest_k:
self.latest_files.pop()
for k, name in enumerate(self.latest_files):
symlink_file = self.sym_prefix + str(k)
try:
if os.path.exists(symlink_file):
os.unlink(symlink_file)
os.symlink(name, symlink_file)
except BaseException:
print(
"Build symlink %s for %s failed, skipped" %
(symlink_file, name))
class ModelSaver(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'record_dir',
'directory to record in',
'./record')
spec.addStrOption(
'save_prefix',
'prefix of savefiles',
'save')
spec.addStrOption(
'save_dir',
'directory for savefiles',
os.environ.get('save', './'))
spec.addStrOption(
'latest_symlink',
'name for latest model symlink',
'latest')
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
return spec
@auto_import_options
def __init__(self, option_map):
self.save = (self.options.num_games == self.options.batchsize)
if self.save and not os.path.exists(self.options.record_dir):
os.mkdir(self.options.record_dir)
if not os.path.exists(self.options.save_dir):
os.mkdir(self.options.save_dir)
self.symlinker = SymLink(
os.path.join(
self.options.save_dir,
self.options.latest_symlink))
def feed(self, model):
basename = self.options.save_prefix + "-%d.bin" % model.step
print("Save to " + self.options.save_dir)
filename = os.path.join(self.options.save_dir, basename)
print("Filename = " + filename)
model.save(filename)
# Create a symlink
self.symlinker.feed(basename)
class ValueStats(object):
def __init__(self, name=None):
self.name = name
self.reset()
def feed(self, v):
self.summation += v
if v > self.max_value:
self.max_value = v
self.max_idx = self.counter
if v < self.min_value:
self.min_value = v
self.min_idx = self.counter
self.counter += 1
def summary(self, info=None):
info = "" if info is None else info
name = "" if self.name is None else self.name
if self.counter > 0:
try:
return "%s%s[%d]: avg: %.5f, min: %.5f[%d], max: %.5f[%d]" % (
info, name, self.counter, self.summation / self.counter,
self.min_value, self.min_idx, self.max_value, self.max_idx
)
except BaseException:
return "%s%s[Err]:" % (info, name)
else:
return "%s%s[0]" % (info, name)
def reset(self):
self.counter = 0
self.summation = 0.0
self.max_value = -1e38
self.min_value = 1e38
self.max_idx = None
self.min_idx = None
def topk_accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class MultiCounter(object):
def __init__(self, verbose=False):
self.last_time = None
self.verbose = verbose
self.counts = Counter()
self.stats = defaultdict(lambda: ValueStats())
self.total_count = 0
def inc(self, key):
if self.verbose:
print("[MultiCounter]: %s" % key)
self.counts[key] += 1
self.total_count += 1
def reset(self):
for k in sorted(self.stats.keys()):
self.stats[k].reset()
self.counts = Counter()
self.total_count = 0
self.last_time = datetime.now()
def summary(self, global_counter=None):
this_time = datetime.now()
if self.last_time is not None:
print(
"[%d] Time spent = %f ms" %
(global_counter,
(this_time - self.last_time).total_seconds() * 1000))
for key, count in self.counts.items():
print("%s: %d/%d" % (key, count, self.total_count))
for k in sorted(self.stats.keys()):
v = self.stats[k]
print(v.summary(info=str(global_counter) + ":" + k))
| 30.469613
| 78
| 0.558114
|
import os
from collections import defaultdict, deque, Counter
from datetime import datetime
from elf.options import auto_import_options, PyOptionSpec
class SymLink(object):
def __init__(self, sym_prefix, latest_k=5):
self.sym_prefix = sym_prefix
self.latest_k = latest_k
self.latest_files = deque()
def feed(self, filename):
self.latest_files.appendleft(filename)
if len(self.latest_files) > self.latest_k:
self.latest_files.pop()
for k, name in enumerate(self.latest_files):
symlink_file = self.sym_prefix + str(k)
try:
if os.path.exists(symlink_file):
os.unlink(symlink_file)
os.symlink(name, symlink_file)
except BaseException:
print(
"Build symlink %s for %s failed, skipped" %
(symlink_file, name))
class ModelSaver(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'record_dir',
'directory to record in',
'./record')
spec.addStrOption(
'save_prefix',
'prefix of savefiles',
'save')
spec.addStrOption(
'save_dir',
'directory for savefiles',
os.environ.get('save', './'))
spec.addStrOption(
'latest_symlink',
'name for latest model symlink',
'latest')
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
return spec
@auto_import_options
def __init__(self, option_map):
self.save = (self.options.num_games == self.options.batchsize)
if self.save and not os.path.exists(self.options.record_dir):
os.mkdir(self.options.record_dir)
if not os.path.exists(self.options.save_dir):
os.mkdir(self.options.save_dir)
self.symlinker = SymLink(
os.path.join(
self.options.save_dir,
self.options.latest_symlink))
def feed(self, model):
basename = self.options.save_prefix + "-%d.bin" % model.step
print("Save to " + self.options.save_dir)
filename = os.path.join(self.options.save_dir, basename)
print("Filename = " + filename)
model.save(filename)
self.symlinker.feed(basename)
class ValueStats(object):
def __init__(self, name=None):
self.name = name
self.reset()
def feed(self, v):
self.summation += v
if v > self.max_value:
self.max_value = v
self.max_idx = self.counter
if v < self.min_value:
self.min_value = v
self.min_idx = self.counter
self.counter += 1
def summary(self, info=None):
info = "" if info is None else info
name = "" if self.name is None else self.name
if self.counter > 0:
try:
return "%s%s[%d]: avg: %.5f, min: %.5f[%d], max: %.5f[%d]" % (
info, name, self.counter, self.summation / self.counter,
self.min_value, self.min_idx, self.max_value, self.max_idx
)
except BaseException:
return "%s%s[Err]:" % (info, name)
else:
return "%s%s[0]" % (info, name)
def reset(self):
self.counter = 0
self.summation = 0.0
self.max_value = -1e38
self.min_value = 1e38
self.max_idx = None
self.min_idx = None
def topk_accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class MultiCounter(object):
def __init__(self, verbose=False):
self.last_time = None
self.verbose = verbose
self.counts = Counter()
self.stats = defaultdict(lambda: ValueStats())
self.total_count = 0
def inc(self, key):
if self.verbose:
print("[MultiCounter]: %s" % key)
self.counts[key] += 1
self.total_count += 1
def reset(self):
for k in sorted(self.stats.keys()):
self.stats[k].reset()
self.counts = Counter()
self.total_count = 0
self.last_time = datetime.now()
def summary(self, global_counter=None):
this_time = datetime.now()
if self.last_time is not None:
print(
"[%d] Time spent = %f ms" %
(global_counter,
(this_time - self.last_time).total_seconds() * 1000))
for key, count in self.counts.items():
print("%s: %d/%d" % (key, count, self.total_count))
for k in sorted(self.stats.keys()):
v = self.stats[k]
print(v.summary(info=str(global_counter) + ":" + k))
| true
| true
|
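A hedged usage sketch of the ValueStats helper defined above; the numbers are illustrative:

stats = ValueStats(name="loss")
for v in [0.9, 0.7, 0.8]:
    stats.feed(v)
# Prints: loss[3]: avg: 0.80000, min: 0.70000[1], max: 0.90000[0]
print(stats.summary())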
1c44057063242c94c41dd5976ac9aa98bd752b8e
| 956
|
py
|
Python
|
devel/test_forward_all.py
|
saidbakr/darkhttpd
|
cb548aef6ded6794b2a5bee06f40ec1ce415baad
|
[
"ISC"
] | 788
|
2021-01-23T03:58:42.000Z
|
2022-03-28T12:32:35.000Z
|
devel/test_forward_all.py
|
saidbakr/darkhttpd
|
cb548aef6ded6794b2a5bee06f40ec1ce415baad
|
[
"ISC"
] | 18
|
2021-02-15T06:31:17.000Z
|
2022-03-10T21:46:47.000Z
|
devel/test_forward_all.py
|
saidbakr/darkhttpd
|
cb548aef6ded6794b2a5bee06f40ec1ce415baad
|
[
"ISC"
] | 59
|
2021-01-23T10:10:15.000Z
|
2022-03-25T13:50:16.000Z
|
#!/usr/bin/env python3
# This is run by the "run-tests" script.
import unittest
from test import TestHelper, Conn, parse
class TestForwardAll(TestHelper):
def test_forward_root(self):
resp = self.get('/', req_hdrs={'Host': 'not-example.com'})
status, hdrs, body = parse(resp)
self.assertContains(status, "301 Moved Permanently")
expect = "http://catchall.example.com/"
self.assertEqual(hdrs["Location"], expect)
self.assertContains(body, expect)
def test_forward_relative(self):
resp = self.get('/foo/bar',
req_hdrs={'Host': 'still-not.example.com'})
status, hdrs, body = parse(resp)
self.assertContains(status, "301 Moved Permanently")
expect = "http://catchall.example.com/foo/bar"
self.assertEqual(hdrs["Location"], expect)
self.assertContains(body, expect)
if __name__ == '__main__':
unittest.main()
# vim:set ts=4 sw=4 et:
| 34.142857
| 66
| 0.643305
|
import unittest
from test import TestHelper, Conn, parse
class TestForwardAll(TestHelper):
def test_forward_root(self):
resp = self.get('/', req_hdrs={'Host': 'not-example.com'})
status, hdrs, body = parse(resp)
self.assertContains(status, "301 Moved Permanently")
expect = "http://catchall.example.com/"
self.assertEqual(hdrs["Location"], expect)
self.assertContains(body, expect)
def test_forward_relative(self):
resp = self.get('/foo/bar',
req_hdrs={'Host': 'still-not.example.com'})
status, hdrs, body = parse(resp)
self.assertContains(status, "301 Moved Permanently")
expect = "http://catchall.example.com/foo/bar"
self.assertEqual(hdrs["Location"], expect)
self.assertContains(body, expect)
if __name__ == '__main__':
unittest.main()
| true
| true
|
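For readers without the devel/test.py helpers: parse above splits a raw HTTP response into a status line, a header dict, and a body. A minimal sketch of that split (the real helper may differ in detail):

raw = (
    "HTTP/1.1 301 Moved Permanently\r\n"
    "Location: http://catchall.example.com/foo/bar\r\n"
    "\r\n"
    "Moved to http://catchall.example.com/foo/bar\n"
)
head, _, body = raw.partition("\r\n\r\n")
status, *header_lines = head.split("\r\n")
hdrs = dict(line.split(": ", 1) for line in header_lines)
assert "301 Moved Permanently" in status
assert hdrs["Location"] == "http://catchall.example.com/foo/bar"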
1c4405dd71703bf265606d16a607178206d20790
| 5,053
|
py
|
Python
|
model/flops.py
|
JACKYLUO1991/Face-skin-hair-segmentaiton-and-skin-color-evaluation
|
de2375dc0ebff03b8ac39c8a16dee427838c8ac4
|
[
"Apache-2.0"
] | 152
|
2020-01-02T01:27:50.000Z
|
2022-03-23T16:40:01.000Z
|
model/flops.py
|
JACKYLUO1991/Face-skin-hair-segmentaiton-and-skin-color-evaluation
|
de2375dc0ebff03b8ac39c8a16dee427838c8ac4
|
[
"Apache-2.0"
] | 10
|
2020-01-03T07:29:59.000Z
|
2021-12-11T10:57:30.000Z
|
model/flops.py
|
JACKYLUO1991/Face-skin-hair-segmentaiton-and-skin-color-evaluation
|
de2375dc0ebff03b8ac39c8a16dee427838c8ac4
|
[
"Apache-2.0"
] | 40
|
2020-01-03T00:41:49.000Z
|
2021-11-23T11:44:07.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/27 17:49
# @Author : JackyLUO
# @E-mail : lingluo@stumail.neu.edu.cn
# @Site :
# @File : flops.py
# @Software: PyCharm
# https://github.com/ckyrkou/Keras_FLOP_Estimator
import keras.backend as K
def get_flops(model, table=False):
if table:
print('%25s | %16s | %16s | %16s | %16s | %6s | %6s' % (
'Layer Name', 'Input Shape', 'Output Shape', 'Kernel Size', 'Filters', 'Strides', 'FLOPS'))
print('-' * 170)
t_flops = 0
t_macc = 0
for l in model.layers:
o_shape, i_shape, strides, ks, filters = ['', '', ''], ['', '', ''], [1, 1], [0, 0], [0, 0]
flops = 0
macc = 0
name = l.name
factor = 1e9
if 'InputLayer' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = i_shape
if 'Reshape' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
if 'Add' in str(l) or 'Maximum' in str(l) or 'Concatenate' in str(l):
i_shape = l.input[0].get_shape()[1:4].as_list() + [len(l.input)]
o_shape = l.output.get_shape()[1:4].as_list()
flops = (len(l.input) - 1) * i_shape[0] * i_shape[1] * i_shape[2]
if 'Average' in str(l) and 'pool' not in str(l):
i_shape = l.input[0].get_shape()[1:4].as_list() + [len(l.input)]
o_shape = l.output.get_shape()[1:4].as_list()
flops = len(l.input) * i_shape[0] * i_shape[1] * i_shape[2]
if 'BatchNormalization' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
bflops = 1
for i in range(len(i_shape)):
bflops *= i_shape[i]
            flops = bflops
if 'Activation' in str(l) or 'activation' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
bflops = 1
for i in range(len(i_shape)):
bflops *= i_shape[i]
            flops = bflops
if 'pool' in str(l) and ('Global' not in str(l)):
i_shape = l.input.get_shape()[1:4].as_list()
strides = l.strides
ks = l.pool_size
flops = ((i_shape[0] / strides[0]) * (i_shape[1] / strides[1]) * (ks[0] * ks[1] * i_shape[2]))
if 'Flatten' in str(l):
i_shape = l.input.shape[1:4].as_list()
flops = 1
out_vec = 1
for i in range(len(i_shape)):
flops *= i_shape[i]
out_vec *= i_shape[i]
o_shape = flops
flops = 0
if 'Dense' in str(l):
print(l.input)
i_shape = l.input.shape[1:4].as_list()[0]
if i_shape is None:
i_shape = out_vec
o_shape = l.output.shape[1:4].as_list()
flops = 2 * (o_shape[0] * i_shape)
macc = flops / 2
if 'Padding' in str(l):
flops = 0
if 'Global' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
flops = ((i_shape[0]) * (i_shape[1]) * (i_shape[2]))
o_shape = [l.output.get_shape()[1:4].as_list(), 1, 1]
out_vec = o_shape
if 'Conv2D' in str(l) and 'DepthwiseConv2D' not in str(l) and 'SeparableConv2D' not in str(l):
strides = l.strides
ks = l.kernel_size
filters = l.filters
# if 'Conv2DTranspose' in str(l):
# i_shape = list(K.int_shape(l.input)[1:4])
# o_shape = list(K.int_shape(l.output)[1:4])
# else:
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
if filters is None:
filters = i_shape[2]
flops = 2 * ((filters * ks[0] * ks[1] * i_shape[2]) * (
(i_shape[0] / strides[0]) * (i_shape[1] / strides[1])))
macc = flops / 2
if 'Conv2D' in str(l) and 'DepthwiseConv2D' in str(l) and 'SeparableConv2D' not in str(l):
strides = l.strides
ks = l.kernel_size
filters = l.filters
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
if filters is None:
filters = i_shape[2]
            flops = 2 * ((ks[0] * ks[1] * i_shape[2]) * ((i_shape[0] / strides[0]) * (
                i_shape[1] / strides[1])))
macc = flops / 2
t_macc += macc
t_flops += flops
if table:
print('%25s | %16s | %16s | %16s | %16s | %6s | %5.4f' % (
name, str(i_shape), str(o_shape), str(ks), str(filters), str(strides), flops))
t_flops = t_flops / factor
print('Total FLOPS (x 10^-9): %10.8f G' % (t_flops))
print('Total MACCs: %10.8f\n' % (t_macc))
return
| 34.141892
| 106
| 0.493766
|
import keras.backend as K
def get_flops(model, table=False):
if table:
print('%25s | %16s | %16s | %16s | %16s | %6s | %6s' % (
'Layer Name', 'Input Shape', 'Output Shape', 'Kernel Size', 'Filters', 'Strides', 'FLOPS'))
print('-' * 170)
t_flops = 0
t_macc = 0
for l in model.layers:
o_shape, i_shape, strides, ks, filters = ['', '', ''], ['', '', ''], [1, 1], [0, 0], [0, 0]
flops = 0
macc = 0
name = l.name
factor = 1e9
if 'InputLayer' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = i_shape
if 'Reshape' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
if 'Add' in str(l) or 'Maximum' in str(l) or 'Concatenate' in str(l):
i_shape = l.input[0].get_shape()[1:4].as_list() + [len(l.input)]
o_shape = l.output.get_shape()[1:4].as_list()
flops = (len(l.input) - 1) * i_shape[0] * i_shape[1] * i_shape[2]
if 'Average' in str(l) and 'pool' not in str(l):
i_shape = l.input[0].get_shape()[1:4].as_list() + [len(l.input)]
o_shape = l.output.get_shape()[1:4].as_list()
flops = len(l.input) * i_shape[0] * i_shape[1] * i_shape[2]
if 'BatchNormalization' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
bflops = 1
for i in range(len(i_shape)):
bflops *= i_shape[i]
            flops = bflops
if 'Activation' in str(l) or 'activation' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
bflops = 1
for i in range(len(i_shape)):
bflops *= i_shape[i]
            flops = bflops
if 'pool' in str(l) and ('Global' not in str(l)):
i_shape = l.input.get_shape()[1:4].as_list()
strides = l.strides
ks = l.pool_size
flops = ((i_shape[0] / strides[0]) * (i_shape[1] / strides[1]) * (ks[0] * ks[1] * i_shape[2]))
if 'Flatten' in str(l):
i_shape = l.input.shape[1:4].as_list()
flops = 1
out_vec = 1
for i in range(len(i_shape)):
flops *= i_shape[i]
out_vec *= i_shape[i]
o_shape = flops
flops = 0
if 'Dense' in str(l):
print(l.input)
i_shape = l.input.shape[1:4].as_list()[0]
if i_shape is None:
i_shape = out_vec
o_shape = l.output.shape[1:4].as_list()
flops = 2 * (o_shape[0] * i_shape)
macc = flops / 2
if 'Padding' in str(l):
flops = 0
if 'Global' in str(l):
i_shape = l.input.get_shape()[1:4].as_list()
flops = ((i_shape[0]) * (i_shape[1]) * (i_shape[2]))
o_shape = [l.output.get_shape()[1:4].as_list(), 1, 1]
out_vec = o_shape
if 'Conv2D' in str(l) and 'DepthwiseConv2D' not in str(l) and 'SeparableConv2D' not in str(l):
strides = l.strides
ks = l.kernel_size
filters = l.filters
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
if filters is None:
filters = i_shape[2]
flops = 2 * ((filters * ks[0] * ks[1] * i_shape[2]) * (
(i_shape[0] / strides[0]) * (i_shape[1] / strides[1])))
macc = flops / 2
if 'Conv2D' in str(l) and 'DepthwiseConv2D' in str(l) and 'SeparableConv2D' not in str(l):
strides = l.strides
ks = l.kernel_size
filters = l.filters
i_shape = l.input.get_shape()[1:4].as_list()
o_shape = l.output.get_shape()[1:4].as_list()
if filters is None:
filters = i_shape[2]
flops = 2 * ((ks[0] * ks[1] * i_shape[2]) * ((i_shape[0] / strides[0]) * (
                i_shape[1] / strides[1])))
macc = flops / 2
t_macc += macc
t_flops += flops
if table:
print('%25s | %16s | %16s | %16s | %16s | %6s | %5.4f' % (
name, str(i_shape), str(o_shape), str(ks), str(filters), str(strides), flops))
t_flops = t_flops / factor
print('Total FLOPS (x 10^-9): %10.8f G' % (t_flops))
print('Total MACCs: %10.8f\n' % (t_macc))
return
| true
| true
|
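The Conv2D branch above uses the standard count: each output position performs k*k*c_in multiply-accumulates per filter, and one MAC counts as two FLOPs. A hedged standalone check with illustrative numbers:

def conv2d_flops(h, w, c_in, c_out, k, stride):
    # FLOPs = 2 * MACs = 2 * c_out * (k * k * c_in) * out_h * out_w
    out_h, out_w = h // stride, w // stride
    return 2 * c_out * k * k * c_in * out_h * out_w

# A 32x32x3 input through sixteen 3x3 filters at stride 1:
assert conv2d_flops(32, 32, 3, 16, 3, 1) == 2 * 16 * 9 * 3 * 32 * 32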
1c4406d3ffd11fb02809d090a8f414c71c74c0e7
| 835
|
py
|
Python
|
tests/acceptance/test_acceptance.py
|
magmax/livedoc
|
40b7041bcb36b2a2ebbd3d5906ce5954dbc7f1ca
|
[
"Python-2.0"
] | null | null | null |
tests/acceptance/test_acceptance.py
|
magmax/livedoc
|
40b7041bcb36b2a2ebbd3d5906ce5954dbc7f1ca
|
[
"Python-2.0"
] | 2
|
2016-06-13T08:37:20.000Z
|
2021-03-22T16:56:10.000Z
|
tests/acceptance/test_acceptance.py
|
magmax/livedoc
|
40b7041bcb36b2a2ebbd3d5906ce5954dbc7f1ca
|
[
"Python-2.0"
] | null | null | null |
import os
import unittest
import tempfile
from livedoc.__main__ import main
class LivedocTest(unittest.TestCase):
def test_example1(self):
this_path = os.path.dirname(__file__)
example_path = os.path.join(
os.path.dirname(os.path.dirname(this_path)),
'examples',
'example1',
)
with tempfile.TemporaryDirectory() as tmp:
rc = main([example_path, '-o', tmp, '-vvvv'])
assert rc == 0
def test_example2(self):
this_path = os.path.dirname(__file__)
example_path = os.path.join(
os.path.dirname(os.path.dirname(this_path)),
'examples',
'example2',
)
with tempfile.TemporaryDirectory() as tmp:
rc = main([example_path, '-o', tmp, '-vvvv'])
assert rc == 2
| 28.793103
| 57
| 0.578443
|
import os
import unittest
import tempfile
from livedoc.__main__ import main
class LivedocTest(unittest.TestCase):
def test_example1(self):
this_path = os.path.dirname(__file__)
example_path = os.path.join(
os.path.dirname(os.path.dirname(this_path)),
'examples',
'example1',
)
with tempfile.TemporaryDirectory() as tmp:
rc = main([example_path, '-o', tmp, '-vvvv'])
assert rc == 0
def test_example2(self):
this_path = os.path.dirname(__file__)
example_path = os.path.join(
os.path.dirname(os.path.dirname(this_path)),
'examples',
'example2',
)
with tempfile.TemporaryDirectory() as tmp:
rc = main([example_path, '-o', tmp, '-vvvv'])
assert rc == 2
| true
| true
|
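The tests above work because main() returns a process exit code instead of calling sys.exit() itself, so it can be invoked in-process. A minimal sketch of that convention (names and codes are illustrative, not livedoc's internals):

import sys

def main(argv=None):
    argv = sys.argv[1:] if argv is None else argv
    if not argv:
        return 2  # usage/processing error, like rc == 2 above
    return 0      # success, like rc == 0 above

if __name__ == '__main__':
    sys.exit(main())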