hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a016080560981f030b6fcc190529f68ab9946c0
| 181
|
py
|
Python
|
virtual/lib/python3.6/site-packages/reviews/apps.py
|
Eccie-K/Awards
|
05bedf7c8aba4168d25715197d5bf3ad3e712ff8
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.6/site-packages/reviews/apps.py
|
Eccie-K/Awards
|
05bedf7c8aba4168d25715197d5bf3ad3e712ff8
|
[
"MIT"
] | 3
|
2021-03-19T03:19:31.000Z
|
2021-09-08T01:17:09.000Z
|
virtual/lib/python3.6/site-packages/reviews/apps.py
|
Eccie-K/Awards
|
05bedf7c8aba4168d25715197d5bf3ad3e712ff8
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ReviewsAppConfig(AppConfig):
    """Django application configuration for the ``reviews`` app."""

    name = 'reviews'  # dotted import path of the application package
    verbose_name = _('Reviews')  # human-readable, translatable display name
| 20.111111
| 55
| 0.762431
|
4a01621fbc3f423181efd23a7dc2dbf2eb01822b
| 10,441
|
py
|
Python
|
test/test_seirpp.py
|
uiuc-covid19-modeling/pydemic
|
3c0af60c2ac7e0dbf722584f61c45f9a2f993521
|
[
"MIT"
] | 6
|
2020-05-29T22:52:30.000Z
|
2020-11-08T23:27:07.000Z
|
test/test_seirpp.py
|
uiuc-covid19-modeling/pydemic
|
3c0af60c2ac7e0dbf722584f61c45f9a2f993521
|
[
"MIT"
] | null | null | null |
test/test_seirpp.py
|
uiuc-covid19-modeling/pydemic
|
3c0af60c2ac7e0dbf722584f61c45f9a2f993521
|
[
"MIT"
] | 5
|
2020-06-12T01:47:18.000Z
|
2022-03-29T13:26:09.000Z
|
__copyright__ = """
Copyright (C) 2020 George N Wong
Copyright (C) 2020 Zachary J Weiner
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pydemic.models import SEIRPlusPlusSimulation
from pydemic.distributions import GammaDistribution
from pydemic import MitigationModel
# WARNING: don't set to True unless you want to change the regression test data!
# When True, the tests below write freshly computed results into the HDF5
# regression file instead of comparing against it.
overwrite = False
def test_overwrite_isnt_true():
    """Guard test: fail CI if ``overwrite = True`` was committed.

    The original signature declared ``ctx_factory, grid_shape, proc_shape``
    parameters that are unused and not defined as fixtures anywhere in this
    file; under pytest, undeclared fixture parameters cause an error before
    the test body even runs, so they are removed here.
    """
    # only runs in pytest
    assert not overwrite
# Simulation time span (days) and the uniform grid of times at which results
# are sampled for regression comparison.
tspan = (50, 125)
t_eval = np.linspace(70, 120, 100)

# Keyword-argument sets exercising SEIRPlusPlusSimulation.__call__, keyed by
# case name (the key doubles as the HDF5 group suffix in the regression file).
cases_call = {
    "defaults": dict(
        age_distribution=np.array([1.]),
        total_population=1e6,
        initial_cases=10,
        p_critical=.9,
        p_dead=.9,
        p_positive=.4,
    ),
    "no_ifr": dict(
        age_distribution=np.array([.2, .3, .4, .1]),
        total_population=1e6,
        initial_cases=10,
        ifr=None,
        p_symptomatic=np.array([.1, .3, .5, .9]),
        p_critical=.9,
        p_dead=.9,
        p_positive=np.array([.4, .5, .6, .7]),
    ),
    "log_ifr": dict(
        age_distribution=np.array([.2, .3, .4, .1]),
        total_population=1e6,
        initial_cases=10,
        ifr=.007,
        p_symptomatic=np.array([.1, .3, .5, .9]),
        p_critical=.9,
        p_dead=.9,
        p_positive=np.array([.4, .5, .6, .7]),
    ),
    # Case overriding every tunable parameter at once.
    "change_all_params": dict(
        mitigation=MitigationModel(*tspan, [70, 80], [1., .4]),
        age_distribution=np.array([.2, .3, .4, .1]),
        total_population=1e6,
        initial_cases=9,
        ifr=.008,
        r0=2.5,
        serial_dist=GammaDistribution(4, 3.3),
        seasonal_forcing_amp=.1,
        peak_day=7,
        incubation_dist=GammaDistribution(5.3, 4),
        p_symptomatic=np.array([.2, .4, .5, .8]),
        p_positive=.9 * np.array([.2, .4, .5, .8]),
        hospitalized_dist=GammaDistribution(8, 4),
        p_hospitalized=np.array([.4, .6, .7, .8]),
        discharged_dist=GammaDistribution(7, 3),
        critical_dist=GammaDistribution(4, 1),
        p_critical=np.array([.3, .3, .7, .9]),
        dead_dist=GammaDistribution(4, 3),
        p_dead=np.array([.4, .4, .7, .9]),
        recovered_dist=GammaDistribution(8, 2.5),
        all_dead_dist=GammaDistribution(2., 1.5),
        all_dead_multiplier=1.3,
    )
}

# Equivalent parameter sets for SEIRPlusPlusSimulation.get_model_data, which
# takes flattened scalars (e.g. serial_mean/serial_std) in place of
# distribution objects, plus an explicit start_day.
cases_get_model_data = {
    "defaults": dict(
        start_day=tspan[0],
        age_distribution=np.array([1.]),
        total_population=1e6,
        initial_cases=10,
        p_critical=.9,
        p_dead=.9,
        p_positive=.4,
    ),
    "no_ifr": dict(
        start_day=tspan[0],
        age_distribution=np.array([.2, .3, .4, .1]),
        total_population=1e6,
        initial_cases=10,
        ifr=None,
        p_symptomatic=np.array([.1, .3, .5, .9]),
        p_critical=.9,
        p_dead=.9,
        p_positive=np.array([.4, .5, .6, .7]),
    ),
    "log_ifr": dict(
        start_day=tspan[0],
        age_distribution=np.array([.2, .3, .4, .1]),
        total_population=1e6,
        initial_cases=10,
        log_ifr=np.log(.007),
        p_symptomatic=np.array([.1, .3, .5, .9]),
        p_critical=.9,
        p_dead=.9,
        p_positive=np.array([.4, .5, .6, .7]),
    ),
    "change_all_params": dict(
        start_day=tspan[0],
        mitigation_t_0=70,
        mitigation_t_1=80,
        mitigation_factor_0=1.,
        mitigation_factor_1=.4,
        age_distribution=np.array([.2, .3, .4, .1]),
        total_population=1e6,
        initial_cases=9,
        ifr=.008,
        r0=2.5,
        serial_mean=4,
        serial_std=3.3,
        seasonal_forcing_amp=.1,
        peak_day=7,
        incubation_mean=5.3,
        incubation_std=4,
        p_symptomatic=np.array([.2, .4, .5, .8]),
        p_positive=.9 * np.array([.2, .4, .5, .8]),
        hospitalized_mean=8,
        hospitalized_std=4,
        p_hospitalized=np.array([.4, .6, .7, .8]),
        discharged_mean=7,
        discharged_std=3,
        critical_mean=4,
        critical_std=1,
        p_critical=np.array([.3, .3, .7, .9]),
        dead_mean=4,
        dead_std=3,
        p_dead=np.array([.4, .4, .7, .9]),
        recovered_mean=8,
        recovered_std=2.5,
        all_dead_mean=2.0,
        all_dead_std=1.5,
        all_dead_multiplier=1.3,
    )
}

# Multiplicative prefactors applied to the p_* parameters to generate the
# "_changed_prefactors" variant of each case.
change_prefactors = {
    # "p_symptomatic": .04,
    "p_positive": .234,
    "p_hospitalized": .2523,
    "p_critical": .34,
    "p_dead": .12,
}
def compare_results(a, b):
    """Return ``{column: (max, mean)}`` relative errors between DataFrames.

    For each column of *a*, the elementwise relative error ``|1 - a/b|`` is
    reduced (ignoring NaNs) to its maximum and mean.  Columns whose reduced
    errors are not both finite are printed for inspection and omitted from
    the returned mapping.
    """
    diffs = {}
    for column in a.columns:
        rel_err = np.abs(1 - a[column].to_numpy() / b[column].to_numpy())
        worst = np.nanmax(rel_err)
        mean = np.nanmean(rel_err)
        if np.isfinite([worst, mean]).all():
            diffs[column] = (worst, mean)
        else:
            print(column, a[column])
    return diffs
# Stored regression data lives alongside this test module.
regression_path = Path(__file__).parent / "regression.h5"
@pytest.mark.parametrize("case, params", cases_call.items())
def test_seirpp_call(case, params):
    """Regression-test SEIRPlusPlusSimulation.__call__ against stored data.

    Each case is run twice: once as given, and once with the
    ``change_prefactors`` scaling applied, with results compared against
    both the "seirpp_call" and "seirpp_get_model_data" regression groups.
    """
    # Work on a copy: the original mutated the shared module-level
    # parametrize dict below (``params["ifr"] = None`` and in-place array
    # scaling), polluting state for any later direct invocation.
    params = dict(params)

    def get_df(**params):
        """Run one simulation and return its results sampled at t_eval."""
        total_population = params.get("total_population")
        initial_cases = params.pop("initial_cases")
        age_distribution = params.get("age_distribution")
        sim = SEIRPlusPlusSimulation(**params)

        # Seed the initial state: cases spread across the age distribution,
        # everyone else susceptible.
        y0 = {}
        for key in ("susceptible", "infected"):
            y0[key] = np.zeros_like(age_distribution)
        y0["infected"][...] = initial_cases * np.array(age_distribution)
        y0["susceptible"][...] = (
            total_population * np.array(age_distribution) - y0["infected"]
        )

        result = sim(tspan, y0)

        # Interpolate age-summed compartments onto the common time grid.
        from scipy.interpolate import interp1d
        y = {}
        for key, val in result.y.items():
            y[key] = interp1d(result.t, val.sum(axis=-1), axis=0)(t_eval)
        # Daily increments for the cumulative tracks the model flags.
        for key in sim.increment_keys:
            if key in result.y.keys():
                spline = interp1d(result.t, result.y[key].sum(axis=-1), axis=0)
                y[key+"_incr"] = spline(t_eval) - spline(t_eval - 1)

        _t = pd.to_datetime(t_eval, origin="2020-01-01", unit="D")
        return pd.DataFrame(y, index=_t)

    df = get_df(**params)

    max_rtol = 1.e-8
    avg_rtol = 1.e-10

    if overwrite:
        df.to_hdf(regression_path, "seirpp_call/"+case)
    else:
        # Cross-check against both groups: __call__ and get_model_data are
        # expected to produce matching results for equivalent parameters.
        for group in ("seirpp_call/", "seirpp_get_model_data/"):
            true = pd.read_hdf(regression_path, group+case)
            for key, (max_err, avg_err) in compare_results(true, df).items():
                assert (max_err < max_rtol and avg_err < avg_rtol), \
                    "case %s: %s failed against %s, %s, %s" % \
                    (case, key, group, max_err, avg_err)

    # Second pass: rescale the p_* parameters and compare again.
    case2 = case+"_changed_prefactors"
    if "ifr" in params:
        params["ifr"] = None
    if "log_ifr" in params:
        params.pop("log_ifr")
    for key, val in change_prefactors.items():
        if key in params:
            # Rebind rather than ``*=``: in-place scaling would mutate numpy
            # arrays shared with the module-level case dictionaries.
            params[key] = params[key] * val
        else:
            params[key] = val

    df = get_df(**params)

    if overwrite:
        df.to_hdf(regression_path, "seirpp_call/"+case2)
    else:
        for group in ("seirpp_call/", "seirpp_get_model_data/"):
            true = pd.read_hdf(regression_path, group+case2)
            for key, (max_err, avg_err) in compare_results(true, df).items():
                assert (max_err < max_rtol and avg_err < avg_rtol), \
                    "case %s: %s failed against %s, %s, %s" % \
                    (case2, key, group, max_err, avg_err)
@pytest.mark.parametrize("case, params", cases_get_model_data.items())
def test_seirpp_get_model_data(case, params):
    """Regression-test SEIRPlusPlusSimulation.get_model_data against stored data.

    Each case is run twice: once as given, and once with the
    ``change_prefactors`` values passed as ``*_prefactor`` keyword arguments,
    with results compared against both regression groups.
    """
    # Work on a copy so the shared module-level parametrize dict is not
    # mutated by the ifr/log_ifr edits below.
    params = dict(params)

    df = SEIRPlusPlusSimulation.get_model_data(t_eval, **params)

    max_rtol = 1.e-8
    avg_rtol = 1.e-10

    if overwrite:
        df.to_hdf(regression_path, "seirpp_get_model_data/"+case)
    else:
        # Cross-check against both groups: __call__ and get_model_data are
        # expected to produce matching results for equivalent parameters.
        for group in ("seirpp_call/", "seirpp_get_model_data/"):
            true = pd.read_hdf(regression_path, group+case)
            for key, (max_err, avg_err) in compare_results(true, df).items():
                assert (max_err < max_rtol and avg_err < avg_rtol), \
                    "case %s: %s failed against %s, %s, %s" % \
                    (case, key, group, max_err, avg_err)

    # Second pass: apply the prefactors via the *_prefactor keywords.
    case2 = case+"_changed_prefactors"
    if "ifr" in params:
        params["ifr"] = None
    if "log_ifr" in params:
        params.pop("log_ifr")
    check_ps = {}
    for key, val in change_prefactors.items():
        # Snapshot the p_* values so we can verify get_model_data does not
        # modify its inputs when applying prefactors.
        check_ps[key] = np.copy(params.get(key, 1))
        params[key+"_prefactor"] = val

    df = SEIRPlusPlusSimulation.get_model_data(t_eval, **params)

    # check that p_* didn't change
    for key, val in change_prefactors.items():
        assert np.allclose(check_ps[key], params.get(key, 1), rtol=1.e-13, atol=0)

    if overwrite:
        df.to_hdf(regression_path, "seirpp_get_model_data/"+case2)
    else:
        for group in ("seirpp_call/", "seirpp_get_model_data/"):
            true = pd.read_hdf(regression_path, group+case2)
            for key, (max_err, avg_err) in compare_results(true, df).items():
                assert (max_err < max_rtol and avg_err < avg_rtol), \
                    "case %s: %s failed against %s, %s, %s" % \
                    (case2, key, group, max_err, avg_err)
# Allow running all regression cases directly (outside pytest), e.g. when
# regenerating the stored data with overwrite = True.
if __name__ == "__main__":
    for case, params in cases_call.items():
        test_seirpp_call(case, params)
    for case, params in cases_get_model_data.items():
        test_seirpp_get_model_data(case, params)
| 32.833333
| 82
| 0.598219
|
4a01628168cc535f23e030b2ac023cbc269adc8e
| 1,488
|
py
|
Python
|
tests/benchmark_incr.py
|
mgorny/python-diskcache
|
b0451e084ea403c29980f683b8f0d8c9ac2a2dea
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark_incr.py
|
mgorny/python-diskcache
|
b0451e084ea403c29980f683b8f0d8c9ac2a2dea
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark_incr.py
|
mgorny/python-diskcache
|
b0451e084ea403c29980f683b8f0d8c9ac2a2dea
|
[
"Apache-2.0"
] | null | null | null |
"""Benchmark cache.incr method.
"""
from __future__ import print_function
import json
import multiprocessing as mp
import shutil
import time
import diskcache as dc
from .utils import secs
# Number of increments each worker performs.
COUNT = int(1e3)
# Number of concurrent worker processes.
PROCS = 8
def worker(num):
    "Rapidly increment key and time operation."
    time.sleep(0.1)  # Let other workers start.

    values = []

    # Use the cache as a context manager so the handle is closed when the
    # worker finishes (the original leaked it); this matches main() below.
    with dc.Cache('tmp') as cache:
        for _ in range(COUNT):
            start = time.time()
            cache.incr(b'key')
            end = time.time()
            values.append(end - start)

    # Persist per-operation latencies for main() to aggregate.
    with open('output-%s.json' % num, 'w') as writer:
        json.dump(values, writer)
def main():
    "Run workers and print percentile results."
    shutil.rmtree('tmp', ignore_errors=True)

    processes = [
        mp.Process(target=worker, args=(num,)) for num in range(PROCS)
    ]

    for process in processes:
        process.start()

    for process in processes:
        process.join()

    # Sanity check: every increment from every worker must have landed.
    with dc.Cache('tmp') as cache:
        assert cache.get(b'key') == COUNT * PROCS

    # Accumulate timings from ALL workers before computing percentiles.
    # (Initializing `values` inside the loop would silently analyze only
    # the last worker's data.)
    values = []
    for num in range(PROCS):
        with open('output-%s.json' % num) as reader:
            values += json.load(reader)

    values.sort()

    # Index of each percentile in the sorted latency list.
    p50 = int(len(values) * 0.50) - 1
    p90 = int(len(values) * 0.90) - 1
    p99 = int(len(values) * 0.99) - 1
    p00 = len(values) - 1

    print(['{0:9s}'.format(val) for val in 'p50 p90 p99 max'.split()])
    print([secs(values[pos]) for pos in [p50, p90, p99, p00]])


if __name__ == '__main__':
    main()
| 21.257143
| 70
| 0.599462
|
4a01632cc25b26d22914eb270190f0cc647d5b6a
| 591
|
py
|
Python
|
src/video/dependencies.py
|
nakata5321/feecc-io-gateway
|
a7a70c3b7239142e7ee1b846916d28961020b1a9
|
[
"Apache-2.0"
] | null | null | null |
src/video/dependencies.py
|
nakata5321/feecc-io-gateway
|
a7a70c3b7239142e7ee1b846916d28961020b1a9
|
[
"Apache-2.0"
] | 2
|
2021-11-27T09:31:12.000Z
|
2022-03-23T13:15:57.000Z
|
src/video/dependencies.py
|
nakata5321/feecc-io-gateway
|
a7a70c3b7239142e7ee1b846916d28961020b1a9
|
[
"Apache-2.0"
] | 2
|
2021-12-09T13:50:51.000Z
|
2022-03-23T12:39:38.000Z
|
from fastapi import HTTPException, status
from .camera import Camera, Recording, cameras, records
def get_camera_by_number(camera_number: int) -> Camera:
    """Look up a camera by its number, raising 404 if it is unknown."""
    if camera_number not in cameras:
        raise HTTPException(status.HTTP_404_NOT_FOUND, f"No such camera: {camera_number}")
    return cameras[camera_number]
def get_record_by_id(record_id: str) -> Recording:
    """Look up a recording by its uuid, raising 404 if it is unknown."""
    if record_id not in records:
        raise HTTPException(status.HTTP_404_NOT_FOUND, f"No such recording: {record_id}")
    return records[record_id]
| 29.55
| 86
| 0.732657
|
4a01636a03762be3c905969240689bb97115bb11
| 5,132
|
py
|
Python
|
arviz/plots/traceplot.py
|
corriebar/arviz
|
95f23c97d460969b043f20253da5dc81b8f97eb3
|
[
"Apache-2.0"
] | null | null | null |
arviz/plots/traceplot.py
|
corriebar/arviz
|
95f23c97d460969b043f20253da5dc81b8f97eb3
|
[
"Apache-2.0"
] | null | null | null |
arviz/plots/traceplot.py
|
corriebar/arviz
|
95f23c97d460969b043f20253da5dc81b8f97eb3
|
[
"Apache-2.0"
] | null | null | null |
"""Plot kde or histograms and values from MCMC samples."""
def plot_trace(
    data,
    var_names=None,
    coords=None,
    divergences="bottom",
    figsize=None,
    textsize=None,
    lines=None,
    compact=False,
    combined=False,
    legend=False,
    plot_kwargs=None,
    fill_kwargs=None,
    rug_kwargs=None,
    hist_kwargs=None,
    trace_kwargs=None,
    backend=None,
    **kwargs
):
    """Plot distribution (histogram or kernel density estimates) and sampled values.

    If `divergences` data is available in `sample_stats`, will plot the location of divergences as
    dashed vertical lines.

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
    var_names : string, or list of strings
        One or more variables to be plotted.
    coords : mapping, optional
        Coordinates of var_names to be plotted. Passed to `Dataset.sel`
    divergences : {"bottom", "top", None, False}
        Plot location of divergences on the traceplots. Options are "bottom", "top", or False-y.
    figsize : figure size tuple
        If None, size is (12, variables * 2)
    textsize: float
        Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
        on figsize.
    lines : tuple
        Tuple of (var_name, {'coord': selection}, [line, positions]) to be overplotted as
        vertical lines on the density and horizontal lines on the trace.
    compact : bool
        Plot multidimensional variables in a single plot.
    combined : bool
        Flag for combining multiple chains into a single line. If False (default), chains will be
        plotted separately.
    legend : bool
        Add a legend to the figure with the chain color code.
    plot_kwargs : dict
        Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.
    fill_kwargs : dict
        Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.
    rug_kwargs : dict
        Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.
    hist_kwargs : dict
        Extra keyword arguments passed to `arviz.plot_dist`. Only affects discrete variables.
    trace_kwargs : dict
        Extra keyword arguments passed to `plt.plot`
    backend : str {"matplotlib", "bokeh"}
        Select backend engine.

    Returns
    -------
    axes : matplotlib axes

    Examples
    --------
    Plot a subset variables

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> data = az.load_arviz_data('non_centered_eight')
        >>> coords = {'school': ['Choate', 'Lawrenceville']}
        >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords)

    Show all dimensions of multidimensional variables in the same plot

    .. plot::
        :context: close-figs

        >>> az.plot_trace(data, compact=True)

    Combine all chains into one distribution

    .. plot::
        :context: close-figs

        >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, combined=True)

    Plot reference lines against distribution and trace

    .. plot::
        :context: close-figs

        >>> lines = (('theta_t',{'school': "Choate"}, [-1]),)
        >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, lines=lines)
    """
    if backend is None or backend.lower() in ("mpl", "matplotlib"):
        from .backends.matplotlib.mpl_traceplot import _plot_trace_mpl

        # NOTE(review): **kwargs are forwarded only to the bokeh backend
        # below — presumably the matplotlib backend accepts no extras;
        # confirm this asymmetry is intentional.
        axes = _plot_trace_mpl(
            data,
            var_names=var_names,
            coords=coords,
            divergences=divergences,
            figsize=figsize,
            textsize=textsize,
            lines=lines,
            compact=compact,
            combined=combined,
            legend=legend,
            plot_kwargs=plot_kwargs,
            fill_kwargs=fill_kwargs,
            rug_kwargs=rug_kwargs,
            hist_kwargs=hist_kwargs,
            trace_kwargs=trace_kwargs,
        )
    elif backend.lower() == "bokeh":
        try:
            import bokeh
        except ImportError:
            raise ImportError("'bokeh' backend needs Bokeh (1.4.0+) installed.")

        # Version gate. The original `assert bokeh.__version__ >= "1.4.0"`
        # compared version strings lexicographically (so e.g. "1.10.0" would
        # be rejected as < "1.4.0") and the assert vanishes entirely under
        # `python -O`. Compare numeric components instead; non-numeric tokens
        # (e.g. release-candidate suffixes) are skipped.
        bokeh_version = tuple(
            int(token) for token in bokeh.__version__.split(".")[:3] if token.isdigit()
        )
        if bokeh_version < (1, 4):
            raise ImportError("'bokeh' backend needs Bokeh (1.4.0+) installed.")

        from .backends.bokeh.bokeh_traceplot import _plot_trace_bokeh

        axes = _plot_trace_bokeh(
            data,
            var_names=var_names,
            coords=coords,
            divergences=divergences,
            figsize=figsize,
            textsize=textsize,
            lines=lines,
            compact=compact,
            combined=combined,
            legend=legend,
            plot_kwargs=plot_kwargs,
            fill_kwargs=fill_kwargs,
            rug_kwargs=rug_kwargs,
            hist_kwargs=hist_kwargs,
            trace_kwargs=trace_kwargs,
            **kwargs,
        )
    else:
        raise NotImplementedError(
            'Backend {} not implemented. Use {{"matplotlib", "bokeh"}}'.format(backend)
        )
    return axes
| 32.075
| 98
| 0.620616
|
4a0164203c09d358a833d707812289e15103b367
| 6,668
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/listeriagrayi.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/listeriagrayi.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/listeriagrayi.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Listeria grayi.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:05:20.792849
The undirected graph Listeria grayi has 2610 nodes and 208456 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06123 and has 14 connected components, where the component with most
nodes has 2576 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 129, the mean node degree is 159.74, and
the node degree mode is 5. The top 5 most central nodes are 525367.HMPREF0556_12225
(degree 925), 525367.HMPREF0556_11992 (degree 888), 525367.HMPREF0556_11207
(degree 862), 525367.HMPREF0556_10227 (degree 851) and 525367.HMPREF0556_10330
(degree 843).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ListeriaGrayi
# Then load the graph
graph = ListeriaGrayi()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ListeriaGrayi(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return new instance of the Listeria grayi graph.

    The graph is automatically retrieved from the STRING repository.

    As of 2021-02-02, the undirected graph has 2610 nodes and 208456
    weighted edges (no self-loops), a density of 0.06123, and 14 connected
    components (largest: 2576 nodes; smallest: 2 nodes). Median node degree
    is 129, mean 159.74, mode 5.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Listeria grayi graph.

    References
    ---------------------
    Please cite: Szklarczyk et al., "STRING v11: protein--protein
    association networks with increased coverage, supporting functional
    discovery in genome-wide experimental datasets", Nucleic Acids
    Research 47(D1):D607--D613, 2019.
    """
    # Build the retriever, then invoke it to produce the graph instance.
    graph_retriever = AutomaticallyRetrievedGraph(
        graph_name="ListeriaGrayi",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return graph_retriever()
| 34.910995
| 223
| 0.702759
|
4a0164ec55faa55dfebe952254adfcf2805427af
| 119,530
|
py
|
Python
|
components/isceobj/TopsProc/runIon.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-08-18T13:00:39.000Z
|
2020-08-18T13:00:39.000Z
|
components/isceobj/TopsProc/runIon.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
components/isceobj/TopsProc/runIon.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#
# Author: Cunren Liang
# Copyright 2018
# California Institute of Technology
#
import os
import shutil
import datetime
import numpy as np
import numpy.matlib
import isceobj
import logging
from isceobj.Constants import SPEED_OF_LIGHT
from isceobj.TopsProc.runBurstIfg import loadVirtualArray
logger = logging.getLogger('isce.topsinsar.ion')
#should get rid of the coherence thresholds in the future
##WARNING: when using the original full-bandwidth swath xml file, should also consider burst.image.filename
class dummy(object):
    """Empty namespace object used as a mutable container for the
    ionospheric-correction parameters assembled in setup()."""
    pass
def setup(self):
'''
setup parameters for processing
'''
#initialize parameters for ionospheric correction
ionParam = dummy()
#The step names in the list below are exactly the function names in 'def runIon(self):'
#when adding a new step, only put its function name (in right order) in the list,
#and put the function (in right order) in 'def runIon(self):'
ionParam.allSteps = ['subband', 'rawion', 'grd2ion', 'filt_gaussian', 'ionosphere_shift', 'ion2grd', 'esd']
###################################################################
#users are supposed to change parameters of this section ONLY
#SECTION 1. PROCESSING CONTROL PARAMETERS
#1. suggested default values of the parameters
ionParam.doIon = False
ionParam.startStep = ionParam.allSteps[0]
ionParam.endStep = ionParam.allSteps[-1]
#ionospheric layer height (km)
ionParam.ionHeight = 200.0
#before filtering ionosphere, if applying polynomial fitting
#False: no fitting
#True: with fitting
ionParam.ionFit = True
#window size for filtering ionosphere
ionParam.ionFilteringWinsizeMax = 200
ionParam.ionFilteringWinsizeMin = 100
#window size for filtering azimuth shift caused by ionosphere
ionParam.ionshiftFilteringWinsizeMax = 150
ionParam.ionshiftFilteringWinsizeMin = 75
#correct phase error caused by non-zero center frequency and azimuth shift caused by ionosphere
#0: no correction
#1: use mean value of a burst
#2: use full burst
ionParam.azshiftFlag = 1
#better NOT try changing the following two parameters, since they are related
#to the filtering parameters above
#number of azimuth looks in the processing of ionosphere estimation
ionParam.numberAzimuthLooks = 50
#number of range looks in the processing of ionosphere estimation
ionParam.numberRangeLooks = 200
#number of azimuth looks of the interferogram to be unwrapped
ionParam.numberAzimuthLooks0 = 5*2
#number of range looks of the interferogram to be unwrapped
ionParam.numberRangeLooks0 = 20*2
#2. accept the above parameters from topsApp.py
ionParam.doIon = self.ION_doIon
ionParam.startStep = self.ION_startStep
ionParam.endStep = self.ION_endStep
ionParam.ionHeight = self.ION_ionHeight
ionParam.ionFit = self.ION_ionFit
ionParam.ionFilteringWinsizeMax = self.ION_ionFilteringWinsizeMax
ionParam.ionFilteringWinsizeMin = self.ION_ionFilteringWinsizeMin
ionParam.ionshiftFilteringWinsizeMax = self.ION_ionshiftFilteringWinsizeMax
ionParam.ionshiftFilteringWinsizeMin = self.ION_ionshiftFilteringWinsizeMin
ionParam.azshiftFlag = self.ION_azshiftFlag
ionParam.numberAzimuthLooks = self.ION_numberAzimuthLooks
ionParam.numberRangeLooks = self.ION_numberRangeLooks
ionParam.numberAzimuthLooks0 = self.ION_numberAzimuthLooks0
ionParam.numberRangeLooks0 = self.ION_numberRangeLooks0
#3. check parameters
#convert to m
ionParam.ionHeight *= 1000.0
#check number of looks
if not ((ionParam.numberAzimuthLooks % ionParam.numberAzimuthLooks0 == 0) and \
(1 <= ionParam.numberAzimuthLooks0 <= ionParam.numberAzimuthLooks)):
raise Exception('numberAzimuthLooks must be integer multiples of numberAzimuthLooks0')
if not ((ionParam.numberRangeLooks % ionParam.numberRangeLooks0 == 0) and \
(1 <= ionParam.numberRangeLooks0 <= ionParam.numberRangeLooks)):
raise Exception('numberRangeLooks must be integer multiples of numberRangeLooks0')
#check steps for ionospheric correction
if ionParam.startStep not in ionParam.allSteps:
print('all steps for ionospheric correction in order: {}'.format(ionParam.allSteps))
raise Exception('please specify the correct start step for ionospheric correction from above list')
if ionParam.endStep not in ionParam.allSteps:
print('all steps for ionospheric correction in order: {}'.format(ionParam.allSteps))
raise Exception('please specify the correct start step for ionospheric correction from above list')
if ionParam.allSteps.index(ionParam.startStep) > ionParam.allSteps.index(ionParam.endStep):
print('correct relationship: start step <= end step')
raise Exception('error: start step is after end step.')
###################################################################
###################################################################
#routines that require setting parameters
#def ionosphere(self, ionParam):
#def ionSwathBySwath(self, ionParam):
#def filt_gaussian(self, ionParam):
#def ionosphere_shift(self, ionParam):
#def ion2grd(self, ionParam):
#def esd(self, ionParam):
###################################################################
#SECTION 2. DIRECTORIES AND FILENAMES
#directories
ionParam.ionDirname = 'ion'
ionParam.lowerDirname = 'lower'
ionParam.upperDirname = 'upper'
ionParam.ioncalDirname = 'ion_cal'
ionParam.ionBurstDirname = 'ion_burst'
#these are same directory names as topsApp.py/TopsProc.py
#ionParam.referenceSlcProduct = 'reference'
#ionParam.secondarySlcProduct = 'secondary'
#ionParam.fineCoregDirname = 'fine_coreg'
ionParam.fineIfgDirname = 'fine_interferogram'
ionParam.mergedDirname = 'merged'
#filenames
ionParam.ionRawNoProj = 'raw_no_projection.ion'
ionParam.ionCorNoProj = 'raw_no_projection.cor'
ionParam.ionRaw = 'raw.ion'
ionParam.ionCor = 'raw.cor'
ionParam.ionFilt = 'filt.ion'
ionParam.ionShift = 'azshift.ion'
ionParam.warning = 'warning.txt'
#SECTION 3. DATA PARAMETERS
#earth's radius (m)
ionParam.earthRadius = 6371 * 1000.0
#reference range (m) for moving range center frequency to zero, center of center swath
ionParam.rgRef = 875714.0
#range bandwidth (Hz) for splitting, range processingBandwidth: [5.650000000000000e+07, 4.830000000000000e+07, 4.278991840322842e+07]
ionParam.rgBandwidthForSplit = 40.0 * 10**6
ionParam.rgBandwidthSub = ionParam.rgBandwidthForSplit / 3.0
#SECTION 4. DEFINE WAVELENGTHS AND DETERMINE IF CALCULATE IONOSPHERE WITH MERGED INTERFEROGRAM
getParamFromData = False
referenceStartingRange = np.zeros(3)
secondaryStartingRange = np.zeros(3)
swathList = self._insar.getValidSwathList(self.swaths)
for swath in swathList:
####Load secondary metadata
reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swath)))
secondary = self._insar.loadProduct( os.path.join(self._insar.secondarySlcProduct, 'IW{0}.xml'.format(swath)))
####Indices w.r.t reference
minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
secondaryBurstStart, secondaryBurstEnd = self._insar.commonSecondaryBurstLimits(swath-1)
if minBurst == maxBurst:
#print('Skipping processing of swath {0}'.format(swath))
continue
else:
ii = minBurst
jj = secondaryBurstStart + ii - minBurst
masBurst = reference.bursts[ii]
slvBurst = secondary.bursts[jj]
#use the 1/3, 1/3, 1/3 scheme for splitting
ionParam.radarWavelength = masBurst.radarWavelength
ionParam.radarWavelengthLower = SPEED_OF_LIGHT / (SPEED_OF_LIGHT / ionParam.radarWavelength - ionParam.rgBandwidthForSplit / 3.0)
ionParam.radarWavelengthUpper = SPEED_OF_LIGHT / (SPEED_OF_LIGHT / ionParam.radarWavelength + ionParam.rgBandwidthForSplit / 3.0)
#use this to determine which polynomial to use to calculate a ramp when calculating ionosphere for cross A/B interferogram
ionParam.passDirection = masBurst.passDirection.lower()
referenceStartingRange[swath-1] = masBurst.startingRange
secondaryStartingRange[swath-1] = slvBurst.startingRange
getParamFromData = True
#determine if calculate ionosphere using merged interferogram
if np.sum(referenceStartingRange==secondaryStartingRange) != 3:
ionParam.calIonWithMerged = False
else:
ionParam.calIonWithMerged = True
#there is no need to process swath by swath when there is only one swath
#ionSwathBySwath only works when number of swaths >=2
if len(swathList) == 1:
ionParam.calIonWithMerged = True
#for cross Sentinel-1A/B interferogram, always not using merged interferogram
if reference.mission != secondary.mission:
ionParam.calIonWithMerged = False
#determine if remove an empirical ramp
if reference.mission == secondary.mission:
ionParam.rampRemovel = 0
else:
#estimating ionospheric phase for cross Sentinel-1A/B interferogram
#an empirical ramp will be removed from the estimated ionospheric phase
if reference.mission == 'S1A' and secondary.mission == 'S1B':
ionParam.rampRemovel = 1
else:
ionParam.rampRemovel = -1
if getParamFromData == False:
raise Exception('cannot get parameters from data')
return ionParam
def next_pow2(a):
    '''
    Return the smallest power of two that is >= a (never smaller than 2).
    '''
    p = 2
    while p < a:
        p <<= 1
    return p
def removeHammingWindow(inputfile, outputfile, bandwidth, samplingRate, alpha, virtual=True):
    '''
    This function removes the range Hamming window imposed on the signal.

    inputfile:    input SLC file (an accompanying .xml metadata file must exist)
    outputfile:   output de-windowed SLC file (xml/vrt headers are rendered for it)
    bandwidth:    range bandwidth (Hz)
    samplingRate: range sampling rate (Hz)
    alpha:        alpha (pedestal) of the Hamming window
    virtual:      if True, read the input through its .vrt instead of memory-mapping

    returns the de-windowed SLC as a numpy array (also written to outputfile).
    '''
    #(length, width) = slc.shape
    inImg = isceobj.createSlcImage()
    inImg.load( inputfile + '.xml')
    width = inImg.getWidth()
    length = inImg.getLength()

    if not virtual:
        slc = np.memmap(inputfile, dtype=np.complex64, mode='r', shape=(length,width))
    else:
        slc = loadVirtualArray(inputfile + '.vrt')

    #fft length
    nfft = next_pow2(width)
    #Hamming window length in spectral samples
    #use builtin int: np.int was removed in numpy >= 1.24
    nwin = int(np.around(bandwidth / samplingRate*nfft))
    #make it a even number, since we are going to use even fft length
    nwin = ((nwin+1)//2)*2
    #the starting and ending index of window in the spectrum
    start = int(np.around((nfft - nwin) / 2))
    end = int(np.around(start + nwin - 1))

    #reciprocal of the Hamming window, aligned with the shifted spectrum
    hammingWindow = alpha - (1.0-alpha) * np.cos(np.linspace(-np.pi, np.pi, num=nwin, endpoint=True))
    hammingWindow = 1.0/np.fft.fftshift(hammingWindow)

    #divide the band-limited part of the spectrum by the window
    spec = np.fft.fft(slc, n=nfft, axis=1)
    spec = np.fft.fftshift(spec, axes=1)
    spec[:, start:end+1] *= hammingWindow[None,:]
    spec = np.fft.fftshift(spec, axes=1)
    spec = np.fft.ifft(spec, n=nfft, axis=1)
    #keep originally-zero (no-data) samples zero
    slcd = spec[:, 0:width] * ((slc.real!=0) | (slc.imag!=0))

    #after these fft and ifft, the values are not scaled by constant.
    slcd.astype(np.complex64).tofile(outputfile)
    inImg.setFilename(outputfile)
    inImg.extraFilename = outputfile + '.vrt'
    inImg.setAccessMode('READ')
    inImg.renderHdr()

    return slcd
def runCmd(cmd, silent=0):
    '''
    Execute a shell command, echoing it first unless silent, and raise
    an Exception if the command exits with a non-zero status.
    '''
    if silent == 0:
        print("{}".format(cmd))
    exitStatus = os.system(cmd)
    if exitStatus != 0:
        raise Exception('error when running:\n{}\n'.format(cmd))
def adjustValidLineSample(reference, secondary):
    '''
    Shrink the reference burst's valid line/sample window (in place) to the
    intersection of the reference and secondary valid regions, so the
    interferogram only covers samples valid in both bursts.
    '''
    lastLineRef = reference.firstValidLine + reference.numValidLines - 1
    lastSampleRef = reference.firstValidSample + reference.numValidSamples - 1
    lastLineSec = secondary.firstValidLine + secondary.numValidLines - 1
    lastSampleSec = secondary.firstValidSample + secondary.numValidSamples - 1

    #common last valid line/sample of the interferogram
    commonLastLine = min(lastLineRef, lastLineSec)
    commonLastSample = min(lastSampleRef, lastSampleSec)

    #common first valid line/sample, then the new extents
    reference.firstValidLine = max(reference.firstValidLine, secondary.firstValidLine)
    reference.firstValidSample = max(reference.firstValidSample, secondary.firstValidSample)
    reference.numValidLines = commonLastLine - reference.firstValidLine + 1
    reference.numValidSamples = commonLastSample - reference.firstValidSample + 1
def multiply2(referencename, secondaryname, fact, rngname=None, ionname=None, infname=None, overlapBox=None, valid=True, virtual=True):
    '''
    This routine forms interferogram and possibly removes topographic and ionospheric phases.

    referencename: reference SLC file (its .xml metadata gives the size)
    secondaryname: secondary (resampled) SLC file, same size as reference
    fact:          factor converting the range offset to topographic phase (rad/pixel)
    rngname:       range offset file; if given, topographic phase is removed
    ionname:       ionospheric phase file; if given, ionospheric phase is removed
    infname:       if given, the interferogram is written to this file and xml/vrt rendered
    valid:         True: output covers only the overlapBox area;
                   False: output is full-size, with the interferogram placed inside overlapBox
    virtual:       if True, read the reference through its .vrt instead of memory-mapping

    all the following indexes start from 1
    overlapBox[0]: first line
    overlapBox[1]: last line
    overlapBox[2]: first sample
    overlapBox[3]: last sample

    returns the interferogram as a numpy array.
    '''
    #use reference image metadata to determine array size
    img = isceobj.createSlcImage()
    img.load(referencename + '.xml')
    width = img.getWidth()
    length = img.getLength()

    #reference
    if not virtual:
        reference = np.memmap(referencename, dtype=np.complex64, mode='r', shape=(length,width))
    else:
        reference = loadVirtualArray(referencename + '.vrt')
    #secondary
    secondary = np.memmap(secondaryname, dtype=np.complex64, mode='r', shape=(length, width))

    #interferogram: reference * conj(secondary) over the 1-based overlap box
    #(the -1 converts 1-based box indexes to 0-based numpy indexes; +1 makes the stop inclusive)
    cJ = np.complex64(-1j)
    inf = reference[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1] \
        * np.conj(secondary[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1])

    #topography: remove topographic phase using the range offset file
    if rngname != None:
        rng2 = np.memmap(rngname, dtype=np.float32, mode='r', shape=(length,width))
        inf *= np.exp(cJ*fact*rng2[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1])

    #ionosphere: remove ionospheric phase
    if ionname != None:
        ion = np.memmap(ionname, dtype=np.float32, mode='r', shape=(length, width))
        inf *= np.exp(cJ*ion[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1])

    if valid == True:
        inf2 = inf
    else:
        #embed the overlap-box interferogram in a full-size zero array
        inf2 = np.zeros((length,width), dtype=np.complex64)
        inf2[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1] = inf

    #inf = reference[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1] \
    #    * np.conj(secondary[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1]) \
    #    * np.exp(cJ*ion[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1]) \
    #    * np.exp(cJ*fact*rng2[overlapBox[0]-1:overlapBox[1]-1+1, overlapBox[2]-1:overlapBox[3]-1+1])

    #optionally write the interferogram and its metadata
    if infname != None:
        inf2.astype(np.complex64).tofile(infname)
        img = isceobj.createIntImage()
        img.setFilename(infname)
        img.extraFilename = infname + '.vrt'
        if valid == True:
            img.setWidth(overlapBox[3]-overlapBox[2]+1)
            img.setLength(overlapBox[1]-overlapBox[0]+1)
        else:
            img.setWidth(width)
            img.setLength(length)
        img.setAccessMode('READ')
        img.renderHdr()

    return inf2
def subband(self, ionParam):
    '''
    generate subband images

    For each common burst of each valid swath:
      1. remove the range Hamming window from the reference and secondary SLC
         bursts and split each into lower and upper range subband SLCs;
      2. resample the secondary subband bursts to the reference grid and form
         lower/upper subband burst interferograms.
    Burst interferograms and swath products are written under
    ion/<lower|upper>/fine_interferogram/. Intermediate subband SLCs are removed
    as soon as the interferograms are formed.
    '''
    from isceobj.Sensor.TOPS import createTOPSSwathSLCProduct
    from isceobj.Util.Poly2D import Poly2D
    from contrib.alos2proc.alos2proc import rg_filter
    from isceobj.TopsProc.runFineResamp import resampSecondary
    from isceobj.TopsProc.runFineResamp import getRelativeShifts
    from isceobj.TopsProc.runFineResamp import adjustValidSampleLine
    from isceobj.TopsProc.runFineResamp import getValidLines
    #from isceobj.TopsProc.runBurstIfg import adjustValidLineSample

    print('processing subband burst interferograms')
    virtual = self.useVirtualFiles
    swathList = self._insar.getValidSwathList(self.swaths)
    for swath in swathList:
        ####Load secondary metadata
        reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swath)))
        secondary = self._insar.loadProduct( os.path.join(self._insar.secondarySlcProduct, 'IW{0}.xml'.format(swath)))

        dt = secondary.bursts[0].azimuthTimeInterval
        dr = secondary.bursts[0].rangePixelSize

        ###Directory with offsets
        offdir = os.path.join(self._insar.fineOffsetsDirname, 'IW{0}'.format(swath))

        ####Indices w.r.t reference
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        secondaryBurstStart, secondaryBurstEnd = self._insar.commonSecondaryBurstLimits(swath-1)
        if minBurst == maxBurst:
            print('Skipping processing of swath {0}'.format(swath))
            continue

        #create dirs
        lowerDir = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname, 'IW{0}'.format(swath))
        upperDir = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.fineIfgDirname, 'IW{0}'.format(swath))
        os.makedirs(lowerDir, exist_ok=True)
        os.makedirs(upperDir, exist_ok=True)

        ##############################################################
        #for resampling
        relShifts = getRelativeShifts(reference, secondary, minBurst, maxBurst, secondaryBurstStart)
        print('Shifts IW-{0}: '.format(swath), relShifts)

        ####Can corporate known misregistration here
        apoly = Poly2D()
        apoly.initPoly(rangeOrder=0,azimuthOrder=0,coeffs=[[0.]])
        rpoly = Poly2D()
        rpoly.initPoly(rangeOrder=0,azimuthOrder=0,coeffs=[[0.]])

        misreg_az = self._insar.secondaryTimingCorrection / dt
        misreg_rg = self._insar.secondaryRangeCorrection / dr
        ##############################################################

        #swath products that will collect the lower/upper burst interferograms
        fineIfgLower = createTOPSSwathSLCProduct()
        fineIfgLower.configure()
        fineIfgUpper = createTOPSSwathSLCProduct()
        fineIfgUpper.configure()

        #only process common bursts
        for ii in range(minBurst, maxBurst):
            jj = secondaryBurstStart + ii - minBurst
            masBurst = reference.bursts[ii]
            slvBurst = secondary.bursts[jj]
            print('processing reference burst: %02d, secondary burst: %02d, swath: %d'%(ii+1, jj+1, swath))

            ################################################################
            #1. removing window and subband
            for ms in ['reference', 'secondary']:
                #setup something
                if ms == 'reference':
                    burst = masBurst
                    #put the temporary file in the lower directory
                    tmpFilename = os.path.join(lowerDir, 'reference_dw_'+os.path.basename(burst.image.filename))
                    tmpFilename2 = 'reference_'+os.path.basename(burst.image.filename)
                else:
                    burst = slvBurst
                    #put the temporary file in the lower directory
                    tmpFilename = os.path.join(lowerDir, 'secondary_dw_'+os.path.basename(burst.image.filename))
                    tmpFilename2 = 'secondary_'+os.path.basename(burst.image.filename)

                #removing window
                rangeSamplingRate = SPEED_OF_LIGHT / (2.0 * burst.rangePixelSize)
                if burst.rangeWindowType == 'Hamming':
                    removeHammingWindow(burst.image.filename, tmpFilename, burst.rangeProcessingBandwidth, rangeSamplingRate, burst.rangeWindowCoefficient, virtual=virtual)
                else:
                    raise Exception('Range weight window type: {} is not supported yet!'.format(burst.rangeWindowType))

                #subband: split the de-windowed SLC into lower and upper subband SLCs
                rg_filter(tmpFilename,
                    #burst.numberOfSamples,
                    2,
                    [os.path.join(lowerDir, tmpFilename2), os.path.join(upperDir, tmpFilename2)],
                    [ionParam.rgBandwidthSub / rangeSamplingRate, ionParam.rgBandwidthSub / rangeSamplingRate],
                    [-ionParam.rgBandwidthForSplit / 3.0 / rangeSamplingRate, ionParam.rgBandwidthForSplit / 3.0 / rangeSamplingRate],
                    129,
                    512,
                    0.1,
                    0,
                    (burst.startingRange - ionParam.rgRef) / burst.rangePixelSize
                    )

                #remove temporary file
                os.remove(tmpFilename)
                os.remove(tmpFilename+'.xml')
                os.remove(tmpFilename+'.vrt')

            #2. resampling and form interferogram
            #resampling
            try:
                offset = relShifts[jj]
            except:
                raise Exception('Trying to access shift for secondary burst index {0}, which may not overlap with reference for swath {1}'.format(jj, swath))

            ####Setup initial polynomials
            ### If no misregs are given, these are zero
            ### If provided, can be used for resampling without running to geo2rdr again for fast results
            rdict = {'azpoly' : apoly,
                     'rgpoly' : rpoly,
                     'rangeOff' : os.path.join(offdir, 'range_%02d.off'%(ii+1)),
                     'azimuthOff': os.path.join(offdir, 'azimuth_%02d.off'%(ii+1))}

            ###For future - should account for azimuth and range misreg here .. ignoring for now.
            azCarrPoly, dpoly = secondary.estimateAzimuthCarrierPolynomials(slvBurst, offset = -1.0 * offset)
            rdict['carrPoly'] = azCarrPoly
            rdict['doppPoly'] = dpoly

            #resample the secondary subband bursts and form lower/upper interferograms
            for lu in ['lower', 'upper']:
                masBurst2 = masBurst.clone()
                slvBurst2 = slvBurst.clone()
                slvBurstResamp2 = masBurst.clone()
                if lu == 'lower':
                    masBurst2.radarWavelength = ionParam.radarWavelengthLower
                    masBurst2.rangeProcessingBandwidth = ionParam.rgBandwidthSub
                    masBurst2.image.filename = os.path.join(lowerDir, 'reference_'+os.path.basename(masBurst.image.filename))
                    slvBurst2.radarWavelength = ionParam.radarWavelengthLower
                    slvBurst2.rangeProcessingBandwidth = ionParam.rgBandwidthSub
                    slvBurst2.image.filename = os.path.join(lowerDir, 'secondary_'+os.path.basename(slvBurst.image.filename))
                    slvBurstResamp2.radarWavelength = ionParam.radarWavelengthLower
                    slvBurstResamp2.rangeProcessingBandwidth = ionParam.rgBandwidthSub
                    slvBurstResamp2.image.filename = os.path.join(lowerDir, 'reference_'+os.path.basename(masBurst.image.filename))
                    outname = os.path.join(lowerDir, 'secondary_resamp_'+os.path.basename(slvBurst.image.filename))
                    ifgdir = lowerDir
                else:
                    masBurst2.radarWavelength = ionParam.radarWavelengthUpper
                    masBurst2.rangeProcessingBandwidth = ionParam.rgBandwidthSub
                    masBurst2.image.filename = os.path.join(upperDir, 'reference_'+os.path.basename(masBurst.image.filename))
                    slvBurst2.radarWavelength = ionParam.radarWavelengthUpper
                    slvBurst2.rangeProcessingBandwidth = ionParam.rgBandwidthSub
                    slvBurst2.image.filename = os.path.join(upperDir, 'secondary_'+os.path.basename(slvBurst.image.filename))
                    slvBurstResamp2.radarWavelength = ionParam.radarWavelengthUpper
                    slvBurstResamp2.rangeProcessingBandwidth = ionParam.rgBandwidthSub
                    slvBurstResamp2.image.filename = os.path.join(upperDir, 'reference_'+os.path.basename(masBurst.image.filename))
                    outname = os.path.join(upperDir, 'secondary_resamp_'+os.path.basename(slvBurst.image.filename))
                    ifgdir = upperDir

                outimg = resampSecondary(masBurst2, slvBurst2, rdict, outname)
                minAz, maxAz, minRg, maxRg = getValidLines(slvBurst2, rdict, outname,
                    misreg_az = misreg_az - offset, misreg_rng = misreg_rg)
                adjustValidSampleLine(slvBurstResamp2, slvBurst2,
                    minAz=minAz, maxAz=maxAz,
                    minRng=minRg, maxRng=maxRg)
                slvBurstResamp2.image.filename = outimg.filename

                #forming interferogram
                referencename = masBurst2.image.filename
                secondaryname = slvBurstResamp2.image.filename
                rngname = os.path.join(offdir, 'range_%02d.off'%(ii+1))
                infname = os.path.join(ifgdir, 'burst_%02d.int'%(ii+1))
                fact = 4.0 * np.pi * slvBurstResamp2.rangePixelSize / slvBurstResamp2.radarWavelength

                adjustValidLineSample(masBurst2,slvBurstResamp2)
                #in original runBurstIfg.py, valid samples in the interferogram are the following (indexes in the numpy matrix):
                #referenceFrame.firstValidLine:referenceFrame.firstValidLine + referenceFrame.numValidLines, referenceFrame.firstValidSample:referenceFrame.firstValidSample + referenceFrame.numValidSamples
                #after the following processing, valid samples in the interferogram are the following (indexes in the numpy matrix):
                #[masBurst.firstValidLine:masBurst.firstValidLine + masBurst.numValidLines, masBurst.firstValidSample:masBurst.firstValidSample + masBurst.numValidSamples]
                #SO THEY ARE EXACTLY THE SAME
                firstline = masBurst2.firstValidLine + 1
                lastline = firstline + masBurst2.numValidLines - 1
                firstcolumn = masBurst2.firstValidSample + 1
                lastcolumn = firstcolumn + masBurst2.numValidSamples - 1
                overlapBox = [firstline, lastline, firstcolumn, lastcolumn]
                multiply2(referencename, secondaryname, fact, rngname=rngname, ionname=None, infname=infname, overlapBox=overlapBox, valid=False, virtual=virtual)

                #directly from multiply() of runBurstIfg.py
                img = isceobj.createIntImage()
                img.setFilename(infname)
                img.setWidth(masBurst2.numberOfSamples)
                img.setLength(masBurst2.numberOfLines)
                img.setAccessMode('READ')
                #img.renderHdr()

                #save it for deleting later
                masBurst2_filename = masBurst2.image.filename
                #change it for interferogram
                masBurst2.image = img

                if lu == 'lower':
                    fineIfgLower.bursts.append(masBurst2)
                else:
                    fineIfgUpper.bursts.append(masBurst2)

                #remove reference and secondary subband slcs
                os.remove(masBurst2_filename)
                os.remove(masBurst2_filename+'.xml')
                os.remove(masBurst2_filename+'.vrt')
                os.remove(slvBurst2.image.filename)
                os.remove(slvBurst2.image.filename+'.xml')
                os.remove(slvBurst2.image.filename+'.vrt')
                os.remove(slvBurstResamp2.image.filename)
                os.remove(slvBurstResamp2.image.filename+'.xml')
                os.remove(slvBurstResamp2.image.filename+'.vrt')

        #save the swath products for lower and upper subbands
        fineIfgLower.numberOfBursts = len(fineIfgLower.bursts)
        fineIfgUpper.numberOfBursts = len(fineIfgUpper.bursts)
        self._insar.saveProduct(fineIfgLower, os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname, 'IW{0}.xml'.format(swath)))
        self._insar.saveProduct(fineIfgUpper, os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.fineIfgDirname, 'IW{0}.xml'.format(swath)))
def cal_coherence(inf, win=5, edge=0):
    '''
    compute coherence uisng only interferogram (phase).
    This routine still follows the regular equation for computing coherence,
    but assumes the amplitudes of reference and secondary are one, so that coherence
    can be computed using phase only.

    inf: interferogram (2-D complex numpy array)
    win: window size (must be odd)
    edge: 0: remove all non-full convolution samples
          1: remove samples computed from less than half convolution
             (win=5 used to illustration below)
             * * *
             * * *
             * * *
             * * *
             * * *
          2: remove samples computed from less than quater convolution
             (win=5 used to illustration below)
             * * *
             * * *
             * * *
          3: remove non-full convolution samples on image edges
          4: keep all samples

    returns a float coherence array with the same shape as inf.
    '''
    import scipy.signal as ss

    if win % 2 != 1:
        raise Exception('window size must be odd!')
    #half window; use builtin int: np.int was removed in numpy >= 1.24
    hwin = int(np.around((win - 1) / 2))

    filt = np.ones((win, win))
    amp = np.absolute(inf)

    #number of valid (non-zero) samples contributing to each window
    cnt = ss.convolve2d((amp!=0), filt, mode='same')
    #average the unit phasors, then normalize by the contributing count
    cor = ss.convolve2d(inf/(amp + (amp==0)), filt, mode='same')
    cor = (amp!=0) * np.absolute(cor) / (cnt + (cnt==0))

    #trim edges according to the requested policy
    if edge == 0:
        num = win * win
        cor[np.nonzero(cnt < num)] = 0.0
    elif edge == 1:
        num = win * (hwin+1)
        cor[np.nonzero(cnt < num)] = 0.0
    elif edge == 2:
        num = (hwin+1) * (hwin+1)
        cor[np.nonzero(cnt < num)] = 0.0
    elif edge == 3:
        cor[0:hwin, :] = 0.0
        cor[-hwin:, :] = 0.0
        cor[:, 0:hwin] = 0.0
        cor[:, -hwin:] = 0.0
    else:
        pass

    #print("coherence, max: {} min: {}".format(np.max(cor[np.nonzero(cor!=0)]), np.min(cor[np.nonzero(cor!=0)])))
    return cor
def getMergeBox(self, xmlDirname, numberRangeLooks=1, numberAzimuthLooks=1):
    '''
    Determine the merged-image box and per-burst valid regions.

    xmlDirname: directory containing xml file
    numberRangeLooks: number of range looks to take after merging
    numberAzimuthLooks: number of azimuth looks to take after merging

    returns (box, burstValidBox, burstValidBox2, frames).
    '''
    from isceobj.TopsProc.runMergeBursts import mergeBox
    from isceobj.TopsProc.runMergeBursts import adjustValidWithLooks

    #collect swath products that contain common bursts
    #since burst directory does not necessarily has IW*.xml, load from xmlDirname
    frames = []
    for swath in self._insar.getValidSwathList(self.swaths):
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        if minBurst == maxBurst:
            #no common bursts in this swath
            continue
        frames.append(self._insar.loadProduct(os.path.join(xmlDirname, 'IW{0}.xml'.format(swath))))

    #determine merged size
    box = mergeBox(frames)
    #adjust valid with looks, 'frames' ARE CHANGED AFTER RUNNING THIS
    (burstValidBox, burstValidBox2, message) = adjustValidWithLooks(frames, box, numberAzimuthLooks, numberRangeLooks, edge=0, avalid='strict', rvalid='strict')

    return (box, burstValidBox, burstValidBox2, frames)
def merge(self, ionParam):
    '''
    merge burst interferograms and compute coherence

    For each of the lower/upper subbands: merge the burst interferograms of all
    valid swaths into one interferogram (optionally multilooked by
    numberRangeLooks0 x numberAzimuthLooks0). Then compute a phase-only coherence
    from the lower/upper differential interferogram and write it as a 2-band
    interleaved (amplitude, coherence) file in the lower-band merged directory.
    '''
    from isceobj.TopsProc.runMergeBursts import mergeBox
    from isceobj.TopsProc.runMergeBursts import adjustValidWithLooks
    from isceobj.TopsProc.runMergeBursts import mergeBurstsVirtual
    from isceobj.TopsProc.runMergeBursts import multilook as multilook2

    #merge burst interferograms
    mergeFilename = self._insar.mergedIfgname
    #burst xml metadata is always taken from the lower subband directory
    xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)
    dirs = [ionParam.lowerDirname, ionParam.upperDirname]
    for dirx in dirs:
        mergeDirname = os.path.join(ionParam.ionDirname, dirx, ionParam.mergedDirname)
        burstDirname = os.path.join(ionParam.ionDirname, dirx, ionParam.fineIfgDirname)

        #collect swath products and the corresponding burst interferogram files
        frames=[]
        burstList = []
        swathList = self._insar.getValidSwathList(self.swaths)
        for swath in swathList:
            minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
            if minBurst==maxBurst:
                continue
            ifg = self._insar.loadProduct( os.path.join(xmlDirname, 'IW{0}.xml'.format(swath)))
            frames.append(ifg)
            burstList.append([os.path.join(burstDirname, 'IW{0}'.format(swath), 'burst_%02d.int'%(x+1)) for x in range(minBurst, maxBurst)])

        os.makedirs(mergeDirname, exist_ok=True)

        #'.full' suffix marks the full-resolution product that still needs multilooking
        suffix = '.full'
        if (ionParam.numberRangeLooks0 == 1) and (ionParam.numberAzimuthLooks0 == 1):
            suffix=''

        box = mergeBox(frames)
        #adjust valid with looks, 'frames' ARE CHANGED AFTER RUNNING THIS
        #here numberRangeLooks, instead of numberRangeLooks0, is used, since we need to do next step multilooking after unwrapping. same for numberAzimuthLooks.
        (burstValidBox, burstValidBox2, message) = adjustValidWithLooks(frames, box, ionParam.numberAzimuthLooks, ionParam.numberRangeLooks, edge=0, avalid='strict', rvalid='strict')
        mergeBurstsVirtual(frames, burstList, box, os.path.join(mergeDirname, mergeFilename+suffix))

        if suffix not in ['',None]:
            multilook2(os.path.join(mergeDirname, mergeFilename+suffix),
                outname = os.path.join(mergeDirname, mergeFilename),
                alks = ionParam.numberAzimuthLooks0, rlks=ionParam.numberRangeLooks0)
            #this is never used for ionosphere correction
        else:
            print('Skipping multi-looking ....')

    #The orginal coherence calculated by topsApp.py is not good at all, use the following coherence instead
    lowerintfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, self._insar.mergedIfgname)
    upperintfile = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.mergedDirname, self._insar.mergedIfgname)
    corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, self._insar.correlationFilename)

    img = isceobj.createImage()
    img.load(lowerintfile + '.xml')
    width = img.width
    length = img.length

    lowerint = np.fromfile(lowerintfile, dtype=np.complex64).reshape(length, width)
    upperint = np.fromfile(upperintfile, dtype=np.complex64).reshape(length, width)

    #compute coherence only using interferogram
    #here I use differential interferogram of lower and upper band interferograms
    #so that coherence is not affected by fringes
    cord = cal_coherence(lowerint*np.conjugate(upperint), win=3, edge=4)
    #interleave amplitude (even rows) and coherence (odd rows): 2-band BIL layout
    cor = np.zeros((length*2, width), dtype=np.float32)
    cor[0:length*2:2, :] = np.sqrt( (np.absolute(lowerint)+np.absolute(upperint))/2.0 )
    cor[1:length*2:2, :] = cord
    cor.astype(np.float32).tofile(corfile)

    #create xml and vrt
    #img.scheme = 'BIL'
    #img.bands = 2
    #img.filename = corfile
    #img.renderHdr()
    #img = isceobj.Image.createUnwImage()
    img = isceobj.createOffsetImage()
    img.setFilename(corfile)
    img.extraFilename = corfile + '.vrt'
    img.setWidth(width)
    img.setLength(length)
    img.renderHdr()
def renameFile(oldname, newname):
    '''
    Rename an ISCE image data file and regenerate its metadata (.xml/.vrt)
    under the new name, removing the old metadata files.
    '''
    oldXml = oldname + '.xml'
    oldVrt = oldname + '.vrt'

    #render metadata for the new name
    img = isceobj.createImage()
    img.load(oldXml)
    img.setFilename(newname)
    img.extraFilename = newname + '.vrt'
    img.renderHdr()

    #move the data file and drop the stale metadata
    os.rename(oldname, newname)
    os.remove(oldXml)
    os.remove(oldVrt)
def maskUnwrap(unwfile, maskfile):
    '''
    Zero out both bands of a 2-band (BIL) unwrapped interferogram wherever the
    mask file is zero, by running imageMath.py on a temporary copy.
    '''
    tmpfile = 'tmp.unw'
    #move the unwrapped file aside so imageMath.py can write back to unwfile
    renameFile(unwfile, tmpfile)
    runCmd("imageMath.py -e='a_0*(abs(b)!=0);a_1*(abs(b)!=0)' --a={0} --b={1} -s BIL -o={2}".format(tmpfile, maskfile, unwfile))
    #clean up the temporary copy and its metadata
    for leftover in (tmpfile, tmpfile+'.xml', tmpfile+'.vrt'):
        os.remove(leftover)
def snaphuUnwrap(self, xmlDirname, wrapName, corrfile, unwrapName, nrlks, nalks, costMode = 'DEFO',initMethod = 'MST', defomax = 4.0, initOnly = False):
    #runUnwrap(self, costMode = 'SMOOTH',initMethod = 'MCF', defomax = 2, initOnly = True)
    '''
    Unwrap an interferogram with snaphu.

    xmlDirname: xml dir name
    wrapName: input interferogram
    corrfile: input coherence file
    unwrapName: output unwrapped interferogram
    nrlks: number of range looks of the interferogram
    nalks: number of azimuth looks of the interferogram
    costMode/initMethod/defomax/initOnly: snaphu configuration passed through
    '''
    from contrib.Snaphu.Snaphu import Snaphu
    from isceobj.Planet.Planet import Planet

    img = isceobj.createImage()
    img.load(wrapName + '.xml')
    width = img.getWidth()

    #get radar geometry (wavelength, altitude, earth radius) from the first valid swath
    swathList = self._insar.getValidSwathList(self.swaths)
    for swath in swathList[0:1]:
        ifg = self._insar.loadProduct( os.path.join(xmlDirname, 'IW{0}.xml'.format(swath)))
        wavelength = ifg.bursts[0].radarWavelength

        ####tmid: middle of the swath sensing interval
        tstart = ifg.bursts[0].sensingStart
        tend = ifg.bursts[-1].sensingStop
        tmid = tstart + 0.5*(tend - tstart)

        #14-APR-2018: use the orbit of the middle burst
        #use builtin int: np.int was removed in numpy >= 1.24
        burst_index = int(np.around(len(ifg.bursts)/2))
        orbit = ifg.bursts[burst_index].orbit
        peg = orbit.interpolateOrbit(tmid, method='hermite')

        refElp = Planet(pname='Earth').ellipsoid
        llh = refElp.xyz_to_llh(peg.getPosition())
        hdg = orbit.getENUHeading(tmid)
        refElp.setSCH(llh[0], llh[1], hdg)
        earthRadius = refElp.pegRadCur
        altitude = llh[2]

    rangeLooks = nrlks
    azimuthLooks = nalks
    #effective number of independent looks used by snaphu's statistics
    azfact = 0.8
    rngfact = 0.8
    corrLooks = rangeLooks * azimuthLooks/(azfact*rngfact)
    maxComponents = 20

    snp = Snaphu()
    snp.setInitOnly(initOnly)
    snp.setInput(wrapName)
    snp.setOutput(unwrapName)
    snp.setWidth(width)
    snp.setCostMode(costMode)
    snp.setEarthRadius(earthRadius)
    snp.setWavelength(wavelength)
    snp.setAltitude(altitude)
    snp.setCorrfile(corrfile)
    snp.setInitMethod(initMethod)
    snp.setCorrLooks(corrLooks)
    snp.setMaxComponents(maxComponents)
    snp.setDefoMaxCycles(defomax)
    snp.setRangeLooks(rangeLooks)
    snp.setAzimuthLooks(azimuthLooks)
    #snp.setCorFileFormat('FLOAT_DATA')
    snp.prepare()
    snp.unwrap()

    ######Render XML
    outImage = isceobj.Image.createUnwImage()
    outImage.setFilename(unwrapName)
    outImage.setWidth(width)
    outImage.setAccessMode('read')
    outImage.renderVRT()
    outImage.createImage()
    outImage.finalizeImage()
    outImage.renderHdr()

    #####Check if connected components was created
    if snp.dumpConnectedComponents:
        connImage = isceobj.Image.createImage()
        connImage.setFilename(unwrapName+'.conncomp')
        connImage.setWidth(width)
        connImage.setAccessMode('read')
        connImage.setDataType('BYTE')
        connImage.renderVRT()
        connImage.createImage()
        connImage.finalizeImage()
        connImage.renderHdr()

    return
def unwrap(self, ionParam):
    '''
    Unwrap the lower and upper band merged interferograms with snaphu, mask the
    no-data areas, and multilook the results when the final looks differ from
    the unwrapping looks.
    '''
    print('unwrapping lower and upper band interferograms')

    #there is only one coherence file, kept in the lower-band directory
    corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, self._insar.correlationFilename)
    #burst xml metadata is always taken from the lower subband directory
    xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)

    for dirx in (ionParam.lowerDirname, ionParam.upperDirname):
        procdir = os.path.join(ionParam.ionDirname, dirx, ionParam.mergedDirname)
        wrapName = os.path.join(procdir, self._insar.mergedIfgname)
        unwrapName = os.path.join(procdir, self._insar.unwrappedIntFilename)
        #unwrap
        snaphuUnwrap(self, xmlDirname, wrapName, corfile, unwrapName, ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0, costMode = 'SMOOTH',initMethod = 'MCF', defomax = 2, initOnly = True)
        #remove wired things in no-data area
        maskUnwrap(unwrapName, wrapName)

    if [ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0] != [ionParam.numberRangeLooks, ionParam.numberAzimuthLooks]:
        multilook_unw(self, ionParam, ionParam.mergedDirname)
def multilook_unw(self, ionParam, mergedDirname):
    '''
    30-APR-2018
    This routine moves the original unwrapped files to a directory and takes looks

    ionParam:      ionosphere parameter object
    mergedDirname: merged directory name under ion/<lower|upper>/

    The (numberRangeLooks0 x numberAzimuthLooks0) products are moved into a
    sub-directory named '<rlks0>rlks_<alks0>alks', then products at the final
    (numberRangeLooks x numberAzimuthLooks) looks are written in their place.
    Coherence is re-computed from the multilooked lower/upper differential
    interferogram at the end.
    '''
    from isceobj.TopsProc.runMergeBursts import multilook as multilook2

    oridir0 = '{}rlks_{}alks'.format(ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0)
    dirs = [ionParam.lowerDirname, ionParam.upperDirname]
    corName = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, oridir0, self._insar.correlationFilename)
    for dirx in dirs:
        procdir = os.path.join(ionParam.ionDirname, dirx, mergedDirname)

        #create a directory for original files
        oridir = os.path.join(procdir, oridir0)
        os.makedirs(oridir, exist_ok=True)

        #move files, renameFile uses os.rename, which overwrites if file already exists in oridir. This can support re-run
        for basename in [self._insar.mergedIfgname,
                         self._insar.unwrappedIntFilename,
                         self._insar.unwrappedIntFilename+'.conncomp',
                         self._insar.correlationFilename]:
            filename0 = os.path.join(procdir, basename)
            if os.path.isfile(filename0):
                renameFile(filename0, os.path.join(oridir, basename))
        #for topophase.flat.full, move the metadata directly (no data file to rename)
        for ext in ['.full.vrt', '.full.xml']:
            filename0 = os.path.join(procdir, self._insar.mergedIfgname+ext)
            if os.path.isfile(filename0):
                os.rename(filename0, os.path.join(oridir, self._insar.mergedIfgname+ext))

        #multi-looking factors relative to the looks already taken
        #use builtin int: np.int was removed in numpy >= 1.24
        nrlks = int(np.around(ionParam.numberRangeLooks / ionParam.numberRangeLooks0))
        nalks = int(np.around(ionParam.numberAzimuthLooks / ionParam.numberAzimuthLooks0))

        #coherence: only present in the lower-band directory (processed first)
        if dirx == ionParam.lowerDirname:
            corName0 = os.path.join(oridir, self._insar.correlationFilename)
            corimg = isceobj.createImage()
            corimg.load(corName0 + '.xml')
            width = corimg.width
            length = corimg.length
            widthNew = int(width / nrlks)
            lengthNew = int(length / nalks)

            #2-band interleaved file: even rows amplitude, odd rows coherence
            cor0 = (np.fromfile(corName0, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
            amp0 = (np.fromfile(corName0, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
            wgt = cor0**2
            a = multilook(wgt, nalks, nrlks)
            b = multilook(cor0, nalks, nrlks)
            c = multilook(amp0**2, nalks, nrlks)
            d = multilook((cor0!=0).astype(int), nalks, nrlks)

            #coherence after multiple looking (normalized by the count of valid samples)
            cor = np.zeros((lengthNew*2, widthNew), dtype=np.float32)
            cor[0:lengthNew*2:2, :] = np.sqrt(c / (d + (d==0)))
            cor[1:lengthNew*2:2, :] = b / (d + (d==0))

            #output file
            corName = os.path.join(procdir, self._insar.correlationFilename)
            cor.astype(np.float32).tofile(corName)
            corimg.setFilename(corName)
            corimg.extraFilename = corName + '.vrt'
            corimg.setWidth(widthNew)
            corimg.setLength(lengthNew)
            corimg.renderHdr()

        #unwrapped file
        #NOTE: length/width/wgt/a/d come from the lower-band coherence pass above;
        #lower is first in 'dirs', so they are defined for the upper pass too
        unwrapName0 = os.path.join(oridir, self._insar.unwrappedIntFilename)
        unwimg = isceobj.createImage()
        unwimg.load(unwrapName0 + '.xml')
        unw0 = (np.fromfile(unwrapName0, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
        amp0 = (np.fromfile(unwrapName0, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
        #coherence-squared weighted average of the unwrapped phase
        e = multilook(unw0*wgt, nalks, nrlks)
        f = multilook(amp0**2, nalks, nrlks)
        unw = np.zeros((lengthNew*2, widthNew), dtype=np.float32)
        unw[0:lengthNew*2:2, :] = np.sqrt(f / (d + (d==0)))
        unw[1:lengthNew*2:2, :] = e / (a + (a==0))
        #output file
        unwrapName = os.path.join(procdir, self._insar.unwrappedIntFilename)
        unw.astype(np.float32).tofile(unwrapName)
        unwimg.setFilename(unwrapName)
        unwimg.extraFilename = unwrapName + '.vrt'
        unwimg.setWidth(widthNew)
        unwimg.setLength(lengthNew)
        unwimg.renderHdr()

    #looks like the above is not a good coherence, re-calculate here
    #here I use differential interferogram of lower and upper band interferograms
    #so that coherence is not affected by fringes
    lowerIntName0 = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, mergedDirname, oridir0, self._insar.mergedIfgname)
    upperIntName0 = os.path.join(ionParam.ionDirname, ionParam.upperDirname, mergedDirname, oridir0, self._insar.mergedIfgname)
    lowerIntName = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, mergedDirname, self._insar.mergedIfgname)
    upperIntName = os.path.join(ionParam.ionDirname, ionParam.upperDirname, mergedDirname, self._insar.mergedIfgname)
    multilook2(lowerIntName0, outname = lowerIntName, alks = nalks, rlks=nrlks)
    multilook2(upperIntName0, outname = upperIntName, alks = nalks, rlks=nrlks)

    lowerint = np.fromfile(lowerIntName, dtype=np.complex64).reshape(lengthNew, widthNew)
    upperint = np.fromfile(upperIntName, dtype=np.complex64).reshape(lengthNew, widthNew)
    cor = np.zeros((lengthNew*2, widthNew), dtype=np.float32)
    #fix: index with the multilooked length (lengthNew), not the original length;
    #the old code only worked because numpy clips out-of-range slice stops
    cor[0:lengthNew*2:2, :] = np.sqrt( (np.absolute(lowerint)+np.absolute(upperint))/2.0 )
    cor[1:lengthNew*2:2, :] = cal_coherence(lowerint*np.conjugate(upperint), win=3, edge=4)
    cor.astype(np.float32).tofile(corName)
def create_multi_index2(width2, l1, l2):
    '''
    Map multilooked sample indexes between two look levels.

    For data multilooked by l1 and by l2, compute, for each of the width2
    samples of the l2-look array, its (fractional) index in the l1-look array.
    Applies to both the range and the azimuth direction.
    '''
    #center shift between an l2-look window and an l1-look window
    shift = (l2 - l1) / 2.0
    #positions of the l2-look samples in 1-look units
    positions = np.arange(width2) * l2
    return (shift + positions) / l1
def fit_surface(x, y, z, wgt, order):
    '''
    Fit a 2-D polynomial surface to scattered data by weighted least squares.

    x:     x coordinates, an (m, 1) column vector
    y:     y coordinates, an (m, 1) column vector
    z:     data values,   an (m, 1) column vector
    wgt:   weights of the data points, an (m, 1) column vector
    order: order of the polynomial, must be >= 1

    returns: estimated coefficients as a numpy.ndarray, ordered to match the
             design-matrix construction below:
             1, x, y, x**2, x*y, y**2, x**3, x**2*y, x*y**2, y**3, ...
    '''
    if order < 1:
        raise Exception('order must be larger than 1.\n')

    #number of data points
    m = x.shape[0]

    #design matrix: constant term first, then x**(i-j) * y**j for each degree i
    a1 = np.ones((m, 1), dtype=np.float64)
    for i in range(1, order+1):
        for j in range(i+1):
            a1 = np.concatenate((a1, x**(i-j)*y**(j)), axis=1)

    #weighted least squares: scale each row by sqrt of its weight.
    #broadcasting of the (m, 1) weight column replaces the deprecated
    #numpy.matlib.repmat used previously.
    sqw = np.sqrt(wgt)
    a = a1 * sqw
    b = z * sqw

    #solve; rcond=-1 keeps the legacy (pre-1.14) lstsq cutoff behavior
    c = np.linalg.lstsq(a, b, rcond=-1)[0]

    #type: <class 'numpy.ndarray'>
    return c
def cal_surface(x, y, c, order):
    '''
    Evaluate the 2-D polynomial surface estimated by fit_surface on a grid.

    x:     x coordinates, a 1-D row vector of length width
    y:     y coordinates, a column vector of shape (length, 1)
    c:     polynomial coefficients returned by fit_surface
    order: order of the polynomial, must be >= 1 and match fit_surface

    returns: (length, width) array of surface values
    '''
    if order < 1:
        raise Exception('order must be larger than 1.\n')

    #number of lines
    length = y.shape[0]
    #number of columns; x is a 1-D row vector, so its length is shape[0]
    width = x.shape[0]

    #constant term. NumPy broadcasting of x (width,) against y (length, 1)
    #produces the full grid, replacing the deprecated numpy.matlib.repmat
    #replication used previously.
    z = c[0] * np.ones((length, width), dtype=np.float64)

    #term ordering must match fit_surface: x**(i-j) * y**j
    index = 0
    for i in range(1, order+1):
        for j in range(i+1):
            index += 1
            z += c[index] * x**(i-j) * y**(j)

    return z
def weight_fitting(ionos, cor, width, length, nrli, nali, nrlo, nalo, order, coth):
    '''
    Fit a polynomial surface to the ionospheric phase using coherence-derived
    weights, and evaluate it on the output multilook grid.

    ionos:  input ionospheric phase
    cor:    coherence of the interferogram
    width:  file width (1-look samples)
    length: file length (1-look lines)
    nrli:   number of range looks of the input interferograms
    nali:   number of azimuth looks of the input interferograms
    nrlo:   number of range looks of the output ionosphere phase
    nalo:   number of azimuth looks of the output ionosphere phase
    order:  the order of the polynomial for fitting ionosphere phase estimates
    coth:   coherence threshold for ionosphere phase estimation

    returns: fitted phase surface of shape (length/nalo, width/nrlo)
    '''
    lengthi = int(length/nali)
    widthi = int(width/nrli)
    lengtho = int(length/nalo)
    widtho = int(width/nrlo)

    #calculate output index
    rgindex = create_multi_index2(widtho, nrli, nrlo)
    azindex = create_multi_index2(lengtho, nali, nalo)

    #convert coherence to weight (empirical mapping; 1.009 keeps the
    #denominator positive for cor up to 1)
    cor = cor**2/(1.009-cor**2)

    #look for data to use: coherent enough and non-zero phase
    flag = (cor>coth)*(ionos!=0)
    point_index = np.nonzero(flag)
    m = point_index[0].shape[0]

    #calculate input index matrix; np.tile replaces the deprecated
    #numpy.matlib.repmat
    x0 = np.tile(np.arange(widthi), (lengthi, 1))
    y0 = np.tile(np.arange(lengthi).reshape(lengthi, 1), (1, widthi))

    x = x0[point_index].reshape(m, 1)
    y = y0[point_index].reshape(m, 1)
    z = ionos[point_index].reshape(m, 1)
    w = cor[point_index].reshape(m, 1)

    #convert to higher precision type before use; np.asarray replaces
    #np.asfarray, which was removed in NumPy 2.0
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    z = np.asarray(z, dtype=np.float64)
    w = np.asarray(w, dtype=np.float64)
    coeff = fit_surface(x, y, z, w, order)

    #evaluate on the output grid, also in float64
    rgindex = np.asarray(rgindex, dtype=np.float64)
    azindex = np.asarray(azindex, dtype=np.float64)
    phase_fit = cal_surface(rgindex, azindex.reshape(lengtho, 1), coeff, order)

    #format: widtho, lengtho, single band (written out as float32 by callers)
    return phase_fit
def computeIonosphere(lowerUnw, upperUnw, cor, fl, fu, adjFlag, corThresholdAdj, dispersive):
    '''
    This routine computes ionosphere and remove the relative phase unwrapping errors

    lowerUnw:        lower band unwrapped interferogram
    upperUnw:        upper band unwrapped interferogram.
                     NOTE: adjusted IN PLACE below to remove relative (2*pi cycle)
                     unwrapping differences with respect to the lower band.
    cor:             coherence
    fl:              lower band center frequency
    fu:              upper band center frequency
    adjFlag:         method for removing relative phase unwrapping errors
                       0: mean value
                       1: polynomial
    corThresholdAdj: coherence threshold of samples used in removing relative phase unwrapping errors
    dispersive:      compute dispersive or non-dispersive
                       0: dispersive
                       1: non-dispersive
    '''

    #use image size from lower unwrapped interferogram
    (length, width)=lowerUnw.shape

    ##########################################################################################
    # ADJUST PHASE USING MEAN VALUE
    # #ajust phase of upper band to remove relative phase unwrapping errors
    # flag = (lowerUnw!=0)*(cor>=ionParam.corThresholdAdj)
    # index = np.nonzero(flag!=0)
    # mv = np.mean((lowerUnw - upperUnw)[index], dtype=np.float64)
    # print('mean value of phase difference: {}'.format(mv))
    # flag2 = (lowerUnw!=0)
    # index2 = np.nonzero(flag2)
    # #phase for adjustment
    # unwd = ((lowerUnw - upperUnw)[index2] - mv) / (2.0*np.pi)
    # unw_adj = np.around(unwd) * (2.0*np.pi)
    # #ajust phase of upper band
    # upperUnw[index2] += unw_adj
    # unw_diff = lowerUnw - upperUnw
    # print('after adjustment:')
    # print('max phase difference: {}'.format(np.amax(unw_diff)))
    # print('min phase difference: {}'.format(np.amin(unw_diff)))
    ##########################################################################################

    #estimate a model 'diff' of the lower-upper phase difference
    #adjust phase using mean value
    if adjFlag == 0:
        #only use coherent, non-zero samples for the mean
        flag = (lowerUnw!=0)*(cor>=corThresholdAdj)
        index = np.nonzero(flag!=0)
        mv = np.mean((lowerUnw - upperUnw)[index], dtype=np.float64)
        print('mean value of phase difference: {}'.format(mv))
        diff = mv
    #adjust phase using a surface
    else:
        #2nd-order polynomial fit of the phase difference (weight_fitting is
        #defined earlier in this file); looks are all 1 here
        diff = weight_fitting(lowerUnw - upperUnw, cor, width, length, 1, 1, 1, 1, 2, corThresholdAdj)

    flag2 = (lowerUnw!=0)
    index2 = np.nonzero(flag2)
    #phase for adjustment: residual difference from the model, in cycles
    unwd = ((lowerUnw - upperUnw) - diff)[index2] / (2.0*np.pi)
    #round to whole 2*pi cycles
    unw_adj = np.around(unwd) * (2.0*np.pi)
    #ajust phase of upper band (in-place modification of the input array)
    upperUnw[index2] += unw_adj

    unw_diff = (lowerUnw - upperUnw)[index2]
    print('after adjustment:')
    print('max phase difference: {}'.format(np.amax(unw_diff)))
    print('min phase difference: {}'.format(np.amin(unw_diff)))
    print('max-min: {}'.format(np.amax(unw_diff) - np.amin(unw_diff) ))

    #ionosphere
    #fl = SPEED_OF_LIGHT / ionParam.radarWavelengthLower
    #fu = SPEED_OF_LIGHT / ionParam.radarWavelengthUpper
    f0 = (fl + fu) / 2.0
    #dispersive
    if dispersive == 0:
        ionos = fl * fu * (lowerUnw * fu - upperUnw * fl) / f0 / (fu**2 - fl**2)
    #non-dispersive phase
    else:
        ionos = f0 * (upperUnw*fu - lowerUnw * fl) / (fu**2 - fl**2)

    return ionos
def ionosphere(self, ionParam):
    '''
    Compute the raw (unprojected, unfiltered) ionospheric phase from the merged
    lower- and upper-band unwrapped interferograms, and dump it together with a
    coherence file under ionParam.ioncalDirname.

    The unwrapped and correlation files are 2-band BIL-style arrays: band 0
    (even lines of the length*2 x width buffer) and band 1 (odd lines)
    interleaved line by line, hence the stride-2 slicing below.
    '''
    ###################################
    #SET PARAMETERS HERE
    #THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
    corThresholdAdj = 0.85
    ###################################

    print('computing ionosphere')
    #get files
    lowerUnwfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, self._insar.unwrappedIntFilename)
    upperUnwfile = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.mergedDirname, self._insar.unwrappedIntFilename)
    corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, self._insar.correlationFilename)

    #use image size from lower unwrapped interferogram
    img = isceobj.createImage()
    img.load(lowerUnwfile + '.xml')
    width = img.width
    length = img.length

    #band 1 = phase, band 0 = amplitude of each interleaved file
    lowerUnw = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    upperUnw = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    lowerAmp = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
    upperAmp = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
    cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    #combined amplitude of the two bands
    amp = np.sqrt(lowerAmp**2+upperAmp**2)

    #compute ionosphere
    fl = SPEED_OF_LIGHT / ionParam.radarWavelengthLower
    fu = SPEED_OF_LIGHT / ionParam.radarWavelengthUpper
    #adjFlag=1: polynomial-based relative unwrapping-error removal;
    #last argument 0: compute the dispersive component
    adjFlag = 1
    ionos = computeIonosphere(lowerUnw, upperUnw, cor, fl, fu, adjFlag, corThresholdAdj, 0)

    #dump ionosphere (same interleaved amp/phase layout as the inputs)
    outDir = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname)
    os.makedirs(outDir, exist_ok=True)
    outFilename = os.path.join(outDir, ionParam.ionRawNoProj)
    ion = np.zeros((length*2, width), dtype=np.float32)
    ion[0:length*2:2, :] = amp
    ion[1:length*2:2, :] = ionos
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()

    #dump coherence (amplitude band reused from above)
    outFilename = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCorNoProj)
    ion[1:length*2:2, :] = cor
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()
def cal_cross_ab_ramp(swathList, width, numberRangeLooks, passDirection):
    '''
    calculate an empirical ramp between Sentinel-1A/B
    29-JUN-2018

    swathList:        self._insar.getValidSwathList(self.swaths)
    width:            single-look image width after merging
    numberRangeLooks: number of range looks in the processing of ionosphere estimation
    passDirection:    descending/ascending

    returns: 1-D ramp of length width/numberRangeLooks
    '''
    #below is from processing chile_d156_160725(S1A)-160929(S1B)
    #empirical polynomial coefficients, highest degree first (np.polyval order)
    deg = 3
    if passDirection.lower() == 'descending':
        p = np.array([0.95381267, 2.95567604, -4.56047084, 1.05443172])
    elif passDirection.lower() == 'ascending':
        #for ascending, the polynomial is left/right flipped
        p = np.array([-0.95381267, 5.81711404, -4.21231923, 0.40344958])
    else:
        raise Exception('unknown passDirection! should be either descending or ascending')

    #ca/a166/process/160807-170305 also has the swath offset almost equal to these
    #swath offset in single-look range pixels
    swath_offset = [0, 19810, 43519]
    #total number of single-look range pixels
    tnp = 69189

    #multilooked width (plain int: np.int was removed in NumPy 1.24)
    width2 = int(width/numberRangeLooks)

    #getting x: normalized range coordinate in [0, 1] across the full 3-swath extent
    nswath = len(swathList)
    if nswath == 3:
        x = np.arange(width2) / (width2 - 1.0)
    else:
        #WARNING: what if the some swaths does not have bursts, and are not merged?
        #         here I just simply ignore this case
        offset = swath_offset[swathList[0]-1]
        x = offset / tnp + width / tnp * np.arange(width2) / (width2 - 1.0)

    #calculate ramp. np.polyval replaces the original explicit loop, whose
    #x**[deg-i] exponent was a typo that only worked through NumPy's
    #broadcasting of a one-element list.
    y_fit = np.polyval(p, x)

    return y_fit
def ionSwathBySwath(self, ionParam):
    '''
    This routine merge, unwrap and compute ionosphere swath by swath, and then
    adjust phase difference between adjacent swaths caused by relative range timing
    error between adjacent swaths.

    This routine includes the following steps in the merged-swath processing:
    merge(self, ionParam)
    unwrap(self, ionParam)
    ionosphere(self, ionParam)
    '''
    from isceobj.TopsProc.runMergeBursts import mergeBox
    from isceobj.TopsProc.runMergeBursts import adjustValidWithLooks
    from isceobj.TopsProc.runMergeBursts import mergeBurstsVirtual
    from isceobj.TopsProc.runMergeBursts import multilook as multilook2

    #########################################
    #SET PARAMETERS HERE
    numberRangeLooks = ionParam.numberRangeLooks
    numberAzimuthLooks = ionParam.numberAzimuthLooks
    numberRangeLooks0 = ionParam.numberRangeLooks0
    numberAzimuthLooks0 = ionParam.numberAzimuthLooks0
    #THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
    corThresholdSwathAdj = 0.85
    corThresholdAdj = 0.85
    #########################################

    print('computing ionosphere swath by swath')
    #if ionParam.calIonWithMerged == False:
    warningInfo = '{} calculating ionosphere swath by swath, there may be slight phase error between subswaths\n'.format(datetime.datetime.now())
    with open(os.path.join(ionParam.ionDirname, ionParam.warning), 'a') as f:
        f.write(warningInfo)

    #get bursts: count swaths that actually have common reference bursts
    numValidSwaths = 0
    swathList = self._insar.getValidSwathList(self.swaths)
    for swath in swathList:
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        if minBurst==maxBurst:
            #print('Skipping processing of swath {0}'.format(swath))
            continue
        numValidSwaths += 1

    if numValidSwaths <= 1:
        raise Exception('There are less than one subswaths, no need to use swath-by-swath method to compute ionosphere!')
    else:
        xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)
        (box, burstValidBox, burstValidBox2, frames) = getMergeBox(self, xmlDirname, numberRangeLooks=ionParam.numberRangeLooks, numberAzimuthLooks=ionParam.numberAzimuthLooks)

    #compute ionosphere swath by swath; per-swath results are collected in
    #these lists (only for valid swaths, in swathList order)
    corList = []
    ampList = []
    ionosList = []
    nswath = len(swathList)
    ii = -1
    for i in range(nswath):
        swath = swathList[i]
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        if minBurst==maxBurst:
            print('Skipping processing of swath {0}'.format(swath))
            continue
        else:
            ii += 1

        ########################################################
        #STEP 1. MERGE THE BURSTS OF A SWATH
        ########################################################
        dirs = [ionParam.lowerDirname, ionParam.upperDirname]
        for dirx in dirs:
            outputFilename = self._insar.mergedIfgname
            outputDirname = os.path.join(ionParam.ionDirname, dirx, ionParam.mergedDirname + '_IW{0}'.format(swath))
            os.makedirs(outputDirname, exist_ok=True)
            #'.full' marks the 1-look merged file that still needs multilooking
            suffix = '.full'
            if (numberRangeLooks0 == 1) and (numberAzimuthLooks0 == 1):
                suffix=''

            #merge
            burstPattern = 'burst_%02d.int'
            burstDirname = os.path.join(ionParam.ionDirname, dirx, ionParam.fineIfgDirname)
            ifg = self._insar.loadProduct( os.path.join(burstDirname, 'IW{0}.xml'.format(swath)))
            bst = [os.path.join(burstDirname, 'IW{0}'.format(swath), burstPattern%(x+1)) for x in range(minBurst, maxBurst)]
            #doing adjustment before use
            adjustValidWithLooks([ifg], box, numberAzimuthLooks, numberRangeLooks, edge=0, avalid='strict', rvalid=np.int(np.around(numberRangeLooks/8.0)))
            mergeBurstsVirtual([ifg], [bst], box, os.path.join(outputDirname, outputFilename+suffix))

            #take looks
            if suffix not in ['', None]:
                multilook2(os.path.join(outputDirname, outputFilename+suffix),
                           os.path.join(outputDirname, outputFilename),
                           numberAzimuthLooks0,
                           numberRangeLooks0)
            else:
                print('skipping multilooking')

        #The orginal coherence calculated by topsApp.py is not good at all, use the following coherence instead
        lowerintfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.mergedIfgname)
        upperintfile = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.mergedIfgname)
        corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.correlationFilename)

        img = isceobj.createImage()
        img.load(lowerintfile + '.xml')
        width = img.width
        length = img.length
        lowerint = np.fromfile(lowerintfile, dtype=np.complex64).reshape(length, width)
        upperint = np.fromfile(upperintfile, dtype=np.complex64).reshape(length, width)

        ##########################################################################
        #slight filtering to improve the estimation accurary of swath difference
        #(psfilt1 is an optional external tool; the files are filtered in place
        #via a temporary file when it is available on PATH)
        if 1 and shutil.which('psfilt1') != None:
            cmd1 = 'mv {} tmp'.format(lowerintfile)
            cmd2 = 'psfilt1 tmp {} {} .3 32 8'.format(lowerintfile, width)
            cmd3 = 'rm tmp'
            cmd4 = 'mv {} tmp'.format(upperintfile)
            cmd5 = 'psfilt1 tmp {} {} .3 32 8'.format(upperintfile, width)
            cmd6 = 'rm tmp'
            runCmd(cmd1)
            runCmd(cmd2)
            runCmd(cmd3)
            runCmd(cmd4)
            runCmd(cmd5)
            runCmd(cmd6)
        ##########################################################################

        #compute coherence only using interferogram
        #here I use differential interferogram of lower and upper band interferograms
        #so that coherence is not affected by fringes
        cord = cal_coherence(lowerint*np.conjugate(upperint), win=3, edge=4)
        #2-band line-interleaved output: even lines amplitude, odd lines coherence
        cor = np.zeros((length*2, width), dtype=np.float32)
        cor[0:length*2:2, :] = np.sqrt( (np.absolute(lowerint)+np.absolute(upperint))/2.0 )
        cor[1:length*2:2, :] = cord
        cor.astype(np.float32).tofile(corfile)

        #create xml and vrt
        #img.scheme = 'BIL'
        #img.bands = 2
        #img.filename = corfile
        #img.renderHdr()

        #img = isceobj.Image.createUnwImage()
        img = isceobj.createOffsetImage()
        img.setFilename(corfile)
        img.extraFilename = corfile + '.vrt'
        img.setWidth(width)
        img.setLength(length)
        img.renderHdr()

        ########################################################
        #STEP 2. UNWRAP SWATH INTERFEROGRAM
        ########################################################
        dirs = [ionParam.lowerDirname, ionParam.upperDirname]
        #there is only one coherence file in lower directory
        corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.correlationFilename)
        for dirx in dirs:
            procdir = os.path.join(ionParam.ionDirname, dirx, ionParam.mergedDirname + '_IW{0}'.format(swath))
            wrapName = os.path.join(procdir, self._insar.mergedIfgname)
            unwrapName = os.path.join(procdir, self._insar.unwrappedIntFilename)
            xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)
            #unwrap
            snaphuUnwrap(self, xmlDirname, wrapName, corfile, unwrapName, numberRangeLooks0, numberAzimuthLooks0, costMode = 'SMOOTH',initMethod = 'MCF', defomax = 2, initOnly = True)
            #remove wired things in no-data area
            maskUnwrap(unwrapName, wrapName)

        #bring the unwrapped result to the final look level if it differs
        if [ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0] != [ionParam.numberRangeLooks, ionParam.numberAzimuthLooks]:
            multilook_unw(self, ionParam, ionParam.mergedDirname + '_IW{0}'.format(swath))

        ########################################################
        #STEP 3. COMPUTE IONOSPHERE
        ########################################################
        #get files
        lowerUnwfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.unwrappedIntFilename)
        upperUnwfile = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.unwrappedIntFilename)
        corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname + '_IW{0}'.format(swath), self._insar.correlationFilename)

        #use image size from lower unwrapped interferogram
        img = isceobj.createImage()
        img.load(lowerUnwfile + '.xml')
        width = img.width
        length = img.length

        #files are 2-band line-interleaved: band 0 amplitude, band 1 phase/coherence
        lowerUnw = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
        upperUnw = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
        lowerAmp = (np.fromfile(lowerUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
        upperAmp = (np.fromfile(upperUnwfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
        cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
        amp = np.sqrt(lowerAmp**2+upperAmp**2)

        #compute ionosphere
        fl = SPEED_OF_LIGHT / ionParam.radarWavelengthLower
        fu = SPEED_OF_LIGHT / ionParam.radarWavelengthUpper
        adjFlag = 1
        ionos = computeIonosphere(lowerUnw, upperUnw, cor, fl, fu, adjFlag, corThresholdAdj, 0)

        #dump result
        outDir = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname + '_IW{0}'.format(swath))
        os.makedirs(outDir, exist_ok=True)
        outFilename = os.path.join(outDir, ionParam.ionRawNoProj)
        ion = np.zeros((length*2, width), dtype=np.float32)
        ion[0:length*2:2, :] = amp
        ion[1:length*2:2, :] = ionos
        ion.astype(np.float32).tofile(outFilename)
        img.filename = outFilename
        img.extraFilename = outFilename + '.vrt'
        img.renderHdr()

        corList.append(cor)
        ampList.append(amp)
        ionosList.append(ionos)

    #do adjustment between ajacent swaths: remove the constant offset of the
    #outer swaths relative to the middle swath (list index 1) using their overlap
    if numValidSwaths == 3:
        adjustList = [ionosList[0], ionosList[2]]
    else:
        adjustList = [ionosList[0]]
    for adjdata in adjustList:
        index = np.nonzero((adjdata!=0) * (ionosList[1]!=0) * (corList[1] > corThresholdSwathAdj))
        if index[0].size < 5:
            print('WARNING: too few samples available for adjustment between swaths: {} with coherence threshold: {}'.format(index[0].size, corThresholdSwathAdj))
            print('         no adjustment made')
            print('         to do ajustment, please consider using lower coherence threshold')
        else:
            print('number of samples available for adjustment in the overlap area: {}'.format(index[0].size))
            #diff = np.mean((ionosList[1] - adjdata)[index], dtype=np.float64)
            #use weighted mean instead
            wgt = corList[1][index]**14
            diff = np.sum((ionosList[1] - adjdata)[index] * wgt / np.sum(wgt, dtype=np.float64), dtype=np.float64)
            index2 = np.nonzero(adjdata!=0)
            adjdata[index2] = adjdata[index2] + diff

    #get merged ionosphere: paste each swath's valid-burst regions into one image
    ampMerged = np.zeros((length, width), dtype=np.float32)
    corMerged = np.zeros((length, width), dtype=np.float32)
    ionosMerged = np.zeros((length, width), dtype=np.float32)
    for i in range(numValidSwaths):
        nBurst = len(burstValidBox[i])
        for j in range(nBurst):
            #index after multi-looking in merged image, index starts from 1
            first_line = np.int(np.around((burstValidBox[i][j][0] - 1) / numberAzimuthLooks + 1))
            last_line = np.int(np.around(burstValidBox[i][j][1] / numberAzimuthLooks))
            first_sample = np.int(np.around((burstValidBox[i][j][2] - 1) / numberRangeLooks + 1))
            last_sample = np.int(np.around(burstValidBox[i][j][3] / numberRangeLooks))

            corMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
                corList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
            ampMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
                ampList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
            ionosMerged[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1] = \
                ionosList[i][first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]

    #remove an empirical ramp (cross Sentinel-1A/B pairs only)
    if ionParam.rampRemovel != 0:
        warningInfo = '{} calculating ionosphere for cross S-1A/B interferogram, an empirical ramp is removed from estimated ionosphere\n'.format(datetime.datetime.now())
        with open(os.path.join(ionParam.ionDirname, ionParam.warning), 'a') as f:
            f.write(warningInfo)
        abramp = cal_cross_ab_ramp(swathList, box[1], numberRangeLooks, ionParam.passDirection)
        if ionParam.rampRemovel == -1:
            abramp *= -1.0
        #currently do not apply this
        #ionosMerged -= abramp[None, :]

    #dump ionosphere
    outDir = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname)
    os.makedirs(outDir, exist_ok=True)
    outFilename = os.path.join(outDir, ionParam.ionRawNoProj)
    ion = np.zeros((length*2, width), dtype=np.float32)
    ion[0:length*2:2, :] = ampMerged
    ion[1:length*2:2, :] = ionosMerged
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()

    #dump coherence
    outFilename = os.path.join(outDir, ionParam.ionCorNoProj)
    ion[1:length*2:2, :] = corMerged
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()
def multilookIndex(first, last, nl):
    '''
    Create the index of the multilooked pixels in the original 1-look array.

    The original 1-look index can start from any number (0, 1, ...); after
    multilooking the index still starts from the same number.

    first: index of first pixel in the original 1-look array
    last:  index of last pixel in the original 1-look array
    nl:    number of looks (nl can also be 1). nl >= 1
    '''
    #center of the first nl-pixel window: (first + (first + nl - 1)) / 2
    start = first + (nl - 1) / 2.0
    #number of complete windows between first and last
    count = int((last - first + 1) / nl)
    return start + nl * np.arange(count)
def computeDopplerOffset(burst, firstline, lastline, firstcolumn, lastcolumn, nrlks=1, nalks=1):
    '''
    compute offset corresponding to center Doppler frequency
    firstline, lastline, firstcolumn, lastcolumn: index of original 1-look burst, index starts from 1.
    nrlks, nalks: number of range/azimuth looks of the output grid
    output: first lines > 0, last lines < 0
    returns (dopplerOffset, Ka): offset is a 2-D array over (azimuth, range) in
    units of multilooked azimuth lines; Ka is the azimuth FM rate per range sample.
    '''
    #satellite speed: norm of the interpolated orbit velocity at mid-burst time
    Vs = np.linalg.norm(burst.orbit.interpolateOrbit(burst.sensingMid, method='hermite').getVelocity())
    #azimuth frequency rate introduced by the TOPS antenna steering
    Ks = 2 * Vs * burst.azimuthSteeringRate / burst.radarWavelength

    #slant range of each multilooked output column
    #firstcolumn, lastcolumn: index starts from 1
    rng = multilookIndex(firstcolumn-1, lastcolumn-1, nrlks) * burst.rangePixelSize + burst.startingRange
    #azimuth time of each multilooked output line, relative to mid-burst
    #firstline, lastline: index starts from 1
    eta = ( multilookIndex(firstline-1, lastline-1, nalks) - (burst.numberOfLines-1.0)/2.0) * burst.azimuthTimeInterval

    #Doppler centroid and azimuth FM rate evaluated per range sample
    f_etac = burst.doppler(rng)
    Ka = burst.azimuthFMRate(rng)

    #reference azimuth time per range sample
    eta_ref = (burst.doppler(burst.startingRange) / burst.azimuthFMRate(burst.startingRange) ) - (f_etac / Ka)
    #combined rate of steering and FM rate
    Kt = Ks / (1.0 - Ks/Ka)

    #carr = np.pi * Kt[None,:] * ((eta[:,None] - eta_ref[None,:])**2)
    #center doppler frequency due to rotation; dividing by
    #(azimuthTimeInterval * nalks) converts to multilooked azimuth lines
    dopplerOffset1 = (eta[:,None] - eta_ref[None,:]) * Kt / Ka[None,:] / (burst.azimuthTimeInterval * nalks)
    #center doppler frequency due to squint
    dopplerOffset2 = (f_etac[None,:] / Ka[None,:]) / (burst.azimuthTimeInterval * nalks)
    dopplerOffset = dopplerOffset1 + dopplerOffset2

    return (dopplerOffset, Ka)
def grd2ion(self, ionParam):
    '''
    Resample the estimated ionosphere (and its coherence) from the ground layer
    to the ionospheric layer by shifting each burst column in azimuth by the
    Doppler-derived offset scaled with ionHeight/(satHeight - earthRadius).
    Results are written as ionParam.ionRaw and ionParam.ionCor.
    '''
    from scipy import interpolate
    from scipy.interpolate import interp1d

    print('resampling ionosphere from ground to ionospheric layer')

    #get files
    corfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCorNoProj)
    ionfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionRawNoProj)

    #use image size from lower unwrapped interferogram
    img = isceobj.createImage()
    img.load(corfile + '.xml')
    width = img.width
    length = img.length

    #2-band line-interleaved files: band 0 amplitude, band 1 phase/coherence
    cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    amp = (np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
    ionos = (np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]

    #use the satellite height of the mid burst of first swath of reference acquistion
    swathList = self._insar.getValidSwathList(self.swaths)
    reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swathList[0])))
    minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swathList[0]-1)
    #no problem with this index at all
    midBurst = np.int(np.around((minBurst+ maxBurst-1) / 2.0))
    masBurst = reference.bursts[midBurst]
    #satellite height
    satHeight = np.linalg.norm(masBurst.orbit.interpolateOrbit(masBurst.sensingMid, method='hermite').getPosition())
    #orgininal doppler offset should be multiplied by this ratio
    ratio = ionParam.ionHeight/(satHeight-ionParam.earthRadius)

    xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)
    (box, burstValidBox, burstValidBox2, frames) = getMergeBox(self, xmlDirname, numberRangeLooks=ionParam.numberRangeLooks, numberAzimuthLooks=ionParam.numberAzimuthLooks)

    ##############################################################################################################
    #rebuild the frames list with only the valid swaths/bursts
    swathList = self._insar.getValidSwathList(self.swaths)
    frames=[]
    #for valid swaths and bursts, consistent with runMergeBursts.py
    for swath in swathList:
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        if minBurst==maxBurst:
            print('Skipping processing of swath {0}'.format(swath))
            continue
        ifg = self._insar.loadProduct( os.path.join(xmlDirname, 'IW{0}.xml'.format(swath)))
        frames.append(ifg)
    ##############################################################################################################

    #resample each band in place, burst by burst, column by column
    for band in [amp, ionos, cor]:
        nswath = len(frames)
        for i in range(nswath):
            nburst = len(frames[i].bursts)
            for j in range(nburst):
                #according to runBurstIfg.py, this is originally from self._insar.referenceSlcProduct, 'IW{0}.xml'
                masBurst = frames[i].bursts[j]
                (dopplerOffset, Ka) = computeDopplerOffset(masBurst, burstValidBox2[i][j][0], burstValidBox2[i][j][1], burstValidBox2[i][j][2], burstValidBox2[i][j][3], nrlks=ionParam.numberRangeLooks, nalks=ionParam.numberAzimuthLooks)
                #scale the ground-layer offset to the ionospheric layer
                offset = ratio * dopplerOffset

                #burstValidBox entries: 0 firstlineAdj, 1 lastlineAdj,
                #2 firstcolumnAdj, 3 lastcolumnAdj
                #after multiplication, index starts from 1
                firstline = np.int(np.around((burstValidBox[i][j][0] - 1) / ionParam.numberAzimuthLooks + 1))
                lastline = np.int(np.around(burstValidBox[i][j][1] / ionParam.numberAzimuthLooks))
                firstcolumn = np.int(np.around((burstValidBox[i][j][2] - 1) / ionParam.numberRangeLooks + 1))
                lastcolumn = np.int(np.around(burstValidBox[i][j][3] / ionParam.numberRangeLooks))

                #extract image (a view, so writes below modify 'band' in place)
                burstImage = band[firstline-1:lastline, firstcolumn-1:lastcolumn]
                blength = lastline - firstline + 1
                bwidth = lastcolumn - firstcolumn + 1

                #interpolation along azimuth, one range column at a time
                index0 = np.linspace(0, blength-1, num=blength, endpoint=True)
                for k in range(bwidth):
                    index = index0 + offset[:, k]
                    value = burstImage[:, k]
                    f = interp1d(index, value, kind='cubic', fill_value="extrapolate")

                    #only keep output samples inside the shifted data extent
                    index_min = np.int(np.around(np.amin(index)))
                    index_max = np.int(np.around(np.amax(index)))
                    flag = index0 * 0.0
                    flag[index_min:index_max+1] = 1.0

                    #replace the original column with new column in burstImage
                    #this should also replace teh original column with new column in band
                    burstImage[:, k] = (f(index0)) * flag

    #dump ionosphere with projection
    outDir = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname)
    outFilename = os.path.join(outDir, ionParam.ionRaw)
    ion = np.zeros((length*2, width), dtype=np.float32)
    ion[0:length*2:2, :] = amp
    ion[1:length*2:2, :] = ionos
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()

    #dump coherence with projection
    outFilename = os.path.join(outDir, ionParam.ionCor)
    ion[1:length*2:2, :] = cor
    ion.astype(np.float32).tofile(outFilename)
    img.filename = outFilename
    img.extraFilename = outFilename + '.vrt'
    img.renderHdr()
def gaussian(size, sigma, scale = 1.0):
    '''
    Create a normalized 2-D Gaussian window.

    size:  window size in samples, must be odd
    sigma: standard deviation of the Gaussian
    scale: sample spacing applied to the coordinate axis

    returns: (size, size) float array summing to 1.0
    raises:  Exception if size is even
    '''
    if size % 2 != 1:
        raise Exception('size must be odd')
    hsize = (size - 1) // 2
    x = np.arange(-hsize, hsize + 1) * scale
    #1-D Gaussian profile
    f = np.exp(-x**2/(2.0*sigma**2)) / (sigma * np.sqrt(2.0*np.pi))
    #the window is separable: outer product of the 1-D profile with itself.
    #np.outer replaces the deprecated numpy.matlib.repmat construction.
    f2d = np.outer(f, f)
    #normalize so the weights sum to 1
    return f2d/np.sum(f2d)
def adaptive_gaussian(ionos, wgt, size_max, size_min):
    '''
    This program performs Gaussian filtering with adaptive window size.

    For each pixel, 100 window sizes between size_min and size_max are tried,
    and the filtered value whose estimated standard deviation is closest to the
    mean standard deviation is kept.

    ionos:    ionosphere. NOTE: zeroed in place where wgt is zero.
    wgt:      weight. NOTE: zeroed in place where ionos is zero.
    size_max: maximum window size
    size_min: minimum window size

    returns: filtered ionosphere, same shape as ionos
    '''
    import scipy.signal as ss

    length = (ionos.shape)[0]
    width = (ionos.shape)[1]

    #only keep samples where both data and weight are valid
    flag = (ionos!=0) * (wgt!=0)
    ionos *= flag
    wgt *= flag

    size_num = 100
    size = np.linspace(size_min, size_max, num=size_num, endpoint=True)
    std = np.zeros((length, width, size_num))
    flt = np.zeros((length, width, size_num))

    #calculate filterd image and standard deviation
    #sigma of window size: size_max
    sigma = size_max / 2.0
    for i in range(size_num):
        #plain int(): np.int was removed in NumPy 1.24
        size2 = int(np.around(size[i]))
        if size2 % 2 == 0:
            size2 += 1
        if (i+1) % 10 == 0:
            print('min win: %4d, max win: %4d, current win: %4d'%(int(np.around(size_min)), int(np.around(size_max)), size2))
        g2d = gaussian(size2, sigma*size2/size_max, scale=1.0)
        #weighted Gaussian filtering; (scale==0) guards against division by zero
        scale = ss.fftconvolve(wgt, g2d, mode='same')
        flt[:, :, i] = ss.fftconvolve(ionos*wgt, g2d, mode='same') / (scale + (scale==0))
        #variance of resulting filtered sample
        scale = scale**2
        var = ss.fftconvolve(wgt, g2d**2, mode='same') / (scale + (scale==0))
        #in case there is a large area without data where scale is very small, which leads to weird values in variance
        var[np.nonzero(var<0)] = 0
        std[:, :, i] = np.sqrt(var)

    #pick, per pixel, the window whose std is closest to the overall mean std;
    #zero-std (no-data) entries are pushed beyond the maximum difference first
    std_mv = np.mean(std[np.nonzero(std!=0)], dtype=np.float64)
    diff_max = np.amax(np.absolute(std - std_mv)) + std_mv + 1
    std[np.nonzero(std==0)] = diff_max
    index = np.nonzero(np.ones((length, width))) + ((np.argmin(np.absolute(std - std_mv), axis=2)).reshape(length*width), )
    out = flt[index]
    out = out.reshape((length, width))

    #remove artifacts due to varying wgt
    size_smt = size_min
    if size_smt % 2 == 0:
        size_smt += 1
    g2d = gaussian(size_smt, size_smt/2.0, scale=1.0)
    scale = ss.fftconvolve((out!=0), g2d, mode='same')
    out2 = ss.fftconvolve(out, g2d, mode='same') / (scale + (scale==0))

    return out2
def filt_gaussian(self, ionParam):
    '''
    This function filters image using gaussian filter

    we projected the ionosphere value onto the ionospheric layer, and the indexes are integers.
    this reduces the number of samples used in filtering
    a better method is to project the indexes onto the ionospheric layer. This way we have orginal
    number of samples used in filtering. but this requries more complicated operation in filtering
    currently not implemented.
    a less accurate method is to use ionsphere without any projection
    '''
    #################################################
    #SET PARAMETERS HERE
    #if applying polynomial fitting
    #False: no fitting, True: with fitting
    fit = ionParam.ionFit
    #gaussian filtering window size
    size_max = ionParam.ionFilteringWinsizeMax
    size_min = ionParam.ionFilteringWinsizeMin
    #THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
    corThresholdIon = 0.85
    #################################################
    print('filtering ionosphere')
    #I find it's better to use ionosphere that is not projected, it's mostly slowlying changing anyway.
    #this should also be better for operational use.
    ionfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionRawNoProj)
    #since I decide to use ionosphere that is not projected, I should also use coherence that is not projected.
    corfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCorNoProj)
    #use ionosphere and coherence that are projected.
    #ionfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionRaw)
    #corfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCor)
    outfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionFilt)
    img = isceobj.createImage()
    img.load(ionfile + '.xml')
    width = img.width
    length = img.length
    #the ion file is interleaved by line: amplitude on even lines, ionospheric
    #phase on odd lines. read it from disk ONCE and take both channels as
    #strided views (the original read the same file twice).
    ionData = np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width)
    amp = ionData[0:length*2:2, :]
    ion = ionData[1:length*2:2, :]
    cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    ########################################################################################
    #AFTER COHERENCE IS RESAMPLED AT grd2ion, THERE ARE SOME WIRED VALUES
    cor[np.nonzero(cor<0)] = 0.0
    cor[np.nonzero(cor>1)] = 0.0
    ########################################################################################
    ion_fit = weight_fitting(ion, cor, width, length, 1, 1, 1, 1, 2, corThresholdIon)
    #no fitting
    if not fit:
        ion_fit *= 0
    #remove the fitted surface before filtering; it is added back afterwards
    ion -= ion_fit * (ion!=0)
    #minimize the effect of low coherence pixels
    #cor[np.nonzero( (cor<0.85)*(cor!=0) )] = 0.00001
    #filt = adaptive_gaussian(ion, cor, size_max, size_min)
    #cor**14 should be a good weight to use. 22-APR-2018
    filt = adaptive_gaussian(ion, cor**14, size_max, size_min)
    filt += ion_fit * (filt!=0)
    #write amplitude and filtered ionosphere interleaved, same layout as input
    ion = np.zeros((length*2, width), dtype=np.float32)
    ion[0:length*2:2, :] = amp
    ion[1:length*2:2, :] = filt
    ion.astype(np.float32).tofile(outfile)
    img.filename = outfile
    img.extraFilename = outfile + '.vrt'
    img.renderHdr()
def ionosphere_shift(self, ionParam):
    '''
    calculate azimuth shift caused by ionosphere using ionospheric phase
    '''
    #################################################
    #SET PARAMETERS HERE
    #gaussian filtering window size
    #size = np.int(np.around(width / 12.0))
    #size = ionParam.ionshiftFilteringWinsize
    size_max = ionParam.ionshiftFilteringWinsizeMax
    size_min = ionParam.ionshiftFilteringWinsizeMin
    #THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
    #if applying polynomial fitting
    #0: no fitting, 1: with fitting
    fit = 0
    corThresholdIonshift = 0.85
    #################################################
    ####################################################################
    #STEP 1. GET DERIVATIVE OF IONOSPHERE
    ####################################################################
    #get files
    ionfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionFilt)
    #we are using filtered ionosphere, so we should use coherence file that is not projected.
    #corfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCor)
    corfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCorNoProj)
    img = isceobj.createImage()
    img.load(ionfile + '.xml')
    width = img.width
    length = img.length
    #the ion file interleaves amplitude (even lines) and phase (odd lines);
    #read it once and take both channels as strided views (was read twice before)
    ionData = np.fromfile(ionfile, dtype=np.float32).reshape(length*2, width)
    amp = ionData[0:length*2:2, :]
    ion = ionData[1:length*2:2, :]
    cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    ########################################################################################
    #AFTER COHERENCE IS RESAMPLED AT grd2ion, THERE ARE SOME WIRED VALUES
    cor[np.nonzero(cor<0)] = 0.0
    cor[np.nonzero(cor>1)] = 0.0
    ########################################################################################
    #get the azimuth derivative of ionosphere
    dion = np.diff(ion, axis=0)
    dion = np.concatenate((dion, np.zeros((1,width))), axis=0)
    #remove the samples affected by zeros
    flag_ion0 = (ion!=0)
    #moving down by one line
    flag_ion1 = np.roll(flag_ion0, 1, axis=0)
    flag_ion1[0,:] = 0
    #moving up by one line
    flag_ion2 = np.roll(flag_ion0, -1, axis=0)
    flag_ion2[-1,:] = 0
    #now remove the samples affected by zeros
    flag_ion = flag_ion0 * flag_ion1 * flag_ion2
    dion *= flag_ion
    flag = flag_ion * (cor>corThresholdIonshift)
    index = np.nonzero(flag)
    ####################################################################
    #STEP 2. FIT A POLYNOMIAL TO THE DERIVATIVE OF IONOSPHERE
    ####################################################################
    order = 3
    #look for data to use
    point_index = np.nonzero(flag)
    m = point_index[0].shape[0]
    #calculate input index matrix
    #(np.tile replaces the deprecated np.matlib.repmat with identical output)
    x0 = np.tile(np.arange(width), (length, 1))
    y0 = np.tile(np.arange(length).reshape(length, 1), (1, width))
    x = x0[point_index].reshape(m, 1)
    y = y0[point_index].reshape(m, 1)
    z = dion[point_index].reshape(m, 1)
    w = cor[point_index].reshape(m, 1)
    #convert to higher precision type before use
    #(np.asfarray was removed in numpy 2.0; np.asarray with dtype is equivalent)
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    z = np.asarray(z, dtype=np.float64)
    w = np.asarray(w, dtype=np.float64)
    coeff = fit_surface(x, y, z, w, order)
    rgindex = np.arange(width)
    azindex = np.arange(length).reshape(length, 1)
    #convert to higher precision type before use
    rgindex = np.asarray(rgindex, dtype=np.float64)
    azindex = np.asarray(azindex, dtype=np.float64)
    dion_fit = cal_surface(rgindex, azindex, coeff, order)
    #no fitting
    if fit == 0:
        dion_fit *= 0
    dion_res = (dion - dion_fit)*(dion!=0)
    ####################################################################
    #STEP 3. FILTER THE RESIDUAL OF THE DERIVATIVE OF IONOSPHERE
    ####################################################################
    #this will be affected by low coherence areas like water, so not use this.
    #filter the derivation of ionosphere
    #if size % 2 == 0:
    #    size += 1
    #sigma = size / 2.0
    #g2d = gaussian(size, sigma, scale=1.0)
    #scale = ss.fftconvolve((dion_res!=0), g2d, mode='same')
    #dion_filt = ss.fftconvolve(dion_res, g2d, mode='same') / (scale + (scale==0))
    #minimize the effect of low coherence pixels
    cor[np.nonzero( (cor<0.85)*(cor!=0) )] = 0.00001
    dion_filt = adaptive_gaussian(dion_res, cor, size_max, size_min)
    dion = (dion_fit + dion_filt)*(dion!=0)
    #return dion
    ####################################################################
    #STEP 4. CONVERT TO AZIMUTH SHIFT
    ####################################################################
    #use the satellite height of the mid burst of first swath of reference acquistion
    swathList = self._insar.getValidSwathList(self.swaths)
    reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swathList[0])))
    minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swathList[0]-1)
    #no problem with this index at all
    #(builtin int replaces the removed np.int alias)
    midBurst = int(np.around((minBurst+ maxBurst-1) / 2.0))
    masBurst = reference.bursts[midBurst]
    #shift casued by ionosphere [unit: masBurst.azimuthTimeInterval]
    rng = masBurst.rangePixelSize * ((np.arange(width))*ionParam.numberRangeLooks + (ionParam.numberRangeLooks - 1.0) / 2.0) + masBurst.startingRange
    Ka = masBurst.azimuthFMRate(rng)
    ionShift = dion / (masBurst.azimuthTimeInterval * ionParam.numberAzimuthLooks) / (4.0 * np.pi) / Ka[None, :] / masBurst.azimuthTimeInterval
    #output
    outfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionShift)
    tmp = np.zeros((length*2, width), dtype=np.float32)
    tmp[0:length*2:2, :] = amp
    tmp[1:length*2:2, :] = ionShift
    tmp.astype(np.float32).tofile(outfile)
    img.filename = outfile
    img.extraFilename = outfile + '.vrt'
    img.renderHdr()
def ion2grd(self, ionParam):
    '''
    resample the filtered ionosphere from the ionospheric layer back to the
    ground layer, and write one ionospheric phase file per burst
    (ion/ion_burst/IWx/burst_%02d.ion).

    depending on ionParam.azshiftFlag, the phase error caused by non-zero
    center frequency and the azimuth shift caused by ionosphere is also added.
    '''
    from scipy import interpolate
    from scipy.interpolate import interp1d
    #################################################
    #SET PARAMETERS HERE
    #correct phase error caused by non-zero center frequency
    #and azimuth shift caused by ionosphere
    #0: no correction
    #1: use mean value of a burst
    #2: use full burst
    azshiftFlag = ionParam.azshiftFlag
    #################################################
    print('resampling ionosphere from ionospheric layer to ground')
    #get files
    ionFiltFile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionFilt)
    dionfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionShift)
    corfile = os.path.join(ionParam.ionDirname, ionParam.ioncalDirname, ionParam.ionCorNoProj)
    img = isceobj.createImage()
    img.load(ionFiltFile + '.xml')
    width = img.width
    length = img.length
    #odd lines of the interleaved files hold the data channel
    ion = (np.fromfile(ionFiltFile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    dion = (np.fromfile(dionfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    cor = (np.fromfile(corfile, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
    print('resampling ionosphere in range')
    #in the following, column index of burst (one look) will never exceed merged image index (one look) on the left side.
    #so we only add one multi-looked sample on the right side in case it exceeds on this side
    #index starts from 0
    ionOneRangeLook = np.zeros((length, (width+1)*ionParam.numberRangeLooks), dtype=np.float32)
    if azshiftFlag == 2:
        dionOneRangeLook = np.zeros((length, (width+1)*ionParam.numberRangeLooks), dtype=np.float32)
    indexRange = np.linspace(1-1, (width+1)*ionParam.numberRangeLooks-1, num=(width+1)*ionParam.numberRangeLooks, endpoint=True)
    indexRange2 = multilookIndex(1-1, width*ionParam.numberRangeLooks-1, ionParam.numberRangeLooks)
    for i in range(length):
        f = interp1d(indexRange2, ion[i, :], kind='cubic', fill_value="extrapolate")
        ionOneRangeLook[i, :] = f(indexRange)
        if azshiftFlag == 2:
            f2 = interp1d(indexRange2, dion[i, :], kind='cubic', fill_value="extrapolate")
            dionOneRangeLook[i, :] = f2(indexRange)
    #use the satellite height of the mid burst of first swath of reference acquistion
    swathList = self._insar.getValidSwathList(self.swaths)
    reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swathList[0])))
    minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swathList[0]-1)
    #no problem with this index at all
    #(builtin int replaces the np.int alias removed from numpy)
    midBurst = int(np.around((minBurst+ maxBurst-1) / 2.0))
    masBurst = reference.bursts[midBurst]
    #satellite height
    satHeight = np.linalg.norm(masBurst.orbit.interpolateOrbit(masBurst.sensingMid, method='hermite').getPosition())
    #orgininal doppler offset should be multiplied by this ratio
    ratio = ionParam.ionHeight/(satHeight-ionParam.earthRadius)
    xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.fineIfgDirname)
    (box, burstValidBox, burstValidBox2, frames) = getMergeBox(self, xmlDirname, numberRangeLooks=ionParam.numberRangeLooks, numberAzimuthLooks=ionParam.numberAzimuthLooks)
    ##############################################################################################################
    swathList = self._insar.getValidSwathList(self.swaths)
    frames=[]
    swathList2 = []
    minBurst2 =[]
    #for valid swaths and bursts, consistent with runMergeBursts.py
    for swath in swathList:
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        if minBurst==maxBurst:
            print('Skipping processing of swath {0}'.format(swath))
            continue
        ifg = self._insar.loadProduct( os.path.join(xmlDirname, 'IW{0}.xml'.format(swath)))
        frames.append(ifg)
        swathList2.append(swath)
        minBurst2.append(minBurst)
    ##############################################################################################################
    print('resampling ionosphere in azimuth')
    nswath = len(frames)
    for i in range(nswath):
        nburst = len(frames[i].bursts)
        ###output directory for burst ionosphere
        outdir = os.path.join(ionParam.ionDirname, ionParam.ionBurstDirname, 'IW{0}'.format(swathList2[i]))
        os.makedirs(outdir, exist_ok=True)
        for j in range(nburst):
            #according to runBurstIfg.py, this is originally from self._insar.referenceSlcProduct, 'IW{0}.xml'
            masBurst = frames[i].bursts[j]
            (dopplerOffset, Ka) = computeDopplerOffset(masBurst, 1, masBurst.numberOfLines, 1, masBurst.numberOfSamples, nrlks=1, nalks=1)
            offset = ratio * dopplerOffset
            #output ionosphere for this burst
            burstIon = np.zeros((masBurst.numberOfLines, masBurst.numberOfSamples), dtype=np.float32)
            burstDion = np.zeros((masBurst.numberOfLines, masBurst.numberOfSamples), dtype=np.float32)
            #line/column offsets between index in merged image and index in burst
            lineOff = burstValidBox[i][j][0] - burstValidBox2[i][j][0]
            columnOff = burstValidBox[i][j][2] - burstValidBox2[i][j][2]
            #use index starts from 0
            #1-look index of burst in the 1-look merged image
            indexBurst0 = np.linspace(0+lineOff, masBurst.numberOfLines-1+lineOff, num=masBurst.numberOfLines, endpoint=True)
            #1-look index of multi-looked merged image in the 1-look merged image
            indexMerged = multilookIndex(1-1, length*ionParam.numberAzimuthLooks-1, ionParam.numberAzimuthLooks)
            for k in range(masBurst.numberOfSamples):
                index = indexMerged
                value = ionOneRangeLook[:, k+columnOff]
                f = interp1d(index, value, kind='cubic', fill_value="extrapolate")
                indexBurst = indexBurst0 + offset[:, k]
                burstIon[:, k] = f(indexBurst)
                if azshiftFlag == 2:
                    value2 = dionOneRangeLook[:, k+columnOff]
                    f2 = interp1d(index, value2, kind='cubic', fill_value="extrapolate")
                    burstDion[:, k] = f2(indexBurst)
            #calculate phase caused by ionospheric shift and non-zero center frequency
            #index after multi-looking in merged image, index starts from 1
            first_line = int(np.around((burstValidBox[i][j][0] - 1) / ionParam.numberAzimuthLooks + 1))
            last_line = int(np.around(burstValidBox[i][j][1] / ionParam.numberAzimuthLooks))
            first_sample = int(np.around((burstValidBox[i][j][2] - 1) / ionParam.numberRangeLooks + 1))
            last_sample = int(np.around(burstValidBox[i][j][3] / ionParam.numberRangeLooks))
            burstDionMultilook = dion[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
            #for avoid areas with strong decorrelation like water
            burstCorMultilook = cor[first_line-1:last_line-1+1, first_sample-1:last_sample-1+1]
            #index = np.nonzero(burstDionMultilook!=0)
            index = np.nonzero(burstCorMultilook>0.85)
            if len(index[0]) < 10:
                dionMean = 0.0
            else:
                dionMean = np.mean(burstDionMultilook[index], dtype=np.float64)
            if azshiftFlag == 0:
                #no correction
                burstIonShift = 0
            elif azshiftFlag == 1:
                #use mean value
                burstIonShift = 2.0 * np.pi * (dopplerOffset * Ka[None,:] * (masBurst.azimuthTimeInterval)) * (dionMean*masBurst.azimuthTimeInterval)
            elif azshiftFlag == 2:
                #use full burst
                burstIonShift = 2.0 * np.pi * (dopplerOffset * Ka[None,:] * (masBurst.azimuthTimeInterval)) * (burstDion*masBurst.azimuthTimeInterval)
            else:
                raise Exception('unknown option for correcting azimuth shift caused by ionosphere!')
            burstIon += burstIonShift
            print('resampling burst %02d of swath %d, azimuth shift caused by ionosphere: %8.5f azimuth lines'%(minBurst2[i]+j+1, swathList2[i], dionMean))
            #create xml and vrt files
            filename = os.path.join(outdir, '%s_%02d.ion'%('burst', minBurst2[i]+j+1))
            burstIon.astype(np.float32).tofile(filename)
            burstImg = isceobj.createImage()
            burstImg.setDataType('FLOAT')
            burstImg.setFilename(filename)
            burstImg.extraFilename = filename + '.vrt'
            burstImg.setWidth(masBurst.numberOfSamples)
            burstImg.setLength(masBurst.numberOfLines)
            burstImg.renderHdr()
    print('')
def multilook(data, nalks, nrlks):
    '''
    doing multiple looking
    ATTENTION:
    NO AVERAGING BY DIVIDING THE NUMBER OF TOTAL SAMPLES IS DONE.

    data:  2-D array
    nalks: number of looks in the azimuth (row) direction
    nrlks: number of looks in the range (column) direction

    returns a (length//nalks, width//nrlks) array of block sums;
    trailing rows/columns that do not fill a whole look are dropped.
    '''
    (length, width)=data.shape
    #builtin int replaces the np.int alias removed from numpy; same truncation
    width2 = int(width/nrlks)
    length2 = int(length/nalks)
    #first sum nalks lines at a time...
    tmp2 = np.zeros((length2, width), dtype=data.dtype)
    data2 = np.zeros((length2, width2), dtype=data.dtype)
    for i in range(nalks):
        tmp2 += data[i:length2*nalks:nalks, :]
    #...then sum nrlks samples at a time
    for i in range(nrlks):
        data2 += tmp2[:, i:width2*nrlks:nrlks]
    return data2
def get_overlap_box(swath, minBurst, maxBurst):
    '''
    compute the overlap area between each pair of adjacent bursts.

    swath:    product object whose .bursts list holds the burst objects
    minBurst: first burst index (inclusive)
    maxBurst: last burst index (exclusive)

    returns a list overlapBox where entry ii (ii>=1) describes the overlap of
    burst ii-1 (top) and burst ii (current), all indexes starting from 1:
    [firstLine, lastLine, firstSample, lastSample,      <- in top burst
     firstLine, lastLine, firstSample, lastSample]      <- in current burst
    overlapBox[0] is an empty placeholder. returns None if <= 1 burst.
    '''
    #number of burst
    nBurst = maxBurst - minBurst
    if nBurst <= 1:
        print('number of burst: {}, no need to get overlap box'.format(nBurst))
        return None
    overlapBox = []
    overlapBox.append([])
    for ii in range(minBurst+1, maxBurst):
        topBurst = swath.bursts[ii-1]
        curBurst = swath.bursts[ii]
        #overlap lines, line index starts from 1
        #(builtin int replaces the np.int alias removed from numpy)
        offLine = int(np.round( (curBurst.sensingStart - topBurst.sensingStart).total_seconds() / curBurst.azimuthTimeInterval))
        firstLineTop = topBurst.firstValidLine + 1
        lastLineTop = topBurst.firstValidLine + topBurst.numValidLines
        firstLineCur = offLine + curBurst.firstValidLine + 1
        lastLineCur = offLine + curBurst.firstValidLine + curBurst.numValidLines
        if lastLineTop < firstLineCur:
            raise Exception('there is not enough overlap between burst {} and burst {}\n'.format(ii-1+1, ii+1))
        firstLine = firstLineCur
        lastLine = lastLineTop
        #overlap samples, sample index starts from 1
        offSample = int(np.round( (curBurst.startingRange - topBurst.startingRange) / curBurst.rangePixelSize ))
        firstSampleTop = topBurst.firstValidSample + 1
        lastSampleTop = topBurst.firstValidSample + topBurst.numValidSamples
        firstSampleCur = offSample + curBurst.firstValidSample + 1
        lastSampleCur = offSample + curBurst.firstValidSample + curBurst.numValidSamples
        firstSample = max(firstSampleTop, firstSampleCur)
        lastSample = min(lastSampleTop, lastSampleCur)
        #overlap area index. all indexes start from 1.
        #          | top burst                            | current burst                                                        |
        #            0          1         2            3    4                  5                 6                     7
        overlapBox.append([firstLine, lastLine, firstSample, lastSample, firstLine-offLine, lastLine-offLine, firstSample-offSample, lastSample-offSample])
    return overlapBox
def esd(self, ionParam):
    '''
    esd after ionosphere correction

    estimate residual azimuth misregistration from the double-difference phase
    of adjacent-burst overlap areas (ESD), convert it to a phase error using
    the Doppler-centroid frequency ramp, and add that phase to the per-burst
    ionosphere files (*.ion) in place.
    NOTE(review): assumes the per-burst *.ion files already exist — ion2grd
    must have run first; verify step ordering in runIon.
    '''
    ######################################
    #SET PARAMETERS HERE
    #THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
    nalks = 5
    nrlks = 30
    corThreshold = 0.75
    ######################################
    print('applying ESD to compensate phase error caused by residual misregistration')
    virtual = self.useVirtualFiles
    swathList = self._insar.getValidSwathList(self.swaths)
    for swath in swathList:
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        nBurst = maxBurst - minBurst
        if nBurst <= 1:
            continue
        ####Load relevant products
        reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swath)))
        secondary = self._insar.loadProduct( os.path.join(self._insar.fineCoregDirname, 'IW{0}.xml'.format(swath)))
        #get overlap area
        for ii in range(minBurst, maxBurst):
            jj = ii - minBurst
            ####Process the top bursts
            masBurst = reference.bursts[ii]
            slvBurst = secondary.bursts[jj]
            adjustValidLineSample(masBurst,slvBurst)
        #1-based line/sample ranges of each adjacent-burst overlap, in both bursts
        overlapBox = get_overlap_box(reference, minBurst, maxBurst)
        #using esd to calculate mis-registration
        misreg = np.array([])
        totalSamples = 0
        for ii in range(minBurst+1, maxBurst):
            jj = ii - minBurst
            ####Process the top bursts
            masBurstTop = reference.bursts[ii-1]
            slvBurstTop = secondary.bursts[jj-1]
            masBurstCur = reference.bursts[ii]
            slvBurstCur = secondary.bursts[jj]
            #get info
            referencename = masBurstTop.image.filename
            secondaryname = slvBurstTop.image.filename
            ionname = os.path.join(ionParam.ionDirname, ionParam.ionBurstDirname, 'IW{0}'.format(swath), '%s_%02d.ion'%('burst',ii+1-1))
            rngname = os.path.join(self._insar.fineOffsetsDirname, 'IW{0}'.format(swath), 'range_%02d.off'%(ii+1-1))
            fact = 4.0 * np.pi * slvBurstTop.rangePixelSize / slvBurstTop.radarWavelength
            #infTop = multiply2(referencename, secondaryname, ionname, rngname, fact, overlapBox[jj][0:4], virtual=virtual)
            infTop = multiply2(referencename, secondaryname, fact, rngname=rngname, ionname=ionname, infname=None, overlapBox=overlapBox[jj][0:4], valid=True, virtual=virtual)
            (dopTop, Ka) = computeDopplerOffset(masBurstTop, overlapBox[jj][0], overlapBox[jj][1], overlapBox[jj][2], overlapBox[jj][3], nrlks=nrlks, nalks=nalks)
            #rng = multilookIndex(overlapBox[jj][2]-1, overlapBox[jj][3]-1, nrlks) * masBurstTop.rangePixelSize + masBurstTop.startingRange
            #Ka = masBurstTop.azimuthFMRate(rng)
            frqTop = dopTop * Ka[None,:] * (masBurstTop.azimuthTimeInterval * nalks)
            referencename = masBurstCur.image.filename
            secondaryname = slvBurstCur.image.filename
            ionname = os.path.join(ionParam.ionDirname, ionParam.ionBurstDirname, 'IW{0}'.format(swath), '%s_%02d.ion'%('burst',ii+1))
            rngname = os.path.join(self._insar.fineOffsetsDirname, 'IW{0}'.format(swath), 'range_%02d.off'%(ii+1))
            fact = 4.0 * np.pi * slvBurstCur.rangePixelSize / slvBurstCur.radarWavelength
            #infCur = multiply2(referencename, secondaryname, ionname, rngname, fact, overlapBox[jj][4:8], virtual=virtual)
            infCur = multiply2(referencename, secondaryname, fact, rngname=rngname, ionname=ionname, infname=None, overlapBox=overlapBox[jj][4:8], valid=True, virtual=virtual)
            (dopCur, Ka) = computeDopplerOffset(masBurstCur, overlapBox[jj][4], overlapBox[jj][5], overlapBox[jj][6], overlapBox[jj][7], nrlks=nrlks, nalks=nalks)
            #rng = multilookIndex(overlapBox[jj][6]-1, overlapBox[jj][7]-1, nrlks) * masBurstCur.rangePixelSize + masBurstCur.startingRange
            #Ka = masBurstCur.azimuthFMRate(rng)
            frqCur = dopCur * Ka[None,:] * (masBurstCur.azimuthTimeInterval * nalks)
            infTop = multilook(infTop, nalks, nrlks)
            infCur = multilook(infCur, nalks, nrlks)
            #double-difference interferogram: its phase is proportional to the
            #misregistration times the frequency difference of the two bursts
            infDif = infTop * np.conjugate(infCur)
            cor = cal_coherence(infDif, win=3, edge=4)
            index = np.nonzero(cor > corThreshold)
            totalSamples += infTop.size
            if index[0].size:
                #misregistration in sec. it should be OK to only use reference frequency to compute ESD
                misreg0 = np.angle(infDif[index]) / (2.0 * np.pi * (frqTop[index]-frqCur[index]))
                misreg=np.append(misreg, misreg0.flatten())
                print("misregistration at burst %02d and burst %02d of swath %d: %10.5f azimuth lines"%(ii+1-1, ii+1, swath, np.mean(misreg0, dtype=np.float64)/masBurstCur.azimuthTimeInterval))
            else:
                print("no samples available for ESD at burst %02d and burst %02d of swath %d"%(ii+1-1, ii+1, swath))
        percentage = 100.0 * len(misreg) / totalSamples
        #number of samples per overlap: 100/5*23334/150 = 3111.2
        print("samples available for ESD at swath %d: %d out of %d available, percentage: %5.1f%%"%(swath, len(misreg), totalSamples, percentage))
        #only apply ESD when enough high-coherence samples were found
        if len(misreg) < 1000:
            print("too few samples available for ESD, no ESD correction will be applied\n")
            misreg = 0
            continue
        else:
            misreg = np.mean(misreg, dtype=np.float64)
            print("misregistration from ESD: {} sec, {} azimuth lines\n".format(misreg, misreg/reference.bursts[minBurst].azimuthTimeInterval))
        #use mis-registration estimated from esd to compute phase error
        for ii in range(minBurst, maxBurst):
            jj = ii - minBurst
            ####Process the top bursts
            masBurst = reference.bursts[ii]
            slvBurst = secondary.bursts[jj]
            ionname = os.path.join(ionParam.ionDirname, ionParam.ionBurstDirname, 'IW{0}'.format(swath), '%s_%02d.ion'%('burst',ii+1))
            ion = np.fromfile(ionname, dtype=np.float32).reshape(masBurst.numberOfLines, masBurst.numberOfSamples)
            (dopplerOffset, Ka) = computeDopplerOffset(masBurst, 1, masBurst.numberOfLines, 1, masBurst.numberOfSamples, nrlks=1, nalks=1)
            centerFrequency = dopplerOffset * Ka[None,:] * (masBurst.azimuthTimeInterval)
            ion += 2.0 * np.pi * centerFrequency * misreg
            #overwrite
            ion.astype(np.float32).tofile(ionname)
def esd_noion(self, ionParam):
    '''
    esd WITHOUT applying ionosphere correction

    same ESD estimation as esd(), but the overlap interferograms are formed
    without removing the ionospheric phase (multiply2 is called with
    ionname=None), and the resulting phase error is written to separate
    per-burst files (esd/IWx/burst_%02d.esd) instead of being added to the
    per-burst *.ion files.
    '''
    ######################################
    #SET PARAMETERS HERE
    #THESE SHOULD BE GOOD ENOUGH, NO NEED TO SET IN setup(self)
    nalks = 5
    nrlks = 30
    corThreshold = 0.75
    ######################################
    print('applying ESD to compensate phase error caused by residual misregistration')
    esddir = 'esd'
    virtual = self.useVirtualFiles
    swathList = self._insar.getValidSwathList(self.swaths)
    for swath in swathList:
        minBurst, maxBurst = self._insar.commonReferenceBurstLimits(swath-1)
        nBurst = maxBurst - minBurst
        if nBurst <= 1:
            continue
        ####Load relevant products
        reference = self._insar.loadProduct( os.path.join(self._insar.referenceSlcProduct, 'IW{0}.xml'.format(swath)))
        secondary = self._insar.loadProduct( os.path.join(self._insar.fineCoregDirname, 'IW{0}.xml'.format(swath)))
        #get overlap area
        for ii in range(minBurst, maxBurst):
            jj = ii - minBurst
            ####Process the top bursts
            masBurst = reference.bursts[ii]
            slvBurst = secondary.bursts[jj]
            adjustValidLineSample(masBurst,slvBurst)
        #1-based line/sample ranges of each adjacent-burst overlap, in both bursts
        overlapBox = get_overlap_box(reference, minBurst, maxBurst)
        #using esd to calculate mis-registration
        misreg = np.array([])
        totalSamples = 0
        for ii in range(minBurst+1, maxBurst):
            jj = ii - minBurst
            ####Process the top bursts
            masBurstTop = reference.bursts[ii-1]
            slvBurstTop = secondary.bursts[jj-1]
            masBurstCur = reference.bursts[ii]
            slvBurstCur = secondary.bursts[jj]
            #get info
            referencename = masBurstTop.image.filename
            secondaryname = slvBurstTop.image.filename
            ionname = os.path.join(ionParam.ionDirname, ionParam.ionBurstDirname, 'IW{0}'.format(swath), '%s_%02d.ion'%('burst',ii+1-1))
            rngname = os.path.join(self._insar.fineOffsetsDirname, 'IW{0}'.format(swath), 'range_%02d.off'%(ii+1-1))
            fact = 4.0 * np.pi * slvBurstTop.rangePixelSize / slvBurstTop.radarWavelength
            #infTop = multiply2(referencename, secondaryname, ionname, rngname, fact, overlapBox[jj][0:4], virtual=virtual)
            infTop = multiply2(referencename, secondaryname, fact, rngname=rngname, ionname=None, infname=None, overlapBox=overlapBox[jj][0:4], valid=True, virtual=virtual)
            (dopTop, Ka) = computeDopplerOffset(masBurstTop, overlapBox[jj][0], overlapBox[jj][1], overlapBox[jj][2], overlapBox[jj][3], nrlks=nrlks, nalks=nalks)
            #rng = multilookIndex(overlapBox[jj][2]-1, overlapBox[jj][3]-1, nrlks) * masBurstTop.rangePixelSize + masBurstTop.startingRange
            #Ka = masBurstTop.azimuthFMRate(rng)
            frqTop = dopTop * Ka[None,:] * (masBurstTop.azimuthTimeInterval * nalks)
            referencename = masBurstCur.image.filename
            secondaryname = slvBurstCur.image.filename
            ionname = os.path.join(ionParam.ionDirname, ionParam.ionBurstDirname, 'IW{0}'.format(swath), '%s_%02d.ion'%('burst',ii+1))
            rngname = os.path.join(self._insar.fineOffsetsDirname, 'IW{0}'.format(swath), 'range_%02d.off'%(ii+1))
            fact = 4.0 * np.pi * slvBurstCur.rangePixelSize / slvBurstCur.radarWavelength
            #infCur = multiply2(referencename, secondaryname, ionname, rngname, fact, overlapBox[jj][4:8], virtual=virtual)
            infCur = multiply2(referencename, secondaryname, fact, rngname=rngname, ionname=None, infname=None, overlapBox=overlapBox[jj][4:8], valid=True, virtual=virtual)
            (dopCur, Ka) = computeDopplerOffset(masBurstCur, overlapBox[jj][4], overlapBox[jj][5], overlapBox[jj][6], overlapBox[jj][7], nrlks=nrlks, nalks=nalks)
            #rng = multilookIndex(overlapBox[jj][6]-1, overlapBox[jj][7]-1, nrlks) * masBurstCur.rangePixelSize + masBurstCur.startingRange
            #Ka = masBurstCur.azimuthFMRate(rng)
            frqCur = dopCur * Ka[None,:] * (masBurstCur.azimuthTimeInterval * nalks)
            infTop = multilook(infTop, nalks, nrlks)
            infCur = multilook(infCur, nalks, nrlks)
            #double-difference interferogram: its phase is proportional to the
            #misregistration times the frequency difference of the two bursts
            infDif = infTop * np.conjugate(infCur)
            cor = cal_coherence(infDif, win=3, edge=4)
            index = np.nonzero(cor > corThreshold)
            totalSamples += infTop.size
            if index[0].size:
                #misregistration in sec. it should be OK to only use reference frequency to compute ESD
                misreg0 = np.angle(infDif[index]) / (2.0 * np.pi * (frqTop[index]-frqCur[index]))
                misreg=np.append(misreg, misreg0.flatten())
                print("misregistration at burst %02d and burst %02d of swath %d: %10.5f azimuth lines"%(ii+1-1, ii+1, swath, np.mean(misreg0, dtype=np.float64)/masBurstCur.azimuthTimeInterval))
            else:
                print("no samples available for ESD at burst %02d and burst %02d of swath %d"%(ii+1-1, ii+1, swath))
        percentage = 100.0 * len(misreg) / totalSamples
        #number of samples per overlap: 100/5*23334/150 = 3111.2
        print("samples available for ESD at swath %d: %d out of %d available, percentage: %5.1f%%"%(swath, len(misreg), totalSamples, percentage))
        #only apply ESD when enough high-coherence samples were found
        if len(misreg) < 1000:
            print("too few samples available for ESD, no ESD correction will be applied\n")
            misreg = 0
            continue
        else:
            misreg = np.mean(misreg, dtype=np.float64)
            print("misregistration from ESD: {} sec, {} azimuth lines\n".format(misreg, misreg/reference.bursts[minBurst].azimuthTimeInterval))
        sdir = os.path.join(ionParam.ionDirname, esddir, 'IW{0}'.format(swath))
        os.makedirs(sdir, exist_ok=True)
        #use mis-registration estimated from esd to compute phase error
        for ii in range(minBurst, maxBurst):
            jj = ii - minBurst
            ####Process the top bursts
            masBurst = reference.bursts[ii]
            slvBurst = secondary.bursts[jj]
            #ionname = os.path.join(ionParam.ionDirname, ionParam.ionBurstDirname, 'IW{0}'.format(swath), '%s_%02d.ion'%('burst',ii+1))
            #ion = np.fromfile(ionname, dtype=np.float32).reshape(masBurst.numberOfLines, masBurst.numberOfSamples)
            (dopplerOffset, Ka) = computeDopplerOffset(masBurst, 1, masBurst.numberOfLines, 1, masBurst.numberOfSamples, nrlks=1, nalks=1)
            centerFrequency = dopplerOffset * Ka[None,:] * (masBurst.azimuthTimeInterval)
            ion = 2.0 * np.pi * centerFrequency * misreg
            #overwrite
            ionname = os.path.join(ionParam.ionDirname, esddir, 'IW{0}'.format(swath), '%s_%02d.esd'%('burst',ii+1))
            ion.astype(np.float32).tofile(ionname)
            #create xml and vrt files
            burstImg = isceobj.createImage()
            burstImg.setDataType('FLOAT')
            burstImg.setFilename(ionname)
            burstImg.extraFilename = ionname + '.vrt'
            burstImg.setWidth(masBurst.numberOfSamples)
            burstImg.setLength(masBurst.numberOfLines)
            burstImg.renderHdr()
def rawion(self, ionParam):
    '''
    a simple wrapper for computing the raw (unprojected) ionosphere.

    depending on ionParam.calIonWithMerged, either merge the bursts first and
    compute ionosphere from the merged interferograms, or process the data
    swath by swath.
    '''
    if ionParam.calIonWithMerged:
        #merge bursts
        merge(self, ionParam)
        #unwrap
        unwrap(self, ionParam)
        #compute ionosphere
        ionosphere(self, ionParam)
    else:
        #an alternative of the above steps: processing swath by swath
        ionSwathBySwath(self, ionParam)
def run_step(currentStep, ionParam):
    '''
    check whether currentStep falls between ionParam.startStep and
    ionParam.endStep (inclusive) in the overall step sequence.
    '''
    steps = ionParam.allSteps
    first = steps.index(ionParam.startStep)
    last = steps.index(ionParam.endStep)
    current = steps.index(currentStep)
    return first <= current <= last
def runIon(self):
    '''
    driver of ionospheric correction: run the processing steps selected by
    ionParam.startStep through ionParam.endStep, in order.
    '''
    #get processing parameters
    ionParam = setup(self)

    #if do ionospheric correction
    if not ionParam.doIon:
        return

    #form subband interferograms
    if run_step('subband', ionParam):
        subband(self, ionParam)

    #compute ionosphere (raw_no_projection.ion) and coherence (raw_no_projection.cor) without projection
    if run_step('rawion', ionParam):
        rawion(self, ionParam)
    #next we move to 'ion_cal' to do the remaining processing

    #resample ionosphere from the ground layer to ionospheric layer
    if run_step('grd2ion', ionParam):
        grd2ion(self, ionParam)

    #filter ionosphere
    if run_step('filt_gaussian', ionParam):
        filt_gaussian(self, ionParam)

    #ionosphere shift
    if run_step('ionosphere_shift', ionParam):
        ionosphere_shift(self, ionParam)

    #resample from ionospheric layer to ground layer, get ionosphere for each burst
    if run_step('ion2grd', ionParam):
        ion2grd(self, ionParam)

    #esd
    if run_step('esd', ionParam):
        esd(self, ionParam)

    #pure esd without applying ionospheric correction
    #esd_noion(self, ionParam)

    return
| 45.003765
| 236
| 0.639923
|
4a016518aa27e44e357a0485fd23684ed2c2ecca
| 3,211
|
py
|
Python
|
07-Elemental-Matrices-and-The-Properties-of-Inversion/02-Implement-Inverse-of-Matrix/playLA/Matrix.py
|
hcc817/Mtianyan-Play-with-Linear-Algebra
|
80e95a13cee0c4a8251adb84ff21956e7553638c
|
[
"Apache-2.0"
] | null | null | null |
07-Elemental-Matrices-and-The-Properties-of-Inversion/02-Implement-Inverse-of-Matrix/playLA/Matrix.py
|
hcc817/Mtianyan-Play-with-Linear-Algebra
|
80e95a13cee0c4a8251adb84ff21956e7553638c
|
[
"Apache-2.0"
] | null | null | null |
07-Elemental-Matrices-and-The-Properties-of-Inversion/02-Implement-Inverse-of-Matrix/playLA/Matrix.py
|
hcc817/Mtianyan-Play-with-Linear-Algebra
|
80e95a13cee0c4a8251adb84ff21956e7553638c
|
[
"Apache-2.0"
] | 1
|
2019-09-04T08:46:14.000Z
|
2019-09-04T08:46:14.000Z
|
from .Vector import Vector
class Matrix:
    """A simple dense matrix backed by a 2D list of numbers."""

    def __init__(self, list2d):
        """Create a matrix from a 2D list; each row is copied defensively."""
        self._values = [row[:] for row in list2d]

    @classmethod
    def zero(cls, r, c):
        """Return an r-by-c zero matrix."""
        return cls([[0] * c for _ in range(r)])

    @classmethod
    def identity(cls, n):
        """Return the n-by-n identity matrix."""
        m = [[0] * n for _ in range(n)]
        for i in range(n):
            m[i][i] = 1  # fixed: removed stray trailing semicolon
        return cls(m)

    def T(self):
        """Return the transpose of this matrix."""
        return Matrix([[e for e in self.col_vector(i)]
                       for i in range(self.col_num())])

    def __add__(self, another):
        """Entry-wise sum of two matrices with identical shapes."""
        assert self.shape() == another.shape(), \
            "Error in adding. Shape of matrix must be same."
        return Matrix([[a + b for a, b in zip(self.row_vector(i), another.row_vector(i))]
                       for i in range(self.row_num())])

    def __sub__(self, another):
        """Entry-wise difference of two matrices with identical shapes."""
        assert self.shape() == another.shape(), \
            "Error in subtracting. Shape of matrix must be same."
        return Matrix([[a - b for a, b in zip(self.row_vector(i), another.row_vector(i))]
                       for i in range(self.row_num())])

    def dot(self, another):
        """Return the product self . another for a Vector or Matrix operand.

        Raises:
            TypeError: if `another` is neither a Vector nor a Matrix
                (previously the method silently returned None here).
        """
        if isinstance(another, Vector):
            # matrix-vector multiplication
            assert self.col_num() == len(another), \
                "Error in Matrix-Vector Multiplication."
            return Vector([self.row_vector(i).dot(another)
                           for i in range(self.row_num())])
        if isinstance(another, Matrix):
            # matrix-matrix multiplication
            assert self.col_num() == another.row_num(), \
                "Error in Matrix-Matrix Multiplication."
            return Matrix([[self.row_vector(i).dot(another.col_vector(j))
                            for j in range(another.col_num())]
                           for i in range(self.row_num())])
        raise TypeError("dot() expects a Vector or a Matrix, got {}"
                        .format(type(another).__name__))

    def __mul__(self, k):
        """Scalar multiplication: self * k."""
        return Matrix([[e * k for e in self.row_vector(i)]
                       for i in range(self.row_num())])

    def __rmul__(self, k):
        """Scalar multiplication: k * self."""
        return self * k

    def __truediv__(self, k):
        """Scalar division: self / k."""
        return (1 / k) * self

    def __pos__(self):
        """Unary plus: return a copy scaled by 1."""
        return 1 * self

    def __neg__(self):
        """Unary minus: return the negated matrix."""
        return -1 * self

    def row_vector(self, index):
        """Return row `index` as a Vector."""
        return Vector(self._values[index])

    def col_vector(self, index):
        """Return column `index` as a Vector."""
        return Vector([row[index] for row in self._values])

    def __getitem__(self, pos):
        """Return the element at pos == (row, col)."""
        r, c = pos
        return self._values[r][c]

    def size(self):
        """Return the total number of elements."""
        r, c = self.shape()
        return r * c

    def row_num(self):
        """Return the number of rows."""
        return self.shape()[0]

    __len__ = row_num

    def col_num(self):
        """Return the number of columns."""
        return self.shape()[1]

    def shape(self):
        """Return the shape as (rows, cols)."""
        return len(self._values), len(self._values[0])

    def __repr__(self):
        return "Matrix({})".format(self._values)

    __str__ = __repr__
| 28.415929
| 108
| 0.530053
|
4a01667f874567443940efcb396c159e07defa8c
| 380
|
py
|
Python
|
spinach/contrib/spinachd/signals.py
|
0xDEC0DE/spinach
|
8a719cfed41183a01d834830b8b4a5bd14756ea4
|
[
"BSD-2-Clause"
] | null | null | null |
spinach/contrib/spinachd/signals.py
|
0xDEC0DE/spinach
|
8a719cfed41183a01d834830b8b4a5bd14756ea4
|
[
"BSD-2-Clause"
] | null | null | null |
spinach/contrib/spinachd/signals.py
|
0xDEC0DE/spinach
|
8a719cfed41183a01d834830b8b4a5bd14756ea4
|
[
"BSD-2-Clause"
] | null | null | null |
from django.db import reset_queries, close_old_connections
from spinach import signals
from .apps import spin
@signals.job_started.connect_via(spin.namespace)
def job_started(*args, job=None, **kwargs):
    """Spinach signal handler fired when a job starts.

    Resets Django's debug query log and closes DB connections that have
    outlived their max age, so the job begins with fresh connections.
    """
    reset_queries()
    close_old_connections()
@signals.job_finished.connect_via(spin.namespace)
def job_finished(*args, job=None, **kwargs):
    """Spinach signal handler fired when a job finishes.

    Closes expired DB connections so they are not reused by a later job.
    """
    close_old_connections()
| 22.352941
| 58
| 0.781579
|
4a01680e4425dfd5896b98730a016af1e361a87c
| 6,371
|
py
|
Python
|
Examples/Braced Frame - Spring Supported.py
|
maderero/PyNite
|
20fd90c126e3eb0487d541b86bd5a057208af780
|
[
"MIT"
] | null | null | null |
Examples/Braced Frame - Spring Supported.py
|
maderero/PyNite
|
20fd90c126e3eb0487d541b86bd5a057208af780
|
[
"MIT"
] | null | null | null |
Examples/Braced Frame - Spring Supported.py
|
maderero/PyNite
|
20fd90c126e3eb0487d541b86bd5a057208af780
|
[
"MIT"
] | null | null | null |
# Example of a basic 2D tension-only braced frame with gravity and lateral
# loads. Units used for the model in this example are inches and kips.
# This example demonstrates spring supports with tension-only and
# compression-only behavior in addition to the braced-frame basics.
# Import `FEModel3D` from `PyNite`
from PyNite import FEModel3D
# Create a new finite element model
braced_frame = FEModel3D()
# Add nodes (frame is 15 ft wide x 12 ft tall)
braced_frame.add_node('N1', 0, 0, 0)
braced_frame.add_node('N2', 0, 12*12, 0)
braced_frame.add_node('N3', 15*12, 12*12, 0)
braced_frame.add_node('N4', 15*12, 0*12, 0)
# Define column properties (use W10x33 from the AISC Manual):
E = 29000 # ksi
G = 11400 # ksi
Iy = 36.6 # in^4
Iz = 171 # in^4
J = 0.58 # in^4
A = 9.71 # in^2
# Define the columns
braced_frame.add_member('Col1', 'N1', 'N2', E, G, Iy, Iz, J, A)
braced_frame.add_member('Col2', 'N4', 'N3', E, G, Iy, Iz, J, A)
# Define beam properties (Use W8x24)
Iy = 18.3 # in^4
Iz = 82.7 # in^4
J = 0.346 # in^4
A = 7.08 # in^2
# Define the beams
braced_frame.add_member('Beam', 'N2', 'N3', E, G, Iy, Iz, J, A)
braced_frame.def_releases('Beam', Ryi=True, Rzi=True, Ryj=True, Rzj=True)
# Define the brace properties
# We'll use a section with L/r <= 300 which is a common rule of thumb for
# tension members. We'll use L4x4x1/4.
Iy = 3 # in^4
Iz = 3 # in^4
J = 0.0438 # in^4
A = 1.94 # in^2
# Define a brace (tension and compression - both ways)
braced_frame.add_member('Brace1', 'N1', 'N3', E, G, Iy, Iz, J, A)
# Let's add spring supports to the base of the structure. We'll add a couple of
# extra nodes at the base of the structure that will receive the springs. The
# lengths of these springs is irrelevant, since we're defining a spring constant
# that is independent of the spring's length. Only the direction of the spring
# matters as it defines the direction of the spring's stiffness. The nodes will
# be directly below N1 and N4 in the Y-direction.
braced_frame.add_node('N1s', 0, -2*12, 0)
braced_frame.add_node('N4s', 15*12, -2*12, 0)
# Spring stiffness is 10000 kips/in for both springs below.
braced_frame.add_spring('Spring1','N1', 'N1s', 10000, tension_only=True,
                        comp_only=False)
braced_frame.add_spring('Spring2','N4', 'N4s', 10000, tension_only=False,
                        comp_only=True) # The structure would be unstable if
                                        # this was tension only
# Release the brace ends to form an axial member
braced_frame.def_releases('Brace1', Ryi=True, Rzi=True, Ryj=True, Rzj=True)
# Springs only carry axial loads, nothing else, so we'll need to stabilize
# the column bases in the other directions. The column bases will be
# supported by the springs vertically. For the other directions (horizontally
# and about the Y-axis) we'll need to provide supports.
braced_frame.def_support('N1', support_DX=True, support_DZ=True, support_RY=True)
braced_frame.def_support('N4', support_DX=True, support_DZ=True, support_RY=True)
# Fix the nodes supporting the bottoms of the springs. Note that even though
# we're fixing these nodes, the only reactions the supports will carry will
# be in the Y-direction, due to the fact that the spring only has stiffness in
# that direction. We fix the node so that it's not free to spin or translate
# in the other directions however. If we didn't the node would be unstable and
# the model would crash. PyNite is unforgiving in this regard. Every degree of
# freedom (3 translations and 3 rotations) at every node must be stabilized so
# it's not free to move infinitely.
braced_frame.def_support('N1s', support_DX=True, support_DY=True, support_DZ=True, support_RX=True, support_RY=True, support_RZ=True)
braced_frame.def_support('N4s', support_DX=True, support_DY=True, support_DZ=True, support_RX=True, support_RY=True, support_RZ=True)
# Stabilize the frame in the global Z-direction so it doesn't tip over
# out-of-plane.
braced_frame.def_support('N2', support_DZ=True)
braced_frame.def_support('N3', support_DZ=True)
# Add self weight dead loads to the frame.
# Note that we could leave 'x1' and 'x2' undefined below and it would default
# to the full member length. Note also that the direction uses lowercase
# notations to indicate member local coordinate systems. Brace loads have been
# neglected.
# Distributed loads are in kips/in (member self-weight converted from kips/ft).
braced_frame.add_member_dist_load('Beam', Direction='Fy', w1=-0.024/12,
                                  w2=-0.024/12, x1=0, x2=15*12, case='D')
braced_frame.add_member_dist_load('Col1', Direction='Fx', w1=-0.033/12,
                                  w2=-0.033/12, x1=0, x2=12*12, case='D')
braced_frame.add_member_dist_load('Col2', Direction='Fx', w1=-0.033/12,
                                  w2=-0.033/12, x1=0, x2=12*12, case='D')
# Add nodal wind loads of 25 kips to each side of the frame. Note that the
# direction uses uppercase notation to indicate model global coordinate
# system.
braced_frame.add_node_load('N2', Direction='FX', P=25, case='W')
braced_frame.add_node_load('N3', Direction='FX', P=25, case='W')
# Create load combinations
# Note that the load combination '1.4D' has no lateral load, but does have
# gravity load. The gravity load forces the tension only spring to receive
# minor compression, which causes it to be deactivated on the first iteration.
# Once deactivated the model is unstable and an exception is thrown. This is
# normal and correct behavior. Load combination '1.4D' has been commented out,
# but you can uncomment it to see for yourself what happens.
# braced_frame.add_load_combo('1.4D', factors={'D':1.4})
braced_frame.add_load_combo('1.2D+1.0W', factors={'D':1.2, 'W':1.0})
braced_frame.add_load_combo('0.9D+1.0W', factors={'D':0.9, 'W':1.0})
# Analyze the braced frame
# P-Delta analysis could also be performed using braced_frame.analyze_PDelta().
# Generally, P-Delta analysis will have little effect on a model of a braced
# frame, as there is usually very little bending moment in the members.
braced_frame.analyze()
# Display the deformed shape of the structure magnified 50 times with the text
# height 5 model units (inches) high.
from PyNite import Visualization
Visualization.render_model(braced_frame, annotation_size=5, deformed_shape=True,
                           deformed_scale=50, combo_name='1.2D+1.0W')
# We should see upward displacement at N1 and downward displacement at N4 if
# our springs worked correctly
print('N1 displacement in Y =', braced_frame.Nodes['N1'].DY['1.2D+1.0W'])
print('N4 displacement in Y =', braced_frame.Nodes['N4'].DY['1.2D+1.0W'])
| 46.845588
| 133
| 0.718098
|
4a01681835bc4ed651ca7d4416e86c79af5a562d
| 5,475
|
py
|
Python
|
base.py
|
jacksyyy/PurpurDocs
|
b2a4044e52233ab066985e4ac921da7cb8f31d97
|
[
"BSD-2-Clause"
] | 6
|
2021-12-05T23:38:21.000Z
|
2022-03-15T19:39:14.000Z
|
base.py
|
jacksyyy/PurpurDocs
|
b2a4044e52233ab066985e4ac921da7cb8f31d97
|
[
"BSD-2-Clause"
] | 6
|
2021-12-09T11:57:38.000Z
|
2022-03-16T00:37:45.000Z
|
base.py
|
jacksyyy/PurpurDocs
|
b2a4044e52233ab066985e4ac921da7cb8f31d97
|
[
"BSD-2-Clause"
] | 14
|
2021-12-04T00:21:10.000Z
|
2022-03-12T21:46:01.000Z
|
from os import makedirs, path
import requests
import yaml
import asyncio
import re
import sys
# Matches Java-style config getter calls, e.g. getBoolean("path.to.key", DEFAULT);
# groups: (getter type, config path, default-value expression).
CONFIG_REGEX = re.compile(r'[^.]get(Boolean|Int|Double|String|List)\("(.+)",\s*(\w+)')
# Matches hasPermission("...") calls; group 1 is the permission node.
PERM_REGEX = re.compile(r'hasPermission\("(.+?)"\)')
# Directory where per-comparison YAML diff logs are written.
LOG_DIR = './logs/'
# GitHub project whose commits are compared.
PROJECT = {
    'owner': 'PurpurMC',
    'repo': 'Purpur',
    'branch': 'ver/1.18.2'
}
async def find_default_value(config_result, patch):
    """Resolve the default value of a parsed config getter call.

    `config_result` is a (getter_type, config_path, default_expr) tuple as
    produced by CONFIG_REGEX. Returns {config_path: value} when the default
    can be resolved (directly from the literal, or by finding an
    `default_expr = value;` assignment in `patch`), otherwise just the
    bare config path string.
    """
    getter, path_key, default = config_result
    if getter == 'Boolean' and default in ('true', 'false'):
        return {path_key: default}
    if getter == 'Int' and default.isnumeric():
        return {path_key: default}
    if getter == 'Double' and (default.endswith('F') or default.endswith('D')):
        return {path_key: default[:-1]}  # strip the float/double suffix
    if getter == 'String' and '"' in default:
        return {path_key: default[1:-1]}  # strip surrounding quotes
    if getter == 'List':
        return {path_key: default}
    # The default is an identifier; look for its assignment in the diff text.
    assignment = re.search(default + r'\s*=\s*(.+);', patch)
    if assignment is not None and len(assignment.groups()):
        return {path_key: assignment.group(1)}
    return path_key
async def compile_diff(compare_commits, project):
    """Collect config and permission additions/removals between two commits.

    Downloads the unified diff from GitHub for the two refs in
    `compare_commits` and scans added/removed lines for config getter and
    hasPermission() calls.

    Returns:
        {'config': {'additions': [...], 'removals': [...]},
         'permission': {'additions': [...], 'removals': [...]}}

    Raises:
        ValueError: if `compare_commits` is None or does not hold exactly
            two refs, or if `project` is None.
    """
    # Bug fix: the original check used "and", so compare_commits=None
    # crashed on len() before the check, and a wrong-length list passed
    # validation entirely (the "and" short-circuited to False).
    if compare_commits is None or len(compare_commits) != 2:
        raise ValueError('Can only compare between two commits')
    if project is None:
        raise ValueError('Could not find a project to use.')
    # NOTE(review): the URL below uses the module-level PROJECT rather than
    # the `project` argument -- confirm that is intended.
    diff = {'config': {'additions': [], 'removals': []},
            'permission': {'additions': [], 'removals': []}}
    repo_link = f"https://github.com/{PROJECT['owner']}/{PROJECT['repo']}/compare/{compare_commits[0]}..{compare_commits[1]}.diff"
    patch_file = requests.get(repo_link).text
    for line in patch_file.split('\n'):
        if line.startswith('++'):
            regex_config_result = CONFIG_REGEX.search(line)
            regex_perm_result = PERM_REGEX.search(line)
            # if not in removals list then include in additions list
            if regex_config_result:
                config_result = regex_config_result.groups()
                if config_result in diff['config']['removals'] or config_result in diff['config']['additions']:
                    continue
                diff['config']['additions'].append(await find_default_value(config_result, patch_file))
            if regex_perm_result:
                perm_result = regex_perm_result.group(1)
                if perm_result in diff['permission']['removals'] or perm_result in diff['permission']['additions']:
                    continue
                diff['permission']['additions'].append(perm_result)
        elif line.startswith('--') or line.startswith('-+'):
            regex_config_result = CONFIG_REGEX.search(line)
            regex_perm_result = PERM_REGEX.search(line)
            # if not in additions list then include in removals list
            if regex_config_result:
                config_result = regex_config_result.groups()
                if config_result in diff['config']['additions'] or config_result in diff['config']['removals']:
                    continue
                diff['config']['removals'].append(await find_default_value(config_result, patch_file))
            if regex_perm_result:
                perm_result = regex_perm_result.group(1)
                if perm_result in diff['permission']['additions'] or perm_result in diff['permission']['removals']:
                    continue
                diff['permission']['removals'].append(perm_result)
    return diff
async def main():
    """Determine the two commits to compare, compute the config/permission
    diff and dump it as YAML under LOG_DIR.

    Optional argv: [commit_a [commit_b]]. With one arg the project branch
    is used as the second ref; with none, the last processed commit
    (persisted in the 'last_commit' file) and the branch head are used.
    """
    compare_commits = []
    args = sys.argv[1:3]
    if len(args) > 0:
        compare_commits += args
    if len(args) == 1:
        compare_commits.append(PROJECT['branch'])
    last_commit = ''
    if len(compare_commits) == 0:
        if path.exists('last_commit'):
            with open('last_commit', 'r') as stream:
                last_commit = stream.read()
        branch_sha = ''
        branch_sha_json = requests.get(f"https://api.github.com/repos/{PROJECT['owner']}/{PROJECT['repo']}/branches").json()
        for branch in branch_sha_json:
            if branch['name'] == PROJECT['branch']:
                # Short (6-char) SHA of the branch head.
                branch_sha = branch['commit']['sha'][:6]
                break
        if not branch_sha:
            raise ValueError(f"Could not locate branch {PROJECT['branch']} in project {PROJECT['owner']}/{PROJECT['repo']}")
        # Remember the head we processed so the next run diffs from it.
        with open('last_commit', 'w') as stream:
            stream.write(branch_sha)
        if last_commit:
            compare_commits += [last_commit, branch_sha]
        else:
            compare_commits += [branch_sha, PROJECT['branch']]
    diff = await compile_diff(compare_commits, PROJECT)
    # Remove duplicates between additions/removals: an entry present in
    # both lists is unchanged overall and is dropped from each.
    # (Loop variable renamed from `type` to avoid shadowing the builtin.)
    for category in diff:
        additions = diff[category]["additions"]
        removals = diff[category]["removals"]
        diff[category]["additions"] = [x for x in additions if x not in removals]
        diff[category]["removals"] = [x for x in removals if x not in additions]
    filename = f"{LOG_DIR}{'..'.join(compare_commits).replace('/', '|')}.yml"
    makedirs(path.dirname(filename), exist_ok=True)
    with open(filename, 'w') as stream:
        yaml.safe_dump(diff, stream)
if __name__ == '__main__':
    # Script entry point: drive the async workflow via asyncio.
    asyncio.run(main())
| 40.257353
| 130
| 0.610228
|
4a01684feb0d46e3d18df6df7c1008fc6fe9fb6e
| 1,174
|
py
|
Python
|
model-optimizer/mo/front/mxnet/extractors/broadcast_mul.py
|
mypopydev/dldt
|
8cd639116b261adbbc8db860c09807c3be2cc2ca
|
[
"Apache-2.0"
] | 3
|
2019-07-08T09:03:03.000Z
|
2020-09-09T10:34:17.000Z
|
model-optimizer/mo/front/mxnet/extractors/broadcast_mul.py
|
openvino-pushbot/dldt
|
e607ee70212797cf9ca51dac5b7ac79f66a1c73f
|
[
"Apache-2.0"
] | 3
|
2020-11-13T18:59:18.000Z
|
2022-02-10T02:14:53.000Z
|
model-optimizer/mo/front/mxnet/extractors/broadcast_mul.py
|
openvino-pushbot/dldt
|
e607ee70212797cf9ca51dac5b7ac79f66a1c73f
|
[
"Apache-2.0"
] | 1
|
2018-12-14T07:56:02.000Z
|
2018-12-14T07:56:02.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.extractors.utils import layout_attrs
from mo.front.common.partial_infer.utils import mark_input_bins
from mo.graph.graph import Node
def broadcast_mul_infer(node: Node):
    """Shape inference for broadcast_mul.

    The input port that carries no constant value is treated as the data
    input; the output copies its shape. The other port is marked as the
    'weights' input bin.
    """
    data_port = 1 if node.in_node(1).value is None else 0
    weights_port = 1 - data_port
    node.out_node(0).shape = node.in_node(data_port).shape
    mark_input_bins(node, ['weights'], weights_port)
def broadcast_mul_ext(attrs):
    """Build node attributes mapping broadcast_mul onto a ScaleShift layer.

    Layout attributes are merged in last, as in the original, so they take
    precedence on any key collision.
    """
    extracted = {
        'type': 'ScaleShift',
        'infer': broadcast_mul_infer,
    }
    extracted.update(layout_attrs())
    return extracted
| 30.894737
| 73
| 0.736797
|
4a016907455a10f0cb90d8e70b2491e754d2694c
| 506
|
py
|
Python
|
tests/python/grad_test.py
|
447983454/taichi
|
2bfbca88b2d8cb1a070da9a40c5422c99b23fc2f
|
[
"MIT"
] | 1
|
2020-06-01T14:22:19.000Z
|
2020-06-01T14:22:19.000Z
|
tests/python/grad_test.py
|
447983454/taichi
|
2bfbca88b2d8cb1a070da9a40c5422c99b23fc2f
|
[
"MIT"
] | null | null | null |
tests/python/grad_test.py
|
447983454/taichi
|
2bfbca88b2d8cb1a070da9a40c5422c99b23fc2f
|
[
"MIT"
] | null | null | null |
import taichi as ti
from taichi import approx
def grad_test(tifunc, npfunc=None):
    """Check Taichi autodiff of `tifunc` against an autograd reference.

    Builds a 1-element field pair (x -> y = tifunc(x)), runs the kernel
    forward and backward, then asserts both the value and the gradient
    match `npfunc` (which defaults to `tifunc` itself).
    """
    from autograd import grad
    if npfunc is None:
        npfunc = tifunc
    x = ti.var(ti.f32)
    y = ti.var(ti.f32)
    # Place the fields and their gradient fields in a dense 1-element layout.
    ti.root.dense(ti.i, 1).place(x, x.grad, y, y.grad)
    @ti.kernel
    def func():
        for i in x:
            y[i] = tifunc(x[i])
    v = 0.2
    # Seed the output gradient before running the backward kernel.
    y.grad[0] = 1
    x[0] = v
    func()
    func.grad()
    assert y[0] == approx(npfunc(v))
    assert x.grad[0] == approx(grad(npfunc)(v))
| 17.448276
| 54
| 0.543478
|
4a016975957ed5eb08ff5318ba7250d82054fcf0
| 18,964
|
py
|
Python
|
etcmodel/models/wikihop/run_wikihop.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
etcmodel/models/wikihop/run_wikihop.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
etcmodel/models/wikihop/run_wikihop.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETC finetuning runner for WikiHop evaluation.
1) Reference paper describing the construction and details of the dataset:
https://transacl.org/ojs/index.php/tacl/article/viewFile/1325/299
2) Dataset link: http://qangaroo.cs.ucl.ac.uk/
"""
import json
import os
import time
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from etcmodel.models import input_utils
from etcmodel.models.wikihop import run_wikihop_lib
tf.compat.v1.disable_v2_behavior()
# NOTE(review): this rebinds the name imported via `from absl import flags`
# above to tf.flags; both resolve to absl flags in TF1 compat, but the
# double assignment is redundant.
flags = tf.flags
FLAGS = flags.FLAGS
# Required parameters
flags.DEFINE_string(
    "source_model_config_file", None,
    "The source config json file corresponding to the ETC model. "
    "This specifies the model architecture.")
flags.DEFINE_string(
    "input_tf_records_path", None,
    "The path to the TFRecords. If None, the data will be generated using "
    "the `input_file_path`. At least one of `input_file_path` or "
    "this should be specified. This flag is useful for optimization where in "
    "we don't need to generated train/dev tf_records during multiple "
    "iterations of modeling.")
flags.DEFINE_string(
    "predict_ckpt", None, "The path to the checkpoint to "
    "be used for in predict mode. If None, the latest checkpoint in the "
    "model dir would be used.")
flags.DEFINE_string(
    "predict_output_file_path", None, "The full path of the output file to "
    "write the test set results. The results would be in json format with key "
    "being the example id and value being the candidate answer.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_bool(
    "candidate_ignore_hard_g2l", False, "If True, all the "
    "candidate tokens in the global input attend to everything "
    "in the long input (except padding) even when "
    "`use_hard_g2l_mask` is enabled.")
flags.DEFINE_bool(
    "query_ignore_hard_g2l", False, "If True, all the "
    "query tokens in the global input attend to everything in "
    "the long input (except padding) even when "
    "`use_hard_g2l_mask` is enabled.")
flags.DEFINE_bool(
    "enable_l2g_linking", True, "If True, all the "
    "candidate mentions in the long will be linked to the "
    "candidate global token.")
# Negative values below mean "use the value from the model config file".
flags.DEFINE_float(
    "hidden_dropout_prob", -1, "The dropout probability for "
    "all fully connected layers in the embeddings, encoder, and "
    "pooler.")
flags.DEFINE_float("attention_probs_dropout_prob", -1,
                   "The dropout ratio for the attention "
                   "probabilities.")
flags.DEFINE_float(
    "local_radius", -1, "The local radius (window size) for the long input "
    "attention.")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained ETC model) to start "
    "fine-tuning.")
flags.DEFINE_integer("long_seq_len", 4096,
                     "The total input sequence length to pad to for training.")
flags.DEFINE_integer("global_seq_len", 430,
                     "The raw maximum global input sequence length.")
# Mode selection flags: at least one of these must be set.
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")
flags.DEFINE_bool(
    "do_export", False, "To export SavedModels for all the "
    "checkpoints within the model_dir.")
flags.DEFINE_string(
    "export_ckpts", None, "A space separated list of all the "
    "checkpoints to be exported. If None, exports all the "
    "checkpoints within the model_dir. Applicable only when "
    "`do_export` is set to True.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_enum("optimizer", "adamw", ["adamw", "lamb"],
                  "The optimizer for training.")
flags.DEFINE_float("learning_rate", 3e-05, "The initial learning rate for "
                   "Adam.")
flags.DEFINE_float("weight_decay_rate", 0.1, "The weight decay rate.")
flags.DEFINE_float("label_smoothing", 0.0, "The label smoothing param.")
flags.DEFINE_integer(
    "num_train_epochs", 15, "Number of train epochs. The total number of "
    "examples on the WikiHop dataset is ~44K.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer(
    "max_eval_steps", 600, "Maximum number of eval steps. "
    "Total number of dev examples in WikiHop is ~5K. "
    "This number has been set assuming a eval_batch_size of "
    "8.")
flags.DEFINE_enum(
    "learning_rate_schedule", "poly_decay", ["poly_decay", "inverse_sqrt"],
    "The learning rate schedule to use. The default of "
    "`poly_decay` uses tf.train.polynomial_decay, while "
    "`inverse_sqrt` uses inverse sqrt of time after the warmup.")
flags.DEFINE_float("poly_power", 1.0,
                   "The power of poly decay if using `poly_decay` schedule.")
flags.DEFINE_integer("start_warmup_step", 0, "The starting step of warmup.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer(
    "grad_checkpointing_period", None,
    "If specified, this overrides the corresponding `EtcConfig` value loaded "
    "from `source_model_config_file`.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_integer("keep_checkpoint_max", 100,
                     "How many steps to make in each estimator call.")
# TPU-related flags.
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool(
    "use_one_hot_embeddings", False,
    "Whether to use one-hot multiplication instead of gather for embedding "
    "lookups.")
flags.DEFINE_bool(
    "add_final_layer", True,
    "If True, a ResNet block is applied on the global output before "
    "prediction.")
flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_string("tpu_job_name", None,
                    "Name of TPU worker, if anything other than 'tpu_worker'")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_integer("num_train_examples", None, "Number of train tf examples.")
flags.DEFINE_integer("num_dev_examples", None, "Number of dev tf examples.")
def main(argv):
  """Runs export / training / evaluation / prediction for the WikiHop model.

  Driven entirely by the module-level FLAGS.  At least one of `do_train`,
  `do_eval`, `do_predict`, or `do_export` must be set.  Eval and predict run
  continuously, processing each new checkpoint written to `output_dir` until
  the final training step's checkpoint has been handled.
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.compat.v1.enable_resource_variables()
  # Validate flag combinations before touching the filesystem.
  if (not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict and
      not FLAGS.do_export):
    raise ValueError(
        "At least one of `do_train`, `do_eval`, `do_predict' or `do_export` "
        "must be True.")
  # NOTE(review): the message below is missing its leading backtick; left
  # unchanged here since it is a runtime string.
  if not FLAGS.do_export and FLAGS.input_tf_records_path is None:
    raise ValueError(
        "input_tf_records_path` must be specified when not in export mode.")
  tf.gfile.MakeDirs(FLAGS.output_dir)
  # When training, the source config is copied into the model dir; otherwise
  # the config already stored in the model dir is loaded.
  model_config = input_utils.get_model_config(
      model_dir=FLAGS.output_dir,
      source_file=FLAGS.source_model_config_file,
      write_from_source=FLAGS.do_train)
  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.estimator.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      keep_checkpoint_max=FLAGS.keep_checkpoint_max,
      tpu_config=tf.estimator.tpu.TPUConfig(
          tpu_job_name=FLAGS.tpu_job_name,
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))
  # Derive step counts from dataset size, batch size and epoch count.
  num_train_steps = int(FLAGS.num_train_examples / FLAGS.train_batch_size *
                        FLAGS.num_train_epochs)
  num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
  model_fn = run_wikihop_lib.model_fn_builder(
      model_config=model_config,
      model_dir=FLAGS.output_dir,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_one_hot_embeddings,
      optimizer=FLAGS.optimizer,
      poly_power=FLAGS.poly_power,
      start_warmup_step=FLAGS.start_warmup_step,
      learning_rate_schedule=FLAGS.learning_rate_schedule,
      add_final_layer=FLAGS.add_final_layer,
      weight_decay_rate=FLAGS.weight_decay_rate,
      label_smoothing=FLAGS.label_smoothing)
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.estimator.tpu.TPUEstimator(
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size,
      use_tpu=FLAGS.use_tpu,
      export_to_tpu=False)
  if FLAGS.do_export:
    tf.logging.info("***** Running export of models *****")
    run_wikihop_lib.run_export(
        estimator=estimator,
        model_dir=FLAGS.output_dir,
        model_config=model_config,
        export_ckpts=FLAGS.export_ckpts,
        long_seq_len=FLAGS.long_seq_len,
        global_seq_len=FLAGS.global_seq_len,
        candidate_ignore_hard_g2l=FLAGS.candidate_ignore_hard_g2l,
        query_ignore_hard_g2l=FLAGS.query_ignore_hard_g2l,
        enable_l2g_linking=FLAGS.enable_l2g_linking)
  if FLAGS.do_train:
    tf.logging.info("***** Running training *****")
    assert FLAGS.weight_decay_rate is not None
    assert FLAGS.learning_rate is not None
    tf.logging.info("*** Model Hyperparams ****")
    tf.logging.info(
        "learning_rate: {}, weight_decay_rate: {}, label_smoothing:{}"
        .format(FLAGS.learning_rate, FLAGS.weight_decay_rate,
                FLAGS.label_smoothing))
    # Negative flag values mean "keep the value from the model config"; any
    # value >= 0 (or non-None, for the integer flags) overrides the config.
    if FLAGS.hidden_dropout_prob >= 0.0:
      model_config.hidden_dropout_prob = FLAGS.hidden_dropout_prob
      tf.logging.info("Overwriting hidden_dropout_prob to: {}".format(
          model_config.hidden_dropout_prob))
    if FLAGS.attention_probs_dropout_prob >= 0.0:
      model_config.attention_probs_dropout_prob = (
          FLAGS.attention_probs_dropout_prob)
      tf.logging.info("Overwriting attention_probs_dropout_prob to: {}".format(
          model_config.attention_probs_dropout_prob))
    if FLAGS.grad_checkpointing_period is not None:
      model_config.grad_checkpointing_period = FLAGS.grad_checkpointing_period
      tf.logging.info("Overwriting grad_checkpointing_period to: {}".format(
          model_config.grad_checkpointing_period))
    if FLAGS.local_radius >= 0:
      model_config.local_radius = FLAGS.local_radius
      tf.logging.info("Overwriting local_radius to: {}".format(
          model_config.local_radius))
    tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info(" Num steps = %d", num_train_steps)
    train_tf_file = FLAGS.input_tf_records_path
    train_input_fn = run_wikihop_lib.input_fn_builder(
        input_file_pattern=train_tf_file,
        model_config=model_config,
        long_seq_len=FLAGS.long_seq_len,
        global_seq_len=FLAGS.global_seq_len,
        is_training=True,
        drop_remainder=True,
        candidate_ignore_hard_g2l=FLAGS.candidate_ignore_hard_g2l,
        query_ignore_hard_g2l=FLAGS.query_ignore_hard_g2l,
        enable_l2g_linking=FLAGS.enable_l2g_linking)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
  if FLAGS.do_eval:
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_tf_file = FLAGS.input_tf_records_path
    eval_input_fn = run_wikihop_lib.input_fn_builder(
        input_file_pattern=eval_tf_file,
        model_config=model_config,
        long_seq_len=FLAGS.long_seq_len,
        global_seq_len=FLAGS.global_seq_len,
        is_training=False,
        drop_remainder=eval_drop_remainder,
        candidate_ignore_hard_g2l=FLAGS.candidate_ignore_hard_g2l,
        query_ignore_hard_g2l=FLAGS.query_ignore_hard_g2l,
        enable_l2g_linking=FLAGS.enable_l2g_linking)
    # Run evaluation for each new checkpoint.
    for ckpt in tf.train.checkpoints_iterator(FLAGS.output_dir):
      tf.logging.info("Starting eval on new checkpoint: %s", ckpt)
      try:
        start_timestamp = time.time()  # This time will include compilation time
        eval_results = estimator.evaluate(
            input_fn=eval_input_fn,
            checkpoint_path=ckpt,
            steps=FLAGS.max_eval_steps,
            name="metrics")
        elapsed_time = int(time.time() - start_timestamp)
        tf.logging.info("Eval results: %s. Elapsed seconds: %d", eval_results,
                        elapsed_time)
        # Terminate eval job when final checkpoint is reached.
        # Checkpoint basenames look like "model.ckpt-<step>".
        current_step = int(os.path.basename(ckpt).split("-")[1])
        if current_step >= num_train_steps:
          tf.logging.info("Evaluation finished after training step %d",
                          current_step)
          break
      except tf.errors.NotFoundError:
        # Since the coordinator is on a different job than the TPU worker,
        # sometimes the TPU worker does not finish initializing until long after
        # the CPU job tells it to start evaluating. In this case, the checkpoint
        # file could have been deleted already.
        tf.logging.info("Checkpoint %s no longer exists, skipping checkpoint",
                        ckpt)
  if FLAGS.do_predict:
    predict_tf_file = FLAGS.input_tf_records_path
    predict_input_fn = run_wikihop_lib.input_fn_builder(
        input_file_pattern=predict_tf_file,
        model_config=model_config,
        long_seq_len=FLAGS.long_seq_len,
        global_seq_len=FLAGS.global_seq_len,
        is_training=False,
        drop_remainder=False,
        candidate_ignore_hard_g2l=FLAGS.candidate_ignore_hard_g2l,
        query_ignore_hard_g2l=FLAGS.query_ignore_hard_g2l,
        enable_l2g_linking=FLAGS.enable_l2g_linking)
    tf.logging.info("***** Running prediction *****")
    tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
    # Like eval, prediction runs once per new checkpoint until training ends.
    for ckpt in tf.train.checkpoints_iterator(FLAGS.output_dir):
      tf.logging.info("Starting prediction on new checkpoint: %s", ckpt)
      current_step = int(os.path.basename(ckpt).split("-")[1])
      try:
        result = estimator.predict(
            input_fn=predict_input_fn,
            checkpoint_path=ckpt,
            yield_single_examples=True)
      except tf.errors.NotFoundError:
        tf.logging.info("Checkpoint %s no longer exists, skipping checkpoint",
                        ckpt)
        continue
      tf.logging.info("***** Predict results for ckpt = %d *****", current_step)
      predict_output_file = os.path.join(
          FLAGS.output_dir, "predict-" + str(current_step) + ".json")
      predict_output = {}
      num_written_lines = 0
      num_correct_predictions = 0
      num_incorrect_predictions = 0
      for (i, prediction) in enumerate(result):
        # Stop after exactly num_dev_examples; the input_fn may pad/repeat.
        if i >= FLAGS.num_dev_examples:
          break
        if i % 500 == 0:
          tf.logging.info("*** Done processing %d examples for ckpt %d ***", i,
                          current_step)
          tf.logging.info("*** num_total_predictions = %d ***",
                          num_written_lines)
          tf.logging.info("*** num_correct_predictions = %d ***",
                          num_correct_predictions)
          tf.logging.info("*** num_incorrect_predictions = %d ***",
                          num_incorrect_predictions)
        # The predicted candidate is the argmax over the global-token logits.
        logits = prediction["logits"]
        assert len(logits) == FLAGS.global_seq_len
        predicted_index = np.argmax(logits)
        predict_output["WH_dev_" + str(i)] = str(predicted_index)
        if prediction["label_ids"] == predicted_index:
          num_correct_predictions += 1
        else:
          num_incorrect_predictions += 1
        num_written_lines += 1
      tf.logging.info("*** Prediction results for ckpt = %d ***", current_step)
      tf.logging.info("*** num_total_predictions = %d ***", num_written_lines)
      tf.logging.info("*** num_correct_predictions = %d ***",
                      num_correct_predictions)
      tf.logging.info("*** num_incorrect_predictions = %d ***",
                      num_incorrect_predictions)
      assert num_written_lines == FLAGS.num_dev_examples
      predict_output["num_total_predictions"] = num_written_lines
      predict_output["num_correct_predictions"] = num_correct_predictions
      predict_output["num_incorrect_predictions"] = num_incorrect_predictions
      predict_output["accuracy"] = (num_correct_predictions / num_written_lines)
      # Persist per-example predictions plus summary stats as JSON.
      with tf.gfile.GFile(predict_output_file, "w") as writer:
        json.dump(predict_output, writer)
      if current_step >= num_train_steps:
        tf.logging.info("Prediction finished after training step %d",
                        current_step)
        break
if __name__ == "__main__":
  # These flags have no usable defaults; fail fast if they are missing.
  flags.mark_flag_as_required("output_dir")
  flags.mark_flag_as_required("source_model_config_file")
  tf.app.run()
| 38.388664
| 80
| 0.699272
|
4a016a094721ff93e0d79ad3795737c01ebb46b0
| 1,348
|
py
|
Python
|
tests/test_problems/test_zoo/test_diffeq/test_ivp_examples.py
|
christopheroates/probnum
|
4ae63da307bd7279c3ce477ef68cbd0b8e30c73a
|
[
"MIT"
] | null | null | null |
tests/test_problems/test_zoo/test_diffeq/test_ivp_examples.py
|
christopheroates/probnum
|
4ae63da307bd7279c3ce477ef68cbd0b8e30c73a
|
[
"MIT"
] | null | null | null |
tests/test_problems/test_zoo/test_diffeq/test_ivp_examples.py
|
christopheroates/probnum
|
4ae63da307bd7279c3ce477ef68cbd0b8e30c73a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
import probnum.problems as pnpr
import probnum.problems.zoo.diffeq as diffeqzoo
# One example initial value problem from each entry in the diffeq zoo.
_ZOO_FACTORIES = (
    diffeqzoo.vanderpol,
    diffeqzoo.threebody,
    diffeqzoo.rigidbody,
    diffeqzoo.lotkavolterra,
    diffeqzoo.logistic,
    diffeqzoo.seir,
    diffeqzoo.fitzhughnagumo,
    diffeqzoo.lorenz,
)
ODE_LIST = [make_ivp() for make_ivp in _ZOO_FACTORIES]
# Decorator that parametrizes a test over every zoo problem.
all_odes = pytest.mark.parametrize("ivp", ODE_LIST)
@all_odes
def test_isinstance(ivp):
    """Every zoo problem is wrapped as an ``InitialValueProblem``."""
    expected_type = pnpr.InitialValueProblem
    assert isinstance(ivp, expected_type)
@all_odes
def test_eval(ivp):
f0 = ivp.f(ivp.t0, ivp.y0)
assert isinstance(f0, np.ndarray)
if ivp.df is not None:
df0 = ivp.df(ivp.t0, ivp.y0)
assert isinstance(df0, np.ndarray)
if ivp.ddf is not None:
ddf0 = ivp.ddf(ivp.t0, ivp.y0)
assert isinstance(ddf0, np.ndarray)
@all_odes
def test_df0(ivp):
if ivp.df is not None:
step = 1e-6
time = ivp.t0 + 0.1 * np.random.rand()
direction = step * (1.0 + 0.1 * np.random.rand(len(ivp.y0)))
increment = step * direction
point = ivp.y0 + 0.1 * np.random.rand(len(ivp.y0))
fd_approx = (
ivp.f(time, point + increment) - ivp.f(time, point - increment)
) / (2.0 * step)
np.testing.assert_allclose(
fd_approx, ivp.df(time, point) @ direction, rtol=1e-3, atol=1e-3
)
| 24.509091
| 76
| 0.623145
|
4a016af0909efbd7f966d50e6e0e6974238ed8fa
| 7,843
|
py
|
Python
|
h/models/document/_document.py
|
BearerPipelineTest/h
|
6b8b6600f5995463ca60ded9e4c82053d606f4de
|
[
"BSD-2-Clause"
] | 2,103
|
2015-01-07T12:47:49.000Z
|
2022-03-29T02:38:25.000Z
|
h/models/document/_document.py
|
BearerPipelineTest/h
|
6b8b6600f5995463ca60ded9e4c82053d606f4de
|
[
"BSD-2-Clause"
] | 4,322
|
2015-01-04T17:18:01.000Z
|
2022-03-31T17:06:02.000Z
|
h/models/document/_document.py
|
admariner/h
|
25ef1b8d94889df86ace5a084f1aa0effd9f4e25
|
[
"BSD-2-Clause"
] | 389
|
2015-01-24T04:10:02.000Z
|
2022-03-28T08:00:16.000Z
|
import logging
from datetime import datetime
from urllib.parse import urlparse
import sqlalchemy as sa
from h.db import Base, mixins
from h.models import Annotation
from h.models.document._exceptions import ConcurrentUpdateError
from h.models.document._meta import create_or_update_document_meta
from h.models.document._uri import DocumentURI, create_or_update_document_uri
from h.util.uri import normalize as uri_normalize
log = logging.getLogger(__name__)
class Document(Base, mixins.Timestamps):
    """A web document that annotations refer to.

    Aggregates the known URIs (``DocumentURI``) and metadata claims
    (``DocumentMeta``) for one logical document, plus denormalized ``title``
    and ``web_uri`` columns for quick display.
    """

    __tablename__ = "document"
    id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
    #: The denormalized value of the first DocumentMeta record with type title.
    title = sa.Column("title", sa.UnicodeText())
    #: The denormalized value of the "best" http(s) DocumentURI for this Document.
    web_uri = sa.Column("web_uri", sa.UnicodeText())
    # FIXME: This relationship should be named `uris` again after the
    # dependency on the annotator-store is removed, as it clashes with
    # making the Postgres and Elasticsearch interface of a Document
    # object behave the same way.
    document_uris = sa.orm.relationship(
        "DocumentURI", backref="document", order_by="DocumentURI.updated.desc()"
    )
    meta = sa.orm.relationship(
        "DocumentMeta", backref="document", order_by="DocumentMeta.updated.desc()"
    )
    def __repr__(self):
        # Debug representation, e.g. "<Document 123>".
        return f"<Document {self.id}>"
    def update_web_uri(self):
        """
        Update the value of the self.web_uri field.
        Set self.web_uri to the "best" http(s) URL from self.document_uris.
        Set self.web_uri to None if there's no http(s) DocumentURIs.
        """
        def first_http_url(type_=None):
            """
            Return this document's first http(s) URL of the given type.
            Return None if this document doesn't have any http(s) URLs of the
            given type.
            If no type is given just return this document's first http(s)
            URL, or None.
            """
            # document_uris is ordered by updated desc (see relationship
            # above), so "first" means most recently updated.
            for document_uri in self.document_uris:
                uri = document_uri.uri
                if type_ is not None and document_uri.type != type_:
                    continue
                if urlparse(uri).scheme not in ["http", "https"]:
                    continue
                return document_uri.uri
        # Preference order: self-claim, then rel-canonical, then any http(s).
        self.web_uri = (
            first_http_url(type_="self-claim")
            or first_http_url(type_="rel-canonical")
            or first_http_url()
        )
    @classmethod
    def find_by_uris(cls, session, uris):
        """Find documents by a list of uris.

        Returns a query of Documents having at least one DocumentURI whose
        normalized form matches one of the given URIs.
        """
        # Normalize the inputs the same way DocumentURI.uri_normalized is
        # stored so lookups match regardless of superficial URL differences.
        query_uris = [uri_normalize(u) for u in uris]
        matching_claims = (
            session.query(DocumentURI)
            .filter(
                DocumentURI.uri_normalized.in_(query_uris)  # pylint: disable=no-member
            )
            .distinct(DocumentURI.document_id)
            .subquery()
        )
        return session.query(Document).join(matching_claims)
    @classmethod
    def find_or_create_by_uris(  # pylint: disable=too-many-arguments
        cls, session, claimant_uri, uris, created=None, updated=None
    ):
        """
        Find or create documents from a claimant uri and a list of uris.
        It tries to find a document based on the claimant and the set of uris.
        If none can be found it will return a new document with the claimant
        uri as its only document uri as a self-claim. It is the callers
        responsibility to create any other document uris.

        :raises ConcurrentUpdateError: if another transaction creates the
            same document concurrently (surfaced as an IntegrityError).
        """
        finduris = [claimant_uri] + uris
        documents = cls.find_by_uris(session, finduris)
        if not documents.count():
            doc = Document(created=created, updated=updated)
            # Record the claimant URI as the document's initial self-claim.
            DocumentURI(
                document=doc,
                claimant=claimant_uri,
                uri=claimant_uri,
                type="self-claim",
                created=created,
                updated=updated,
            )
            session.add(doc)
            try:
                session.flush()
            except sa.exc.IntegrityError as err:
                raise ConcurrentUpdateError("concurrent document creation") from err
        # NOTE(review): `documents` is a lazy query; after the flush above it
        # should include the newly created document when evaluated — confirm.
        return documents
def merge_documents(session, documents, updated=None):
    """
    Take a list of documents and merges them together. It returns the new master document.

    The first document in `documents` becomes the master; every other
    document's URIs, metadata and annotations are moved onto the master and
    the duplicate Document rows are then deleted.

    The support for setting a specific value for the `updated` should only
    be used during the Postgres migration. It should be removed afterwards.

    :raises ConcurrentUpdateError: if a concurrent transaction causes an
        IntegrityError during the flush/update/delete sequence.
    """
    if updated is None:
        updated = datetime.utcnow()
    master = documents[0]
    duplicates = documents[1:]
    duplicate_ids = [doc.id for doc in duplicates]
    log.info("Merging %s documents", len(duplicate_ids) + 1)
    for doc in duplicates:
        # Pop each related object off the duplicate and reattach it to the
        # master; popping (rather than iterating) keeps the relationship
        # collection consistent while it shrinks.
        for _ in range(len(doc.document_uris)):
            uri = doc.document_uris.pop()
            uri.document = master
            uri.updated = updated
        for _ in range(len(doc.meta)):
            meta = doc.meta.pop()
            meta.document = master
            meta.updated = updated
    try:  # pylint:disable=too-many-try-statements
        # Flush the reattachments first, then bulk-repoint annotations and
        # delete the now-empty duplicate documents.
        session.flush()
        session.query(Annotation).filter(
            Annotation.document_id.in_(duplicate_ids)
        ).update({Annotation.document_id: master.id}, synchronize_session="fetch")
        session.query(Document).filter(Document.id.in_(duplicate_ids)).delete(
            synchronize_session="fetch"
        )
    except sa.exc.IntegrityError as err:
        raise ConcurrentUpdateError("concurrent document merges") from err
    return master
def update_document_metadata(  # pylint: disable=too-many-arguments
    session,
    target_uri,
    document_meta_dicts,
    document_uri_dicts,
    created=None,
    updated=None,
):
    """
    Create and update document metadata from the given annotation.
    Document, DocumentURI and DocumentMeta objects will be created, updated
    and deleted in the database as required by the given annotation and
    document meta and uri dicts.
    :param target_uri: the target_uri of the annotation from which the document metadata comes from
    :param document_meta_dicts: the document metadata dicts that were derived
        by validation from the "document" dict that the client posted
    :type document_meta_dicts: list of dicts
    :param document_uri_dicts: the document URI dicts that were derived by
        validation from the "document" dict that the client posted
    :type document_uri_dicts: list of dicts
    :param created: Date and time value for the new document records
    :param updated: Date and time value for the new document records
    :returns: the matched or created document
    :rtype: h.models.Document
    """
    if created is None:
        created = datetime.utcnow()
    if updated is None:
        updated = datetime.utcnow()
    documents = Document.find_or_create_by_uris(
        session,
        target_uri,
        [u["uri"] for u in document_uri_dicts],
        created=created,
        updated=updated,
    )
    # Multiple matches mean the URIs span duplicate documents: merge them
    # into one before attaching the new metadata.
    if documents.count() > 1:
        document = merge_documents(session, documents, updated=updated)
    else:
        document = documents.first()
    document.updated = updated
    for document_uri_dict in document_uri_dicts:
        create_or_update_document_uri(
            session=session,
            document=document,
            created=created,
            updated=updated,
            **document_uri_dict,
        )
    # Recompute the denormalized web_uri now that the URI set may have changed.
    document.update_web_uri()
    for document_meta_dict in document_meta_dicts:
        create_or_update_document_meta(
            session=session,
            document=document,
            created=created,
            updated=updated,
            **document_meta_dict,
        )
    return document
| 32.8159
| 99
| 0.643759
|
4a016beaa21640468ef3874988187f2c8a5969a1
| 355
|
py
|
Python
|
doc/programming/parts/python-firebird-testproc-values-output-params.py
|
laigor/sqlrelay-non-english-fixes-
|
7803f862ddbf88bca078c50d621c64c22fc0a405
|
[
"PHP-3.01",
"CC-BY-3.0"
] | 16
|
2018-04-23T09:58:33.000Z
|
2022-01-31T13:40:20.000Z
|
doc/programming/parts/python-firebird-testproc-values-output-params.py
|
laigor/sqlrelay-non-english-fixes-
|
7803f862ddbf88bca078c50d621c64c22fc0a405
|
[
"PHP-3.01",
"CC-BY-3.0"
] | null | null | null |
doc/programming/parts/python-firebird-testproc-values-output-params.py
|
laigor/sqlrelay-non-english-fixes-
|
7803f862ddbf88bca078c50d621c64c22fc0a405
|
[
"PHP-3.01",
"CC-BY-3.0"
] | 4
|
2020-12-23T12:17:54.000Z
|
2022-01-04T20:46:34.000Z
|
# Example: calling a Firebird stored procedure through SQL Relay with
# positional input binds and typed output binds.  `cur` is an SQL Relay
# cursor created elsewhere.
cur.prepareQuery("execute procedure exampleproc ?, ?, ?")
# Bind the three input parameters by (1-based) position.
cur.inputBind("1",1)
# presumably the extra args are precision/scale for the float — TODO confirm
cur.inputBind("2",1.1,2,1)
cur.inputBind("3","hello")
# Declare the types (and, for strings, max length) of the procedure's
# output parameters before executing.
cur.defineOutputBindInteger("1")
cur.defineOutputBindDouble("2")
cur.defineOutputBindString("3",20)
cur.executeQuery()
# Fetch the output parameter values after execution.
out1=cur.getOutputBindInteger("1")
out2=cur.getOutputBindDouble("2")
out3=cur.getOutputBindString("3")
| 29.583333
| 57
| 0.757746
|
4a016c080f06699df344bd86fb67ec9df19102d7
| 1,302
|
py
|
Python
|
src/common/admin.py
|
kuriadn/fayvad
|
7f26644b1407f7c493fff04126371a9e1ca53d58
|
[
"MIT"
] | null | null | null |
src/common/admin.py
|
kuriadn/fayvad
|
7f26644b1407f7c493fff04126371a9e1ca53d58
|
[
"MIT"
] | null | null | null |
src/common/admin.py
|
kuriadn/fayvad
|
7f26644b1407f7c493fff04126371a9e1ca53d58
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
class VehicleAdmin(admin.ModelAdmin):
    """Admin configuration for vehicles: same columns shown and filterable."""
    _columns = ('regno', 'dueinsurance')
    fields = _columns
    list_display = _columns
    list_filter = _columns
    list_per_page = 10
class DriverAdmin(admin.ModelAdmin):
    """Admin configuration for drivers: same columns shown and filterable."""
    _columns = ('idno', 'fname', 'lname', 'address', 'telno', 'email',
                'duelicense')
    fields = _columns
    list_display = _columns
    list_filter = _columns
    list_per_page = 10
class ExpenseAdmin(admin.ModelAdmin):
    """Admin configuration for expenses: same columns shown and filterable."""
    _columns = ('exptype', 'amount', 'date', 'vehicle')
    fields = _columns
    list_display = _columns
    list_filter = _columns
    list_per_page = 10
class TripAdmin(admin.ModelAdmin):
    """Admin configuration for trips: same columns shown and filterable."""
    _columns = ('van', 'amount', 'datepaid', 'desc')
    fields = _columns
    list_display = _columns
    list_filter = _columns
    list_per_page = 10
# Register each model with its ModelAdmin in one pass.
# (PayMode and ExpenseType registration is intentionally disabled.)
for _model, _model_admin in (
    (Vehicle, VehicleAdmin),
    (Driver, DriverAdmin),
    (Expense, ExpenseAdmin),
    (Trip, TripAdmin),
):
    admin.site.register(_model, _model_admin)
admin.site.register(Position)
| 37.2
| 90
| 0.6851
|
4a016c1c5fc48559764b2c0a3d336a2ce19b745c
| 14,767
|
py
|
Python
|
test/functional/p2p-acceptblock.py
|
thomascvitale/herbsters
|
451daf9bde43e5b97a32a4bb7578be16acbaca30
|
[
"MIT"
] | 1
|
2020-06-28T19:49:26.000Z
|
2020-06-28T19:49:26.000Z
|
test/functional/p2p-acceptblock.py
|
thomascvitale/herbsters
|
451daf9bde43e5b97a32a4bb7578be16acbaca30
|
[
"MIT"
] | 1
|
2020-06-27T00:10:05.000Z
|
2020-06-27T08:16:21.000Z
|
test/functional/p2p-acceptblock.py
|
thomascvitale/herbsters
|
451daf9bde43e5b97a32a4bb7578be16acbaca30
|
[
"MIT"
] | 2
|
2020-02-05T23:43:32.000Z
|
2020-06-26T15:29:15.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one NodeConn connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import herbstersTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(herbstersTestFramework):
    """Functional test of unrequested-block processing (see module docstring).

    Drives two nodes via raw p2p connections: node0 with default settings
    and node1 with nMinimumChainWork=0x10, and checks which unsolicited
    blocks each accepts, ignores, or rejects.
    """

    def add_options(self, parser):
        """Allow overriding the daemon binary under test via --testbinary."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("herbstersD", "herbstersd"),
                          help="herbstersd binary to test")
    def set_test_params(self):
        """Two fresh-chain nodes; node1 requires a minimum chain work."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        # Node2 will be used for non-whitelisted peers to test the interaction
        # with nMinimumChainWork.
        # NOTE(review): the comment above mentions Node2 but num_nodes is 2 —
        # looks like a stale comment from an earlier version of the test.
        self.setup_nodes()
    def run_test(self):
        """Execute scenarios 1-9 from the module docstring in order."""
        # Setup the p2p connections and start up the network thread.
        test_node = NodeConnCB() # connects to node0
        min_work_node = NodeConnCB() # connects to node1
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], min_work_node))
        test_node.add_connection(connections[0])
        min_work_node.add_connection(connections[1])
        NetworkThread().start() # Start up network handling in another thread
        # Test logic begins here
        test_node.wait_for_verack()
        min_work_node.wait_for_verack()
        # 1. Have nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = [] # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))
        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        # Unrequested, not-more-work block: only the header should be kept.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two block that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        # But this block should be accepted by node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")
        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        self.nodes[0].getblock(block_h3.hash)
        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")
        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as its not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)
        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        connections[0].disconnect_node()
        test_node.wait_for_disconnect()
        test_node = NodeConnCB() # connects to node (not whitelisted)
        connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
        test_node.add_connection(connections[0])
        test_node.wait_for_verack()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        # Delivering block_h1f completes the fork chain, triggering a reorg.
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()
        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))
        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)
        test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()
            test_node = NodeConnCB() # connects to node (not whitelisted)
            connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
            test_node.add_connection(connections[0])
            NetworkThread().start() # Start up network handling in another thread
            test_node.wait_for_verack()
        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
        # 9. Connect node1 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 1)
        sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
        [ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
    # Entry point: run the functional test via the framework's main().
    AcceptBlockTest().main()
| 44.748485
| 113
| 0.674748
|
4a016d58199dd31f6544d1e39db366f36abb8df3
| 1,834
|
py
|
Python
|
dataproc/list_clusters.py
|
spitfire55/python-docs-samples
|
b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed
|
[
"Apache-2.0"
] | 1
|
2019-02-07T21:26:34.000Z
|
2019-02-07T21:26:34.000Z
|
dataproc/list_clusters.py
|
spitfire55/python-docs-samples
|
b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed
|
[
"Apache-2.0"
] | 16
|
2019-06-15T00:02:56.000Z
|
2021-03-25T23:22:38.000Z
|
dataproc/list_clusters.py
|
spitfire55/python-docs-samples
|
b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed
|
[
"Apache-2.0"
] | 3
|
2019-02-11T16:16:11.000Z
|
2019-04-19T21:34:37.000Z
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Sample command-line program for listing Google Dataproc Clusters
"""
import argparse
import googleapiclient.discovery
# [START dataproc_list_clusters]
def list_clusters(dataproc, project, region):
    """Return the raw Dataproc API response listing clusters in a region.

    `dataproc` is a googleapiclient service object built by get_client().
    """
    request = dataproc.projects().regions().clusters().list(
        projectId=project, region=region)
    return request.execute()
# [END dataproc_list_clusters]
# [START dataproc_get_client]
def get_client():
    """Builds a client to the dataproc API."""
    return googleapiclient.discovery.build('dataproc', 'v1')
# [END dataproc_get_client]
def main(project_id, region):
    """List clusters in the given project and region and print the response."""
    client = get_client()
    print(list_clusters(client, project_id, region))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    # BUG FIX: a stray trailing comma previously turned this statement into a
    # throwaway one-element tuple expression (harmless at runtime, but a typo).
    parser.add_argument(
        'project_id', help='Project ID you want to access.')
    # Sets the region to "global" if it's not provided
    # Note: sub-regions (e.g.: us-central1-a/b) are currently not supported
    parser.add_argument(
        '--region', default='global', help='Region to create clusters in')
    args = parser.parse_args()
    main(args.project_id, args.region)
| 31.084746
| 75
| 0.721919
|
4a016ebe3cf7a57068b44b66692a24ee88f0bf96
| 4,031
|
py
|
Python
|
tests/tasks/tasks/instr/test_apply_mag_field_task.py
|
Exopy/ecpy_hqc_legacy
|
3e31a8865d130907a82005e6cd78d99c6da7a951
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tasks/tasks/instr/test_apply_mag_field_task.py
|
Exopy/ecpy_hqc_legacy
|
3e31a8865d130907a82005e6cd78d99c6da7a951
|
[
"BSD-3-Clause"
] | 34
|
2015-12-14T22:06:57.000Z
|
2018-02-07T08:40:47.000Z
|
tests/tasks/tasks/instr/test_apply_mag_field_task.py
|
Exopy/ecpy_hqc_legacy
|
3e31a8865d130907a82005e6cd78d99c6da7a951
|
[
"BSD-3-Clause"
] | 6
|
2018-04-20T14:48:54.000Z
|
2021-06-23T22:25:17.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tests for the ApplyMagFieldTask
"""
from multiprocessing import Event
import pytest
import enaml
from exopy.tasks.api import RootTask
from exopy.tasks.tasks.logic.loop_task import LoopTask
from exopy.testing.util import show_and_close_widget
from exopy_hqc_legacy.tasks.tasks.instr.apply_mag_field_task\
import ApplyMagFieldTask
with enaml.imports():
from exopy.tasks.tasks.logic.views.loop_view import LoopView
from exopy_hqc_legacy.tasks.tasks.instr.views.apply_mag_field_view\
import ApplyMagFieldView
from .instr_helper import (InstrHelper, InstrHelperStarter, DummyJob,
PROFILES, DRIVERS)
class TestApplyMagFieldTask(object):
    """Unit tests for ApplyMagFieldTask: check() validation and perform()."""

    def setup(self):
        """Build a RootTask with one ApplyMagFieldTask and fake instrument glue."""
        self.root = RootTask(should_stop=Event(), should_pause=Event())
        self.task = ApplyMagFieldTask(name='Test',
                                      parallel={'activated': False})
        self.root.add_child_task(0, self.task)
        # Register the fake driver and a fake profile ('Test1') whose
        # connection/settings dicts feed canned values to InstrHelper.
        self.root.run_time[DRIVERS] = {'Test': (InstrHelper,
                                                InstrHelperStarter())}
        self.root.run_time[PROFILES] =\
            {'Test1':
                {'connections': {'C': {'owner': [],
                                       'output_fluctuations': 1e-6,
                                       'heater_state': ['On', 'Off'],
                                       'fast_sweep_rate': '1.',
                                       'field_sweep_rate': '1.'}},
                 'settings': {'S': {'sweep_to_field': [DummyJob(), DummyJob(),
                                                       DummyJob()],
                                    'sweep_to_persistent_field': [DummyJob()],
                                    'read_persistent_field': [1],
                                    'check_connection': [True]}}
                 }
             }
        # This is set simply to make sure the test of InstrTask pass.
        self.task.selected_instrument = ('Test1', 'Test', 'C', 'S')

    def test_check1(self):
        """Simply test that everything is ok if field can be evaluated.
        """
        self.task.field = '3.0'
        test, traceback = self.task.check(test_instr=True)
        assert test
        assert not traceback
        assert self.task.get_from_database('Test_field') == 3.0

    def test_check2(self):
        """Check handling a wrong field.
        """
        self.task.field = '*1.0*'
        test, traceback = self.task.check(test_instr=True)
        assert not test
        assert len(traceback) == 1
        assert 'root/Test-field'in traceback
        # 0.01 is the task's default field value, left untouched on failure.
        assert self.task.get_from_database('Test_field') == 0.01

    def test_perform1(self):
        """Simple test when everything is right.
        """
        self.task.field = '2.0'
        self.root.prepare()
        self.task.perform()
        assert self.root.get_from_database('Test_field') == 2.0
@pytest.mark.ui
def test_apply_mag_field_view1(exopy_qtbot, root_view, task_workbench):
    """Check that ApplyMagFieldView displays outside of a LoopTask."""
    mag_task = ApplyMagFieldTask(name='Test')
    root_view.task.add_child_task(0, mag_task)
    view = ApplyMagFieldView(task=mag_task, root=root_view)
    show_and_close_widget(exopy_qtbot, view)
@pytest.mark.ui
def test_apply_mag_field_view2(exopy_qtbot, root_view, task_workbench):
    """Check that ApplyMagFieldView displays inside of a LoopTask."""
    mag_task = ApplyMagFieldTask(name='Test')
    loop_task = LoopTask(name='r', task=mag_task)
    root_view.task.add_child_task(0, loop_task)
    # XXX check for absence of target field
    view = LoopView(task=loop_task, root=root_view)
    show_and_close_widget(exopy_qtbot, view)
| 34.75
| 84
| 0.576532
|
4a0170139270daf55a128f60c8aa17a6e85a3c17
| 3,191
|
py
|
Python
|
ros/src/vision/scripts/species_recognition/shapeIdentification.py
|
purduerov/X12-Repo
|
33574a9a07c3512d6db3a513d13a5666f60fc1f7
|
[
"MIT"
] | 2
|
2020-01-13T17:28:59.000Z
|
2020-02-14T01:00:14.000Z
|
ros/src/vision/scripts/species_recognition/shapeIdentification.py
|
purduerov/X12-Repo
|
33574a9a07c3512d6db3a513d13a5666f60fc1f7
|
[
"MIT"
] | 2
|
2019-10-23T23:16:36.000Z
|
2020-10-10T17:52:27.000Z
|
ros/src/vision/scripts/species_recognition/shapeIdentification.py
|
purduerov/X12-Repo
|
33574a9a07c3512d6db3a513d13a5666f60fc1f7
|
[
"MIT"
] | 2
|
2020-02-15T19:00:38.000Z
|
2020-02-15T19:00:40.000Z
|
import cv2
import numpy as np
from imutils.convenience import grab_contours
# Load in input images from the web
# NOTE(review): orig_img is apparently unused below — frames come from the
# camera capture instead; confirm before removing.
orig_img = cv2.imread("images/species1.jpg")
cap = cv2.VideoCapture(0)  # default webcam
""" Program to match shapes"""
def match_shapes(orig_img):
    """Label each dark contour in orig_img with the closest-matching species.

    Reference silhouettes (species_A..D) are loaded from shape_images/ and
    compared against every detected contour using the first five Hu moments.
    Annotates orig_img in place and shows it in an OpenCV window.
    """
    # Read in the reference images for matching
    species = ["species_" + name for name in ["A", "B", "C", "D"]]
    shape_images = [cv2.cvtColor(cv2.imread("shape_images/" + image_name + ".png"), cv2.COLOR_BGR2GRAY) for image_name
                    in species]
    shape_contours = []
    img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2GRAY)
    blockKernel = np.ones((3, 3))  # Should be a very aggressive erode
    # Morphological operators to clean up the image
    _, img = cv2.threshold(img, 40, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow("Binarized Img", img)
    cv2.waitKey(1)
    img = cv2.erode(img, blockKernel)
    img = cv2.dilate(img, blockKernel)
    # Find contour for shape images that we already have
    for shape_img in shape_images:
        _, binary_img = cv2.threshold(shape_img.copy(), 200, 255, cv2.THRESH_BINARY_INV)
        contours = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        contours = grab_contours(contours)
        shape_contours.append(contours[0])  # Assume that the first contour is the desired one
    # Find contours for input image
    contours = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    contours = grab_contours(contours)
    cv2.drawContours(orig_img, contours, -1, (0, 255, 0), 1)
    for potential_shape in contours:
        probable_species = (2000, None)  # (distance, species name); 2000 means "no match yet"
        cv2.drawContours(orig_img, [potential_shape], -1, (255, 0, 0), 2)
        shape_moments = cv2.moments(potential_shape)
        hu_shape_moments = cv2.HuMoments(shape_moments)
        for shape_contour, species_name in zip(shape_contours, species):
            potential_shape_moments = cv2.moments(shape_contour)
            potential_hu_moments = cv2.HuMoments(potential_shape_moments)
            # Squared distance over the first five Hu moments
            dist = sum([(x - y) ** 2 for x, y in zip(potential_hu_moments[0:5], hu_shape_moments[0:5])])
            probable_species = (dist, species_name) if dist < probable_species[0] else probable_species
        # BUG FIX: 'break' aborted labelling of ALL remaining contours as soon
        # as one contour failed to match; skip just this contour instead.
        if not probable_species[1]:
            continue
        moments = cv2.moments(potential_shape)
        # BUG FIX: guard degenerate zero-area contours, which previously
        # raised ZeroDivisionError on the centroid computation.
        if moments["m00"] == 0:
            continue
        shape_centerX = int(moments["m10"] / moments["m00"])
        shape_centerY = int(moments["m01"] / moments["m00"])
        cv2.putText(orig_img, probable_species[1], (shape_centerX, shape_centerY), cv2.FONT_HERSHEY_COMPLEX,
                    .5, (0, 0, 255), 2)
    # Display the output
    cv2.imshow("Shapes", orig_img)
    cv2.waitKey(1)
# Main loop: grab frames from the webcam and annotate matched species.
while True:
    ret, frame = cap.read()
    match_shapes(frame)
| 38.914634
| 118
| 0.662488
|
4a01702ae29eac6e10c08ecb4175fb5a1af16099
| 535
|
py
|
Python
|
Pythonbot/broadcast_db.py
|
LEGEND-LX/PYTHONBOT.py.pkg
|
897b05528990acf76fbb2a05538429cd5d178733
|
[
"CC0-1.0"
] | 2
|
2021-09-09T06:50:21.000Z
|
2021-10-01T16:59:30.000Z
|
Pythonbot/broadcast_db.py
|
LEGEND-LX/PYTHONBOT.py.pkg
|
897b05528990acf76fbb2a05538429cd5d178733
|
[
"CC0-1.0"
] | null | null | null |
Pythonbot/broadcast_db.py
|
LEGEND-LX/PYTHONBOT.py.pkg
|
897b05528990acf76fbb2a05538429cd5d178733
|
[
"CC0-1.0"
] | null | null | null |
from userbot.database import db_x
# Mongo collection holding one document per chat subscribed to broadcasts.
broadcast_db = db_x["BROADCAST_DB"]
async def add_broadcast_chat(chat_id):
    """Subscribe a chat to broadcasts."""
    document = {"chat_id": chat_id}
    await broadcast_db.insert_one(document)
async def rmbroadcast_chat(chat_id):
    """Unsubscribe a chat from broadcasts."""
    selector = {"chat_id": chat_id}
    await broadcast_db.delete_one(selector)
async def get_all_broadcast_chats():
    """Return a list of every subscribed chat document."""
    return [chat async for chat in broadcast_db.find({})]
async def is_broadcast_chat_in_db(chat_id):
    """Return True if the chat is already subscribed to broadcasts."""
    match = await broadcast_db.find_one({"chat_id": chat_id})
    # find_one yields the document or None; collapse the if/else to a bool.
    return match is not None
| 21.4
| 57
| 0.714019
|
4a01702af83ada3657b53340258f0d04c949bbfb
| 3,032
|
py
|
Python
|
mmtrack/core/bbox/transforms.py
|
BigBen0519/mmtracking
|
61509b301ccbc2ab14f82a682b94c56f82ce09de
|
[
"Apache-2.0"
] | 2,226
|
2021-01-04T11:13:01.000Z
|
2022-03-31T11:49:59.000Z
|
mmtrack/core/bbox/transforms.py
|
BigBen0519/mmtracking
|
61509b301ccbc2ab14f82a682b94c56f82ce09de
|
[
"Apache-2.0"
] | 300
|
2021-01-04T11:36:59.000Z
|
2022-03-31T07:48:28.000Z
|
mmtrack/core/bbox/transforms.py
|
BigBen0519/mmtracking
|
61509b301ccbc2ab14f82a682b94c56f82ce09de
|
[
"Apache-2.0"
] | 333
|
2021-01-04T11:35:12.000Z
|
2022-03-31T08:11:50.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.bbox.transforms import bbox_xyxy_to_cxcywh
def quad2bbox(quad):
    """Convert quadrilateral to axis aligned box in [cx, cy, w, h] format.

    Args:
        quad (Tensor): of shape (N, 8), (8, ), (N, 4) or (4, ). The
            coordinates are in [x1, y1, x2, y2, x3, y3, x4, y4] or
            [tl_x, tl_y, br_x, br_y] format.

    Returns:
        Tensor: in [cx, cy, w, h] format.

    Raises:
        NotImplementedError: if the last dimension is neither 8 nor 4.
    """
    if len(quad.shape) == 1:
        quad = quad.unsqueeze(0)
    length = quad.shape[1]
    if length == 8:
        cx = torch.mean(quad[:, 0::2], dim=-1)
        cy = torch.mean(quad[:, 1::2], dim=-1)
        x1 = torch.min(quad[:, 0::2], dim=-1)[0]
        x2 = torch.max(quad[:, 0::2], dim=-1)[0]
        y1 = torch.min(quad[:, 1::2], dim=-1)[0]
        y2 = torch.max(quad[:, 1::2], dim=-1)[0]
        # Rescale the enclosing box so its area matches the quad's own area
        # (product of two adjacent side lengths), preserving aspect ratio.
        area1 = torch.norm(quad[:, 0:2] - quad[:, 2:4], dim=1) * \
            torch.norm(quad[:, 2:4] - quad[:, 4:6], dim=1)
        area2 = (x2 - x1) * (y2 - y1)
        scale_factor = torch.sqrt(area1 / area2)
        w = scale_factor * (x2 - x1)
        h = scale_factor * (y2 - y1)
        bbox = torch.stack((cx, cy, w, h), dim=-1).squeeze(0)
    elif length == 4:
        bbox = bbox_xyxy_to_cxcywh(quad).squeeze(0)
    else:
        # BUG FIX: the exception was constructed but never raised, so this
        # branch fell through to `return bbox` and crashed with NameError.
        raise NotImplementedError(
            f'The length of quadrilateral: {length} is not supported')
    return bbox
def bbox_cxcywh_to_x1y1wh(bbox):
    """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, w, h).

    Args:
        bbox (Tensor): Shape (n, 4) or (4, ) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    cx, cy, w, h = torch.split(bbox, 1, dim=-1)
    top_left_x = cx - w / 2
    top_left_y = cy - h / 2
    return torch.cat((top_left_x, top_left_y, w, h), dim=-1)
def bbox_xyxy_to_x1y1wh(bbox):
    """Convert bbox coordinates from (x1, y1, x2, y2) to (x1, y1, w, h).

    Args:
        bbox (Tensor): Shape (n, 4) or (4, ) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    x1, y1, x2, y2 = torch.split(bbox, 1, dim=-1)
    return torch.cat((x1, y1, x2 - x1, y2 - y1), dim=-1)
def bbox_xyxy_to_cxcyah(bboxes):
    """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, ratio, h).

    Args:
        bboxes (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted bboxes, where ratio is w / h.
    """
    x1, y1 = bboxes[:, 0], bboxes[:, 1]
    x2, y2 = bboxes[:, 2], bboxes[:, 3]
    width = x2 - x1
    height = y2 - y1
    center_x = (x1 + x2) / 2
    center_y = (y1 + y2) / 2
    return torch.stack([center_x, center_y, width / height, height], -1)
def bbox_cxcyah_to_xyxy(bboxes):
    """Convert bbox coordinates from (cx, cy, ratio, h) to (x1, y1, x2, y2).

    Args:
        bboxes (Tensor): Shape (n, 4) for bboxes, where ratio is w / h.

    Returns:
        Tensor: Converted bboxes.
    """
    cx, cy, ratio, h = torch.split(bboxes, 1, dim=-1)
    w = ratio * h
    half_w = w / 2.0
    half_h = h / 2.0
    return torch.cat([cx - half_w, cy - half_h, cx + half_w, cy + half_h],
                     dim=-1)
| 30.626263
| 76
| 0.528034
|
4a0171bd5ef87ae84f7c0fc3b03f93b5f2681e1c
| 1,157
|
py
|
Python
|
src/utils.py
|
nbip/IWAE
|
3a5e38b4d6eafceb5ec47dbe59aee3b42ad576f6
|
[
"MIT"
] | 5
|
2021-01-15T20:32:49.000Z
|
2022-01-10T18:49:30.000Z
|
src/utils.py
|
nbip/IWAE
|
3a5e38b4d6eafceb5ec47dbe59aee3b42ad576f6
|
[
"MIT"
] | 7
|
2021-01-08T18:04:39.000Z
|
2021-02-05T18:49:17.000Z
|
src/utils.py
|
nbip/IWAE
|
3a5e38b4d6eafceb5ec47dbe59aee3b42ad576f6
|
[
"MIT"
] | 3
|
2021-06-03T15:30:25.000Z
|
2022-03-30T15:12:35.000Z
|
import tensorflow as tf
import numpy as np
from tensorflow import keras
def logmeanexp(log_w, axis):
    """Numerically stable log(mean(exp(log_w))) along `axis`.

    Subtracts the per-axis max before exponentiating to avoid overflow,
    then adds it back after the log.
    """
    # Renamed from `max`, which shadowed the builtin.
    log_w_max = tf.reduce_max(log_w, axis=axis)
    return tf.math.log(
        tf.reduce_mean(tf.exp(log_w - log_w_max), axis=axis)) + log_w_max
def get_bias():
    # ---- Initialize the final Bernoulli layer bias for p(x|z) from the
    # ---- MNIST training-set pixel means (inverse sigmoid of clipped mean).
    (Xtrain, _), (_, _) = keras.datasets.mnist.load_data()
    n_train = Xtrain.shape[0]
    # ---- flatten the images to vectors and rescale to [0, 1]
    Xtrain = Xtrain.reshape(n_train, -1) / 255
    pixel_mean = np.mean(Xtrain, axis=0)
    bias = -np.log(1. / np.clip(pixel_mean, 0.001, 0.999) - 1.)
    return tf.constant_initializer(bias)
def bernoullisample(x):
    """Draw element-wise Bernoulli samples with probabilities x, as float32."""
    draws = np.random.binomial(1, x, size=x.shape)
    return draws.astype('float32')
class MyMetric():
    """Accumulates batches of per-example losses and reports their mean.

    Keeping each batch and its size lets result() weight every example
    equally regardless of batch size.
    """
    def __init__(self):
        self.VALUES = []
        self.N = []

    def update_state(self, losses):
        # Record both the raw batch and how many examples it contained.
        self.N.append(losses.shape[0])
        self.VALUES.append(losses)

    def result(self):
        total = tf.reduce_sum(tf.concat(self.VALUES, axis=0))
        count = tf.cast(tf.reduce_sum(self.N), tf.float32)
        return total / count

    def reset_states(self):
        self.VALUES = []
        self.N = []
| 25.152174
| 81
| 0.634399
|
4a0173fdfe521169efdd0f32c29305e861b4fa10
| 3,156
|
py
|
Python
|
examples/pointnet++.py
|
DL-85/pytorch_geometric
|
eb12a94a667e881c4a6bff26b0453428bcb72393
|
[
"MIT"
] | 2
|
2019-10-10T07:01:07.000Z
|
2020-11-04T06:26:42.000Z
|
examples/pointnet++.py
|
cloudyyyyy/pytorch_geometric
|
61d389b5f8ee700dda4d18cadca72f24c978fce1
|
[
"MIT"
] | null | null | null |
examples/pointnet++.py
|
cloudyyyyy/pytorch_geometric
|
61d389b5f8ee700dda4d18cadca72f24c978fce1
|
[
"MIT"
] | 1
|
2019-07-31T16:31:20.000Z
|
2019-07-31T16:31:20.000Z
|
import os.path as osp
import torch
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_geometric.datasets import ModelNet
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.nn import PointConv, fps, radius
# Dataset location: <repo root>/data/ModelNet10
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data/ModelNet10')
# Normalize meshes to the unit sphere, then sample 1024 points per mesh.
pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
train_dataset = ModelNet(path, '10', True, transform, pre_transform)
test_dataset = ModelNet(path, '10', False, transform, pre_transform)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
class Net(torch.nn.Module):
    """PointNet++-style classifier: two local set-abstraction stages
    followed by a global MLP and a three-layer classification head."""

    def __init__(self):
        super(Net, self).__init__()
        # First set abstraction: raw xyz (3) -> 128-d local features.
        self.local_sa1 = PointConv(
            Seq(Lin(3, 64), ReLU(), Lin(64, 64), ReLU(), Lin(64, 128)))
        # Second set abstraction: 128 features + xyz (131) -> 256-d.
        self.local_sa2 = PointConv(
            Seq(Lin(131, 128), ReLU(), Lin(128, 128), ReLU(), Lin(128, 256)))
        # Global stage: 256 features + xyz (259) -> 1024-d.
        self.global_sa = Seq(
            Lin(259, 256), ReLU(), Lin(256, 512), ReLU(), Lin(512, 1024))
        self.lin1 = Lin(1024, 512)
        self.lin2 = Lin(512, 256)
        self.lin3 = Lin(256, 10)  # 10 ModelNet10 classes

    def forward(self, data):
        pos, batch = data.pos, data.batch
        # Downsample via farthest point sampling, then group neighbors
        # within radius 0.1 (at most 64 per sampled center).
        idx = fps(pos, batch, ratio=0.5)  # 512 points
        row, col = radius(
            pos, pos[idx], 0.1, batch, batch[idx], max_num_neighbors=64)
        edge_index = torch.stack([col, row], dim=0)  # Transpose.
        x = F.relu(self.local_sa1(None, (pos, pos[idx]), edge_index))
        pos, batch = pos[idx], batch[idx]
        # Repeat one level deeper with a larger radius.
        idx = fps(pos, batch, ratio=0.25)  # 128 points
        row, col = radius(
            pos, pos[idx], 0.2, batch, batch[idx], max_num_neighbors=64)
        edge_index = torch.stack([col, row], dim=0)  # Transpose.
        x = F.relu(self.local_sa2(x, (pos, pos[idx]), edge_index))
        pos, batch = pos[idx], batch[idx]
        # Lift concatenated (features, coords) to 1024-d, then max-pool
        # over the 128 remaining points of each sample.
        x = self.global_sa(torch.cat([x, pos], dim=1))
        x = x.view(-1, 128, self.lin1.in_features).max(dim=1)[0]
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin3(x)
        return F.log_softmax(x, dim=-1)
# Train on GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
def train(epoch):
    """Run one optimization epoch over train_loader."""
    model.train()
    for batch in train_loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        F.nll_loss(model(batch), batch.y).backward()
        optimizer.step()
def test(loader):
    """Return the classification accuracy of `model` over `loader`."""
    model.eval()
    hits = 0
    for batch in loader:
        batch = batch.to(device)
        with torch.no_grad():
            predictions = model(batch).max(1)[1]
        hits += predictions.eq(batch.y).sum().item()
    return hits / len(loader.dataset)
# Train for 200 epochs, reporting test accuracy after each one.
for epoch in range(1, 201):
    train(epoch)
    test_acc = test(test_loader)
    print('Epoch: {:02d}, Test: {:.4f}'.format(epoch, test_acc))
| 32.875
| 77
| 0.619138
|
4a0174895f56546c92ccd87e792af476750db061
| 1,552
|
py
|
Python
|
dev/fix_maxmx.py
|
navravi/amrclaw
|
727d98d243c521267c927f6fe107ba6f1155597b
|
[
"BSD-3-Clause"
] | 16
|
2015-05-27T08:16:09.000Z
|
2022-01-21T06:36:24.000Z
|
dev/fix_maxmx.py
|
navravi/amrclaw
|
727d98d243c521267c927f6fe107ba6f1155597b
|
[
"BSD-3-Clause"
] | 107
|
2015-01-02T19:51:43.000Z
|
2021-11-24T03:35:32.000Z
|
dev/fix_maxmx.py
|
BrisaDavis/amrclaw
|
c5cacdf00f1959e160ea5616cdf6ea7b6cd374f3
|
[
"BSD-3-Clause"
] | 28
|
2015-01-10T00:03:56.000Z
|
2022-02-11T23:52:34.000Z
|
# Script used to get rid of maxmx and maxmy dependencies when converting
# from 4.x to 5.0 form. Executed in library and application directories.
#
# Fix a set of target files in directory tree rootdir by replacing
# oldpat with newpat.
#
# Now supports wildcards in list of targetfiles.
#
from __future__ import absolute_import
from __future__ import print_function
import os,sys,glob
from six.moves import zip
# Walk the tree under rootdir and rewrite every Fortran-ish file, replacing
# each old pattern with its positional counterpart in newpat_list.
rootdir = '.'
targetfiles = ['*.f*']
oldpat_list = ["1-mbc:maxmx", "1-mbc:maxmy", "maxmx,maxmy,"]
newpat_list = ["1-mbc:mx", "1-mbc:my", ""]
for oldpat, newpat in zip(oldpat_list, newpat_list):
    print("============================================")
    print('Replacing "%s" with "%s"' % (oldpat, newpat))
    print("============================================")
    for (dirpath, subdirs, files) in os.walk(rootdir):
        currentdir = os.path.abspath(os.getcwd())
        os.chdir(os.path.abspath(dirpath))
        tfiles = []
        for fpat in targetfiles:
            tfiles.extend(glob.glob(fpat))
        # Renamed loop variable from `file`, which shadowed the builtin.
        for fname in tfiles:
            # `with` guarantees the handle is closed even if reading fails.
            with open(fname, 'r') as infile:
                lines = infile.read()
            if lines.find(oldpat) > -1:
                lines = lines.replace(oldpat, newpat)
                print("Fixed file ", dirpath + '/' + fname)
            else:
                print("No change to ", dirpath + '/' + fname)
            # The original rewrites the file whether or not it changed; kept.
            with open(fname, 'w') as outfile:
                outfile.write(lines)
        os.chdir(currentdir)
| 29.846154
| 73
| 0.557345
|
4a01757afdbfad2e9ece92daadb0024f832d06c2
| 5,253
|
py
|
Python
|
temboo/core/Library/Salesforce/Passwords/GetPasswordInfo.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Salesforce/Passwords/GetPasswordInfo.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Salesforce/Passwords/GetPasswordInfo.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetPasswordInfo
# Gets information on a user's password.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetPasswordInfo(Choreography):
    """Choreo wrapper for the Salesforce GetPasswordInfo endpoint."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetPasswordInfo Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetPasswordInfo, self).__init__(temboo_session, '/Library/Salesforce/Passwords/GetPasswordInfo')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return GetPasswordInfoInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific result set.
        return GetPasswordInfoResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle representing one execution of this Choreo.
        return GetPasswordInfoChoreographyExecution(session, exec_id, path)
class GetPasswordInfoInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetPasswordInfo
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter records one named input. ID and InstanceName are required;
    # either AccessToken or the ClientID/ClientSecret/RefreshToken trio must
    # also be supplied (see the individual docstrings).

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(GetPasswordInfoInputSet, self)._set_input('AccessToken', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Salesforce. Required unless providing a valid AccessToken.)
        """
        super(GetPasswordInfoInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Salesforce. Required unless providing a valid AccessToken.)
        """
        super(GetPasswordInfoInputSet, self)._set_input('ClientSecret', value)

    def set_ID(self, value):
        """
        Set the value of the ID input for this Choreo. ((required, string) The ID of the user you're getting info for.)
        """
        super(GetPasswordInfoInputSet, self)._set_input('ID', value)

    def set_InstanceName(self, value):
        """
        Set the value of the InstanceName input for this Choreo. ((required, string) The server url prefix that indicates which instance your Salesforce account is on (e.g. na1, na2, na3, etc).)
        """
        super(GetPasswordInfoInputSet, self)._set_input('InstanceName', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(GetPasswordInfoInputSet, self)._set_input('RefreshToken', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
        """
        super(GetPasswordInfoInputSet, self)._set_input('ResponseFormat', value)
class GetPasswordInfoResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetPasswordInfo Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; kept
        # as-is since renaming could break keyword-argument callers.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Salesforce.)
        """
        return self._output.get('Response', None)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)
class GetPasswordInfoChoreographyExecution(ChoreographyExecution):
    """Execution handle that binds results to GetPasswordInfoResultSet."""
    def _make_result_set(self, response, path):
        return GetPasswordInfoResultSet(response, path)
| 44.516949
| 254
| 0.692747
|
4a017853ac9eb3a5d9d6118b8bdb75e0333cc2c7
| 174
|
py
|
Python
|
human_services/phone_at_location/admin.py
|
DarwishMenna/pathways-backend
|
e9825e0373c586ce8f07ee8b70aecc7de679fb41
|
[
"BSD-3-Clause"
] | 12
|
2017-08-30T18:21:00.000Z
|
2021-12-09T04:04:17.000Z
|
human_services/phone_at_location/admin.py
|
DarwishMenna/pathways-backend
|
e9825e0373c586ce8f07ee8b70aecc7de679fb41
|
[
"BSD-3-Clause"
] | 424
|
2017-08-08T18:32:14.000Z
|
2022-03-30T21:42:51.000Z
|
human_services/phone_at_location/admin.py
|
DarwishMenna/pathways-backend
|
e9825e0373c586ce8f07ee8b70aecc7de679fb41
|
[
"BSD-3-Clause"
] | 7
|
2017-09-29T21:14:37.000Z
|
2019-12-30T21:07:37.000Z
|
from django.contrib import admin
from human_services.phone_at_location import models
# Expose the phone-number models in the Django admin site.
admin.site.register(models.PhoneNumberType)
admin.site.register(models.PhoneAtLocation)
| 29
| 51
| 0.867816
|
4a017899951f38d0d29fb6245b40c6052c0d800a
| 1,670
|
py
|
Python
|
agent/main.py
|
Danieldevop/Cryptongo
|
8f3cf92563497aa49bbc3b926e8ada4eaabdc85a
|
[
"MIT"
] | null | null | null |
agent/main.py
|
Danieldevop/Cryptongo
|
8f3cf92563497aa49bbc3b926e8ada4eaabdc85a
|
[
"MIT"
] | null | null | null |
agent/main.py
|
Danieldevop/Cryptongo
|
8f3cf92563497aa49bbc3b926e8ada4eaabdc85a
|
[
"MIT"
] | null | null | null |
import requests
import pymongo
# CoinMarketCap v1 ticker endpoint (returns a JSON list of currencies).
API_URL = 'https://api.coinmarketcap.com/v1/ticker/'
def get_db_connection(uri):
    """Return a handle to the 'cryptongo' database at the given MongoDB URI."""
    return pymongo.MongoClient(uri).cryptongo
def get_cryptocurrencies_from_api():
    """Fetch the ticker list from the API; raise on any non-200 reply."""
    response = requests.get(API_URL)
    if response.status_code != 200:
        raise Exception('Api Error')
    return response.json()
def get_hash(value):
    """Return the hex-encoded SHA-512 digest of a string."""
    from hashlib import sha512
    digest = sha512(value.encode('utf-8'))
    return digest.hexdigest()
def first_element(elements):
    """Return the first item of a sequence (used as a sort-key function)."""
    return elements[0]
def get_ticker_hash(ticker_data):
    """Build a stable SHA-512 fingerprint of a ticker dict.

    Items are ordered by key so two dicts with identical content hash the
    same regardless of insertion order.
    """
    # sorted() already yields a deterministic order; the intermediate
    # OrderedDict of the original added nothing. Joining replaces the
    # quadratic string `+=` loop.
    ordered_items = sorted(ticker_data.items(), key=first_element)
    ticker_value = ''.join(str(value) for _, value in ordered_items)
    return get_hash(ticker_value)
def check_if_exist(db_connection, ticker_data):
    """Return True if a ticker with the same content hash is already stored."""
    ticker_hash = get_ticker_hash(ticker_data)
    # find_one returns the document or None; collapse the if/else to a bool.
    return db_connection.tickers.find_one({'ticker_hash': ticker_hash}) is not None
def save_ticker(db_connection, ticker_data=None):
    """Insert one ticker document unless it is missing or already stored.

    NOTE(review): mutates the caller's dict in place (adds 'ticker_hash',
    coerces 'rank' and 'last_updated' to int) before inserting.
    Returns True when a document was inserted, False otherwise.
    """
    if not ticker_data or check_if_exist(db_connection, ticker_data):
        return False
    ticker_data['ticker_hash'] = get_ticker_hash(ticker_data)
    ticker_data['rank'] = int(ticker_data['rank'])
    ticker_data['last_updated'] = int(ticker_data['last_updated'])
    db_connection.tickers.insert_one(ticker_data)
    return True
if __name__ == '__main__':
    # Fetch the current tickers and persist the ones not yet stored.
    connection = get_db_connection('mongodb://localhost:27017/')
    tickers = get_cryptocurrencies_from_api()
    for ticker in tickers:
        save_ticker(connection, ticker)
    print("Tickers almacenados")
| 22.876712
| 66
| 0.726946
|
4a0178b5eaa1c6f07744ca28bd489a2ff8b33025
| 5,461
|
py
|
Python
|
axis/param_cgi.py
|
Lokaltog/axis
|
f602ef8089ed0332317274e0433f4ede75109533
|
[
"MIT"
] | null | null | null |
axis/param_cgi.py
|
Lokaltog/axis
|
f602ef8089ed0332317274e0433f4ede75109533
|
[
"MIT"
] | null | null | null |
axis/param_cgi.py
|
Lokaltog/axis
|
f602ef8089ed0332317274e0433f4ede75109533
|
[
"MIT"
] | null | null | null |
"""Axis Vapix parameter management.
https://www.axis.com/vapix-library/#/subjects/t10037719/section/t10036014
Lists Brand, Ports, Properties, Stream profiles.
"""
from .api import APIItem, APIItems
from .stream_profiles import StreamProfile
PROPERTY = "Properties.API.HTTP.Version=3"
# param.cgi endpoint; GROUP is appended to restrict a request to one group.
URL = "/axis-cgi/param.cgi"
URL_GET = URL + "?action=list"
GROUP = "&group={group}"
# Parameter-group prefixes used as key prefixes throughout Params.
BRAND = "root.Brand"
INPUT = "root.Input"
IOPORT = "root.IOPort"
OUTPUT = "root.Output"
PROPERTIES = "root.Properties"
STREAM_PROFILES = "root.StreamProfile"
class Params(APIItems):
"""Represents all parameters of param.cgi."""
def __init__(self, request: object) -> None:
super().__init__("", request, URL_GET, APIItem)
def process_raw(self, raw: str) -> None:
"""Pre-process raw string.
Prepare parameters to work with APIItems.
"""
raw_params = dict(group.split("=", 1) for group in raw.splitlines())
super().process_raw(raw_params)
# Brand
def update_brand(self) -> None:
"""Update brand group of parameters."""
self.update(path=URL_GET + GROUP.format(group=BRAND))
@property
def brand(self) -> str:
return self[f"{BRAND}.Brand"].raw
@property
def prodfullname(self) -> str:
return self[f"{BRAND}.ProdFullName"].raw
@property
def prodnbr(self) -> str:
return self[f"{BRAND}.ProdNbr"].raw
@property
def prodshortname(self) -> str:
return self[f"{BRAND}.ProdShortName"].raw
@property
def prodtype(self) -> str:
return self[f"{BRAND}.ProdType"].raw
@property
def prodvariant(self) -> str:
return self[f"{BRAND}.ProdVariant"].raw
@property
def weburl(self) -> str:
return self[f"{BRAND}.WebURL"].raw
# Ports
def update_ports(self) -> None:
"""Update port groups of parameters."""
self.update(path=URL_GET + GROUP.format(group=INPUT))
self.update(path=URL_GET + GROUP.format(group=IOPORT))
self.update(path=URL_GET + GROUP.format(group=OUTPUT))
@property
def nbrofinput(self) -> int:
"""Match the number of configured inputs."""
return self[f"{INPUT}.NbrOfInputs"].raw
@property
def nbrofoutput(self) -> int:
"""Match the number of configured outputs."""
return self[f"{OUTPUT}.NbrOfOutputs"].raw
@property
def ports(self) -> dict:
"""Create a smaller dictionary containing all ports."""
return {param: self[param].raw for param in self if param.startswith(IOPORT)}
# Properties
def update_properties(self) -> None:
"""Update properties group of parameters."""
self.update(path=URL_GET + GROUP.format(group=PROPERTIES))
@property
def api_http_version(self) -> str:
return self[f"{PROPERTIES}.API.HTTP.Version"].raw
@property
def api_metadata(self) -> str:
return self[f"{PROPERTIES}.API.Metadata.Metadata"].raw
@property
def api_metadata_version(self) -> str:
return self[f"{PROPERTIES}.API.Metadata.Version"].raw
@property
def embedded_development(self) -> str:
"""VAPIX® Application API is supported.
Application list.cgi supported if => 1.20.
"""
return self[f"{PROPERTIES}.EmbeddedDevelopment.Version"].raw
@property
def firmware_builddate(self) -> str:
return self[f"{PROPERTIES}.Firmware.BuildDate"].raw
@property
def firmware_buildnumber(self) -> str:
return self[f"{PROPERTIES}.Firmware.BuildNumber"].raw
@property
def firmware_version(self) -> str:
return self[f"{PROPERTIES}.Firmware.Version"].raw
@property
def image_format(self) -> str:
if f"{PROPERTIES}.Image.Format" in self:
return self[f"{PROPERTIES}.Image.Format"].raw
return None
@property
def image_nbrofviews(self) -> str:
return self[f"{PROPERTIES}.Image.NbrOfViews"].raw
@property
def image_resolution(self) -> str:
return self[f"{PROPERTIES}.Image.Resolution"].raw
@property
def image_rotation(self) -> str:
return self[f"{PROPERTIES}.Image.Rotation"].raw
@property
def light_control(self) -> bool:
light_control = f"{PROPERTIES}.LightControl.LightControl2"
if light_control not in self:
return False
return self[light_control].raw == "yes"
@property
def system_serialnumber(self) -> str:
return self[f"{PROPERTIES}.System.SerialNumber"].raw
# Stream profiles
    def update_stream_profiles(self) -> None:
        """Update the stream profiles group of parameters."""
        self.update(path=URL_GET + GROUP.format(group=STREAM_PROFILES))
    def stream_profiles(self) -> list:
        """Return a list of stream profiles.

        Reads ``MaxGroups`` to learn how many profile slots exist, then
        builds a ``StreamProfile`` per configured slot.  Slots are assumed
        to be numbered contiguously from ``S0``; the ``KeyError`` guard
        stops collection at the first missing slot.
        NOTE(review): siblings are ``@property`` but this is a plain
        method — callers must invoke it; confirm that is intentional.
        """
        profiles = []
        length = 0
        if f"{STREAM_PROFILES}.MaxGroups" in self:
            length = int(self[f"{STREAM_PROFILES}.MaxGroups"].raw)
        try:
            for nbr in range(length):
                raw = {
                    "name": self[f"{STREAM_PROFILES}.S{nbr}.Name"].raw,
                    "description": self[f"{STREAM_PROFILES}.S{nbr}.Description"].raw,
                    "parameters": self[f"{STREAM_PROFILES}.S{nbr}.Parameters"].raw,
                }
                profiles.append(StreamProfile(raw["name"], raw, self._request))
        except KeyError:
            # Fewer profiles are configured than MaxGroups allows.
            pass
        return profiles
| 28.89418
| 85
| 0.626808
|
4a0179031ae02d4606673510d7dae27a947dffca
| 6,163
|
py
|
Python
|
lib/modeling/matching.py
|
Min-Sheng/CA_FSIS_Cell
|
c24750d860a9417b30819c05613282cd74dc517f
|
[
"MIT"
] | null | null | null |
lib/modeling/matching.py
|
Min-Sheng/CA_FSIS_Cell
|
c24750d860a9417b30819c05613282cd74dc517f
|
[
"MIT"
] | 1
|
2021-03-01T09:16:15.000Z
|
2021-03-01T09:34:49.000Z
|
lib/modeling/matching.py
|
Min-Sheng/CA_FSIS_Cell
|
c24750d860a9417b30819c05613282cd74dc517f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import nn as mynn
class l1_distance_match_block(nn.Module):
    """Match ``detect`` features against an ``aim`` prototype via L1 distance.

    The aim map is globally average-pooled into a per-channel prototype;
    the element-wise |detect - prototype| map is concatenated with the
    detect features and fused back to the input width by a 1x1 conv.
    """

    def __init__(self, inplanes):
        super(l1_distance_match_block, self).__init__()
        self.in_channels = inplanes
        self.globalAvgPool = nn.AdaptiveAvgPool2d(1)
        self.conv1x1 = nn.Conv2d(
            in_channels=self.in_channels * 2,
            out_channels=self.in_channels,
            kernel_size=1,
            stride=1,
            padding=0,
        )

    def forward(self, detect, aim):
        # Per-channel prototype of the aim features, shape (N, C, 1, 1).
        prototype = self.globalAvgPool(aim)
        # The prototype broadcasts over the spatial positions of detect.
        distance = torch.abs(detect - prototype)
        fused = self.conv1x1(torch.cat([detect, distance], 1))
        # Same 4-tuple signature as match_block.forward.
        return fused, fused, aim, None
class match_block(nn.Module):
    """Non-local style cross-attention between ``detect`` and ``aim`` features.

    Mutual attention is computed between the two maps; the attended
    responses are added back residually, and both results are reweighted
    by a channel gate derived from the enhanced aim features.
    """

    def __init__(self, inplanes):
        super(match_block, self).__init__()
        self.sub_sample = False
        self.in_channels = inplanes
        self.inter_channels = None
        # Default the embedding width to half the input channels (min 1).
        if self.inter_channels is None:
            self.inter_channels = self.in_channels // 2
        if self.inter_channels == 0:
            self.inter_channels = 1
        conv_nd = nn.Conv2d
        bn = nn.BatchNorm2d
        # NOTE: the original built an unused local ``nn.MaxPool2d((2, 2))``;
        # it was never stored or applied, so it has been removed
        # (MaxPool2d holds no parameters, so state_dict is unchanged).
        # Shared value embedding for both feature maps.
        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)
        # Output projections; BN weights start at 0 so the attention path
        # initially contributes nothing (pure residual).
        self.W = nn.Sequential(
            conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                    kernel_size=1, stride=1, padding=0),
            bn(self.in_channels)
        )
        nn.init.constant_(self.W[1].weight, 0)
        nn.init.constant_(self.W[1].bias, 0)
        self.Q = nn.Sequential(
            conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                    kernel_size=1, stride=1, padding=0),
            bn(self.in_channels)
        )
        nn.init.constant_(self.Q[1].weight, 0)
        nn.init.constant_(self.Q[1].bias, 0)
        # Query/key embeddings.
        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)
        # NOTE(review): ``concat_project`` and ``globalAvgPool`` are never
        # used by forward(); kept for checkpoint/state_dict compatibility.
        self.concat_project = nn.Sequential(
            nn.Conv2d(self.inter_channels * 2, 1, 1, 1, 0, bias=False),
            nn.ReLU()
        )
        self.ChannelGate = ChannelGate(self.in_channels)
        self.globalAvgPool = nn.AdaptiveAvgPool2d(1)

    def detectron_weight_mapping(self):
        """No Detectron-pretrained weights map onto this block."""
        mapping = {}
        orphan_in_detectron = []
        return mapping, orphan_in_detectron

    def forward(self, detect, aim):
        batch_size, channels, height_a, width_a = aim.shape
        batch_size, channels, height_d, width_d = detect.shape
        ##################### find aim image similar object #####################
        # Value embeddings flattened to (N, HW, C').
        d_x = self.g(detect).view(batch_size, self.inter_channels, -1)
        d_x = d_x.permute(0, 2, 1).contiguous()
        a_x = self.g(aim).view(batch_size, self.inter_channels, -1)
        a_x = a_x.permute(0, 2, 1).contiguous()
        theta_x = self.theta(aim).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(detect).view(batch_size, self.inter_channels, -1)
        # Affinity between every aim position and every detect position,
        # normalized by the respective position count instead of softmax.
        f = torch.matmul(theta_x, phi_x)
        N = f.size(-1)
        f_div_C = f / N
        f = f.permute(0, 2, 1).contiguous()
        N = f.size(-1)
        fi_div_C = f / N
        # Aim features enhanced by attending over detect positions.
        non_aim = torch.matmul(f_div_C, d_x)
        non_aim = non_aim.permute(0, 2, 1).contiguous()
        non_aim = non_aim.view(batch_size, self.inter_channels, height_a, width_a)
        non_aim = self.W(non_aim)
        non_aim = non_aim + aim
        # Detect features enhanced by attending over aim positions.
        non_det = torch.matmul(fi_div_C, a_x)
        non_det = non_det.permute(0, 2, 1).contiguous()
        non_det = non_det.view(batch_size, self.inter_channels, height_d, width_d)
        non_det = self.Q(non_det)
        non_det = non_det + detect
        ##################### response in channel weight #####################
        c_weight = self.ChannelGate(non_aim)
        act_aim = non_aim * c_weight
        act_det = non_det * c_weight
        return non_det, act_det, act_aim, c_weight
class Flatten(nn.Module):
    """Flatten all dimensions after the batch dimension."""

    def forward(self, tensor):
        batch = tensor.size(0)
        return tensor.view(batch, -1)
class ChannelGate(nn.Module):
    """CBAM-style channel attention gate.

    Pools the input over its spatial extent (one pooled vector per
    configured ``pool_type``), pushes each through a shared bottleneck
    MLP, sums the results and squashes them with a sigmoid into a
    per-channel scale factor of shape (N, C, 1, 1).
    """
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        # Shared bottleneck MLP: C -> C/r -> C.
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
        )
        self.pool_types = pool_types
    def forward(self, x):
        # NOTE(review): an unrecognized pool_type would leave
        # channel_att_raw unbound on the first iteration — pool_types is
        # assumed to contain only the handled names.
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type=='avg':
                avg_pool = F.avg_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( avg_pool )
            elif pool_type=='max':
                max_pool = F.max_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( max_pool )
            elif pool_type=='lp':
                lp_pool = F.lp_pool2d( x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp( lp_pool )
            elif pool_type=='lse':
                # LSE pool only
                # NOTE(review): logsumexp_2d is not defined in this view;
                # presumably provided elsewhere in the module — confirm.
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp( lse_pool )
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        # Broadcastable (N, C, 1, 1) gate in (0, 1).
        scale = torch.sigmoid( channel_att_sum ).unsqueeze(2).unsqueeze(3)
        return scale
| 37.126506
| 127
| 0.583158
|
4a0179c323f4177511e9f22e88c1da56f40360d4
| 9,351
|
py
|
Python
|
Main Code.py
|
OmarHanyOMH/Text-Editor
|
d786ad451e32b380682dd155f85b4d0139e6ceaf
|
[
"BSL-1.0"
] | 1
|
2021-04-17T13:40:47.000Z
|
2021-04-17T13:40:47.000Z
|
Main Code.py
|
OmarHanyOMH/Text-Editor
|
d786ad451e32b380682dd155f85b4d0139e6ceaf
|
[
"BSL-1.0"
] | null | null | null |
Main Code.py
|
OmarHanyOMH/Text-Editor
|
d786ad451e32b380682dd155f85b4d0139e6ceaf
|
[
"BSL-1.0"
] | null | null | null |
# while True:
# out = input("enter code : ")
# while True:
# code = exec(out)
# Importing Required libraries & Modules
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
# Defining TextEditor Class
class TextEditor:
    """A minimal Tkinter text editor.

    Provides new/open/save/save-as, clipboard operations, a crude
    reload-from-disk "undo", keyboard shortcuts, and title/status bars.
    """
    # Defining Constructor
    def __init__(self,root):
        """Build the whole UI (title bar, menus, text area, status bar)."""
        # Assigning root
        self.root = root
        # Title of the window
        self.root.title("TEXT EDITOR")
        # Window Geometry
        self.root.geometry("1200x700+200+150")
        # Initializing filename
        self.filename = None
        # Declaring Title variable
        self.title = StringVar()
        # Declaring Status variable
        self.status = StringVar()
        # Creating Titlebar
        self.titlebar = Label(self.root,textvariable=self.title,font=("times new roman",15,"bold"),bd=2,relief=GROOVE)
        # Packing Titlebar to root window
        self.titlebar.pack(side=TOP,fill=BOTH)
        # Calling Settitle Function
        self.settitle()
        # Creating Statusbar
        self.statusbar = Label(self.root,textvariable=self.status,font=("times new roman",15,"bold"),bd=2,relief=GROOVE)
        # Packing status bar to root window
        self.statusbar.pack(side=BOTTOM,fill=BOTH)
        # Initializing Status
        self.status.set("Welcome To Text Editor")
        # Creating Menubar
        self.menubar = Menu(self.root,font=("times new roman",15,"bold"),activebackground="skyblue")
        # Configuring menubar on root window
        self.root.config(menu=self.menubar)
        # Creating File Menu
        self.filemenu = Menu(self.menubar,font=("times new roman",12,"bold"),activebackground="skyblue",tearoff=0)
        # Adding New file Command
        self.filemenu.add_command(label="New",accelerator="Ctrl+N",command=self.newfile)
        # Adding Open file Command
        self.filemenu.add_command(label="Open",accelerator="Ctrl+O",command=self.openfile)
        # Adding Save File Command
        self.filemenu.add_command(label="Save",accelerator="Ctrl+S",command=self.savefile)
        # Adding Save As file Command
        self.filemenu.add_command(label="Save As",accelerator="Ctrl+A",command=self.saveasfile)
        # Adding Seprator
        self.filemenu.add_separator()
        # Adding Exit window Command
        self.filemenu.add_command(label="Exit",accelerator="Ctrl+E",command=self.exit)
        # Cascading filemenu to menubar
        self.menubar.add_cascade(label="File", menu=self.filemenu)
        # Creating Edit Menu
        self.editmenu = Menu(self.menubar,font=("times new roman",12,"bold"),activebackground="skyblue",tearoff=0)
        # Adding Cut text Command
        self.editmenu.add_command(label="Cut",accelerator="Ctrl+X",command=self.cut)
        # Adding Copy text Command
        self.editmenu.add_command(label="Copy",accelerator="Ctrl+C",command=self.copy)
        # Adding Paste text command
        self.editmenu.add_command(label="Paste",accelerator="Ctrl+V",command=self.paste)
        # Adding Seprator
        self.editmenu.add_separator()
        # Adding Undo text Command
        self.editmenu.add_command(label="Undo",accelerator="Ctrl+U",command=self.undo)
        # Cascading editmenu to menubar
        self.menubar.add_cascade(label="Edit", menu=self.editmenu)
        # Creating Help Menu
        self.helpmenu = Menu(self.menubar,font=("times new roman",12,"bold"),activebackground="skyblue",tearoff=0)
        # Adding About Command
        self.helpmenu.add_command(label="About",command=self.infoabout)
        # Cascading helpmenu to menubar
        self.menubar.add_cascade(label="Help", menu=self.helpmenu)
        # Creating Scrollbar
        scrol_y = Scrollbar(self.root,orient=VERTICAL)
        # Creating Text Area
        self.txtarea = Text(self.root,yscrollcommand=scrol_y.set,font=("times new roman",15,"bold"),state="normal",relief=GROOVE)
        # Packing scrollbar to root window
        scrol_y.pack(side=RIGHT,fill=Y)
        # Adding Scrollbar to text area
        scrol_y.config(command=self.txtarea.yview)
        # Packing Text Area to root window
        self.txtarea.pack(fill=BOTH,expand=1)
        # Calling shortcuts funtion
        self.shortcuts()
    # Defining settitle function
    def settitle(self):
        """Show the current filename (or "Untitled") in the title bar."""
        # Checking if Filename is not None
        if self.filename:
            # Updating Title as filename
            self.title.set(self.filename)
        else:
            # Updating Title as Untitled
            self.title.set("Untitled")
    # Defining New file Function
    def newfile(self,*args):
        """Clear the editor and start an unnamed document."""
        # Clearing the Text Area
        self.txtarea.delete("1.0",END)
        # Updating filename as None
        self.filename = None
        # Calling settitle funtion
        self.settitle()
        # updating status
        self.status.set("New File Created")
    # Defining Open File Funtion
    def openfile(self,*args):
        """Ask for a file and load its contents into the text area."""
        # Exception handling
        try:
            # Asking for file to open
            self.filename = filedialog.askopenfilename(title = "Select file",filetypes = (("All Files","*.*"),("Text Files","*.txt"),("Python Files","*.py")))
            # checking if filename not none
            if self.filename:
                # opening file in readmode
                infile = open(self.filename,"r")
                # Clearing text area
                self.txtarea.delete("1.0",END)
                # Inserting data Line by line into text area
                for line in infile:
                    self.txtarea.insert(END,line)
                # Closing the file
                infile.close()
                # Calling Set title
                self.settitle()
                # Updating Status
                self.status.set("Opened Successfully")
        except Exception as e:
            messagebox.showerror("Exception",e)
    # Defining Save File Funtion
    def savefile(self,*args):
        """Write the text area to the current file; falls back to Save As."""
        # Exception handling
        try:
            # checking if filename not none
            if self.filename:
                # Reading the data from text area
                data = self.txtarea.get("1.0",END)
                # opening File in write mode
                outfile = open(self.filename,"w")
                # Writing Data into file
                outfile.write(data)
                # Closing File
                outfile.close()
                # Calling Set title
                self.settitle()
                # Updating Status
                self.status.set("Saved Successfully")
            else:
                self.saveasfile()
        except Exception as e:
            messagebox.showerror("Exception",e)
    # Defining Save As File Funtion
    def saveasfile(self,*args):
        """Ask for a target path, write the buffer there and adopt the name."""
        # Exception handling
        try:
            # Asking for file name and type to save
            untitledfile = filedialog.asksaveasfilename(title = "Save file As",defaultextension=".txt",initialfile = "Untitled.txt",filetypes = (("All Files","*.*"),("Text Files","*.txt"),("Python Files","*.py")))
            # Reading the data from text area
            data = self.txtarea.get("1.0",END)
            # opening File in write mode
            outfile = open(untitledfile,"w")
            # Writing Data into file
            outfile.write(data)
            # Closing File
            outfile.close()
            # Updating filename as Untitled
            self.filename = untitledfile
            # Calling Set title
            self.settitle()
            # Updating Status
            self.status.set("Saved Successfully")
        except Exception as e:
            messagebox.showerror("Exception",e)
    # Defining Exit Funtion
    def exit(self,*args):
        """Confirm with the user, then destroy the root window."""
        op = messagebox.askyesno("WARNING","Your Unsaved Data May be Lost!!")
        if op>0:
            self.root.destroy()
        else:
            return
    # Defining Cut Funtion
    def cut(self,*args):
        """Cut the selection to the clipboard via the Text widget event."""
        self.txtarea.event_generate("<<Cut>>")
    # Defining Copy Funtion
    def copy(self,*args):
        """Copy the selection to the clipboard via the Text widget event."""
        self.txtarea.event_generate("<<Copy>>")
    # Defining Paste Funtion
    def paste(self,*args):
        """Paste the clipboard at the cursor via the Text widget event."""
        self.txtarea.event_generate("<<Paste>>")
    # Defining Undo Funtion
    def undo(self,*args):
        """Discard edits by reloading the current file from disk.

        NOTE(review): this is not a step-wise undo — unsaved changes are
        lost and an unnamed buffer is simply cleared.
        """
        # Exception handling
        try:
            # checking if filename not none
            if self.filename:
                # Clearing Text Area
                self.txtarea.delete("1.0",END)
                # opening File in read mode
                infile = open(self.filename,"r")
                # Inserting data Line by line into text area
                for line in infile:
                    self.txtarea.insert(END,line)
                # Closing File
                infile.close()
                # Calling Set title
                self.settitle()
                # Updating Status
                self.status.set("Undone Successfully")
            else:
                # Clearing Text Area
                self.txtarea.delete("1.0",END)
                # Updating filename as None
                self.filename = None
                # Calling Set title
                self.settitle()
                # Updating Status
                self.status.set("Undone Successfully")
        except Exception as e:
            messagebox.showerror("Exception",e)
    # Defining About Funtion
    def infoabout(self):
        """Show the About dialog."""
        messagebox.showinfo("About Text Editor","A Simple Text Editor\nCreated using Python.")
    # Defining shortcuts Funtion
    def shortcuts(self):
        """Bind keyboard shortcuts on the text area.

        NOTE(review): Ctrl+X/C/V also have native Text-class bindings, so
        generating <<Cut>>/<<Copy>>/<<Paste>> here may fire the action
        twice (handlers do not return "break") — confirm on target Tk.
        """
        # Binding Ctrl+n to newfile funtion
        self.txtarea.bind("<Control-n>",self.newfile)
        # Binding Ctrl+o to openfile funtion
        self.txtarea.bind("<Control-o>",self.openfile)
        # Binding Ctrl+s to savefile funtion
        self.txtarea.bind("<Control-s>",self.savefile)
        # Binding Ctrl+a to saveasfile funtion
        self.txtarea.bind("<Control-a>",self.saveasfile)
        # Binding Ctrl+e to exit funtion
        self.txtarea.bind("<Control-e>",self.exit)
        # Binding Ctrl+x to cut funtion
        self.txtarea.bind("<Control-x>",self.cut)
        # Binding Ctrl+c to copy funtion
        self.txtarea.bind("<Control-c>",self.copy)
        # Binding Ctrl+v to paste funtion
        self.txtarea.bind("<Control-v>",self.paste)
        # Binding Ctrl+u to undo funtion
        self.txtarea.bind("<Control-u>",self.undo)
# Create the Tk root window.  NOTE(review): there is no
# ``if __name__ == "__main__"`` guard, so importing this module opens the UI.
root = Tk()
# Build the editor on the root window.
TextEditor(root)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
| 37.404
| 207
| 0.669768
|
4a017ba9769e53ab3239a325ddb856c5b49f2bd8
| 455
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/choroplethmapbox/_zmid.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/choroplethmapbox/_zmid.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/choroplethmapbox/_zmid.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class ZmidValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``choroplethmapbox.zmid`` number property."""

    def __init__(self, plotly_name="zmid", parent_name="choroplethmapbox", **kwargs):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(ZmidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs
        )
| 35
| 85
| 0.663736
|
4a017c74dee3ca7a2cecac654ba8659402332a71
| 3,408
|
py
|
Python
|
admin/handler/roleHandler.py
|
xin1195/smart
|
11815b8a63f2459300e8aaad82b539cfef8a7546
|
[
"Apache-2.0"
] | 1
|
2016-05-09T12:29:47.000Z
|
2016-05-09T12:29:47.000Z
|
admin/handler/roleHandler.py
|
xin1195/smartSearch
|
11815b8a63f2459300e8aaad82b539cfef8a7546
|
[
"Apache-2.0"
] | null | null | null |
admin/handler/roleHandler.py
|
xin1195/smartSearch
|
11815b8a63f2459300e8aaad82b539cfef8a7546
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# _*_coding:utf-8_*_
import traceback
import tornado.web
from tornado import gen
from admin.handler.baseHandler import BaseHandler
from common.authLib import auth_permissions
from setting import logger
class AdminRoleHandler(BaseHandler):
    """List system roles with simple pagination."""

    @tornado.web.authenticated
    @auth_permissions
    @gen.coroutine
    def get(self, *args, **kwargs):
        """Render the paginated role list page."""
        res_msg = ""
        roles = []
        num = int(self.get_argument("num", 15))    # page size
        page = int(self.get_argument("page", 1))   # 1-based page index
        total_count = 0
        try:
            query = {}
            show = {"_id": 0}
            cursor = self.db.sys_role.find(query, show).skip((page - 1) * num).limit(num)
            while (yield cursor.fetch_next):
                role = cursor.next_object()
                roles.append(role)
            total_count = yield self.db.sys_role.find().count()
        except Exception:
            # Was a bare ``except:`` which would also swallow
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            logger.error(traceback.format_exc())
        self.render("admin/sys_role_list.html", roles=roles, res_msg=res_msg,
                    total_count=total_count, page=page, num=num)
class AdminRoleAddHandler(BaseHandler):
    """Render the role-creation form and create roles."""

    @tornado.web.authenticated
    @auth_permissions
    @gen.coroutine
    def get(self, *args, **kwargs):
        """Show an empty role form posting to /admin/role/add."""
        res_msg = ""
        role = {}
        self.render("admin/sys_role_add.html", res_msg=res_msg, form_action="/admin/role/add", role=role)

    # NOTE(review): unlike get(), post() is not @tornado.web.authenticated —
    # confirm auth_permissions alone is sufficient here.
    @auth_permissions
    @gen.coroutine
    def post(self, *args, **kwargs):
        """Upsert a role document keyed by role_id, then go back to the list."""
        role_id = self.get_argument("role_id", "")
        role_name = self.get_argument("role_name", "")
        try:
            role_dict = {
                "role_id": role_id,
                "role_name": role_name,
            }
            query = {"role_id": role_id}
            yield self.db.sys_role.update(query, role_dict, upsert=True)
        except Exception:
            # Was a bare ``except:``; narrowed to Exception.
            logger.error(traceback.format_exc())
        self.redirect("/admin/role")
class AdminRoleUpdateHandler(BaseHandler):
    """Render the role-edit form and apply role updates."""

    @tornado.web.authenticated
    @auth_permissions
    @gen.coroutine
    def get(self, *args, **kwargs):
        """Load the role identified by role_id and show it in the edit form."""
        res_msg = ""
        role = {}
        try:
            role_id = self.get_argument("role_id", "")
            query = {"role_id": role_id}
            show = {"_id": 0}
            role = yield self.db.sys_role.find_one(query, show)
        except Exception:
            # Was a bare ``except:``; narrowed to Exception.
            logger.error(traceback.format_exc())
        self.render("admin/sys_role_add.html", role=role, res_msg=res_msg, form_action="/admin/role/update")

    @auth_permissions
    @gen.coroutine
    def post(self, *args, **kwargs):
        """Apply the edited fields with $set, then go back to the role list."""
        role_id = self.get_argument("role_id", "")
        role_name = self.get_argument("role_name", "")
        try:
            role_dict = {
                "role_id": role_id,
                "role_name": role_name,
            }
            query = {"role_id": role_id}
            yield self.db.sys_role.update(query, {"$set": role_dict}, upsert=True)
        except Exception:
            logger.error(traceback.format_exc())
        # BUGFIX: redirected to "/admin/user" (copy-paste from the user
        # handlers); role edits should return to the role list.
        self.redirect("/admin/role")
class AdminRoleDeleteHandler(BaseHandler):
    """Delete the role identified by role_id, then return to the list."""

    @tornado.web.authenticated
    @auth_permissions
    @gen.coroutine
    def get(self, *args, **kwargs):
        try:
            role_id = self.get_argument("role_id", "")
            query = {"role_id": role_id}
            # BUGFIX: the remove() future was not yielded, so the delete
            # could still be in flight (and its errors uncaught) when the
            # redirect was issued.
            yield self.db.sys_role.remove(query)
        except Exception:
            # Was a bare ``except:``; narrowed to Exception.
            logger.error(traceback.format_exc())
        self.redirect("/admin/role")
| 31.850467
| 122
| 0.588322
|
4a017e10eed34fefe3782c4bb422853ba052bc0f
| 3,610
|
py
|
Python
|
src/oci/core/models/shape_access_control_service_enabled_platform_options.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/core/models/shape_access_control_service_enabled_platform_options.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/core/models/shape_access_control_service_enabled_platform_options.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ShapeAccessControlServiceEnabledPlatformOptions(object):
    """
    Configuration options for the Access Control Service.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new ShapeAccessControlServiceEnabledPlatformOptions object.
        The following keyword arguments are supported (corresponding to the
        getters/setters of this class):

        :param allowed_values:
            The value to assign to the allowed_values property.
        :type allowed_values: list[bool]

        :param is_default_enabled:
            The value to assign to the is_default_enabled property.
        :type is_default_enabled: bool
        """
        self.swagger_types = {
            'allowed_values': 'list[bool]',
            'is_default_enabled': 'bool'
        }
        self.attribute_map = {
            'allowed_values': 'allowedValues',
            'is_default_enabled': 'isDefaultEnabled'
        }
        self._allowed_values = None
        self._is_default_enabled = None

    @property
    def allowed_values(self):
        """
        Gets the allowed_values of this model.
        Whether the Access Control Service can be enabled.

        :return: The allowed_values of this ShapeAccessControlServiceEnabledPlatformOptions.
        :rtype: list[bool]
        """
        return self._allowed_values

    @allowed_values.setter
    def allowed_values(self, allowed_values):
        """
        Sets the allowed_values of this model.
        Whether the Access Control Service can be enabled.

        :param allowed_values: The allowed_values of this ShapeAccessControlServiceEnabledPlatformOptions.
        :type: list[bool]
        """
        self._allowed_values = allowed_values

    @property
    def is_default_enabled(self):
        """
        Gets the is_default_enabled of this model.
        Whether the Access Control Service is enabled by default.

        :return: The is_default_enabled of this ShapeAccessControlServiceEnabledPlatformOptions.
        :rtype: bool
        """
        return self._is_default_enabled

    @is_default_enabled.setter
    def is_default_enabled(self, is_default_enabled):
        """
        Sets the is_default_enabled of this model.
        Whether the Access Control Service is enabled by default.

        :param is_default_enabled: The is_default_enabled of this ShapeAccessControlServiceEnabledPlatformOptions.
        :type: bool
        """
        self._is_default_enabled = is_default_enabled

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None compares unequal; otherwise compare full attribute state.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| 35.392157
| 245
| 0.706094
|
4a017e775e89378438dabb8716a5ee057768e69d
| 547
|
py
|
Python
|
quiz2/quiz2_pw/app.py
|
damiankarol7/python101
|
1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b
|
[
"MIT"
] | 44
|
2015-02-11T19:10:37.000Z
|
2021-11-11T09:45:43.000Z
|
quiz2/quiz2_pw/app.py
|
damiankarol7/python101
|
1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b
|
[
"MIT"
] | 9
|
2015-02-06T21:26:25.000Z
|
2022-03-31T10:44:22.000Z
|
quiz2/quiz2_pw/app.py
|
damiankarol7/python101
|
1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b
|
[
"MIT"
] | 172
|
2015-06-13T07:16:24.000Z
|
2022-03-30T20:41:11.000Z
|
# -*- coding: utf-8 -*-
# quiz_pw/app.py
from flask import Flask, g
from peewee import *
app = Flask(__name__)
# Application configuration, including the secret key for the HTTP
# session support required by flash(), and the page title.
app.config.update(dict(
    SECRET_KEY='bardzosekretnawartosc',
    TYTUL='Quiz 2 Peewee'
))
# Create the database instance used by the models.
baza = SqliteDatabase('quiz.db')
@app.before_request
def before_request():
    """Expose the database on flask.g and open a connection per request."""
    g.db = baza
    g.db.connect()
@app.after_request
def after_request(response):
    """Close the per-request database connection, pass the response on."""
    g.db.close()
    return response
| 18.233333
| 69
| 0.714808
|
4a017e8ba1f780000df17970dfa142d74a1fc99c
| 8,621
|
py
|
Python
|
model.py
|
gasperverc13/Valute
|
12e24b4574ed30d272283806b296f5fee5e09885
|
[
"MIT"
] | null | null | null |
model.py
|
gasperverc13/Valute
|
12e24b4574ed30d272283806b296f5fee5e09885
|
[
"MIT"
] | null | null | null |
model.py
|
gasperverc13/Valute
|
12e24b4574ed30d272283806b296f5fee5e09885
|
[
"MIT"
] | null | null | null |
import yfinance as yf
import json
import datetime as dt
import plotly.graph_objs as go
class Portfelj:
    """A portfolio of currency pairs (``Valuta``) the user holds.

    Tracks the owned currencies and the currently selected one, plots a
    price chart via yfinance/plotly, (de)serializes to JSON and
    validates form input.
    """

    def __init__(self):
        self.moje_valute = []        # list of Valuta objects
        self.trenutna_valuta = None  # currently selected Valuta (or None)

    def dodaj_valuto(self, valuta):
        """Add a currency; the first one added becomes the selection."""
        self.moje_valute.append(valuta)
        if not self.trenutna_valuta:
            self.trenutna_valuta = valuta

    def prodaj_vse(self, valuta):
        """Sell (remove) a whole currency from the portfolio."""
        self.moje_valute.remove(valuta)

    def zamenjaj_valuto(self, valuta):
        """Switch the currently selected currency."""
        self.trenutna_valuta = valuta

    def kupi_vec(self, nakup):
        """Record an additional purchase of the selected currency."""
        self.trenutna_valuta.dodaj_nakup(nakup)

    def prodaj_del(self, nakup):
        """Sell one purchase of the selected currency."""
        self.trenutna_valuta.prodaj_del(nakup)

    def graf(self, zacetek, konec, interval):
        """Show a candlestick chart of the selected pair.

        :param zacetek: optional start date
        :param konec: optional end date
        :param interval: yfinance interval string (e.g. '1d')
        """
        kratica = self.trenutna_valuta.kratica
        # BUGFIX: was ``kratica[:3] == ('USD' or 'usd')``, which evaluates
        # to ``== 'USD'`` and silently never matched a lower-case prefix;
        # compare case-insensitively instead.
        if kratica[:3].upper() == 'USD':
            kratica_x = kratica[-3:]
        else:
            kratica_x = ''.join(kratica.split('/'))
        kratica_x = f'{kratica_x}=X'
        if zacetek is not None:
            if konec is not None:
                if konec < zacetek:
                    # The dates were supplied in reverse order — swap them.
                    zacetek, konec = konec, zacetek
            elif zacetek > dt.date.today():
                zacetek = dt.date.today()
            # Drop dates yfinance cannot represent.
            try:
                yf.Ticker(kratica_x).history(start=zacetek)
            except OverflowError:
                zacetek = None
            try:
                yf.Ticker(kratica_x).history(start=zacetek, end=konec)
            except OverflowError:
                konec = None
        graf = go.Figure()
        podatki = yf.download(
            tickers=kratica_x, start=zacetek, end=konec, interval=interval)
        graf.add_trace(go.Candlestick(
            x=podatki.index, open=podatki['Open'], high=podatki['High'],
            low=podatki['Low'], close=podatki['Close']))
        graf.update_layout(title=kratica)
        graf.show()

    def v_slovar(self):
        """Serialize to a JSON-compatible dict."""
        return {
            'moje_valute': [valuta.v_slovar() for valuta in self.moje_valute],
            'trenutna_valuta': self.moje_valute.index(self.trenutna_valuta) if self.trenutna_valuta else None,
        }

    @staticmethod
    def iz_slovarja(slovar):
        """Reconstruct a Portfelj from its dict form (inverse of v_slovar)."""
        portfelj = Portfelj()
        portfelj.moje_valute = [
            Valuta.iz_slovarja(sl_valuta) for sl_valuta in slovar['moje_valute']
        ]
        if slovar['trenutna_valuta'] is not None:
            portfelj.trenutna_valuta = portfelj.moje_valute[slovar['trenutna_valuta']]
        return portfelj

    def shrani_v_datoteko(self, ime_dat):
        """Save the portfolio as JSON to ``ime_dat``."""
        with open(ime_dat, 'w', encoding='utf-8') as dat:
            json.dump(self.v_slovar(), dat)

    @staticmethod
    def preberi_iz_datoteke(ime_dat):
        """Load a portfolio previously saved with shrani_v_datoteko."""
        with open(ime_dat, 'r', encoding='utf-8') as dat:
            slovar = json.load(dat)
        return Portfelj.iz_slovarja(slovar)

    def preveri_podatke_nove_valute(self, kratica):
        """Validate a new pair symbol ('XXX/YYY'); return a dict of errors."""
        napake = {}
        if not kratica:
            napake['kratica'] = 'Vpišite kratico.'
        elif len(kratica) != 7 or '/' != kratica[3]:
            napake['kratica'] = 'Napačen format vnosa.'
        for valuta in self.moje_valute:
            # Reject the pair if it (or its inverse) is already tracked.
            obratno = '/'.join([kratica[-3:].upper(), kratica[:3].upper()])
            if (valuta.kratica == kratica.upper()) or (valuta.kratica == obratno):
                napake['kratica'] = 'Ta kratica je že vpisana.'
        return napake

    def preveri_podatke_nakupa(self, kolicina_delna, kupna_cena, stop, limit):
        """Validate purchase inputs; None fields are optional, 0 is invalid."""
        napake = {}
        for podatek in [kolicina_delna, kupna_cena, stop, limit]:
            try:
                float(podatek)
                if float(podatek) == 0:
                    napake['nakup'] = 'Vrednosti ne smejo biti 0.'
                    break
            except ValueError:
                napake['nakup'] = 'Vnešeni podatki niso ustrezni.'
                break
            except TypeError:
                # None means the field was left empty, which is allowed.
                continue
        return napake

    def preveri_podatke_grafa(self, interval):
        """Validate the chart interval and that quotes exist for the pair."""
        kratica = self.trenutna_valuta.kratica
        napake = {}
        if interval not in ['1m', '2m', '5m', '15m', '30m', '60m', '90m', '1h', '1d', '5d', '1wk', '1mo', '3mo']:
            napake['graf'] = 'Vnesite ustrezen interval.'
            return napake
        # Case-insensitive USD check, consistent with graf() above.
        if kratica[:3].upper() == 'USD':
            kratica_x = kratica[-3:]
        else:
            kratica_x = ''.join(kratica.split('/'))
        kratica_x = f'{kratica_x}=X'
        poskus = yf.Ticker(kratica_x).history(start='2021-01-01')
        if len(poskus) == 0:
            napake['graf'] = 'Grafa za ta par ni mogoče prikazati.'
        return napake
class Valuta:
    """One tracked currency pair and all of its purchases.

    NOTE(review): the constructor performs a network call (yfinance) to
    fetch the current price; ``trenutna_cena``/``skupna_razlika`` may be
    the string 'Ni podatka' when no quote is available.
    """
    def __init__(self, kratica):
        self.kratica = kratica            # pair symbol, e.g. 'EUR/USD'
        self.kupljeno = []                # list of Nakup objects
        self.trenutna_cena = Valuta.trenutna_cena_valute(self.kratica)
        self.skupna_razlika = 0           # total unrealised P/L
        self.skupna_kolicina = 0          # total purchased quantity
    def dodaj_nakup(self, nakup):
        """Register a purchase and update the running totals."""
        self.kupljeno.append(nakup)
        self.kolicina_skupna(nakup, 'dodaj')
        self.razlika(nakup, 'dodaj')
    def prodaj_del(self, nakup):
        """Remove a purchase and update the running totals."""
        self.kupljeno.remove(nakup)
        self.kolicina_skupna(nakup, 'prodaj')
        self.razlika(nakup, 'prodaj')
    def kolicina_skupna(self, nakup, naredi):
        """Adjust the total quantity ('dodaj' adds, 'prodaj' subtracts)."""
        if naredi == 'dodaj':
            self.skupna_kolicina += nakup.kolicina_delna
        elif naredi == 'prodaj':
            self.skupna_kolicina -= nakup.kolicina_delna
    def razlika(self, nakup, naredi):
        """Adjust the total P/L by the purchase's gain/loss.

        Uses the price cached at construction time; when that price is
        unavailable the total degrades to the string 'Ni podatka'.
        """
        trenutna_cena = self.trenutna_cena
        if type(trenutna_cena) == float:
            if naredi == 'dodaj':
                self.skupna_razlika += float(
                    f'{(trenutna_cena - nakup.kupna_cena) * nakup.kolicina_delna:.4f}')
            elif naredi == 'prodaj':
                self.skupna_razlika -= float(
                    f'{(trenutna_cena - nakup.kupna_cena) * nakup.kolicina_delna:.4f}')
        else:
            self.skupna_razlika = 'Ni podatka'
    @staticmethod
    def trenutna_cena_valute(kratica):
        """Fetch the current market price of the pair from yfinance.

        Returns a float rounded to 4 decimals, or an error string when
        the quote is unavailable.
        """
        if kratica[:3] == 'USD':
            kratica_x = kratica[-3:]
        else:
            kratica_x = ''.join(kratica.split('/'))
        kratica_x = f'{kratica_x}=X'
        valuta = yf.Ticker(kratica_x)
        try:
            cena = valuta.info['regularMarketPrice']
            return float(f'{cena:.4f}')
        except TypeError:
            return 'Ni podatka'
        except TimeoutError:
            return 'Trenutno ni podatka'
    def v_slovar(self):
        """Serialize to a JSON-compatible dict."""
        return {
            'kratica': self.kratica,
            'kupljeno': [nakup.v_slovar() for nakup in self.kupljeno],
            'skupna_kolicina': self.skupna_kolicina,
            'trenutna_cena': self.trenutna_cena,
            'skupna_razlika': self.skupna_razlika,
        }
    @staticmethod
    def iz_slovarja(slovar):
        """Reconstruct a Valuta from its dict form (inverse of v_slovar)."""
        valuta = Valuta(slovar['kratica'])
        valuta.kupljeno = [
            Nakup.iz_slovarja(sl_kupljeno) for sl_kupljeno in slovar['kupljeno']
        ]
        valuta.skupna_kolicina = slovar['skupna_kolicina']
        valuta.skupna_razlika = slovar['skupna_razlika']
        return valuta
class Nakup:
    """A single purchase (position) of part of a currency pair."""

    def __init__(self, kratica_del, kolicina_delna, kupna_cena, cas_nakupa, stop, limit):
        self.kratica_del = kratica_del                 # pair symbol
        self.kolicina_delna = float(kolicina_delna)    # purchased quantity
        self.kupna_cena = float(kupna_cena)            # purchase price
        self.cas_nakupa = cas_nakupa                   # datetime or None
        self.stop = float(stop) if stop is not None else None
        self.limit = float(limit) if limit is not None else None
        # Unrealised P/L of this position at the current market price.
        self.razlika_delna = Nakup.razlika_delna(
            self.kratica_del, self.kupna_cena, self.kolicina_delna)

    @staticmethod
    def razlika_delna(kratica_del, kupna_cena, kolicina_delna):
        """Return the position's P/L, or 'Ni podatka' without a quote.

        BUGFIX: the quote is now fetched once; the original called
        ``Valuta.trenutna_cena_valute`` twice, doubling the network
        round-trips and risking an inconsistent price between the type
        check and the computation.
        """
        cena = Valuta.trenutna_cena_valute(kratica_del)
        if isinstance(cena, float):
            return float(f'{(cena - kupna_cena) * kolicina_delna:.4f}')
        return 'Ni podatka'

    def v_slovar(self):
        """Serialize to a JSON-compatible dict."""
        return {
            'kratica_del': self.kratica_del,
            'kolicina_delna': self.kolicina_delna,
            'kupna_cena': self.kupna_cena,
            'cas_nakupa': dt.datetime.isoformat(self.cas_nakupa) if self.cas_nakupa else None,
            'stop': self.stop,
            'limit': self.limit,
            'razlika_delna': self.razlika_delna,
        }

    @staticmethod
    def iz_slovarja(slovar):
        """Reconstruct a Nakup from its dict form (inverse of v_slovar)."""
        return Nakup(
            slovar['kratica_del'],
            slovar['kolicina_delna'],
            slovar['kupna_cena'],
            dt.datetime.fromisoformat(
                slovar['cas_nakupa']) if slovar['cas_nakupa'] else None,
            slovar['stop'],
            slovar['limit'],
        )
| 35.331967
| 117
| 0.582647
|
4a017ed7fad0136b467c4a43e5a2072b20d11ddc
| 3,925
|
py
|
Python
|
src/bin/shipyard_airflow/tests/unit/plugins/test_armada_test_releases_operator.py
|
openstack/airship-shipyard
|
7dcada80f108d47524d04b9259c4321684ba555c
|
[
"Apache-2.0"
] | 12
|
2018-05-18T18:59:23.000Z
|
2019-05-10T12:31:44.000Z
|
src/bin/shipyard_airflow/tests/unit/plugins/test_armada_test_releases_operator.py
|
airshipit/shipyard
|
034b906dd6df0f9683dc6808f7ee08f68c9a527b
|
[
"Apache-2.0"
] | 4
|
2021-07-28T14:36:57.000Z
|
2022-03-22T16:39:23.000Z
|
src/bin/shipyard_airflow/tests/unit/plugins/test_armada_test_releases_operator.py
|
openstack/airship-shipyard
|
7dcada80f108d47524d04b9259c4321684ba555c
|
[
"Apache-2.0"
] | 9
|
2018-05-18T16:42:41.000Z
|
2019-04-18T20:12:14.000Z
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests ArmadaTestReleasesOperator functionality"""
import os
from unittest import mock
from airflow.exceptions import AirflowException
import pytest
from shipyard_airflow.plugins.armada_base_operator import \
ArmadaBaseOperator
from shipyard_airflow.plugins.armada_test_releases import \
ArmadaTestReleasesOperator
from shipyard_airflow.plugins.ucp_base_operator import \
UcpBaseOperator
CONF_FILE = os.path.join(os.path.dirname(__file__), 'test.conf')
ACTION_PARAMS = {
'release': 'glance'
}
RELEASES = {
'ucp': ['armada', 'deckhand', 'shipyard'],
'openstack': ['glance', 'heat', 'horizon', 'keystone']
}
class TestArmadaTestReleasesOperator:
@mock.patch('shipyard_airflow.plugins.armada_test_releases.LOG.info')
@mock.patch.object(ArmadaBaseOperator, 'armada_client', create=True)
@mock.patch.object(ArmadaBaseOperator, 'get_releases',
return_value=RELEASES)
def test_do_execute(self, mock_releases, mock_client,
mock_logs):
op = ArmadaTestReleasesOperator(main_dag_name='main',
shipyard_conf=CONF_FILE,
task_id='t1')
op.action_params = dict()
op.do_execute()
# Verify Armada client called to test every release
calls = list()
for release_list in RELEASES.values():
for release in release_list:
calls.append(mock.call(
release=release,
timeout=None))
mock_client.get_test_release.assert_has_calls(calls, any_order=True)
# Verify test results logged
mock_logs.assert_called_with(mock_client.get_test_release.return_value)
@mock.patch('shipyard_airflow.plugins.armada_test_releases.LOG.info')
@mock.patch.object(ArmadaBaseOperator, 'armada_client', create=True)
def test_do_execute_with_params(self, mock_client, mock_logs):
op = ArmadaTestReleasesOperator(main_dag_name='main',
shipyard_conf=CONF_FILE,
task_id='t1')
op.action_params = ACTION_PARAMS
op.do_execute()
# Verify Armada client called for single release with action params
release = ACTION_PARAMS['release']
mock_client.get_test_release.assert_called_once_with(
release=release,
timeout=None)
# Verify test results logged
mock_logs.assert_called_with(mock_client.get_test_release.return_value)
@mock.patch.object(ArmadaBaseOperator, 'armada_client', create=True)
@mock.patch.object(ArmadaBaseOperator, 'get_releases',
return_value=RELEASES)
@mock.patch.object(UcpBaseOperator, 'get_k8s_logs')
def test_do_execute_fail(self, mock_k8s_logs,
mock_releases, mock_client):
mock_client.get_test_release.return_value = None
op = ArmadaTestReleasesOperator(main_dag_name='main',
shipyard_conf=CONF_FILE,
task_id='t1')
op.action_params = dict()
# Verify errors logged to pods
with pytest.raises(AirflowException):
op.do_execute()
mock_k8s_logs.assert_called_once()
| 38.480392
| 79
| 0.670573
|
4a017edcabd13950b07bfaa6e5fdc1030e1ade6c
| 11,995
|
py
|
Python
|
scope2screen/server/routes/data_routes.py
|
labsyspharm/scope2screen
|
a9ae7ac67605d2e34813b6c9d06ca0aa3d3cf421
|
[
"MIT"
] | 3
|
2021-10-10T23:59:46.000Z
|
2022-02-17T17:02:41.000Z
|
scope2screen/server/routes/data_routes.py
|
labsyspharm/scope2screen
|
a9ae7ac67605d2e34813b6c9d06ca0aa3d3cf421
|
[
"MIT"
] | null | null | null |
scope2screen/server/routes/data_routes.py
|
labsyspharm/scope2screen
|
a9ae7ac67605d2e34813b6c9d06ca0aa3d3cf421
|
[
"MIT"
] | null | null | null |
from scope2screen import app
from flask import render_template, request, Response, jsonify, abort, send_file
import io
from PIL import Image
from scope2screen import data_path, get_config
from scope2screen.server.models import data_model
from scope2screen.server.analytics import comparison
from pathlib import Path
from time import time
import pandas as pd
import json
import orjson
from flask_sqlalchemy import SQLAlchemy
@app.route('/init_database', methods=['GET'])
def init_database():
datasource = request.args.get('datasource')
data_model.init(datasource)
resp = jsonify(success=True)
return resp
@app.route('/config')
def serve_config():
return get_config()
@app.route('/get_nearest_cell', methods=['GET'])
def get_nearest_cell():
x = float(request.args.get('point_x'))
y = float(request.args.get('point_y'))
datasource = request.args.get('datasource')
resp = data_model.query_for_closest_cell(x, y, datasource)
return serialize_and_submit_json(resp)
@app.route('/get_channel_cell_ids', methods=['GET'])
def get_channel_cell_ids():
datasource = request.args.get('datasource')
filter = json.loads(request.args.get('filter'))
resp = data_model.get_channel_cells(datasource, filter)
return serialize_and_submit_json(resp)
@app.route('/get_cell_ids_phenotype', methods=['GET'])
def get_cell_ids_phenotype():
datasource = request.args.get('datasource')
resp = data_model.get_cells_phenotype(datasource)
return serialize_and_submit_json(resp)
# Gets a row based on the index
@app.route('/get_phenotype_column_name', methods=['GET'])
def get_phenotype_column_name():
datasource = request.args.get('datasource')
resp = data_model.get_phenotype_column_name(datasource)
return serialize_and_submit_json(resp)
# Gets a row based on the index
@app.route('/get_phenotype_description', methods=['GET'])
def get_phenotype_description():
datasource = request.args.get('datasource')
resp = data_model.get_phenotype_description(datasource)
return serialize_and_submit_json(resp)
# Gets a row based on the index
@app.route('/get_database_row', methods=['GET'])
def get_database_row():
datasource = request.args.get('datasource')
row = int(request.args.get('row'))
resp = data_model.get_row(row, datasource)
return serialize_and_submit_json(resp)
@app.route('/get_channel_names', methods=['GET'])
def get_channel_names():
datasource = request.args.get('datasource')
shortnames = bool(request.args.get('shortNames'))
resp = data_model.get_channel_names(datasource, shortnames)
return serialize_and_submit_json(resp)
@app.route('/get_phenotypes', methods=['GET'])
def get_phenotypes():
datasource = request.args.get('datasource')
resp = data_model.get_phenotypes(datasource)
return serialize_and_submit_json(resp)
@app.route('/get_color_scheme', methods=['GET'])
def get_color_scheme():
datasource = request.args.get('datasource')
refresh = request.args.get('refresh') == 'true'
resp = data_model.get_color_scheme(datasource, refresh)
return serialize_and_submit_json(resp)
@app.route('/get_neighborhood', methods=['GET'])
def get_neighborhood():
x = float(request.args.get('point_x'))
y = float(request.args.get('point_y'))
max_distance = float(request.args.get('max_distance'))
datasource = request.args.get('datasource')
resp = data_model.get_neighborhood(x, y, datasource, r=max_distance)
return serialize_and_submit_json(resp)
@app.route('/get_neighborhood_for_spat_corr', methods=['GET'])
def get_neighborhood_for_spat_corr():
x = float(request.args.get('point_x'))
y = float(request.args.get('point_y'))
max_distance = float(request.args.get('max_distance'))
datasource = request.args.get('datasource')
resp = data_model.get_neighborhood_for_spat_corr(x, y, datasource, r=max_distance)
return serialize_and_submit_json(resp)
@app.route('/get_k_results_for_spat_corr', methods=['GET'])
def get_k_results_for_spat_corr():
x = float(request.args.get('point_x'))
y = float(request.args.get('point_y'))
max_distance = float(request.args.get('max_distance'))
channels = request.args.get('channels').split()[0].split(',')
datasource = request.args.get('datasource')
resp = data_model.get_k_results_for_spat_corr(x, y, datasource, r=max_distance, channels=channels)
return serialize_and_submit_json(resp)
@app.route('/get_num_cells_in_circle', methods=['GET'])
def get_num_cells_in_circle():
datasource = request.args.get('datasource')
x = float(request.args.get('point_x'))
y = float(request.args.get('point_y'))
r = float(request.args.get('radius'))
resp = data_model.get_number_of_cells_in_circle(x, y, datasource, r=r)
return serialize_and_submit_json(resp)
@app.route('/get_gated_cell_ids', methods=['GET'])
def get_gated_cell_ids():
datasource = request.args.get('datasource')
filter = json.loads(request.args.get('filter'))
resp = data_model.get_gated_cells(datasource, filter)
return serialize_and_submit_json(resp)
@app.route('/get_database_description', methods=['GET'])
def get_database_description():
datasource = request.args.get('datasource')
resp = data_model.get_datasource_description(datasource)
return serialize_and_submit_json(resp)
@app.route('/upload_gates', methods=['POST'])
def upload_gates():
file = request.files['file']
if file.filename.endswith('.csv') == False:
abort(422)
datasource = request.form['datasource']
save_path = data_path / datasource
if save_path.is_dir() == False:
abort(422)
filename = 'uploaded_gates.csv'
file.save(Path(save_path / filename))
resp = jsonify(success=True)
return resp
@app.route('/get_rect_cells', methods=['GET'])
def get_rect_cells():
# Parse (rect - [x, y, r], channels [string])
datasource = request.args.get('datasource')
rect = [float(x) for x in request.args.get('rect').split(',')]
channels = request.args.get('channels')
# Retrieve cells - FIXME: Too slow - jam is stalling image loading
resp = data_model.get_rect_cells(datasource, rect, channels)
print('Neighborhood size:', len(resp))
return serialize_and_submit_json(resp)
@app.route('/get_ome_metadata', methods=['GET'])
def get_ome_metadata():
datasource = request.args.get('datasource')
resp = data_model.get_ome_metadata(datasource).json()
# OME-Types handles jsonify itself, so skip the orjson conversion
response = app.response_class(
response=resp,
mimetype='application/json'
)
return response
@app.route('/download_gating_csv', methods=['POST'])
def download_gating_csv():
datasource = request.form['datasource']
filter = json.loads(request.form['filter'])
channels = json.loads(request.form['channels'])
fullCsv = json.loads(request.form['fullCsv'])
if fullCsv:
csv = data_model.download_gating_csv(datasource, filter, channels)
else:
csv = data_model.download_gates(datasource, filter, channels)
return Response(
csv.to_csv(index=False),
mimetype="text/csv",
headers={"Content-disposition":
"attachment; filename=gating_csv.csv"})
@app.route('/get_uploaded_gating_csv_values', methods=['GET'])
def get_gating_csv_values():
datasource = request.args.get('datasource')
file_path = data_path / datasource / 'uploaded_gates.csv'
if file_path.is_file() == False:
abort(422)
csv = pd.read_csv(file_path)
obj = csv.to_dict(orient='records')
return serialize_and_submit_json(obj)
# @app.route('/get_histogram_comparison', methods=['GET'])
# def get_histogram_comparison():
# x = float(request.args.get('point_x'))
# y = float(request.args.get('point_y'))
# max_distance = float(request.args.get('max_distance'))
# datasource = request.args.get('datasource')
# channels = []
# if request.args.get('channels') != '':
# channels = request.args.get('channels').split()[0].split(',')
# resp = image_similarity.histogramComparison(x, y, datasource, max_distance, channels)
# return serialize_and_submit_json(resp)
@app.route('/histogram_comparison', methods=['GET'])
def histogram_comparison():
x = float(request.args.get('point_x'))
y = float(request.args.get('point_y'))
max_distance = float(request.args.get('max_distance'))
datasource = request.args.get('datasource')
viewport = request.args.getlist('viewport')[0]
zoomlevel = int(float(request.args.get('zoomlevel')))
sensitivity = float(request.args.get('sensitivity'))
# for which channels to compute? (currently only the first)
channels = []
if request.args.get('channels') != '':
channels = request.args.get('channels').split()[0].split(',')
# call functionality
resp = comparison.histogramComparison(x, y, datasource, max_distance, channels, viewport, zoomlevel, sensitivity)
return serialize_and_submit_json(resp)
@app.route('/histogram_comparison_simmap', methods=['GET'])
def histogram_comparison_simmap():
x = float(request.args.get('point_x'))
y = float(request.args.get('point_y'))
max_distance = float(request.args.get('max_distance'))
datasource = request.args.get('datasource')
viewport = request.args.getlist('viewport')[0]
zoomlevel = int(float(request.args.get('zoomlevel')))
sensitivity = float(request.args.get('sensitivity'))
# for which channels to compute? (currently only the first)
channels = []
if request.args.get('channels') != '':
channels = request.args.get('channels').split()[0].split(',')
# call functionality
resp = comparison.histogramComparisonSimMap(x, y, datasource, max_distance, channels, viewport, zoomlevel,
sensitivity)
# file_object = io.BytesIO()
# # write PNG in file-object
# Image.fromarray(png).save(file_object, 'PNG', compress_level=0)
# # move to beginning of file so `send_file()` it will read from start
# file_object.seek(0)
return serialize_and_submit_json(resp)
@app.route('/save_dot', methods=['POST'])
def save_dot():
post_data = json.loads(request.data)
datasource = post_data['datasource']
dot = post_data['dot']
resp = data_model.save_dot(datasource, dot)
return serialize_and_submit_json(resp)
@app.route('/load_dots', methods=['GET'])
def load_dots():
datasource = request.args.get('datasource')
dots = data_model.load_dots(datasource)
dots_dict = [to_dict(dot) for dot in dots]
return serialize_and_submit_json(dots_dict)
@app.route('/delete_dot', methods=['GET'])
def delete_dot():
datasource = request.args.get('datasource')
id = int(request.args.get('id'))
dots = data_model.delete_dot(datasource, id)
return serialize_and_submit_json(True)
def to_dict(row):
return {column.name: getattr(row, row.__mapper__.get_property_by_column(column).key) for column in
row.__table__.columns}
# E.G /generated/data/melanoma/channel_00_files/13/16_18.png
@app.route('/generated/data/<string:datasource>/<string:channel>/<string:level>/<string:tile>')
def generate_png(datasource, channel, level, tile):
png = data_model.generate_zarr_png(datasource, channel, level, tile)
file_object = io.BytesIO()
# write PNG in file-object
Image.fromarray(png).save(file_object, 'PNG', compress_level=0)
# move to beginning of file so `send_file()` it will read from start
file_object.seek(0)
return send_file(file_object, mimetype='image/PNG')
@app.route('/start_spatial_correlation')
def start_spatial_correlation():
data_model.spatial_corr([])
return 'hi'
def serialize_and_submit_json(data):
response = app.response_class(
response=orjson.dumps(data, option=orjson.OPT_SERIALIZE_NUMPY),
mimetype='application/json'
)
return response
| 35.488166
| 117
| 0.710796
|
4a017f335a3783579b0d44a2c838028ba28d6b3f
| 10,016
|
py
|
Python
|
s2cnn/soft/s2_fft.py
|
Archer-Tatsu/s2cnn
|
db9f816335de695f63b462578748f69364695d2d
|
[
"MIT"
] | null | null | null |
s2cnn/soft/s2_fft.py
|
Archer-Tatsu/s2cnn
|
db9f816335de695f63b462578748f69364695d2d
|
[
"MIT"
] | null | null | null |
s2cnn/soft/s2_fft.py
|
Archer-Tatsu/s2cnn
|
db9f816335de695f63b462578748f69364695d2d
|
[
"MIT"
] | null | null | null |
# pylint: disable=R,C,E1101
from functools import lru_cache
import torch
import torch.cuda
from string import Template
from s2cnn.utils.decorator import cached_dirpklgz
# inspired by https://gist.github.com/szagoruyko/89f83b6f5f4833d3c8adf81ee49f22a8
def s2_fft(x, for_grad=False, b_out=None):
'''
:param x: [..., beta, alpha, complex]
:return: [l * m, ..., complex]
'''
assert x.size(-1) == 2
b_in = x.size(-2) // 2
assert x.size(-2) == 2 * b_in
assert x.size(-3) == 2 * b_in
if b_out is None:
b_out = b_in
assert b_out <= b_in
batch_size = x.size()[:-3]
x = x.view(-1, 2 * b_in, 2 * b_in, 2) # [batch, beta, alpha, complex]
output = _s2_fft(x, for_grad=for_grad, b_in=b_in, b_out=b_out) # [l * m, batch, complex]
output = output.view(-1, *batch_size, 2) # [l * m, ..., complex] (nspec, ..., 2)
return output
def _s2_fft(x, for_grad, b_in, b_out):
'''
:param x: [batch, beta, alpha, complex] (nbatch, 2 * b_in, 2 * b_in, 2)
:return: [l * m, batch, complex] (b_out**2, nbatch, 2)
'''
nspec = b_out ** 2
nbatch = x.size(0)
wigner = _setup_wigner(b_in, nl=b_out, weighted=not for_grad, device_type=x.device.type,
device_index=x.device.index)
wigner = wigner.view(2 * b_in, -1) # [beta, l * m] (2 * b_in, nspec)
x = torch.fft(x, 1) # [batch, beta, m, complex]
output = x.new_empty((nspec, nbatch, 2))
if x.is_cuda and x.dtype == torch.float32:
import s2cnn.utils.cuda as cuda_utils
device = torch.cuda.current_device()
cuda_kernel = _setup_s2fft_cuda_kernel(b=b_in, nspec=nspec, nbatch=nbatch, device=device)
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
cuda_kernel(block=(1024, 1, 1),
grid=(cuda_utils.get_blocks(nspec * nbatch, 1024), 1, 1),
args=[x.contiguous().data_ptr(), wigner.contiguous().data_ptr(), output.data_ptr()],
stream=stream)
# [l * m, batch, complex]
else:
for l in range(b_out):
s = slice(l ** 2, l ** 2 + 2 * l + 1)
xx = torch.cat((x[:, :, -l:], x[:, :, :l + 1]), dim=2) if l > 0 else x[:, :, :1]
output[s] = torch.einsum("bm,zbmc->mzc", (wigner[:, s], xx))
return output
def s2_ifft(x, for_grad=False, b_out=None):
'''
:param x: [l * m, ..., complex]
'''
assert x.size(-1) == 2
nspec = x.size(0)
b_in = round(nspec ** 0.5)
assert nspec == b_in ** 2
if b_out is None:
b_out = b_in
assert b_out >= b_in
batch_size = x.size()[1:-1]
x = x.view(nspec, -1, 2) # [l * m, batch, complex] (nspec, nbatch, 2)
output = _s2_ifft(x, for_grad=for_grad, b_in=b_in, b_out=b_out) # [batch, beta, alpha, complex]
output = output.view(*batch_size, 2 * b_out, 2 * b_out, 2)
return output
def _s2_ifft(x, for_grad, b_in, b_out):
'''
:param x: [l * m, batch, complex] (b_in**2, nbatch, 2)
:return: [batch, beta, alpha, complex] (nbatch, 2 b_out, 2 * b_out, 2)
'''
nbatch = x.size(1)
wigner = _setup_wigner(b_out, nl=b_in, weighted=for_grad, device_type=x.device.type, device_index=x.device.index)
wigner = wigner.view(2 * b_out, -1) # [beta, l * m] (2 * b_out, nspec)
if x.is_cuda and x.dtype == torch.float32:
import s2cnn.utils.cuda as cuda_utils
device = torch.cuda.current_device()
cuda_kernel = _setup_s2ifft_cuda_kernel(b=b_out, nl=b_in, nbatch=nbatch, device=device)
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
output = x.new_empty((nbatch, 2 * b_out, 2 * b_out, 2))
cuda_kernel(block=(1024, 1, 1),
grid=(cuda_utils.get_blocks(nbatch * (2 * b_out) ** 2, 1024), 1, 1),
args=[x.data_ptr(), wigner.data_ptr(), output.data_ptr()],
stream=stream)
# [batch, beta, m, complex] (nbatch, 2 * b_out, 2 * b_out, 2)
else:
output = x.new_zeros((nbatch, 2 * b_out, 2 * b_out, 2))
for l in range(b_in):
s = slice(l ** 2, l ** 2 + 2 * l + 1)
out = torch.einsum("mzc,bm->zbmc", (x[s], wigner[:, s]))
output[:, :, :l + 1] += out[:, :, -l - 1:]
if l > 0:
output[:, :, -l:] += out[:, :, :l]
output = torch.ifft(output, 1) * output.size(-2) # [batch, beta, alpha, complex]
return output
@lru_cache(maxsize=32)
def _setup_wigner(b, nl, weighted, device_type, device_index):
dss = _setup_s2_fft(b, nl, weighted)
dss = torch.tensor(dss, dtype=torch.float32,
device=torch.device(device_type, device_index)) # [beta, l * m] # pylint: disable=E1102
return dss.contiguous()
@cached_dirpklgz("cache/setup_s2_fft")
def _setup_s2_fft(b, nl, weighted):
from lie_learn.representations.SO3.wigner_d import wigner_d_matrix
import lie_learn.spaces.S3 as S3
import numpy as np
import logging
betas = (np.arange(2 * b) + 0.5) / (2 * b) * np.pi
w = S3.quadrature_weights(b) * 2 * b
assert len(w) == len(betas)
logging.getLogger("trainer").info("Compute Wigner (only columns): b=%d nbeta=%d nl=%d nspec=%d", b, len(betas), nl,
nl ** 2)
dss = []
for b, beta in enumerate(betas):
ds = []
for l in range(nl):
d = wigner_d_matrix(l, beta,
field='complex', normalization='quantum', order='centered', condon_shortley='cs')
d = d[:, l] # d[m=:, n=0]
if weighted:
d *= w[b]
else:
d *= 2 * l + 1
ds.append(d) # [m]
dss.append(np.concatenate(ds)) # [l * m]
dss = np.stack(dss) # [beta, l * m]
return dss
@lru_cache(maxsize=32)
def _setup_s2fft_cuda_kernel(b, nspec, nbatch, device=0):
kernel = Template('''
#define COMPUTE_LM(s) \
int l = powf(s, 0.5); \
int m = (s - l * l) - l;
#define MOD(i, n) (((i) + (n)) % (n))
extern "C"
__global__ void main_(const float* in, const float* wig, float* out) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < ${nspec} * ${nbatch}; index += blockDim.x * gridDim.x) {
int i = index % ${nbatch}; // batch index
int s = index / ${nbatch}; // spectral index
// compute s -> (l,m)
COMPUTE_LM(s)
float out_re = 0.0;
float out_im = 0.0;
for (int beta = 0; beta < 2 * ${b}; ++beta) {
float in_re = in[((i * 2 * ${b} + beta) * 2 * ${b} + MOD(m, 2 * ${b})) * 2 + 0];
float in_im = in[((i * 2 * ${b} + beta) * 2 * ${b} + MOD(m, 2 * ${b})) * 2 + 1];
float w = wig[beta * ${nspec} + s];
out_re += w * in_re;
out_im += w * in_im;
}
out[index * 2 + 0] = out_re;
out[index * 2 + 1] = out_im;
}
}
''').substitute({'b': b, 'nbatch': nbatch, 'nspec': nspec})
import s2cnn.utils.cuda as cuda_utils
return cuda_utils.compile_kernel(kernel, b's2fft.cu', 'main_')
@lru_cache(maxsize=32)
def _setup_s2ifft_cuda_kernel(b, nl, nbatch, device=0):
kernel = Template('''
extern "C"
__global__ void main_(const float* in, const float* wig, float* out) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < ${nbatch} * 2 * ${b} * 2 * ${b}; index += blockDim.x * gridDim.x) {
int i = index / (2 * ${b} * 2 * ${b}); // batch index
int beta = (index / (2 * ${b})) % (2 * ${b});
int m = index % (2 * ${b});
// from 0,1,2, 3, 4 or 0,1,2, 3, 4, 5
// to 0,1,2,-2,-1 or 0,1,2,-3,-2,-1
int mm = m <= (2 * ${b} - 1) / 2 ? m : m - 2 * ${b};
float out_re = 0.0;
float out_im = 0.0;
for (int l = abs(mm); l < ${nl}; ++l) {
int s = l * l + (l + mm);
float in_re = in[(s * ${nbatch} + i) * 2 + 0];
float in_im = in[(s * ${nbatch} + i) * 2 + 1];
float w = wig[beta * ${nspec} + s];
out_re += in_re * w;
out_im += in_im * w;
}
out[index * 2 + 0] = out_re;
out[index * 2 + 1] = out_im;
}
}
''').substitute({'b': b, 'nbatch': nbatch, 'nl': nl, 'nspec': nl ** 2})
import s2cnn.utils.cuda as cuda_utils
return cuda_utils.compile_kernel(kernel, b's2ifft.cu', 'main_')
class S2_fft_real(torch.autograd.Function):
def __init__(self, b_out=None):
super(S2_fft_real, self).__init__()
self.b_in = None
self.b_out = b_out
def forward(self, x): # pylint: disable=W
from s2cnn.utils.complex import as_complex
self.b_in = x.size(-1) // 2
return s2_fft(as_complex(x), b_out=self.b_out)
def backward(self, grad_output): # pylint: disable=W
return s2_ifft(grad_output, for_grad=True, b_out=self.b_in)[..., 0]
class S2_ifft_real(torch.autograd.Function):
def __init__(self, b_out=None):
super(S2_ifft_real, self).__init__()
self.b_in = None
self.b_out = b_out
def forward(self, x): # pylint: disable=W
nspec = x.size(0)
self.b_in = round(nspec ** 0.5)
return s2_ifft(x, b_out=self.b_out)[..., 0]
def backward(self, grad_output): # pylint: disable=W
from s2cnn.utils.complex import as_complex
return s2_fft(as_complex(grad_output), for_grad=True, b_out=self.b_in)
def test_s2fft_cuda_cpu():
x = torch.rand(1, 2, 12, 12, 2) # [..., beta, alpha, complex]
z1 = s2_fft(x, b_out=5)
z2 = s2_fft(x.cuda(), b_out=5).cpu()
q = (z1 - z2).abs().max().item() / z1.std().item()
print(q)
assert q < 1e-4
def test_s2ifft_cuda_cpu():
x = torch.rand(12 ** 2, 10, 2) # [l * m, ..., complex]
z1 = s2_ifft(x, b_out=13)
z2 = s2_ifft(x.cuda(), b_out=13).cpu()
q = (z1 - z2).abs().max().item() / z1.std().item()
print(q)
assert q < 1e-4
if __name__ == "__main__":
test_s2fft_cuda_cpu()
test_s2ifft_cuda_cpu()
| 34.068027
| 135
| 0.547524
|
4a017f70821b44348c07886961f6f31a3a328e20
| 93,208
|
py
|
Python
|
sympy/utilities/tests/test_wester.py
|
ricopicone/sympy
|
de27c97214d540247a35c8215c7920e9a46b54ed
|
[
"BSD-3-Clause"
] | 2
|
2019-02-05T19:20:24.000Z
|
2019-04-23T13:24:38.000Z
|
sympy/utilities/tests/test_wester.py
|
ricopicone/sympy
|
de27c97214d540247a35c8215c7920e9a46b54ed
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/utilities/tests/test_wester.py
|
ricopicone/sympy
|
de27c97214d540247a35c8215c7920e9a46b54ed
|
[
"BSD-3-Clause"
] | 1
|
2019-10-15T10:55:42.000Z
|
2019-10-15T10:55:42.000Z
|
""" Tests from Michael Wester's 1999 paper "Review of CAS mathematical
capabilities".
http://www.math.unm.edu/~wester/cas/book/Wester.pdf
See also http://math.unm.edu/~wester/cas_review.html for detailed output of
each tested system.
"""
from sympy import (Rational, symbols, Dummy, factorial, sqrt, log, exp, oo, zoo,
product, binomial, rf, pi, gamma, igcd, factorint, radsimp, combsimp,
npartitions, totient, primerange, factor, simplify, gcd, resultant, expand,
I, trigsimp, tan, sin, cos, cot, diff, nan, limit, EulerGamma, polygamma,
bernoulli, hyper, hyperexpand, besselj, asin, assoc_legendre, Function, re,
im, DiracDelta, chebyshevt, legendre_poly, polylog, series, O,
atan, sinh, cosh, tanh, floor, ceiling, solve, asinh, acot, csc, sec,
LambertW, N, apart, sqrtdenest, factorial2, powdenest, Mul, S, ZZ,
Poly, expand_func, E, Q, And, Or, Ne, Eq, Le, Lt, Min,
ask, refine, AlgebraicNumber, continued_fraction_iterator as cf_i,
continued_fraction_periodic as cf_p, continued_fraction_convergents as cf_c,
continued_fraction_reduce as cf_r, FiniteSet, elliptic_e, elliptic_f,
powsimp, hessian, wronskian, fibonacci, sign, Lambda, Piecewise, Subs,
residue, Derivative, logcombine, Symbol, Intersection, Union,
EmptySet, Interval, Integral, idiff, ImageSet, acos, Max, MatMul, conjugate)
import mpmath
from sympy.functions.combinatorial.numbers import stirling
from sympy.functions.special.delta_functions import Heaviside
from sympy.functions.special.error_functions import Ci, Si, erf
from sympy.functions.special.zeta_functions import zeta
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.utilities.pytest import XFAIL, slow, SKIP, skip, ON_TRAVIS
from sympy.utilities.iterables import partitions
from mpmath import mpi, mpc
from sympy.matrices import Matrix, GramSchmidt, eye
from sympy.matrices.expressions.blockmatrix import BlockMatrix, block_collapse
from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix
from sympy.physics.quantum import Commutator
from sympy.assumptions import assuming
from sympy.polys.rings import vring
from sympy.polys.fields import vfield
from sympy.polys.solvers import solve_lin_sys
from sympy.concrete import Sum
from sympy.concrete.products import Product
from sympy.integrals import integrate
from sympy.integrals.transforms import laplace_transform,\
inverse_laplace_transform, LaplaceTransform, fourier_transform,\
mellin_transform
from sympy.solvers.recurr import rsolve
from sympy.solvers.solveset import solveset, solveset_real, linsolve
from sympy.solvers.ode import dsolve
from sympy.core.relational import Equality
from sympy.core.compatibility import range, PY3
from itertools import islice, takewhile
from sympy.series.formal import fps
from sympy.series.fourier import fourier_series
from sympy.calculus.util import minimum
R = Rational
x, y, z = symbols('x y z')
i, j, k, l, m, n = symbols('i j k l m n', integer=True)
f = Function('f')
g = Function('g')
# A. Boolean Logic and Quantifier Elimination
# Not implemented.
# B. Set Theory
def test_B1():
assert (FiniteSet(i, j, j, k, k, k) | FiniteSet(l, k, j) |
FiniteSet(j, m, j)) == FiniteSet(i, j, k, l, m)
def test_B2():
assert (FiniteSet(i, j, j, k, k, k) & FiniteSet(l, k, j) &
FiniteSet(j, m, j)) == Intersection({j, m}, {i, j, k}, {j, k, l})
# Previous output below. Not sure why that should be the expected output.
# There should probably be a way to rewrite Intersections that way but I
# don't see why an Intersection should evaluate like that:
#
# == Union({j}, Intersection({m}, Union({j, k}, Intersection({i}, {l}))))
def test_B3():
assert (FiniteSet(i, j, k, l, m) - FiniteSet(j) ==
FiniteSet(i, k, l, m))
def test_B4():
assert (FiniteSet(*(FiniteSet(i, j)*FiniteSet(k, l))) ==
FiniteSet((i, k), (i, l), (j, k), (j, l)))
# C. Numbers
def test_C1():
assert (factorial(50) ==
30414093201713378043612608166064768844377641568960512000000000000)
def test_C2():
assert (factorint(factorial(50)) == {2: 47, 3: 22, 5: 12, 7: 8,
11: 4, 13: 3, 17: 2, 19: 2, 23: 2, 29: 1, 31: 1, 37: 1,
41: 1, 43: 1, 47: 1})
def test_C3():
assert (factorial2(10), factorial2(9)) == (3840, 945)
# Base conversions; not really implemented by sympy
# Whatever. Take credit!
def test_C4():
assert 0xABC == 2748
def test_C5():
assert 123 == int('234', 7)
def test_C6():
assert int('677', 8) == int('1BF', 16) == 447
def test_C7():
assert log(32768, 8) == 5
def test_C8():
# Modular multiplicative inverse. Would be nice if divmod could do this.
assert ZZ.invert(5, 7) == 3
assert ZZ.invert(5, 6) == 5
def test_C9():
assert igcd(igcd(1776, 1554), 5698) == 74
def test_C10():
x = 0
for n in range(2, 11):
x += R(1, n)
assert x == R(4861, 2520)
def test_C11():
assert R(1, 7) == S('0.[142857]')
def test_C12():
assert R(7, 11) * R(22, 7) == 2
def test_C13():
test = R(10, 7) * (1 + R(29, 1000)) ** R(1, 3)
good = 3 ** R(1, 3)
assert test == good
def test_C14():
assert sqrtdenest(sqrt(2*sqrt(3) + 4)) == 1 + sqrt(3)
def test_C15():
test = sqrtdenest(sqrt(14 + 3*sqrt(3 + 2*sqrt(5 - 12*sqrt(3 - 2*sqrt(2))))))
good = sqrt(2) + 3
assert test == good
def test_C16():
test = sqrtdenest(sqrt(10 + 2*sqrt(6) + 2*sqrt(10) + 2*sqrt(15)))
good = sqrt(2) + sqrt(3) + sqrt(5)
assert test == good
def test_C17():
test = radsimp((sqrt(3) + sqrt(2)) / (sqrt(3) - sqrt(2)))
good = 5 + 2*sqrt(6)
assert test == good
def test_C18():
assert simplify((sqrt(-2 + sqrt(-5)) * sqrt(-2 - sqrt(-5))).expand(complex=True)) == 3
@XFAIL
def test_C19():
assert radsimp(simplify((90 + 34*sqrt(7)) ** R(1, 3))) == 3 + sqrt(7)
def test_C20():
inside = (135 + 78*sqrt(3))
test = AlgebraicNumber((inside**R(2, 3) + 3) * sqrt(3) / inside**R(1, 3))
assert simplify(test) == AlgebraicNumber(12)
def test_C21():
assert simplify(AlgebraicNumber((41 + 29*sqrt(2)) ** R(1, 5))) == \
AlgebraicNumber(1 + sqrt(2))
@XFAIL
def test_C22():
test = simplify(((6 - 4*sqrt(2))*log(3 - 2*sqrt(2)) + (3 - 2*sqrt(2))*log(17
- 12*sqrt(2)) + 32 - 24*sqrt(2)) / (48*sqrt(2) - 72))
good = sqrt(2)/3 - log(sqrt(2) - 1)/3
assert test == good
def test_C23():
assert 2 * oo - 3 is oo
@XFAIL
def test_C24():
raise NotImplementedError("2**aleph_null == aleph_1")
# D. Numerical Analysis
def test_D1():
assert 0.0 / sqrt(2) == 0.0
def test_D2():
assert str(exp(-1000000).evalf()) == '3.29683147808856e-434295'
def test_D3():
assert exp(pi*sqrt(163)).evalf(50).num.ae(262537412640768744)
def test_D4():
assert floor(R(-5, 3)) == -2
assert ceiling(R(-5, 3)) == -1
@XFAIL
def test_D5():
raise NotImplementedError("cubic_spline([1, 2, 4, 5], [1, 4, 2, 3], x)(3) == 27/8")
@XFAIL
def test_D6():
raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to FORTRAN")
@XFAIL
def test_D7():
raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to C")
@XFAIL
def test_D8():
# One way is to cheat by converting the sum to a string,
# and replacing the '[' and ']' with ''.
# E.g., horner(S(str(_).replace('[','').replace(']','')))
raise NotImplementedError("apply Horner's rule to sum(a[i]*x**i, (i,1,5))")
@XFAIL
def test_D9():
raise NotImplementedError("translate D8 to FORTRAN")
@XFAIL
def test_D10():
raise NotImplementedError("translate D8 to C")
@XFAIL
def test_D11():
#Is there a way to use count_ops?
raise NotImplementedError("flops(sum(product(f[i][k], (i,1,k)), (k,1,n)))")
@XFAIL
def test_D12():
assert (mpi(-4, 2) * x + mpi(1, 3)) ** 2 == mpi(-8, 16)*x**2 + mpi(-24, 12)*x + mpi(1, 9)
@XFAIL
def test_D13():
raise NotImplementedError("discretize a PDE: diff(f(x,t),t) == diff(diff(f(x,t),x),x)")
# E. Statistics
# See scipy; all of this is numerical.
# F. Combinatorial Theory.
def test_F1():
assert rf(x, 3) == x*(1 + x)*(2 + x)
def test_F2():
assert expand_func(binomial(n, 3)) == n*(n - 1)*(n - 2)/6
@XFAIL
def test_F3():
assert combsimp(2**n * factorial(n) * factorial2(2*n - 1)) == factorial(2*n)
@XFAIL
def test_F4():
assert combsimp((2**n * factorial(n) * product(2*k - 1, (k, 1, n)))) == factorial(2*n)
@XFAIL
def test_F5():
assert gamma(n + R(1, 2)) / sqrt(pi) / factorial(n) == factorial(2*n)/2**(2*n)/factorial(n)**2
def test_F6():
partTest = [p.copy() for p in partitions(4)]
partDesired = [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2:1}, {1: 4}]
assert partTest == partDesired
def test_F7():
assert npartitions(4) == 5
def test_F8():
assert stirling(5, 2, signed=True) == -50 # if signed, then kind=1
def test_F9():
assert totient(1776) == 576
# G. Number Theory
def test_G1():
assert list(primerange(999983, 1000004)) == [999983, 1000003]
@XFAIL
def test_G2():
raise NotImplementedError("find the primitive root of 191 == 19")
@XFAIL
def test_G3():
raise NotImplementedError("(a+b)**p mod p == a**p + b**p mod p; p prime")
# ... G14 Modular equations are not implemented.
def test_G15():
assert Rational(sqrt(3).evalf()).limit_denominator(15) == R(26, 15)
assert list(takewhile(lambda x: x.q <= 15, cf_c(cf_i(sqrt(3)))))[-1] == \
R(26, 15)
def test_G16():
    """First ten continued-fraction terms of pi."""
    terms = list(islice(cf_i(pi), 10))
    assert terms == [3, 7, 15, 1, 292, 1, 1, 1, 2, 1]
def test_G17():
assert cf_p(0, 1, 23) == [4, [1, 3, 1, 8]]
def test_G18():
assert cf_p(1, 2, 5) == [[1]]
assert cf_r([[1]]).expand() == S.Half + sqrt(5)/2
@XFAIL
def test_G19():
s = symbols('s', integer=True, positive=True)
it = cf_i((exp(1/s) - 1)/(exp(1/s) + 1))
assert list(islice(it, 5)) == [0, 2*s, 6*s, 10*s, 14*s]
def test_G20():
s = symbols('s', integer=True, positive=True)
# Wester erroneously has this as -s + sqrt(s**2 + 1)
assert cf_r([[2*s]]) == s + sqrt(s**2 + 1)
@XFAIL
def test_G20b():
s = symbols('s', integer=True, positive=True)
assert cf_p(s, 1, s**2 + 1) == [[2*s]]
# H. Algebra
def test_H1():
assert simplify(2*2**n) == simplify(2**(n + 1))
assert powdenest(2*2**n) == simplify(2**(n + 1))
def test_H2():
assert powsimp(4 * 2**n) == 2**(n + 2)
def test_H3():
assert (-1)**(n*(n + 1)) == 1
def test_H4():
    """factor() pulls the integer content out of 6*x - 10, giving 2*(3*x - 5)."""
    factored = factor(6*x - 10)
    assert type(factored) is Mul
    content = factored.args[0]
    primitive = factored.args[1]
    assert content == 2
    assert primitive == 3*x - 5
# Shared fixtures for the tests below: p1 and p2 are dense univariate
# polynomials with no common factor (test_H5), q is multiplied onto both to
# create a known gcd (test_H6) and a zero resultant (test_H11); test_H17
# factors the product p1*p2.
p1 = 64*x**34 - 21*x**47 - 126*x**8 - 46*x**5 - 16*x**60 - 81
p2 = 72*x**60 - 25*x**25 - 19*x**23 - 22*x**39 - 83*x**52 + 54*x**10 + 81
q = 34*x**19 - 25*x**16 + 70*x**7 + 20*x**3 - 91*x - 86
def test_H5():
assert gcd(p1, p2, x) == 1
def test_H6():
assert gcd(expand(p1 * q), expand(p2 * q)) == q
def test_H7():
p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
assert gcd(p1, p2, x, y, z) == 1
def test_H8():
p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
q = 11*x**12*y**7*z**13 - 23*x**2*y**8*z**10 + 47*x**17*y**5*z**8
assert gcd(p1 * q, p2 * q, x, y, z) == q
def test_H9():
p1 = 2*x**(n + 4) - x**(n + 2)
p2 = 4*x**(n + 1) + 3*x**n
assert gcd(p1, p2) == x**n
def test_H10():
p1 = 3*x**4 + 3*x**3 + x**2 - x - 2
p2 = x**3 - 3*x**2 + x + 5
assert resultant(p1, p2, x) == 0
def test_H11():
assert resultant(p1 * q, p2 * q, x) == 0
def test_H12():
    """Rational simplification cancels the common (x + 2) factor."""
    numerator = x**2 - 4
    denominator = x**2 + 4*x + 4
    assert simplify(numerator/denominator) == (x - 2)/(x + 2)
@XFAIL
def test_H13():
assert simplify((exp(x) - 1) / (exp(x/2) + 1)) == exp(x/2) - 1
def test_H14():
p = (x + 1) ** 20
ep = expand(p)
assert ep == (1 + 20*x + 190*x**2 + 1140*x**3 + 4845*x**4 + 15504*x**5
+ 38760*x**6 + 77520*x**7 + 125970*x**8 + 167960*x**9 + 184756*x**10
+ 167960*x**11 + 125970*x**12 + 77520*x**13 + 38760*x**14 + 15504*x**15
+ 4845*x**16 + 1140*x**17 + 190*x**18 + 20*x**19 + x**20)
dep = diff(ep, x)
assert dep == (20 + 380*x + 3420*x**2 + 19380*x**3 + 77520*x**4
+ 232560*x**5 + 542640*x**6 + 1007760*x**7 + 1511640*x**8 + 1847560*x**9
+ 1847560*x**10 + 1511640*x**11 + 1007760*x**12 + 542640*x**13
+ 232560*x**14 + 77520*x**15 + 19380*x**16 + 3420*x**17 + 380*x**18
+ 20*x**19)
assert factor(dep) == 20*(1 + x)**19
def test_H15():
assert simplify((Mul(*[x - r for r in solveset(x**3 + x**2 - 7)]))) == x**3 + x**2 - 7
def test_H16():
assert factor(x**100 - 1) == ((x - 1)*(x + 1)*(x**2 + 1)*(x**4 - x**3
+ x**2 - x + 1)*(x**4 + x**3 + x**2 + x + 1)*(x**8 - x**6 + x**4
- x**2 + 1)*(x**20 - x**15 + x**10 - x**5 + 1)*(x**20 + x**15 + x**10
+ x**5 + 1)*(x**40 - x**30 + x**20 - x**10 + 1))
def test_H17():
assert simplify(factor(expand(p1 * p2)) - p1*p2) == 0
@XFAIL
def test_H18():
# Factor over complex rationals.
test = factor(4*x**4 + 8*x**3 + 77*x**2 + 18*x + 153)
good = (2*x + 3*I)*(2*x - 3*I)*(x + 1 - 4*I)*(x + 1 + 4*I)
assert test == good
def test_H19():
a = symbols('a')
# The idea is to let a**2 == 2, then solve 1/(a-1). Answer is a+1")
assert Poly(a - 1).invert(Poly(a**2 - 2)) == a + 1
@XFAIL
def test_H20():
raise NotImplementedError("let a**2==2; (x**3 + (a-2)*x**2 - "
+ "(2*a+3)*x - 3*a) / (x**2-2) = (x**2 - 2*x - 3) / (x-a)")
@XFAIL
def test_H21():
raise NotImplementedError("evaluate (b+c)**4 assuming b**3==2, c**2==3. \
Answer is 2*b + 8*c + 18*b**2 + 12*b*c + 9")
def test_H22():
assert factor(x**4 - 3*x**2 + 1, modulus=5) == (x - 2)**2 * (x + 2)**2
def test_H23():
f = x**11 + x + 1
g = (x**2 + x + 1) * (x**9 - x**8 + x**6 - x**5 + x**3 - x**2 + 1)
assert factor(f, modulus=65537) == g
def test_H24():
phi = AlgebraicNumber(S.GoldenRatio.expand(func=True), alias='phi')
assert factor(x**4 - 3*x**2 + 1, extension=phi) == \
(x - phi)*(x + 1 - phi)*(x - 1 + phi)*(x + phi)
def test_H25():
e = (x - 2*y**2 + 3*z**3) ** 20
assert factor(expand(e)) == e
def test_H26():
g = expand((sin(x) - 2*cos(y)**2 + 3*tan(z)**3)**20)
assert factor(g, expand=False) == (-sin(x) + 2*cos(y)**2 - 3*tan(z)**3)**20
def test_H27():
f = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
g = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
h = -2*z*y**7 \
*(6*x**9*y**9*z**3 + 10*x**7*z**6 + 17*y*x**5*z**12 + 40*y**7) \
*(3*x**22 + 47*x**17*y**5*z**8 - 6*x**15*y**9*z**2 - 24*x*y**19*z**8 - 5)
assert factor(expand(f*g)) == h
@XFAIL
def test_H28():
raise NotImplementedError("expand ((1 - c**2)**5 * (1 - s**2)**5 * "
+ "(c**2 + s**2)**10) with c**2 + s**2 = 1. Answer is c**10*s**10.")
@XFAIL
def test_H29():
assert factor(4*x**2 - 21*x*y + 20*y**2, modulus=3) == (x + y)*(x - y)
def test_H30():
test = factor(x**3 + y**3, extension=sqrt(-3))
answer = (x + y)*(x + y*(-R(1, 2) - sqrt(3)/2*I))*(x + y*(-R(1, 2) + sqrt(3)/2*I))
assert answer == test
def test_H31():
f = (x**2 + 2*x + 3)/(x**3 + 4*x**2 + 5*x + 2)
g = 2 / (x + 1)**2 - 2 / (x + 1) + 3 / (x + 2)
assert apart(f) == g
@XFAIL
def test_H32(): # issue 6558
raise NotImplementedError("[A*B*C - (A*B*C)**(-1)]*A*C*B (product \
of a non-commuting product and its inverse)")
def test_H33():
A, B, C = symbols('A, B, C', commutative=False)
assert (Commutator(A, Commutator(B, C))
+ Commutator(B, Commutator(C, A))
+ Commutator(C, Commutator(A, B))).doit().expand() == 0
# I. Trigonometry
def test_I1():
assert tan(pi*R(7, 10)) == -sqrt(1 + 2/sqrt(5))
@XFAIL
def test_I2():
assert sqrt((1 + cos(6))/2) == -cos(3)
def test_I3():
assert cos(n*pi) + sin((4*n - 1)*pi/2) == (-1)**n - 1
def test_I4():
assert refine(cos(pi*cos(n*pi)) + sin(pi/2*cos(n*pi)), Q.integer(n)) == (-1)**n - 1
@XFAIL
def test_I5():
assert sin((n**5/5 + n**4/2 + n**3/3 - n/30) * pi) == 0
@XFAIL
def test_I6():
raise NotImplementedError("assuming -3*pi<x<-5*pi/2, abs(cos(x)) == -cos(x), abs(sin(x)) == -sin(x)")
@XFAIL
def test_I7():
assert cos(3*x)/cos(x) == cos(x)**2 - 3*sin(x)**2
@XFAIL
def test_I8():
assert cos(3*x)/cos(x) == 2*cos(2*x) - 1
@XFAIL
def test_I9():
# Supposed to do this with rewrite rules.
assert cos(3*x)/cos(x) == cos(x)**2 - 3*sin(x)**2
def test_I10():
assert trigsimp((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1)) is nan
@SKIP("hangs")
@XFAIL
def test_I11():
assert limit((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1), x, 0) != 0
@XFAIL
def test_I12():
    """Differentiating an expression equivalent to 0/0 should not succeed.

    Both the numerator and the denominator are identically zero, so taking
    the derivative should raise (or otherwise fail) rather than return a
    result.
    """
    expr = (tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1)
    try:
        # This should fail or return nan or something.
        diff(expr, x)
    except Exception:
        # Any ordinary failure is the desired outcome.  Note: a bare
        # ``except:`` would also swallow KeyboardInterrupt/SystemExit, so we
        # catch only Exception.
        pass
    else:
        assert False, "taking the derivative with a fraction equivalent to 0/0 should fail"
# J. Special functions.
def test_J1():
assert bernoulli(16) == R(-3617, 510)
def test_J2():
assert diff(elliptic_e(x, y**2), y) == (elliptic_e(x, y**2) - elliptic_f(x, y**2))/y
@XFAIL
def test_J3():
raise NotImplementedError("Jacobi elliptic functions: diff(dn(u,k), u) == -k**2*sn(u,k)*cn(u,k)")
def test_J4():
assert gamma(R(-1, 2)) == -2*sqrt(pi)
def test_J5():
assert polygamma(0, R(1, 3)) == -log(3) - sqrt(3)*pi/6 - EulerGamma - log(sqrt(3))
def test_J6():
assert mpmath.besselj(2, 1 + 1j).ae(mpc('0.04157988694396212', '0.24739764151330632'))
def test_J7():
assert simplify(besselj(R(-5,2), pi/2)) == 12/(pi**2)
def test_J8():
p = besselj(R(3,2), z)
q = (sin(z)/z - cos(z))/sqrt(pi*z/2)
assert simplify(expand_func(p) -q) == 0
def test_J9():
assert besselj(0, z).diff(z) == - besselj(1, z)
def test_J10():
mu, nu = symbols('mu, nu', integer=True)
assert assoc_legendre(nu, mu, 0) == 2**mu*sqrt(pi)/gamma((nu - mu)/2 + 1)/gamma((-nu - mu + 1)/2)
def test_J11():
assert simplify(assoc_legendre(3, 1, x)) == simplify(-R(3, 2)*sqrt(1 - x**2)*(5*x**2 - 1))
@slow
def test_J12():
assert simplify(chebyshevt(1008, x) - 2*x*chebyshevt(1007, x) + chebyshevt(1006, x)) == 0
def test_J13():
a = symbols('a', integer=True, negative=False)
assert chebyshevt(a, -1) == (-1)**a
def test_J14():
p = hyper([S.Half, S.Half], [R(3, 2)], z**2)
assert hyperexpand(p) == asin(z)/z
@XFAIL
def test_J15():
raise NotImplementedError("F((n+2)/2,-(n-2)/2,R(3,2),sin(z)**2) == sin(n*z)/(n*sin(z)*cos(z)); F(.) is hypergeometric function")
@XFAIL
def test_J16():
raise NotImplementedError("diff(zeta(x), x) @ x=0 == -log(2*pi)/2")
def test_J17():
assert integrate(f((x + 2)/5)*DiracDelta((x - 2)/3) - g(x)*diff(DiracDelta(x - 1), x), (x, 0, 3)) == 3*f(R(4, 5)) + Subs(Derivative(g(x), x), x, 1)
@XFAIL
def test_J18():
raise NotImplementedError("define an antisymmetric function")
# K. The Complex Domain
def test_K1():
    """Real and imaginary parts of z1 + I*z2 for complex z1, z2."""
    z1, z2 = symbols('z1, z2', complex=True)
    combined = z1 + I*z2
    assert re(combined) == -im(z2) + re(z1)
    assert im(combined) == im(z1) + re(z2)
def test_K2():
assert abs(3 - sqrt(7) + I*sqrt(6*sqrt(7) - 15)) == 1
@XFAIL
def test_K3():
a, b = symbols('a, b', real=True)
assert simplify(abs(1/(a + I/a + I*b))) == 1/sqrt(a**2 + (I/a + b)**2)
def test_K4():
assert log(3 + 4*I).expand(complex=True) == log(5) + I*atan(R(4, 3))
def test_K5():
x, y = symbols('x, y', real=True)
assert tan(x + I*y).expand(complex=True) == (sin(2*x)/(cos(2*x) +
cosh(2*y)) + I*sinh(2*y)/(cos(2*x) + cosh(2*y)))
def test_K6():
assert sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z)) == sqrt(x*y)/sqrt(x)
assert sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z)) != sqrt(y)
def test_K7():
y = symbols('y', real=True, negative=False)
expr = sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z))
sexpr = simplify(expr)
assert sexpr == sqrt(y)
@XFAIL
def test_K8():
z = symbols('z', complex=True)
assert simplify(sqrt(1/z) - 1/sqrt(z)) != 0 # Passes
z = symbols('z', complex=True, negative=False)
assert simplify(sqrt(1/z) - 1/sqrt(z)) == 0 # Fails
def test_K9():
z = symbols('z', real=True, positive=True)
assert simplify(sqrt(1/z) - 1/sqrt(z)) == 0
def test_K10():
z = symbols('z', real=True, negative=True)
assert simplify(sqrt(1/z) + 1/sqrt(z)) == 0
# This goes up to K25
# L. Determining Zero Equivalence
def test_L1():
assert sqrt(997) - (997**3)**R(1, 6) == 0
def test_L2():
assert sqrt(999983) - (999983**3)**R(1, 6) == 0
def test_L3():
assert simplify((2**R(1, 3) + 4**R(1, 3))**3 - 6*(2**R(1, 3) + 4**R(1, 3)) - 6) == 0
def test_L4():
    """trigsimp recognizes cos(x)**3 + cos(x)*sin(x)**2 - cos(x) as zero."""
    expr = cos(x)**3 + cos(x)*sin(x)**2 - cos(x)
    assert trigsimp(expr) == 0
@XFAIL
def test_L5():
assert log(tan(R(1, 2)*x + pi/4)) - asinh(tan(x)) == 0
def test_L6():
assert (log(tan(x/2 + pi/4)) - asinh(tan(x))).diff(x).subs({x: 0}) == 0
@XFAIL
def test_L7():
assert simplify(log((2*sqrt(x) + 1)/(sqrt(4*x + 4*sqrt(x) + 1)))) == 0
@XFAIL
def test_L8():
assert simplify((4*x + 4*sqrt(x) + 1)**(sqrt(x)/(2*sqrt(x) + 1)) \
*(2*sqrt(x) + 1)**(1/(2*sqrt(x) + 1)) - 2*sqrt(x) - 1) == 0
@XFAIL
def test_L9():
z = symbols('z', complex=True)
assert simplify(2**(1 - z)*gamma(z)*zeta(z)*cos(z*pi/2) - pi**2*zeta(1 - z)) == 0
# M. Equations
@XFAIL
def test_M1():
assert Equality(x, 2)/2 + Equality(1, 1) == Equality(x/2 + 1, 2)
def test_M2():
# The roots of this equation should all be real. Note that this
# doesn't test that they are correct.
sol = solveset(3*x**3 - 18*x**2 + 33*x - 19, x)
assert all(s.expand(complex=True).is_real for s in sol)
@XFAIL
def test_M5():
assert solveset(x**6 - 9*x**4 - 4*x**3 + 27*x**2 - 36*x - 23, x) == FiniteSet(2**(1/3) + sqrt(3), 2**(1/3) - sqrt(3), +sqrt(3) - 1/2**(2/3) + I*sqrt(3)/2**(2/3), +sqrt(3) - 1/2**(2/3) - I*sqrt(3)/2**(2/3), -sqrt(3) - 1/2**(2/3) + I*sqrt(3)/2**(2/3), -sqrt(3) - 1/2**(2/3) - I*sqrt(3)/2**(2/3))
def test_M6():
assert set(solveset(x**7 - 1, x)) == \
{cos(n*pi*R(2, 7)) + I*sin(n*pi*R(2, 7)) for n in range(0, 7)}
# The paper asks for exp terms, but sin's and cos's may be acceptable;
# if the results are simplified, exp terms appear for all but
# -sin(pi/14) - I*cos(pi/14) and -sin(pi/14) + I*cos(pi/14) which
# will simplify if you apply the transformation foo.rewrite(exp).expand()
def test_M7():
    """Roots of a degree-8 polynomial whose solutions are nested radicals."""
    # TODO: Replace solve with solveset, as of now test fails for solveset
    poly = (x**8 - 8*x**7 + 34*x**6 - 92*x**5 + 175*x**4 - 236*x**3
            + 226*x**2 - 140*x + 46)
    expected = [
        1 - sqrt(-6 - 2*I*sqrt(3 + 4*sqrt(3)))/2,
        1 + sqrt(-6 - 2*I*sqrt(3 + 4*sqrt(3)))/2,
        1 - sqrt(-6 + 2*I*sqrt(3 + 4*sqrt(3)))/2,
        1 + sqrt(-6 + 2*I*sqrt(3 + 4*sqrt(3)))/2,
        1 - sqrt(-6 + 2*sqrt(-3 + 4*sqrt(3)))/2,
        1 + sqrt(-6 + 2*sqrt(-3 + 4*sqrt(3)))/2,
        1 - sqrt(-6 - 2*sqrt(-3 + 4*sqrt(3)))/2,
        1 + sqrt(-6 - 2*sqrt(-3 + 4*sqrt(3)))/2]
    roots = solve(poly, x)
    assert [root.simplify() for root in roots] == expected
@XFAIL # There are an infinite number of solutions.
def test_M8():
x = Symbol('x')
z = symbols('z', complex=True)
assert solveset(exp(2*x) + 2*exp(x) + 1 - z, x, S.Reals) == \
FiniteSet(log(1 + z - 2*sqrt(z))/2, log(1 + z + 2*sqrt(z))/2)
# This one could be simplified better (the 1/2 could be pulled into the log
# as a sqrt, and the function inside the log can be factored as a square,
# giving [log(sqrt(z) - 1), log(sqrt(z) + 1)]). Also, there should be an
# infinite number of solutions.
# x = {log(sqrt(z) - 1), log(sqrt(z) + 1) + i pi} [+ n 2 pi i, + n 2 pi i]
# where n is an arbitrary integer. See url of detailed output above.
@XFAIL
def test_M9():
x = symbols('x')
raise NotImplementedError("solveset(exp(2-x**2)-exp(-x),x) has complex solutions.")
def test_M10():
# TODO: Replace solve with solveset, as of now test fails for solveset
assert solve(exp(x) - x, x) == [-LambertW(-1)]
@XFAIL
def test_M11():
assert solveset(x**x - x, x) == FiniteSet(-1, 1)
def test_M12():
# TODO: x = [-1, 2*(+/-asinh(1)*I + n*pi}, 3*(pi/6 + n*pi/3)]
# TODO: Replace solve with solveset, as of now test fails for solveset
assert solve((x + 1)*(sin(x)**2 + 1)**2*cos(3*x)**3, x) == [
-1, pi/6, pi/2,
- I*log(1 + sqrt(2)), I*log(1 + sqrt(2)),
pi - I*log(1 + sqrt(2)), pi + I*log(1 + sqrt(2)),
]
@XFAIL
def test_M13():
n = Dummy('n')
assert solveset_real(sin(x) - cos(x), x) == ImageSet(Lambda(n, n*pi - pi*R(7, 4)), S.Integers)
@XFAIL
def test_M14():
n = Dummy('n')
assert solveset_real(tan(x) - 1, x) == ImageSet(Lambda(n, n*pi + pi/4), S.Integers)
def test_M15():
if PY3:
n = Dummy('n')
assert solveset(sin(x) - S.Half) in (Union(ImageSet(Lambda(n, 2*n*pi + pi/6), S.Integers),
ImageSet(Lambda(n, 2*n*pi + pi*R(5, 6)), S.Integers)),
Union(ImageSet(Lambda(n, 2*n*pi + pi*R(5, 6)), S.Integers),
ImageSet(Lambda(n, 2*n*pi + pi/6), S.Integers)))
@XFAIL
def test_M16():
n = Dummy('n')
assert solveset(sin(x) - tan(x), x) == ImageSet(Lambda(n, n*pi), S.Integers)
@XFAIL
def test_M17():
assert solveset_real(asin(x) - atan(x), x) == FiniteSet(0)
@XFAIL
def test_M18():
assert solveset_real(acos(x) - atan(x), x) == FiniteSet(sqrt((sqrt(5) - 1)/2))
def test_M19():
# TODO: Replace solve with solveset, as of now test fails for solveset
assert solve((x - 2)/x**R(1, 3), x) == [2]
def test_M20():
    """The equation sqrt(x**2 + 1) - x + 2 == 0 has no solutions."""
    solutions = solveset(sqrt(x**2 + 1) - x + 2, x)
    assert solutions == EmptySet()
def test_M21():
assert solveset(x + sqrt(x) - 2) == FiniteSet(1)
def test_M22():
assert solveset(2*sqrt(x) + 3*x**R(1, 4) - 2) == FiniteSet(R(1, 16))
def test_M23():
x = symbols('x', complex=True)
# TODO: Replace solve with solveset, as of now test fails for solveset
assert solve(x - 1/sqrt(1 + x**2)) == [
-I*sqrt(S.Half + sqrt(5)/2), sqrt(Rational(-1, 2) + sqrt(5)/2)]
def test_M24():
# TODO: Replace solve with solveset, as of now test fails for solveset
solution = solve(1 - binomial(m, 2)*2**k, k)
answer = log(2/(m*(m - 1)), 2)
assert solution[0].expand() == answer.expand()
def test_M25():
a, b, c, d = symbols(':d', positive=True)
x = symbols('x')
# TODO: Replace solve with solveset, as of now test fails for solveset
assert solve(a*b**x - c*d**x, x)[0].expand() == (log(c/a)/log(b/d)).expand()
def test_M26():
# TODO: Replace solve with solveset, as of now test fails for solveset
assert solve(sqrt(log(x)) - log(sqrt(x))) == [1, exp(4)]
def test_M27():
    """Solve a nested log/acos/asin equation for x under a positivity
    assumption on the inner sine term.

    NOTE(review): the original version discarded the result of the ``==``
    comparison, so the test only verified that ``solve`` does not raise;
    the ``assert`` below makes the expected solutions actually checked.
    """
    x = symbols('x', real=True)
    b = symbols('b', real=True)
    with assuming(Q.is_true(sin(cos(1/E**2) + 1) + b > 0)):
        # TODO: Replace solve with solveset
        assert solve(log(acos(asin(x**R(2, 3) - b) - 1)) + 2, x) == \
            [-b - sin(1 + cos(1/E**2))**R(3, 2), b + sin(1 + cos(1/E**2))**R(3, 2)]
@XFAIL
def test_M28():
assert solveset_real(5*x + exp((x - 5)/2) - 8*x**3, x, assume=Q.real(x)) == [-0.784966, -0.016291, 0.802557]
def test_M29():
x = symbols('x')
assert solveset(abs(x - 1) - 2, domain=S.Reals) == FiniteSet(-1, 3)
def test_M30():
# TODO: Replace solve with solveset, as of now
# solveset doesn't supports assumptions
# assert solve(abs(2*x + 5) - abs(x - 2),x, assume=Q.real(x)) == [-1, -7]
assert solveset_real(abs(2*x + 5) - abs(x - 2), x) == FiniteSet(-1, -7)
def test_M31():
# TODO: Replace solve with solveset, as of now
# solveset doesn't supports assumptions
# assert solve(1 - abs(x) - max(-x - 2, x - 2),x, assume=Q.real(x)) == [-3/2, 3/2]
assert solveset_real(1 - abs(x) - Max(-x - 2, x - 2), x) == FiniteSet(R(-3, 2), R(3, 2))
@XFAIL
def test_M32():
# TODO: Replace solve with solveset, as of now
# solveset doesn't supports assumptions
assert solveset_real(Max(2 - x**2, x)- Max(-x, (x**3)/9), x) == FiniteSet(-1, 3)
@XFAIL
def test_M33():
# TODO: Replace solve with solveset, as of now
# solveset doesn't supports assumptions
# Second answer can be written in another form. The second answer is the root of x**3 + 9*x**2 - 18 = 0 in the interval (-2, -1).
assert solveset_real(Max(2 - x**2, x) - x**3/9, x) == FiniteSet(-3, -1.554894, 3)
@XFAIL
def test_M34():
z = symbols('z', complex=True)
assert solveset((1 + I) * z + (2 - I) * conjugate(z) + 3*I, z) == FiniteSet(2 + 3*I)
def test_M35():
x, y = symbols('x y', real=True)
assert linsolve((3*x - 2*y - I*y + 3*I).as_real_imag(), y, x) == FiniteSet((3, 2))
def test_M36():
# TODO: Replace solve with solveset, as of now
# solveset doesn't supports solving for function
# assert solve(f**2 + f - 2, x) == [Eq(f(x), 1), Eq(f(x), -2)]
assert solveset(f(x)**2 + f(x) - 2, f(x)) == FiniteSet(-2, 1)
def test_M37():
assert linsolve([x + y + z - 6, 2*x + y + 2*z - 10, x + 3*y + z - 10 ], x, y, z) == \
FiniteSet((-z + 4, 2, z))
def test_M38():
variables = vring("k1:50", vfield("a,b,c", ZZ).to_domain())
system = [
-b*k8/a + c*k8/a, -b*k11/a + c*k11/a, -b*k10/a + c*k10/a + k2, -k3 - b*k9/a + c*k9/a,
-b*k14/a + c*k14/a, -b*k15/a + c*k15/a, -b*k18/a + c*k18/a - k2, -b*k17/a + c*k17/a,
-b*k16/a + c*k16/a + k4, -b*k13/a + c*k13/a - b*k21/a + c*k21/a + b*k5/a - c*k5/a,
b*k44/a - c*k44/a, -b*k45/a + c*k45/a, -b*k20/a + c*k20/a, -b*k44/a + c*k44/a,
b*k46/a - c*k46/a, b**2*k47/a**2 - 2*b*c*k47/a**2 + c**2*k47/a**2, k3, -k4,
-b*k12/a + c*k12/a - a*k6/b + c*k6/b, -b*k19/a + c*k19/a + a*k7/c - b*k7/c,
b*k45/a - c*k45/a, -b*k46/a + c*k46/a, -k48 + c*k48/a + c*k48/b - c**2*k48/(a*b),
-k49 + b*k49/a + b*k49/c - b**2*k49/(a*c), a*k1/b - c*k1/b, a*k4/b - c*k4/b,
a*k3/b - c*k3/b + k9, -k10 + a*k2/b - c*k2/b, a*k7/b - c*k7/b, -k9, k11,
b*k12/a - c*k12/a + a*k6/b - c*k6/b, a*k15/b - c*k15/b, k10 + a*k18/b - c*k18/b,
-k11 + a*k17/b - c*k17/b, a*k16/b - c*k16/b, -a*k13/b + c*k13/b + a*k21/b - c*k21/b + a*k5/b - c*k5/b,
-a*k44/b + c*k44/b, a*k45/b - c*k45/b, a*k14/c - b*k14/c + a*k20/b - c*k20/b,
a*k44/b - c*k44/b, -a*k46/b + c*k46/b, -k47 + c*k47/a + c*k47/b - c**2*k47/(a*b),
a*k19/b - c*k19/b, -a*k45/b + c*k45/b, a*k46/b - c*k46/b, a**2*k48/b**2 - 2*a*c*k48/b**2 + c**2*k48/b**2,
-k49 + a*k49/b + a*k49/c - a**2*k49/(b*c), k16, -k17, -a*k1/c + b*k1/c,
-k16 - a*k4/c + b*k4/c, -a*k3/c + b*k3/c, k18 - a*k2/c + b*k2/c, b*k19/a - c*k19/a - a*k7/c + b*k7/c,
-a*k6/c + b*k6/c, -a*k8/c + b*k8/c, -a*k11/c + b*k11/c + k17, -a*k10/c + b*k10/c - k18,
-a*k9/c + b*k9/c, -a*k14/c + b*k14/c - a*k20/b + c*k20/b, -a*k13/c + b*k13/c + a*k21/c - b*k21/c - a*k5/c + b*k5/c,
a*k44/c - b*k44/c, -a*k45/c + b*k45/c, -a*k44/c + b*k44/c, a*k46/c - b*k46/c,
-k47 + b*k47/a + b*k47/c - b**2*k47/(a*c), -a*k12/c + b*k12/c, a*k45/c - b*k45/c,
-a*k46/c + b*k46/c, -k48 + a*k48/b + a*k48/c - a**2*k48/(b*c),
a**2*k49/c**2 - 2*a*b*k49/c**2 + b**2*k49/c**2, k8, k11, -k15, k10 - k18,
-k17, k9, -k16, -k29, k14 - k32, -k21 + k23 - k31, -k24 - k30, -k35, k44,
-k45, k36, k13 - k23 + k39, -k20 + k38, k25 + k37, b*k26/a - c*k26/a - k34 + k42,
-2*k44, k45, k46, b*k47/a - c*k47/a, k41, k44, -k46, -b*k47/a + c*k47/a,
k12 + k24, -k19 - k25, -a*k27/b + c*k27/b - k33, k45, -k46, -a*k48/b + c*k48/b,
a*k28/c - b*k28/c + k40, -k45, k46, a*k48/b - c*k48/b, a*k49/c - b*k49/c,
-a*k49/c + b*k49/c, -k1, -k4, -k3, k15, k18 - k2, k17, k16, k22, k25 - k7,
k24 + k30, k21 + k23 - k31, k28, -k44, k45, -k30 - k6, k20 + k32, k27 + b*k33/a - c*k33/a,
k44, -k46, -b*k47/a + c*k47/a, -k36, k31 - k39 - k5, -k32 - k38, k19 - k37,
k26 - a*k34/b + c*k34/b - k42, k44, -2*k45, k46, a*k48/b - c*k48/b,
a*k35/c - b*k35/c - k41, -k44, k46, b*k47/a - c*k47/a, -a*k49/c + b*k49/c,
-k40, k45, -k46, -a*k48/b + c*k48/b, a*k49/c - b*k49/c, k1, k4, k3, -k8,
-k11, -k10 + k2, -k9, k37 + k7, -k14 - k38, -k22, -k25 - k37, -k24 + k6,
-k13 - k23 + k39, -k28 + b*k40/a - c*k40/a, k44, -k45, -k27, -k44, k46,
b*k47/a - c*k47/a, k29, k32 + k38, k31 - k39 + k5, -k12 + k30, k35 - a*k41/b + c*k41/b,
-k44, k45, -k26 + k34 + a*k42/c - b*k42/c, k44, k45, -2*k46, -b*k47/a + c*k47/a,
-a*k48/b + c*k48/b, a*k49/c - b*k49/c, k33, -k45, k46, a*k48/b - c*k48/b,
-a*k49/c + b*k49/c
]
solution = {
k49: 0, k48: 0, k47: 0, k46: 0, k45: 0, k44: 0, k41: 0, k40: 0,
k38: 0, k37: 0, k36: 0, k35: 0, k33: 0, k32: 0, k30: 0, k29: 0,
k28: 0, k27: 0, k25: 0, k24: 0, k22: 0, k21: 0, k20: 0, k19: 0,
k18: 0, k17: 0, k16: 0, k15: 0, k14: 0, k13: 0, k12: 0, k11: 0,
k10: 0, k9: 0, k8: 0, k7: 0, k6: 0, k5: 0, k4: 0, k3: 0,
k2: 0, k1: 0,
k34: b/c*k42, k31: k39, k26: a/c*k42, k23: k39
}
assert solve_lin_sys(system, variables) == solution
def test_M39():
x, y, z = symbols('x y z', complex=True)
# TODO: Replace solve with solveset, as of now
# solveset doesn't supports non-linear multivariate
assert solve([x**2*y + 3*y*z - 4, -3*x**2*z + 2*y**2 + 1, 2*y*z**2 - z**2 - 1 ]) ==\
[{y: 1, z: 1, x: -1}, {y: 1, z: 1, x: 1},\
{y: sqrt(2)*I, z: R(1,3) - sqrt(2)*I/3, x: -sqrt(-1 - sqrt(2)*I)},\
{y: sqrt(2)*I, z: R(1,3) - sqrt(2)*I/3, x: sqrt(-1 - sqrt(2)*I)},\
{y: -sqrt(2)*I, z: R(1,3) + sqrt(2)*I/3, x: -sqrt(-1 + sqrt(2)*I)},\
{y: -sqrt(2)*I, z: R(1,3) + sqrt(2)*I/3, x: sqrt(-1 + sqrt(2)*I)}]
# N. Inequalities
def test_N1():
    """e**pi exceeds pi**e."""
    inequality = Q.is_true(E**pi > pi**E)
    assert ask(inequality)
@XFAIL
def test_N2():
x = symbols('x', real=True)
assert ask(Q.is_true(x**4 - x + 1 > 0)) is True
assert ask(Q.is_true(x**4 - x + 1 > 1)) is False
@XFAIL
def test_N3():
x = symbols('x', real=True)
assert ask(Q.is_true(And(Lt(-1, x), Lt(x, 1))), Q.is_true(abs(x) < 1 ))
@XFAIL
def test_N4():
x, y = symbols('x y', real=True)
assert ask(Q.is_true(2*x**2 > 2*y**2), Q.is_true((x > y) & (y > 0))) is True
@XFAIL
def test_N5():
x, y, k = symbols('x y k', real=True)
assert ask(Q.is_true(k*x**2 > k*y**2), Q.is_true((x > y) & (y > 0) & (k > 0))) is True
@XFAIL
def test_N6():
x, y, k, n = symbols('x y k n', real=True)
assert ask(Q.is_true(k*x**n > k*y**n), Q.is_true((x > y) & (y > 0) & (k > 0) & (n > 0))) is True
@XFAIL
def test_N7():
x, y = symbols('x y', real=True)
assert ask(Q.is_true(y > 0), Q.is_true((x > 1) & (y >= x - 1))) is True
@XFAIL
def test_N8():
x, y, z = symbols('x y z', real=True)
assert ask(Q.is_true((x == y) & (y == z)),
Q.is_true((x >= y) & (y >= z) & (z >= x)))
def test_N9():
    """|x - 1| > 2 holds exactly on (-oo, -1) union (3, oo)."""
    x = Symbol('x')
    expected = Union(Interval(-oo, -1, False, True),
                     Interval(3, oo, True))
    assert solveset(abs(x - 1) > 2, domain=S.Reals) == expected
def test_N10():
x = Symbol('x')
p = (x - 1)*(x - 2)*(x - 3)*(x - 4)*(x - 5)
assert solveset(expand(p) < 0, domain=S.Reals) == Union(Interval(-oo, 1, True, True),
Interval(2, 3, True, True),
Interval(4, 5, True, True))
def test_N11():
x = Symbol('x')
assert solveset(6/(x - 3) <= 3, domain=S.Reals) == Union(Interval(-oo, 3, True, True), Interval(5, oo))
def test_N12():
x = Symbol('x')
assert solveset(sqrt(x) < 2, domain=S.Reals) == Interval(0, 4, False, True)
def test_N13():
x = Symbol('x')
assert solveset(sin(x) < 2, domain=S.Reals) == S.Reals
@XFAIL
def test_N14():
x = Symbol('x')
# Gives 'Union(Interval(Integer(0), Mul(Rational(1, 2), pi), false, true),
# Interval(Mul(Rational(1, 2), pi), Mul(Integer(2), pi), true, false))'
# which is not the correct answer, but the provided also seems wrong.
assert solveset(sin(x) < 1, x, domain=S.Reals) == Union(Interval(-oo, pi/2, True, True),
Interval(pi/2, oo, True, True))
def test_N15():
r, t = symbols('r t')
# raises NotImplementedError: only univariate inequalities are supported
solveset(abs(2*r*(cos(t) - 1) + 1) <= 1, r, S.Reals)
def test_N16():
r, t = symbols('r t')
solveset((r**2)*((cos(t) - 4)**2)*sin(t)**2 < 9, r, S.Reals)
@XFAIL
def test_N17():
# currently only univariate inequalities are supported
assert solveset((x + y > 0, x - y < 0), (x, y)) == (abs(x) < y)
def test_O1():
M = Matrix((1 + I, -2, 3*I))
assert sqrt(expand(M.dot(M.H))) == sqrt(15)
def test_O2():
    """Cross product of (2, 2, -3) and (1, 3, 1)."""
    u = Matrix((2, 2, -3))
    v = Matrix((1, 3, 1))
    assert u.cross(v) == Matrix([[11], [-5], [4]])
# The vector module has no way of representing vectors symbolically (without
# respect to a basis)
@XFAIL
def test_O3():
assert (va ^ vb) | (vc ^ vd) == -(va | vc)*(vb | vd) + (va | vd)*(vb | vc)
def test_O4():
from sympy.vector import CoordSys3D, Del
N = CoordSys3D("N")
delop = Del()
i, j, k = N.base_vectors()
x, y, z = N.base_scalars()
F = i*(x*y*z) + j*((x*y*z)**2) + k*((y**2)*(z**3))
assert delop.cross(F).doit() == (-2*x**2*y**2*z + 2*y*z**3)*i + x*y*j + (2*x*y**2*z**2 - x*z)*k
# The vector module has no way of representing vectors symbolically (without
# respect to a basis)
@XFAIL
def test_O5():
assert grad|(f^g)-g|(grad^f)+f|(grad^g) == 0
#testO8-O9 MISSING!!
def test_O10():
L = [Matrix([2, 3, 5]), Matrix([3, 6, 2]), Matrix([8, 3, 6])]
assert GramSchmidt(L) == [Matrix([
[2],
[3],
[5]]),
Matrix([
[R(23, 19)],
[R(63, 19)],
[R(-47, 19)]]),
Matrix([
[R(1692, 353)],
[R(-1551, 706)],
[R(-423, 706)]])]
def test_P1():
assert Matrix(3, 3, lambda i, j: j - i).diagonal(-1) == Matrix(
1, 2, [-1, -1])
def test_P2():
    """Deleting the middle row and last column of a 3x3 matrix in place."""
    m = Matrix([[1, 2, 3],
                [4, 5, 6],
                [7, 8, 9]])
    m.row_del(1)
    m.col_del(2)
    expected = Matrix([[1, 2],
                       [7, 8]])
    assert m == expected
def test_P3():
    """Assemble a matrix from submatrices taken by slicing and fancy
    indexing, and check the combined result entry by entry.

    BlockMatrix rejects this ragged layout (hence the ``raises`` check),
    while plain Matrix accepts the nested list of blocks and stacks it.
    """
    A = Matrix([
        [11, 12, 13, 14],
        [21, 22, 23, 24],
        [31, 32, 33, 34],
        [41, 42, 43, 44]])
    # Contiguous slice: rows 0..2, columns 1..3.
    A11 = A[0:3, 1:4]
    # Fancy indexing: arbitrary row and column selections.
    A12 = A[(0, 1, 3), (2, 0, 3)]
    A21 = A
    A221 = -A[0:2, 2:4]
    A222 = -A[(3, 0), (2, 1)]
    A22 = BlockMatrix([[A221, A222]]).T
    rows = [[-A11, A12], [A21, A22]]
    from sympy.utilities.pytest import raises
    # BlockMatrix cannot represent these incompatible block shapes ...
    raises(ValueError, lambda: BlockMatrix(rows))
    # ... but Matrix flattens the same layout into one explicit matrix.
    B = Matrix(rows)
    assert B == Matrix([
        [-12, -13, -14, 13, 11, 14],
        [-22, -23, -24, 23, 21, 24],
        [-32, -33, -34, 43, 41, 44],
        [11, 12, 13, 14, -13, -23],
        [21, 22, 23, 24, -14, -24],
        [31, 32, 33, 34, -43, -13],
        [41, 42, 43, 44, -42, -12]])
@XFAIL
def test_P4():
raise NotImplementedError("Block matrix diagonalization not supported")
def test_P5():
    """Elementwise reduction of an integer matrix modulo 2."""
    m = Matrix([[7, 11],
                [3, 8]])
    expected = Matrix([[1, 1],
                       [1, 0]])
    assert m % 2 == expected
def test_P6():
M = Matrix([[cos(x), sin(x)],
[-sin(x), cos(x)]])
assert M.diff(x, 2) == Matrix([[-cos(x), -sin(x)],
[sin(x), -cos(x)]])
def test_P7():
M = Matrix([[x, y]])*(
z*Matrix([[1, 3, 5],
[2, 4, 6]]) + Matrix([[7, -9, 11],
[-8, 10, -12]]))
assert M == Matrix([[x*(z + 7) + y*(2*z - 8), x*(3*z - 9) + y*(4*z + 10),
x*(5*z + 11) + y*(6*z - 12)]])
def test_P8():
M = Matrix([[1, -2*I],
[-3*I, 4]])
assert M.norm(ord=S.Infinity) == 7
def test_P9():
a, b, c = symbols('a b c', real=True)
M = Matrix([[a/(b*c), 1/c, 1/b],
[1/c, b/(a*c), 1/a],
[1/b, 1/a, c/(a*b)]])
assert factor(M.norm('fro')) == (a**2 + b**2 + c**2)/(abs(a)*abs(b)*abs(c))
@XFAIL
def test_P10():
M = Matrix([[1, 2 + 3*I],
[f(4 - 5*I), 6]])
# conjugate(f(4 - 5*i)) is not simplified to f(4+5*I)
assert M.H == Matrix([[1, f(4 + 5*I)],
[2 + 3*I, 6]])
@XFAIL
def test_P11():
# raises NotImplementedError("Matrix([[x,y],[1,x*y]]).inv()
# not simplifying to extract common factor")
assert Matrix([[x, y],
[1, x*y]]).inv() == (1/(x**2 - 1))*Matrix([[x, -1],
[-1/y, x/y]])
def test_P11_workaround():
M = Matrix([[x, y], [1, x*y]]).inv()
c = gcd(tuple(M))
assert MatMul(c, M/c, evaluate=False) == MatMul(c, Matrix([
[-x*y, y],
[ 1, -x]]), evaluate=False)
def test_P12():
A11 = MatrixSymbol('A11', n, n)
A12 = MatrixSymbol('A12', n, n)
A22 = MatrixSymbol('A22', n, n)
B = BlockMatrix([[A11, A12],
[ZeroMatrix(n, n), A22]])
assert block_collapse(B.I) == BlockMatrix([[A11.I, (-1)*A11.I*A12*A22.I],
[ZeroMatrix(n, n), A22.I]])
def test_P13():
M = Matrix([[1, x - 2, x - 3],
[x - 1, x**2 - 3*x + 6, x**2 - 3*x - 2],
[x - 2, x**2 - 8, 2*(x**2) - 12*x + 14]])
L, U, _ = M.LUdecomposition()
assert simplify(L) == Matrix([[1, 0, 0],
[x - 1, 1, 0],
[x - 2, x - 3, 1]])
assert simplify(U) == Matrix([[1, x - 2, x - 3],
[0, 4, x - 5],
[0, 0, x - 7]])
def test_P14():
M = Matrix([[1, 2, 3, 1, 3],
[3, 2, 1, 1, 7],
[0, 2, 4, 1, 1],
[1, 1, 1, 1, 4]])
R, _ = M.rref()
assert R == Matrix([[1, 0, -1, 0, 2],
[0, 1, 2, 0, -1],
[0, 0, 0, 1, 3],
[0, 0, 0, 0, 0]])
def test_P15():
    """A 3x4 matrix with one linearly dependent row has rank 2."""
    rows = [[-1, 3, 7, -5],
            [4, -2, 1, 3],
            [2, 4, 15, -7]]
    assert Matrix(rows).rank() == 2
def test_P16():
M = Matrix([[2*sqrt(2), 8],
[6*sqrt(6), 24*sqrt(3)]])
assert M.rank() == 1
def test_P17():
t = symbols('t', real=True)
M=Matrix([
[sin(2*t), cos(2*t)],
[2*(1 - (cos(t)**2))*cos(t), (1 - 2*(sin(t)**2))*sin(t)]])
assert M.rank() == 1
def test_P18():
M = Matrix([[1, 0, -2, 0],
[-2, 1, 0, 3],
[-1, 2, -6, 6]])
assert M.nullspace() == [Matrix([[2],
[4],
[1],
[0]]),
Matrix([[0],
[-3],
[0],
[1]])]
def test_P19():
w = symbols('w')
M = Matrix([[1, 1, 1, 1],
[w, x, y, z],
[w**2, x**2, y**2, z**2],
[w**3, x**3, y**3, z**3]])
assert M.det() == (w**3*x**2*y - w**3*x**2*z - w**3*x*y**2 + w**3*x*z**2
+ w**3*y**2*z - w**3*y*z**2 - w**2*x**3*y + w**2*x**3*z
+ w**2*x*y**3 - w**2*x*z**3 - w**2*y**3*z + w**2*y*z**3
+ w*x**3*y**2 - w*x**3*z**2 - w*x**2*y**3 + w*x**2*z**3
+ w*y**3*z**2 - w*y**2*z**3 - x**3*y**2*z + x**3*y*z**2
+ x**2*y**3*z - x**2*y*z**3 - x*y**3*z**2 + x*y**2*z**3
)
@XFAIL
def test_P20():
raise NotImplementedError("Matrix minimal polynomial not supported")
def test_P21():
M = Matrix([[5, -3, -7],
[-2, 1, 2],
[2, -3, -4]])
assert M.charpoly(x).as_expr() == x**3 - 2*x**2 - 5*x + 6
def test_P22():
    """A scalar multiple of the identity has one eigenvalue of full multiplicity."""
    size = 100
    scaled_identity = (2 - x)*eye(size)
    assert scaled_identity.eigenvals() == {2 - x: size}
def test_P23():
M = Matrix([
[2, 1, 0, 0, 0],
[1, 2, 1, 0, 0],
[0, 1, 2, 1, 0],
[0, 0, 1, 2, 1],
[0, 0, 0, 1, 2]])
assert M.eigenvals() == {
S('1'): 1,
S('2'): 1,
S('3'): 1,
S('sqrt(3) + 2'): 1,
S('-sqrt(3) + 2'): 1}
def test_P24():
M = Matrix([[611, 196, -192, 407, -8, -52, -49, 29],
[196, 899, 113, -192, -71, -43, -8, -44],
[-192, 113, 899, 196, 61, 49, 8, 52],
[ 407, -192, 196, 611, 8, 44, 59, -23],
[ -8, -71, 61, 8, 411, -599, 208, 208],
[ -52, -43, 49, 44, -599, 411, 208, 208],
[ -49, -8, 8, 59, 208, 208, 99, -911],
[ 29, -44, 52, -23, 208, 208, -911, 99]])
assert M.eigenvals() == {
S('0'): 1,
S('10*sqrt(10405)'): 1,
S('100*sqrt(26) + 510'): 1,
S('1000'): 2,
S('-100*sqrt(26) + 510'): 1,
S('-10*sqrt(10405)'): 1,
S('1020'): 1}
def test_P25():
MF = N(Matrix([[ 611, 196, -192, 407, -8, -52, -49, 29],
[ 196, 899, 113, -192, -71, -43, -8, -44],
[-192, 113, 899, 196, 61, 49, 8, 52],
[ 407, -192, 196, 611, 8, 44, 59, -23],
[ -8, -71, 61, 8, 411, -599, 208, 208],
[ -52, -43, 49, 44, -599, 411, 208, 208],
[ -49, -8, 8, 59, 208, 208, 99, -911],
[ 29, -44, 52, -23, 208, 208, -911, 99]]))
assert (Matrix(sorted(MF.eigenvals())) - Matrix(
[-1020.0490184299969, 0.0, 0.09804864072151699, 1000.0,
1019.9019513592784, 1020.0, 1020.0490184299969])).norm() < 1e-13
def test_P26():
    """P26: partial eigenvalues of a sparse 9x9 companion-like matrix."""
    a0, a1, a2, a3, a4 = symbols('a0 a1 a2 a3 a4')
    M = Matrix([[-a4, -a3, -a2, -a1, -a0, 0, 0, 0, 0],
                [ 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 0, 1, 0, 0, 0, 0, 0, 0, 0],
                [ 0, 0, 1, 0, 0, 0, 0, 0, 0],
                [ 0, 0, 0, 1, 0, 0, 0, 0, 0],
                [ 0, 0, 0, 0, 0, -1, -1, 0, 0],
                [ 0, 0, 0, 0, 0, 1, 0, 0, 0],
                [ 0, 0, 0, 0, 0, 0, 1, -1, -1],
                [ 0, 0, 0, 0, 0, 0, 0, 1, 0]])
    # error_when_incomplete=False: only the computable eigenvalues are returned.
    assert M.eigenvals(error_when_incomplete=False) == {
        S('-1/2 - sqrt(3)*I/2'): 2,
        S('-1/2 + sqrt(3)*I/2'): 2}
def test_P27():
    """P27: eigenvectors of a 5x5 matrix with a symbolic triple eigenvalue."""
    a = symbols('a')
    M = Matrix([[a, 0, 0, 0, 0],
                [0, 0, 0, 0, 1],
                [0, 0, a, 0, 0],
                [0, 0, 0, a, 0],
                [0, -2, 0, 0, 2]])
    assert M.eigenvects() == [(a, 3, [Matrix([[1],
                                              [0],
                                              [0],
                                              [0],
                                              [0]]),
                                      Matrix([[0],
                                              [0],
                                              [1],
                                              [0],
                                              [0]]),
                                      Matrix([[0],
                                              [0],
                                              [0],
                                              [1],
                                              [0]])]),
                              (1 - I, 1, [Matrix([[          0],
                                                  [-1/(-1 + I)],
                                                  [          0],
                                                  [          0],
                                                  [          1]])]),
                              (1 + I, 1, [Matrix([[          0],
                                                  [-1/(-1 - I)],
                                                  [          0],
                                                  [          0],
                                                  [          1]])])]
@XFAIL
def test_P28():
    # P28: generalized eigenvectors — tracked upstream.
    raise NotImplementedError("Generalized eigenvectors not supported \
https://github.com/sympy/sympy/issues/5293")
@XFAIL
def test_P29():
    # P29: generalized eigenvectors — tracked upstream.
    raise NotImplementedError("Generalized eigenvectors not supported \
https://github.com/sympy/sympy/issues/5293")
def test_P30():
    """P30: Jordan normal form of a 5x5 integer matrix."""
    M = Matrix([[1, 0, 0, 1, -1],
                [0, 1, -2, 3, -3],
                [0, 0, -1, 2, -2],
                [1, -1, 1, 0, 1],
                [1, -1, 1, -1, 2]])
    _, J = M.jordan_form()
    assert J == Matrix([[-1, 0, 0, 0, 0],
                        [0, 1, 1, 0, 0],
                        [0, 0, 1, 0, 0],
                        [0, 0, 0, 1, 1],
                        [0, 0, 0, 0, 1]])
@XFAIL
def test_P31():
    # P31: Smith normal form — feature not implemented.
    raise NotImplementedError("Smith normal form not implemented")
def test_P32():
    """P32: matrix exponential of a 2x2 rotation-like matrix."""
    M = Matrix([[1, -2],
                [2, 1]])
    assert exp(M).rewrite(cos).simplify() == Matrix([[E*cos(2), -E*sin(2)],
                                                     [E*sin(2), E*cos(2)]])
def test_P33():
    """P33: matrix exponential exp(M*t) of a 4x4 symbolic matrix."""
    w, t = symbols('w t')
    M = Matrix([[0, 1, 0, 0],
                [0, 0, 0, 2*w],
                [0, 0, 0, 1],
                [0, -2*w, 3*w**2, 0]])
    assert exp(M*t).rewrite(cos).expand() == Matrix([
        [1, -3*t + 4*sin(t*w)/w, 6*t*w - 6*sin(t*w), -2*cos(t*w)/w + 2/w],
        [0, 4*cos(t*w) - 3, -6*w*cos(t*w) + 6*w, 2*sin(t*w)],
        [0, 2*cos(t*w)/w - 2/w, -3*cos(t*w) + 4, sin(t*w)/w],
        [0, -2*sin(t*w), 3*w*sin(t*w), cos(t*w)]])
@XFAIL
def test_P34():
    """P34: sine of a block-diagonal Jordan matrix (not yet supported)."""
    a, b, c = symbols('a b c', real=True)
    M = Matrix([[a, 1, 0, 0, 0, 0],
                [0, a, 0, 0, 0, 0],
                [0, 0, b, 0, 0, 0],
                [0, 0, 0, c, 1, 0],
                [0, 0, 0, 0, c, 1],
                [0, 0, 0, 0, 0, c]])
    # raises exception, sin(M) not supported. exp(M*I) also not supported
    # https://github.com/sympy/sympy/issues/6218
    assert sin(M) == Matrix([[sin(a), cos(a), 0, 0, 0, 0],
                             [0, sin(a), 0, 0, 0, 0],
                             [0, 0, sin(b), 0, 0, 0],
                             [0, 0, 0, sin(c), cos(c), -sin(c)/2],
                             [0, 0, 0, 0, sin(c), cos(c)],
                             [0, 0, 0, 0, 0, sin(c)]])
@XFAIL
def test_P35():
    """P35: sin of a matrix whose multiple of pi/2 yields the identity."""
    M = pi/2*Matrix([[2, 1, 1],
                     [2, 3, 2],
                     [1, 1, 2]])
    # raises exception, sin(M) not supported. exp(M*I) also not supported
    # https://github.com/sympy/sympy/issues/6218
    assert sin(M) == eye(3)
@XFAIL
def test_P36():
    """P36: principal square root of a 2x2 positive-definite matrix."""
    M = Matrix([[10, 7],
                [7, 17]])
    assert sqrt(M) == Matrix([[3, 1],
                              [1, 4]])
def test_P37():
    """P37: square root of a Jordan block via fractional matrix power."""
    M = Matrix([[1, 1, 0],
                [0, 1, 0],
                [0, 0, 1]])
    assert M**S.Half == Matrix([[1, R(1, 2), 0],
                                [0, 1, 0],
                                [0, 0, 1]])
@XFAIL
def test_P38():
    """P38: square root of a singular nilpotent matrix (currently raises)."""
    M=Matrix([[0, 1, 0],
              [0, 0, 0],
              [0, 0, 0]])
    #raises ValueError: Matrix det == 0; not invertible
    M**S.Half
@XFAIL
def test_P39():
    """
    M=Matrix([
        [1, 1],
        [2, 2],
        [3, 3]])
    M.SVD()
    """
    # P39: singular value decomposition — feature not implemented.
    raise NotImplementedError("Singular value decomposition not implemented")
def test_P40():
    """P40: Jacobian of the polar-to-Cartesian map (r, t) -> (r cos t, r sin t)."""
    r, t = symbols('r t', real=True)
    M = Matrix([r*cos(t), r*sin(t)])
    assert M.jacobian(Matrix([r, t])) == Matrix([[cos(t), -r*sin(t)],
                                                 [sin(t), r*cos(t)]])
def test_P41():
    """P41: Hessian matrix of r**2*sin(t) with respect to (r, t)."""
    r, t = symbols('r t', real=True)
    assert hessian(r**2*sin(t),(r,t)) == Matrix([[  2*sin(t),   2*r*cos(t)],
                                                 [2*r*cos(t), -r**2*sin(t)]])
def test_P42():
    """P42: Wronskian of cos(x) and sin(x) simplifies to 1."""
    assert wronskian([cos(x), sin(x)], x).simplify() == 1
def test_P43():
    """P43: a hand-rolled Jacobian matches SymPy's for the polar map."""
    def __my_jacobian(M, Y):
        # Stack the derivative of M w.r.t. each variable, then transpose.
        return Matrix([M.diff(v).T for v in Y]).T
    r, t = symbols('r t', real=True)
    M = Matrix([r*cos(t), r*sin(t)])
    assert __my_jacobian(M,[r,t]) == Matrix([[cos(t), -r*sin(t)],
                                             [sin(t), r*cos(t)]])
def test_P44():
    """P44: a hand-rolled Hessian matches the known result for r**2*sin(t)."""
    def __my_hessian(f, Y):
        # Gradient as a column vector, then differentiate each row again.
        V = Matrix([diff(f, v) for v in Y])
        return  Matrix([V.T.diff(v) for v in Y])
    r, t = symbols('r t', real=True)
    assert __my_hessian(r**2*sin(t), (r, t)) == Matrix([
                                            [  2*sin(t),   2*r*cos(t)],
                                            [2*r*cos(t), -r**2*sin(t)]])
def test_P45():
    """P45: a hand-rolled Wronskian matches the known result for cos, sin."""
    def __my_wronskian(Y, v):
        # Row n is the n-th derivative of the function list.
        M = Matrix([Matrix(Y).T.diff(x, n) for n in range(0, len(Y))])
        return  M.det()
    assert __my_wronskian([cos(x), sin(x)], x).simplify() == 1
# Q1-Q6 Tensor tests missing
@XFAIL
def test_R1():
    """R1: sum of squared deviations from the mean (sum does not evaluate)."""
    i, j, n = symbols('i j n', integer=True, positive=True)
    xn = MatrixSymbol('xn', n, 1)
    Sm = Sum((xn[i, 0] - Sum(xn[j, 0], (j, 0, n - 1))/n)**2, (i, 0, n - 1))
    # sum does not calculate
    # Unknown result
    Sm.doit()
    raise NotImplementedError('Unknown result')
@XFAIL
def test_R2():
    """R2: least-squares normal equations; solveset on a system fails."""
    m, b = symbols('m b')
    i, n = symbols('i n', integer=True, positive=True)
    xn = MatrixSymbol('xn', n, 1)
    yn = MatrixSymbol('yn', n, 1)
    f = Sum((yn[i, 0] - m*xn[i, 0] - b)**2, (i, 0, n - 1))
    f1 = diff(f, m)
    f2 = diff(f, b)
    # raises TypeError: solveset() takes at most 2 arguments (3 given)
    solveset((f1, f2), (m, b), domain=S.Reals)
@XFAIL
def test_R3():
    """R3: alternating sum of squared binomials; combsimp leaves exp_polar."""
    n, k = symbols('n k', integer=True, positive=True)
    sk = ((-1)**k) * (binomial(2*n, k))**2
    Sm = Sum(sk, (k, 1, oo))
    T = Sm.doit()
    T2 = T.combsimp()
    # returns -((-1)**n*factorial(2*n)
    #           - (factorial(n))**2)*exp_polar(-I*pi)/(factorial(n))**2
    assert T2 == (-1)**n*binomial(2*n, n)
@XFAIL
def test_R4():
    """R4: full Gosper algorithm for indefinite sums (not supported)."""
    # Macsyma indefinite sum test case:
    #(c15) /* Check whether the full Gosper algorithm is implemented
    #   => 1/2^(n + 1) binomial(n, k - 1) */
    #closedform(indefsum(binomial(n, k)/2^n - binomial(n + 1, k)/2^(n + 1), k));
    #Time= 2690 msecs
    #                      (- n + k - 1) binomial(n + 1, k)
    #(d15)               - --------------------------------
    #                                    n
    #                                   2  2 (n + 1)
    #
    #(c16) factcomb(makefact(%));
    #Time= 220 msecs
    #                                 n!
    #(d16)                     ----------------
    #                                n
    #                          2 k! 2  (n - k)!
    # Might be possible after fixing https://github.com/sympy/sympy/pull/1879
    raise NotImplementedError("Indefinite sum not supported")
@XFAIL
def test_R5():
    """R5: Dixon's identity; hypergeometric series not calculated."""
    a, b, c, n, k = symbols('a b c n k', integer=True, positive=True)
    sk = ((-1)**k)*(binomial(a + b, a + k)
                    *binomial(b + c, b + k)*binomial(c + a, c + k))
    Sm = Sum(sk, (k, 1, oo))
    T = Sm.doit()  # hypergeometric series not calculated
    assert T == factorial(a+b+c)/(factorial(a)*factorial(b)*factorial(c))
def test_R6():
    """R6: telescoping sum of successive differences."""
    n, k = symbols('n k', integer=True, positive=True)
    gn = MatrixSymbol('gn', n + 2, 1)
    Sm = Sum(gn[k, 0] - gn[k - 1, 0], (k, 1, n + 1))
    assert Sm.doit() == -gn[0, 0] + gn[n + 1, 0]
def test_R7():
    """R7: closed form of sum of cubes, n**2*(n+1)**2/4."""
    n, k = symbols('n k', integer=True, positive=True)
    T = Sum(k**3,(k,1,n)).doit()
    assert T.factor() == n**2*(n + 1)**2/4
@XFAIL
def test_R8():
    """R8: sum k**2*C(n,k); result stays Piecewise, combsimp insufficient."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(k**2*binomial(n, k), (k, 1, n))
    T = Sm.doit() #returns Piecewise function
    assert T.combsimp() == n*(n + 1)*2**(n - 2)
def test_R9():
    """R9: sum of C(n, k-1)/k equals (2**(n+1) - 1)/(n + 1)."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(binomial(n, k - 1)/k, (k, 1, n + 1))
    assert Sm.doit().simplify() == (2**(n + 1) - 1)/(n + 1)
@XFAIL
def test_R10():
    """R10: Vandermonde's identity; rewrite(binomial) does not round-trip."""
    n, m, r, k = symbols('n m r k', integer=True, positive=True)
    Sm = Sum(binomial(n, k)*binomial(m, r - k), (k, 0, r))
    T = Sm.doit()
    T2 = T.combsimp().rewrite(factorial)
    assert T2 == factorial(m + n)/(factorial(r)*factorial(m + n - r))
    assert T2 == binomial(m + n, r).rewrite(factorial)
    # rewrite(binomial) is not working.
    # https://github.com/sympy/sympy/issues/7135
    T3 = T2.rewrite(binomial)
    assert T3 == binomial(m + n, r)
@XFAIL
def test_R11():
    """R11: sum C(n,k)*F(k) = F(2n); Fibonacci simplification missing."""
    n, k = symbols('n k', integer=True, positive=True)
    sk = binomial(n, k)*fibonacci(k)
    Sm = Sum(sk, (k, 0, n))
    T = Sm.doit()
    # Fibonacci simplification not implemented
    # https://github.com/sympy/sympy/issues/7134
    assert T == fibonacci(2*n)
@XFAIL
def test_R12():
    """R12: sum of squared Fibonacci numbers equals F(n)*F(n+1)."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(fibonacci(k)**2, (k, 0, n))
    T = Sm.doit()
    assert T == fibonacci(n)*fibonacci(n + 1)
@XFAIL
def test_R13():
    """R13: closed form of sum of sin(k*x); Sum does not evaluate."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(sin(k*x), (k, 1, n))
    T = Sm.doit()  # Sum is not calculated
    assert T.simplify() == cot(x/2)/2 - cos(x*(2*n + 1)/2)/(2*sin(x/2))
@XFAIL
def test_R14():
    """R14: sum of sin((2k-1)x) equals sin(nx)**2/sin(x); Sum not evaluated."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(sin((2*k - 1)*x), (k, 1, n))
    T = Sm.doit()  # Sum is not calculated
    assert T.simplify() == sin(n*x)**2/sin(x)
@XFAIL
def test_R15():
    """R15: diagonal binomial sum equals fibonacci(n+1); Sum not evaluated."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(binomial(n - k, k), (k, 0, floor(n/2)))
    T = Sm.doit()  # Sum is not calculated
    assert T.simplify() == fibonacci(n + 1)
def test_R16():
    """R16: sum of 1/k**2 + 1/k**3 gives zeta(3) + pi**2/6."""
    k = symbols('k', integer=True, positive=True)
    Sm = Sum(1/k**2 + 1/k**3, (k, 1, oo))
    assert Sm.doit() == zeta(3) + pi**2/6
def test_R17():
    """R17: numeric value of the R16 sum matches the reference float."""
    k = symbols('k', integer=True, positive=True)
    assert abs(float(Sum(1/k**2 + 1/k**3, (k, 1, oo)))
               - 2.8469909700078206) < 1e-15
def test_R18():
    """R18: sum of 1/(2**k * k**2) equals pi**2/12 - log(2)**2/2."""
    k = symbols('k', integer=True, positive=True)
    Sm = Sum(1/(2**k*k**2), (k, 1, oo))
    T = Sm.doit()
    assert T.simplify() == -log(2)**2/2 + pi**2/12
@slow
@XFAIL
def test_R19():
    """R19: sum over 1/((3k+1)(3k+2)(3k+3)); result not simplified."""
    k = symbols('k', integer=True, positive=True)
    Sm = Sum(1/((3*k + 1)*(3*k + 2)*(3*k + 3)), (k, 0, oo))
    T = Sm.doit()
    # assert fails, T not simplified
    assert T.simplify() == -log(3)/4 + sqrt(3)*pi/12
@XFAIL
def test_R20():
    """R20: sum of every fourth binomial coefficient; not simplified."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(binomial(n, 4*k), (k, 0, oo))
    T = Sm.doit()
    # assert fails, T not simplified
    assert T.simplify() == 2**(n/2)*cos(pi*n/4)/2 + 2**(n - 1)/2
@XFAIL
def test_R21():
    """R21: telescoping surd sum equals 1; Sum not evaluated."""
    k = symbols('k', integer=True, positive=True)
    Sm = Sum(1/(sqrt(k*(k + 1)) * (sqrt(k) + sqrt(k + 1))), (k, 1, oo))
    T = Sm.doit()  # Sum not calculated
    assert T.simplify() == 1
# test_R22 answer not available in Wester samples
# Sum(Sum(binomial(n, k)*binomial(n - k, n - 2*k)*x**n*y**(n - 2*k),
# (k, 0, floor(n/2))), (n, 0, oo)) with abs(x*y)<1?
@XFAIL
def test_R23():
    """R23: double sum generating function; Sum not calculated."""
    n, k = symbols('n k', integer=True, positive=True)
    Sm = Sum(Sum((factorial(n)/(factorial(k)**2*factorial(n - 2*k)))*
                 (x/y)**k*(x*y)**(n - k), (n, 2*k, oo)), (k, 0, oo))
    # Missing how to express constraint abs(x*y)<1?
    T = Sm.doit()  # Sum not calculated
    assert T == -1/sqrt(x**2*y**2 - 4*x**2 - 2*x*y + 1)
def test_R24():
    """R24: sum of partial products k/(2k-1) converges to pi/2."""
    m, k = symbols('m k', integer=True, positive=True)
    Sm = Sum(Product(k/(2*k - 1), (k, 1, m)), (m, 2, oo))
    assert Sm.doit() == pi/2
def test_S1():
    """S1: finite product of gamma(k/3) for k = 1..8."""
    k = symbols('k', integer=True, positive=True)
    Pr = Product(gamma(k/3), (k, 1, 8))
    assert Pr.doit().simplify() == 640*sqrt(3)*pi**3/6561
def test_S2():
    """S2: the product of 1..n evaluates to n!."""
    nn, kk = symbols('n k', integer=True, positive=True)
    prod = Product(kk, (kk, 1, nn)).doit()
    assert prod == factorial(nn)
def test_S3():
    """S3: product of x**k for k = 1..n equals x**(n*(n+1)/2)."""
    n, k = symbols('n k', integer=True, positive=True)
    assert Product(x**k, (k, 1, n)).doit().simplify() == x**(n*(n + 1)/2)
def test_S4():
    """S4: telescoping product of (1 + 1/k) for k = 1..n-1 equals n."""
    n, k = symbols('n k', integer=True, positive=True)
    assert Product(1 + 1/k, (k, 1, n -1)).doit().simplify() == n
def test_S5():
    """S5: product of (2k-1)/(2k) in terms of gamma functions."""
    n, k = symbols('n k', integer=True, positive=True)
    assert (Product((2*k - 1)/(2*k), (k, 1, n)).doit().gammasimp() ==
            gamma(n + S.Half)/(sqrt(pi)*gamma(n + 1)))
@XFAIL
def test_S6():
    """S6: cyclotomic-style product identity; Product does not evaluate."""
    n, k = symbols('n k', integer=True, positive=True)
    # Product does not evaluate
    assert (Product(x**2 -2*x*cos(k*pi/n) + 1, (k, 1, n - 1)).doit().simplify()
            == (x**(2*n) - 1)/(x**2 - 1))
@XFAIL
def test_S7():
    """S7: infinite product of (k**3 - 1)/(k**3 + 1) equals 2/3."""
    k = symbols('k', integer=True, positive=True)
    Pr = Product((k**3 - 1)/(k**3 + 1), (k, 2, oo))
    T = Pr.doit()  # Product does not evaluate
    assert T.simplify() == R(2, 3)
@XFAIL
def test_S8():
    """S8: Wallis product, infinite product of 1 - 1/(2k)**2 equals 2/pi."""
    k = symbols('k', integer=True, positive=True)
    Pr = Product(1 - 1/(2*k)**2, (k, 1, oo))
    T = Pr.doit()
    # Product does not evaluate
    assert T.simplify() == 2/pi
@XFAIL
def test_S9():
    """S9: alternating product converging to sqrt(2); Product returns 0."""
    k = symbols('k', integer=True, positive=True)
    Pr = Product(1 + (-1)**(k + 1)/(2*k - 1), (k, 1, oo))
    T = Pr.doit()
    # Product produces 0
    # https://github.com/sympy/sympy/issues/7133
    assert T.simplify() == sqrt(2)
@XFAIL
def test_S10():
    """S10: complex infinite product equals -1; Product does not evaluate."""
    k = symbols('k', integer=True, positive=True)
    Pr = Product((k*(k + 1) + 1 + I)/(k*(k + 1) + 1 - I), (k, 0, oo))
    T = Pr.doit()
    # Product does not evaluate
    assert T.simplify() == -1
def test_T1():
    """T1: two classic limits — (1 + 1/n)**n -> e and (1 - cos x)/x**2 -> 1/2."""
    euler_limit = limit((1 + 1/n)**n, n, oo)
    cos_limit = limit((1 - cos(x))/x**2, x, 0)
    assert euler_limit == E
    assert cos_limit == S.Half
def test_T2():
    """T2: limit of (3**x + 5**x)**(1/x) as x -> oo is the larger base, 5."""
    assert limit((3**x + 5**x)**(1/x), x, oo) == 5
def test_T3():
    """T3: limit of log(x)/(log(x) + sin(x)) as x -> oo is 1."""
    assert limit(log(x)/(log(x) + sin(x)), x, oo) == 1
def test_T4():
    """T4: a delicate exp-nested limit evaluating to -exp(2)."""
    assert limit((exp(x*exp(-x)/(exp(-x) + exp(-2*x**2/(x + 1))))
                 - exp(x))/x, x, oo) == -exp(2)
def test_T5():
    """T5: heavily nested log/exp limit evaluating to 1/3."""
    assert  limit(x*log(x)*log(x*exp(x) - x**2)**2/log(log(x**2
                  + 2*exp(exp(3*x**3*log(x))))), x, oo) == R(1, 3)
def test_T6():
    """T6: Stirling-type limit, (n!)**(1/n)/n -> 1/e."""
    assert limit(1/n * factorial(n)**(1/n), n, oo) == exp(-1)
def test_T7():
    """T7: same limit via gamma; only checks that evaluation does not raise."""
    limit(1/n * gamma(n + 1)**(1/n), n, oo)
def test_T8():
    """T8: gamma(z + a)/gamma(z) * z**(-a) -> 1 as z -> oo."""
    a, z = symbols('a z', real=True, positive=True)
    assert limit(gamma(z + a)/gamma(z)*exp(-a*log(z)), z, oo) == 1
@XFAIL
def test_T9():
    """T9: limit of a hypergeometric function equals exp(z); mrv fails."""
    z, k = symbols('z k', real=True, positive=True)
    # raises NotImplementedError:
    # Don't know how to calculate the mrv of '(1, k)'
    assert limit(hyper((1, k), (1,), z/k), k, oo) == exp(z)
@XFAIL
def test_T10():
    """T10: limit of zeta(x) - 1/(x-1) at x=1 should be EulerGamma."""
    # No longer raises PoleError, but should return euler-mascheroni constant
    assert limit(zeta(x) - 1/(x - 1), x, 1) == integrate(-1/x + 1/floor(x), (x, 1, oo))
@XFAIL
def test_T11():
    """T11: Euler product definition of gamma(x); currently evaluates to 0."""
    n, k = symbols('n k', integer=True, positive=True)
    # evaluates to 0
    assert limit(n**x/(x*product((1 + x/k), (k, 1, n))), n, oo) == gamma(x)
@XFAIL
def test_T12():
    """T12: limit involving the error-function integral; returns erf expr."""
    x, t = symbols('x t', real=True)
    # Does not evaluate the limit but returns an expression with erf
    assert limit(x * integrate(exp(-t**2), (t, 0, x))/(1 - exp(-x**2)),
                 x, 0) == 1
def test_T13():
    """T13: one-sided limits of x/|x| at 0 are -1 (left) and 1 (right)."""
    x = symbols('x', real=True)
    assert [limit(x/abs(x), x, 0, dir='-'),
            limit(x/abs(x), x, 0, dir='+')] == [-1, 1]
def test_T14():
    """T14: limit of atan(-log(x)) as x -> 0+ is pi/2."""
    x = symbols('x', real=True)
    assert limit(atan(-log(x)), x, 0, dir='+') == pi/2
def test_U1():
    """U1: the derivative of |x| for real x is sign(x)."""
    xr = symbols('x', real=True)
    assert diff(abs(xr), xr) == sign(xr)
def test_U2():
    """U2: derivative of a piecewise |x| equivalent stays piecewise."""
    f = Lambda(x, Piecewise((-x, x < 0), (x, x >= 0)))
    assert diff(f(x), x) == Piecewise((-1, x < 0), (1, x >= 0))
def test_U3():
    """U3: derivative of a piecewise function with a removable point."""
    f = Lambda(x, Piecewise((x**2 - 1, x == 1), (x**3, x != 1)))
    f1 = Lambda(x, diff(f(x), x))
    assert f1(x) == 3*x**2
    assert f1(1) == 3
@XFAIL
def test_U4():
    """U4: n-th derivative of x**n should rewrite to factorial(n)."""
    n = symbols('n', integer=True, positive=True)
    x = symbols('x', real=True)
    d = diff(x**n, x, n)
    assert d.rewrite(factorial) == factorial(n)
def test_U5():
    """U5: chain rule for the second derivative of f(g(t)) (issue 6681)."""
    # issue 6681
    t = symbols('t')
    ans = (
        Derivative(f(g(t)), g(t))*Derivative(g(t), (t, 2)) +
        Derivative(f(g(t)), (g(t), 2))*Derivative(g(t), t)**2)
    assert f(g(t)).diff(t, 2) == ans
    assert ans.doit() == ans
def test_U6():
    """U6: Leibniz rule for differentiating an integral with variable limits."""
    h = Function('h')
    T = integrate(f(y), (y, h(x), g(x)))
    assert T.diff(x) == (
        f(g(x))*Derivative(g(x), x) - f(h(x))*Derivative(h(x), x))
@XFAIL
def test_U7():
    """U7: total differential of a multivariate function (currently raises)."""
    p, t = symbols('p t', real=True)
    # Exact differential => d(V(P, T)) => dV/dP DP + dV/dT DT
    # raises ValueError:  Since there is more than one variable in the
    # expression, the variable(s) of differentiation must be supplied to
    # differentiate f(p,t)
    diff(f(p, t))
def test_U8():
    """U8: implicit differentiation of cos(x*y) + x via idiff."""
    x, y = symbols('x y', real=True)
    eq = cos(x*y) + x
    # If SymPy had implicit_diff() function this hack could be avoided
    # TODO: Replace solve with solveset, current test fails for solveset
    assert idiff(y - eq, y, x) == (-y*sin(x*y) + 1)/(x*sin(x*y) + 1)
def test_U9():
    """U9: substitute g(x**2 + y**2) into a sum of partials and factor."""
    # Wester sample case for Maple:
    # O29 := diff(f(x, y), x) + diff(f(x, y), y);
    #                      /d         \   /d         \
    #                      |-- f(x, y)| + |-- f(x, y)|
    #                      \dx        /   \dy        /
    #
    # O30 := factor(subs(f(x, y) = g(x^2 + y^2), %));
    #                                2    2
    #                        2 D(g)(x  + y ) (x + y)
    x, y = symbols('x y', real=True)
    su = diff(f(x, y), x) + diff(f(x, y), y)
    s2 = su.subs(f(x, y), g(x**2 + y**2))
    s3 = s2.doit().factor()
    # Subs not performed, s3 = 2*(x + y)*Subs(Derivative(
    #   g(_xi_1), _xi_1), _xi_1, x**2 + y**2)
    # Derivative(g(x*2 + y**2), x**2 + y**2) is not valid in SymPy,
    # and probably will remain that way. You can take derivatives with respect
    # to other expressions only if they are atomic, like a symbol or a
    # function.
    # D operator should be added to SymPy
    # See https://github.com/sympy/sympy/issues/4719.
    assert s3 == (x + y)*Subs(Derivative(g(x), x), x, x**2 + y**2)*2
def test_U10():
    """U10: residue of a rational function at the double pole z = -1."""
    # see issue 2519:
    assert residue((z**3 + 5)/((z**4 - 1)*(z + 1)), z, -1) == R(-9, 4)
@XFAIL
def test_U11():
    # U11: wedge product of differential 1-forms (dx/dy/dz not supported).
    assert (2*dx + dz) ^ (3*dx + dy + dz) ^ (dx + dy + 4*dz) == 8*dx ^ dy ^dz
@XFAIL
def test_U12():
    """U12: exterior derivative of a differential 2-form (not supported)."""
    # Wester sample case:
    # (c41) /* d(3 x^5 dy /\ dz + 5 x y^2 dz /\ dx + 8 z dx /\ dy)
    #    => (15 x^4 + 10 x y + 8) dx /\ dy /\ dz */
    # factor(ext_diff(3*x^5 * dy ~ dz + 5*x*y^2 * dz ~ dx + 8*z * dx ~ dy));
    #                      4
    # (d41)       (10 x y + 15 x  + 8) dx dy dz
    raise NotImplementedError(
        "External diff of differential form not supported")
def test_U13():
    """U13: global minimum of the quartic x**4 - x + 1."""
    assert minimum(x**4 - x + 1, x) == -3*2**R(1,3)/8 + 1
@XFAIL
def test_U14():
    """U14: global min/max of a multivariate function (not supported)."""
    #f = 1/(x**2 + y**2 + 1)
    #assert [minimize(f), maximize(f)] == [0,1]
    raise NotImplementedError("minimize(), maximize() not supported")
@XFAIL
def test_U15():
    # U15: constrained minimization — not supported.
    raise NotImplementedError("minimize() not supported and also solve does \
not support multivariate inequalities")
@XFAIL
def test_U16():
    # U16: constrained minimization — not supported.
    raise NotImplementedError("minimize() not supported in SymPy and also \
solve does not support multivariate inequalities")
@XFAIL
def test_U17():
    # U17: symbolic linear programming — not supported.
    raise NotImplementedError("Linear programming, symbolic simplex not \
supported in SymPy")
def test_V1():
    """V1: antiderivative of |x| as a Piecewise."""
    x = symbols('x', real=True)
    assert integrate(abs(x), x) == Piecewise((-x**2/2, x <= 0), (x**2/2, True))
def test_V2():
    """V2: integral of a piecewise |x| equivalent stays piecewise."""
    assert integrate(Piecewise((-x, x < 0), (x, x >= 0)), x
        ) == Piecewise((-x**2/2, x < 0), (x**2/2, True))
def test_V3():
    """V3: integral of 1/(x**3 + 2), checked by differentiating back."""
    assert integrate(1/(x**3 + 2),x).diff().simplify() == 1/(x**3 + 2)
def test_V4():
    """V4: integral of 2**x/sqrt(1 + 4**x) is asinh(2**x)/log(2)."""
    assert integrate(2**x/sqrt(1 + 4**x), x) == asinh(2**x)/log(2)
@XFAIL
def test_V5():
    """V5: rational-power integrand; result does not simplify as expected."""
    # Returns (-45*x**2 + 80*x - 41)/(5*sqrt(2*x - 1)*(4*x**2 - 4*x + 1))
    assert (integrate((3*x - 5)**2/(2*x - 1)**R(7, 2), x).simplify() ==
            (-41 + 80*x - 45*x**2)/(5*(2*x - 1)**R(5, 2)))
@XFAIL
def test_V6():
    """V6: exponential integrand; result comes back as a RootSum."""
    # returns RootSum(40*_z**2 - 1, Lambda(_i, _i*log(-4*_i + exp(-m*x))))/m
    assert (integrate(1/(2*exp(m*x) - 5*exp(-m*x)), x) == sqrt(10)*(
            log(2*exp(m*x) - sqrt(10)) - log(2*exp(m*x) + sqrt(10)))/(20*m))
def test_V7():
    """V7: integral of sinh(x)**4/cosh(x)**2 in hyperbolic form."""
    r1 = integrate(sinh(x)**4/cosh(x)**2)
    assert r1.simplify() == x*R(-3, 2) + sinh(x)**3/(2*cosh(x)) + 3*tanh(x)/2
@XFAIL
def test_V8_V9():
    """V8/V9: integration under an assumption b**2 > a**2 (not supported)."""
    #Macsyma test case:
    #(c27) /* This example involves several symbolic parameters
    #   => 1/sqrt(b^2 - a^2) log([sqrt(b^2 - a^2) tan(x/2) + a + b]/
    #                            [sqrt(b^2 - a^2) tan(x/2) - a - b])   (a^2 < b^2)
    #      [Gradshteyn and Ryzhik 2.553(3)] */
    #assume(b^2 > a^2)$
    #(c28) integrate(1/(a + b*cos(x)), x);
    #(c29) trigsimp(ratsimp(diff(%, x)));
    #                        1
    #(d29)             ------------
    #                  b cos(x) + a
    raise NotImplementedError(
        "Integrate with assumption not supported")
def test_V10():
    """V10: Weierstrass substitution integral with tangent half-angle result."""
    assert integrate(1/(3 + 3*cos(x) + 4*sin(x)), x) == log(tan(x/2) + R(3, 4))/4
def test_V11():
    """V11: similar trigonometric integral, combined into a single log."""
    r1 = integrate(1/(4 + 3*cos(x) + 4*sin(x)), x)
    r2 = factor(r1)
    assert (logcombine(r2, force=True) ==
            log(((tan(x/2) + 1)/(tan(x/2) + 7))**R(1, 3)))
@XFAIL
def test_V12():
    """V12: degenerate trig integral; known Python-version discrepancy."""
    r1 = integrate(1/(5 + 3*cos(x) + 4*sin(x)), x)
    # Correct result in python2.7.4, wrong result in python3.5
    # https://github.com/sympy/sympy/issues/7157
    assert r1 == -1/(tan(x/2) + 2)
@XFAIL
def test_V13():
    """V13: trig integral; complex-log form does not simplify to atan."""
    r1 = integrate(1/(6 + 3*cos(x) + 4*sin(x)), x)
    # expression not simplified, returns: -sqrt(11)*I*log(tan(x/2) + 4/3
    #   - sqrt(11)*I/3)/11 + sqrt(11)*I*log(tan(x/2) + 4/3 + sqrt(11)*I/3)/11
    assert r1.simplify() == 2*sqrt(11)*atan(sqrt(11)*(3*tan(x/2) + 4)/11)/11
@slow
@XFAIL
def test_V14():
    """V14: integral of log|x**2 - y**2|; Piecewise result won't simplify."""
    r1 = integrate(log(abs(x**2 - y**2)), x)
    # Piecewise result does not simplify to the desired result.
    assert (r1.simplify() == x*log(abs(x**2  - y**2))
                            + y*log(x + y) - y*log(x - y) - 2*x)
def test_V15():
    """V15: integral of x*acot(x/y), checked by difference-simplifies-to-zero."""
    r1 = integrate(x*acot(x/y), x)
    assert simplify(r1 - (x*y + (x**2 + y**2)*acot(x/y))/2) == 0
@XFAIL
def test_V16():
    """V16: cosine-integral (Ci) integrand; integral not calculated."""
    # Integral not calculated
    assert integrate(cos(5*x)*Ci(2*x), x) == Ci(2*x)*sin(5*x)/5 - (Si(3*x) + Si(7*x))/10
@XFAIL
def test_V17():
    """V17: abstract-function quotient integrand; integral not calculated."""
    r1 = integrate((diff(f(x), x)*g(x)
                   - f(x)*diff(g(x), x))/(f(x)**2 - g(x)**2), x)
    # integral not calculated
    assert simplify(r1 - (f(x) - g(x))/(f(x) + g(x))/2) == 0
@XFAIL
def test_W1():
    """W1: Cauchy principal value across a pole should be 0, SymPy gives -I*pi."""
    # The function has a pole at y.
    # The integral has a Cauchy principal value of zero but SymPy returns -I*pi
    # https://github.com/sympy/sympy/issues/7159
    assert integrate(1/(x - y), (x, y - 1, y + 1)) == 0
@XFAIL
def test_W2():
    """W2: divergent integral across a double pole; SymPy returns -2."""
    # The function has a pole at y.
    # The integral is divergent but SymPy returns -2
    # https://github.com/sympy/sympy/issues/7160
    # Test case in Macsyma:
    # (c6) errcatch(integrate(1/(x - a)^2, x, a - 1, a + 1));
    # Integral is divergent
    assert integrate(1/(x - y)**2, (x, y - 1, y + 1)) is zoo
@XFAIL
@slow
def test_W3():
    """W3: integral of sqrt(x + 1/x - 2) on (0, 1); not calculated."""
    # integral is not  calculated
    # https://github.com/sympy/sympy/issues/7161
    assert integrate(sqrt(x + 1/x - 2), (x, 0, 1)) == R(4, 3)
@XFAIL
@slow
def test_W4():
    """W4: same integrand on (1, 2); not calculated."""
    # integral is not calculated
    assert integrate(sqrt(x + 1/x - 2), (x, 1, 2)) == -2*sqrt(2)/3 + R(4, 3)
@XFAIL
@slow
def test_W5():
    """W5: same integrand on (0, 2); not calculated."""
    # integral is not calculated
    assert integrate(sqrt(x + 1/x - 2), (x, 0, 2)) == -2*sqrt(2)/3 + R(8, 3)
@XFAIL
@slow
def test_W6():
    """W6: integral of sqrt(2 - 2*cos(2x))/2 over a quarter period."""
    # integral is not calculated
    assert integrate(sqrt(2 - 2*cos(2*x))/2, (x, pi*R(-3, 4), -pi/4)) == sqrt(2)
def test_W7():
    """W7: classic contour integral of cos(x)/(x**2 + a**2) over the real line."""
    a = symbols('a', real=True, positive=True)
    r1 = integrate(cos(x)/(x**2 + a**2), (x, -oo, oo))
    assert r1.simplify() == pi*exp(-a)/a
@XFAIL
def test_W8():
    """W8: Mellin-type integral under 0 < a < 1 assumption (not supported)."""
    # Test case in Mathematica:
    # In[19]:= Integrate[t^(a - 1)/(1 + t), {t, 0, Infinity},
    #                    Assumptions -> 0 < a < 1]
    # Out[19]= Pi Csc[a Pi]
    raise NotImplementedError(
        "Integrate with assumption 0 < a < 1 not supported")
@XFAIL
def test_W9():
    """W9: principal-value integral with a residue at infinity.

    Expected value: -2*pi*[sin(pi/5) + sin(2*pi/5)] expressed in surds.
    """
    # Integrand with a residue at infinity => -2 pi [sin(pi/5) + sin(2pi/5)]
    # (principal value) [Levinson and Redheffer, p. 234] *)
    r1 = integrate(5*x**3/(1 + x + x**2 + x**3 + x**4), (x, -oo, oo))
    r2 = r1.doit()
    # Use exact Rationals: the original `5/8` was Python float division, which
    # turned the expected value into an inexact Float the assert could never hit.
    assert r2 == -2*pi*(sqrt(-sqrt(5)/8 + R(5, 8)) + sqrt(sqrt(5)/8 + R(5, 8)))
@XFAIL
def test_W10():
    """W10: principal-value integral of x/(1 + x + x**2 + x**4).

    Reference: 2 pi/(2n+1) [1 + cos(pi/(2n+1))] csc(2 pi/(2n+1)) with n = 2.
    """
    # integrate(1/[1 + x + x^2 + ... + x^(2 n)], x = -infinity..infinity) =
    #        2 pi/(2 n + 1) [1 + cos(pi/[2 n + 1])] csc(2 pi/[2 n + 1])
    # [Levinson and Redheffer, p. 255] => 2 pi/5 [1 + cos(pi/5)] csc(2 pi/5) */
    r1 = integrate(x/(1 + x + x**2 + x**4), (x, -oo, oo))
    r2 = r1.doit()
    # Use an exact Rational: the original `5/4` was Python float division,
    # making the expected value inexact.
    assert r2 == 2*pi*(sqrt(5)/4 + R(5, 4))*csc(pi*R(2, 5))/5
@XFAIL
def test_W11():
    """W11: integral of sqrt(1 - x**2)/(1 + x**2) over [-1, 1]."""
    # integral not calculated
    assert (integrate(sqrt(1 - x**2)/(1 + x**2), (x, -1, 1)) ==
            pi*(-1 + sqrt(2)))
def test_W12():
    """W12: Gaussian moment integral with linear term in the exponent."""
    p = symbols('p', real=True, positive=True)
    q = symbols('q', real=True)
    r1 = integrate(x*exp(-p*x**2 + 2*q*x), (x, -oo, oo))
    assert r1.simplify() == sqrt(pi)*q*exp(q**2/p)/p**R(3, 2)
@XFAIL
def test_W13():
    """W13: integral equal to 2*EulerGamma; not calculated."""
    # Integral not calculated. Expected result is 2*(Euler_mascheroni_constant)
    r1 = integrate(1/log(x) + 1/(1 - x) - log(log(1/x)), (x, 0, 1))
    assert r1 == 2*EulerGamma
def test_W14():
    """W14: oscillatory sinc integral with exp(2*I*x) factor vanishes."""
    assert integrate(sin(x)/x*exp(2*I*x), (x, -oo, oo)) == 0
@XFAIL
def test_W15():
    """W15: Kummer-type log-gamma Fourier coefficient; not calculated."""
    # integral not calculated
    assert integrate(log(gamma(x))*cos(6*pi*x), (x, 0, 1)) == R(1, 12)
def test_W16():
    """W16: weighted inner product of Legendre polynomials P1 and P2."""
    assert integrate((1 + x)**3*legendre_poly(1, x)*legendre_poly(2, x),
                     (x, -1, 1)) == R(36, 35)
def test_W17():
    """W17: Laplace transform of the Bessel function J0(b*x)."""
    a, b = symbols('a b', real=True, positive=True)
    assert integrate(exp(-a*x)*besselj(0, b*x),
                     (x, 0, oo)) == 1/(b*sqrt(a**2/b**2 + 1))
def test_W18():
    """W18: integral of the squared sinc-like Bessel quotient (J1(x)/x)**2."""
    assert integrate((besselj(1, x)/x)**2, (x, 0, oo)) == 4/(3*pi)
@XFAIL
def test_W19():
    """W19: Ci/Bessel product integral; not calculated."""
    # Integral not calculated
    # Expected result is (cos 7 - 1)/7   [Gradshteyn and Ryzhik 6.782(3)]
    assert integrate(Ci(x)*besselj(0, 2*sqrt(7*x)), (x, 0, oo)) == (cos(7) - 1)/7
@XFAIL
def test_W20():
    """W20: polylogarithm integral on (0, 1); not calculated."""
    # integral not calculated
    # Use an exact Rational: the original `35/3` was Python float division,
    # which made the expected value inexact and the assertion unattainable.
    assert (integrate(x**2*polylog(3, 1/(x + 1)), (x, 0, 1)) ==
            -pi**2/36 - R(17, 108) + zeta(3)/4 +
            (-pi**2/2 - 4*log(2) + log(2)**2 + R(35, 3))*log(2)/9)
def test_W21():
    """W21: numeric value of the W20 polylog integral matches the reference."""
    numeric_value = N(integrate(x**2*polylog(3, 1/(x + 1)), (x, 0, 1)))
    assert abs(numeric_value - 0.210882859565594) < 1e-15
def test_W22():
    """W22: integral of a boxcar-windowed cosine gives a Piecewise in u."""
    t, u = symbols('t u', real=True)
    s = Lambda(x, Piecewise((1, And(x >= 1, x <= 2)), (0, True)))
    assert integrate(s(t)*cos(t), (t, 0, u)) == Piecewise(
        (0, u < 0),
        (-sin(Min(1, u)) + sin(Min(2, u)), True))
@slow
def test_W23():
    """W23: iterated integral (x first) of x/(x**2 + y**2) equals pi*(b - a)."""
    a, b = symbols('a b', real=True, positive=True)
    r1 = integrate(integrate(x/(x**2 + y**2), (x, a, b)), (y, -oo, oo))
    assert r1.collect(pi) == pi*(-a + b)
def test_W23b():
    """W23b: same double integral with the integration order reversed."""
    # like W23 but limits are reversed
    a, b = symbols('a b', real=True, positive=True)
    r2 = integrate(integrate(x/(x**2 + y**2), (y, -oo, oo)), (x, a, b))
    assert r2.collect(pi) == pi*(-a + b)
@XFAIL
@slow
def test_W24():
    """W24: double integral of sqrt(x**2 + y**2) over the unit square."""
    if ON_TRAVIS:
        skip("Too slow for travis.")
    # Not that slow, but does not fully evaluate so simplify is slow.
    # Maybe also require doit()
    x, y = symbols('x y', real=True)
    r1 = integrate(integrate(sqrt(x**2 + y**2), (x, 0, 1)), (y, 0, 1))
    assert (r1 - (sqrt(2) + asinh(1))/3).simplify() == 0
@XFAIL
@slow
def test_W25():
    """W25: iterated elliptic-style integral evaluating to pi*a/2."""
    if ON_TRAVIS:
        skip("Too slow for travis.")
    a, x, y = symbols('a x y', real=True)
    i1 = integrate(
        sin(a)*sin(y)/sqrt(1 - sin(a)**2*sin(x)**2*sin(y)**2),
        (x, 0, pi/2))
    i2 = integrate(i1, (y, 0, pi/2))
    assert (i2 - pi*a/2).simplify() == 0
def test_W26():
    """W26: double integral of |y - x**2| over a rectangle."""
    x, y = symbols('x y', real=True)
    assert integrate(integrate(abs(y - x**2), (y, 0, 2)),
                     (x, -1, 1)) == R(46, 15)
def test_W27():
    """W27: volume of the tetrahedron bounded by x/a + y/b + z/c = 1."""
    a, b, c = symbols('a b c')
    assert integrate(integrate(integrate(1, (z, 0, c*(1 - x/a - y/b))),
                               (y, 0, b*(1 - x/a))),
                     (x, 0, a)) == a*b*c/6
def test_X1():
    """X1: relativistic gamma-factor series in v/c up to order 8."""
    v, c = symbols('v c', real=True)
    assert (series(1/sqrt(1 - (v/c)**2), v, x0=0, n=8) ==
            5*v**6/(16*c**6) + 3*v**4/(8*c**4) + v**2/(2*c**2) + 1 + O(v**8))
def test_X2():
    """X2: inverting the squared X1 series recovers 1 - v**2/c**2."""
    v, c = symbols('v c', real=True)
    s1 = series(1/sqrt(1 - (v/c)**2), v, x0=0, n=8)
    assert (1/s1**2).series(v, x0=0, n=8) == -v**2/c**2 + 1 + O(v**8)
def test_X3():
    """X3: dividing the sin and cos series reproduces the tan series."""
    s1 = (sin(x).series()/cos(x).series()).series()
    s2 = tan(x).series()
    assert s2 == x + x**3/3 + 2*x**5/15 + O(x**6)
    assert s1 == s2
def test_X4():
    """X4: series of log(sin(x)/x) directly and via log of the series."""
    s1 = log(sin(x)/x).series()
    assert s1 == -x**2/6 - x**4/180 + O(x**6)
    assert log(series(sin(x)/x)).series() == s1
@XFAIL
def test_X5():
    """X5: series of a mixed derivative/integral expression (raises)."""
    # test case in Mathematica syntax:
    # In[21]:= (* => [a f'(a d) + g(b d) + integrate(h(c y), y = 0..d)]
    #             + [a^2 f''(a d) + b g'(b d) + h(c d)] (x - d) *)
    # In[22]:= D[f[a*x], x] + g[b*x] + Integrate[h[c*y], {y, 0, x}]
    # Out[22]= g[b x] + Integrate[h[c y], {y, 0, x}] + a f'[a x]
    # In[23]:= Series[%, {x, d, 1}]
    # Out[23]= (g[b d] + Integrate[h[c y], {y, 0, d}] + a f'[a d]) +
    #                                    2    2
    #           (h[c d] + b g'[b d] + a f''[a d]) (-d + x) + O[-d + x]
    h = Function('h')
    a, b, c, d = symbols('a b c d', real=True)
    # series() raises NotImplementedError:
    # The _eval_nseries method should be added to <class
    # 'sympy.core.function.Subs'> to give terms up to O(x**n) at x=0
    series(diff(f(a*x), x) + g(b*x) + integrate(h(c*y), (y, 0, x)),
           x, x0=d, n=2)
    # assert missing, until exception is removed
def test_X6():
    """X6: noncommutative BCH-style expansion of exp((a+b)x) - exp(ax)exp(bx)."""
    # Taylor series of nonscalar objects (noncommutative multiplication)
    # expected result => (B A - A B) t^2/2 + O(t^3)   [Stanly Steinberg]
    a, b = symbols('a b', commutative=False, scalar=False)
    assert (series(exp((a + b)*x) - exp(a*x) * exp(b*x), x, x0=0, n=3) ==
              x**2*(-a*b/2 + b*a/2) + O(x**3))
def test_X7():
    """X7: Laurent series of 1/(x*(exp(x) - 1)) with Bernoulli coefficients."""
    # => sum( Bernoulli[k]/k! x^(k - 2), k = 1..infinity )
    #    = 1/x^2 - 1/(2 x) + 1/12 - x^2/720 + x^4/30240 + O(x^6)
    #    [Levinson and Redheffer, p. 173]
    assert (series(1/(x*(exp(x) - 1)), x, 0, 7) == x**(-2) - 1/(2*x) +
            R(1, 12) - x**2/720 + x**4/30240 - x**6/1209600 + O(x**7))
def test_X8():
    """X8: Puiseux (fractional-power) series of sqrt(sec(x)) at x = 3*pi/2."""
    # Puiseux series (terms with fractional degree):
    #   => 1/sqrt(x - 3/2 pi) + (x - 3/2 pi)^(3/2) / 12 + O([x - 3/2 pi]^(7/2))
    # see issue 7167:
    x = symbols('x', real=True)
    assert (series(sqrt(sec(x)), x, x0=pi*3/2, n=4) ==
            1/sqrt(x - pi*R(3, 2)) + (x - pi*R(3, 2))**R(3, 2)/12 +
            (x - pi*R(3, 2))**R(7, 2)/160 + O((x - pi*R(3, 2))**4, (x, pi*R(3, 2))))
def test_X9():
    """X9: generalized series of x**x in powers of x*log(x)."""
    assert (series(x**x, x, x0=0, n=4) == 1 + x*log(x) + x**2*log(x)**2/2 +
            x**3*log(x)**3/6 + O(x**4*log(x)**4))
def test_X10():
    """X10: series of a sum of logs with a logarithmic singularity at z = 0."""
    z, w = symbols('z w')
    assert (series(log(sinh(z)) + log(cosh(z + w)), z, x0=0, n=2) ==
            log(cosh(w)) + log(z) + z*sinh(w)/cosh(w) + O(z**2))
def test_X11():
    """X11: same as X10 but with the log taken of the product."""
    z, w = symbols('z w')
    assert (series(log(sinh(z) * cosh(z + w)), z, x0=0, n=2) ==
            log(cosh(w)) + log(z) + z*sinh(w)/cosh(w) + O(z**2))
@XFAIL
def test_X12():
    """X12: generalized Taylor series of log(x)**a*exp(-b*x) around x = 1."""
    # Look at the generalized Taylor series around x = 1
    # Result => (x - 1)^a/e^b [1 - (a + 2 b) (x - 1) / 2 + O((x - 1)^2)]
    a, b, x = symbols('a b x', real=True)
    # series returns O(log(x-1)**2)
    # https://github.com/sympy/sympy/issues/7168
    assert (series(log(x)**a*exp(-b*x), x, x0=1, n=2) ==
            (x - 1)**a/exp(b)*(1 - (a + 2*b)*(x - 1)/2 + O((x - 1)**2)))
def test_X13():
    """X13: asymptotic expansion of sqrt(2*x**2 + 1) at infinity."""
    assert series(sqrt(2*x**2 + 1), x, x0=oo, n=1) == sqrt(2)*x + O(1/x, (x, oo))
@XFAIL
def test_X14():
    """X14: asymptotic form of the central binomial term in Wallis' product.

    Expected leading term is 1/sqrt(pi*n).
    """
    # Wallis' product => 1/sqrt(pi n) + ...   [Knopp, p. 385]
    # Fixed: the original passed the comparison `x==oo` where the keyword
    # `x0=oo` was intended, and the error term referenced `x` instead of `n`.
    assert series(1/2**(2*n)*binomial(2*n, n),
                  n, x0=oo, n=1) == 1/(sqrt(pi)*sqrt(n)) + O(1/n, (n, oo))
@SKIP("https://github.com/sympy/sympy/issues/7164")
def test_X15():
    """X15: asymptotic expansion of the exponential integral E1 at infinity."""
    # => 0!/x - 1!/x^2 + 2!/x^3 - 3!/x^4 + O(1/x^5)   [Knopp, p. 544]
    x, t = symbols('x t', real=True)
    # raises RuntimeError: maximum recursion depth exceeded
    # https://github.com/sympy/sympy/issues/7164
    # 2019-02-17: Raises
    # PoleError:
    # Asymptotic expansion of Ei around [-oo] is not implemented.
    e1 = integrate(exp(-t)/t, (t, x, oo))
    assert (series(e1, x, x0=oo, n=5) ==
            6/x**4 + 2/x**3 - 1/x**2 + 1/x + O(x**(-5), (x, oo)))
def test_X16():
    """X16: multivariate Taylor expansion of cos(x + y) in x + y."""
    # Multivariate Taylor series expansion => 1 - (x^2 + 2 x y + y^2)/2 + O(x^4)
    assert (series(cos(x + y), x + y, x0=0, n=4) == 1 - (x + y)**2/2 +
            O(x**4 + x**3*y + x**2*y**2 + x*y**3 + y**4, x, y))
@XFAIL
def test_X17():
    """X17: formal power series of log(sin(x)/x); fps does not calculate."""
    # Power series (compute the general formula)
    # (c41) powerseries(log(sin(x)/x), x, 0);
    # /aquarius/data2/opt/local/macsyma_422/library1/trgred.so being loaded.
    #              inf
    #              ====     i1  2 i1          2 i1
    #              \        (- 1)   2     bern(2 i1) x
    # (d41)         >        ------------------------------
    #              /             2 i1 (2 i1)!
    #              ====
    #              i1 = 1
    # fps does not calculate
    assert fps(log(sin(x)/x)) == \
        Sum((-1)**k*2**(2*k - 1)*bernoulli(2*k)*x**(2*k)/(k*factorial(2*k)), (k, 1, oo))
@XFAIL
def test_X18():
    """X18: formal power series of exp(-x)*sin(x); closed form not reached."""
    # Power series (compute the general formula). Maple FPS:
    # > FormalPowerSeries(exp(-x)*sin(x), x = 0);
    #                        infinity
    #                         -----    (1/2 k)                k
    #                          \      2        sin(3/4 k Pi) x
    #                           )     -------------------------
    #                          /                 k!
    #                         -----
    #
    # Now, sympy returns
    #      oo
    #    _____
    #    \    `
    #     \                                         /          k             k\
    #      \                                      k |I*(-1 - I)    I*(-1 + I) |
    #       \                                    x *|----------- - -----------|
    #        )                                      \     2             2     /
    #       /                                    ------------------------------
    #      /                                                   k!
    #     /____,
    #    k = 0
    k = Dummy('k')
    assert fps(exp(-x)*sin(x)) == \
        Sum(2**(S.Half*k)*sin(R(3, 4)*k*pi)*x**k/factorial(k), (k, 0, oo))
@XFAIL
def test_X19():
    """X19: Taylor-series reversion of an implicit relation (not supported)."""
    # (c45) /* Derive an explicit Taylor series solution of y as a function of
    # x from the following implicit relation:
    #    y = x - 1 + (x - 1)^2/2 + 2/3 (x - 1)^3 + (x - 1)^4 +
    #        17/10 (x - 1)^5 + ...
    #    */
    # x = sin(y) + cos(y);
    # Time= 0 msecs
    # (d45)                   x = sin(y) + cos(y)
    #
    # (c46) taylor_revert(%, y, 7);
    raise NotImplementedError("Solve using series not supported. \
Inverse Taylor series expansion also not supported")
@XFAIL
def test_X20():
    """X20: symbolic Pade approximant of exp(-x) (not supported)."""
    # Pade (rational function) approximation => (2 - x)/(2 + x)
    # > numapprox[pade](exp(-x), x = 0, [1, 1]);
    # bytes used=9019816, alloc=3669344, time=13.12
    #                                    1 - 1/2 x
    #                                    ---------
    #                                    1 + 1/2 x
    # mpmath support numeric Pade approximant but there is
    # no symbolic implementation in SymPy
    # https://en.wikipedia.org/wiki/Pad%C3%A9_approximant
    raise NotImplementedError("Symbolic Pade approximant not supported")
def test_X21():
    """
    Test whether `fourier_series` of x periodical on the [-p, p] interval equals
    `- (2 p / pi) sum( (-1)^n / n sin(n pi x / p), n = 1..infinity )`.
    """
    p = symbols('p', positive=True)
    n = symbols('n', positive=True, integer=True)
    s = fourier_series(x, (x, -p, p))

    # All cosine coefficients are equal to 0
    assert s.an.formula == 0

    # Check for sine coefficients
    assert s.bn.formula.subs(s.bn.variables[0], 0) == 0
    # General term of the sine coefficients at index n.
    assert s.bn.formula.subs(s.bn.variables[0], n) == \
        -2*p/pi * (-1)**n / n * sin(n*pi*x/p)
@XFAIL
def test_X22():
    """X22: Fourier series of |x| on [-p, p] (not supported in this form)."""
    # (c52) /* => p / 2
    #    - (2 p / pi^2) sum( [1 - (-1)^n] cos(n pi x / p) / n^2,
    #                        n = 1..infinity ) */
    # fourier_series(abs(x), x, p);
    #                       p
    # (e52)            a  = -
    #                   0   2
    #
    #                       %nn
    #                   (2 (- 1)    - 2) p
    # (e53)       a   = ------------------
    #              %nn         2   2
    #                       %pi  %nn
    #
    # (e54)                     b   = 0
    #                            %nn
    #
    # Time= 5290 msecs
    #            inf           %nn            %pi %nn x
    #            ====       (2 (- 1)    - 2) cos(---------)
    #            \                    p
    #        p    >         -------------------------------
    #            /                       2
    #            ====                 %nn
    #            %nn = 1                                    p
    # (d54) ----------------------------------------------- + -
    #                           2                           2
    #                        %pi
    raise NotImplementedError("Fourier series not supported")
def test_Y1():
    """Y1: Laplace transform of cos((w - 1)*t)."""
    t = symbols('t', real=True, positive=True)
    w = symbols('w', real=True)
    s = symbols('s')
    F, _, _ = laplace_transform(cos((w - 1)*t), t, s)
    assert F == s/(s**2 + (w - 1)**2)
def test_Y2():
    # Inverse of test_Y1: recover cos(t (w - 1)) from s / (s^2 + (w - 1)^2).
    t = symbols('t', real=True, positive=True)
    w = symbols('w', real=True)
    s = symbols('s')
    f = inverse_laplace_transform(s/(s**2 + (w - 1)**2), s, t)
    assert f == cos(t*w - t)
def test_Y3():
    # L{sinh(w t) cosh(w t)} = w / (s^2 - 4 w^2), using
    # sinh(a)cosh(a) = sinh(2a)/2.
    t = symbols('t', real=True, positive=True)
    w = symbols('w', real=True)
    s = symbols('s')
    F, _, _ = laplace_transform(sinh(w*t)*cosh(w*t), t, s)
    assert F == w/(s**2 - 4*w**2)
def test_Y4():
    # L{erf(3/sqrt(t))} = (1 - e^(-6 sqrt(s))) / s.
    t = symbols('t', real=True, positive=True)
    s = symbols('s')
    F, _, _ = laplace_transform(erf(3/sqrt(t)), t, s)
    assert F == (1 - exp(-6*sqrt(s)))/s
@XFAIL
def test_Y5_Y6():
    # Solve y'' + y = 4 [H(t - 1) - H(t - 2)], y(0) = 1, y'(0) = 0 where H is the
    # Heaviside (unit step) function (the RHS describes a pulse of magnitude 4 and
    # duration 1). See David A. Sanchez, Richard C. Allen, Jr. and Walter T.
    # Kyner, _Differential Equations: An Introduction_, Addison-Wesley Publishing
    # Company, 1983, p. 211. First, take the Laplace transform of the ODE
    # => s^2 Y(s) - s + Y(s) = 4/s [e^(-s) - e^(-2 s)]
    # where Y(s) is the Laplace transform of y(t)
    t = symbols('t', real=True, positive=True)
    s = symbols('s')
    y = Function('y')
    F, _, _ = laplace_transform(diff(y(t), t, 2)
                                + y(t)
                                - 4*(Heaviside(t - 1)
                                     - Heaviside(t - 2)), t, s)
    # Laplace transform for diff() not calculated
    # https://github.com/sympy/sympy/issues/7176
    # Expected: the derivative term should expand using the initial conditions;
    # currently it stays as an unevaluated LaplaceTransform of y(t).
    assert (F == s**2*LaplaceTransform(y(t), t, s) - s
            + LaplaceTransform(y(t), t, s) - 4*exp(-s)/s + 4*exp(-2*s)/s)
    # TODO implement second part of test case
    # Now, solve for Y(s) and then take the inverse Laplace transform
    # => Y(s) = s/(s^2 + 1) + 4 [1/s - s/(s^2 + 1)] [e^(-s) - e^(-2 s)]
    # => y(t) = cos t + 4 {[1 - cos(t - 1)] H(t - 1) - [1 - cos(t - 2)] H(t - 2)}
@XFAIL
def test_Y7():
    # What is the Laplace transform of an infinite square wave?
    # => 1/s + 2 sum( (-1)^n e^(- s n a)/s, n = 1..infinity )
    # [Sanchez, Allen and Kyner, p. 213]
    # `n` is a module-level Symbol shared by this test file.
    t = symbols('t', real=True, positive=True)
    a = symbols('a', real=True)
    s = symbols('s')
    F, _, _ = laplace_transform(1 + 2*Sum((-1)**n*Heaviside(t - n*a),
                                          (n, 1, oo)), t, s)
    # returns 2*LaplaceTransform(Sum((-1)**n*Heaviside(-a*n + t),
    #         (n, 1, oo)), t, s) + 1/s
    # https://github.com/sympy/sympy/issues/7177
    assert F == 2*Sum((-1)**n*exp(-a*n*s)/s, (n, 1, oo)) + 1/s
@XFAIL
def test_Y8():
    # Fourier transform of the constant 1 should give a Dirac delta.
    assert fourier_transform(1, x, z) == DiracDelta(z)
def test_Y9():
    # Fourier transform of a Gaussian is a Gaussian.
    assert (fourier_transform(exp(-9*x**2), x, z) ==
        sqrt(pi)*exp(-pi**2*z**2/9)/3)
def test_Y10():
    # Fourier transform of |x| e^(-3|x|): a rational function of z.
    assert (fourier_transform(abs(x)*exp(-3*abs(x)), x, z) ==
        (-8*pi**2*z**2 + 18)/(16*pi**4*z**4 + 72*pi**2*z**2 + 81))
@SKIP("https://github.com/sympy/sympy/issues/7181")
@slow
def test_Y11():
    # Mellin transform of 1/(1 - x):
    # => pi cot(pi s)    (0 < Re s < 1)    [Gradshteyn and Ryzhik 17.43(5)]
    # NOTE: local `x` deliberately shadows the module-level Symbol here.
    x, s = symbols('x s')
    # raises RuntimeError: maximum recursion depth exceeded
    # https://github.com/sympy/sympy/issues/7181
    # Update 2019-02-17 raises:
    # TypeError: cannot unpack non-iterable MellinTransform object
    F, _, _ = mellin_transform(1/(1 - x), x, s)
    assert F == pi*cot(pi*s)
@XFAIL
def test_Y12():
    # Mellin transform of besselj(3, x)/x**3:
    # => 2^(s - 4) gamma(s/2)/gamma(4 - s/2)    (0 < Re s < 1)
    # [Gradshteyn and Ryzhik 17.43(16)]
    x, s = symbols('x s')
    # returns Wrong value -2**(s - 4)*gamma(s/2 - 3)/gamma(-s/2 + 1)
    # https://github.com/sympy/sympy/issues/7182
    F, _, _ = mellin_transform(besselj(3, x)/x**3, x, s)
    assert F == -2**(s - 4)*gamma(s/2)/gamma(-s/2 + 4)
@XFAIL
def test_Y13():
    """Z-transform: Z[H(t - m T)] => z/[z^m (z - 1)], H the unit step."""
    raise NotImplementedError("z-transform not supported")
@XFAIL
def test_Y14():
    """Z-transform: Z[H(t - m T)] => z/[z^m (z - 1)], H the unit step."""
    raise NotImplementedError("z-transform not supported")
def test_Z1():
    # Linear recurrence with constant inhomogeneous term; `n` and `m` are
    # module-level Symbols shared by this test file.
    r = Function('r')
    assert (rsolve(r(n + 2) - 2*r(n + 1) + r(n) - 2, r(n),
                   {r(0): 1, r(1): m}).simplify() == n**2 + n*(m - 2) + 1)
def test_Z2():
    # Homogeneous recurrence with characteristic roots 2 and 3.
    r = Function('r')
    assert (rsolve(r(n) - (5*r(n - 1) - 6*r(n - 2)), r(n), {r(0): 0, r(1): 1})
            == -2**n + 3**n)
def test_Z3():
    # => r(n) = Fibonacci[n + 1] [Cohen, p. 83]
    r = Function('r')
    # recurrence solution is correct, Wester expects it to be simplified to
    # fibonacci(n+1), but that is quite hard
    # (the expected value below is Binet's formula in expanded form)
    assert (rsolve(r(n) - (r(n - 1) + r(n - 2)), r(n),
                   {r(1): 1, r(2): 2}).simplify()
            == 2**(-n)*((1 + sqrt(5))**n*(sqrt(5) + 5) +
               (-sqrt(5) + 1)**n*(-sqrt(5) + 5))/10)
@XFAIL
def test_Z4():
    # => [c^(n+1) [c^(n+1) - 2 c - 2] + (n+1) c^2 + 2 c - n] / [(c-1)^3 (c+1)]
    # [Joan Z. Yu and Robert Israel in sci.math.symbolic]
    # Recurrence with coefficients that are rational functions of c**n,
    # which rsolve cannot handle yet.
    r = Function('r')
    c = symbols('c')
    # raises ValueError: Polynomial or rational function expected,
    # got '(c**2 - c**n)/(c - c**n)
    s = rsolve(r(n) - ((1 + c - c**(n-1) - c**(n+1))/(1 - c**n)*r(n - 1)
               - c*(1 - c**(n-2))/(1 - c**(n-1))*r(n - 2) + 1),
               r(n), {r(1): 1, r(2): (2 + 2*c + c**2)/(1 + c)})
    assert (s - (c*(n + 1)*(c*(n + 1) - 2*c - 2) +
            (n + 1)*c**2 + 2*c - n)/((c-1)**3*(c+1)) == 0)
@XFAIL
def test_Z5():
    # Second order ODE with initial conditions---solve directly
    # transform: f(t) = sin(2 t)/8 - t cos(2 t)/4
    C1, C2 = symbols('C1 C2')
    # initial conditions not supported, this is a manual workaround
    # https://github.com/sympy/sympy/issues/4720
    eq = Derivative(f(x), x, 2) + 4*f(x) - sin(2*x)
    sol = dsolve(eq, f(x))
    f0 = Lambda(x, sol.rhs)
    assert f0(x) == C2*sin(2*x) + (C1 - x/4)*cos(2*x)
    f1 = Lambda(x, diff(f0(x), x))
    # TODO: Replace solve with solveset, when it works for solveset
    # Impose f(0) = 0 and f'(0) = 0 by solving for C1 and C2.
    const_dict = solve((f0(0), f1(0)))
    result = f0(x).subs(C1, const_dict[C1]).subs(C2, const_dict[C2])
    assert result == -x*cos(2*x)/4 + sin(2*x)/8
    # Result is OK, but ODE solving with initial conditions should be
    # supported without all this manual work
    raise NotImplementedError('ODE solving with initial conditions \
not supported')
@XFAIL
def test_Z6():
    # Second order ODE with initial conditions---solve using Laplace
    # transform: f(t) = sin(2 t)/8 - t cos(2 t)/4
    t = symbols('t', real=True, positive=True)
    s = symbols('s')
    eq = Derivative(f(t), t, 2) + 4*f(t) - sin(2*t)
    F, _, _ = laplace_transform(eq, t, s)
    # Laplace transform for diff() not calculated
    # https://github.com/sympy/sympy/issues/7176
    # Expected: the derivative term stays unevaluated, so the whole
    # transform is expressed through LaplaceTransform objects.
    assert (F == s**2*LaplaceTransform(f(t), t, s) +
            4*LaplaceTransform(f(t), t, s) - 2/(s**2 + 4))
    # rest of test case not implemented
# rest of test case not implemented
| 30.590089
| 297
| 0.489818
|
4a017fa04bcc5c9abad513245f006f42b088ded0
| 8,560
|
py
|
Python
|
route/recent_change.py
|
LinuxSnapshot/openNAMU
|
ea7d97410da432ae65e7139fdffa6c36bfdfb3d0
|
[
"BSD-3-Clause"
] | 2
|
2021-12-16T13:24:53.000Z
|
2021-12-19T10:18:18.000Z
|
route/recent_change.py
|
LinuxSnapshot/openNAMU
|
ea7d97410da432ae65e7139fdffa6c36bfdfb3d0
|
[
"BSD-3-Clause"
] | null | null | null |
route/recent_change.py
|
LinuxSnapshot/openNAMU
|
ea7d97410da432ae65e7139fdffa6c36bfdfb3d0
|
[
"BSD-3-Clause"
] | 1
|
2021-12-16T13:27:02.000Z
|
2021-12-16T13:27:02.000Z
|
from .tool.func import *
def recent_change_2(conn, name, tool):
    """Render the recent-changes / document-history / user-record page.

    conn -- DB connection
    name -- document title (tool == 'history'), user/IP (record view),
            or empty string for the site-wide recent-changes list
    tool -- 'history' to show one document's revision list

    POST: redirect to a diff of the two revisions picked in the form.
    GET : build the HTML table and return the rendered skin template.
    """
    curs = conn.cursor()
    if flask.request.method == 'POST':
        # Revision-compare form submit: jump to /diff/<b>/<a>/<name>.
        return redirect(
            '/diff' +
            '/' + flask.request.form.get('b', '1') +
            '/' + flask.request.form.get('a', '1') +
            '/' + url_pas(name)
        )
    else:
        ban = ''
        select = ''  # accumulated <option> rows for the compare form
        sub = ''
        admin_6 = admin_check(6)  # NOTE(review): result unused below — confirm
        admin = admin_check()
        div = '''
            <table id="main_table_set">
                <tbody>
                    <tr id="main_table_top_tr">
        '''
        # Pagination: 50 rows per page, `num` is the 1-based page number.
        num = int(number_check(flask.request.args.get('num', '1')))
        sql_num = (num * 50 - 50) if num * 50 > 0 else 0
        if name:
            if tool == 'history':
                # Revision history of a single document.
                sub += ' (' + load_lang('history') + ')'
                div += '''
                    <td id="main_table_width">''' + load_lang('version') + '''</td>
                    <td id="main_table_width">''' + load_lang('editor') + '''</td>
                    <td id="main_table_width">''' + load_lang('time') + '''</td>
                '''
                # Plain edits are stored with an empty `type` in the DB,
                # so 'edit' is mapped to ''.
                set_type = flask.request.args.get('set', 'normal')
                set_type = '' if set_type == 'edit' else set_type
                if set_type != 'normal':
                    curs.execute(db_change('' + \
                        'select id, title, date, ip, send, leng, hide from history ' + \
                        'where title = ? and type = ? ' + \
                        'order by id + 0 desc ' + \
                        "limit ?, 50" + \
                    ''), [name, set_type, sql_num])
                else:
                    curs.execute(db_change('' + \
                        'select id, title, date, ip, send, leng, hide from history ' + \
                        'where title = ? ' + \
                        'order by id + 0 desc ' + \
                        "limit ?, 50" + \
                    ''), [name, sql_num])
                data_list = curs.fetchall()
            else:
                # Contribution record of a single user/IP (`name`).
                div += '''
                    <td id="main_table_width">''' + load_lang('document_name') + '''</td>
                    <td id="main_table_width">''' + load_lang('editor') + '''</td>
                    <td id="main_table_width">''' + load_lang('time') + '''</td>
                '''
                curs.execute(db_change('' + \
                    'select id, title, date, ip, send, leng, hide from history ' + \
                    "where ip = ? order by date desc limit ?, 50" + \
                ''), [name, sql_num])
                data_list = curs.fetchall()
        else:
            # Site-wide recent changes, driven by the `rc` table.
            div += '''
                <td id="main_table_width">''' + load_lang('document_name') + '''</td>
                <td id="main_table_width">''' + load_lang('editor') + '''</td>
                <td id="main_table_width">''' + load_lang('time') + '''</td>
            '''
            sub = ''
            set_type = flask.request.args.get('set', 'normal')
            set_type = '' if set_type == 'edit' else set_type
            data_list = []
            curs.execute(db_change('select id, title from rc where type = ? order by date desc'), [set_type])
            for i in curs.fetchall():
                # Look up the full history row for each rc entry.
                curs.execute(db_change('select id, title, date, ip, send, leng, hide from history where id = ? and title = ?'), i)
                data_list += curs.fetchall()
        div += '</tr>'
        # Resolve all editor IPs/user names in one batch.
        all_ip = ip_pas([i[3] for i in data_list])
        for data in data_list:
            # data = (id, title, date, ip, send, leng, hide)
            select += '<option value="' + data[0] + '">' + data[0] + '</option>'
            send = '<br>'
            if data[4]:
                # Ignore whitespace-only edit summaries.
                if not re.search(r"^(?: +)$", data[4]):
                    send = data[4]
            # Colour the size delta: green for +, red for -, grey otherwise.
            if re.search(r"\+", data[5]):
                leng = '<span style="color:green;">(' + data[5] + ')</span>'
            elif re.search(r"\-", data[5]):
                leng = '<span style="color:red;">(' + data[5] + ')</span>'
            else:
                leng = '<span style="color:gray;">(' + data[5] + ')</span>'
            ip = all_ip[data[3]]
            m_tool = '<a href="/history/tool/' + data[0] + '/' + url_pas(data[1]) + '">(' + load_lang('tool') + ')</a>'
            style = ['', '']
            date = data[2]
            if data[6] == 'O':
                # Hidden revision: admins see it greyed out; everyone else
                # gets the row hidden and the editor/date blanked.
                if admin == 1:
                    style[0] = 'id="toron_color_grey"'
                    style[1] = 'id="toron_color_grey"'
                else:
                    ip = ''
                    ban = ''
                    date = ''
                    style[0] = 'style="display: none;"'
                    style[1] = 'id="toron_color_grey"'
            if tool == 'history':
                title = '<a href="/w/' + url_pas(name) + '/doc_rev/' + data[0] + '">r' + data[0] + '</a> '
            else:
                title = '<a href="/w/' + url_pas(data[1]) + '">' + html.escape(data[1]) + '</a> '
            # Revision 1 has no predecessor, so link to the history page
            # instead of a diff.
            if int(data[0]) < 2:
                title += '<a href="/history/' + url_pas(data[1]) + '">(r' + data[0] + ')</a> '
            else:
                title += '<a href="/diff/' + str(int(data[0]) - 1) + '/' + data[0] + '/' + url_pas(data[1]) + '">(r' + data[0] + ')</a> '
            div += '''
                <tr ''' + style[0] + '''>
                    <td>''' + title + m_tool + ' ' + leng + '''</td>
                    <td>''' + ip + ban + '''</td>
                    <td>''' + date + '''</td>
                </tr>
                <tr ''' + style[1] + '''>
                    <td class="send_content" colspan="3">''' + html.escape(send) + '''</td>
                </tr>
            '''
        div += '''
            </tbody>
        </table>
        <script>send_render();</script>
    '''
    if name:
        if tool == 'history':
            # Filter links (normal / edit / move / delete / revert).
            div = '' + \
                '<a href="?set=normal">(' + load_lang('normal') + ')</a> ' + \
                '<a href="?set=edit">(' + load_lang('edit') + ')</a> ' + \
                '<a href="?set=move">(' + load_lang('move') + ')</a> ' + \
                '<a href="?set=delete">(' + load_lang('delete') + ')</a> ' + \
                '<a href="?set=revert">(' + load_lang('revert') + ')</a>' + \
                '<hr class="main_hr">' + div + \
                ''
            menu = [['w/' + url_pas(name), load_lang('return')]]
            if set_type == 'normal':
                div = '''
                    <form method="post">
                        <select name="a">''' + select + '''</select> <select name="b">''' + select + '''</select>
                        <button type="submit">''' + load_lang('compare') + '''</button>
                    </form>
                    <hr class=\"main_hr\">
                ''' + div
            if admin == 1:
                menu += [
                    ['history/add/' + url_pas(name), load_lang('history_add')],
                    ['history/reset/' + url_pas(name), load_lang('history_reset')]
                ]
            title = name
            # NOTE(review): this pagination link passes the filter as '?tool='
            # but the handler reads request.args.get('set') — confirm whether
            # '?set=' was intended (the filter is lost when paging).
            div += next_fix('/history/' + url_pas(name) + '?tool=' + set_type + '&num=', num, data_list)
        else:
            title = load_lang('edit_record')
            menu = [
                ['other', load_lang('other')],
                ['user', load_lang('user')],
                ['record/reset/' + url_pas(name), load_lang('record_reset')]
            ]
            div += next_fix('/record/' + url_pas(name) + '?num=', num, data_list)
    else:
        # Site-wide view: filter links include the user-document filter.
        div = '' + \
            '<a href="?set=normal">(' + load_lang('normal') + ')</a> ' + \
            '<a href="?set=edit">(' + load_lang('edit') + ')</a> ' + \
            '<a href="?set=user">(' + load_lang('user_document') + ')</a> ' + \
            '<a href="?set=move">(' + load_lang('move') + ')</a> ' + \
            '<a href="?set=delete">(' + load_lang('delete') + ')</a> ' + \
            '<a href="?set=revert">(' + load_lang('revert') + ')</a>' + \
            '<hr class="main_hr">' + div + \
            ''
        menu = 0
        title = load_lang('recent_change')
    if sub == '':
        sub = 0
    return easy_minify(flask.render_template(skin_check(),
        imp = [title, wiki_set(), wiki_custom(), wiki_css([sub, 0])],
        data = div,
        menu = menu
    ))
| 41.756098
| 141
| 0.380491
|
4a018162b0818427c3fdfc97f2315f1d4066e69f
| 4,007
|
py
|
Python
|
thirdparty/gd2c/gd2c/variant.py
|
ppiecuch/godot
|
ff2098b324b814a0d1bd9d5722aa871fc5214fab
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
thirdparty/gd2c/gd2c/variant.py
|
ppiecuch/godot
|
ff2098b324b814a0d1bd9d5722aa871fc5214fab
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
thirdparty/gd2c/gd2c/variant.py
|
ppiecuch/godot
|
ff2098b324b814a0d1bd9d5722aa871fc5214fab
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null |
from __future__ import annotations
from typing import Union, Dict, cast
class VariantType:
    """Enumeration-like registry of Godot Variant type tags.

    The class attributes (``NIL`` .. ``POOL_COLOR_ARRAY``) are placeholders
    here; they are rebound to real instances at module load time below the
    class definitions.  Each constructed instance registers itself in the
    module-level ``_vtypes`` table so it can be looked up by integer tag.
    """
    NIL: 'VariantType' = cast('VariantType', None)
    BOOL: 'VariantType' = cast('VariantType', None)
    INT: 'VariantType' = cast('VariantType', None)
    REAL: 'VariantType' = cast('VariantType', None)
    STRING: 'VariantType' = cast('VariantType', None)
    VECTOR2: 'VariantType' = cast('VariantType', None)
    RECT2: 'VariantType' = cast('VariantType', None)
    VECTOR3: 'VariantType' = cast('VariantType', None)
    TRANSFORM2D: 'VariantType' = cast('VariantType', None)
    PLANE: 'VariantType' = cast('VariantType', None)
    QUAT: 'VariantType' = cast('VariantType', None)
    AABB: 'VariantType' = cast('VariantType', None)
    BASIS: 'VariantType' = cast('VariantType', None)
    TRANSFORM: 'VariantType' = cast('VariantType', None)
    COLOR: 'VariantType' = cast('VariantType', None)
    NODE_PATH: 'VariantType' = cast('VariantType', None)
    RID: 'VariantType' = cast('VariantType', None)
    OBJECT: 'VariantType' = cast('VariantType', None)
    DICTIONARY: 'VariantType' = cast('VariantType', None)
    ARRAY: 'VariantType' = cast('VariantType', None)
    POOL_BYTE_ARRAY: 'VariantType' = cast('VariantType', None)
    POOL_INT_ARRAY: 'VariantType' = cast('VariantType', None)
    POOL_REAL_ARRAY: 'VariantType' = cast('VariantType', None)
    POOL_STRING_ARRAY: 'VariantType' = cast('VariantType', None)
    POOL_VECTOR2_ARRAY: 'VariantType' = cast('VariantType', None)
    POOL_VECTOR3_ARRAY: 'VariantType' = cast('VariantType', None)
    POOL_COLOR_ARRAY: 'VariantType' = cast('VariantType', None)
    # Number of variant tags (0..26 inclusive).
    VARIANT_MAX = 27
    @staticmethod
    def get(value: Union[VariantType, str, int]) -> VariantType:
        """Coerce *value* to a VariantType.

        Accepts an existing VariantType (returned as-is), an int tag, a
        numeric string tag, or None (mapped to NIL).  Raises TypeError for
        anything else.
        """
        if isinstance(value, VariantType):
            return value
        elif isinstance(value, int):
            return _vtypes[value]
        elif isinstance(value, str):
            return _vtypes[int(value)]
        elif value is None:
            return VariantType.NIL
        # Bug fix: the original `raise "..."` raised a plain string, which is
        # illegal in Python 3 (TypeError: exceptions must derive from
        # BaseException) and hid the intended message.
        raise TypeError("Value must be int, str, or VariantType")
    def __init__(self, value: int, name: str):
        self._value = value
        self._name = name
        # Register this instance in the module-level tag -> instance table.
        _vtypes[self.value] = self
    @property
    def value(self) -> int:
        """Integer tag of this variant type."""
        return self._value
    @property
    def name(self) -> str:
        """Display name of this variant type."""
        return self._name
    def __str__(self):
        return self._name
# Tag -> VariantType registry; populated by VariantType.__init__ as each
# singleton is constructed below.
_vtypes: Dict[int, VariantType] = {}
class Variant:
    """A value slot tagged with its VariantType."""
    def __init__(self, vtype: Union[VariantType, int]):
        # Accept a VariantType directly, or anything VariantType.get can
        # coerce (an int tag or a numeric string).
        self.vtype = vtype if isinstance(vtype, VariantType) else VariantType.get(vtype)
# Bind the real singleton instances onto the VariantType class attributes.
# Constructing each VariantType also registers it in ``_vtypes``.
_VARIANT_SPECS = (
    ("NIL", 0, "NIL"),
    ("BOOL", 1, "BOOL"),
    ("INT", 2, "INT"),
    ("REAL", 3, "REAL"),
    ("STRING", 4, "STRING"),
    ("VECTOR2", 5, "VECTOR2"),
    ("RECT2", 6, "RECT2"),
    ("VECTOR3", 7, "VECTOR3"),
    ("TRANSFORM2D", 8, "TRANSFORM2D"),
    ("PLANE", 9, "PLANE"),
    ("QUAT", 10, "QUAT"),
    ("AABB", 11, "AABB"),
    ("BASIS", 12, "BASIS"),
    ("TRANSFORM", 13, "TRANSFORM"),
    ("COLOR", 14, "COLOR"),
    ("NODE_PATH", 15, "NODE_PATH"),
    ("RID", 16, "_RID"),  # display name is "_RID" in the original table
    ("OBJECT", 17, "OBJECT"),
    ("DICTIONARY", 18, "DICTIONARY"),
    ("ARRAY", 19, "ARRAY"),
    ("POOL_BYTE_ARRAY", 20, "POOL_BYTE_ARRAY"),
    ("POOL_INT_ARRAY", 21, "POOL_INT_ARRAY"),
    ("POOL_REAL_ARRAY", 22, "POOL_REAL_ARRAY"),
    ("POOL_STRING_ARRAY", 23, "POOL_STRING_ARRAY"),
    ("POOL_VECTOR2_ARRAY", 24, "POOL_VECTOR2_ARRAY"),
    ("POOL_VECTOR3_ARRAY", 25, "POOL_VECTOR3_ARRAY"),
    ("POOL_COLOR_ARRAY", 26, "POOL_COLOR_ARRAY"),
)
for _attr, _value, _label in _VARIANT_SPECS:
    setattr(VariantType, _attr, VariantType(_value, _label))
del _VARIANT_SPECS, _attr, _value, _label
| 39.673267
| 70
| 0.698028
|
4a0181f30afe6cc52285e61fa98514ec102bb4ef
| 39,942
|
py
|
Python
|
mystery/game.py
|
ZDDM/MysteryBot
|
20fe9ffe5c6942cc7eb64f684f55790d440404b5
|
[
"MIT"
] | null | null | null |
mystery/game.py
|
ZDDM/MysteryBot
|
20fe9ffe5c6942cc7eb64f684f55790d440404b5
|
[
"MIT"
] | null | null | null |
mystery/game.py
|
ZDDM/MysteryBot
|
20fe9ffe5c6942cc7eb64f684f55790d440404b5
|
[
"MIT"
] | 1
|
2021-02-28T23:40:55.000Z
|
2021-02-28T23:40:55.000Z
|
import discord
import asyncio
import random
from copy import deepcopy as copy
class Game():
# Game states.
STATE_PREPARE = -1
STATE_LOBBY = 0
STATE_GAME = 1
STATE_END = 2
    def __init__(self, bot, server, cleanup_function=None):
        """Set up a fresh game on *server*.

        bot -- the discord client; server -- the guild to host the game on;
        cleanup_function -- optional callback invoked from delete().
        """
        self.bot = bot
        self.server = server
        self.cleanup_function = cleanup_function
        self.game_state = self.STATE_PREPARE
        self.players = []    # active Player objects
        self.observers = []  # observing Player objects (is_observer=True)
        self.murderers = []  # subset of players chosen in start()
        # Prototype items; maps copy() these when placing them on the map.
        self.item_database = {"band aid" : HealItem(name="band aid", description="Used for non-serious injuries", heal=5),
                              "bandage" : HealItem(name="bandage", description="A bandage made out of cotton", heal=20),
                              "first aid kit" : HealItem(name="first aid kit", description="Collection of supplies and equipment that is used to give medical treatment", heal=40),
                              "paper" : Paper()}
        # Prototype weapons, ordered roughly by robustness.
        self.weapon_database = {"book" : Weapon(name="book", description="Bust someone's head with it! Still better than your fists", robustness=7),
                                "branch" : Weapon(name="branch", description="A branch from a tree. Better than using your fists!", robustness=8),
                                "knife" : Weapon(name="knife", description="A kitchen knife", robustness=10),
                                "hatchet" : Weapon(name="Hatchet", description="A small axe.", robustness=13),
                                "baseball bat" : Weapon(name="baseball bat", description="SMAAAAASH! Homerun!", robustness=15),
                                "billhook" : Weapon(name="billhook", description="Traditional cutting tool", robustness=20),
                                "toolbox" : Weapon(name="toolbox", description="Originally used for storing tools, now used for busting heads!", robustness=25),
                                "katana" : Weapon(name="katana", description="Traditional japanese sword", robustness=30),
                                "sword" : Weapon(name="sword", description="A beautiful long sword", robustness=30),
                                "winchester" : Weapon(name="winchester 1894", description="A ranged rifle for \"self-defense\"...", robustness=50)}
        self.channel_prefix = "mystery_"  # overwritten by the map builder
        self.player_role = None
        self.observer_role = None
        self.dead_role = None
        self.loop_task = None   # handle of the game_loop() task
        self.debug = False      # when True, game_loop never auto-ends the game
        # False -> each player spawns at a random location (map may override).
        self.appear_location = False
        self.locations = self.map_rokkenjima()
        self.channel = None     # lobby channel, created in prepare()
    def map_devtest(self):
        """Build the tiny two-room developer-test map and return its locations."""
        self.channel_prefix = "dev_"
        chest = Furniture(name="chest", description="A chest forged from zeros and ones.", \
                          contents=[Item(name="Useless junk", description="Totally useless..."), \
                                    Weapon(name="Toolbox", description="ROBUST!", robustness=30)], \
                          random_content=[(Weapon(name="Legendary bike horn", description="Used by a clown living inside a space station... Cool, eh?"),1/3)])
        devroom1 = Location(self, name="devroom1", topic="OH NO, A DEV ROOM", items=[Item(name="Tears", description="Solidified tears from a coder.", is_bloody=True), Paper()], furniture=[chest])
        devroom2 = Location(self, name="devroom2", topic="Oh hey, it's a dev room.", items=[Weapon(name="Billhook", description="Popularized by teenage girls.", robustness=20), Paper(), Paper(name="golden letter", description="a golden letter", text="There are murderers and such", signature="the Golden Witch")])
        devroom1.add_adjacent_location(devroom2)
        locations = [devroom1, devroom2]
        return locations
    def map_rokkenjima(self):
        """Build the Rokkenjima map (Umineko-themed) and return its locations.

        Also decides the spawn rule: 50% chance everyone starts in the
        dining room, otherwise each player spawns at a random location.
        """
        self.channel_prefix = "rokkenjima_"
        # Reusable furniture prototypes; every placement gets its own copy().
        closet = Furniture(name="closet", description="Full of clothes", random_content=[(copy(self.weapon_database["toolbox"]), 1/4), (copy(self.item_database["paper"]), 3/4), (copy(self.item_database["paper"]), 3/4)])
        locker = Furniture(name="locker", description="A person could fit in, maybe...", random_content=[(copy(self.weapon_database["baseball bat"]), 1/3)])
        shelf = Furniture(name="shelf", description="Just a shelf", random_content=[(copy(self.weapon_database["book"]),1/3), (copy(self.item_database["paper"]), 3/4), (copy(self.item_database["paper"]), 3/4)])
        crate = Furniture(name="crate", description="It's a crate! Oh no!", random_content=[(copy(self.weapon_database["katana"]), 1/6)])
        medical_closet = Furniture(name="closet", description="A closet with a green cross on it", random_content=[(copy(self.item_database["first aid kit"]), 1/4), (copy(self.item_database["first aid kit"]), 1/4), (copy(self.item_database["bandage"]), 1/3), (copy(self.item_database["bandage"]), 1/3), (copy(self.item_database["band aid"]), 1/2), (copy(self.item_database["band aid"]), 1/2)])
        # Scripted opening letter placed in the dining room.
        letter = Paper(name="golden letter", description="a beautiful envelope with the Ushiromiya family crest imprinted on it", signature="Beatrice the Golden", can_rename=False, \
                       text="Welcome to Rokkenjima, everyone. I am serving Kinzo-sama as the alchemist-adviser of this house and my name is Beatrice.\n\
Today, we are going to play a fun little game... There are murderers amongst you... They're driven by greed, and won't hesitate to kill.\n\
Now, everyone! Have fun...")
        # Locations.
        pier = Location(self, name="pier", topic="A small pier where boats come by", furniture=[copy(crate)])
        rose_garden = Location(self, name="rose_garden", topic="A beautiful rose garden")
        tool_shed = Location(self, name="tool_shed", topic="A shed for storing various gardening tools", furniture=[copy(locker), copy(shelf)], items=[copy(self.weapon_database["hatchet"])], random_items=[(copy(self.weapon_database["billhook"]), 1/2)])
        forest1 = Location(self, name="forest1", topic="Full of trees...", random_items=[(copy(self.weapon_database["branch"]),1/2), (copy(self.weapon_database["branch"]),1/2)])
        forest2 = Location(self, name="forest2", topic="Like, REALLY full of trees...", random_items=[(copy(self.weapon_database["branch"]),1/2), (copy(self.weapon_database["branch"]),1/2)])
        kuwadorian = Location(self, name="kuwadorian", topic="A beautiful mansion inside the forest", furniture=[copy(closet), copy(crate)], random_items=[(copy(self.weapon_database["katana"]), 1/4), (copy(self.weapon_database["sword"]), 1/4)])
        guest_house_1f = Location(self, name="guest_house_1f", topic="First floor of the guest house")
        guest_house_parlor = Location(self, name="parlor", topic="A rustical chamber with a bar for the guests", furniture=[copy(closet)])
        guest_house_archive = Location(self, name="archive", topic="Holds a small but a wide collection of books", furniture=[copy(closet)], items=[copy(self.weapon_database["book"]), copy(self.weapon_database["book"])])
        guest_house_2f = Location(self, name="guest_house_2f", topic="Second floor of the guest house")
        guest_house_bedroom = Location(self, name="guest_house_bedroom", topic="An elegant guest room with a few beds", furniture=[copy(shelf)], items=[copy(self.weapon_database["baseball bat"])])
        mansion_entrance = Location(self, name="mansion_entrance", topic="I wonder how the mansion looks on the inside...")
        mansion_1f = Location(self, name="mansion_1f", topic="First floor of the guest house. The portrait of a beautiful witch can be seen on the wall...")
        mansion_dining_room = Location(self, name="dining_room", topic="A big but elegant dining room", items=[letter])
        mansion_kitchen = Location(self, name="kitchen", topic="It looks like the kitchen from some restaurant...", furniture=[copy(closet), copy(locker), copy(medical_closet)], items=[copy(self.weapon_database["knife"])])
        mansion_2f = Location(self, name="mansion_2f", topic="Second floor of the mansion", items=[copy(self.weapon_database["book"])])
        mansion_bedroom = Location(self, name="mansion_bedroom", topic="A luxurious bedroom with a large bed", furniture=[copy(closet)])
        mansion_bathroom = Location(self, name="mansion_bathroom", topic="It's just a bathroom. You can't fit through the sink, though!", furniture=[copy(locker), copy(medical_closet)])
        mansion_3f = Location(self, name="mansion_3f", topic="Third and last floor of the mansion")
        mansion_study = Location(self, name="mansion_study", topic="An apartment-sized study", furniture=[copy(closet), copy(shelf), copy(locker)], random_items=[(copy(self.weapon_database["winchester"]), 1/3)])
        mansion_study_kitchen = Location(self, name="mansion_study_kitchen", topic="An ordinary kitchen", items=[copy(self.weapon_database["knife"])])
        mansion_study_bathroom = Location(self, name="mansion_study_bathroom", topic="Just a bathroom...", furniture=[copy(medical_closet)])
        # Adjacency graph (two-way unless one_way=True).
        pier.add_adjacent_location(rose_garden)
        rose_garden.add_adjacent_location(tool_shed)
        rose_garden.add_adjacent_location(forest1)
        tool_shed.add_adjacent_location(forest1)
        rose_garden.add_adjacent_location(guest_house_1f)
        rose_garden.add_adjacent_location(mansion_entrance)
        forest1.add_adjacent_location(forest2)
        forest2.add_adjacent_location(kuwadorian)
        kuwadorian.add_adjacent_location(pier, one_way=True)
        guest_house_1f.add_adjacent_location(guest_house_parlor)
        guest_house_1f.add_adjacent_location(guest_house_archive)
        guest_house_1f.add_adjacent_location(guest_house_2f)
        guest_house_2f.add_adjacent_location(guest_house_bedroom)
        mansion_entrance.add_adjacent_location(mansion_1f)
        mansion_1f.add_adjacent_location(mansion_kitchen)
        mansion_1f.add_adjacent_location(mansion_dining_room)
        mansion_1f.add_adjacent_location(mansion_2f)
        mansion_2f.add_adjacent_location(mansion_bedroom)
        mansion_bedroom.add_adjacent_location(mansion_bathroom)
        mansion_2f.add_adjacent_location(mansion_3f)
        mansion_3f.add_adjacent_location(mansion_study)
        mansion_study.add_adjacent_location(mansion_study_kitchen)
        mansion_study.add_adjacent_location(mansion_study_bathroom)
        mansion_study_kitchen.add_adjacent_location(mansion_study_bathroom)
        locations = [pier, rose_garden, tool_shed, forest1, forest2, kuwadorian, guest_house_1f, guest_house_parlor,\
                     guest_house_archive, guest_house_2f, guest_house_bedroom, mansion_entrance, mansion_kitchen, mansion_1f, mansion_2f,\
                     mansion_bedroom, mansion_dining_room, mansion_bathroom, mansion_3f, mansion_study, mansion_study_kitchen, mansion_study_bathroom]
        if random.random() < 0.5:
            self.appear_location = mansion_dining_room # Everyone appears in the dining room
        else:
            self.appear_location = False # Random location for each player
        return locations
    async def prepare(self):
        """Create the game roles, permission overwrites, the lobby channel,
        and each location's channel; then enter the LOBBY state."""
        self.player_role = await self.bot.create_role(self.server, name="Mystery Player")
        self.observer_role = await self.bot.create_role(self.server, name="Mystery Observer")
        # NOTE(review): the dead role is named "Location" — confirm intentional.
        self.dead_role = await self.bot.create_role(self.server, name="Location")
        # Lobby visibility: hidden from @everyone, writable for players and
        # the dead, read-only for observers.
        everyone_perm = discord.ChannelPermissions(target=self.server.default_role, overwrite=discord.PermissionOverwrite(read_messages=False, send_messages=False))
        player_perm = discord.ChannelPermissions(target=self.player_role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=True))
        observer_perm = discord.ChannelPermissions(target=self.observer_role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=False))
        dead_perm = discord.ChannelPermissions(target=self.dead_role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=True))
        self.channel = await self.bot.create_channel(self.server, "%slobby"%(self.channel_prefix), everyone_perm, player_perm, observer_perm, dead_perm)
        await self.bot.edit_channel(self.channel, topic="Mystery game lobby.")
        self.game_state = self.STATE_LOBBY
        for location in self.locations:
            await location.start()
    async def start(self, timer):
        """Start the game after *timer* seconds: lock the lobby for players,
        pick the murderers (~1/3 of players, min 1), launch the game loop,
        and place every player on the map."""
        await self.bot.send_message(self.channel, "The game will start in %s seconds."%(timer))
        await asyncio.sleep(timer)
        self.game_state = self.STATE_GAME
        await self.bot.send_message(self.channel, "The game has started! @everyone")
        await self.bot.edit_channel(self.channel, topic="Mystery game lobby. The game has already started!")
        await asyncio.sleep(2)
        await self.bot.edit_channel(self.channel, topic="Mystery game lobby. The game has already started! You can discuss it here.")
        # Players lose the lobby; observers may now chat there.
        await self.bot.edit_channel_permissions(self.channel, target=self.player_role, overwrite=discord.PermissionOverwrite(read_messages=False, send_messages=False))
        await self.bot.edit_channel_permissions(self.channel, target=self.observer_role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=True))
        # One murderer per three players, at least one.
        murderer_number = int(len(self.players) / 3)
        if not murderer_number:
            murderer_number = 1
        murderer_list = ""
        random.seed()
        sample = random.sample(range(len(self.players)), murderer_number)
        for i in sample:
            self.murderers.append(self.players[i])
            self.players[i].role = Player.ROLE_MURDERER
            murderer_list += self.players[i].user.mention + "\n"
            await self.bot.send_message(self.players[i].user, "You're the **MURDERER**. Your goal is to kill all innocents without being caught.")
        # Each murderer is privately shown the full murderer roster.
        em = discord.Embed(title="Murderers", description=murderer_list, colour=0xff5555)
        em.set_footer(text="Know your \"friends\"...")
        self.loop_task = self.bot.loop.create_task(self.game_loop())
        for i in self.murderers:
            await self.bot.send_message(i.user, embed=em)
        for player in self.players:
            if self.appear_location:
                # Map chose a common spawn point.
                await self.appear_location.player_enter(player)
            else:
                random.seed()
                await self.locations[random.randint(0, len(self.locations) - 1)].player_enter(player)
    async def stop(self):
        """Tear the game down immediately (roles/channels; see delete())."""
        await self.delete()
    async def add_player(self, user):
        """Add *user* to the game while in the lobby.

        Returns True on success, False if the game is not in the lobby or the
        user already participates.
        """
        if self.game_state == self.STATE_LOBBY:
            player = self.find_by_user(user)
            # find_by_user returns False for unknown users; False is never in
            # self.players, so new users pass this check.
            # NOTE(review): an existing *observer* also passes it — confirm
            # whether observers are allowed to join as players simultaneously.
            if not (player in self.players):
                self.players.append(Player(self, user))
                await self.bot.add_roles(self.server.get_member(user.id), self.player_role)
                await self.bot.send_message(self.channel, "%s joins the game!" % (user.mention))
                return True
        return False
    async def remove_player(self, user):
        """Remove *user* from the lobby; returns True when a player was removed."""
        if self.game_state == self.STATE_LOBBY:
            player = self.find_by_user(user)
            if player:
                # Only actual players (not observers) can be removed here.
                if not player.is_observer and player in self.players:
                    self.players.remove(player)
                    await self.bot.remove_roles(player.member, self.player_role)
                    await self.bot.send_message(self.channel, "%s leaves the game..." % (user.mention))
                    return True
        return False
    async def add_observer(self, user):
        """Register *user* as an observer (any state except PREPARE).

        Returns True on success, False if the user already participates.
        """
        if self.game_state != self.STATE_PREPARE:
            player = self.find_by_user(user)
            if not player:
                self.observers.append(Player(self, user, True))
                await self.bot.add_roles(self.server.get_member(user.id), self.observer_role)
                return True
        return False
    async def remove_observer(self, user):
        """Unregister *user* as an observer; returns True when removed."""
        player = self.find_by_user(user)
        if player:
            if player.is_observer and player in self.observers:
                self.observers.remove(player)
                await self.bot.remove_roles(player.member, self.observer_role)
                return True
        return False
def find_by_user(self, user):
for item in self.players:
if (item.user == user) or (item.member == user):
return item
for item in self.observers:
if (item.user == user) or (item.member == user):
return item
return False
def find_by_member(self, member):
# deprecated
for item in self.players:
if item.member == member:
return item
for item in self.observers:
if item.member == member:
return item
return False
def find_location(self, loc):
for location in self.locations:
if location.name == loc:
return location
return False
    async def end_game(self):
        """End the game: reveal every player's role and survival status, then
        stop the game after a 60 second grace period."""
        self.game_state = self.STATE_END
        # Re-open the main channel so every (former) player can talk again.
        await self.bot.edit_channel_permissions(self.channel, target=self.player_role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=True))
        await self.bot.send_message(self.channel, "The game has ended! @everyone")
        await asyncio.sleep(5)
        # Build the role-reveal summary, one line per player.
        rolestring = ""
        for player in self.players:
            await self.bot.remove_roles(player.member, player.location.role)
            if player.role == Player.ROLE_NONE:
                rolestring += "%s was an innocent bystander! "%(player.user.mention)
            elif player.role == Player.ROLE_MURDERER:
                rolestring += "%s was a murderer! "%(player.user.mention)
            if player.is_dead:
                rolestring += "%s didn't survive the events.\n"%(player.name)
            else:
                rolestring += "%s survived the events!\n"%(player.name)
        em = discord.Embed(title="Roles", description=rolestring, color=0xf0f8ff)
        await self.bot.send_message(self.channel, embed=em)
        await self.bot.send_message(self.channel, "The game will be stopped in 60 seconds...")
        await asyncio.sleep(60)
        await self.stop()
    async def game_loop(self):
        """Background task: once per second, check the win conditions and end
        the game when either faction has no survivors left."""
        await self.bot.wait_until_ready()
        while not self.bot.is_closed and self.game_state != self.STATE_END:
            end_game = False
            alive_murderers = 0
            alive_bystanders = 0
            for murderer in self.murderers:
                if not murderer.is_dead:
                    alive_murderers += 1
            for player in self.players:
                if not player.role == Player.ROLE_MURDERER and not player.is_dead:
                    alive_bystanders += 1
            # Game over when exactly one side -- or neither -- is still alive.
            if (not alive_murderers and alive_bystanders) or (alive_murderers and not alive_bystanders) or (not alive_murderers and not alive_bystanders):
                end_game = True
            # NOTE(review): `debug` is assumed to be a module-level flag defined
            # elsewhere in this file -- confirm it exists before this task runs.
            if end_game and not debug:
                await self.end_game()
                self.loop_task.cancel()
            else:
                await asyncio.sleep(1) # Runs every second.
    async def delete(self):
        """Free every Discord resource (roles, channel) and tear down all
        players, observers, weapons and locations owned by this game."""
        if self.cleanup_function:
            # Caller-supplied hook, e.g. to unregister this game from a registry.
            self.cleanup_function()
        await self.bot.delete_role(self.server, self.player_role)
        await self.bot.delete_role(self.server, self.observer_role)
        await self.bot.delete_role(self.server, self.dead_role)
        await self.bot.delete_channel(self.channel)
        for player in self.players:
            await player.delete()
        for player in self.observers:
            await player.delete()
        for item in list(self.weapon_database.values()):
            await item.delete()
        for location in self.locations:
            await location.delete()
class Player():
    """One participant in the game.

    Wraps a Discord user/member together with all in-game state: role,
    location, inventory, health and status flags. Observers are represented
    as Players constructed with is_observer=True.
    """
    # Result codes returned by attack().
    ATTACK_FAIL = 0
    ATTACK_COOLDOWN = 1
    ATTACK_SUCCESS = 2
    ATTACK_CRITICAL = 3
    ATTACK_LETHAL = 4
    # Role constants.
    ROLE_NONE = 0
    ROLE_MURDERER = 1
    def __init__(self, game, user, is_observer=False):
        self.game = game
        self.user = user
        self.name = self.user.name
        self.role = self.ROLE_NONE
        self.member = self.game.server.get_member(self.user.id)
        # Prefer the server nickname over the account name when one is set.
        if self.member.nick:
            self.name = self.member.nick
        self.location = None        # Location currently occupied.
        self.is_observer = is_observer
        self.is_bloody = False      # Visible blood stains on clothes.
        self.is_dead = False
        self.equipped_item = None   # Item currently held (may be a Weapon).
        self.inventory = []
        self.health = 100           # 0..100; 0 means dying/dead.
        self.killed_by = None       # Player who landed the killing blow.
        self.move_cooldown = False
        self.attack_cooldown = False
    def equipped_a_weapon(self):
        """True when the held item is a Weapon (grants bonus attack damage)."""
        return isinstance(self.equipped_item, Weapon)
    def equip(self, item):
        """Hold *item* (must already be in the inventory).

        Returns True when the equipped item actually changed.
        """
        if item in self.inventory:
            if item != self.equipped_item:
                self.equipped_item = item
                return True
        return False
    async def die(self, who_killed_me=None):
        """Mark the player dead, announce it, and switch them to the dead roles."""
        self.killed_by = who_killed_me
        self.is_dead = True
        await self.game.bot.send_message(self.location.channel, "**%s seizes up and falls limp, their eyes dead and lifeless...**"%(self.name))
        await self.game.bot.remove_roles(self.member, self.location.role)
        await asyncio.sleep(0.25)
        await self.game.bot.add_roles(self.member, self.game.dead_role)
        await self.game.bot.add_roles(self.member, self.location.dead_role)
    async def attack(self, player):
        """Attempt to attack another Player; returns one of the ATTACK_* codes.

        30% chance to miss outright; on a hit there is a further 15% chance of
        a critical (robustness x1.5). Attacking yourself silently returns the
        cooldown code; attacks during cooldown do the same.
        """
        if not self.attack_cooldown:
            assert isinstance(player, Player)
            if player == self:
                pass
            else:
                # Base damage bonus, raised by the equipped weapon if any.
                robustness = 5
                if self.equipped_a_weapon():
                    robustness += self.equipped_item.robustness
                random.seed()
                if random.randint(0, 100) <= 30:
                    return self.ATTACK_FAIL
                else:
                    random.seed()
                    if random.randint(0, 100) <= 15:
                        robustness *= 1.5
                        await self._attack(player, robustness)
                        if player.health:
                            return self.ATTACK_CRITICAL
                        else:
                            return self.ATTACK_LETHAL
                    else:
                        await self._attack(player, robustness)
                        if player.health:
                            return self.ATTACK_SUCCESS
                        else:
                            return self.ATTACK_LETHAL
        return self.ATTACK_COOLDOWN
    async def _attack(self, player, robustness):
        """Apply damage to *player* and randomly spread blood stains around."""
        damage = random.randint(10, 15) + robustness
        player.health -= damage
        if random.randint(0, 1):
            self.is_bloody = True
        if random.randint(0, 1):
            player.is_bloody = True
        if self.equipped_a_weapon():
            # Let the weapon apply any special on-hit effect.
            await self.equipped_item.on_attack(player)
            if random.randint(0, 1):
                self.equipped_item.is_bloody = True
        if player.health <= 0:
            player.health = 0
            player.is_bloody = True
    def heal(self, hp):
        """Restore *hp* health, capped at 100."""
        self.health += hp
        if self.health > 100:
            self.health = 100
    def add_item(self, item):
        """Put *item* in the inventory, detaching it from its previous holder."""
        if item not in self.inventory:
            if isinstance(item.parent, Player) or isinstance(item.parent, Location) or isinstance(item.parent, Furniture):
                item.parent.remove_item(item)
            self.inventory.append(item)
            item.parent = self
    def remove_item(self, item):
        """Drop *item* from the inventory (and un-equip it if held)."""
        if item in self.inventory:
            if item.parent:
                item.parent = None
            self.inventory.remove(item)
            if self.equipped_item == item:
                self.equipped_item = None
    def find_item(self, item):
        """Case-insensitive inventory lookup by item name; False when absent."""
        for mitem in self.inventory:
            if mitem._name.lower() == item.lower():
                return mitem
        return False
    def examine(self):
        '''Returns a single-line string describing this player's condition.'''
        examined = ""
        if not self.is_dead:
            if self.health >= 100:
                examined = "%s seems to be doing alright.\n"%self.name
            elif self.health > 90:
                examined = "%s seems to be slightly hurt.\n"%self.name
            elif self.health > 70:
                examined = "%s seems to be hurt.\n"%self.name
            elif self.health > 50:
                examined = "%s seems to be injured.\n"%self.name
            elif self.health > 30:
                examined = "%s seems to be quite injured...\n"%self.name
            elif self.health > 10:
                examined = "%s seems like they need urgent medical care!\n"%self.name
            elif self.health >= 1:
                examined = "%s seems like they're about to die!\n"%self.name
            else:
                # BUG FIX: the original never interpolated the name here and
                # returned the literal "%s is already...".
                examined = "%s is already...\n"%self.name
        else:
            examined = "%s seems to be dead!\n"%self.name
        if self.is_bloody:
            examined += "%s's clothes are **blood-stained**!\n"%self.name
        if self.equipped_item:
            examined += "%s is holding %s %s. \n"%(self.name, self.equipped_item.indef_article(), self.equipped_item.name())
        return examined
    async def delete(self):
        """Break all references so the player (and its items) can be collected."""
        self.killed_by = None
        self.game = None
        self.equipped_item = None
        if self.location:
            await self.location.player_leave(self, message=False)
        self.user = None
        self.member = None
        # BUG FIX: iterate over a copy -- remove_item() mutates self.inventory,
        # which previously made this loop skip every other item.
        for item in list(self.inventory):
            self.remove_item(item)
            await item.delete()
class Item():
    """A basic carryable object that can belong to a Player, a Location
    (on the floor) or a Furniture container via its ``parent`` attribute."""
    def __init__(self, name="Unknown", description="Unknown item.", is_bloody=False):
        self._name = name
        self.description = description
        self.is_bloody = is_bloody
        self.parent = None
    def name(self):
        """Markdown display name, flagging blood stains when present."""
        prefix = "**blood-stained** " if self.is_bloody else ""
        return "%s__%s__" % (prefix, self._name)
    def pickup(self, player):
        """Transfer this item into *player*'s inventory."""
        if isinstance(player, Player):
            player.add_item(self)
    def drop(self):
        """Drop this item onto the floor of the holder's current location."""
        if isinstance(self.parent, Player):
            self.parent.location.add_item(self)
    def indef_article(self):
        """Return "an" or "a" depending on how the display name starts."""
        starts_with_vowel = self._name[0] in "aeiou"
        # A bloody name is prefixed with "**blood-stained**", so "a" fits.
        return "an" if starts_with_vowel and not self.is_bloody else "a"
    def examine(self):
        """One-line description, phrased according to where the item lives."""
        article = self.indef_article()
        shown = self.name().lower()
        if isinstance(self.parent, Player):
            return "This is %s %s! "%(article, shown) + self.description
        elif isinstance(self.parent, Location):
            return "There is %s %s on the ground! "%(article, shown)
        elif isinstance(self.parent, Furniture):
            return "There is %s %s inside the %s! "%(article, shown, self.parent.name.lower())
    async def delete(self):
        """Detach this item from whatever currently holds it."""
        if self.parent:
            self.parent.remove_item(self)
class Usable(Item):
    """An Item that can be activated with "use"; subclasses override use()."""
    async def use(self, *args):
        # Base implementation: no effect.
        pass
class Weapon(Usable):
    """A usable item that raises attack damage via its robustness stat."""
    def __init__(self, name="Unknown", description="Unknown weapon.", is_bloody=False, robustness=15):
        super(Weapon, self).__init__(name, description, is_bloody)
        self.robustness = robustness  # Added to the attacker's base damage bonus.
    async def on_attack(self, other):
        # Hook for subclasses: called after this weapon hits *other*.
        pass
class HealItem(Usable):
    """A single-use item that restores health to the holder or a target player.

    The item deletes itself after a successful heal.
    """
    def __init__(self, name="Unknown", description="Unknown heal item.", is_bloody=False, heal=15):
        super(HealItem, self).__init__(name, description, is_bloody)
        self.heal = heal  # Hit points restored per use.
    async def use(self, *args):
        """Heal args[0] (a Player in the same location) or, with no target,
        the holder themself."""
        if len(args):
            if isinstance(args[0], Player):
                other = args[0]
                if other.location == self.parent.location:
                    # BUG FIX: the original interpolated the bound method
                    # `self.name` instead of calling self.name().
                    await self.parent.game.bot.send_message(self.parent.location.channel, "%s heals %s using the %s"%(self.parent.user.mention, other.user.mention, self.name()))
                    other.heal(self.heal)
                    await self.delete()
                    return
                await self.parent.game.bot.send_message(self.parent.location.channel, "That person is not here!")
                return
        await self.parent.game.bot.send_message(self.parent.location.channel, "%s heals themself using the %s"%(self.parent.user.mention, self.name()))
        self.parent.heal(self.heal)
        await self.delete()
class Paper(Usable):
    """A writable piece of paper.

    use() with no arguments reads the paper (via DM). use() with arguments
    appends text; a "[sign]"/"[anonsign]" token signs it, after which the
    next use() sets the title (once).
    """
    def __init__(self, name="paper", description="A blank piece of paper", text="", signature=None, can_rename=True, is_bloody=False):
        super(Paper, self).__init__(name, description, is_bloody)
        self.text = text
        self.signature = signature      # Mention/name of the signer, or None.
        self.can_rename = can_rename    # A signed paper may be titled once.
    def on_write(self):
        """Reset the display name/description once the paper has been signed."""
        self._name = "unnamed paper"
        self.description = "a piece of paper that has already been used"
    async def read_paper(self):
        """DM the paper's text (and signature, if any) to the holder."""
        await self.parent.game.bot.send_message(self.parent.user, "*---BEGIN---*")
        await self.parent.game.bot.send_message(self.parent.user, self.text)
        await self.parent.game.bot.send_message(self.parent.user, "*----END----*")
        if self.signature:
            await self.parent.game.bot.send_message(self.parent.user, "*This text has been signed by %s*"%(self.signature))
        else:
            # BUG FIX: added the missing closing asterisk (broken markdown italics).
            await self.parent.game.bot.send_message(self.parent.user, "*This text has no signature*")
    async def use(self, *args):
        """Write on, title, or read the paper; see the class docstring."""
        if len(args):
            if self.signature:
                if self.can_rename:
                    # Titles must not contain user mentions.
                    for arg in args:
                        if isinstance(arg, discord.Member) or isinstance(arg, discord.User):
                            await self.parent.game.bot.send_message(self.parent.location.channel, "Invalid title!")
                            break
                    else:
                        await self.parent.game.bot.send_message(self.parent.location.channel, "%s writes a new title for the %s"%(self.parent.user.mention, self.name()))
                        self._name = ""
                        for arg in args:
                            self._name += "%s "%(str(arg))
                        self._name = self._name.strip()
                        self.can_rename = False
                else:
                    await self.parent.game.bot.send_message(self.parent.location.channel, "Can't rename!")
            else:
                end_writing = False
                for arg in args:
                    if isinstance(arg, str):
                        if "[sign]" in arg:
                            self.signature = self.parent.user.mention
                            arg = arg.replace("[sign]", "")
                            end_writing = True
                        elif "[anonsign]" in arg:
                            self.signature = "an anonymous writer"
                            arg = arg.replace("[anonsign]", "")
                            end_writing = True
                        self.text += "%s " %(arg)
                    elif isinstance(arg, discord.Member) or isinstance(arg, discord.User):
                        # BUG FIX: the original called the undefined name
                        # "isintance" here, raising NameError at runtime.
                        self.text += "%s " %(arg.mention)
                    else:
                        try:
                            arg = str(arg)
                            self.text += "%s " %(arg)
                        except:
                            pass
                if end_writing:
                    await self.parent.game.bot.send_message(self.parent.location.channel, "%s writes on the %s and signs it. Now it's just missing a title!" %(self.parent.user.mention, self.name()))
                    self.on_write()
                else:
                    self.text += "\n"
                    await self.parent.game.bot.send_message(self.parent.location.channel, "%s writes on the %s"%(self.parent.user.mention, self.name()))
        else:
            await self.read_paper()
class Furniture():
    """A container placed in a Location that can hold Items.

    contents uses object instances.
    random_content uses a tuple including an object instance and a chance (from 0.0 to 1.0)
    """
    def __init__(self, name="", description="", contents=None, random_content=None):
        # BUG FIX: contents/random_content previously used shared mutable
        # default lists; None sentinels preserve behavior without the sharing.
        contents = contents if contents else []
        random_content = random_content if random_content else []
        self.parent = None
        self.contents = []
        self.name = name
        # BUG FIX: description was accepted but never stored.
        self.description = description
        for item, chance in random_content:
            random.seed()
            if random.random() < chance:
                self.add_item(item)
        for item in contents:
            self.add_item(item)
    def examine(self):
        """One-line announcement of this furniture's presence."""
        if self.name[0] in ("a", "e", "i", "o", "u"):
            return "There is an %s! "%(self.name.lower())
        else:
            return "There is a %s! "%(self.name.lower())
    def dump(self):
        """Move every contained item onto the parent location's floor."""
        # Iterate over a copy: add_item() re-parents items, which (with the
        # fixed remove_item below) mutates self.contents mid-loop.
        for item in list(self.contents):
            self.parent.add_item(item)
    def examine_contents(self):
        """Multi-line description of every item inside this furniture."""
        contentstr = ""
        for content in self.contents:
            contentstr += "%s \n"%(content.examine())
        return contentstr
    def add_item(self, item):
        """Place *item* inside, detaching it from its previous holder."""
        if item not in self.contents:
            if isinstance(item.parent, Player) or isinstance(item.parent, Location) or isinstance(item.parent, Furniture):
                item.parent.remove_item(item)
            self.contents.append(item)
            item.parent = self
    def remove_item(self, item):
        """Detach *item* from this furniture."""
        if item in self.contents:
            if item.parent:
                item.parent = None
            # BUG FIX: the original never removed the item from self.contents,
            # unlike the Player/Location counterparts of this method.
            self.contents.remove(item)
    def find_item(self, item):
        """Case-insensitive lookup of a contained item by name; False if absent."""
        for mitem in self.contents:
            if mitem._name.lower() == item.lower():
                return mitem
        return False
    async def delete(self):
        """Detach and delete every contained item."""
        # Iterate over a copy: remove_item() mutates self.contents.
        for item in list(self.contents):
            self.remove_item(item)
            await item.delete()
class Location():
    """A room in the game world, backed by a Discord channel plus two roles
    (one for living occupants, one for dead spectators of that room).

    items uses item instances.
    random_items uses a tuple including an item instance and a chance (from 0.0 to 1.0)
    furniture uses furniture instances
    random_furniture uses a tuple including a furniture instance and a chance (from 0.0 to 1.0)
    """
    def __init__(self, game, name, topic="", description="", items=None, random_items=None, furniture=None, random_furniture=None, cooldown=3):
        # BUG FIX: the four list parameters previously used shared mutable
        # default lists ([]); None sentinels preserve behavior.
        items = items if items else []
        random_items = random_items if random_items else []
        # BUG FIX: snapshot the furniture parameter now -- the original's
        # "for furniture, chance in random_furniture" loop clobbered the name,
        # so "for furniture in furniture" then iterated the wrong object.
        initial_furniture = list(furniture) if furniture else []
        random_furniture = random_furniture if random_furniture else []
        self.game = game
        self.name = name.replace(" ", "_")  # Channel-safe name.
        self.role = None        # Created in start().
        self.dead_role = None   # Created in start().
        self.topic = topic
        self.cooldown = cooldown
        self.description = description
        self.players = [] # Players in this location.
        self.items = [] # Items in this location.
        self.furniture = [] # Furniture in this location
        self.adjacent_locations = []
        self.channel = None
        for item, chance in random_items:
            random.seed()
            if random.random() < chance:
                self.add_item(item)
        for item in items:
            self.add_item(item)
        for piece, chance in random_furniture:
            random.seed()
            if random.random() < chance:
                self.add_furniture(piece)
        for piece in initial_furniture:
            self.add_furniture(piece)
    async def start(self):
        """Create the Discord roles/channel for this room and set permissions."""
        self.role = await self.game.bot.create_role(self.game.server, name="Location")
        self.dead_role = await self.game.bot.create_role(self.game.server, name="Location")
        everyone_perm = discord.ChannelPermissions(target=self.game.server.default_role, overwrite=discord.PermissionOverwrite(read_messages=False, send_messages=False, read_message_history=False))
        role_perm = discord.ChannelPermissions(target=self.role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=True, read_message_history=False))
        observer_perm = discord.ChannelPermissions(target=self.game.observer_role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=False, read_message_history=True))
        dead_perm = discord.ChannelPermissions(target=self.game.dead_role, overwrite=discord.PermissionOverwrite(read_messages=False, send_messages=False, read_message_history=False))
        dead_perm2 = discord.ChannelPermissions(target=self.dead_role, overwrite=discord.PermissionOverwrite(read_messages=True, send_messages=False, read_message_history=True))
        self.channel = await self.game.bot.create_channel(self.game.server, "%s%s"%(self.game.channel_prefix, self.name), everyone_perm, role_perm, observer_perm, dead_perm, dead_perm2)
        await self.game.bot.edit_channel(self.channel, topic=self.topic)
    def add_adjacent_location(self, location, one_way=False):
        """Connect this room to *location*; bidirectional unless one_way."""
        assert isinstance(location, Location)
        if location not in self.adjacent_locations:
            self.adjacent_locations.append(location)
        if self not in location.adjacent_locations and not one_way:
            location.adjacent_locations.append(self)
    def add_item(self, item):
        """Place *item* on the floor, detaching it from its previous holder."""
        if item not in self.items:
            if isinstance(item.parent, Player) or isinstance(item.parent, Location) or isinstance(item.parent, Furniture):
                item.parent.remove_item(item)
            self.items.append(item)
            item.parent = self
    def remove_item(self, item):
        """Detach *item* from this room's floor."""
        if item in self.items:
            if item.parent:
                item.parent = None
            self.items.remove(item)
    def find_item(self, item):
        """Case-insensitive lookup of a floor item by name; False if absent."""
        for mitem in self.items:
            if mitem._name.lower() == item.lower():
                return mitem
        return False
    def add_furniture(self, furniture):
        """Install *furniture*, detaching it from any previous Location."""
        if furniture not in self.furniture:
            if isinstance(furniture.parent, Location):
                # BUG FIX: the original passed the undefined name "item" here.
                furniture.parent.remove_furniture(furniture)
            self.furniture.append(furniture)
            furniture.parent = self
    def remove_furniture(self, furniture):
        """Detach *furniture* from this room."""
        if furniture in self.furniture:
            if furniture.parent:
                furniture.parent = None
            self.furniture.remove(furniture)
    def find_furniture(self, furniture):
        """Exact-name furniture lookup; False if absent."""
        for mfurniture in self.furniture:
            if mfurniture.name == furniture:
                return mfurniture
        return False
    async def player_enter(self, player, message=True):
        """Move *player* into this room (observers are ignored)."""
        if player.is_observer:
            return False
        if not (player in self.players):
            await self.game.bot.add_roles(player.member, self.role)
            if message:
                await self.game.bot.send_message(self.channel, "%s enters." %(player.user.mention))
            if player.location:
                await player.location.player_leave(player)
            player.location = self
            self.players.append(player)
            return True
    async def player_leave(self, player, message=True):
        """Remove *player* from this room; announces unless message=False."""
        if player in self.players:
            if message:
                await self.game.bot.send_message(self.channel, "%s leaves." %(player.user.mention))
            await self.game.bot.remove_roles(player.member, self.role)
            if player.location == self:
                player.location = None
            self.players.remove(player)
    async def delete(self):
        """Delete the Discord channel/roles and detach all contents."""
        await self.game.bot.delete_channel(self.channel)
        await self.game.bot.delete_role(self.game.server, self.role)
        # BUG FIX: the per-location dead_role (created in start()) was leaked.
        await self.game.bot.delete_role(self.game.server, self.dead_role)
        self.adjacent_locations = None
        # BUG FIX: iterate over copies -- remove_item/remove_furniture mutate
        # the lists, which previously made these loops skip every other entry.
        for item in list(self.items):
            self.remove_item(item)
        for furniture in list(self.furniture):
            self.remove_furniture(furniture)
            await furniture.delete()
    def examine(self):
        """Describe the room as a dict with keys players/furniture/items/location."""
        examined = {"players" : "", "furniture" : "", "items" : "", "location" : self.description}
        for player in self.players:
            if player.is_dead:
                examined["players"] += "%s lies on the floor!\n%s"%(player.name, player.examine())
            else:
                examined["players"] += "%s is in here.\n%s"%(player.name, player.examine())
        for furniture in self.furniture:
            examined["furniture"] += furniture.examine()
        for item in self.items:
            examined["items"] += item.examine()
        return examined
if __name__ == "__main__":
raise Exception("Execute main.py instead.")
| 48.064982
| 395
| 0.618697
|
4a018305bcfeb90e16bfd668299fadf5ffb1bd34
| 11,006
|
py
|
Python
|
tests/components/nest/camera_sdm_test.py
|
rchl/core
|
974e099e2a9527d38445531c6d9bc1461ba4c36f
|
[
"Apache-2.0"
] | 1
|
2020-12-16T13:36:50.000Z
|
2020-12-16T13:36:50.000Z
|
tests/components/nest/camera_sdm_test.py
|
rchl/core
|
974e099e2a9527d38445531c6d9bc1461ba4c36f
|
[
"Apache-2.0"
] | 52
|
2020-10-15T06:46:28.000Z
|
2022-03-31T06:02:24.000Z
|
tests/components/nest/camera_sdm_test.py
|
rchl/core
|
974e099e2a9527d38445531c6d9bc1461ba4c36f
|
[
"Apache-2.0"
] | 2
|
2020-12-25T16:31:22.000Z
|
2020-12-30T20:53:56.000Z
|
"""
Test for Nest cameras platform for the Smart Device Management API.
These tests fake out the subscriber/devicemanager, and are not using a real
pubsub subscriber.
"""
import datetime
import aiohttp
from google_nest_sdm.device import Device
from homeassistant.components import camera
from homeassistant.components.camera import STATE_IDLE
from homeassistant.util.dt import utcnow
from .common import async_setup_sdm_platform
from tests.async_mock import patch
from tests.common import async_fire_time_changed
PLATFORM = "camera"
CAMERA_DEVICE_TYPE = "sdm.devices.types.CAMERA"
DEVICE_ID = "some-device-id"
DEVICE_TRAITS = {
"sdm.devices.traits.Info": {
"customName": "My Camera",
},
"sdm.devices.traits.CameraLiveStream": {
"maxVideoResolution": {
"width": 640,
"height": 480,
},
"videoCodecs": ["H264"],
"audioCodecs": ["AAC"],
},
}
DATETIME_FORMAT = "YY-MM-DDTHH:MM:SS"
DOMAIN = "nest"
async def async_setup_camera(hass, traits=None, auth=None):
    """Set up the platform and prerequisites.

    When *traits* is provided, a single fake SDM camera device is created.
    BUG FIX: *traits* previously defaulted to a shared mutable dict ({});
    a None sentinel avoids cross-call mutation and has the same truthiness.
    """
    devices = {}
    if traits:
        devices[DEVICE_ID] = Device.MakeDevice(
            {
                "name": DEVICE_ID,
                "type": CAMERA_DEVICE_TYPE,
                "traits": traits,
            },
            auth=auth,
        )
    return await async_setup_sdm_platform(hass, PLATFORM, devices)
async def fire_alarm(hass, point_in_time):
    """Fire an alarm and wait for callbacks to run."""
    # Freeze utcnow() at point_in_time so time-based listeners observe the
    # same simulated clock the event carries.
    with patch("homeassistant.util.dt.utcnow", return_value=point_in_time):
        async_fire_time_changed(hass, point_in_time)
        await hass.async_block_till_done()
async def test_no_devices(hass):
    """Test configuration that returns no devices."""
    await async_setup_camera(hass)
    # No traits were supplied, so no camera entity should have been created.
    assert len(hass.states.async_all()) == 0
async def test_ineligible_device(hass):
    """Test configuration with devices that do not support cameras."""
    # The Info trait alone (no CameraLiveStream) must not yield an entity.
    await async_setup_camera(
        hass,
        {
            "sdm.devices.traits.Info": {
                "customName": "My Camera",
            },
        },
    )
    assert len(hass.states.async_all()) == 0
async def test_camera_device(hass):
    """Test a basic camera with a live stream."""
    await async_setup_camera(hass, DEVICE_TRAITS)
    assert len(hass.states.async_all()) == 1
    # Renamed the local from "camera" to "cam": the old name shadowed the
    # imported homeassistant.components.camera module (and "cam" matches the
    # other tests in this file).
    cam = hass.states.get("camera.my_camera")
    assert cam is not None
    assert cam.state == STATE_IDLE
    # Entity registry: unique id derived from the device id.
    registry = await hass.helpers.entity_registry.async_get_registry()
    entry = registry.async_get("camera.my_camera")
    assert entry.unique_id == "some-device-id-camera"
    assert entry.original_name == "My Camera"
    assert entry.domain == "camera"
    # Device registry: name/model/identifiers from the SDM traits.
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get(entry.device_id)
    assert device.name == "My Camera"
    assert device.model == "Camera"
    assert device.identifiers == {("nest", DEVICE_ID)}
async def test_camera_stream(hass, auth):
    """Test a basic camera and fetch its live stream."""
    now = utcnow()
    expiration = now + datetime.timedelta(seconds=100)
    # Fake GenerateRtspStream command response served by the auth fixture.
    auth.responses = [
        aiohttp.web.json_response(
            {
                "results": {
                    "streamUrls": {
                        "rtspUrl": "rtsp://some/url?auth=g.0.streamingToken"
                    },
                    "streamExtensionToken": "g.1.extensionToken",
                    "streamToken": "g.0.streamingToken",
                    "expiresAt": expiration.isoformat(timespec="seconds"),
                },
            }
        )
    ]
    await async_setup_camera(hass, DEVICE_TRAITS, auth=auth)
    assert len(hass.states.async_all()) == 1
    cam = hass.states.get("camera.my_camera")
    assert cam is not None
    assert cam.state == STATE_IDLE
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.0.streamingToken"
    with patch(
        "homeassistant.components.ffmpeg.ImageFrame.get_image",
        # NOTE(review): "autopatch" is not a mock.patch keyword (it merely sets
        # an attribute on the mock); presumably autospec=True was intended.
        autopatch=True,
        return_value=b"image bytes",
    ):
        image = await camera.async_get_image(hass, "camera.my_camera")
        assert image.content == b"image bytes"
async def test_refresh_expired_stream_token(hass, auth):
    """Test a camera stream expiration and refresh."""
    now = utcnow()
    # Three successive token lifetimes; the camera extends the stream as each
    # approaches expiry.
    stream_1_expiration = now + datetime.timedelta(seconds=90)
    stream_2_expiration = now + datetime.timedelta(seconds=180)
    stream_3_expiration = now + datetime.timedelta(seconds=360)
    auth.responses = [
        # Stream URL #1
        aiohttp.web.json_response(
            {
                "results": {
                    "streamUrls": {
                        "rtspUrl": "rtsp://some/url?auth=g.1.streamingToken"
                    },
                    "streamExtensionToken": "g.1.extensionToken",
                    "streamToken": "g.1.streamingToken",
                    "expiresAt": stream_1_expiration.isoformat(timespec="seconds"),
                },
            }
        ),
        # Stream URL #2
        aiohttp.web.json_response(
            {
                "results": {
                    "streamExtensionToken": "g.2.extensionToken",
                    "streamToken": "g.2.streamingToken",
                    "expiresAt": stream_2_expiration.isoformat(timespec="seconds"),
                },
            }
        ),
        # Stream URL #3
        aiohttp.web.json_response(
            {
                "results": {
                    "streamExtensionToken": "g.3.extensionToken",
                    "streamToken": "g.3.streamingToken",
                    "expiresAt": stream_3_expiration.isoformat(timespec="seconds"),
                },
            }
        ),
    ]
    await async_setup_camera(
        hass,
        DEVICE_TRAITS,
        auth=auth,
    )
    assert len(hass.states.async_all()) == 1
    cam = hass.states.get("camera.my_camera")
    assert cam is not None
    assert cam.state == STATE_IDLE
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.1.streamingToken"
    # Fire alarm before stream_1_expiration. The stream url is not refreshed
    next_update = now + datetime.timedelta(seconds=25)
    await fire_alarm(hass, next_update)
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.1.streamingToken"
    # Alarm is near stream_1_expiration which causes the stream extension
    next_update = now + datetime.timedelta(seconds=65)
    await fire_alarm(hass, next_update)
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.2.streamingToken"
    # Next alarm is well before stream_2_expiration, no change
    next_update = now + datetime.timedelta(seconds=100)
    await fire_alarm(hass, next_update)
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.2.streamingToken"
    # Alarm is near stream_2_expiration, causing it to be extended
    next_update = now + datetime.timedelta(seconds=155)
    await fire_alarm(hass, next_update)
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.3.streamingToken"
async def test_camera_removed(hass, auth):
    """Test case where entities are removed and stream tokens expired."""
    now = utcnow()
    expiration = now + datetime.timedelta(seconds=100)
    auth.responses = [
        # Response for generating the stream.
        aiohttp.web.json_response(
            {
                "results": {
                    "streamUrls": {
                        "rtspUrl": "rtsp://some/url?auth=g.0.streamingToken"
                    },
                    "streamExtensionToken": "g.1.extensionToken",
                    "streamToken": "g.0.streamingToken",
                    "expiresAt": expiration.isoformat(timespec="seconds"),
                },
            }
        ),
        # Response for stopping the stream on entity removal.
        aiohttp.web.json_response({"results": {}}),
    ]
    await async_setup_camera(
        hass,
        DEVICE_TRAITS,
        auth=auth,
    )
    assert len(hass.states.async_all()) == 1
    cam = hass.states.get("camera.my_camera")
    assert cam is not None
    assert cam.state == STATE_IDLE
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.0.streamingToken"
    # Unloading the config entry must remove all entities.
    for config_entry in hass.config_entries.async_entries(DOMAIN):
        await hass.config_entries.async_remove(config_entry.entry_id)
    assert len(hass.states.async_all()) == 0
async def test_refresh_expired_stream_failure(hass, auth):
    """Tests a failure when refreshing the stream."""
    now = utcnow()
    stream_1_expiration = now + datetime.timedelta(seconds=90)
    stream_2_expiration = now + datetime.timedelta(seconds=180)
    auth.responses = [
        # Initial stream URL.
        aiohttp.web.json_response(
            {
                "results": {
                    "streamUrls": {
                        "rtspUrl": "rtsp://some/url?auth=g.1.streamingToken"
                    },
                    "streamExtensionToken": "g.1.extensionToken",
                    "streamToken": "g.1.streamingToken",
                    "expiresAt": stream_1_expiration.isoformat(timespec="seconds"),
                },
            }
        ),
        # Extending the stream fails with arbitrary error
        aiohttp.web.Response(status=500),
        # Next attempt to get a stream fetches a new url
        aiohttp.web.json_response(
            {
                "results": {
                    "streamUrls": {
                        "rtspUrl": "rtsp://some/url?auth=g.2.streamingToken"
                    },
                    "streamExtensionToken": "g.2.extensionToken",
                    "streamToken": "g.2.streamingToken",
                    "expiresAt": stream_2_expiration.isoformat(timespec="seconds"),
                },
            }
        ),
    ]
    await async_setup_camera(
        hass,
        DEVICE_TRAITS,
        auth=auth,
    )
    assert len(hass.states.async_all()) == 1
    cam = hass.states.get("camera.my_camera")
    assert cam is not None
    assert cam.state == STATE_IDLE
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.1.streamingToken"
    # Fire alarm when stream is nearing expiration, causing it to be extended.
    # The stream expires.
    next_update = now + datetime.timedelta(seconds=65)
    await fire_alarm(hass, next_update)
    # The stream is entirely refreshed
    stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
    assert stream_source == "rtsp://some/url?auth=g.2.streamingToken"
| 34.719243
| 83
| 0.616573
|
4a0183155f1748519e690aa452e6dcbe603bda2e
| 869
|
py
|
Python
|
Chapter 07/combinations.py
|
bpbpublications/Python-Quick-Interview-Guide
|
ab4ff3e670b116a4db6b9e1f0ccba8424640704d
|
[
"MIT"
] | 1
|
2021-05-14T19:53:41.000Z
|
2021-05-14T19:53:41.000Z
|
Chapter 07/combinations.py
|
bpbpublications/Python-Quick-Interview-Guide
|
ab4ff3e670b116a4db6b9e1f0ccba8424640704d
|
[
"MIT"
] | null | null | null |
Chapter 07/combinations.py
|
bpbpublications/Python-Quick-Interview-Guide
|
ab4ff3e670b116a4db6b9e1f0ccba8424640704d
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
    def combine(self, n: int, k: int) -> List[List[int]]:
        """Return all k-element combinations of the integers 1..n.

        Classic DFS/backtracking; the print statements trace the recursion
        for teaching purposes.
        """
        res = []
        #Nested helper function
        def dfs(partial, index):
            # partial: combination built so far; index: smallest candidate
            # value still allowed (keeps each combination strictly increasing).
            print("On entry partial=",partial,"index=",index)
            if len(partial) == k:
                print("Appended ",partial,index)
                res.append(partial)
                return
            #Call dfs for all values of i
            #Backtrace to same after return from dfs
            for i in range(index, n+1):
                print("Before ",partial,"i=",i,"Ind=",index)
                # partial + [i] builds a new list, so no explicit undo is needed.
                dfs(partial + [i], i+1)
                print("After ",partial,"i=",i,"Ind=",index)
            print("Returning")
        #Resume main function
        dfs([], 1)
        return res
#Driver code
sol=Solution()
# Expected result: [[1, 2, 3], [1, 2, 4], [1, 3, 4], [2, 3, 4]] (plus trace prints).
print(sol.combine(4,3))
| 33.423077
| 62
| 0.491369
|
4a0183b4c4f59448901841a41d784ece600ea097
| 7,413
|
py
|
Python
|
main.py
|
JAMJU/KernelMethod
|
e52f5a0cfaefa87073facd88220c311709e513e8
|
[
"MIT"
] | null | null | null |
main.py
|
JAMJU/KernelMethod
|
e52f5a0cfaefa87073facd88220c311709e513e8
|
[
"MIT"
] | null | null | null |
main.py
|
JAMJU/KernelMethod
|
e52f5a0cfaefa87073facd88220c311709e513e8
|
[
"MIT"
] | null | null | null |
import numpy as np
from logistic_regression import logistic_kernel_regression, compute_label
from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig
from kernel_creation import convert_acid_kernel, convert_acid_quad, convert_mismatch_lev, convert_lect_trig, get_mismatch_dict
from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode
from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian
from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted
from SVM import SVM, svm_compute_label
# DNA alphabet and the k-mer vocabularies (k = 3..6) used by the spectrum kernels.
list_letters = ["A", "C", "G", "T"]
list_trig = [a + b + c for a in list_letters for b in list_letters for c in list_letters]
list_quad = [a + b + c + d for a in list_letters for b in list_letters for c in list_letters for d in list_letters]
list_quint = [a + b + c + d + e for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters]
list_six = [a + b + c + d + e + f for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters for f in list_letters]
# Codon table: amino acid name -> list of RNA codons, used by the acid kernels.
# NOTE(review): codons are written with 'U' (RNA) while the sequences above use
# 'T' (DNA); confirm the convert_acid_* functions translate T->U before lookup.
dico_acid = {'Alanine': [ 'GCU', 'GCC', 'GCA', 'GCG'], 'Arginine': ['CGU', 'CGC', 'CGA', 'CGG' , 'AGA', 'AGG'],
             'Asparagine': ['AAU', 'AAC'], 'Acide aspartique': ['GAU', 'GAC'],
             'Cysteine': ['UGU', 'UGC'], 'Glutamine': ['CAA', 'CAG'], 'Acide glutamique':['GAA', 'GAG'],
             'Glycine':['GGU', 'GGC', 'GGA', 'GGG'], 'Histidine': ['CAU', 'CAC'], 'Isoleucine': ['AUU', 'AUC', 'AUA'],
             'Leucine': ['UUA', 'UUG' , 'CUU', 'CUC', 'CUA', 'CUG'], 'Lysine': ['AAA', 'AAG'],
             'Methionine': ['AUG'], 'Phenylalanine':['UUU', 'UUC'], 'Proline' :['CCU', 'CCC', 'CCA', 'CCG'],
             'Pyrrolysine': ['UAG'], 'Selenocysteine':['UGA'], 'Serine':['UCU', 'UCC', 'UCA', 'UCG' , 'AGU', 'AGC'],
             'Threonine':['ACU', 'ACC', 'ACA', 'ACG'], 'Tryptophane':['UGG'], 'Tyrosine':['UAU', 'UAC'],
             'Valine':['GUU', 'GUC', 'GUA', 'GUG'], 'Initiation': ['AUG'], 'Terminaison': ['UAG', 'UAA', 'UGA']}
def is_pos_def(x):
    """Return True when every eigenvalue of *x* is strictly positive,
    i.e. the (square) matrix is positive definite."""
    eigenvalues = np.linalg.eigvals(x)
    return (eigenvalues > 0).all()
## Parameters
lamb_log = 0.0000001    # regularization weight for the kernel logistic regression
lamb_svm = 0.00001      # regularization weight for the SVM
sigma = 0.8             # Gaussian kernel bandwidth
add_param = 10.**(-10)  # jitter added to kernel matrices for numerical stability
list_seq_id = list_six  # k-mer vocabulary currently in use (k = 6)
# Expensive mismatch-kernel precomputations are gated behind these flags.
mis_lev = False
if mis_lev:
    dict_mismatch = get_mismatch_dict(list_seq_id)
mis_dic = False
size_seq = 6  # k-mer length used by the mismatch kernels
nb_mis = 0    # number of allowed mismatches
beg = 0       # reading-frame offset for the "lect" kernels
if mis_dic:
    dict_corres = get_correspondances(list_seq_id, nb_mis, list_letters)
    list_mis_corres = dict_corres.keys()
    print(list_mis_corres)
mis_dic_full = False
if mis_dic_full:
    dict_corres = get_full_corres(list_seq_id, nb_mis, list_letters)
    list_mis_corres = dict_corres.keys()
##
# Per-split predicted labels, accumulated by the training loop below.
list_labels_log = []
list_labels_svm = []
# For each of the three challenge datasets: featurize, train an SVM on 90% of
# the data, monitor on the held-out 10%, predict the test set, and accumulate
# the predictions for the final submission file.
for name in [ "0", "1","2"]:
    print ("beginning loading of the data")
    # Training data
    sequences = read_csv_file_data("data/Xtr"+ name+ ".csv")
    # Alternative featurizers kept for experimentation; only the 5-mer
    # spectral kernel is active.
    #list_converted = convert_spectral_kernel_trig(sequences, list_seq_id)
    #list_converted = convert_spectral_kernel_quad(sequences, list_quad)
    list_converted = convert_spectral_kernel_quint(sequences, list_quint)
    #list_converted = convert_spectral_kernel_quint(sequences, list_quint)
    #list_converted = convert_acid_kernel(sequences, dico_acid)
    #list_converted = convert_acid_quad(sequences, dico_acid, list_quad
    #list_converted = convert_mismatch_lev(sequences, list_seq_id, dict_mismatch, size_seq, nb_mis)
    #list_converted = convert_lect_trig(sequences, list_seq_id, beg)
    #list_converted = convert_lect_acid(sequences, dico_acid, beg)
    #list_converted = convert_mismatch_dico(sequences, dict_corres,list_mis_corres, list_seq_id)
    #list_converted = convert_encode(sequences, list_letters)
    training = np.asarray(list_converted, dtype = float)
    # to avoid huge values and to save time for the logistic regression :
    sm = np.sum(training, axis= 1)
    # NOTE(review): sm holds per-row sums but only sm[0] is used, i.e. every row
    # is divided by the FIRST row's sum (a single global scale). Confirm this is
    # intended rather than per-row normalization (training / sm[:, None]).
    training = training/sm[0]
    mean = np.mean(training, axis= 0)
    training = training - mean
    #vst = np.std(training, axis= 0)
    #training = training / vst
    #save_data_converted("spectral_kernel/Xtr"+ name+ ".csv", training)
    # label training data
    label = read_csv_file_label("data/Ytr"+ name+ ".csv")
    label= np.asarray(label).reshape((len(label), ))
    # select what will be the test for training
    # First 10% of rows become a held-out validation split; the rest is training.
    size_test = int(training.shape[0]/10)
    test_train = training[0:size_test]
    label_test_train = label[0:size_test]
    print( label_test_train.shape)
    size_total = training.shape[0]
    training = training[size_test:size_total]
    label_train = label[size_test:size_total]
    print (label_train.shape)
    # Test data
    # Featurized identically to training and scaled/centered with the TRAINING
    # statistics (sm[0] and mean) so both live in the same feature space.
    sequences_test = read_csv_file_data("data/Xte"+ name+ ".csv")
    #list_converted_test = convert_spectral_kernel_trig(sequences_test, list_seq_id)
    #list_converted_test = convert_spectral_kernel_quad(sequences_test, list_quad)
    list_converted_test = convert_spectral_kernel_quint(sequences_test, list_quint)
    #list_converted_test = convert_acid_kernel(sequences_test, dico_acid)
    #list_converted_test = convert_acid_quad(sequences_test, dico_acid, list_quad)
    #list_converted_test = convert_mismatch_lev(sequences_test, list_seq_id, dict_mismatch, size_seq, nb_mis)
    #list_converted_test = convert_lect_trig(sequences_test, list_seq_id, beg )
    #list_converted_test = convert_lect_acid(sequences_test, dico_acid, beg)
    #list_converted_test = convert_mismatch_dico(sequences_test, dict_corres,list_mis_corres, list_seq_id)
    #list_converted_test = convert_encode(sequences, list_letters)
    testing = np.asarray(list_converted_test, dtype = float)
    # to avoid huge values and to save time for the logistic regression :
    testing = testing/sm[0]
    testing = testing - mean
    #testing = testing/ vst
    # param for each dataset:
    # Dead per-dataset tuning kept as a string literal for reference.
    """if name=="0":
        lamb_svm = 0.000008
        add_param = 10. ** (-10)
    if name=="1":
        lamb_svm = 0.00001
        add_param = 10.**(-10)
    if name == "2":
        lamb_svm = 0.000005
        add_param=10.**(-9)"""
    if name=="2":
        add_param = 10**(-9)
    print ("data loaded")
    # Computing the kernel
    print ("beginning computing K")
    K = compute_K_matrix(training)
    add = add_param*np.identity(K.shape[0])
    K_add = K + add # to make it positive definite
    #K = compute_K_gaussian(training, sigma)
    #K_add = K
    print(K)
    print("K shape", K.shape)
    print(is_pos_def(K_add))
    K_test_train = compute_test_matrix(training, test_train)
    print (K_test_train.shape)
    print ("K computed")
    # Disabled kernel-logistic-regression path, kept as a string literal.
    """#Training : kernel logistic regression
    alpha = logistic_kernel_regression(K, label_train, lamb_log, 15, K_test_train, label_test_train)
    # Testing : kernel logistic regression
    Ktest = compute_test_matrix(training, testing)
    labels_test = compute_label(Ktest, alpha)
    list_labels_log = list_labels_log + labels_test"""
    # Training : SVM
    alpha = SVM(K_add, label_train, lamb_svm, K_test_train, label_test_train)
    print(alpha)
    # Testing : kernel logistic regression
    Ktest = compute_test_matrix(training, testing)
    labels_test = svm_compute_label(Ktest, alpha)
    list_labels_svm = list_labels_svm + labels_test
save_label(0, list_labels_svm,"results/SVM-quint-centered-mixed.csv" )
| 43.863905
| 167
| 0.703629
|
4a01845721bc76fb5208bce7ad30d98667c357ca
| 1,560
|
py
|
Python
|
superset/migrations/versions/937d04c16b64_update_datasources.py
|
Manikantan22/incubator-superset
|
ec325c871e60ae2a050aae595b430d6fc2888d1a
|
[
"Apache-2.0"
] | 6
|
2019-06-14T11:16:54.000Z
|
2020-11-08T16:02:00.000Z
|
superset/migrations/versions/937d04c16b64_update_datasources.py
|
Manikantan22/incubator-superset
|
ec325c871e60ae2a050aae595b430d6fc2888d1a
|
[
"Apache-2.0"
] | 203
|
2019-05-31T11:13:10.000Z
|
2020-03-31T02:50:54.000Z
|
superset/migrations/versions/937d04c16b64_update_datasources.py
|
Manikantan22/incubator-superset
|
ec325c871e60ae2a050aae595b430d6fc2888d1a
|
[
"Apache-2.0"
] | 14
|
2019-05-31T11:32:40.000Z
|
2021-01-28T11:18:16.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""update datasources
Revision ID: 937d04c16b64
Revises: d94d33dbe938
Create Date: 2018-07-20 16:08:10.195843
"""
# revision identifiers, used by Alembic.
revision = "937d04c16b64"  # this migration
down_revision = "d94d33dbe938"  # parent migration this one applies on top of
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Tighten the schema: make ``datasources.datasource_name`` non-nullable."""
    # batch_alter_table rebuilds the table where needed (e.g. SQLite).
    with op.batch_alter_table("datasources") as batch_op:
        batch_op.alter_column(
            "datasource_name",
            existing_type=sa.String(255),
            nullable=False,
        )
def downgrade():
    """Revert the schema: allow ``datasources.datasource_name`` to be NULL again."""
    # batch_alter_table rebuilds the table where needed (e.g. SQLite).
    with op.batch_alter_table("datasources") as batch_op:
        batch_op.alter_column(
            "datasource_name",
            existing_type=sa.String(255),
            nullable=True,
        )
| 31.836735
| 75
| 0.742949
|
4a0184da11900426ae4ccf09e167266adf4a46f4
| 3,879
|
py
|
Python
|
xl_tensorflow/models/vision/detection/body/learning_rates.py
|
Lannister-Xiaolin/xl_tensorflow
|
99e0f458769ee1e45ebf55c789961e40f7d2eeac
|
[
"Apache-2.0"
] | null | null | null |
xl_tensorflow/models/vision/detection/body/learning_rates.py
|
Lannister-Xiaolin/xl_tensorflow
|
99e0f458769ee1e45ebf55c789961e40f7d2eeac
|
[
"Apache-2.0"
] | 1
|
2020-11-13T18:52:23.000Z
|
2020-11-13T18:52:23.000Z
|
xl_tensorflow/models/vision/detection/body/learning_rates.py
|
Lannister-Xiaolin/xl_tensorflow
|
99e0f458769ee1e45ebf55c789961e40f7d2eeac
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate schedule."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
from xl_tensorflow.utils import params_dict
class StepLearningRateWithLinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Class to generate learning rate tensor.

    Piecewise-constant ("step") schedule preceded by a linear warmup phase.
    """
    def __init__(self, total_steps, params):
        """Creates the step learning rate tensor with linear warmup.

        Args:
            total_steps: total number of training steps; stored but not used by
                this schedule (kept for interface parity with the cosine one).
            params: dict or ``params_dict.ParamsDict`` providing
                warmup_learning_rate, warmup_steps, init_learning_rate,
                learning_rate_levels and learning_rate_steps.
        """
        super(StepLearningRateWithLinearWarmup, self).__init__()
        self._total_steps = total_steps
        assert isinstance(params, (dict, params_dict.ParamsDict))
        if isinstance(params, dict):
            # Normalize plain dicts so attribute access below works uniformly.
            params = params_dict.ParamsDict(params)
        self._params = params
    def __call__(self, global_step):
        # Schedule hyperparameters.
        warmup_lr = self._params.warmup_learning_rate
        warmup_steps = self._params.warmup_steps
        init_lr = self._params.init_learning_rate
        lr_levels = self._params.learning_rate_levels
        lr_steps = self._params.learning_rate_steps
        # Linear ramp from warmup_lr to init_lr over the first warmup_steps steps.
        linear_warmup = (
            warmup_lr + tf.cast(global_step, dtype=tf.float32) / warmup_steps *
            (init_lr - warmup_lr))
        learning_rate = tf.where(global_step < warmup_steps, linear_warmup, init_lr)
        # After warmup: switch to lr_levels[i] once global_step reaches
        # lr_steps[i]; later thresholds overwrite earlier ones.
        for next_learning_rate, start_step in zip(lr_levels, lr_steps):
            learning_rate = tf.where(global_step >= start_step, next_learning_rate,
                                     learning_rate)
        return learning_rate
    def get_config(self):
        # Keras-serialization hook; key mirrors the private attribute name.
        return {'_params': self._params.as_dict()}
class CosineLearningRateWithLinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Class to generate learning rate tensor.

    Cosine-decay schedule preceded by a linear warmup phase.
    """
    def __init__(self, total_steps, params):
        """Creates the consine learning rate tensor with linear warmup.

        Args:
            total_steps: total number of training steps; the cosine decay ends
                (reaches 0) at this step.
            params: dict or ``params_dict.ParamsDict`` providing
                warmup_learning_rate, warmup_steps and init_learning_rate.
        """
        super(CosineLearningRateWithLinearWarmup, self).__init__()
        self._total_steps = total_steps
        assert isinstance(params, (dict, params_dict.ParamsDict))
        if isinstance(params, dict):
            # Normalize plain dicts so attribute access below works uniformly.
            params = params_dict.ParamsDict(params)
        self._params = params
    def __call__(self, global_step):
        global_step = tf.cast(global_step, dtype=tf.float32)
        warmup_lr = self._params.warmup_learning_rate
        warmup_steps = self._params.warmup_steps
        init_lr = self._params.init_learning_rate
        total_steps = self._total_steps
        # Linear ramp from warmup_lr to init_lr over the first warmup_steps steps.
        linear_warmup = (
            warmup_lr + global_step / warmup_steps * (init_lr - warmup_lr))
        # Half-cosine from init_lr (at end of warmup) down to 0 (at total_steps).
        cosine_learning_rate = (
            init_lr * (tf.cos(np.pi * (global_step - warmup_steps) /
                              (total_steps - warmup_steps)) + 1.0) / 2.0)
        learning_rate = tf.where(global_step < warmup_steps, linear_warmup,
                                 cosine_learning_rate)
        return learning_rate
    def get_config(self):
        # Keras-serialization hook; key mirrors the private attribute name.
        return {'_params': self._params.as_dict()}
def learning_rate_generator(total_steps, params):
    """The learning rate function generator.

    Returns the schedule instance selected by ``params.type`` ('step' or
    'cosine'); raises ValueError for any other type.
    """
    schedule_classes = {
        'step': StepLearningRateWithLinearWarmup,
        'cosine': CosineLearningRateWithLinearWarmup,
    }
    schedule_cls = schedule_classes.get(params.type)
    if schedule_cls is None:
        raise ValueError('Unsupported learning rate type: {}.'.format(params.type))
    return schedule_cls(total_steps, params)
| 39.181818
| 93
| 0.720031
|
4a018575d0cb9a81124f4c50b800f008f6cd16e4
| 8,781
|
py
|
Python
|
maskrcnn_benchmark/config/paths_catalog.py
|
Sreehari-S/mask-rcnn-benchmark
|
b4434c39fccda80575276308da86b6e944540445
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/config/paths_catalog.py
|
Sreehari-S/mask-rcnn-benchmark
|
b4434c39fccda80575276308da86b6e944540445
|
[
"MIT"
] | 1
|
2020-02-18T12:25:48.000Z
|
2020-02-18T12:25:48.000Z
|
maskrcnn_benchmark/config/paths_catalog.py
|
Sreehari-S/mask-rcnn-benchmark
|
b4434c39fccda80575276308da86b6e944540445
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
    """Catalog mapping dataset names to on-disk paths and loader factories.

    NOTE(review): DATA_DIR is hard-coded to a machine-specific absolute path;
    confirm/override before running on another host.
    """
    DATA_DIR = "/home/uavws/Sreehari/DigestPath/coordinate_data"
    # Relative paths (under DATA_DIR) per dataset. COCO-style entries carry
    # img_dir/ann_file, VOC-style entries carry data_dir/split, and the
    # DAVIS/SignetCell entries carry a single root_dir.
    DATASETS = {
        "coco_2017_train": {
            "img_dir": "coco/train2017",
            "ann_file": "coco/annotations/instances_train2017.json"
        },
        "coco_2017_val": {
            "img_dir": "coco/val2017",
            "ann_file": "coco/annotations/instances_val2017.json"
        },
        "coco_2014_train": {
            "img_dir": "coco/train2014",
            "ann_file": "coco/annotations/instances_train2014.json"
        },
        "coco_2014_val": {
            "img_dir": "coco/val2014",
            "ann_file": "coco/annotations/instances_val2014.json"
        },
        "coco_2014_minival": {
            "img_dir": "coco/val2014",
            "ann_file": "coco/annotations/instances_minival2014.json"
        },
        "coco_2014_valminusminival": {
            "img_dir": "coco/val2014",
            "ann_file": "coco/annotations/instances_valminusminival2014.json"
        },
        "keypoints_coco_2014_train": {
            "img_dir": "coco/train2014",
            "ann_file": "coco/annotations/person_keypoints_train2014.json",
        },
        "keypoints_coco_2014_val": {
            "img_dir": "coco/val2014",
            "ann_file": "coco/annotations/person_keypoints_val2014.json"
        },
        "keypoints_coco_2014_minival": {
            "img_dir": "coco/val2014",
            "ann_file": "coco/annotations/person_keypoints_minival2014.json",
        },
        "keypoints_coco_2014_valminusminival": {
            "img_dir": "coco/val2014",
            "ann_file": "coco/annotations/person_keypoints_valminusminival2014.json",
        },
        "voc_2007_train": {
            "data_dir": "voc/VOC2007",
            "split": "train"
        },
        "voc_2007_train_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_train2007.json"
        },
        "voc_2007_val": {
            "data_dir": "voc/VOC2007",
            "split": "val"
        },
        "voc_2007_val_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_val2007.json"
        },
        "voc_2007_test": {
            "data_dir": "voc/VOC2007",
            "split": "test"
        },
        "voc_2007_test_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_test2007.json"
        },
        "voc_2012_train": {
            "data_dir": "voc/VOC2012",
            "split": "train"
        },
        "voc_2012_train_cocostyle": {
            "img_dir": "voc/VOC2012/JPEGImages",
            "ann_file": "voc/VOC2012/Annotations/pascal_train2012.json"
        },
        "voc_2012_val": {
            "data_dir": "voc/VOC2012",
            "split": "val"
        },
        "voc_2012_val_cocostyle": {
            "img_dir": "voc/VOC2012/JPEGImages",
            "ann_file": "voc/VOC2012/Annotations/pascal_val2012.json"
        },
        "voc_2012_test": {
            "data_dir": "voc/VOC2012",
            "split": "test"
            # PASCAL VOC2012 doesn't made the test annotations available, so there's no json annotation
        },
        "cityscapes_fine_instanceonly_seg_train_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
        },
        "cityscapes_fine_instanceonly_seg_val_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json"
        },
        "cityscapes_fine_instanceonly_seg_test_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json"
        },
        "DAVIS":{
            "root_dir" :"YOLO_data/train",
        },
        "SignetCell":{
            "root_dir": "train_test_points_fold_3",
        }
    }
    @staticmethod
    def get(name):
        """Return ``{"factory": <Dataset class name>, "args": <ctor kwargs>}``
        for *name*; raise RuntimeError for an unknown dataset.

        Dispatch is by substring match, checked in order: "coco", "voc",
        "DAVIS", "SignetCell".
        """
        if "coco" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                root=os.path.join(data_dir, attrs["img_dir"]),
                ann_file=os.path.join(data_dir, attrs["ann_file"]),
            )
            return dict(
                factory="COCODataset",
                args=args,
            )
        elif "voc" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                data_dir=os.path.join(data_dir, attrs["data_dir"]),
                split=attrs["split"],
            )
            return dict(
                factory="PascalVOCDataset",
                args=args,
            )
        elif "DAVIS" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                data_root_dir=os.path.join(data_dir, attrs["root_dir"]),
            )
            return dict(
                factory="DavisDataset",
                args=args,
            )
        elif "SignetCell" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                data_root_dir=os.path.join(data_dir, attrs["root_dir"]),
            )
            return dict(
                factory="SignetCellTrainingDataset",
                args=args,
            )
        raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
    """Resolve symbolic Detectron model identifiers to download URLs."""
    S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
    # ImageNet-pretrained backbone checkpoints (relative paths on S3).
    C2_IMAGENET_MODELS = {
        "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
        "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
        "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
        "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
        "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
    }
    C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
    # <model_id>/<model_name> -> upload signature embedded in the URL.
    C2_DETECTRON_MODELS = {
        "35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
        "35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
        "35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
        "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
        "35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
        "35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
        "35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
        "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
        "37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
        # keypoints
        "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
    }
    @staticmethod
    def get(name):
        """Dispatch *name* to the matching resolver; RuntimeError if unknown."""
        if name.startswith("Caffe2Detectron/COCO"):
            return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
        if name.startswith("ImageNetPretrained"):
            return ModelCatalog.get_c2_imagenet_pretrained(name)
        raise RuntimeError("model not present in the catalog {}".format(name))
    @staticmethod
    def get_c2_imagenet_pretrained(name):
        """Return the URL of an ImageNet-pretrained backbone checkpoint."""
        catalog_key = name[len("ImageNetPretrained/"):]
        relative_path = ModelCatalog.C2_IMAGENET_MODELS[catalog_key]
        return "/".join([ModelCatalog.S3_C2_DETECTRON_URL, relative_path])
    @staticmethod
    def get_c2_detectron_12_2017_baselines(name):
        """Return the URL of a Caffe2 Detectron 12/2017 baseline model.

        Models are stored on S3 as
        prefix/<model_id>/12_2017_baselines/<model_name>.yaml.<signature>/suffix,
        while the catalog identifier is Caffe2Detectron/COCO/<model_id>/<model_name>.
        """
        base_url = ModelCatalog.S3_C2_DETECTRON_URL
        # Keypoint models were trained on the keypoints_* dataset splits.
        dataset_tag = "keypoints_" if "keypoint" in name else ""
        suffix = ModelCatalog.C2_DETECTRON_SUFFIX.format(dataset_tag, dataset_tag)
        catalog_key = name[len("Caffe2Detectron/COCO/"):]
        model_id, model_name = catalog_key.split("/")
        signature = ModelCatalog.C2_DETECTRON_MODELS[catalog_key]
        unique_name = ".".join(["{}.yaml".format(model_name), signature])
        return "/".join([base_url, model_id, "12_2017_baselines", unique_name, suffix])
| 39.733032
| 121
| 0.594124
|
4a018655e1f031016e0ef3027ee298bf12b9ec0e
| 7,386
|
py
|
Python
|
MyPyQt5LearnExamples/ComplexUITest/ComplexUI.py
|
prayjourney/on_the_way_ing
|
88d04752b7b18c6d60d74b18357f6b2c09c9748e
|
[
"MIT"
] | null | null | null |
MyPyQt5LearnExamples/ComplexUITest/ComplexUI.py
|
prayjourney/on_the_way_ing
|
88d04752b7b18c6d60d74b18357f6b2c09c9748e
|
[
"MIT"
] | null | null | null |
MyPyQt5LearnExamples/ComplexUITest/ComplexUI.py
|
prayjourney/on_the_way_ing
|
88d04752b7b18c6d60d74b18357f6b2c09c9748e
|
[
"MIT"
] | 1
|
2020-09-29T14:17:39.000Z
|
2020-09-29T14:17:39.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ComplexUI.ui'
#
# Created by: PyQt5 UI code generator 5.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI definition for the ComplexUI main window.

    Tab 1 nests a second tab widget (a tree view and a calendar page);
    tab 2 holds radio buttons, a dial + LCD pair, a font combo with a
    preview label, and a progress bar.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree and attach it to *MainWindow* (801x600)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(801, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Outer tab widget filling the central area.
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 801, 551))
        self.tabWidget.setObjectName("tabWidget")
        # --- Tab 1: nested tab widget (tree page + calendar page) ---
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.tabWidget_2 = QtWidgets.QTabWidget(self.tab)
        self.tabWidget_2.setGeometry(QtCore.QRect(0, 0, 791, 531))
        self.tabWidget_2.setObjectName("tabWidget_2")
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.treeWidget = QtWidgets.QTreeWidget(self.tab_3)
        self.treeWidget.setGeometry(QtCore.QRect(0, 0, 781, 501))
        self.treeWidget.setObjectName("treeWidget")
        # Two top-level items; the second has one child (texts set in retranslateUi).
        item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
        item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
        item_1 = QtWidgets.QTreeWidgetItem(item_0)
        self.tabWidget_2.addTab(self.tab_3, "")
        self.tab_4 = QtWidgets.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.tab_4)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.calendarWidget = QtWidgets.QCalendarWidget(self.verticalLayoutWidget)
        self.calendarWidget.setObjectName("calendarWidget")
        self.verticalLayout.addWidget(self.calendarWidget)
        self.dateEdit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
        self.dateEdit.setObjectName("dateEdit")
        self.verticalLayout.addWidget(self.dateEdit)
        self.tabWidget_2.addTab(self.tab_4, "")
        self.tabWidget.addTab(self.tab, "")
        # --- Tab 2: radio group, dial+LCD group, font combo, label, progress bar ---
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.groupBox = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox.setGeometry(QtCore.QRect(10, 20, 331, 131))
        self.groupBox.setObjectName("groupBox")
        self.widget = QtWidgets.QWidget(self.groupBox)
        self.widget.setGeometry(QtCore.QRect(90, 30, 55, 62))
        self.widget.setObjectName("widget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.radioButton = QtWidgets.QRadioButton(self.widget)
        self.radioButton.setObjectName("radioButton")
        self.verticalLayout_2.addWidget(self.radioButton)
        self.radioButton_2 = QtWidgets.QRadioButton(self.widget)
        self.radioButton_2.setObjectName("radioButton_2")
        self.verticalLayout_2.addWidget(self.radioButton_2)
        self.radioButton_3 = QtWidgets.QRadioButton(self.widget)
        self.radioButton_3.setObjectName("radioButton_3")
        self.verticalLayout_2.addWidget(self.radioButton_3)
        self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_2.setGeometry(QtCore.QRect(350, 20, 431, 131))
        self.groupBox_2.setObjectName("groupBox_2")
        self.widget1 = QtWidgets.QWidget(self.groupBox_2)
        self.widget1.setGeometry(QtCore.QRect(80, 20, 311, 102))
        self.widget1.setObjectName("widget1")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget1)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.dial = QtWidgets.QDial(self.widget1)
        self.dial.setObjectName("dial")
        self.horizontalLayout.addWidget(self.dial)
        self.lcdNumber = QtWidgets.QLCDNumber(self.widget1)
        self.lcdNumber.setObjectName("lcdNumber")
        self.horizontalLayout.addWidget(self.lcdNumber)
        self.fontComboBox = QtWidgets.QFontComboBox(self.tab_2)
        self.fontComboBox.setGeometry(QtCore.QRect(10, 190, 331, 22))
        self.fontComboBox.setObjectName("fontComboBox")
        self.label = QtWidgets.QLabel(self.tab_2)
        self.label.setGeometry(QtCore.QRect(10, 220, 331, 211))
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.progressBar = QtWidgets.QProgressBar(self.tab_2)
        self.progressBar.setGeometry(QtCore.QRect(10, 470, 781, 23))
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.tabWidget.addTab(self.tab_2, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 801, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        self.tabWidget_2.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (translatable via Qt's tr mechanism)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "ComplexUI"))
        self.treeWidget.headerItem().setText(0, _translate("MainWindow", "第1列"))
        self.treeWidget.headerItem().setText(1, _translate("MainWindow", "New Column"))
        # Temporarily disable sorting while item texts are assigned.
        __sortingEnabled = self.treeWidget.isSortingEnabled()
        self.treeWidget.setSortingEnabled(False)
        self.treeWidget.topLevelItem(0).setText(0, _translate("MainWindow", "子项目1"))
        self.treeWidget.topLevelItem(1).setText(0, _translate("MainWindow", "子项目2"))
        self.treeWidget.topLevelItem(1).child(0).setText(0, _translate("MainWindow", "子子项目1"))
        self.treeWidget.setSortingEnabled(__sortingEnabled)
        self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), _translate("MainWindow", "树"))
        self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), _translate("MainWindow", "日历"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "选项卡1"))
        self.groupBox.setTitle(_translate("MainWindow", "功能选择"))
        self.radioButton.setText(_translate("MainWindow", "默认"))
        self.radioButton_2.setText(_translate("MainWindow", "重置"))
        self.radioButton_3.setText(_translate("MainWindow", "选项3"))
        self.groupBox_2.setTitle(_translate("MainWindow", "移动刻度盘"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "选项卡2"))
| 55.533835
| 106
| 0.694693
|
4a01869f0f6c02d25669b76a8da9ba7ad0c7d0b4
| 2,102
|
py
|
Python
|
Exercise-3/sensor_stick/scripts/capture_features.py
|
cielsys/RoboNDProj3_Exercises
|
3cafd0e040ad6fe215493fec1d779a9e8f317b11
|
[
"MIT"
] | 105
|
2017-07-05T01:39:33.000Z
|
2022-01-30T20:31:46.000Z
|
Exercise-3/sensor_stick/scripts/capture_features.py
|
zhanghming/RoboND-Perception-Exercises
|
2607a0e83907f086bcff4e461a394eb0a607e7a4
|
[
"MIT"
] | 20
|
2017-07-03T18:22:14.000Z
|
2021-05-03T10:51:24.000Z
|
Exercise-3/sensor_stick/scripts/capture_features.py
|
zhanghming/RoboND-Perception-Exercises
|
2607a0e83907f086bcff4e461a394eb0a607e7a4
|
[
"MIT"
] | 263
|
2017-07-06T00:10:52.000Z
|
2021-12-31T20:35:08.000Z
|
#!/usr/bin/env python
import numpy as np
import pickle
import rospy
from sensor_stick.pcl_helper import *
from sensor_stick.training_helper import spawn_model
from sensor_stick.training_helper import delete_model
from sensor_stick.training_helper import initial_setup
from sensor_stick.training_helper import capture_sample
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from sensor_stick.srv import GetNormals
from geometry_msgs.msg import Pose
from sensor_msgs.msg import PointCloud2
def get_normals(cloud):
    """Query the /feature_extractor/get_normals service for the surface
    normals of *cloud* and return the resulting cluster."""
    normals_service = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
    response = normals_service(cloud)
    return response.cluster
if __name__ == '__main__':
    rospy.init_node('capture_node')
    # Object models to capture; each gets 5 feature samples from random poses.
    models = [\
       'beer',
       'bowl',
       'create',
       'disk_part',
       'hammer',
       'plastic_cup',
       'soda_can']
    # Disable gravity and delete the ground plane
    initial_setup()
    labeled_features = []
    for model_name in models:
        spawn_model(model_name)
        for i in range(5):
            # make five attempts to get a valid a point cloud then give up
            sample_was_good = False
            try_count = 0
            while not sample_was_good and try_count < 5:
                sample_cloud = capture_sample()
                sample_cloud_arr = ros_to_pcl(sample_cloud).to_array()
                # Check for invalid clouds.
                if sample_cloud_arr.shape[0] == 0:
                    print('Invalid cloud detected')
                    try_count += 1
                else:
                    sample_was_good = True
            # Extract histogram features
            # NOTE(review): if all 5 retries return empty clouds, the last
            # (empty) sample_cloud is still featurized below — confirm intended.
            chists = compute_color_histograms(sample_cloud, using_hsv=False)
            normals = get_normals(sample_cloud)
            nhists = compute_normal_histograms(normals)
            feature = np.concatenate((chists, nhists))
            labeled_features.append([feature, model_name])
        delete_model()
    # NOTE(review): the file handle passed to pickle.dump is never closed
    # explicitly — relies on interpreter cleanup at exit.
    pickle.dump(labeled_features, open('training_set.sav', 'wb'))
| 30.463768
| 87
| 0.666984
|
4a0187384e75384c081de5364e606c6c8547f720
| 6,089
|
py
|
Python
|
venv/lib/python2.7/site-packages/samples/sample_kinesis_wordputter.py
|
bopopescu/localstackvenv
|
3b1003c5fcca94fbd57ea722128d93b93119d2b5
|
[
"Apache-2.0"
] | 1
|
2021-05-11T12:09:58.000Z
|
2021-05-11T12:09:58.000Z
|
venv/lib/python2.7/site-packages/samples/sample_kinesis_wordputter.py
|
bopopescu/localstackvenv
|
3b1003c5fcca94fbd57ea722128d93b93119d2b5
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python2.7/site-packages/samples/sample_kinesis_wordputter.py
|
bopopescu/localstackvenv
|
3b1003c5fcca94fbd57ea722128d93b93119d2b5
|
[
"Apache-2.0"
] | 2
|
2020-01-13T17:51:02.000Z
|
2020-07-24T17:50:44.000Z
|
#!env python
'''
Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Amazon Software License (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://aws.amazon.com/asl/
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
'''
from __future__ import print_function
import sys, random, time, argparse
from boto import kinesis
def get_stream_status(conn, stream_name):
    '''
    Return the current status of the provided stream.

    :type conn: boto.kinesis.layer1.KinesisConnection
    :param conn: A connection to Amazon Kinesis

    :type stream_name: str
    :param stream_name: The name of a stream.

    :rtype: str
    :return: The stream's status
    '''
    response = conn.describe_stream(stream_name)
    return response.get('StreamDescription').get('StreamStatus')
def wait_for_stream(conn, stream_name):
    '''
    Block until the provided stream becomes ACTIVE, polling every 3 seconds.

    :type conn: boto.kinesis.layer1.KinesisConnection
    :param conn: A connection to Amazon Kinesis

    :type stream_name: str
    :param stream_name: The name of a stream.
    '''
    SLEEP_TIME_SECONDS = 3
    while True:
        status = get_stream_status(conn, stream_name)
        if status == 'ACTIVE':
            break
        print('{stream_name} has status: {status}, sleeping for {secs} seconds'.format(
            stream_name=stream_name,
            status=status,
            secs=SLEEP_TIME_SECONDS))
        time.sleep(SLEEP_TIME_SECONDS)
def put_words_in_stream(conn, stream_name, words):
    '''
    Put each word in the provided list of words into the stream.
    Failures are reported on stderr and do not stop the remaining words.

    :type conn: boto.kinesis.layer1.KinesisConnection
    :param conn: A connection to Amazon Kinesis

    :type stream_name: str
    :param stream_name: The name of a stream.

    :type words: list
    :param words: A list of strings to put into the stream.
    '''
    for word in words:
        try:
            # The word doubles as both record data and partition key.
            conn.put_record(stream_name, word, word)
            print("Put word: {} into stream: {}".format(word, stream_name))
        except Exception as err:
            sys.stderr.write(
                "Encountered an exception while trying to put a word: {} into stream: {}"
                " exception was: {}".format(word, stream_name, err))
def put_words_in_stream_periodically(conn, stream_name, words, period_seconds):
    '''
    Puts words into a stream, then waits for the period to elapse then puts the words in again. There is no strict
    guarantee about how frequently we put each word into the stream, just that we will wait between iterations.
    :type conn: boto.kinesis.layer1.KinesisConnection
    :param conn: A connection to Amazon Kinesis
    :type stream_name: str
    :param stream_name: The name of a stream.
    :type words: list
    :param words: A list of strings to put into the stream.
    :type period_seconds: int
    :param period_seconds: How long to wait, in seconds, between iterations over the list of words.
    '''
    # Intentionally an infinite loop: the caller stops the producer by
    # terminating the process (e.g. Ctrl-C).
    while True:
        put_words_in_stream(conn, stream_name, words)
        print("Sleeping for {period_seconds} seconds".format(period_seconds=period_seconds))
        time.sleep(period_seconds)
if __name__ == '__main__':
    # The triple-quoted text below is passed positionally to ArgumentParser
    # (as its `prog`), so these usage examples appear in --help output.
    parser = argparse.ArgumentParser('''
Puts words into a stream.
# Using the -w option multiple times
sample_wordputter.py -s STREAM_NAME -w WORD1 -w WORD2 -w WORD3 -p 3
# Passing input from STDIN
echo "WORD1\\nWORD2\\nWORD3" | sample_wordputter.py -s STREAM_NAME -p 3
''')
    parser.add_argument("-s", "--stream", dest="stream_name", required=True,
                        help="The stream you'd like to create.", metavar="STREAM_NAME",)
    parser.add_argument("-r", "--regionName", "--region", dest="region", default="us-east-1",
                        help="The region you'd like to make this stream in. Default is 'us-east-1'", metavar="REGION_NAME",)
    parser.add_argument("-w", "--word", dest="words", default=[], action="append",
                        help="A word to add to the stream. Can be specified multiple times to add multiple words.", metavar="WORD",)
    parser.add_argument("-p", "--period", dest="period", type=int,
                        help="If you'd like to repeatedly put words into the stream, this option provides the period for putting "
                        + "words into the stream in SECONDS. If no period is given then the words are put once.",
                        metavar="SECONDS",)
    args = parser.parse_args()
    stream_name = args.stream_name
    '''
    Getting a connection to Amazon Kinesis will require that you have your credentials available to
    one of the standard credentials providers.
    '''
    print("Connecting to stream: {s} in {r}".format(s=stream_name, r=args.region))
    conn = kinesis.connect_to_region(region_name = args.region)
    # Ensure the stream exists and is ACTIVE before producing records.
    try:
        status = get_stream_status(conn, stream_name)
        if 'DELETING' == status:
            print('The stream: {s} is being deleted, please rerun the script.'.format(s=stream_name))
            sys.exit(1)
        elif 'ACTIVE' != status:
            wait_for_stream(conn, stream_name)
    except Exception:
        # FIX: the original bare `except:` also caught the SystemExit raised
        # by sys.exit(1) above, so the DELETING branch wrongly fell through
        # to create_stream. `except Exception` lets SystemExit propagate.
        # We'll assume the stream didn't exist so we will try to create it with just one shard
        conn.create_stream(stream_name, 1)
        wait_for_stream(conn, stream_name)
    # Now the stream should exist
    if len(args.words) == 0:
        print('No -w options provided. Waiting on input from STDIN')
        words = [l.strip() for l in sys.stdin.readlines() if l.strip() != '']
    else:
        words = args.words
    # With -p the producer loops forever; without it the words are put once.
    if args.period != None:
        put_words_in_stream_periodically(conn, stream_name, words, args.period)
    else:
        put_words_in_stream(conn, stream_name, words)
| 40.593333
| 130
| 0.670882
|
4a0187eade5266cbbc02a8581ca27d67e2a9a680
| 4,699
|
py
|
Python
|
build/x86/python/m5/internal/param_TaggedPrefetcher.py
|
billionshang/gem5
|
18cc4294f32315595f865d07d1f33434e92b06b2
|
[
"BSD-3-Clause"
] | null | null | null |
build/x86/python/m5/internal/param_TaggedPrefetcher.py
|
billionshang/gem5
|
18cc4294f32315595f865d07d1f33434e92b06b2
|
[
"BSD-3-Clause"
] | null | null | null |
build/x86/python/m5/internal/param_TaggedPrefetcher.py
|
billionshang/gem5
|
18cc4294f32315595f865d07d1f33434e92b06b2
|
[
"BSD-3-Clause"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG boilerplate: locate and load the compiled extension module
# `_param_TaggedPrefetcher`. On Python >= 2.6 it is looked up next to this
# file via `imp` so the matching build is found; older interpreters fall
# back to a plain import.
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_param_TaggedPrefetcher', [dirname(__file__)])
        except ImportError:
            # not found next to this file: fall back to the normal import path
            import _param_TaggedPrefetcher
            return _param_TaggedPrefetcher
        if fp is not None:
            try:
                _mod = imp.load_module('_param_TaggedPrefetcher', fp, pathname, description)
            finally:
                # imp.find_module opened the file; always close it
                fp.close()
            return _mod
    _param_TaggedPrefetcher = swig_import_helper()
    del swig_import_helper
else:
    import _param_TaggedPrefetcher
del version_info
try:
    # Alias the builtin so the generated code below can use it uniformly.
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # SWIG-generated setter: route attribute writes to the C++ proxy.
    if (name == "thisown"):
        # ownership flag lives on the underlying SwigPyObject
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    # generated per-class setter methods take precedence
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        object.__setattr__(self, name, value)
    else:
        # static=1 forbids adding brand-new Python-side attributes
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown names become plain instance attributes.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # SWIG-generated getter mirroring _swig_setattr_nondynamic.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    # repr of a proxy object; falls back to "" if the C++ side is unavailable.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    # New-style class support probe (always succeeds on Python >= 2.2).
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    # Wrap a setattr implementation so only pre-existing attributes
    # (plus the special "this"/"thisown" names) may be assigned.
    def set_attr(self, name, value):
        if (name == "thisown"):
            return self.this.own(value)
        if hasattr(self, name) or (name == "this"):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import m5.internal.param_QueuedPrefetcher
import m5.internal.param_BasePrefetcher
import m5.internal.param_System
import m5.internal.enum_MemoryMode
import m5.internal.AddrRange_vector
import m5.internal.AbstractMemory_vector
import m5.internal.param_AbstractMemory
import m5.internal.param_MemObject
import m5.internal.param_ClockedObject
import m5.internal.param_ClockDomain
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
class TaggedPrefetcher(m5.internal.param_QueuedPrefetcher.QueuedPrefetcher):
    # SWIG proxy for the C++ TaggedPrefetcher SimObject. Instances are only
    # created on the C++ side; the Python constructor is deliberately blocked.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
# Register the proxy class with the extension module.
TaggedPrefetcher_swigregister = _param_TaggedPrefetcher.TaggedPrefetcher_swigregister
TaggedPrefetcher_swigregister(TaggedPrefetcher)
class TaggedPrefetcherParams(m5.internal.param_QueuedPrefetcher.QueuedPrefetcherParams):
    # SWIG proxy for the generated TaggedPrefetcherParams parameter struct.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def create(self):
        # Instantiate the C++ TaggedPrefetcher configured by these params.
        return _param_TaggedPrefetcher.TaggedPrefetcherParams_create(self)
    degree = _swig_property(_param_TaggedPrefetcher.TaggedPrefetcherParams_degree_get, _param_TaggedPrefetcher.TaggedPrefetcherParams_degree_set)
    def __init__(self):
        this = _param_TaggedPrefetcher.new_TaggedPrefetcherParams()
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    __swig_destroy__ = _param_TaggedPrefetcher.delete_TaggedPrefetcherParams
    __del__ = lambda self: None
# Register the proxy class with the extension module.
TaggedPrefetcherParams_swigregister = _param_TaggedPrefetcher.TaggedPrefetcherParams_swigregister
TaggedPrefetcherParams_swigregister(TaggedPrefetcherParams)
| 32.406897
| 145
| 0.708874
|
4a0188c5fe2b389d9863605bf1aafc3174e34c48
| 4,042
|
py
|
Python
|
babypandas.py
|
lexual/babypandas
|
a649bac4fdae7a3c3c2159c2a85d0f82339ee325
|
[
"BSD-3-Clause"
] | 6
|
2016-06-14T23:34:04.000Z
|
2020-05-27T14:04:18.000Z
|
babypandas.py
|
lexual/babypandas
|
a649bac4fdae7a3c3c2159c2a85d0f82339ee325
|
[
"BSD-3-Clause"
] | null | null | null |
babypandas.py
|
lexual/babypandas
|
a649bac4fdae7a3c3c2159c2a85d0f82339ee325
|
[
"BSD-3-Clause"
] | 4
|
2017-04-10T02:52:46.000Z
|
2020-10-31T02:35:11.000Z
|
# this is very quick hacky pure python dataframe/series.
# no index support
# just useful for tabular datastructure
from operator import itemgetter
# returns Series, not a list
# mainly useful, so we can do chaining.
# e.g. s.map(foo).map(bar), etc.
def return_series(fn):
def wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
return Series(result)
return wrapper
class Series(list):
@return_series
def map(self, fn):
return map(fn, self)
return self.__class__(map(fn, self))
def sum(self):
return sum(self)
@return_series
def __eq__(self, other):
if hasattr(other, '__iter__'):
return [x == y for x, y in zip(self, other)]
else:
return [x == other for x in self]
@return_series
def __ne__(self, other):
return [not x for x in self == other]
@return_series
def __lt__(self, other):
if hasattr(other, '__iter__'):
return [x < y for x, y in zip(self, other)]
else:
result = [x < other for x in self]
return result
@return_series
def __gt__(self, other):
if hasattr(other, '__iter__'):
return [x > y for x, y in zip(self, other)]
else:
result = [x > other for x in self]
return result
return [not x for x in self < other]
@return_series
def __le__(self, other):
return [not x for x in self > other]
@return_series
def __ge__(self, other):
return [not x for x in self < other]
@return_series
def __add__(self, other):
if hasattr(other, '__iter__'):
return [x + y for x, y in zip(self, other)]
else:
return [x + other for x in self]
@return_series
def __mul__(self, other):
if hasattr(other, '__iter__'):
return [x * y for x, y in zip(self, other)]
else:
return [x * other for x in self]
def __rmul__(self, other):
return self.__mul__(other)
@return_series
def __div__(self, other):
if hasattr(other, '__iter__'):
return [x / y for x, y in zip(self, other)]
else:
return [x / other for x in self]
class DataFrame:
def __init__(self, dict_list=None):
if dict_list is None:
self._data = []
self.columns = []
else:
self._data = dict_list
self.columns = dict_list[0].keys()
def __setitem__(self, key, item):
if hasattr(item, '__iter__'):
self._data = [dict(row, **{key: x})
for row, x in zip(self._data, item)]
else:
self._data = [dict(row, **{key: item}) for row in self._data]
if key not in self.columns:
self.columns.append(key)
def __getitem__(self, key):
if hasattr(key, '__iter__'):
if isinstance(key[0], bool):
result = [x for x, y in zip(self._data, key) if y]
return self.__class__(result)
else:
result = self.copy()
for col in result:
if col not in key:
del result[col]
return result
else:
return Series([row[key] for row in self._data])
def __repr__(self):
result = ['\t'.join(self.columns)]
for row in self._data:
line = '\t'.join(str(row[col]) for col in self.columns)
result.append(line)
return '\n'.join(result)
def copy(self):
return self.__class__(self._data)
def __delitem__(self, key):
getter = itemgetter(*[col for col in self.columns if col != key])
self._data = [getter(row) for row in self._data]
self.columns.remove(key)
def __contains__(self, key):
return key in self.columns
def __iter__(self):
for column in self.columns:
yield column
def __len__(self):
return len(self._data)
| 28.464789
| 73
| 0.548243
|
4a0189069210d07d6abccad243afe3b446e73e8b
| 1,134
|
py
|
Python
|
tests/storage/cases/test_KT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-08-11T02:31:24.000Z
|
2020-08-11T02:31:24.000Z
|
tests/storage/cases/test_KT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/storage/cases/test_KT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc(TestCase):
    # Round-trip and formatting checks for one carthagenet contract's
    # storage, loaded once per class from a JSON fixture.
    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        cls.contract = get_data('storage/carthagenet/KT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc.json')
    def test_storage_encoding_KT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc(self):
        # Decode the stored value against its type, re-encode it, and verify
        # the round trip reproduces the original micheline expression.
        type_expr = self.contract['script']['code'][1]
        val_expr = self.contract['script']['storage']
        schema = build_schema(type_expr)
        decoded = decode_micheline(val_expr, type_expr, schema)
        actual = encode_micheline(decoded, schema)
        self.assertEqual(val_expr, actual)
    def test_storage_schema_KT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc(self):
        # Building a schema from the parameter section must not raise.
        _ = build_schema(self.contract['script']['code'][0])
    def test_storage_format_KT1XKBeSeSsZppNGpT8Ly7mnL2nMWQ5dkxDc(self):
        # Converting code and storage to Michelson text must not raise.
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
| 40.5
| 112
| 0.749559
|
4a018bb3396378e8b392a3db4e0446306f81019f
| 10,399
|
py
|
Python
|
album2embedcodes.py
|
arne-cl/flickr-album-embed-codes
|
aad4c8720442939ee7fecd911e0122be6e41c6f6
|
[
"BSD-3-Clause"
] | 1
|
2015-12-27T17:47:20.000Z
|
2015-12-27T17:47:20.000Z
|
album2embedcodes.py
|
arne-cl/flickr-album-embed-codes
|
aad4c8720442939ee7fecd911e0122be6e41c6f6
|
[
"BSD-3-Clause"
] | 1
|
2015-12-29T22:58:36.000Z
|
2015-12-29T22:58:36.000Z
|
album2embedcodes.py
|
arne-cl/flickr-album-embed-codes
|
aad4c8720442939ee7fecd911e0122be6e41c6f6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <flickr.programming@arne.cl>
"""
This module contains code for retrieving HTML embed codes for all images in a
given Flickr photoset/album URL.
Unforturnately, not all photosets which are visible on the website can be
accessed via Flickr's REST API. We can't simply extract the URLs from the
HTML source either, so we'll have to use a Javascript-capable library for
scraping (i.e. selenium).
"""
from __future__ import absolute_import, division, print_function
import argparse
import codecs
from collections import OrderedDict
import re
import sys
import time
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.common.exceptions import (
NoSuchElementException, WebDriverException)
# string that includes the width, height and URL of a hotlinked image on
# a Flickr album page
STYLE_STRING_PATTERN = """
.*? # ignore
width:\ (?P<width>\d+) # width
.*? # ignore
height:\ (?P<height>\d+) # height
.*? # ignore
//(?P<url>.*)" # hotlink URL
"""
# The URL of an image used on a Flickr album page is not the same as the
# one they use in their HTML embed codes. Since we're playing nice,
# we will only URLs that would also be used for embed codes.
HOTLINK_URL_REGEX = """
^ # beginning of string
(?P<subdomain>c\d+) # subdomain, e.g. c1
\.staticflickr.com/ # domain
(?P<farm_id>\d+)/ # ID of the server farm
(?P<image_id>.*?) # ID of the image
(?P<image_size>(_\S)?) # optional suffix, e.g. '_z'
# (for dimensions other than 500x334px)
\.jpg # file extension
$ # end of string
"""
def get_orientation(width, height):
    """
    Determine whether an image is wider than it is tall.

    Returns
    -------
    orientation : str
        'landscape' if width > height, otherwise 'portrait'
        (square images therefore count as 'portrait').
    """
    if width > height:
        return 'landscape'
    return 'portrait'
def _get_visible_photos(browser, known_urls):
    """
    extracts all *currently visible* photo URLs from a Flickr photoset/album
    page, converts them into "embed code compatible" (i.e. sanctioned by
    Flickr) URLs and updates ``known_urls`` with them.
    Parameters
    ----------
    browser : selenium webdriver instance (e.g. webdriver.Firefox)
        a webdriver with the album page already loaded
    known_urls : dict(str: dict(str: str))
        a dictionary mapping from embed code compatible image URLs to a
        dictionary holding some metadata ('image_page', 'title' and
        'orientation'). We'll update this dict, if we find new images
        after scrolling down the page.
    Returns
    -------
    known_urls : dict(str: dict(str: str))
        the same dictionary, updated in place and also returned
    """
    # every photo tile on the album page carries the CSS class 'awake'
    image_elems = browser.find_elements_by_class_name('awake')
    for elem in image_elems:
        # the tile's pixel dimensions and hotlink URL are embedded in its
        # inline `style` attribute; STYLE_STRING_PATTERN pulls them out
        style_attrib = elem.get_attribute('style')
        match = re.match(STYLE_STRING_PATTERN, style_attrib, re.VERBOSE)
        width = int(match.group('width'))
        height = int(match.group('height'))
        orientation = get_orientation(width, height)
        url = match.group('url')
        # URL of the page that only shows one image
        try:
            image_page_elem = elem.find_element_by_class_name('overlay')
            image_page = image_page_elem.get_attribute('href')
        except NoSuchElementException as e:
            # no per-image overlay: fall back to the album page URL
            image_page = browser.current_url
        # title of the image
        try:
            title_elem = elem.find_element_by_class_name('interaction-bar')
            title_str = title_elem.get_attribute('title')
            # tooltip reads "<title> by <user>"; keep only the title part
            title = re.match('^(?P<title>.*) by.*$', title_str).group('title')
        except NoSuchElementException as e:
            title = ''
        try:
            embed_url = hotlink_url2embed_url(url)
            if not embed_url in known_urls:
                known_urls[embed_url] = {
                    'image_page': image_page,
                    'title': title,
                    'orientation': orientation}
        except AttributeError as e:
            # hotlink_url2embed_url's regex didn't match (match is None)
            raise AttributeError("Warning: can't convert URL: {}".format(url))
    return known_urls
def _get_page_photos(browser):
    """
    returns all photo URLs from a Flickr photoset/album page, by scrolling
    down multiple times until no new photos appear.
    Parameters
    ----------
    browser : selenium webdriver instance (e.g. webdriver.Firefox)
        a webdriver with the album page already loaded
    Returns
    -------
    OrderedDict(str: dict(str: str))
        embed-code-compatible image URLs mapped to their metadata
    """
    urls = OrderedDict()
    num_of_urls = 0
    # NOTE(review): the loop also stops once 100 or more URLs have been
    # collected, so pages with more photos may be truncated — confirm this
    # cap is intended.
    while num_of_urls < 100:
        # this seems to be the canonical way to scroll "to the bottom"
        browser.execute_script(
            "window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(3)
        current_num_of_urls = len(_get_visible_photos(
            browser, urls))
        if current_num_of_urls > num_of_urls:
            num_of_urls = current_num_of_urls
        else:
            # scrolling revealed nothing new: we've reached the bottom
            break
    return _get_visible_photos(browser, urls)
def get_photo_urls(album_url, browser, wait=2):
    """
    returns the URLs of all photos belonging to the given album / photoset,
    including any follow-up pagination pages.
    Parameters
    ----------
    album_url : str
        URL of a Flickr album / photoset page
    browser : selenium webdriver instance (e.g. webdriver.Firefox)
    wait : int
        time in seconds to wait/retry before a network/browser-related error is
        thrown (default: 2)
    Returns
    -------
    photo_urls : OrderedDict(str: dict(str: str))
        embed-code-compatible image URLs mapped to their metadata
        ('image_page', 'title', 'orientation')
    """
    browser.implicitly_wait(wait)
    browser.get(album_url)
    if 'Problem' in browser.title:  # the website can't be reached
        raise WebDriverException(browser.title)
    if not browser.find_elements_by_class_name('awake'):
        raise NoSuchElementException('Is this really a Flickr Album page?')
    photo_urls = _get_page_photos(browser)
    # get URLs from follow-up pages, if any
    next_page = True
    while next_page:
        try:
            # this is not really a button, but you know what I mean ...
            next_page_button = browser.find_element_by_xpath(
                "//a[@data-track='paginationRightClick']")
            next_page_button.click()
            next_page_photos = _get_page_photos(browser)
            photo_urls.update(next_page_photos)
        except NoSuchElementException as e:
            # no pagination link left: we've processed the last page
            next_page = False
    return photo_urls
def hotlink_url2embed_url(hotlink_url):
    """
    Given a image URL extracted from a Flickr album page, returns the
    corresponding URL for embedding that image into another website. These
    URLs differ in terms of the server name and directory structure.
    Since images on a Flickr album page are shown in different sizes
    (for design purposes), we will have to 'normalize' the URL first, in order
    to always embed images of the same size (i.e. 500x334).
    Flickr image sizes
    ------------------
    without ending: 500 x 334
    ending with _b: 100 x 668
    ending with _c: 800 x 543
    ending with _z: 640 x 428
    ending with _o: 100 x 668 (or "original size")
    """
    # HOTLINK_URL_REGEX splits the URL into farm ID, image ID and an optional
    # size suffix; only farm and image ID survive into the embed URL.
    parts = re.match(HOTLINK_URL_REGEX, hotlink_url, re.VERBOSE)
    return 'https://farm{0}.staticflickr.com/{1}.jpg'.format(
        parts.group('farm_id'), parts.group('image_id'))
def embed_url2embed_code(image_url, image_page, title, orientation):
    """
    Build the HTML embed code for a Flickr image at medium dimensions:
    500x334 for landscape images, 334x500 for portrait ones.
    """
    width, height = (500, 334) if orientation == 'landscape' else (334, 500)
    template = (
        u'<a data-flickr-embed="true" href="{image_page}" '
        'title="{image_title}"> <img src="{image_url}" '
        'width="{width}" height="{height}" '
        'alt="{image_title}"></a>')
    return template.format(
        image_page=image_page, image_title=title, image_url=image_url,
        width=width, height=height)
def get_headless_browser():
    """
    returns a headless (i.e. invisible) Firefox browser instance.
    cf. http://stackoverflow.com/a/8910326
    """
    # start a virtual X display so Firefox can run without a real screen
    display = Display(visible=0, size=(1024, 768))
    display.start()
    # now Firefox will run in a virtual display.
    # you will not see the browser.
    return webdriver.Firefox()
def write_embed_codes(photo_dict, output_file):
    """
    Write one HTML embed code per photo (separated by blank lines) to an
    open file.

    Parameters
    ----------
    photo_dict : dict(str: dict(str: str))
        maps embed-code-compatible image URLs to metadata dicts with the
        keys 'image_page', 'title' and 'orientation'
    output_file : file
        an open, writable file
    """
    for url, meta in photo_dict.items():
        code = embed_url2embed_code(
            url, meta['image_page'], meta['title'], meta['orientation'])
        output_file.write(code + '\n\n')
def cli():
    """
    Command-line interface: extract the HTML embed codes for every photo in
    a Flickr album / photoset and write them to a file or to stdout.
    """
    parser = argparse.ArgumentParser(
        "extract HTML embed codes from a Flickr album")
    parser.add_argument('--debug', action='store_true',
                        help="enable debug mode")
    parser.add_argument(
        'album_url',
        help='URL of the Flickr album/photoset to extract embed codes from')
    parser.add_argument(
        'output_file', nargs='?', default=sys.stdout,
        help='output file for photo embed codes')
    args = parser.parse_args(sys.argv[1:])
    if args.debug:
        # drop into the pudb debugger with a visible browser window
        import pudb
        pudb.set_trace()
        browser = webdriver.Firefox()
    else:
        browser = get_headless_browser()
    try:
        photo_dict = get_photo_urls(args.album_url, browser)
        # argparse leaves the default (sys.stdout) untouched, so anything
        # without a `write` method is a user-supplied path string.
        # FIX: the original tested `isinstance(..., basestring)`, which is a
        # NameError on Python 3; duck-typing works on both interpreters.
        if not hasattr(args.output_file, 'write'):
            with codecs.open(args.output_file, 'w', encoding='utf8') as out_file:
                write_embed_codes(photo_dict, out_file)
        else:  # args.output_file is an open file (i.e. stdout)
            write_embed_codes(photo_dict, args.output_file)
            if args.output_file is not sys.stdout:
                # only close real files; closing the interpreter's stdout
                # would break any later output
                args.output_file.close()
    finally:
        # always shut the browser down, even if scraping failed
        browser.close()
if __name__ == '__main__':
    cli()
| 32.295031
| 81
| 0.640062
|
4a018bc8b87f196e23d08f3a350841aba016a9b6
| 2,367
|
py
|
Python
|
pandas/tests/test_msgpack/test_obj.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 29
|
2015-01-08T19:20:37.000Z
|
2021-04-20T08:25:56.000Z
|
pandas/tests/test_msgpack/test_obj.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 5
|
2021-03-19T08:36:48.000Z
|
2022-01-13T01:52:34.000Z
|
pandas/tests/test_msgpack/test_obj.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 22
|
2015-01-02T12:14:20.000Z
|
2021-10-13T09:22:30.000Z
|
# coding: utf-8
import unittest
import nose
import datetime
from pandas.msgpack import packb, unpackb
class DecodeError(Exception):
    # Raised by the deliberately-broken decoder hook in the tests below.
    pass
class TestObj(unittest.TestCase):
    # Exercises packb/unpackb hook arguments: default=, object_hook=,
    # object_pairs_hook= and list_hook=.
    def _arr_to_str(self, arr):
        # list_hook used by test_array_hook: turns [1, 2, 3] into '123'
        return ''.join(str(c) for c in arr)
    def bad_complex_decoder(self, o):
        # hook that always fails; used to check exception propagation
        raise DecodeError("Ooops!")
    def _decode_complex(self, obj):
        # object_hook: rebuild a complex number from its tagged-dict form
        if b'__complex__' in obj:
            return complex(obj[b'real'], obj[b'imag'])
        return obj
    def _encode_complex(self, obj):
        # default= hook: serialize a complex number as a tagged dict
        if isinstance(obj, complex):
            return {b'__complex__': True, b'real': 1, b'imag': 2}
        return obj
    def test_encode_hook(self):
        packed = packb([3, 1+2j], default=self._encode_complex)
        unpacked = unpackb(packed, use_list=1)
        assert unpacked[1] == {b'__complex__': True, b'real': 1, b'imag': 2}
    def test_decode_hook(self):
        packed = packb([3, {b'__complex__': True, b'real': 1, b'imag': 2}])
        unpacked = unpackb(packed, object_hook=self._decode_complex, use_list=1)
        assert unpacked[1] == 1+2j
    def test_decode_pairs_hook(self):
        packed = packb([3, {1: 2, 3: 4}])
        prod_sum = 1 * 2 + 3 * 4
        unpacked = unpackb(packed, object_pairs_hook=lambda l: sum(k * v for k, v in l), use_list=1)
        assert unpacked[1] == prod_sum
    def test_only_one_obj_hook(self):
        # object_hook and object_pairs_hook are mutually exclusive
        self.assertRaises(ValueError, unpackb, b'', object_hook=lambda x: x, object_pairs_hook=lambda x: x)
    def test_bad_hook(self):
        # a default= hook that returns an unserializable object must raise
        def f():
            packed = packb([3, 1+2j], default=lambda o: o)
            unpacked = unpackb(packed, use_list=1)
        self.assertRaises(ValueError, f)
    def test_array_hook(self):
        packed = packb([1,2,3])
        unpacked = unpackb(packed, list_hook=self._arr_to_str, use_list=1)
        assert unpacked == '123'
    def test_an_exception_in_objecthook1(self):
        # exceptions raised inside object_hook must propagate to the caller
        def f():
            packed = packb({1: {'__complex__': True, 'real': 1, 'imag': 2}})
            unpackb(packed, object_hook=self.bad_complex_decoder)
        self.assertRaises(DecodeError, f)
    def test_an_exception_in_objecthook2(self):
        # same for list_hook applied to a nested container
        def f():
            packed = packb({1: [{'__complex__': True, 'real': 1, 'imag': 2}]})
            unpackb(packed, list_hook=self.bad_complex_decoder, use_list=1)
        self.assertRaises(DecodeError, f)
| 32.875
| 107
| 0.621462
|
4a018c17dc7c72af6daaef5bae9cd9f56b360246
| 5,920
|
py
|
Python
|
pybricks/hubs/__stub/__screen.py
|
drewwhis/pybricks-fll
|
9d6672df9aeef375d3cd0983dc710abeebf9c798
|
[
"MIT"
] | 17
|
2019-08-30T12:30:34.000Z
|
2021-11-02T16:06:46.000Z
|
pybricks/hubs/__stub/__screen.py
|
drewwhis/pybricks-fll
|
9d6672df9aeef375d3cd0983dc710abeebf9c798
|
[
"MIT"
] | 19
|
2019-10-20T04:01:20.000Z
|
2021-09-03T18:47:07.000Z
|
pybricks/hubs/__stub/__screen.py
|
drewwhis/pybricks-fll
|
9d6672df9aeef375d3cd0983dc710abeebf9c798
|
[
"MIT"
] | 8
|
2019-09-21T03:13:21.000Z
|
2020-12-23T17:16:52.000Z
|
from pybricks.media.ev3dev import Font, Image, ImageFile
from pybricks.parameters import Color
from typing import Union
class Screen:
    """
    A stub class to represent the screen member of the EV3Brick class.

    All method bodies are ``...``: this class only documents the interface
    (for editors and type checkers) and performs no drawing itself.

    Attributes:
        height (int): The height of the screen in pixels.
        width (int): The width of the screen in pixels.
    """
    def __init__(self):
        # Fixed EV3 LCD dimensions in pixels.
        self.width = 178  # type: int
        self.height = 128  # type: int
    def clear(self):
        """
        Clears the screen. All pixels on the screen will be set to Color.WHITE.
        """
        ...
    def draw_text(self, x: int, y: int, text: str, text_color: Color = Color.BLACK, background_color: Color = None):
        """
        Draws text on the screen.

        The most recent font set using set_font() will be used or Font.DEFAULT
        if no font has been set yet.

        Args:
            x (int): The x-axis value where the left side of the text will start.
            y (int): The y-axis value where the top of the text will start.
            text (str): The text to draw.
            text_color (Color): The color used for drawing the text.
            background_color (Color): The color used to fill the rectangle behind the text or None for transparent background.
        """
        ...
    def print(self, *args, sep: str = "", end: str = "\n"):
        """
        Prints a line of text on the screen.

        This method works like the builtin print() function, but it writes on
        the screen instead.

        You can set the font using set_font(). If no font has been set,
        Font.DEFAULT will be used. The text is always printed used black text
        with a white background.

        Unlike the builtin print(), the text does not wrap if it is too wide
        to fit on the screen. It just gets cut off. But if the text would go
        off of the bottom of the screen, the entire image is scrolled up and
        the text is printed in the new blank area at the bottom of the screen.

        Args:
            args (object): Zero or more objects to print.
            sep (str): Separator that will be placed between each object that is printed.
            end (str): End of line that will be printed after the last object.
        """
        ...
    def set_font(self, font: Font):
        """
        Sets the font used for writing on the screen.

        The font is used for both draw_text() and print().

        Args:
            font (Font): The font to use.
        """
        ...
    def load_image(self, source: Union[str, Image, ImageFile]):
        """
        Clears this image, then draws the source image centered in the screen.

        Args:
            source (ImageFile, Image, or str): The source Image. If the argument is a string (or ImageFile), then the source image is loaded from file.
        """
        ...
    def draw_image(self, x: int, y: int, source: Union[str, Image, ImageFile], transparent: Color = None):
        """
        Draws the source image on the screen.

        Args:
            x (int): The x-axis value where the left side of the image will start.
            y (int): The y-axis value where the top of the image will start.
            source (ImageFile, Image, str): The source Image. If the argument is a string (or ImageFile), then the source image is loaded from file.
            transparent (Color): The color of image to treat as transparent or None for no transparency.
        """
        ...
    def draw_pixel(self, x: int, y: int, color: Color = Color.BLACK):
        """
        Draws a single pixel on the screen.

        Args:
            x (int): The x coordinate of the pixel.
            y (int): The y coordinate of the pixel.
            color (Color): The color of the pixel.
        """
        ...
    def draw_line(self, x1: int, y1: int, x2: int, y2: int, width: int = 1, color: Color = Color.BLACK):
        """
        Draws a line on the screen.

        Args:
            x1 (int): The x coordinate of the starting point of the line.
            y1 (int): The y coordinate of the starting point of the line.
            x2 (int): The x coordinate of the ending point of the line.
            y2 (int): The y coordinate of the ending point of the line.
            width (int): The width of the line in pixels.
            color (Color): The color of the line.
        """
        ...
    def draw_box(self, x1: int, y1: int, x2: int, y2: int, r: int = 0, fill: bool = False, color: Color = Color.BLACK):
        """
        Draws a box on the screen.

        Args:
            x1 (int): The x coordinate of the left side of the box.
            y1 (int): The y coordinate of the top of the box.
            x2 (int): The x coordinate of the right side of the box.
            y2 (int): The y coordinate of the bottom of the box.
            r (int): The radius of the corners of the box.
            fill (bool): If True, the box will be filled with color, otherwise only the outline of the box will be drawn.
            color (Color): The color of the box.
        """
        ...
    def draw_circle(self, x: int, y: int, r: int, fill: bool = False, color: Color = Color.BLACK):
        """
        Draws a circle on the screen.

        Args:
            x (int): The x coordinate of the center of the circle.
            y (int): The y coordinate of the center of the circle.
            r (int): The radius of the circle.
            fill (bool): If True, the circle will be filled with color, otherwise only the circumference will be drawn.
            color (Color): The color of the circle.
        """
        ...
    def save(self, filename: str):
        """
        Saves the screen as a .png file.

        Args:
            filename (str): The path to the file to be saved.

        Raises:
            TypeError: filename is not a string
            OSError: There was a problem saving the file.
        """
        ...
| 38.441558
| 285
| 0.588345
|
4a018cf7f6f5db2bf96fe9f6ab9ec725f082c7aa
| 16,891
|
py
|
Python
|
iyzico_objects.py
|
uguratar/pyzico
|
b779d590b99392df60db7c5e2df832708df9b6a2
|
[
"MIT"
] | 6
|
2015-05-03T10:48:54.000Z
|
2018-03-06T12:36:02.000Z
|
iyzico_objects.py
|
uguratar/pyzico
|
b779d590b99392df60db7c5e2df832708df9b6a2
|
[
"MIT"
] | 1
|
2021-06-01T22:06:45.000Z
|
2021-06-01T22:06:45.000Z
|
iyzico_objects.py
|
uguratar/pyzico
|
b779d590b99392df60db7c5e2df832708df9b6a2
|
[
"MIT"
] | null | null | null |
# coding=utf-8
__author__ = 'Ugur Atar <ugur@kahvekap.com>'
import requests
import settings
import uuid
class IyzicoCardException(ValueError):
    """Error type for card-related failures (see IyzicoCard below)."""
    def __init__(self, *args, **kwargs):
        ValueError.__init__(self, *args, **kwargs)
class IyzicoValueException(ValueError):
    """Error type for invalid values passed to the iyzico integration."""
    def __init__(self, *args, **kwargs):
        ValueError.__init__(self, *args, **kwargs)
class IyzicoHTTPException(IOError):
    """HTTP-level error carrying the offending ``response``/``request``.

    Both are accepted as keyword arguments. When only a response is given
    and it exposes a ``request`` attribute, the request is taken from it.
    """
    def __init__(self, *args, **kwargs):
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        derive_request = (self.response is not None and
                          not self.request and
                          hasattr(self.response, 'request'))
        if derive_request:
            self.request = self.response.request
        super(IyzicoHTTPException, self).__init__(*args, **kwargs)
class IyzicoCard:
    """A payment card whose brand and bank connector are resolved through
    the iyzico BIN-check service.

    NOTE: the constructor calls :meth:`validate`, which performs an HTTP
    BIN lookup using the module-level ``settings``/``requests`` — creating
    an instance requires credentials and network access.

    The original class declared read-only properties named identically to
    the instance attributes they returned (``card_number`` and friends);
    under new-style classes those recurse infinitely and block the
    assignments in ``__init__`` (one of them also referenced a nonexistent
    ``self.expiry_month``).  They were dead code and have been removed;
    the public attribute names are unchanged.
    """

    def __init__(self, card_number, card_expiry_month,
                 card_expiry_year, card_verification,
                 card_holder_name):
        self.card_number = card_number
        self.card_expiry_month = card_expiry_month
        self.card_expiry_year = card_expiry_year
        self.card_verification = card_verification
        self.card_holder_name = card_holder_name
        self.card_brand = None       # set by validate()
        self._bin_response = None    # cached IyzicoBinResponse
        self._connector = None      # bank connector name, see _find_connector()
        self._valid = False
        self.validate()

    @property
    def connector(self):
        """Connector name chosen from the BIN response (None until valid)."""
        return self._connector

    @property
    def is_valid(self):
        """True when the most recent BIN check succeeded."""
        return self._valid

    def validate(self):
        """Run (or re-run) the BIN check and refresh validity/brand/connector.

        The BIN response is cached; it is refreshed when absent or when the
        card number's first six digits no longer match the cached BIN.
        """
        if self._bin_response is None:
            self._bin_check()
            if self._bin_response.success:
                self._valid = True
                self._connector = self._find_connector()
                self.card_brand = self._bin_response.issuer
            else:
                self._valid = False
                self.card_brand = self._card_brand()
        elif self._bin_response.bin != self.card_number[:6]:
            # Card number changed since the last lookup; re-check.
            self._bin_check()
            if self._bin_response.success:
                self._connector = self._find_connector()
                self.card_brand = self._bin_response.issuer
                self._valid = True
            else:
                self._valid = False
                self.card_brand = self._card_brand()
        elif self._bin_response.success:
            self._valid = True
            self.card_brand = self._bin_response.issuer
            self._connector = self._find_connector()

    def _bin_check(self):
        """POST the card's first six digits to the BIN service; cache result.

        Raises:
            IyzicoHTTPException: the HTTP request failed.
            IyzicoValueException: the reply could not be parsed.
        """
        payload = {'api_id': settings.api_id,
                   'secret': settings.api_secret,
                   'bin': self.card_number[:6]}
        try:
            raw_response = requests.post(settings.bin_check_url,
                                         payload)
            bin_response = IyzicoBinResponse(raw_response)
            self._bin_response = bin_response
            return bin_response
        except requests.RequestException as re:
            # Fall back to a locally guessed brand before surfacing the error.
            self.card_brand = self._card_brand()
            raise IyzicoHTTPException(re.args, response=re.response)
        except ValueError as value_error:
            self.card_brand = self._card_brand()
            raise IyzicoValueException(value_error)

    def _card_brand(self):
        """Best-effort offline brand detection from number length/prefix."""
        number = str(self.card_number)
        card_brand = "Invalid"
        if len(number) == 15:
            if number[:2] == "34" or number[:2] == "37":
                card_brand = "AMEX"
        if len(number) == 13:
            if number[:1] == "4":
                card_brand = "VISA"
        if len(number) == 16:
            if number[:4] == "6011":
                card_brand = "DISCOVER"
            if 51 <= int(number[:2]) <= 55:
                card_brand = "MASTER"
            if number[:1] == "4":
                card_brand = "VISA"
        return card_brand

    def _find_connector(self):
        """Map the BIN response's brand/bank code to a connector name."""
        if self._bin_response.card_brand == "Bonus":
            return "Denizbank"
        elif self._bin_response.card_brand == "Maximum":
            return "Isbank"
        elif self._bin_response.card_brand == "World":
            return "Vakifbank"
        if self._bin_response.bank_code == "12":
            return "Halkbank"
        elif self._bin_response.bank_code == "111":
            return "Finansbank"
        elif self._bin_response.bank_code == "208":
            return "Bankasya"
        return "Bankasya"  # default connector when nothing matched
class IyzicoCustomer:
    """Customer identity attached to payment payloads.

    If any field is missing or blank, ``__init__`` returns early and the
    customer attributes are simply not set (the original's odd contract,
    preserved for compatibility).  Fixes from the original: the None check
    tested ``customer_first_name`` twice and never ``customer_contact_email``
    (crashing on ``None.strip()``), and the self-shadowing read-only
    properties — dead under old-style classes, fatal under new-style —
    were removed.
    """

    def __init__(self, customer_first_name=None,
                 customer_last_name=None,
                 customer_contact_email=None):
        # Reject missing or whitespace-only fields.
        if customer_first_name is None \
                or customer_last_name is None \
                or customer_contact_email is None \
                or len(customer_first_name.strip()) == 0 \
                or len(customer_last_name.strip()) == 0 \
                or len(customer_contact_email.strip()) == 0:
            return
        self.customer_first_name = customer_first_name
        self.customer_last_name = customer_last_name
        self.customer_contact_email = customer_contact_email
class IyzicoCardToken:
    """Opaque token referencing a card stored on iyzico's side.

    The original's read-only ``card_token`` property shadowed the instance
    attribute of the same name — under new-style classes it recursed and
    blocked the ``__init__`` assignment — so it was removed; the public
    attribute name is unchanged.
    """

    def __init__(self, card_token):
        self.card_token = card_token
class IyzicoSettings:
    """API credentials and mode, either given explicitly or taken from the
    module-level ``settings``.

    The original's self-shadowing read-only properties (``api_id`` etc.)
    were removed — under new-style classes they recurse infinitely and
    block the ``__init__`` assignments.  The public attribute names are
    unchanged.
    """

    def __init__(self, api_id=None, secret=None,
                 mode=None):
        if api_id is not None and secret is not None and mode is not None:
            self.api_id = api_id
            self.secret = secret
            self.mode = mode
        else:
            # Any missing argument falls back to the global configuration.
            self.api_id = settings.api_id
            self.secret = settings.api_secret
            self.mode = settings.mode
class IyzicoPayloadBuilder:
    """Assembles request payload dicts for the iyzico HTTP API.

    A builder is bound to an :class:`IyzicoSettings`; every build method
    starts from :meth:`reset` so payloads from consecutive calls do not
    leak into each other.  Fixes from the original: the shared class-level
    mutable ``payload = {}`` default was dropped (``reset`` always creates
    a fresh instance dict), Python-2-only ``iteritems()`` was replaced with
    ``items()``, and :meth:`format_amount` now rounds instead of truncating.
    """

    def __init__(self, settings):
        """``settings``: an IyzicoSettings carrying api_id/secret/mode."""
        if not isinstance(settings, IyzicoSettings):
            raise TypeError(str(self.__class__)
                            + ": settings is not instance of "
                            + str(IyzicoSettings))
        self.settings = settings
        self.reset()

    def reset(self):
        """Start a fresh payload pre-populated with credentials."""
        self.payload = {}
        self._append_object(self.settings)
        self.payload['response_mode'] = 'SYNC'

    def debit_with_token(self, card_token, amount, descriptor,
                         currency, customer):
        """Build a direct-debit ("DB") payload using a stored card token."""
        if not isinstance(card_token, IyzicoCardToken):
            raise TypeError(str(self.__class__)
                            + ": card_token is not instance of "
                            + str(IyzicoCardToken))
        self.reset()
        self.payload['external_id'] = uuid.uuid1().hex  # unique per request
        self._append_object(card_token)
        self.payload["type"] = "DB"
        self.payload["amount"] = self.format_amount(amount)
        self.payload["currency"] = currency
        self.payload["descriptor"] = descriptor
        if customer is not None and isinstance(customer,
                                               IyzicoCustomer):
            self._append_object(customer)
        return self.payload

    def debit(self, card, amount, descriptor, currency,
              customer=None, card_register=False,
              installment=None):
        """Build a direct-debit ("DB") payload from raw card data.

        Optionally registers the card and/or requests installments (the
        connector is taken from the card's BIN lookup).
        """
        if not isinstance(card, IyzicoCard):
            raise TypeError(str(self.__class__)
                            + ": card is not instance of "
                            + str(IyzicoCard))
        self.reset()
        self._append_object(card)
        self.payload['external_id'] = uuid.uuid1().hex
        self.payload["type"] = "DB"
        self.payload["amount"] = self.format_amount(amount)
        self.payload["currency"] = currency
        self.payload["descriptor"] = descriptor
        if installment is not None and isinstance(installment, int) \
                and installment > 1:
            self.payload["connector_type"] = card.connector
            self.payload["installment_count"] = str(int(installment))
        if card_register:
            self.payload["card_register"] = str(int(card_register))
        if customer is not None and isinstance(customer,
                                               IyzicoCustomer):
            self._append_object(customer)
        return self.payload

    def register_card(self, card):
        """Build a card-registration payload."""
        if not isinstance(card, IyzicoCard):
            raise TypeError(str(self.__class__)
                            + ": card is not instance of "
                            + str(IyzicoCard))
        self.reset()
        self._append_object(card)
        return self.payload

    def delete_card(self, card_token):
        """Build a payload that deletes a stored card token."""
        if not isinstance(card_token, IyzicoCardToken):
            raise TypeError(str(self.__class__)
                            + ": card token is not instance of "
                            + str(IyzicoCardToken))
        self.reset()
        self._append_object(card_token)
        return self.payload

    def pre_authorization(self, card, amount, descriptor, currency,
                          customer=None, ):
        """Build a pre-authorization ("PA") payload."""
        if not isinstance(card, IyzicoCard):
            raise TypeError(str(self.__class__)
                            + ": card is not instance of "
                            + str(IyzicoCard))
        self.reset()
        self._append_object(card)
        self.payload['external_id'] = uuid.uuid1().hex
        self.payload["type"] = "PA"
        self.payload["amount"] = self.format_amount(amount)
        self.payload["currency"] = currency
        self.payload["descriptor"] = descriptor
        if customer is not None and isinstance(customer,
                                               IyzicoCustomer):
            self._append_object(customer)
        return self.payload

    def capture(self, transaction_id, amount, descriptor, currency,
                customer=None, ):
        """Build a capture ("CP") payload for a pre-authorized transaction."""
        self.reset()
        self.payload['transaction_id'] = transaction_id
        self.payload['external_id'] = uuid.uuid1().hex
        self.payload["type"] = "CP"
        self.payload["amount"] = self.format_amount(amount)
        self.payload["currency"] = currency
        self.payload["descriptor"] = descriptor
        if customer is not None and isinstance(customer,
                                               IyzicoCustomer):
            self._append_object(customer)
        return self.payload

    def refund(self, transaction_id, amount, descriptor, currency,
               customer=None,):
        """Build a refund ("RF") payload."""
        self.reset()
        self.payload['transaction_id'] = transaction_id
        self.payload['external_id'] = uuid.uuid1().hex
        self.payload["type"] = "RF"
        self.payload["amount"] = self.format_amount(amount)
        self.payload["currency"] = currency
        self.payload["descriptor"] = descriptor
        if customer is not None and isinstance(customer,
                                               IyzicoCustomer):
            self._append_object(customer)
        return self.payload

    def reversal(self, transaction_id, amount, descriptor, currency,
                 customer=None,):
        """Build a reversal ("RV") payload (a refund with type overridden)."""
        self.refund(transaction_id, amount, descriptor, currency,
                    customer)
        self.payload["type"] = "RV"
        return self.payload

    def installment_matrix(self, amount, bin_number):
        """Build an installment-matrix query (no response_mode field)."""
        self.reset()
        self.payload["bin_number"] = bin_number
        self.payload["amount"] = self.format_amount(amount)
        del self.payload["response_mode"]
        return self.payload

    def _append_object(self, obj):
        """Copy an object's public (non-underscore) attributes into payload."""
        for attr, value in obj.__dict__.items():
            if not attr.startswith('_'):
                self.payload[attr] = value

    @staticmethod
    def format_amount(amount):
        """Convert a monetary amount to a minor-unit string (e.g. 19.99 -> "1999").

        round() guards against binary-float truncation; plain int() turned
        19.99 into "1998".
        """
        return str(int(round(100 * float("{:.2f}".format(amount)))))
class IyzicoRequest():
    """Thin transport layer: POSTs a payload and wraps the outcome."""

    @staticmethod
    def execute(url, payload):
        """POST ``payload`` to ``url`` and return an IyzicoResponse.

        Raises:
            IyzicoHTTPException: the HTTP request itself failed.
            IyzicoValueException: the reply body could not be parsed.
        """
        try:
            return IyzicoResponse(requests.post(url, payload))
        except requests.RequestException as request_error:
            raise IyzicoHTTPException(request_error.args,
                                      response=request_error.response)
        except ValueError as parse_error:
            raise IyzicoValueException(parse_error)
class IyzicoResponse():
    """Parsed wrapper around a raw iyzico API HTTP response.

    All fields are plain attributes extracted from the JSON body; anything
    missing from the reply is left as None.  Fixes from the original: the
    self-shadowing read-only properties (``response``, ``success`` etc.)
    were removed — under new-style classes they recurse infinitely and
    block the ``__init__`` assignments — and ``customer``/``mode`` are now
    pre-initialized so the failure path cannot leave them undefined.
    """

    def __init__(self, server_response):
        """``server_response``: an object with a ``.json()`` method
        (e.g. a requests.Response)."""
        self._raw_response = server_response
        self._json_response = server_response.json()
        self.response = self._json_response["response"]
        # Default every field so attribute access is always safe.
        self.error_message = None
        self.error_code = None
        self.transaction = None
        self.transaction_id = None
        self.transaction_state = None
        self.reference_id = None
        self.request_id = None
        self.account = None
        self.card_token = None
        self.customer = None
        self.mode = None
        if self.response["state"] == "success":
            self.success = True
            try:
                self.mode = self._json_response["mode"]
            except KeyError:
                self.mode = None
            try:
                # A partially-present transaction object resets all four
                # transaction fields (matches the original behavior).
                self.transaction = self._json_response["transaction"]
                self.transaction_id = \
                    self._json_response["transaction"]["transaction_id"]
                self.transaction_state = \
                    self._json_response["transaction"]["state"]
                self.reference_id = \
                    self._json_response["transaction"]["reference_id"]
            except KeyError:
                self.transaction = None
                self.transaction_id = None
                self.transaction_state = None
                self.reference_id = None
            try:
                self.request_id = self.response["request_id"]
            except KeyError:
                self.request_id = None
            try:
                self.account = self._json_response["account"]
            except KeyError:
                self.account = None
            try:
                self.customer = self._json_response["customer"]
            except KeyError:
                self.customer = None
            try:
                self.card_token = self._json_response["card_token"]
            except KeyError:
                self.card_token = None
        else:
            self.success = False
            try:
                self.error_message = self.response["error_message"]
            except KeyError:
                self.error_message = None
            try:
                self.error_code = self.response["error_code"]
            except KeyError:
                self.error_code = None
class IyzicoBinResponse():
    """Parsed reply from the iyzico BIN-check service.

    Fixes from the original: on a non-SUCCESS status the detail attributes
    (``bin``, ``card_brand``, …) were never set, so callers such as
    ``IyzicoCard.validate`` crashed with AttributeError — they now default
    to None.  The self-shadowing read-only properties were removed; the
    public attribute names are unchanged.
    """

    def __init__(self, server_response):
        """``server_response``: an object with a ``.json()`` method."""
        self._raw_response = server_response
        self._json_response = server_response.json()
        self.details = self._json_response["details"]
        self.success = self._json_response["status"] == "SUCCESS"
        # Default the detail fields so they exist on the failure path too.
        self.card_type = None
        self.bin = None
        self.card_brand = None
        self.bank_code = None
        self.issuer = None
        if self.success:
            self.card_type = self.details["CARD_TYPE"]
            self.bin = self.details["BIN"]
            self.card_brand = self.details["BRAND"]
            self.bank_code = self.details["BANK_CODE"]
            self.issuer = self.details["ISSUER"]
| 30.544304
| 74
| 0.577763
|
4a018e00eac0ecc647da8811ea84c327e85bfe4c
| 10,566
|
py
|
Python
|
test/test_ucx.py
|
brisbane/hpc-container-maker
|
29c675d62651c6dde566b699ad85f794114a94c4
|
[
"Apache-2.0"
] | null | null | null |
test/test_ucx.py
|
brisbane/hpc-container-maker
|
29c675d62651c6dde566b699ad85f794114a94c4
|
[
"Apache-2.0"
] | null | null | null |
test/test_ucx.py
|
brisbane/hpc-container-maker
|
29c675d62651c6dde566b699ad85f794114a94c4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the ucx module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ppc64le, ubuntu, x86_64
from hpccm.building_blocks.ucx import ucx
class Test_ucx(unittest.TestCase):
    """Unit tests for the hpccm `ucx` building block.

    Each test renders str(ucx(...)) under an architecture / distro /
    container-format decorator combination and compares it against the
    exact container instructions the block should generate, so the
    expected raw strings below must stay byte-identical.
    """
    def setUp(self):
        """Disable logging output messages"""
        # Building blocks may log warnings during construction; keep test
        # output clean.
        logging.disable(logging.ERROR)
    # Default options on x86_64 Ubuntu: apt packages + CUDA-enabled configure.
    @x86_64
    @ubuntu
    @docker
    def test_defaults_ubuntu(self):
        """Default ucx building block"""
        u = ucx()
        self.assertEqual(str(u),
r'''# UCX version 1.5.2
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        binutils-dev \
        file \
        libnuma-dev \
        make \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.5.2/ucx-1.5.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.5.2.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.5.2 && ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --with-cuda=/usr/local/cuda && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    rm -rf /var/tmp/ucx-1.5.2.tar.gz /var/tmp/ucx-1.5.2
ENV LD_LIBRARY_PATH=/usr/local/ucx/lib:$LD_LIBRARY_PATH \
    PATH=/usr/local/ucx/bin:$PATH''')
    # Same defaults on CentOS: yum package names differ from apt.
    @x86_64
    @centos
    @docker
    def test_defaults_centos(self):
        """Default ucx building block"""
        u = ucx()
        self.assertEqual(str(u),
r'''# UCX version 1.5.2
RUN yum install -y \
        binutils-devel \
        file \
        make \
        numactl-devel \
        wget && \
    rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.5.2/ucx-1.5.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.5.2.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.5.2 && ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --with-cuda=/usr/local/cuda && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    rm -rf /var/tmp/ucx-1.5.2.tar.gz /var/tmp/ucx-1.5.2
ENV LD_LIBRARY_PATH=/usr/local/ucx/lib:$LD_LIBRARY_PATH \
    PATH=/usr/local/ucx/bin:$PATH''')
    # String-valued options become --with-X=<path> configure flags.
    @x86_64
    @ubuntu
    @docker
    def test_with_paths_ubuntu(self):
        """ucx building block with paths"""
        u = ucx(cuda='/cuda', gdrcopy='/gdrcopy', knem='/knem', ofed='/ofed',
                xpmem='/xpmem')
        self.assertEqual(str(u),
r'''# UCX version 1.5.2
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        binutils-dev \
        file \
        libnuma-dev \
        make \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.5.2/ucx-1.5.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.5.2.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.5.2 && ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --with-cuda=/cuda --with-gdrcopy=/gdrcopy --with-knem=/knem --with-verbs=/ofed --with-rdmacm=/ofed --with-xpmem=/xpmem && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    rm -rf /var/tmp/ucx-1.5.2.tar.gz /var/tmp/ucx-1.5.2
ENV LD_LIBRARY_PATH=/usr/local/ucx/lib:$LD_LIBRARY_PATH \
    PATH=/usr/local/ucx/bin:$PATH''')
    # True-valued options become bare --with-X flags (no path).
    @x86_64
    @ubuntu
    @docker
    def test_with_true_ubuntu(self):
        """ucx building block with True values"""
        u = ucx(cuda=True, gdrcopy=True, knem=True, ofed=True, xpmem=True)
        self.assertEqual(str(u),
r'''# UCX version 1.5.2
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        binutils-dev \
        file \
        libnuma-dev \
        make \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.5.2/ucx-1.5.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.5.2.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.5.2 && ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --with-cuda=/usr/local/cuda --with-gdrcopy --with-knem --with-verbs --with-rdmacm --with-xpmem && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    rm -rf /var/tmp/ucx-1.5.2.tar.gz /var/tmp/ucx-1.5.2
ENV LD_LIBRARY_PATH=/usr/local/ucx/lib:$LD_LIBRARY_PATH \
    PATH=/usr/local/ucx/bin:$PATH''')
    # False-valued options become --without-X flags.
    @x86_64
    @ubuntu
    @docker
    def test_with_false_ubuntu(self):
        """ucx building block with False values"""
        u = ucx(cuda=False, gdrcopy=False, knem=False, ofed=False, xpmem=False)
        self.assertEqual(str(u),
r'''# UCX version 1.5.2
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        binutils-dev \
        file \
        libnuma-dev \
        make \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.5.2/ucx-1.5.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.5.2.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.5.2 && ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --without-cuda --without-gdrcopy --without-knem --without-verbs --without-rdmacm --without-xpmem && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    rm -rf /var/tmp/ucx-1.5.2.tar.gz /var/tmp/ucx-1.5.2
ENV LD_LIBRARY_PATH=/usr/local/ucx/lib:$LD_LIBRARY_PATH \
    PATH=/usr/local/ucx/bin:$PATH''')
    # ldconfig=True registers the lib dir instead of exporting LD_LIBRARY_PATH.
    @x86_64
    @ubuntu
    @docker
    def test_ldconfig(self):
        """ldconfig option"""
        u = ucx(ldconfig=True, version='1.4.0')
        self.assertEqual(str(u),
r'''# UCX version 1.4.0
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        binutils-dev \
        file \
        libnuma-dev \
        make \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.4.0/ucx-1.4.0.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.4.0.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.4.0 && ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --with-cuda=/usr/local/cuda && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    echo "/usr/local/ucx/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
    rm -rf /var/tmp/ucx-1.4.0.tar.gz /var/tmp/ucx-1.4.0
ENV PATH=/usr/local/ucx/bin:$PATH''')
    # environment=False suppresses the ENV instruction entirely.
    @x86_64
    @ubuntu
    @docker
    def test_environment(self):
        """environment option"""
        u = ucx(environment=False, version='1.4.0')
        self.assertEqual(str(u),
r'''# UCX version 1.4.0
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        binutils-dev \
        file \
        libnuma-dev \
        make \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.4.0/ucx-1.4.0.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.4.0.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.4.0 && ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --with-cuda=/usr/local/cuda && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    rm -rf /var/tmp/ucx-1.4.0.tar.gz /var/tmp/ucx-1.4.0''')
    # ppc64le adds the CFLAGS=-Wno-error=format workaround before configure.
    @ppc64le
    @centos
    @docker
    def test_ppc64le(self):
        """ppc64le"""
        u = ucx(cuda=False, knem='/usr/local/knem', version='1.5.2')
        self.assertEqual(str(u),
r'''# UCX version 1.5.2
RUN yum install -y \
        binutils-devel \
        file \
        make \
        numactl-devel \
        wget && \
    rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openucx/ucx/releases/download/v1.5.2/ucx-1.5.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/ucx-1.5.2.tar.gz -C /var/tmp -z && \
    cd /var/tmp/ucx-1.5.2 && CFLAGS=-Wno-error=format ./configure --prefix=/usr/local/ucx --enable-optimizations --disable-logging --disable-debug --disable-assertions --disable-params-check --disable-doxygen-doc --without-cuda --with-knem=/usr/local/knem && \
    make -j$(nproc) && \
    make -j$(nproc) install && \
    rm -rf /var/tmp/ucx-1.5.2.tar.gz /var/tmp/ucx-1.5.2
ENV LD_LIBRARY_PATH=/usr/local/ucx/lib:$LD_LIBRARY_PATH \
    PATH=/usr/local/ucx/bin:$PATH''')
    # runtime() emits the second (deployment) stage: runtime deps + COPY.
    @ubuntu
    @docker
    def test_runtime(self):
        """Runtime"""
        u = ucx()
        r = u.runtime()
        self.assertEqual(r,
r'''# UCX
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        binutils && \
    rm -rf /var/lib/apt/lists/*
COPY --from=0 /usr/local/ucx /usr/local/ucx
ENV LD_LIBRARY_PATH=/usr/local/ucx/lib:$LD_LIBRARY_PATH \
    PATH=/usr/local/ucx/bin:$PATH''')
| 42.95122
| 313
| 0.627768
|
4a018e014a89be8f74caf24a897ae021be21673f
| 24,673
|
py
|
Python
|
test/functional/rpc_rawtransaction.py
|
CJwon-98/Pyeongtaekcoin
|
45a81933a98a7487f11e57e6e9315efe740a297e
|
[
"MIT"
] | null | null | null |
test/functional/rpc_rawtransaction.py
|
CJwon-98/Pyeongtaekcoin
|
45a81933a98a7487f11e57e6e9315efe740a297e
|
[
"MIT"
] | null | null | null |
test/functional/rpc_rawtransaction.py
|
CJwon-98/Pyeongtaekcoin
|
45a81933a98a7487f11e57e6e9315efe740a297e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Pyeongtaekcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import PyeongtaekcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
    """A dict subclass whose items() preserves duplicate keys.

    Built from a list of (key, value) pairs.  json.dumps() of an instance
    therefore emits repeated keys — deliberately invalid JSON, e.g.
    '{"1": 2, "1": 2}' — used to test RPC methods against objects with
    repeated keys.
    """
    def __init__(self, x):
        super().__init__(x)
        # Keep the raw pair list so items() can expose duplicates.
        self.x = x
    def items(self):
        return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(PyeongtaekcoinTestFramework):
    def set_test_params(self):
        # Fresh regtest chain; three nodes all forced to legacy addresses
        # and running with -txindex so getrawtransaction can look up
        # arbitrary (non-wallet) transactions.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"]]
    def skip_test_if_missing_module(self):
        # These RPC tests create and sign transactions, so wallet support
        # must be compiled in.
        self.skip_if_no_wallet()
    def setup_network(self):
        # Default chained topology plus an extra direct 0<->2 connection so
        # transactions relay quickly between those two nodes.
        super().setup_network()
        connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Pyeongtaekcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
# Test `signrawtransactionwithwallet` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
self.log.info('sendrawtransaction with missing input')
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 PTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
# Entry point when invoked directly by the functional test runner.
if __name__ == '__main__':
    RawTransactionsTest().main()
| 56.202733
| 263
| 0.652859
|
4a018ea8a51f6ead2ec41029ec9874ebdd5212d8
| 13,743
|
py
|
Python
|
qiskit/quantum_info/states/statevector.py
|
BoschSamuel/qiskit-terra
|
01bdfe88c15a93fa7548edc0db0e33b287cc8c98
|
[
"Apache-2.0"
] | null | null | null |
qiskit/quantum_info/states/statevector.py
|
BoschSamuel/qiskit-terra
|
01bdfe88c15a93fa7548edc0db0e33b287cc8c98
|
[
"Apache-2.0"
] | null | null | null |
qiskit/quantum_info/states/statevector.py
|
BoschSamuel/qiskit-terra
|
01bdfe88c15a93fa7548edc0db0e33b287cc8c98
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Statevector quantum state class.
"""
import re
from numbers import Number
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.circuit.instruction import Instruction
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.states.quantum_state import QuantumState
from qiskit.quantum_info.operators.operator import Operator
class Statevector(QuantumState):
    """Statevector class.

    Represents a pure quantum state as a complex amplitude vector together
    with the subsystem dimensions of the composite system it acts on.
    """

    def __init__(self, data, dims=None):
        """Initialize a state object.

        Args:
            data (Statevector or Operator or list or np.ndarray): another
                Statevector (shallow copy), a column-vector Operator, or a
                raw vector of complex amplitudes.
            dims (tuple or None): optional subsystem dimensions; inferred
                from the vector length when omitted.

        Raises:
            QiskitError: if the input is not a vector or column-vector, or
                is an unsupported type.
        """
        if isinstance(data, Statevector):
            # Shallow copy constructor
            vec = data.data
            if dims is None:
                dims = data.dims()
        elif isinstance(data, Operator):
            # We allow conversion of column-vector operators to Statevectors
            input_dim, output_dim = data.dim
            if input_dim != 1:
                raise QiskitError("Input Operator is not a column-vector.")
            vec = np.reshape(data.data, output_dim)
        elif isinstance(data, (list, np.ndarray)):
            # Finally we check if the input is a raw vector in either a
            # python list or numpy array format.
            vec = np.array(data, dtype=complex)
        else:
            raise QiskitError("Invalid input data format for Statevector")
        # Check that the input is a numpy vector or column-vector numpy
        # matrix. If it is a column-vector matrix reshape to a vector.
        if vec.ndim not in [1, 2] or (vec.ndim == 2 and vec.shape[1] != 1):
            raise QiskitError("Invalid input: not a vector or column-vector.")
        if vec.ndim == 2 and vec.shape[1] == 1:
            vec = np.reshape(vec, vec.shape[0])
        dim = vec.shape[0]
        # _automatic_dims comes from QuantumState -- presumably it factors
        # `dim` into qubit subsystems when `dims` is None; confirm there.
        subsystem_dims = self._automatic_dims(dims, dim)
        super().__init__('Statevector', vec, subsystem_dims)

    def is_valid(self, atol=None, rtol=None):
        """Return True if a Statevector has norm 1.

        Tolerances default to the class-level ``_atol`` / ``_rtol`` values.
        """
        if atol is None:
            atol = self._atol
        if rtol is None:
            rtol = self._rtol
        norm = np.linalg.norm(self.data)
        return np.allclose(norm, 1, rtol=rtol, atol=atol)

    def to_operator(self):
        """Convert state to a rank-1 projector operator |psi><psi|."""
        mat = np.outer(self.data, np.conj(self.data))
        return Operator(mat, input_dims=self.dims(), output_dims=self.dims())

    def conjugate(self):
        """Return the conjugate of the operator."""
        return Statevector(np.conj(self.data), dims=self.dims())

    def trace(self):
        """Return the trace of the quantum state as a density matrix."""
        # Tr[|psi><psi|] = <psi|psi> = sum of squared amplitude magnitudes.
        return np.sum(np.abs(self.data) ** 2)

    def purity(self):
        """Return the purity of the quantum state."""
        # For a valid statevector the purity is always 1, however if we simply
        # have an arbitrary vector (not correctly normalized) then the
        # purity is equivalent to the trace squared:
        # P(|psi>) = Tr[|psi><psi|psi><psi|] = |<psi|psi>|^2
        return self.trace() ** 2

    def tensor(self, other):
        """Return the tensor product state self ⊗ other.

        Args:
            other (Statevector): a quantum state object.

        Returns:
            Statevector: the tensor product operator self ⊗ other.

        Raises:
            QiskitError: if other is not a quantum state.
        """
        if not isinstance(other, Statevector):
            other = Statevector(other)
        dims = other.dims() + self.dims()
        data = np.kron(self._data, other._data)
        return Statevector(data, dims)

    def expand(self, other):
        """Return the tensor product state other ⊗ self.

        Args:
            other (Statevector): a quantum state object.

        Returns:
            Statevector: the tensor product state other ⊗ self.

        Raises:
            QiskitError: if other is not a quantum state.
        """
        if not isinstance(other, Statevector):
            other = Statevector(other)
        dims = self.dims() + other.dims()
        data = np.kron(other._data, self._data)
        return Statevector(data, dims)

    def add(self, other):
        """Return the linear combination self + other.

        Args:
            other (Statevector): a quantum state object.

        Returns:
            LinearOperator: the linear combination self + other.

        Raises:
            QiskitError: if other is not a quantum state, or has
                incompatible dimensions.
        """
        if not isinstance(other, Statevector):
            other = Statevector(other)
        if self.dim != other.dim:
            raise QiskitError("other Statevector has different dimensions.")
        return Statevector(self.data + other.data, self.dims())

    def subtract(self, other):
        """Return the linear operator self - other.

        Args:
            other (Statevector): a quantum state object.

        Returns:
            LinearOperator: the linear combination self - other.

        Raises:
            QiskitError: if other is not a quantum state, or has
                incompatible dimensions.
        """
        if not isinstance(other, Statevector):
            other = Statevector(other)
        if self.dim != other.dim:
            raise QiskitError("other Statevector has different dimensions.")
        return Statevector(self.data - other.data, self.dims())

    def multiply(self, other):
        """Return the linear operator self * other.

        Args:
            other (complex): a complex number.

        Returns:
            Operator: the linear combination other * self.

        Raises:
            QiskitError: if other is not a valid complex number.
        """
        if not isinstance(other, Number):
            raise QiskitError("other is not a number")
        return Statevector(other * self.data, self.dims())

    def evolve(self, other, qargs=None):
        """Evolve a quantum state by the operator.

        Args:
            other (Operator): The operator to evolve by.
            qargs (list): a list of Statevector subsystem positions to apply
                the operator on.

        Returns:
            Statevector: the output quantum state.

        Raises:
            QiskitError: if the operator dimension does not match the
                specified Statevector subsystem dimensions.
        """
        # Evolution by a circuit or instruction
        if isinstance(other, (QuantumCircuit, Instruction)):
            return self._evolve_instruction(other, qargs=qargs)
        # Evolution by an Operator
        if not isinstance(other, Operator):
            other = Operator(other)
        if qargs is None:
            # Evolution on full statevector: a plain matrix-vector product.
            if self._dim != other._input_dim:
                raise QiskitError(
                    "Operator input dimension is not equal to statevector dimension."
                )
            return Statevector(np.dot(other.data, self.data), dims=other.output_dims())
        # Otherwise we are applying an operator only to subsystems
        # Check dimensions of subsystems match the operator
        if self.dims(qargs) != other.input_dims():
            raise QiskitError(
                "Operator input dimensions are not equal to statevector subsystem dimensions."
            )
        # Reshape statevector and operator into tensors over subsystems.
        tensor = np.reshape(self.data, self._shape)
        mat = np.reshape(other.data, other._shape)
        # Construct list of tensor indices of statevector to be contracted.
        # _shape stores subsystem dims in reversed order, so subsystem
        # position `qubit` maps to tensor axis `num_indices - 1 - qubit`.
        num_indices = len(self.dims())
        indices = [num_indices - 1 - qubit for qubit in qargs]
        tensor = Operator._einsum_matmul(tensor, mat, indices)
        new_dims = list(self.dims())
        for i, qubit in enumerate(qargs):
            new_dims[qubit] = other._output_dims[i]
        # Replace evolved dimensions
        return Statevector(np.reshape(tensor, np.product(new_dims)), dims=new_dims)

    @classmethod
    def from_label(cls, label):
        """Return a tensor product of Pauli X,Y,Z eigenstates.

        Args:
            label (string): a eigenstate string ket label 0,1,+,-,r,l.

        Returns:
            Statevector: The N-qubit basis state density matrix.

        Raises:
            QiskitError: if the label contains invalid characters, or the length
                of the label is larger than an explicitly specified num_qubits.

        Additional Information:
            The labels correspond to the single-qubit states:
            '0': [1, 0]
            '1': [0, 1]
            '+': [1 / sqrt(2), 1 / sqrt(2)]
            '-': [1 / sqrt(2), -1 / sqrt(2)]
            'r': [1 / sqrt(2), 1j / sqrt(2)]
            'l': [1 / sqrt(2), -1j / sqrt(2)]
        """
        # Check label is valid
        if re.match(r'^[01rl\-+]+$', label) is None:
            raise QiskitError('Label contains invalid characters.')
        # We can prepare Z-eigenstates by converting the computational
        # basis bit-string to an integer and preparing that unit vector
        # However, for X-basis states, we will prepare a Z-eigenstate first
        # then apply Hadamard gates to rotate 0 and 1s to + and -.
        z_label = label
        xy_states = False
        if re.match('^[01]+$', label) is None:
            # We have X or Y eigenstates so replace +,r with 0 and
            # -,l with 1 and prepare the corresponding Z state
            xy_states = True
            z_label = z_label.replace('+', '0')
            z_label = z_label.replace('r', '0')
            z_label = z_label.replace('-', '1')
            z_label = z_label.replace('l', '1')
        # Initialize Z eigenstate vector (a single unit amplitude at the
        # integer encoded by the bit-string).
        num_qubits = len(label)
        data = np.zeros(1 << num_qubits, dtype=complex)
        pos = int(z_label, 2)
        data[pos] = 1
        state = Statevector(data)
        if xy_states:
            # Apply hadamards to all qubits in X eigenstates
            x_mat = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)
            # Apply S.H to qubits in Y eigenstates
            y_mat = np.dot(np.diag([1, 1j]), x_mat)
            # reversed(): label character 0 is the highest-numbered qubit.
            for qubit, char in enumerate(reversed(label)):
                if char in ['+', '-']:
                    state = state.evolve(x_mat, qargs=[qubit])
                elif char in ['r', 'l']:
                    state = state.evolve(y_mat, qargs=[qubit])
        return state

    @classmethod
    def from_instruction(cls, instruction):
        """Return the output statevector of an instruction.

        The statevector is initialized in the state |0,...,0> of the same
        number of qubits as the input instruction or circuit, evolved
        by the input instruction, and the output statevector returned.

        Args:
            instruction (Instruction or QuantumCircuit): instruction or circuit

        Returns:
            Statevector: The final statevector.

        Raises:
            QiskitError: if the instruction contains invalid instructions for
                the statevector simulation.
        """
        # Convert circuit to an instruction
        if isinstance(instruction, QuantumCircuit):
            instruction = instruction.to_instruction()
        # Initialize an the statevector in the all |0> state
        init = np.zeros(2 ** instruction.num_qubits, dtype=complex)
        init[0] = 1
        vec = Statevector(init, dims=instruction.num_qubits * [2])
        vec._append_instruction(instruction)
        return vec

    @property
    def _shape(self):
        """Return the tensor shape of the matrix operator.

        Dims are reversed so that subsystem 0 is the last tensor axis.
        """
        return tuple(reversed(self.dims()))

    def _append_instruction(self, obj, qargs=None):
        """Update the current Statevector by applying an instruction."""
        mat = Operator._instruction_to_matrix(obj)
        if mat is not None:
            # Perform the composition and inplace update the current state
            # of the operator
            self._data = self.evolve(mat, qargs=qargs).data
        else:
            # If the instruction doesn't have a matrix defined we use its
            # circuit decomposition definition if it exists, otherwise we
            # cannot compose this gate and raise an error.
            if obj.definition is None:
                raise QiskitError('Cannot apply Instruction: {}'.format(obj.name))
            # Recurse over the definition's (instruction, qubits, clbits)
            # tuples; classical registers are not simulable here.
            for instr, qregs, cregs in obj.definition:
                if cregs:
                    raise QiskitError(
                        'Cannot apply instruction with classical registers: {}'.format(
                            instr.name))
                # Get the integer position of the flat register
                if qargs is None:
                    new_qargs = [tup.index for tup in qregs]
                else:
                    new_qargs = [qargs[tup.index] for tup in qregs]
                self._append_instruction(instr, qargs=new_qargs)

    def _evolve_instruction(self, obj, qargs=None):
        """Return a new statevector by applying an instruction."""
        if isinstance(obj, QuantumCircuit):
            obj = obj.to_instruction()
        # Work on a copy so the current state is left untouched.
        vec = Statevector(self.data, dims=self.dims())
        vec._append_instruction(obj, qargs=qargs)
        return vec
| 38.932011
| 94
| 0.600669
|
4a0192b57dd87c41dfc68d1e0527f577eb69d0e8
| 3,189
|
py
|
Python
|
uploadr.py
|
akent/uploadr-reloaded
|
b94f75ab48b6062ba5212ed86ac0926113b9054b
|
[
"BSD-3-Clause"
] | null | null | null |
uploadr.py
|
akent/uploadr-reloaded
|
b94f75ab48b6062ba5212ed86ac0926113b9054b
|
[
"BSD-3-Clause"
] | null | null | null |
uploadr.py
|
akent/uploadr-reloaded
|
b94f75ab48b6062ba5212ed86ac0926113b9054b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# uploadr.py inspired by http://berserk.org/uploadr/
# but using http://stuvel.eu/projects/flickrapi instead
import sys, time, os, shelve, string
import exifread
import flickrapi
#
# Location to scan for new images
#
#
# Location to scan for new images
#
IMAGE_DIR = "/Volumes/NIKON D40"
#
# Flickr settings
#
# Boolean flags are passed as "0"/"1" strings -- presumably the string form
# the Flickr upload API expects; confirm against the flickrapi docs.
FLICKR = {"title": "",
        "description": "",
        "tags": "autoupload",
        "is_public": "0",
        "is_friend": "0",
        "is_family": "0" }
#
# File we keep the history of uploaded images in.
#
# Shelve database mapping image path <-> Flickr photo id (see Uploadr.logUpload).
HISTORY_FILE = "uploadr.history"
##
## You shouldn't need to modify anything below here
##
# NOTE(review): API credentials are hard-coded in source; they should be
# moved to an environment variable or untracked config file.
FLICKR["secret" ] = "4273bf03b90b6adc"
FLICKR["api_key" ] = "04bb4d7119a20ca262a7b2c07c7e0f81"
class Uploadr:
    """Authenticate with Flickr, then upload every image found under
    IMAGE_DIR that is not already recorded in HISTORY_FILE.

    NOTE(review): this is Python 2 code (`print` statements, `raw_input`,
    `dict.has_key`); it will not run unmodified on Python 3.
    """

    def __init__( self ):
        # Legacy two-step flickrapi token flow: obtain a (token, frob),
        # have the user authorise in a browser, then complete the handshake.
        self.flickr = flickrapi.FlickrAPI(FLICKR["api_key"], FLICKR["secret"])
        (token, frob) = self.flickr.get_token_part_one(perms='write')
        if not token:
            raw_input("Press ENTER after you have authorised this program")
        self.flickr.get_token_part_two((token, frob))

    def upload( self ):
        """Upload each new image found under IMAGE_DIR."""
        newImages = self.grabNewImages()
        for image in newImages:
            # The history shelf is re-opened and closed around every single
            # image -- presumably so progress persists if the run is killed.
            self.uploaded = shelve.open( HISTORY_FILE )
            self.uploadImage( image )
            self.uploaded.close()

    def grabNewImages( self ):
        """Return a sorted list of jpg/gif/png file paths under IMAGE_DIR."""
        images = []
        foo = os.walk( IMAGE_DIR )
        for data in foo:
            (dirpath, dirnames, filenames) = data
            for f in filenames :
                # Extension check is case-insensitive via lower().
                ext = f.lower().split(".")[-1]
                if ( ext == "jpg" or
                        ext == "gif" or
                        ext == "png"):
                    images.append( os.path.normpath( dirpath + "/" + f ) )
        images.sort()
        return images

    def uploadImage( self, image ):
        """Upload one image, backdate it from its EXIF timestamp, log it."""
        if ( not self.uploaded.has_key( image ) ):
            print "Uploading ", image
            # NOTE(review): this file handle is never closed; wrap in a
            # with-block (or close after exifread.process_file).
            f = open(image, 'rb')
            metadata = exifread.process_file(f)
            try:
                # EXIF "Image DateTime" looks like "2007:04:08 12:00:00".
                date = time.strptime("%s" % metadata["Image DateTime"],
                        "%Y:%m:%d %H:%M:%S")
            except Exception as e:
                print e
                # Fall back to "now" when the EXIF timestamp is missing/bad.
                date = time.localtime()
            response = self.flickr.upload(filename = image,
                    tags = FLICKR["tags"],
                    is_public = FLICKR["is_public"],
                    is_friend = FLICKR["is_friend"],
                    is_family = FLICKR["is_family"])
            if (response.attrib['stat'] == "ok"):
                pid = response.getchildren()[0].text
                try:
                    # Backdate the photo's posted date to the capture time.
                    self.flickr.photos_setDates(photo_id = pid,
                            date_posted = "%d" % time.mktime(date))
                except flickrapi.exceptions.FlickrError:
                    print "Can't set date, pressing on anyway"
                self.logUpload(pid, image);

    def logUpload( self, photoID, imageName ):
        """Record the upload in both directions (path -> id and id -> path)."""
        photoID = str( photoID )
        imageName = str( imageName )
        self.uploaded[ imageName ] = photoID
        self.uploaded[ photoID ] = imageName
if __name__ == "__main__":
    # Script entry point: authenticate with Flickr, then upload new images.
    Uploadr().upload()
| 31.574257
| 78
| 0.534337
|
4a01930446d885e1f074bd32fcf18d5ceb3e9612
| 4,189
|
py
|
Python
|
tests/unit/test_todo.py
|
lekshmimallika-aot/business-schemas
|
d95b43f1d04e29fd9bab101789c277db54123d9b
|
[
"Apache-2.0"
] | 2
|
2020-02-05T21:36:27.000Z
|
2021-08-28T23:56:52.000Z
|
tests/unit/test_todo.py
|
lekshmimallika-aot/business-schemas
|
d95b43f1d04e29fd9bab101789c277db54123d9b
|
[
"Apache-2.0"
] | 13
|
2020-03-25T17:28:11.000Z
|
2022-03-30T20:06:04.000Z
|
tests/unit/test_todo.py
|
lekshmimallika-aot/business-schemas
|
d95b43f1d04e29fd9bab101789c277db54123d9b
|
[
"Apache-2.0"
] | 19
|
2020-01-31T23:11:47.000Z
|
2022-03-30T18:08:15.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the legal todo schema is valid.
This suite should have at least 1 test for the annualReport todo item.
"""
from registry_schemas import validate
def test_valid_todo():
    """Assert that the schema accepts a valid todo item."""
    business = {
        'cacheId': 1,
        'foundingDate': '2007-04-08T00:00:00+00:00',
        'identifier': 'CP0002098',
        'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
        'legalName': 'Legal Name - CP0002098',
    }
    header = {
        'name': 'annualReport',
        'ARFilingYear': 2019,
        'status': 'NEW',
    }
    todo = {'todo': {'business': business, 'header': header}}

    is_valid, errors = validate(todo, 'todo')

    # Keep the errors visible in the pytest output if the assertion fails.
    print(errors)
    assert is_valid
def test_invalid_todo_name():
    """Assert that the schema rejects a todo item with an invalid name."""
    business = {
        'cacheId': 1,
        'foundingDate': '2007-04-08T00:00:00+00:00',
        'identifier': 'CP0002098',
        'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
        'legalName': 'Legal Name - CP0002098',
    }
    header = {
        'name': 'annualReport',
        'ARFilingYear': 2019,
        'status': 'NEW',
    }
    # Top-level key is 'invalid' instead of the required 'todo'.
    todo = {'invalid': {'business': business, 'header': header}}

    is_valid, errors = validate(todo, 'todo')

    # Keep the errors visible in the pytest output if the assertion fails.
    print(errors)
    assert not is_valid
def test_invalid_todo_missing_business():
    """Assert that the schema rejects a todo item missing the 'business' object."""
    header = {
        'name': 'annualReport',
        'ARFilingYear': 2019,
        'status': 'NEW',
    }
    # No 'business' object alongside the header.
    todo = {'todo': {'header': header}}

    is_valid, errors = validate(todo, 'todo')

    # Keep the errors visible in the pytest output if the assertion fails.
    print(errors)
    assert not is_valid
def test_invalid_todo_missing_header():
    """Assert that the schema rejects a todo item missing the 'header' object."""
    business = {
        'cacheId': 1,
        'foundingDate': '2007-04-08T00:00:00+00:00',
        'identifier': 'CP0002098',
        'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
        'legalName': 'Legal Name - CP0002098',
    }
    # No 'header' object alongside the business.
    todo = {'todo': {'business': business}}

    is_valid, errors = validate(todo, 'todo')

    # Keep the errors visible in the pytest output if the assertion fails.
    print(errors)
    assert not is_valid
def test_invalid_todo_invalid_header():
    """Assert that the schema rejects a todo item with a missing 'header' property."""
    business = {
        'cacheId': 1,
        'foundingDate': '2007-04-08T00:00:00+00:00',
        'identifier': 'CP0002098',
        'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
        'legalName': 'Legal Name - CP0002098',
    }
    # Header is present but lacks its required 'name' property.
    header = {
        'ARFilingYear': 2019,
        'status': 'NEW',
    }
    todo = {'todo': {'business': business, 'header': header}}

    is_valid, errors = validate(todo, 'todo')

    # Keep the errors visible in the pytest output if the assertion fails.
    print(errors)
    assert not is_valid
| 27.559211
| 86
| 0.534973
|
4a0193d94ad32c9b65a861cd1357203de27e11e3
| 18,471
|
py
|
Python
|
dashboard/views.py
|
Kgermando/es-script
|
f1b10ecf2c805e8875a025e7033c724e236f6cd1
|
[
"Apache-2.0"
] | null | null | null |
dashboard/views.py
|
Kgermando/es-script
|
f1b10ecf2c805e8875a025e7033c724e236f6cd1
|
[
"Apache-2.0"
] | null | null | null |
dashboard/views.py
|
Kgermando/es-script
|
f1b10ecf2c805e8875a025e7033c724e236f6cd1
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.contrib.auth.models import User
from issabel.models import Cdr, Cel
from agenda.models import Note
from contacts.models import Contact
from accounts.models import Profile
from acquisition.models import Acquisition
from commprom.models import Commprom
from dat.models import Dat
from recouvrement.models import Recouvrement
from renouvellement.models import Renouvellement
from comptedormant.models import Compte_dormant
# Create your views here.
# STATUTS
statut_1 = 'Statuts de reporting'
statut_2 = 'Accord'
statut_3 = 'Déjà payé son crédit'
statut_4 = 'Refus'
statut_5 = 'Rappel'
statut_6 = 'Injoignable'
statut_7 = 'Absent'
statut_8 = 'Faux numéro'
statut_9 = 'Réfléchir'
# ANSWERED
# BUSY
# FAILED
# CONGESTION
# NO ANSWER
@login_required
def dashboard_view(request):
    """Render the per-agent dashboard.

    Gathers, for the logged-in user: call-detail-record (CDR) statistics,
    the note count, and per-campaign counters broken down by 'Statut'.
    The counters were previously 54 hand-written variables; they are now
    generated data-driven.  Every context key is kept byte-identical to
    what the template already expects (including the historical double
    underscore in the 'compte_dormant__N_user' keys).
    """
    user = request.user
    users_online = Profile.objects.filter(is_online=True).count()
    user_list = User.objects.all()

    # CDRs of the current agent only (src == username).
    my_cdr = Cdr.objects.filter(src=user)
    context = {
        'user_list': user_list,
        'users_online': users_online,
        'cdr_answered': my_cdr.filter(disposition='ANSWERED').count(),
        'cdr_busy': my_cdr.filter(disposition='BUSY').count(),
        'cdr_no_answered': my_cdr.filter(disposition='NO ANSWER').count(),
        'cdr_congestion': my_cdr.filter(disposition='CONGESTION').count(),
        'cdr_total': my_cdr.count(),
        'cdr_list': my_cdr.order_by('-calldate')[:5],
        'note_nbr': Note.objects.filter(user=user).order_by('-created_date').count(),
    }

    statuts = (statut_1, statut_2, statut_3, statut_4, statut_5,
               statut_6, statut_7, statut_8, statut_9)
    # (total-key prefix, per-statut key prefix, model)
    campaigns = (
        ('acquisition', 'acquisition', Acquisition),
        ('commprom', 'commprom', Commprom),
        ('dat', 'dat', Dat),
        ('recouvrement', 'recouvrement', Recouvrement),
        ('renouvellement', 'renouvellement', Renouvellement),
        # NB: the per-statut keys historically carry a double underscore.
        ('compte_dormant', 'compte_dormant_', Compte_dormant),
    )
    for total_prefix, statut_prefix, model in campaigns:
        qs = model.objects.filter(user=user)
        context['{}_total'.format(total_prefix)] = qs.count()
        for num, statut in enumerate(statuts, start=1):
            context['{}_{}_user'.format(statut_prefix, num)] = \
                qs.filter(Statut=statut).count()

    template_name = 'pages/dashboard/dashboard_view.html'
    return render(request, template_name, context)
@login_required
def dashboard_admin_view(request):
    """Render the administrator dashboard with platform-wide statistics.

    Same counters as ``dashboard_view`` but unfiltered (all users), plus
    the contact count and the most recent CDR ('cdr_duration').  Context
    keys are generated data-driven and kept byte-identical to what the
    template expects ('acquisition_1' … 'compte_dormant__9', with the
    historical double underscore).  The previously unused local ``user``
    has been removed.
    """
    all_cdr = Cdr.objects.all()
    context = {
        'users_online': Profile.objects.filter(is_online=True).count(),
        'user_list': User.objects.all().count(),
        'cdr_answered': all_cdr.filter(disposition='ANSWERED').count(),
        'cdr_busy': all_cdr.filter(disposition='BUSY').count(),
        'cdr_no_answered': all_cdr.filter(disposition='NO ANSWER').count(),
        'cdr_congestion': all_cdr.filter(disposition='CONGESTION').count(),
        'cdr_total': all_cdr.count(),
        'cdr_list': all_cdr.order_by('-calldate')[:5],
        # Single most recent CDR, exposed for its duration field.
        'cdr_duration': all_cdr.order_by('-calldate')[:1],
        'note_nbr': Note.objects.all().count(),
        'contact_list': Contact.objects.all().count(),
    }

    statuts = (statut_1, statut_2, statut_3, statut_4, statut_5,
               statut_6, statut_7, statut_8, statut_9)
    # (total-key prefix, per-statut key prefix, model)
    campaigns = (
        ('acquisition', 'acquisition', Acquisition),
        ('commprom', 'commprom', Commprom),
        ('dat', 'dat', Dat),
        ('recouvrement', 'recouvrement', Recouvrement),
        ('renouvellement', 'renouvellement', Renouvellement),
        # NB: the per-statut keys historically carry a double underscore.
        ('compte_dormant', 'compte_dormant_', Compte_dormant),
    )
    for total_prefix, statut_prefix, model in campaigns:
        qs = model.objects.all()
        context['{}_total'.format(total_prefix)] = qs.count()
        for num, statut in enumerate(statuts, start=1):
            context['{}_{}'.format(statut_prefix, num)] = \
                qs.filter(Statut=statut).count()

    template_name = 'pages/dashboard/dashboard_admin_view.html'
    return render(request, template_name, context)
| 47.48329
| 101
| 0.735369
|
4a0193ff340f302682d2a53757a9e9dfee76f43d
| 1,024
|
py
|
Python
|
reachapp/forms.py
|
nityaoberoi/reach
|
78444afdf49baad702ebb09f3e72379763cbc709
|
[
"MIT"
] | null | null | null |
reachapp/forms.py
|
nityaoberoi/reach
|
78444afdf49baad702ebb09f3e72379763cbc709
|
[
"MIT"
] | 3
|
2015-04-29T22:56:50.000Z
|
2015-06-15T17:56:54.000Z
|
reachapp/forms.py
|
joedoublej/reach
|
78444afdf49baad702ebb09f3e72379763cbc709
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import unicode_literals
from django import forms
from authtools.forms import UserCreationForm
class UserCreationForm(UserCreationForm):
    """A user-creation form whose password inputs are optional.

    Either both password fields are filled in or neither is; supplying
    exactly one of them is rejected in :meth:`clean_password2`.
    """

    def __init__(self, *args, **kwargs):
        super(UserCreationForm, self).__init__(*args, **kwargs)
        for name in ('password1', 'password2'):
            field = self.fields[name]
            field.required = False
            # If one field gets autocompleted but not the other, our
            # 'neither password or both password' validation would be
            # triggered, so disable browser autocomplete on both.
            field.widget.attrs['autocomplete'] = 'off'

    def clean_password2(self):
        """Reject the form when exactly one password field was filled."""
        password2 = super(UserCreationForm, self).clean_password2()
        password1 = self.cleaned_data.get("password1")
        if bool(password1) != bool(password2):
            raise forms.ValidationError("Fill out both fields")
        return password2
| 36.571429
| 73
| 0.688477
|
4a0195ee0f13af3dcdb34e743e967ddf76952870
| 3,507
|
py
|
Python
|
jointbtc/settings/default.py
|
koalalorenzo/jointbtc
|
c49724fd97e10cc8ddb5ae159baf751d11da67fc
|
[
"MIT"
] | null | null | null |
jointbtc/settings/default.py
|
koalalorenzo/jointbtc
|
c49724fd97e10cc8ddb5ae159baf751d11da67fc
|
[
"MIT"
] | null | null | null |
jointbtc/settings/default.py
|
koalalorenzo/jointbtc
|
c49724fd97e10cc8ddb5ae159baf751d11da67fc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Django settings for jointbtc project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the hard-coded value is only a development fallback;
# production deployments must supply SECRET_KEY via the environment.
DEFAULT_SECRET_KEY = '+fddo$$@8vmkpwz*-b00h7_7+4pmikbc0o9os$*25cdly9h6!a'
SECRET_KEY = os.environ.get('SECRET_KEY', DEFAULT_SECRET_KEY)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# NOTE(review): overwritten with ['*'] further below (Heroku-style layering).
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'payments',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'jointbtc.urls'
WSGI_APPLICATION = 'jointbtc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# NOTE(review): this sqlite default is replaced below by the configuration
# parsed from $DATABASE_URL via dj_database_url.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Parse database configuration from $DATABASE_URL
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
# NOTE(review): duplicate assignment; STATIC_URL is already '/static/' above.
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
# Blockchain data
BLOCKCHAIN_API_CODE = os.environ.get('BLOCKCHAIN_API_CODE', "")
GENERATE_WALLET = os.environ.get('GENERATE_WALLET', "True")
if GENERATE_WALLET == "True":
    # Creates a brand-new wallet at *import time* (module-level side effect)
    # with a random 20-character alphanumeric password.
    from blockchain import createwallet
    import random
    import string
    WALLET_PASSWORD = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(20))
    WALLET = createwallet.create_wallet(WALLET_PASSWORD, BLOCKCHAIN_API_CODE)
    WALLET_ID = WALLET.identifier
    # NOTE(review): this prints the wallet id AND password to stdout — confirm
    # this is intended, as credentials may end up in log aggregation.
    print(WALLET_ID, WALLET_PASSWORD)
else:
    # Reuse an existing wallet supplied via environment variables.
    WALLET_ID = os.environ.get('WALLET_ID', "")
    WALLET_PASSWORD = os.environ.get('WALLET_PASSWORD', "")
    WALLET = None
# Fees Wallet Addresses
DEFAULT_TRANSACTION_FEE = int(0.0002 * 100000000) # Satoshis
SERVICE_FEE_AMOUNT = int(0.0006 * 100000000) # Satoshis
SERVICE_FEE_ADDRESS = "1GsAxo7aiuBkTAoUgb4ePWhUrBm9YW9cTq"
DEFAULT_TRANSACTION_NOTE = "testing"
| 26.770992
| 118
| 0.742515
|
4a0197812d75713fd139c35de7c2ed2462944d70
| 647
|
py
|
Python
|
uni_ticket/migrations/0009_auto_20190415_0945.py
|
biotech2021/uniTicket
|
8c441eac18e67a983e158326b1c4b82f00f1f1ef
|
[
"Apache-2.0"
] | 15
|
2019-09-06T06:47:08.000Z
|
2022-01-17T06:39:54.000Z
|
uni_ticket/migrations/0009_auto_20190415_0945.py
|
biotech2021/uniTicket
|
8c441eac18e67a983e158326b1c4b82f00f1f1ef
|
[
"Apache-2.0"
] | 69
|
2019-09-06T12:03:19.000Z
|
2022-03-26T14:30:53.000Z
|
uni_ticket/migrations/0009_auto_20190415_0945.py
|
biotech2021/uniTicket
|
8c441eac18e67a983e158326b1c4b82f00f1f1ef
|
[
"Apache-2.0"
] | 13
|
2019-09-11T10:54:20.000Z
|
2021-11-23T09:09:19.000Z
|
# Generated by Django 2.1.7 on 2019-04-15 07:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: caps TicketCategory.slug at 40 chars and
    enforces uniqueness of (slug, structure) and (name, structure) pairs.

    Do not edit by hand beyond what ``makemigrations`` produced.
    """

    dependencies = [
        ('organizational_area', '0020_auto_20190415_0945'),
        ('uni_ticket', '0008_auto_20190415_0922'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ticketcategory',
            name='slug',
            field=models.SlugField(max_length=40),
        ),
        migrations.AlterUniqueTogether(
            name='ticketcategory',
            unique_together={('slug', 'organizational_structure'), ('name', 'organizational_structure')},
        ),
    ]
| 26.958333
| 105
| 0.618238
|
4a0197e46620b0c12ddd7c232ebfd394d6b40fcf
| 2,128
|
py
|
Python
|
checksum.py
|
littlekign/udpoptions-tools
|
6570d48a8a52bbde802cbefe7dcbded1d08121a0
|
[
"BSD-2-Clause"
] | null | null | null |
checksum.py
|
littlekign/udpoptions-tools
|
6570d48a8a52bbde802cbefe7dcbded1d08121a0
|
[
"BSD-2-Clause"
] | null | null | null |
checksum.py
|
littlekign/udpoptions-tools
|
6570d48a8a52bbde802cbefe7dcbded1d08121a0
|
[
"BSD-2-Clause"
] | 3
|
2017-09-06T06:26:54.000Z
|
2020-01-05T05:30:20.000Z
|
#!/usr/bin/env python
import struct
def hexdump(databytes):
    """Print *databytes* as lowercase two-digit hex, 16 bytes per line.

    An extra space separates each group of 8 bytes; a newline ends each
    full group of 16.  (A trailing partial line is not newline-terminated,
    matching the original behavior.)  The unused ``total`` accumulator was
    removed and the manual counter replaced with :func:`enumerate`.
    """
    for count, b in enumerate(databytes, start=1):
        print("{:02x} ".format(b), end='')
        if count % 8 == 0:
            print(" ", end='')
        if count % 16 == 0:
            print("")
def internetchecksum(pkt):
    """Return the 16-bit ones'-complement Internet checksum (RFC 1071) of *pkt*.

    Odd-length input is implicitly padded with a single zero byte.
    The result is the complement of the carry-folded 16-bit word sum.
    """
    if len(pkt) % 2 != 0:
        # Pad to an even length; bytes concatenation replaces the old
        # bytearray round-trip.
        pkt = pkt + b"\x00"
    words = struct.unpack("!{}H".format(len(pkt) // 2), pkt)
    total = sum(words)
    # Fold carry bits back into the low 16 bits until none remain.
    while total > 0xFFFF:
        total = (total & 0xFFFF) + (total >> 16)
    return total ^ 0xFFFF
def calculateocs(pkt):
    """Return the one-byte options checksum of *pkt*.

    Un-complements the 16-bit Internet checksum back to the raw folded
    sum, folds it again down to 8 bits with carry wrap-around, and
    complements the result.  Leftover debug ``print`` calls were removed;
    the returned value is unchanged.
    """
    # internetchecksum() returns the complement; undo it to get the sum.
    res = internetchecksum(pkt) ^ 0xFFFF
    # Fold the 16-bit sum into 8 bits, wrapping carries around.
    while res > 0x00FF:
        res = (res & 0x00FF) + (res >> 8)
    return res ^ 0xFF
def calculate8bit(pkt):
    """Return the complemented 8-bit carry-wrap checksum of *pkt*.

    Bug fix: the original fold masked the high part with ``0xFF00`` before
    shifting, silently discarding bits 16 and above, so any input whose
    byte sum reached 0x10000 (>= ~257 bytes of 0xFF) produced a wrong
    checksum.  ``total >> 8`` keeps every carry bit.
    """
    total = sum(pkt)
    while total > 0x00FF:
        total = (total & 0x00FF) + (total >> 8)
    return total ^ 0xFF
if __name__ == "__main__":
    # Demo: compute the UDP checksum of a hand-built packet, then the
    # one-byte options checksum, and verify it self-cancels.
    data = bytes("Hello World\x01\x01\x01\x01\x01\x01\x00", 'ascii')
    sourceaddr = bytearray([139, 133, 204, 55])
    destaddr = bytearray([139, 133, 204, 4])
    proto = 17  # IPPROTO_UDP
    udplen = 8 + len(data)  # UDP header (8 bytes) + payload
    sport = 2600
    dport = 2500
    cksum = 0  # checksum field is zero while computing
    # Pseudo-header (src IP, dst IP, zero, proto, UDP length) followed by
    # the UDP header (sport, dport, length, checksum) and the payload.
    pkt = struct.pack("!4s4sBBHHHHH{}s".format(len(data)),
                      sourceaddr, destaddr,
                      0, proto, udplen,
                      sport, dport,
                      udplen, cksum,
                      data)
    result = internetchecksum(pkt)
    print("checksum: {}".format(hex(result)))
    # Options blob: byte 1 is the checksum slot, zeroed for computation.
    options = bytearray([0x02,0x00,0x01,0x01,0x01,0x01,0x00])
    options[1] = 0x00
    result = calculateocs(options)
    print("checksum: 0x{:02x}".format(result))
    # With the checksum filled in, recomputing should yield zero.
    options[1] = result
    result = calculateocs(options)
    print("inverse: 0x{:02x}".format(result))
| 22.638298
| 68
| 0.547932
|
4a01994b044b51671a672f4ea4a94e89c89ff6d4
| 1,780
|
py
|
Python
|
PatternDesign/Factory/abstract_factory.py
|
QAlexBall/Learning_Py
|
8a5987946928a9d86f6807555ed435ac604b2c44
|
[
"MIT"
] | 2
|
2019-01-24T15:06:59.000Z
|
2019-01-25T07:34:45.000Z
|
PatternDesign/Factory/abstract_factory.py
|
QAlexBall/Learning_Py
|
8a5987946928a9d86f6807555ed435ac604b2c44
|
[
"MIT"
] | 1
|
2019-12-23T09:45:11.000Z
|
2019-12-23T09:45:11.000Z
|
PatternDesign/Factory/abstract_factory.py
|
QAlexBall/Learning_Py
|
8a5987946928a9d86f6807555ed435ac604b2c44
|
[
"MIT"
] | 1
|
2019-07-18T14:21:35.000Z
|
2019-07-18T14:21:35.000Z
|
from abc import ABCMeta, abstractmethod
class PizzaFactory(metaclass=ABCMeta):
    """Abstract factory: declares one creator for a veg and one for a
    non-veg pizza; concrete regional factories provide both."""

    @abstractmethod
    def create_veg_pizza(self):
        # Concrete factories return a VegPizza subclass instance.
        pass

    @abstractmethod
    def create_nonveg_pizza(self):
        # Concrete factories return a NonVegPizza subclass instance.
        pass
class IndianPizzaFactory(PizzaFactory):
    """Concrete factory producing the Indian menu."""

    def create_nonveg_pizza(self):
        """Indian non-veg choice."""
        return ChickenPizza()

    def create_veg_pizza(self):
        """Indian veg choice."""
        return DeluxVeggiePizza()
class USPizzaFactory(PizzaFactory):
    """Concrete factory producing the US menu."""

    def create_nonveg_pizza(self):
        """US non-veg choice."""
        return HamPizza()

    def create_veg_pizza(self):
        """US veg choice."""
        return MexicanVegPizza()
class VegPizza(metaclass=ABCMeta):
    """Abstract product: a vegetarian pizza that can be prepared."""

    @abstractmethod
    def prepare(self, VegPizza):
        pass
class NonVegPizza(metaclass=ABCMeta):
    """Abstract product: a non-veg pizza served on top of a veg base."""

    @abstractmethod
    def serve(self, VegPizza):
        pass
class DeluxVeggiePizza(VegPizza):
    """Veg pizza produced by the Indian factory."""

    def prepare(self):
        """Announce preparation of this pizza."""
        pizza_name = type(self).__name__
        print("Prepare ", pizza_name)
class ChickenPizza(NonVegPizza):
    """Non-veg pizza produced by the Indian factory."""

    def serve(self, VegPizza):
        """Announce serving; *VegPizza* is the prepared veg base."""
        base_name = type(VegPizza).__name__
        print(type(self).__name__, " is served with Chicken on ", base_name)
class MexicanVegPizza(VegPizza):
    """Veg pizza produced by the US factory."""

    def prepare(self):
        """Announce preparation of this pizza."""
        pizza_name = type(self).__name__
        print("Prepare ", pizza_name)
class HamPizza(NonVegPizza):
    """Non-veg pizza produced by the US factory."""

    def serve(self, VegPizza):
        """Announce serving; *VegPizza* is the prepared veg base.

        Bug fix: the message previously said "Chicken" — a copy-paste
        from ChickenPizza; a HamPizza is served with Ham.
        """
        print(type(self).__name__, " is served with Ham on ", type(VegPizza).__name__)
class PizzaStore:
    """Client of the abstract factories: for each regional factory,
    prepares the veg pizza and serves the non-veg pizza on top of it."""

    def __init__(self):
        pass

    def make_pizzas(self):
        """Run the prepare/serve cycle for both regional factories."""
        for factory in (IndianPizzaFactory(), USPizzaFactory()):
            self.factory = factory
            self.nonveg_pizza = factory.create_nonveg_pizza()
            self.veg_pizza = factory.create_veg_pizza()
            veg, nonveg = self.veg_pizza, self.nonveg_pizza
            veg.prepare()
            nonveg.serve(veg)
# Demo: runs at import time, exercising both factories.
pizza = PizzaStore()
pizza.make_pizzas()
| 22.820513
| 90
| 0.658427
|
4a019b059a59a7983105c0398424392cb10bf0b1
| 2,133
|
py
|
Python
|
tests/test_dataset.py
|
s-scherrer/gswp
|
aa059608f2e4c55d95a990cc13b58d260260e2a1
|
[
"MIT"
] | null | null | null |
tests/test_dataset.py
|
s-scherrer/gswp
|
aa059608f2e4c55d95a990cc13b58d260260e2a1
|
[
"MIT"
] | null | null | null |
tests/test_dataset.py
|
s-scherrer/gswp
|
aa059608f2e4c55d95a990cc13b58d260260e2a1
|
[
"MIT"
] | 1
|
2020-12-01T13:19:52.000Z
|
2020-12-01T13:19:52.000Z
|
from datetime import datetime
import numpy as np
from pathlib import Path
import pytest
from gswp.interface import GSWPDataset
@pytest.fixture
def filename_pattern():
    """Glob pattern matching the bundled NetCDF test files."""
    return Path(__file__).resolve().parent / "test_data" / "*.nc"
def test_datetime_compatibility(filename_pattern):
    """
    Tests whether reading using datetime and returning datetime arrays from
    tstamps_for_daterange works.
    """
    dataset = GSWPDataset(filename_pattern)
    timestamps = dataset.tstamps_for_daterange("1970-01-01", "1970-01-31")
    first = timestamps[0]
    assert isinstance(first, datetime)
    # the timestamp must round-trip through read()
    image = dataset.read(first)
    assert image.timestamp == first
def test_only_land(filename_pattern):
    """
    Tests if the only_land feature works as expected.
    """
    dataset = GSWPDataset(filename_pattern, only_land=True)
    total_gpis = dataset.dataset.mrsos.isel(time=0).size
    assert len(dataset.grid.activegpis) < total_gpis
    # pick a random timestamp and check land pixels contain no NaN
    n_times = len(dataset.dataset.mrsos.time)
    t_index = np.random.randint(n_times)
    land = dataset.dataset.mrsos.isel(time=t_index, latlon=dataset.grid.activegpis)
    assert not np.any(np.isnan(land))
def test_bbox(filename_pattern):
    """
    Tests the bounding box feature
    """
    lon_min, lat_min, lon_max, lat_max = -160, 15, -150, 25
    dataset = GSWPDataset(
        filename_pattern, bbox=[lon_min, lat_min, lon_max, lat_max]
    )
    total_gpis = dataset.dataset.mrsos.isel(time=0).size
    grid = dataset.grid
    assert hasattr(dataset, "bbox_gpis")
    assert len(grid.activegpis) < total_gpis
    assert len(np.unique(grid.activearrcell)) == 4
    # every active grid point must lie inside the requested box
    assert not np.any(grid.arrlon < lon_min)
    assert not np.any(grid.arrlat < lat_min)
    assert not np.any(grid.arrlon > lon_max)
    assert not np.any(grid.arrlat > lat_max)
def test_grid_lons(filename_pattern):
    """
    Tests if the grid of the dataset has only longitudes between -180 and 180
    """
    arrlon = GSWPDataset(filename_pattern).grid.arrlon
    assert np.all(arrlon <= 180)
    assert np.all(arrlon > -180)
    # at least one western-hemisphere point must be present
    assert np.any(arrlon < 0)
| 25.094118
| 77
| 0.686357
|
4a019b15ac6f525121091224e6edd8d01d9c9f63
| 32,951
|
py
|
Python
|
tests/test_plugin.py
|
korygill/cmd2
|
81cbc40b5dfa6f615a621ed42c6ed437faabb4da
|
[
"MIT"
] | null | null | null |
tests/test_plugin.py
|
korygill/cmd2
|
81cbc40b5dfa6f615a621ed42c6ed437faabb4da
|
[
"MIT"
] | null | null | null |
tests/test_plugin.py
|
korygill/cmd2
|
81cbc40b5dfa6f615a621ed42c6ed437faabb4da
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# flake8: noqa E302
"""
Test plugin infrastructure and hooks.
"""
import argparse
import sys
import pytest
import cmd2
from cmd2 import (
Cmd2ArgumentParser,
exceptions,
plugin,
with_argparser,
)
# Python 3.5 had some regressions in the unitest.mock module, so use 3rd party mock if available
try:
import mock
except ImportError:
from unittest import mock
class Plugin:
"""A mixin class for testing hook registration and calling"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_counters()
def reset_counters(self):
self.called_preparse = 0
self.called_postparsing = 0
self.called_precmd = 0
self.called_postcmd = 0
self.called_cmdfinalization = 0
###
#
# preloop and postloop hooks
# which share the same signature and are thus interchangable
#
###
def prepost_hook_one(self) -> None:
"""Method used for preloop or postloop hooks"""
self.poutput("one")
def prepost_hook_two(self) -> None:
"""Another method used for preloop or postloop hooks"""
self.poutput("two")
def prepost_hook_too_many_parameters(self, param) -> None:
"""A preloop or postloop hook with too many parameters"""
pass
def prepost_hook_with_wrong_return_annotation(self) -> bool:
"""A preloop or postloop hook with incorrect return type"""
pass
###
#
# preparse hook
#
###
def preparse(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""Preparsing hook"""
self.called_preparse += 1
return data
###
#
# Postparsing hooks
#
###
    def postparse_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
        """A postparsing hook"""
        self.called_postparsing += 1
        return data

    def postparse_hook_stop(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
        """A postparsing hook which requests application exit"""
        self.called_postparsing += 1
        # Setting data.stop asks cmd2 to terminate the application loop.
        data.stop = True
        return data

    def postparse_hook_emptystatement(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
        """A postparsing hook which raises an EmptyStatement exception"""
        self.called_postparsing += 1
        raise exceptions.EmptyStatement

    def postparse_hook_exception(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
        """A postparsing hook which raises an exception"""
        self.called_postparsing += 1
        raise ValueError

    # The hooks below are deliberately malformed fixtures; registering any of
    # them should raise TypeError.
    def postparse_hook_too_many_parameters(self, data1, data2) -> cmd2.plugin.PostparsingData:
        """A postparsing hook with too many parameters"""
        pass

    def postparse_hook_undeclared_parameter_annotation(self, data) -> cmd2.plugin.PostparsingData:
        """A postparsing hook with an undeclared parameter type"""
        pass

    def postparse_hook_wrong_parameter_annotation(self, data: str) -> cmd2.plugin.PostparsingData:
        """A postparsing hook with the wrong parameter type"""
        pass

    def postparse_hook_undeclared_return_annotation(self, data: cmd2.plugin.PostparsingData):
        """A postparsing hook with an undeclared return type"""
        pass

    def postparse_hook_wrong_return_annotation(self, data: cmd2.plugin.PostparsingData) -> str:
        """A postparsing hook with the wrong return type"""
        pass
###
#
# precommand hooks, some valid, some invalid
#
###
    def precmd(self, statement: cmd2.Statement) -> cmd2.Statement:
        """Override cmd.Cmd method"""
        self.called_precmd += 1
        return statement

    def precmd_hook(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
        """A precommand hook"""
        self.called_precmd += 1
        return data

    def precmd_hook_emptystatement(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
        """A precommand hook which raises an EmptyStatement exception"""
        self.called_precmd += 1
        raise exceptions.EmptyStatement

    def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
        """A precommand hook which raises an exception"""
        self.called_precmd += 1
        raise ValueError

    # The hooks below are deliberately malformed fixtures; registering any of
    # them should raise TypeError.
    def precmd_hook_not_enough_parameters(self) -> plugin.PrecommandData:
        """A precommand hook with no parameters"""
        pass

    def precmd_hook_too_many_parameters(self, one: plugin.PrecommandData, two: str) -> plugin.PrecommandData:
        """A precommand hook with too many parameters"""
        return one

    def precmd_hook_no_parameter_annotation(self, data) -> plugin.PrecommandData:
        """A precommand hook with no type annotation on the parameter"""
        return data

    def precmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PrecommandData:
        """A precommand hook with the incorrect type annotation on the parameter"""
        return data

    def precmd_hook_no_return_annotation(self, data: plugin.PrecommandData):
        """A precommand hook with no type annotation on the return value"""
        return data

    def precmd_hook_wrong_return_annotation(self, data: plugin.PrecommandData) -> cmd2.Statement:
        """A precommand hook with the wrong return type annotation"""
        return self.statement_parser.parse('hi there')
###
#
# postcommand hooks, some valid, some invalid
#
###
    def postcmd(self, stop: bool, statement: cmd2.Statement) -> bool:
        """Override cmd.Cmd method"""
        self.called_postcmd += 1
        return stop

    def postcmd_hook(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
        """A postcommand hook"""
        self.called_postcmd += 1
        return data

    def postcmd_hook_exception(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
        """A postcommand hook which raises an exception"""
        self.called_postcmd += 1
        raise ZeroDivisionError

    # The hooks below are deliberately malformed fixtures; registering any of
    # them should raise TypeError.  (Docstrings previously said "precommand"
    # due to copy-paste; these are postcommand hooks.)
    def postcmd_hook_not_enough_parameters(self) -> plugin.PostcommandData:
        """A postcommand hook with no parameters"""
        pass

    def postcmd_hook_too_many_parameters(self, one: plugin.PostcommandData, two: str) -> plugin.PostcommandData:
        """A postcommand hook with too many parameters"""
        return one

    def postcmd_hook_no_parameter_annotation(self, data) -> plugin.PostcommandData:
        """A postcommand hook with no type annotation on the parameter"""
        return data

    def postcmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PostcommandData:
        """A postcommand hook with the incorrect type annotation on the parameter"""
        return data

    def postcmd_hook_no_return_annotation(self, data: plugin.PostcommandData):
        """A postcommand hook with no type annotation on the return value"""
        return data

    def postcmd_hook_wrong_return_annotation(self, data: plugin.PostcommandData) -> cmd2.Statement:
        """A postcommand hook with the wrong return type annotation"""
        return self.statement_parser.parse('hi there')
###
#
# command finalization hooks, some valid, some invalid
#
###
    def cmdfinalization_hook(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
        """A command finalization hook."""
        self.called_cmdfinalization += 1
        return data

    def cmdfinalization_hook_stop(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
        """A command finalization hook which requests application exit"""
        self.called_cmdfinalization += 1
        data.stop = True
        return data

    def cmdfinalization_hook_exception(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
        """A command finalization hook which raises an exception"""
        self.called_cmdfinalization += 1
        raise ValueError

    def cmdfinalization_hook_system_exit(self, data: cmd2.plugin.CommandFinalizationData) -> \
            cmd2.plugin.CommandFinalizationData:
        """A command finalization hook which raises a SystemExit"""
        self.called_cmdfinalization += 1
        raise SystemExit

    def cmdfinalization_hook_keyboard_interrupt(self, data: cmd2.plugin.CommandFinalizationData) -> \
            cmd2.plugin.CommandFinalizationData:
        """A command finalization hook which raises a KeyboardInterrupt"""
        self.called_cmdfinalization += 1
        raise KeyboardInterrupt

    # The hooks below are deliberately malformed fixtures; registering any of
    # them should raise TypeError.
    def cmdfinalization_hook_not_enough_parameters(self) -> plugin.CommandFinalizationData:
        """A command finalization hook with no parameters."""
        pass

    def cmdfinalization_hook_too_many_parameters(self, one: plugin.CommandFinalizationData, two: str) -> \
            plugin.CommandFinalizationData:
        """A command finalization hook with too many parameters."""
        return one

    def cmdfinalization_hook_no_parameter_annotation(self, data) -> plugin.CommandFinalizationData:
        """A command finalization hook with no type annotation on the parameter."""
        return data

    def cmdfinalization_hook_wrong_parameter_annotation(self, data: str) -> plugin.CommandFinalizationData:
        """A command finalization hook with the incorrect type annotation on the parameter."""
        return data

    def cmdfinalization_hook_no_return_annotation(self, data: plugin.CommandFinalizationData):
        """A command finalization hook with no type annotation on the return value."""
        return data

    def cmdfinalization_hook_wrong_return_annotation(self, data: plugin.CommandFinalizationData) -> cmd2.Statement:
        """A command finalization hook with the wrong return type annotation."""
        return self.statement_parser.parse('hi there')
class PluggedApp(Plugin, cmd2.Cmd):
    """A sample app with a plugin mixed in"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def do_say(self, statement):
        """Repeat back the arguments"""
        self.poutput(statement)

    def do_skip_postcmd_hooks(self, _):
        """Raise SkipPostcommandHooks so postcmd hooks are skipped but
        command-finalization hooks still run."""
        self.poutput("In do_skip_postcmd_hooks")
        raise exceptions.SkipPostcommandHooks

    # Class-level parser consumed by the with_argparser decorator below.
    parser = Cmd2ArgumentParser(description="Test parser")
    parser.add_argument("my_arg", help="some help text")

    @with_argparser(parser)
    def do_argparse_cmd(self, namespace: argparse.Namespace):
        """Repeat back the arguments"""
        self.poutput(namespace.cmd2_statement.get())
###
#
# test pre and postloop hooks
#
###
def test_register_preloop_hook_too_many_parameters():
    """A preloop hook that takes extra arguments must be rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_preloop_hook(cli.prepost_hook_too_many_parameters)


def test_register_preloop_hook_with_return_annotation():
    """A preloop hook annotated to return non-None must be rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_preloop_hook(cli.prepost_hook_with_wrong_return_annotation)
def test_preloop_hook(capsys):
    """A single registered preloop hook runs once, before any command output."""
    # Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
    testargs = ["prog", "say hello", 'quit']
    with mock.patch.object(sys, 'argv', testargs):
        app = PluggedApp()
        app.register_preloop_hook(app.prepost_hook_one)
        app.cmdloop()
    out, err = capsys.readouterr()
    # Hook output ("one") must precede the command output ("hello").
    assert out == 'one\nhello\n'
    assert not err


def test_preloop_hooks(capsys):
    """Multiple preloop hooks run in registration order before the loop."""
    # Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
    testargs = ["prog", "say hello", 'quit']
    with mock.patch.object(sys, 'argv', testargs):
        app = PluggedApp()
        app.register_preloop_hook(app.prepost_hook_one)
        app.register_preloop_hook(app.prepost_hook_two)
        app.cmdloop()
    out, err = capsys.readouterr()
    assert out == 'one\ntwo\nhello\n'
    assert not err
def test_register_postloop_hook_too_many_parameters():
    """A postloop hook that takes extra arguments must be rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postloop_hook(cli.prepost_hook_too_many_parameters)


def test_register_postloop_hook_with_wrong_return_annotation():
    """A postloop hook annotated to return non-None must be rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postloop_hook(cli.prepost_hook_with_wrong_return_annotation)
def test_postloop_hook(capsys):
    """A single registered postloop hook runs once, after the command loop exits."""
    # Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
    testargs = ["prog", "say hello", 'quit']
    with mock.patch.object(sys, 'argv', testargs):
        app = PluggedApp()
        app.register_postloop_hook(app.prepost_hook_one)
        app.cmdloop()
    out, err = capsys.readouterr()
    # Hook output ("one") must follow the command output ("hello").
    assert out == 'hello\none\n'
    assert not err


def test_postloop_hooks(capsys):
    """Multiple postloop hooks run in registration order after the loop."""
    # Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
    testargs = ["prog", "say hello", 'quit']
    with mock.patch.object(sys, 'argv', testargs):
        app = PluggedApp()
        app.register_postloop_hook(app.prepost_hook_one)
        app.register_postloop_hook(app.prepost_hook_two)
        app.cmdloop()
    out, err = capsys.readouterr()
    assert out == 'hello\none\ntwo\n'
    assert not err
###
#
# test preparse hook
#
###
def test_preparse(capsys):
    """A hook registered via register_postparsing_hook is invoked once per command."""
    cli = PluggedApp()
    cli.register_postparsing_hook(cli.preparse)
    cli.onecmd_plus_hooks('say hello')
    output, error = capsys.readouterr()
    assert output == 'hello\n'
    assert not error
    assert cli.called_preparse == 1
###
#
# test postparsing hooks
#
###
def test_postparsing_hook_too_many_parameters():
    """Postparsing hooks with extra parameters are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postparsing_hook(cli.postparse_hook_too_many_parameters)


def test_postparsing_hook_undeclared_parameter_annotation():
    """Postparsing hooks without a parameter annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postparsing_hook(cli.postparse_hook_undeclared_parameter_annotation)


def test_postparsing_hook_wrong_parameter_annotation():
    """Postparsing hooks with a wrongly-typed parameter are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postparsing_hook(cli.postparse_hook_wrong_parameter_annotation)


def test_postparsing_hook_undeclared_return_annotation():
    """Postparsing hooks without a return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postparsing_hook(cli.postparse_hook_undeclared_return_annotation)


def test_postparsing_hook_wrong_return_annotation():
    """Postparsing hooks with a wrongly-typed return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postparsing_hook(cli.postparse_hook_wrong_return_annotation)
def test_postparsing_hook(capsys):
    """Each registration of a postparsing hook adds one invocation per command."""
    app = PluggedApp()
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # no hook registered yet, so the counter must still be zero
    assert not app.called_postparsing

    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    assert app.called_postparsing == 1

    # register the function again, so it should be called twice
    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    assert app.called_postparsing == 2
def test_postparsing_hook_stop_first(capsys):
    """A stop request from the first postparsing hook short-circuits later hooks."""
    app = PluggedApp()
    app.register_postparsing_hook(app.postparse_hook_stop)
    stop = app.onecmd_plus_hooks('say hello')
    assert app.called_postparsing == 1
    assert stop

    # register another function but it shouldn't be called
    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
    assert app.called_postparsing == 1
    assert stop


def test_postparsing_hook_stop_second(capsys):
    """A stop request from a later postparsing hook still short-circuits the rest."""
    app = PluggedApp()
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
    assert app.called_postparsing == 1
    assert not stop

    # register another function and make sure it gets called
    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook_stop)
    stop = app.onecmd_plus_hooks('say hello')
    assert app.called_postparsing == 2
    assert stop

    # register a third function which shouldn't be called
    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
    assert app.called_postparsing == 2
    assert stop
def test_postparsing_hook_emptystatement_first(capsys):
    """An EmptyStatement raised by the first postparsing hook suppresses the
    command and prevents later postparsing hooks from running."""
    app = PluggedApp()
    app.register_postparsing_hook(app.postparse_hook_emptystatement)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    assert app.called_postparsing == 1

    # register another function but it shouldn't be called
    app.reset_counters()
    # Bug fix: the original assigned `stop` from register_postparsing_hook()
    # (which returns None) and discarded the onecmd_plus_hooks() return value,
    # making the `assert not stop` below vacuous.
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    assert app.called_postparsing == 1
def test_postparsing_hook_emptystatement_second(capsys):
    """An EmptyStatement from a later postparsing hook suppresses the command
    and any hooks registered after it."""
    app = PluggedApp()
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert not err
    assert app.called_postparsing == 1

    # register another function and make sure it gets called
    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook_emptystatement)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    assert app.called_postparsing == 2

    # register a third function which shouldn't be called
    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    assert app.called_postparsing == 2
def test_postparsing_hook_exception(capsys):
    """An exception in a postparsing hook is reported on stderr, suppresses the
    command, and prevents later postparsing hooks from running."""
    app = PluggedApp()
    app.register_postparsing_hook(app.postparse_hook_exception)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert err
    assert app.called_postparsing == 1

    # register another function, but it shouldn't be called
    app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert err
    assert app.called_postparsing == 1
###
#
# test precmd hooks
#
#####
def test_register_precmd_hook_parameter_count():
    """Precmd hooks with the wrong number of parameters are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_precmd_hook(cli.precmd_hook_not_enough_parameters)
    with pytest.raises(TypeError):
        cli.register_precmd_hook(cli.precmd_hook_too_many_parameters)


def test_register_precmd_hook_no_parameter_annotation():
    """Precmd hooks without a parameter annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_precmd_hook(cli.precmd_hook_no_parameter_annotation)


def test_register_precmd_hook_wrong_parameter_annotation():
    """Precmd hooks with a wrongly-typed parameter are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_precmd_hook(cli.precmd_hook_wrong_parameter_annotation)


def test_register_precmd_hook_no_return_annotation():
    """Precmd hooks without a return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_precmd_hook(cli.precmd_hook_no_return_annotation)


def test_register_precmd_hook_wrong_return_annotation():
    """Precmd hooks with a wrongly-typed return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_precmd_hook(cli.precmd_hook_wrong_return_annotation)
def test_precmd_hook(capsys):
    """precmd() always runs; each registered precmd hook adds one call."""
    app = PluggedApp()
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # without registering any hooks, precmd() should be called
    assert app.called_precmd == 1

    app.reset_counters()
    app.register_precmd_hook(app.precmd_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # with one hook registered, we should get precmd() and the hook
    assert app.called_precmd == 2

    # register the function again, so it should be called twice
    app.reset_counters()
    app.register_precmd_hook(app.precmd_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # with two hooks registered, we should get precmd() and both hooks
    assert app.called_precmd == 3
def test_precmd_hook_emptystatement_first(capsys):
    """An EmptyStatement from the first precmd hook suppresses the command,
    later precmd hooks, and precmd() itself."""
    app = PluggedApp()
    app.register_precmd_hook(app.precmd_hook_emptystatement)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    # since the registered hooks are called before precmd(), if a registered
    # hook throws an exception, precmd() is never called
    assert app.called_precmd == 1

    # register another function but it shouldn't be called
    app.reset_counters()
    # Bug fix: the original assigned `stop` from register_precmd_hook()
    # (which returns None) and discarded the onecmd_plus_hooks() return
    # value, making the `assert not stop` below vacuous.
    app.register_precmd_hook(app.precmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    # the exception raised by the first hook should prevent the second
    # hook from being called, and it also prevents precmd() from being
    # called
    assert app.called_precmd == 1
def test_precmd_hook_emptystatement_second(capsys):
    """An EmptyStatement from a later precmd hook suppresses the command,
    subsequent hooks, and precmd()."""
    app = PluggedApp()
    app.register_precmd_hook(app.precmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert not err
    # with one hook registered, we should get precmd() and the hook
    assert app.called_precmd == 2

    # register another function and make sure it gets called
    app.reset_counters()
    app.register_precmd_hook(app.precmd_hook_emptystatement)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    # since the registered hooks are called before precmd(), if a registered
    # hook throws an exception, precmd() is never called
    assert app.called_precmd == 2

    # register a third function which shouldn't be called
    app.reset_counters()
    app.register_precmd_hook(app.precmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    # the exception raised by the second hook should prevent the third
    # hook from being called. since the registered hooks are called before precmd(),
    # if a registered hook throws an exception, precmd() is never called
    assert app.called_precmd == 2
###
#
# test postcmd hooks
#
####
def test_register_postcmd_hook_parameter_count():
    """Postcmd hooks with the wrong number of parameters are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postcmd_hook(cli.postcmd_hook_not_enough_parameters)
    with pytest.raises(TypeError):
        cli.register_postcmd_hook(cli.postcmd_hook_too_many_parameters)


def test_register_postcmd_hook_no_parameter_annotation():
    """Postcmd hooks without a parameter annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postcmd_hook(cli.postcmd_hook_no_parameter_annotation)


def test_register_postcmd_hook_wrong_parameter_annotation():
    """Postcmd hooks with a wrongly-typed parameter are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postcmd_hook(cli.postcmd_hook_wrong_parameter_annotation)


def test_register_postcmd_hook_no_return_annotation():
    """Postcmd hooks without a return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postcmd_hook(cli.postcmd_hook_no_return_annotation)


def test_register_postcmd_hook_wrong_return_annotation():
    """Postcmd hooks with a wrongly-typed return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_postcmd_hook(cli.postcmd_hook_wrong_return_annotation)
def test_postcmd(capsys):
    """postcmd() always runs; each registered postcmd hook adds one call."""
    app = PluggedApp()
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # without registering any hooks, postcmd() should be called
    assert app.called_postcmd == 1

    app.reset_counters()
    app.register_postcmd_hook(app.postcmd_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # with one hook registered, we should get postcmd() and the hook
    assert app.called_postcmd == 2

    # register the function again, so it should be called twice
    app.reset_counters()
    app.register_postcmd_hook(app.postcmd_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # with two hooks registered, we should get postcmd() and both hooks
    assert app.called_postcmd == 3
def test_postcmd_exception_first(capsys):
    """An exception from the first postcmd hook is reported on stderr and
    prevents later postcmd hooks and postcmd() from running."""
    app = PluggedApp()
    app.register_postcmd_hook(app.postcmd_hook_exception)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert err
    # since the registered hooks are called before postcmd(), if a registered
    # hook throws an exception, postcmd() is never called. So we should have
    # a count of one because we called the hook that raised the exception
    assert app.called_postcmd == 1

    # register another function but it shouldn't be called
    app.reset_counters()
    # Bug fix: the original assigned `stop` from register_postcmd_hook()
    # (which returns None) and discarded the onecmd_plus_hooks() return
    # value, making the `assert not stop` below vacuous.
    app.register_postcmd_hook(app.postcmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert err
    # the exception raised by the first hook should prevent the second
    # hook from being called, and it also prevents postcmd() from being
    # called
    assert app.called_postcmd == 1
def test_postcmd_exception_second(capsys):
    """An exception from a later postcmd hook prevents subsequent hooks and
    postcmd() from running."""
    app = PluggedApp()
    app.register_postcmd_hook(app.postcmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert not err
    # with one hook registered, we should get the hook and postcmd()
    assert app.called_postcmd == 2

    # register another function which should be called
    app.reset_counters()
    # Bug fix: the original assigned `stop` from register_postcmd_hook()
    # (which returns None) and discarded the onecmd_plus_hooks() return
    # value, making the `assert not stop` below vacuous.
    app.register_postcmd_hook(app.postcmd_hook_exception)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert err
    # the exception raised by the second hook prevents postcmd() from being
    # called. So we have the first hook, and the second hook, which raised
    # the exception
    assert app.called_postcmd == 2
##
#
# command finalization
#
###
def test_register_cmdfinalization_hook_parameter_count():
    """Finalization hooks with the wrong number of parameters are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_not_enough_parameters)
    with pytest.raises(TypeError):
        cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_too_many_parameters)


def test_register_cmdfinalization_hook_no_parameter_annotation():
    """Finalization hooks without a parameter annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_no_parameter_annotation)


def test_register_cmdfinalization_hook_wrong_parameter_annotation():
    """Finalization hooks with a wrongly-typed parameter are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_wrong_parameter_annotation)


def test_register_cmdfinalization_hook_no_return_annotation():
    """Finalization hooks without a return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_no_return_annotation)


def test_register_cmdfinalization_hook_wrong_return_annotation():
    """Finalization hooks with a wrongly-typed return annotation are rejected."""
    cli = PluggedApp()
    with pytest.raises(TypeError):
        cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_wrong_return_annotation)
def test_cmdfinalization(capsys):
    """Each registered command-finalization hook runs once per command."""
    app = PluggedApp()
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    # no hooks registered yet, so the counter must still be zero
    assert app.called_cmdfinalization == 0

    app.register_cmdfinalization_hook(app.cmdfinalization_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    assert app.called_cmdfinalization == 1

    # register the function again, so it should be called twice
    app.reset_counters()
    app.register_cmdfinalization_hook(app.cmdfinalization_hook)
    app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert out == 'hello\n'
    assert not err
    assert app.called_cmdfinalization == 2
def test_cmdfinalization_stop_first(capsys):
    """stop=True from the first finalization hook survives later hooks."""
    cli = PluggedApp()
    cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_stop)
    cli.register_cmdfinalization_hook(cli.cmdfinalization_hook)
    should_stop = cli.onecmd_plus_hooks('say hello')
    output, error = capsys.readouterr()
    assert output == 'hello\n'
    assert not error
    # both hooks run even though the first requested a stop
    assert cli.called_cmdfinalization == 2
    assert should_stop


def test_cmdfinalization_stop_second(capsys):
    """stop=True from the second finalization hook is honored as well."""
    cli = PluggedApp()
    cli.register_cmdfinalization_hook(cli.cmdfinalization_hook)
    cli.register_cmdfinalization_hook(cli.cmdfinalization_hook_stop)
    should_stop = cli.onecmd_plus_hooks('say hello')
    output, error = capsys.readouterr()
    assert output == 'hello\n'
    assert not error
    assert cli.called_cmdfinalization == 2
    assert should_stop
def test_cmdfinalization_hook_exception(capsys):
    """An exception in a finalization hook is reported on stderr and prevents
    later finalization hooks from running."""
    app = PluggedApp()
    app.register_cmdfinalization_hook(app.cmdfinalization_hook_exception)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert err
    assert app.called_cmdfinalization == 1

    # register another function, but it shouldn't be called
    app.reset_counters()
    app.register_cmdfinalization_hook(app.cmdfinalization_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert out == 'hello\n'
    assert err
    assert app.called_cmdfinalization == 1
def test_cmdfinalization_hook_system_exit(capsys):
    """A SystemExit raised by a finalization hook translates into stop == True."""
    app = PluggedApp()
    app.register_cmdfinalization_hook(app.cmdfinalization_hook_system_exit)
    stop = app.onecmd_plus_hooks('say hello')
    assert stop
    assert app.called_cmdfinalization == 1


def test_cmdfinalization_hook_keyboard_interrupt(capsys):
    """KeyboardInterrupt from a finalization hook propagates only when requested
    and only when the command did not already request a stop."""
    app = PluggedApp()
    app.register_cmdfinalization_hook(app.cmdfinalization_hook_keyboard_interrupt)

    # First make sure KeyboardInterrupt isn't raised unless told to
    stop = app.onecmd_plus_hooks('say hello', raise_keyboard_interrupt=False)
    assert not stop
    assert app.called_cmdfinalization == 1

    # Now enable raising the KeyboardInterrupt
    app.reset_counters()
    with pytest.raises(KeyboardInterrupt):
        stop = app.onecmd_plus_hooks('say hello', raise_keyboard_interrupt=True)
    assert not stop
    assert app.called_cmdfinalization == 1

    # Now make sure KeyboardInterrupt isn't raised if stop is already True
    app.reset_counters()
    stop = app.onecmd_plus_hooks('quit', raise_keyboard_interrupt=True)
    assert stop
    assert app.called_cmdfinalization == 1
def test_skip_postcmd_hooks(capsys):
    """SkipPostcommandHooks suppresses postcmd hooks but not finalization hooks."""
    app = PluggedApp()
    app.register_postcmd_hook(app.postcmd_hook)
    app.register_cmdfinalization_hook(app.cmdfinalization_hook)

    # Cause a SkipPostcommandHooks exception and verify no postcmd stuff runs but cmdfinalization_hook still does
    app.onecmd_plus_hooks('skip_postcmd_hooks')
    out, err = capsys.readouterr()
    assert "In do_skip_postcmd_hooks" in out
    assert app.called_postcmd == 0
    assert app.called_cmdfinalization == 1


def test_cmd2_argparse_exception(capsys):
    """
    Verify Cmd2ArgparseErrors raised after calling a command prevent postcmd events from
    running but do not affect cmdfinalization events
    """
    app = PluggedApp()
    app.register_postcmd_hook(app.postcmd_hook)
    app.register_cmdfinalization_hook(app.cmdfinalization_hook)

    # First generate no exception and make sure postcmd_hook, postcmd, and cmdfinalization_hook run
    app.onecmd_plus_hooks('argparse_cmd arg_val')
    out, err = capsys.readouterr()
    assert out == 'arg_val\n'
    assert not err
    assert app.called_postcmd == 2
    assert app.called_cmdfinalization == 1

    app.reset_counters()

    # Next cause an argparse exception and verify no postcmd stuff runs but cmdfinalization_hook still does
    app.onecmd_plus_hooks('argparse_cmd')
    out, err = capsys.readouterr()
    assert not out
    assert "Error: the following arguments are required: my_arg" in err
    assert app.called_postcmd == 0
    assert app.called_cmdfinalization == 1
| 35.091587
| 127
| 0.720069
|
4a019bd0725ee302076658bbe9ec5efdf86a789f
| 4,472
|
py
|
Python
|
packageship/libs/conf/__init__.py
|
openeuler-mirror/pkgship
|
5aaa4953023fde8ff03892fe5608f0711a26a942
|
[
"MulanPSL-1.0"
] | null | null | null |
packageship/libs/conf/__init__.py
|
openeuler-mirror/pkgship
|
5aaa4953023fde8ff03892fe5608f0711a26a942
|
[
"MulanPSL-1.0"
] | null | null | null |
packageship/libs/conf/__init__.py
|
openeuler-mirror/pkgship
|
5aaa4953023fde8ff03892fe5608f0711a26a942
|
[
"MulanPSL-1.0"
] | 1
|
2021-11-20T00:10:53.000Z
|
2021-11-20T00:10:53.000Z
|
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
System configuration file and default configuration file integration
"""
import os
import configparser
from . import global_config
# Name of the environment variable that holds the path of the user's settings file.
USER_SETTINGS_FILE_PATH = 'SETTINGS_FILE_PATH'
class PreloadingSettings():
    """
    The system default configuration file and the configuration
    file changed by the user are lazily loaded.

    Attribute reads fall through to the merged Configs container (loading it
    on first use) and are then cached in the instance __dict__.
    """

    # Merged configuration container; stays None until first attribute access.
    _setting_container = None

    def _preloading(self):
        """
        Load the default configuration in the system and the related configuration
        of the user, and overwrite the default configuration items of the system
        with the user's configuration data.

        Raises:
            RuntimeError: if the environment variable named by
                USER_SETTINGS_FILE_PATH is unset or empty.
        """
        settings_file = os.environ.get(USER_SETTINGS_FILE_PATH)
        if not settings_file:
            # Bug fix: the original message had no "%s" placeholder, so the
            # "%" operator raised "TypeError: not all arguments converted"
            # instead of this RuntimeError.  Also restored the missing space
            # between "configuration" and "that".
            raise RuntimeError(
                "The system does not specify the user configuration "
                "that needs to be loaded: %s" % USER_SETTINGS_FILE_PATH)
        self._setting_container = Configs(settings_file)

    def __getattr__(self, name):
        """
        Return the value of a setting and cache it in self.__dict__
        """
        # __getattr__ only fires on cache misses; once the value is cached in
        # __dict__, normal attribute lookup bypasses this method entirely.
        if self._setting_container is None:
            self._preloading()
        value = getattr(self._setting_container, name, None)
        self.__dict__[name] = value
        return value

    def __setattr__(self, name, value):
        """
        Set the configured value and refresh the value cached in __dict__
        """
        if name is None:
            raise KeyError("The set configuration key value cannot be empty")
        if name == '_setting_container':
            # Replacing the container invalidates every cached setting.
            self.__dict__.clear()
            self.__dict__["_setting_container"] = value
        else:
            # Drop any stale cached copy before writing to the container.
            self.__dict__.pop(name, None)
            if self._setting_container is None:
                self._preloading()
            setattr(self._setting_container, name, value)

    def __delattr__(self, name):
        """
        Delete a setting and clear it from cache if needed
        """
        if name is None:
            raise KeyError("The set configuration key value cannot be empty")
        if self._setting_container is None:
            self._preloading()
        delattr(self._setting_container, name)
        self.__dict__.pop(name, None)

    @property
    def config_ready(self):
        """
        Return True if the settings have already been configured
        """
        return self._setting_container is not None

    def reload(self):
        """
        Drop the current container and re-read the configuration from disk.
        """
        self._setting_container = None
        self._preloading()
class Configs():
    """
    The system's default configuration items and the user's
    configuration items are integrated.

    System defaults come from the global_config module; values read from the
    user's ini file override them (keys are upper-cased to match).
    """

    def __init__(self, settings_file):
        # Start from the system defaults defined in global_config.
        for config in dir(global_config):
            if not config.startswith('_'):
                setattr(self, config, getattr(global_config, config))
        # Load user's configuration
        self._conf_parser = configparser.RawConfigParser()
        self._conf_parser.read(settings_file)
        for section in self._conf_parser.sections():
            # items() yields (key, value) pairs for the section.
            for _key, _config_value in self._conf_parser.items(section):
                if not _config_value:
                    # Empty values keep the system default.
                    continue
                if _config_value.isdigit():
                    _config_value = int(_config_value)
                elif _config_value.lower() in ('true', 'false'):
                    # Bug fix: the original used bool(_config_value), which is
                    # True for ANY non-empty string — including "false".
                    _config_value = _config_value.lower() == 'true'
                setattr(self, _key.upper(), _config_value)
# Module-level singleton through which the rest of the package reads settings;
# values are lazily loaded on first attribute access.
configuration = PreloadingSettings()
| 34.4
| 98
| 0.595707
|
4a019cc8d7c54d187bd3fb118b7f5d72516d26f8
| 3,764
|
py
|
Python
|
s.py
|
riceissa/ea-forum-reader
|
c340db63705ee2eb1dc64281fd6d2701451372b5
|
[
"CC0-1.0"
] | 8
|
2018-11-10T19:52:55.000Z
|
2022-01-19T20:43:15.000Z
|
s.py
|
riceissa/ea-forum-reader
|
c340db63705ee2eb1dc64281fd6d2701451372b5
|
[
"CC0-1.0"
] | 40
|
2018-11-23T22:19:05.000Z
|
2021-08-03T17:02:33.000Z
|
s.py
|
riceissa/ea-forum-reader
|
c340db63705ee2eb1dc64281fd6d2701451372b5
|
[
"CC0-1.0"
] | 3
|
2018-11-24T06:04:28.000Z
|
2020-05-23T09:28:40.000Z
|
#!/usr/bin/env python3
import pdb
import sys
from urllib.parse import quote
import datetime
import config
import util
import linkpath
def get_sequence(sequenceid, run_query=True):
    """Fetch a sequence (metadata, contents, chapters) via the GraphQL API.

    If run_query is False, return the GraphQL query text plus a link that runs
    it in GraphiQL instead of executing the query.
    """
    query = ("""
    {
      sequence(
        input: {
          selector: {
            _id: "%s"
          }
        }
      ) {
        result {
          title
          user {
            _id
            username
          }
          userId
          createdAt
          canonicalCollection {
            createdAt
            userId
            title
            slug
            gridImageId
            firstPageLink
            version
            _id
            schemaVersion
          }
          contents {
            html
            _id
          }
          chapters {
            createdAt
            title
            subtitle
            number
            sequenceId
            _id
          }
        }
      }
    }
    """ % sequenceid)
    if not run_query:
        # GraphiQL shares the GraphQL endpoint path, with "graphiql" substituted.
        return query + ('''\n<a href="%s">Run this query</a>\n\n''' % (config.GRAPHQL_URL.replace("graphql", "graphiql") + "?query=" + quote(query)))
    request = util.send_query(query)
    return util.safe_get(request.json(), ['data', 'sequence', 'result'])
def get_chapter(chapterid, run_query=True):
    """Fetch the post titles/URLs belonging to a single chapter via GraphQL.

    When *run_query* is falsy, return the query text followed by a GraphiQL
    link instead of executing it.
    """
    query = ("""
    {
      chapter(
        input: {
          selector: {
            _id: "%s"
          }
        }
      ) {
        result {
          posts {
            title
            pageUrl
          }
        }
      }
    }
    """ % chapterid)
    if run_query:
        response = util.send_query(query)
        return util.safe_get(response.json(), ['data', 'chapter', 'result'])
    graphiql_url = config.GRAPHQL_URL.replace("graphql", "graphiql") + "?query=" + quote(query)
    return query + ('''\n<a href="%s">Run this query</a>\n\n''' % graphiql_url)
def show_sequence(sequenceid, display_format):
    """Render a sequence (its chapters and post links) as a full HTML page.

    Parameters
    ----------
    sequenceid : str
        The sequence's GraphQL ``_id``.
    display_format : str
        Accepted for interface parity with the other ``show_*`` pages; a
        "queries" view is not wired up for sequences yet (the old code
        computed a ``run_query`` flag from it but never forwarded it).
    """
    result = ("""<!DOCTYPE html>
<html>
""")
    sequence = get_sequence(sequenceid)
    # BUG FIX: this used ``result = util.show_head(...)``, which threw away
    # the DOCTYPE/<html> preamble assembled above.
    result += util.show_head(title=util.safe_get(sequence, "title"),
                             author=util.safe_get(sequence, ["user", "username"]),
                             date=util.safe_get(sequence, "createdAt"),
                             publisher="LessWrong 2.0" if "lesswrong" in config.GRAPHQL_URL
                             else "Effective Altruism Forum")
    result += "<body>\n"
    # result += util.show_navbar(navlinks=[
    #     '''<a href="%s" title="Show all the GraphQL queries used to generate this page">Queries</a>''' % linkpath.posts(postid=util.htmlescape(postid), postslug=post['slug'], display_format="queries")
    # ])
    result += '''<div id="wrapper">'''
    result += '''<div id="content">'''
    result += "<h1>" + util.safe_get(sequence, "title") + "</h1>\n"
    # Guard against a sequence/chapter with no children (safe_get -> None).
    for chapterdict in util.safe_get(sequence, "chapters") or []:
        chapterid = chapterdict["_id"]
        chapter = get_chapter(chapterid)
        result += "<h2>" + util.safe_get(chapterdict, "title", default="") + "</h2>"
        result += "<ul>\n"
        for postdict in util.safe_get(chapter, "posts") or []:
            alt_urls = util.alt_urls(util.safe_get(postdict, "pageUrl"))
            result += '''    <li><a href="%s">%s</a></li>\n''' % (
                alt_urls['reader'],
                util.safe_get(postdict, "title")
            )
        result += "</ul>\n"
    result += ("""
  </div>
  </div>
</body>
</html>
""")
    return result
if __name__ == "__main__":
    # CLI entry point: expects exactly two arguments (sequence id, format).
    if len(sys.argv) == 3:
        print(show_sequence(sequenceid=sys.argv[1], display_format=sys.argv[2]))
    else:
        print("Please enter a sequence ID and display format as argument")
| 27.676471
| 206
| 0.502125
|
4a019cd37536bf56f99f9ed18ed34a2c7c8e730e
| 259
|
py
|
Python
|
ddmrp/ddmrp/doctype/ddmrp_action_log/ddmrp_action_log.py
|
szufisher/ddmrp
|
761bba5e4c78049bbdd4bb4a921531389fd42d4d
|
[
"MIT"
] | null | null | null |
ddmrp/ddmrp/doctype/ddmrp_action_log/ddmrp_action_log.py
|
szufisher/ddmrp
|
761bba5e4c78049bbdd4bb4a921531389fd42d4d
|
[
"MIT"
] | null | null | null |
ddmrp/ddmrp/doctype/ddmrp_action_log/ddmrp_action_log.py
|
szufisher/ddmrp
|
761bba5e4c78049bbdd4bb4a921531389fd42d4d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Fisher and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class DDMRPActionLog(Document):
    """Frappe DocType controller for the DDMRP Action Log.

    No server-side behaviour is overridden here; presumably the DocType is
    defined declaratively in its accompanying JSON schema.
    """
    pass
| 23.545455
| 49
| 0.776062
|
4a019ce46187621d2a90476a28441466e1645599
| 4,658
|
py
|
Python
|
networks_256.py
|
bernardas78/BigGAN-tensorflow
|
70814c044512798006c3a12f981afcba970cd0c9
|
[
"MIT"
] | null | null | null |
networks_256.py
|
bernardas78/BigGAN-tensorflow
|
70814c044512798006c3a12f981afcba970cd0c9
|
[
"MIT"
] | null | null | null |
networks_256.py
|
bernardas78/BigGAN-tensorflow
|
70814c044512798006c3a12f981afcba970cd0c9
|
[
"MIT"
] | null | null | null |
from ops import *
class Generator:
    """BigGAN-style generator for 256x256 RGB images.

    The latent vector is split into one chunk per residual block so every
    up-sampling stage receives its own slice of the noise (hierarchical
    latent conditioning).
    """

    def __init__(self, name):
        # Variable-scope prefix; var_list() collects variables created under it.
        self.name = name

    def __call__(self, inputs, train_phase, y, nums_class):
        latent_dim = int(inputs.shape[-1])  # = 128
        num_blocks = 6
        remainder = latent_dim % num_blocks  # = 128 % 6 = 2
        split_size = (latent_dim - remainder) // num_blocks  # = (128-2)//6 = 21
        # Split sizes [21, 21, 21, 21, 21, 23]: the last chunk absorbs the remainder.
        z_chunks = tf.split(inputs, [split_size] * (num_blocks - 1) + [split_size + remainder], axis=1)
        with tf.compat.v1.variable_scope(name_or_scope=self.name, reuse=tf.compat.v1.AUTO_REUSE):
            x = dense("dense", inputs, 1024*4*4)
            x = tf.reshape(x, [-1, 4, 4, 1024])
            x = G_Resblock("ResBlock1", x, 1024, train_phase, z_chunks[0], y, nums_class)
            print("XXX.1 inputs.shape: {}".format(x.shape))
            x = G_Resblock("ResBlock2", x, 512, train_phase, z_chunks[1], y, nums_class)
            print("XXX.2 inputs.shape: {}".format(x.shape))
            x = G_Resblock("ResBlock2.5", x, 512, train_phase, z_chunks[2], y, nums_class)
            print("XXX.2.5 inputs.shape: {}".format(x.shape))
            x = G_Resblock("ResBlock3", x, 256, train_phase, z_chunks[3], y, nums_class)
            print("XXX.3 inputs.shape: {}".format(x.shape))
            # Self-attention kept at this (smaller) resolution due to memory constraints.
            x = non_local("Non-local", x, None, True)
            x = G_Resblock("ResBlock4", x, 128, train_phase, z_chunks[4], y, nums_class)
            print("XXX.4 inputs.shape: {}".format(x.shape))
            print("XXX.5 inputs.shape: {}".format(x.shape))
            x = G_Resblock("ResBlock5", x, 64, train_phase, z_chunks[5], y, nums_class)
            print("XXX.6 inputs.shape: {}".format(x.shape))
            x = relu(conditional_batchnorm(x, train_phase, "BN"))
            print("XXX.7 inputs.shape: {}".format(x.shape))
            x = conv("conv", x, k_size=3, nums_out=3, strides=1)
            print("XXX.8 inputs.shape: {}".format(x.shape))
            return tf.nn.tanh(x)

    def var_list(self):
        """All global variables created under this generator's scope."""
        return tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, self.name)
class Discriminator:
    """Projection discriminator matching the 256x256 generator above."""

    def __init__(self, name):
        # Variable-scope prefix; var_list() collects variables created under it.
        self.name = name

    def __call__(self, inputs, y, nums_class, update_collection=None):
        with tf.compat.v1.variable_scope(name_or_scope=self.name, reuse=tf.compat.v1.AUTO_REUSE):
            print("DDD.0 inputs.shape: {}".format(inputs.shape))
            h = D_FirstResblock("ResBlock1", inputs, 64, update_collection, is_down=True)
            print("DDD.1 inputs.shape: {}".format(h.shape))
            h = D_Resblock("ResBlock2", h, 128, update_collection, is_down=True)
            print("DDD.2 inputs.shape: {}".format(h.shape))
            h = non_local("Non-local", h, None, True)
            h = D_Resblock("ResBlock3.-1", h, 256, update_collection, is_down=True)
            print("DDD.3.-1 inputs.shape: {}".format(h.shape))
            h = D_Resblock("ResBlock3", h, 256, update_collection, is_down=True)
            print("DDD.3 inputs.shape: {}".format(h.shape))
            h = D_Resblock("ResBlock4", h, 512, update_collection, is_down=True)
            print("DDD.4 inputs.shape: {}".format(h.shape))
            h = D_Resblock("ResBlock5", h, 1024, update_collection, is_down=True)
            print("DDD.5 inputs.shape: {}".format(h.shape))
            h = D_Resblock("ResBlock6", h, 1024, update_collection, is_down=False)
            print("DDD.6 inputs.shape: {}".format(h.shape))
            h = relu(h)
            h = global_sum_pooling(h)
            # Projection head: class-conditional inner product plus linear logit.
            projection = Inner_product(h, y, nums_class, update_collection)
            logit = dense("dense", h, 1, update_collection, is_sn=True)
            return projection + logit

    def var_list(self):
        """All global variables created under this discriminator's scope."""
        return tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, self.name)
if __name__ == "__main__":
    # Graph-construction smoke test (no session is run).
    x = tf.compat.v1.placeholder(tf.float32, [None, 32, 32, 3])
    z = tf.compat.v1.placeholder(tf.float32, [None, 100])
    y = tf.compat.v1.placeholder(tf.float32, [None, 100])
    train_phase = tf.compat.v1.placeholder(tf.bool)
    nums_class = 100  # matches the label placeholder width above
    G = Generator("generator")
    D = Discriminator("discriminator")
    # BUG FIX: Generator.__call__ requires (inputs, train_phase, y, nums_class)
    # and Discriminator.__call__ requires (inputs, y, nums_class); the old
    # calls omitted the label arguments and raised TypeError.
    fake_img = G(z, train_phase, y, nums_class)
    fake_logit = D(fake_img, y, nums_class)
| 55.452381
| 123
| 0.625376
|
4a019eb131f56cccf92e4935c8b3fdd7ed87cbb7
| 4,841
|
py
|
Python
|
airflow/contrib/auth/backends/kerberos_auth.py
|
jacky-nirvana/incubator-airflow
|
2318cea74d4f71fba353eaca9bb3c4fd3cdb06c0
|
[
"Apache-2.0"
] | 1
|
2019-09-16T06:56:31.000Z
|
2019-09-16T06:56:31.000Z
|
airflow/contrib/auth/backends/kerberos_auth.py
|
jacky-nirvana/incubator-airflow
|
2318cea74d4f71fba353eaca9bb3c4fd3cdb06c0
|
[
"Apache-2.0"
] | 6
|
2018-02-10T20:25:16.000Z
|
2019-11-20T03:01:03.000Z
|
airflow/contrib/auth/backends/kerberos_auth.py
|
jacky-nirvana/incubator-airflow
|
2318cea74d4f71fba353eaca9bb3c4fd3cdb06c0
|
[
"Apache-2.0"
] | 2
|
2019-09-16T06:48:41.000Z
|
2019-09-16T06:56:32.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import flask_login
from flask_login import current_user
from flask import flash
from wtforms import Form, PasswordField, StringField
from wtforms.validators import InputRequired
# pykerberos should be used as it verifies the KDC, the "kerberos" module does not do so
# and make it possible to spoof the KDC
import kerberos
from airflow.security import utils
from flask import url_for, redirect
from airflow import models
from airflow import configuration
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
login_manager = flask_login.LoginManager()
login_manager.login_view = 'airflow.login' # Calls login() below
login_manager.login_message = None
class AuthenticationError(Exception):
    """Raised when Kerberos password validation fails or errors out."""
    pass
class KerberosUser(models.User, LoggingMixin):
    """Airflow user wrapper that authenticates against a Kerberos KDC."""

    def __init__(self, user):
        # user: the backing ``airflow.models.User`` database row.
        self.user = user

    @staticmethod
    def authenticate(username, password):
        """Validate *username*/*password* against the configured KDC.

        Builds the service principal from the configured Kerberos principal
        and this host's FQDN, derives the user principal from *username*,
        and checks the password via pykerberos with KDC verification on.
        Returns None on success.

        Raises
        ------
        AuthenticationError
            If the password is rejected or the Kerberos library errors.
        """
        service_principal = "%s/%s" % (
            configuration.conf.get('kerberos', 'principal'),
            utils.get_fqdn()
        )
        realm = configuration.conf.get("kerberos", "default_realm")
        user_principal = utils.principal_from_username(username)
        try:
            # this is pykerberos specific, verify = True is needed to prevent KDC spoofing
            if not kerberos.checkPassword(user_principal,
                                          password,
                                          service_principal, realm, True):
                raise AuthenticationError()
        except kerberos.KrbError as e:
            logging.error(
                'Password validation for principal %s failed %s', user_principal, e)
            raise AuthenticationError(e)
        return

    def is_active(self):
        """Required by flask_login"""
        return True

    def is_authenticated(self):
        """Required by flask_login"""
        return True

    def is_anonymous(self):
        """Required by flask_login"""
        return False

    def get_id(self):
        """Returns the current user id as required by flask_login"""
        return self.user.get_id()

    def data_profiling(self):
        """Provides access to data profiling tools"""
        return True

    def is_superuser(self):
        """Access all the things"""
        return True
@login_manager.user_loader
@provide_session
def load_user(userid, session=None):
    """flask_login user loader: map a stored user id to a KerberosUser."""
    # flask_login may hand back the string 'None' for an empty session.
    if userid and userid != 'None':
        record = session.query(models.User).filter(models.User.id == int(userid)).first()
        return KerberosUser(record)
    return None
@provide_session
def login(self, request, session=None):
    """Flask view handling Airflow's login page via Kerberos.

    ``self`` is the admin view instance this function is attached to (used
    for ``self.render``); *request* is the incoming Flask request. On valid
    credentials the user row is created on first login, logged in through
    flask_login, and redirected; otherwise the form is re-rendered.
    """
    if current_user.is_authenticated():
        flash("You are already logged in")
        return redirect(url_for('index'))
    username = None
    password = None
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate():
        username = request.form.get("username")
        password = request.form.get("password")
    # GET requests (or invalid form posts) fall through with empty
    # credentials and just show the login form.
    if not username or not password:
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)
    try:
        KerberosUser.authenticate(username, password)
        user = session.query(models.User).filter(
            models.User.username == username).first()
        if not user:
            user = models.User(
                username=username,
                is_superuser=False)
        # First commit persists the (possibly new) user row before login;
        # presumably needed so it has a database id — confirm.
        session.merge(user)
        session.commit()
        flask_login.login_user(KerberosUser(user))
        session.commit()
        return redirect(request.args.get("next") or url_for("admin.index"))
    except AuthenticationError:
        flash("Incorrect login details")
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)
class LoginForm(Form):
    """Username/password form rendered on the Airflow login page."""
    username = StringField('Username', [InputRequired()])
    password = PasswordField('Password', [InputRequired()])
| 31.032051
| 90
| 0.658955
|
4a019ebbb419db0f574ddb3376dec9833e0fb1ca
| 7,245
|
py
|
Python
|
contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/timm/models/layers/std_conv.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/timm/models/layers/std_conv.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/timm/models/layers/std_conv.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from .padding import get_padding, get_padding_value, pad_same
def get_weight(module):
    """Return *module*'s conv weight standardized per output filter.

    Weight Standardization: subtract the per-filter mean and divide by the
    biased per-filter std, stabilized by ``module.eps``.
    """
    std, mean = torch.std_mean(module.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (module.weight - mean) / (std + module.eps)
class StdConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models.

    Paper: `Micro-Batch Training with Batch-Channel Normalization and
    Weight Standardization` - https://arxiv.org/abs/1903.10520v2
    """

    def __init__(self, in_channel, out_channels, kernel_size, stride=1, padding=None,
                 dilation=1, groups=1, bias=False, eps=1e-5):
        if padding is None:
            padding = get_padding(kernel_size, stride, dilation)
        super().__init__(
            in_channel, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Stabilizer added to the per-filter std before dividing.
        self.eps = eps

    def get_weight(self):
        """Standardize the raw weight per output filter (zero mean, unit-ish std)."""
        std, mean = torch.std_mean(self.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
        return (self.weight - mean) / (std + self.eps)

    def forward(self, x):
        return F.conv2d(x, self.get_weight(), self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class StdConv2dSame(nn.Conv2d):
    """Conv2d with Weight Standardization and TF-compatible SAME padding.

    Used for the ViT Hybrid model.
    Paper: `Micro-Batch Training with Batch-Channel Normalization and
    Weight Standardization` - https://arxiv.org/abs/1903.10520v2
    """

    def __init__(self, in_channel, out_channels, kernel_size, stride=1, padding='SAME',
                 dilation=1, groups=1, bias=False, eps=1e-5):
        padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
        super().__init__(
            in_channel, out_channels, kernel_size, stride=stride, padding=padding,
            dilation=dilation, groups=groups, bias=bias)
        # True when SAME padding cannot be resolved statically and must be
        # applied per forward pass via pad_same().
        self.same_pad = is_dynamic
        self.eps = eps

    def get_weight(self):
        """Standardize the raw weight per output filter."""
        std, mean = torch.std_mean(self.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
        return (self.weight - mean) / (std + self.eps)

    def forward(self, x):
        if self.same_pad:
            x = pad_same(x, self.kernel_size, self.stride, self.dilation)
        return F.conv2d(x, self.get_weight(), self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class ScaledStdConv2d(nn.Conv2d):
    """Conv2d layer with Scaled Weight Standardization.

    Paper: `Characterizing signal propagation to close the performance gap
    in unnormalized ResNets` - https://arxiv.org/abs/2101.08692

    NOTE: the operations used in this impl differ slightly from the DeepMind
    Haiku impl. The impact is minor.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None,
                 dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-5, gain_init=1.0,
                 use_layernorm=False):
        if padding is None:
            padding = get_padding(kernel_size, stride, dilation)
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding,
            dilation=dilation, groups=groups, bias=bias)
        # Learnable per-filter gain applied on top of the standardized weight.
        self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
        self.scale = gamma * self.weight[0].numel() ** -0.5  # gamma * 1 / sqrt(fan-in)
        # When hijacking the LayerNorm kernel, eps applies to the variance,
        # hence the square.
        self.eps = eps ** 2 if use_layernorm else eps
        self.use_layernorm = use_layernorm  # experimental, slightly faster/less GPU memory

    def get_weight(self):
        if self.use_layernorm:
            standardized = self.scale * F.layer_norm(self.weight, self.weight.shape[1:], eps=self.eps)
        else:
            std, mean = torch.std_mean(self.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
            standardized = self.scale * (self.weight - mean) / (std + self.eps)
        return self.gain * standardized

    def forward(self, x):
        return F.conv2d(x, self.get_weight(), self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class ScaledStdConv2dSame(nn.Conv2d):
    """Conv2d with Scaled Weight Standardization and TF-like SAME padding.

    Paper: `Characterizing signal propagation to close the performance gap
    in unnormalized ResNets` - https://arxiv.org/abs/2101.08692

    NOTE: the operations used in this impl differ slightly from the DeepMind
    Haiku impl; an alternate variance-based formulation was measured to move
    eval top-1 by only +/- .002 to .004, so the impact is minor.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='SAME',
                 dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-5, gain_init=1.0,
                 use_layernorm=False):
        padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride, padding=padding,
            dilation=dilation, groups=groups, bias=bias)
        # Learnable per-filter gain applied on top of the standardized weight.
        self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
        self.scale = gamma * self.weight[0].numel() ** -0.5  # gamma * 1 / sqrt(fan-in)
        # True when SAME padding must be applied dynamically per forward pass.
        self.same_pad = is_dynamic
        self.eps = eps ** 2 if use_layernorm else eps
        self.use_layernorm = use_layernorm  # experimental, slightly faster/less GPU memory

    def get_weight(self):
        if self.use_layernorm:
            standardized = self.scale * F.layer_norm(self.weight, self.weight.shape[1:], eps=self.eps)
        else:
            std, mean = torch.std_mean(self.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
            standardized = self.scale * (self.weight - mean) / (std + self.eps)
        return self.gain * standardized

    def forward(self, x):
        if self.same_pad:
            x = pad_same(x, self.kernel_size, self.stride, self.dilation)
        return F.conv2d(x, self.get_weight(), self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
| 45.85443
| 111
| 0.659489
|
4a019ec420e5af71a859cfb5986eb3798cf4c711
| 7,189
|
py
|
Python
|
rst2pdf/tests/input/sphinx-issue162/conf.py
|
shakna-israel/rst2pdf
|
9eb934298aeae872c652f60247bbfd9cc3da842f
|
[
"MIT"
] | 1
|
2019-04-15T13:50:16.000Z
|
2019-04-15T13:50:16.000Z
|
rst2pdf/tests/input/sphinx-issue162/conf.py
|
shakna-israel/rst2pdf
|
9eb934298aeae872c652f60247bbfd9cc3da842f
|
[
"MIT"
] | null | null | null |
rst2pdf/tests/input/sphinx-issue162/conf.py
|
shakna-israel/rst2pdf
|
9eb934298aeae872c652f60247bbfd9cc3da842f
|
[
"MIT"
] | 2
|
2020-10-22T23:22:34.000Z
|
2021-01-27T13:32:13.000Z
|
# -*- coding: utf-8 -*-
#
# issue162 documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 18 22:54:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# rst2pdf.pdfbuilder provides the "pdf" builder exercised by this test fixture.
extensions = ['rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'issue162'
copyright = u'2009, RA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'test'
# The full version, including alpha/beta/rc tags.
release = 'test'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'issue162doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'issue162.tex', u'issue162 Documentation',
   u'RA', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# -- Options for PDF output --------------------------------------------------
# rst2pdf-specific options, consumed by the rst2pdf.pdfbuilder extension.
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author).
pdf_documents = [
    ('index', u'MyProject', u'My Project', u'Author Name'),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['borland']
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
#pdf_compressed=False
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path=['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
pdf_language="en_US"
# If false, no index is generated.
pdf_use_index = False
# If false, no modindex is generated.
pdf_use_modindex = False
# If false, no coverpage is generated.
pdf_use_coverpage = False
pdf_verbosity=0
# pdf_invariant: presumably makes output byte-stable so the test suite can
# compare generated PDFs — confirm against rst2pdf's pdfbuilder docs.
pdf_invariant = True
| 31.393013
| 80
| 0.719989
|
4a01a000a1ba12d0939930bff3499ec024edd989
| 3,457
|
py
|
Python
|
gamestonk_terminal/stocks/fundamental_analysis/market_watch_model.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | 1
|
2021-09-14T14:37:29.000Z
|
2021-09-14T14:37:29.000Z
|
gamestonk_terminal/stocks/fundamental_analysis/market_watch_model.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/stocks/fundamental_analysis/market_watch_model.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
""" Fundamental Analysis Market Watch Model """
__docformat__ = "numpy"
import requests
import pandas as pd
from bs4 import BeautifulSoup
from gamestonk_terminal.helper_funcs import (
get_user_agent,
)
def prepare_df_financials(
    ticker: str, statement: str, quarter: bool = False
) -> pd.DataFrame:
    """Builds a DataFrame with financial statements for a given company

    Scrapes the Market Watch financials page for *ticker* and reassembles
    the HTML table into a DataFrame. Returns an empty DataFrame when the
    page does not contain the expected trend column.

    Parameters
    ----------
    ticker : str
        Company's stock ticker
    statement : str
        Either income, balance or cashflow
    quarter : bool, optional
        Return quarterly financial statements instead of annual, by default False

    Returns
    -------
    pd.DataFrame
        A DataFrame with financial info

    Raises
    ------
    ValueError
        If statement is not income, balance or cashflow
    """
    financial_urls = {
        "income": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/income/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/income",
        },
        "balance": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/balance-sheet",
        },
        "cashflow": {
            "quarter": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow/quarter",
            "annual": "https://www.marketwatch.com/investing/stock/{}/financials/cash-flow",
        },
    }
    # Idiomatic membership test (was ``not in financial_urls.keys()``).
    if statement not in financial_urls:
        raise ValueError(f"type {statement} is not in {financial_urls.keys()}")

    period = "quarter" if quarter else "annual"

    text_soup_financials = BeautifulSoup(
        requests.get(
            financial_urls[statement][period].format(ticker),
            headers={"User-Agent": get_user_agent()},
            timeout=30,  # without a timeout a stalled connection hangs forever
        ).text,
        "lxml",
    )

    # Column headers; each <th> may contain extra lines, keep only the first.
    a_financials_header = [
        financials_header.text.strip("\n").split("\n")[0]
        for financials_header in text_soup_financials.findAll(
            "th", {"class": "overflow__heading"}
        )
    ]

    # The trend sparkline column marks the end of the data columns.
    s_header_end_trend = "5- qtr trend" if quarter else "5-year trend"
    if s_header_end_trend not in a_financials_header:
        return pd.DataFrame()

    df_financials = pd.DataFrame(
        columns=a_financials_header[: a_financials_header.index(s_header_end_trend)]
    )

    find_table = text_soup_financials.findAll(
        "div", {"class": "element element--table table--fixed financials"}
    )
    if not find_table:
        return df_financials

    financials_rows = find_table[0].findAll(
        "tr", {"class": ["table__row is-highlighted", "table__row"]}
    )
    for a_row in financials_rows:
        constructed_row = []
        financial_columns = a_row.findAll(
            "td", {"class": ["overflow__cell", "overflow__cell fixed--column"]}
        )
        if not financial_columns:
            continue
        for a_column in financial_columns:
            column_to_text = a_column.text.strip()
            if "\n" in column_to_text:
                column_to_text = column_to_text.split("\n")[0]
            if column_to_text == "":
                continue
            constructed_row.append(column_to_text)
        df_financials.loc[len(df_financials)] = constructed_row
    return df_financials
| 29.801724
| 105
| 0.628001
|
4a01a130975459e052c1b63bd0e80d15abf336f3
| 5,481
|
py
|
Python
|
example/example/settings.py
|
openedx/django-pyfs
|
3e6880dbb91aa0f60ad993f81040b9a96d3460d4
|
[
"Apache-2.0"
] | 4
|
2020-07-04T06:04:49.000Z
|
2021-11-05T00:40:11.000Z
|
example/example/settings.py
|
edx/django-pyfs
|
7b65802002515dd51e1d03efd2d87bf1a6dc07b8
|
[
"Apache-2.0"
] | 34
|
2015-10-26T14:48:09.000Z
|
2021-12-20T05:06:57.000Z
|
example/example/settings.py
|
openedx/django-pyfs
|
3e6880dbb91aa0f60ad993f81040b9a96d3460d4
|
[
"Apache-2.0"
] | 5
|
2016-01-04T18:48:45.000Z
|
2019-07-13T05:24:26.000Z
|
# Django settings for example project.
# CHANGED
DJFS = {'type': 'osfs',
'directory_root': 'sample/static/djpyfs',
'url_root': '/static/djpyfs'}
# /CHANGED
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.sql', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '&nlg(yv5rw-t6v+i$1!5+)su!38-2@)z)$0h0qg37ygqfzly2+'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'djpyfs', ## <-- CHANGED
'sample',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 33.420732
| 109
| 0.696406
|
4a01a28967c23b0b0b0c369d8a6d82e4fce1d21b
| 438
|
py
|
Python
|
tests/Validation/pollTests.py
|
cesclass/projetL2S3
|
fb97f80cb7f2e43a0dd56914988ef52a59376128
|
[
"MIT"
] | null | null | null |
tests/Validation/pollTests.py
|
cesclass/projetL2S3
|
fb97f80cb7f2e43a0dd56914988ef52a59376128
|
[
"MIT"
] | null | null | null |
tests/Validation/pollTests.py
|
cesclass/projetL2S3
|
fb97f80cb7f2e43a0dd56914988ef52a59376128
|
[
"MIT"
] | null | null | null |
import networkx as nx
from networkx.algorithms import tournament
import numpy as np
import matplotlib.pyplot as plt
import pylab

# Validation script: build a directed graph, check whether it is a tournament
# (a complete asymmetric digraph), then draw it with edge-weight labels.
G = nx.DiGraph()
# add the edges here (original comment: "ajouter les arcs ici")
print(tournament.is_tournament(G))
# Map each edge (u, v) to its 'weight' attribute for labelling.
# NOTE(review): assumes every edge added above carries a 'weight' attribute --
# edges without one would raise KeyError here; confirm when filling in edges.
edge_labels=dict([((u,v,),d['weight'])
for u,v,d in G.edges(data=True)])
# Lay the nodes out on a circle, draw labels and nodes, and open the plot window.
pos=nx.circular_layout(G)
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)
nx.draw_networkx(G,pos, node_size=200)
pylab.show()
| 20.857143
| 59
| 0.742009
|
4a01a3fb333227c4638eabbf80264a7b000b7ba2
| 1,108
|
py
|
Python
|
x_rebirth_station_calculator/station_data/modules/scannar_facility.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | 1
|
2016-04-17T11:00:22.000Z
|
2016-04-17T11:00:22.000Z
|
x_rebirth_station_calculator/station_data/modules/scannar_facility.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | null | null | null |
x_rebirth_station_calculator/station_data/modules/scannar_facility.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | null | null | null |
from x_rebirth_station_calculator.station_data.station_base import Module
from x_rebirth_station_calculator.station_data.station_base import Production
from x_rebirth_station_calculator.station_data.station_base import Consumption
from x_rebirth_station_calculator.station_data import wares

# Localised display names, keyed by language id (L044 = English, L049 = German).
names = {'L044': 'ScannAr Facility',
'L049': 'ScannAr-Fabrik'}
# What the module produces: 80 Scanning Arrays per cycle.
productions = {'al': [Production(wares.ScanningArray, 80)]}
# Per-cycle inputs; the trailing True flag marks a consumption as optional
# (crew supplies rather than hard production requirements, per Consumption's
# signature in station_base).
consumptions = {'al': [Consumption(wares.ChemicalCompounds, 80),
Consumption(wares.CutCrystals, 640),
Consumption(wares.EnergyCells, 640),
Consumption(wares.FoodRations, 400),
Consumption(wares.Microchips, 80),
Consumption(wares.QuantumTubes, 40),
Consumption(wares.RefinedMetals, 320),
Consumption(wares.SiliconWafers, 400),
Consumption(wares.MedicalSupplies, 160, True),
Consumption(wares.Spacefuel, 120, True)]}
# The exported module definition consumed by the station calculator.
ScannArFacility = Module(names, productions, consumptions)
| 48.173913
| 78
| 0.66065
|
4a01a45a75f0efd3ac5a1b948cf71a67aa16ef02
| 5,882
|
py
|
Python
|
elegantrl/demo.py
|
virtualpeer/NeoFinRL
|
c581bd73a814ee37f8727021b9e2a5dbbf7fe820
|
[
"MIT"
] | 1
|
2021-09-06T05:08:55.000Z
|
2021-09-06T05:08:55.000Z
|
elegantrl/demo.py
|
virtualpeer/NeoFinRL
|
c581bd73a814ee37f8727021b9e2a5dbbf7fe820
|
[
"MIT"
] | null | null | null |
elegantrl/demo.py
|
virtualpeer/NeoFinRL
|
c581bd73a814ee37f8727021b9e2a5dbbf7fe820
|
[
"MIT"
] | null | null | null |
'''From https://github.com/AI4Finance-Foundation/ElegantRL'''
import sys
import gym # not necessary
from elegantrl.agent import *
from elegantrl.env import PreprocessEnv
from elegantrl.run import Arguments, train_and_evaluate, train_and_evaluate_mp
gym.logger.set_level(40) # Block warning
def demo_continuous_action_off_policy():
    """Demo: train an off-policy agent (ModSAC by default) on continuous-control gym envs.

    Each ``if_train_*`` int flag enables one benchmark run below; the visible
    GPU id is taken from the last command-line argument.
    """
    args = Arguments(if_on_policy=False)
    args.agent = AgentModSAC()  # AgentSAC AgentTD3 AgentDDPG
    # Use soft-updated target networks for both actor and critic.
    args.agent.if_use_act_target = True
    args.agent.if_use_cri_target = True
    args.visible_gpu = sys.argv[-1]
    if_train_pendulum = 0
    if if_train_pendulum:
        "TotalStep: 2e5, TargetReward: -200, UsedTime: 200s"
        args.env = PreprocessEnv(env=gym.make('Pendulum-v0'))  # env='Pendulum-v0' is OK.
        args.env.target_return = -200  # set target_reward manually for env 'Pendulum-v0'
        args.reward_scale = 2 ** -2
        args.gamma = 0.97
        # train_and_evaluate(args)
        # Multi-process training: target_step is split across env * worker processes.
        args.env_num = 2
        args.worker_num = 2
        args.target_step = args.env.max_step * 4 // (args.env_num * args.worker_num)
        train_and_evaluate_mp(args)
    if_train_lunar_lander = 1
    if if_train_lunar_lander:
        "TotalStep: 4e5, TargetReward: 200, UsedTime: 900s"
        args.env = PreprocessEnv(env=gym.make('LunarLanderContinuous-v2'))
        args.gamma = 0.99
        args.break_step = int(4e6)
        # train_and_evaluate(args)
        args.env_num = 2
        args.worker_num = 4
        args.target_step = args.env.max_step * 2 // (args.env_num * args.worker_num)
        train_and_evaluate_mp(args)
    if_train_bipedal_walker = 1
    if if_train_bipedal_walker:
        "TotalStep: 08e5, TargetReward: 300, UsedTime: 1800s TD3"
        "TotalStep: 11e5, TargetReward: 329, UsedTime: 3000s TD3"
        args.env = PreprocessEnv(env=gym.make('BipedalWalker-v3'))
        args.gamma = 0.98
        args.break_step = int(4e6)
        args.max_memo = 2 ** 20
        # Single-process training here; the multi-process variant is kept for reference.
        train_and_evaluate(args)
        # args.env_num = 2
        # args.worker_num = 4
        # args.target_step = args.env.max_step * 2 // (args.env_num * args.worker_num)
        # train_and_evaluate_mp(args)
def demo_continuous_action_on_policy():
    """Demo: train an on-policy PPO agent on continuous-control gym envs.

    Each ``if_train_*`` int flag enables one benchmark run below; the visible
    GPU id is taken from the last command-line argument.
    """
    args = Arguments(if_on_policy=True)  # hyper-parameters of on-policy is different from off-policy
    args.agent = AgentPPO()
    args.agent.cri_target = True
    args.visible_gpu = sys.argv[-1]
    # Offset the default seed so repeated launches do not reuse the same run.
    args.random_seed += 1943
    if_train_pendulum = 0
    if if_train_pendulum:
        "TotalStep: 4e5, TargetReward: -200, UsedTime: 400s"
        env = PreprocessEnv(env=gym.make('Pendulum-v0'))
        env.target_return = -200
        # The same env instance serves both training and evaluation here.
        args.env_eval = env
        args.env = env
        args.env.env_num = 2
        args.agent.cri_target = False
        args.reward_scale = 2 ** -2  # RewardRange: -1800 < -200 < -50 < 0
        args.gamma = 0.97
        args.net_dim = 2 ** 7
        args.batch_size = args.net_dim * 2
        args.target_step = args.env_eval.max_step * 2
        train_and_evaluate(args)
        # args.worker_num = 2
        # train_and_evaluate_mp(args)
    if_train_lunar_lander = 0
    if if_train_lunar_lander:
        "TotalStep: 4e5, TargetReward: 200, UsedTime: 2000s, TD3"
        args.env = PreprocessEnv(env=gym.make('LunarLanderContinuous-v2'))
        args.gamma = 0.99
        args.break_step = int(4e6)
        # train_and_evaluate(args)
        # Multi-process training: target_step is split across env * worker processes.
        args.env_num = 2
        args.worker_num = 4
        args.target_step = args.env.max_step * 2 // (args.env_num * args.worker_num)
        train_and_evaluate_mp(args)
    if_train_bipedal_walker = 1
    if if_train_bipedal_walker:
        "TotalStep: 8e5, TargetReward: 300, UsedTime: 1800s"
        # Separate eval env (verbose) from the silent training env.
        args.env_eval = PreprocessEnv(env=gym.make('BipedalWalker-v3'))
        args.env = PreprocessEnv(env=gym.make('BipedalWalker-v3'), if_print=False)
        args.env.env_num = 1
        args.agent.cri_target = False
        args.gamma = 0.98
        args.if_per_or_gae = True  # enable GAE for this on-policy run
        args.break_step = int(8e6)
        # train_and_evaluate(args)
        args.env_num = 2
        args.worker_num = 4
        args.target_step = args.env.max_step * 16 // (args.env_num * args.worker_num)
        train_and_evaluate_mp(args)
def demo_discrete_action_off_policy():
    """Demo: train an off-policy Double-DQN agent on classic-control gym envs.

    Each ``if_train_*`` int flag enables one benchmark configuration below.
    """
    args = Arguments(if_on_policy=False)
    args.agent = AgentDoubleDQN()  # AgentDQN()
    args.visible_gpu = '0'
    if_train_cart_pole = 0
    if if_train_cart_pole:
        "TotalStep: 5e4, TargetReward: 200, UsedTime: 60s"
        # NOTE(review): this branch only configures args -- train_and_evaluate()
        # is called inside the lunar-lander branch below, so enabling cart pole
        # alone (cart_pole=1, lunar_lander=0) would train nothing. Confirm intent.
        args.env = PreprocessEnv(env='CartPole-v0')
        args.reward_scale = 2 ** -1
        args.target_step = args.env.max_step * 8
    if_train_lunar_lander = 1
    if if_train_lunar_lander:
        "TotalStep: 6e5, TargetReturn: 200, UsedTime: 1500s, LunarLander-v2, DQN"
        args.env = PreprocessEnv(env=gym.make('LunarLander-v2'))
        args.repeat_times = 2 ** 5
        args.if_per_or_gae = True  # enable prioritized experience replay
        train_and_evaluate(args)
def demo_discrete_action_on_policy():
    """Demo: train an on-policy discrete-action PPO agent on classic-control gym envs.

    Exactly one ``if_train_*`` branch should be enabled; it configures ``args``
    and training is then launched once at the end of the function.
    """
    args = Arguments(if_on_policy=True)  # hyper-parameters of on-policy is different from off-policy
    args.agent = AgentDiscretePPO()
    args.visible_gpu = '0'
    if_train_cart_pole = 1
    if if_train_cart_pole:
        "TotalStep: 5e4, TargetReward: 200, UsedTime: 60s"
        args.env = PreprocessEnv(env='CartPole-v0')
        args.reward_scale = 2 ** -1
        args.target_step = args.env.max_step * 8
    if_train_lunar_lander = 0
    if if_train_lunar_lander:
        "TotalStep: 6e5, TargetReturn: 200, UsedTime: 1500s, LunarLander-v2, PPO"
        args.env = PreprocessEnv(env=gym.make('LunarLander-v2'))
        args.repeat_times = 2 ** 5
        args.if_per_or_gae = True  # enable GAE for this on-policy run
    # BUG FIX: train_and_evaluate() was nested inside the (disabled) lunar-lander
    # branch, so the default configuration (cart pole enabled) set up args but
    # never trained. Launch training once after whichever branch configured args.
    train_and_evaluate(args)
if __name__ == '__main__':
    # Select exactly one demo to run; the others are kept commented for reference.
    # demo_continuous_action_off_policy()
    demo_continuous_action_on_policy()
    # demo_discrete_action_off_policy()
    # demo_discrete_action_on_policy()
| 34.197674
| 101
| 0.65998
|
4a01a57cca84ae1c399ea78d5546d2265b709469
| 2,069
|
py
|
Python
|
_MOM/_DBW/_SAW/_PG/Sequence.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | 6
|
2016-12-10T17:51:10.000Z
|
2021-10-11T07:51:48.000Z
|
_MOM/_DBW/_SAW/_PG/Sequence.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | null | null | null |
_MOM/_DBW/_SAW/_PG/Sequence.py
|
Tapyr/tapyr
|
4235fba6dce169fe747cce4d17d88dcf4a3f9f1d
|
[
"BSD-3-Clause"
] | 3
|
2020-03-29T07:37:03.000Z
|
2021-01-21T16:08:40.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# #*** <License> ************************************************************#
# This module is part of the package MOM.DBW.SAW.PG.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# MOM.DBW.SAW.PG.Sequence
#
# Purpose
# Wrap a PostgreSQL sequence
#
# Revision Dates
# 24-Jun-2013 (CT) Creation
# 26-Jul-2013 (CT) Redefine `_reserve`, not `reserve`
# 28-Jul-2013 (CT) Quote `seq_name` in `SELECT setval`; fix typo
# 26-Aug-2013 (CT) Split into `Sequence`, `Sequence_PID`, `Sequence_X`
# ««revision-date»»···
#--
from _MOM import MOM
from _TFL import TFL
from _TFL.pyk import pyk
import _MOM._DBW
import _MOM._DBW._SAW._PG.DBS
import _MOM._DBW._SAW.Sequence
class _PG_Sequence_ (MOM.DBW.SAW._Sequence_S_) :
    """Wrap a PostgreSQL sequence.

    Mixin shared by the concrete `Sequence*` classes below: after the generic
    reservation bookkeeping it advances the native PostgreSQL sequence so the
    backend never hands out a value at or below the reserved one.
    """

    def _reserve (self, conn, value) :
        # Let the superclass record the reservation first, then sync the
        # database-side sequence via setval().
        result = self.__super._reserve (conn, value)
        # `%d` forces `value` to an int, so no injection risk from it;
        # `seq_name` is an internal identifier rendered as a quoted literal.
        stmt = "SELECT setval('%s', %d)" % (self.seq_name, value)
        conn.execute (stmt)
        return result
    # end def _reserve

# end class _PG_Sequence_
class PG_Sequence (_PG_Sequence_, MOM.DBW.SAW.Sequence) :
    """Wrap a PostgreSQL sequence without its own sequence table"""

    # `_real_name` tells the TFL/MOM export machinery to publish this class
    # as plain `Sequence` inside the PG backend package.
    _real_name = "Sequence"

Sequence = PG_Sequence # end class
class PG_Sequence_PID (_PG_Sequence_, MOM.DBW.SAW.Sequence_PID) :
    """Wrap a PostgreSQL sequence for `pid`"""

    # Exported as `Sequence_PID` by the TFL/MOM export machinery.
    _real_name = "Sequence_PID"

Sequence_PID = PG_Sequence_PID # end class
class PG_Sequence_X (_PG_Sequence_, MOM.DBW.SAW.Sequence_X) :
    """Wrap a PostgreSQL sequence with its own sequence table"""

    # Exported as `Sequence_X` by the TFL/MOM export machinery.
    _real_name = "Sequence_X"

Sequence_X = PG_Sequence_X # end class
if __name__ != "__main__" :
    # On import (not direct execution), export this module's public names into
    # the MOM.DBW.SAW.PG package namespace (TFL package-namespace convention).
    MOM.DBW.SAW.PG._Export ("*")
### __END__ MOM.DBW.SAW.PG.Sequence
| 29.140845
| 78
| 0.62784
|
4a01a59fc202934385f5fbb686143490295ea8aa
| 554
|
py
|
Python
|
main.py
|
harsh-98/witnet_lib
|
cf224db5fe4fd0ef825a1c37f8031b07a9faddb4
|
[
"MIT"
] | 1
|
2020-09-19T09:45:22.000Z
|
2020-09-19T09:45:22.000Z
|
main.py
|
harsh-98/witnet_lib
|
cf224db5fe4fd0ef825a1c37f8031b07a9faddb4
|
[
"MIT"
] | null | null | null |
main.py
|
harsh-98/witnet_lib
|
cf224db5fe4fd0ef825a1c37f8031b07a9faddb4
|
[
"MIT"
] | null | null | null |
from witnet_lib.map_nodes import MapNodes
from witnet_lib import utils
if __name__ == "__main__":
    # BUG FIX: `json` was used below but never imported, so both json.dump()
    # calls raised NameError at runtime.
    import json

    # Minimal peer-mapping configuration for a local witnet node.
    config = utils.AttrDict({
        "genesis_sec": 159555600,     # network genesis timestamp (seconds)
        "magic": 3029,                # network magic number
        "sender_addr": "127.0.0.1:21341",
        "time_per_epoch": 45,         # seconds per epoch
    })
    mapper = MapNodes(config, ["127.0.0.1:21337"])
    # Crawl the network with 3 workers; returns all discovered nodes and the
    # subset that responded.
    all_nodes, active_nodes = mapper.start_mapping_workers(3)
    print(all_nodes)
    with open("active.json", 'w') as f:
        json.dump(active_nodes, f)
    # all_nodes is converted to a list so it is JSON-serialisable (sets are not)
    with open('all_nodes.json', 'w') as f:
        json.dump(list(all_nodes), f)
| 30.777778
| 61
| 0.631769
|
4a01a794dc62019897c0c75fd0c384c1dc7e037a
| 135
|
py
|
Python
|
data/PebbleCommand.py
|
JDVDev/TaskRelay
|
20213d31c90c0420e62f1e75c138ca1cb89211ee
|
[
"Apache-2.0"
] | null | null | null |
data/PebbleCommand.py
|
JDVDev/TaskRelay
|
20213d31c90c0420e62f1e75c138ca1cb89211ee
|
[
"Apache-2.0"
] | null | null | null |
data/PebbleCommand.py
|
JDVDev/TaskRelay
|
20213d31c90c0420e62f1e75c138ca1cb89211ee
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
class PebbleCommand(Enum):
    """Commands exchanged with the Pebble relay.

    The member values double as the wire-format command strings, so they must
    stay exactly as written (including the camelCase of ``sendMessage``).
    """
    connect = "connect"
    disconnect = "disconnect"
    sendMessage = "sendMessage"
| 27
| 31
| 0.711111
|
4a01a7ad8137e224641801d1e4feed0f0ec80156
| 20,429
|
py
|
Python
|
histomics_detect/models/lnms_loss.py
|
Leengit/HistomicsDetect
|
ae9114c6d40af299a460417fe9470764155156a9
|
[
"Apache-2.0"
] | 2
|
2022-03-03T19:45:59.000Z
|
2022-03-11T14:05:21.000Z
|
histomics_detect/models/lnms_loss.py
|
Leengit/HistomicsDetect
|
ae9114c6d40af299a460417fe9470764155156a9
|
[
"Apache-2.0"
] | 2
|
2022-03-08T19:29:42.000Z
|
2022-03-09T19:56:49.000Z
|
histomics_detect/models/lnms_loss.py
|
Leengit/HistomicsDetect
|
ae9114c6d40af299a460417fe9470764155156a9
|
[
"Apache-2.0"
] | 1
|
2022-03-04T00:23:13.000Z
|
2022-03-04T00:23:13.000Z
|
import tensorflow as tf
import tensorflow.keras.backend as kb
from typing import List, Tuple
from histomics_detect.metrics.iou import iou, greedy_iou_mapping
def normal_loss(
    loss_object: tf.keras.losses.Loss,
    boxes: tf.Tensor,
    rpn_boxes_positive: tf.Tensor,
    scores: tf.Tensor,
    positive_weight: float,
    standard: List[tf.keras.metrics.Metric] = [],
    weighted_loss: bool = False,
    neg_pos_loss: bool = False,
    use_pos_neg_loss: bool = False,
    min_iou: float = 0.18,
) -> Tuple[tf.Tensor, tf.Tensor]:
    """
    calculates the normal loss of a lnms output

    labels are calculated based on the largest iou, the prediction that is closest to the respective
    ground truth gets assigned a 1 label and the rest a 0

    then a loss is applied to the objectiveness score output 'nms_output' and the labels

    S: size of neighborhood
    N: number of predictions
    D: size of a single prediction
    G: number of ground truth boxes

    Parameters
    ----------
    loss_object:
        loss function for loss calculation between 'labels' and 'nms_output'
    boxes: tensor (float32)
        ground truth boxes
        shape: G x 4
    rpn_boxes_positive: tensor (float32)
        predicted boxes
        shape: N x 4
    scores: tensor (float32)
        objectiveness scores corresponding to the predicted boxes after lnms processing
        shape: N x 1
    positive_weight: float
        weight applied to the positive labels ( == 1)
    standard: metric
        list of tensorflow metrics
        1, 2 should be positive and negative loss respectively if 'neg_pos_loss' set to true
    weighted_loss: bool
        if true, loss of positive labels is weighted by the difference in numbers of positive and negative
        labels
    neg_pos_loss: bool
        if true, the loss of the positive and the negative labels is calculated and logged in the metrics
    use_pos_neg_loss: bool
        returns the weighted sum of the pos and neg loss instead of the normal loss
        !!! only works if neg_pos_loss is also true
    min_iou: float
        minimum iou such that box is considered positive prediction

    Returns
    -------
    loss: float
        loss value
    indexes: tensor (float32)
        indexes of the values that correspond to positive anchors
    """
    labels, indexes = calculate_labels(boxes, rpn_boxes_positive, tf.shape(scores), min_iou)

    # calculate negative and positive labels loss for comparing experiment
    if neg_pos_loss:
        (pos_loss, neg_loss), _ = _pos_neg_loss_calculation(scores, labels, loss_object, standard)

        # use weighted sum of positive and negative loss for training the model
        if use_pos_neg_loss:
            return pos_loss * positive_weight + neg_loss, indexes

    # weigh loss
    if weighted_loss:
        # BUG FIX: the positive/negative counts were previously taken from
        # `_pos_neg_loss_calculation`, which only runs when `neg_pos_loss` is
        # set -- `weighted_loss=True, neg_pos_loss=False` raised a NameError.
        # Derive the counts from `labels` directly instead (labels is a 0/1
        # tensor, so its sum is the positive count).
        num_pos = tf.reduce_sum(tf.cast(labels, tf.float32))
        num_neg = tf.cast(tf.size(labels), tf.float32) - num_pos
        # epsilon guards against division by zero when there are no positive
        # labels (consistent with the weighting in paper_loss)
        weighted_labels = tf.cast(labels, tf.float32) * num_neg / (num_pos + 1e-8) * positive_weight
        weight = weighted_labels + (1 - labels)
        loss = loss_object(weighted_labels, scores * weight)
    else:
        loss = loss_object(labels, scores)

    return tf.reduce_sum(loss), indexes
def paper_loss(
    boxes: tf.Tensor,
    rpn_boxes_positive: tf.Tensor,
    nms_output: tf.Tensor,
    loss_object: tf.keras.losses.Loss,
    positive_weight: float,
    standard: List[tf.keras.metrics.Metric],
    weighted_loss: bool = False,
    neg_pos_loss: bool = False,
    min_iou: float = 0.18,
) -> Tuple[tf.Tensor, tf.Tensor]:
    """
    loss calculation of the paper "Learning Non-Max Suppression"

    the loss is calculated with:
    - the labels vector l with 1s for positive labels and -1 for negative labels
    - the score output of the network n with values btw -1 and 1
    - calculation: positive_label_weight * log(1 + exp(-l * n))

    S: size of neighborhood
    N: number of predictions
    D: size of a single prediction
    G: number of ground truth boxes

    Parameters
    ----------
    loss_object:
        loss function for loss calculation between 'labels' and 'nms_output'
        (only used here for the optional pos/neg metric logging)
    boxes: tensor (float32)
        ground truth boxes
        shape: G x 4
    rpn_boxes_positive: tensor (float32)
        predicted boxes
        shape: N x 4
    nms_output: tensor (float32)
        objectiveness scores corresponding to the predicted boxes after lnms processing
        shape: N x 1
    positive_weight: float
        weight applied to the positive labels ( == 1)
    standard: metric
        list of tensorflow metrics
        1, 2 should be positive and negative loss respectively if 'neg_pos_loss' set to true
    weighted_loss: bool
        if true, loss of positive labels is weighted by the difference in numbers of positive and negative
        labels
    neg_pos_loss: bool
        if true, the loss of the positive and the negative labels is calculated and logged in the metrics
    min_iou: float
        minimum iou such that box is considered positive prediction

    Returns
    -------
    loss: float
        loss value
    indexes: tensor (float32)
        indexes of the values that correspond to positive anchors
    """
    labels, indexes = calculate_labels(boxes, rpn_boxes_positive, tf.shape(nms_output), min_iou)

    # calculate pos and neg loss (also yields the pos/neg label tensors used
    # for weighting below)
    if weighted_loss or neg_pos_loss:
        _, (positive_labels, negative_labels) = _pos_neg_loss_calculation(
            nms_output, labels, loss_object, standard
        )
    if weighted_loss:
        # up-weight positive samples by the class imbalance ratio; epsilon
        # guards against division by zero when there are no positives
        num_pos = tf.cast(tf.size(positive_labels), tf.float32)
        num_neg = tf.cast(tf.size(negative_labels), tf.float32)
        weight = labels * num_neg / (num_pos + 1e-8) * positive_weight + (1 - labels)
    else:
        weight = tf.ones(tf.shape(nms_output))

    # reformat labels and output from 0, 1 space to -1, 1 space
    labels = 2 * labels - 1
    nms_output = (2 * nms_output) - 1

    # calculate loss: weighted logistic loss from the paper,
    # weight * log(1 + exp(-l * n))
    loss = weight * kb.log(1 + kb.exp(-labels * nms_output))
    loss = tf.reduce_sum(loss)

    return loss, indexes
def calculate_labels(boxes, rpn_boxes_positive, output_shape, min_iou: float = 0.18):
    """Assign a binary label to every prediction via greedy IoU matching.

    Each ground-truth box is greedily matched to at most one prediction with
    IoU >= `min_iou`; matched predictions receive label 1, all others label 0.

    N: number of predictions
    G: number of ground truth boxes

    Parameters
    ----------
    boxes: tensor (float32)
        ground truth boxes
        shape: G x 4
    rpn_boxes_positive: tensor (float32)
        predicted boxes
        shape: N x 4
    output_shape: tensor (int32)
        shape of the label output
    min_iou: float
        minimum iou such that box is considered positive prediction

    Returns
    -------
    labels: tensor (float32)
        one entry per prediction: 1 where the prediction was matched to a
        ground truth, 0 everywhere else
    indexes: tensor (int32)
        column vector of indices of the matched (positive) predictions
    """
    overlap = iou(rpn_boxes_positive, boxes)
    _, _, _, matched_pairs, _, _ = greedy_iou_mapping(overlap, min_iou)

    # first column of the true-positive list holds the prediction indices
    positive_idx = tf.reshape(matched_pairs[:, 0], (-1, 1))
    label_tensor = tf.scatter_nd(positive_idx, tf.ones(tf.shape(positive_idx)), output_shape)
    return label_tensor, positive_idx
def _pos_neg_loss_calculation(
    nms_output: tf.Tensor,
    labels: tf.Tensor,
    loss_object: tf.keras.losses.Loss,
    standard: List[tf.keras.metrics.Metric],
) -> Tuple[Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
    """
    Split the scores into positive and negative samples, compute the loss of
    each group separately, and log both values to the supplied metrics.

    S: size of neighborhood
    N: number of predictions
    D: size of a single prediction
    G: number of ground truth boxes

    Parameters
    ----------
    nms_output: tensor (float32)
        objectiveness scores corresponding to the predicted boxes after lnms processing
        shape: N x 1
    labels: tensor (int32)
        ground truth labels of corresponding s
        shape: N x 1
    loss_object:
        loss function for loss calculation between 'labels' and 'nms_output'
    standard: metric
        list of tensorflow metrics
        1, 2 should be positive and negative loss respectively if 'neg_pos_loss' set to true

    Returns
    -------
    pos_loss: tensor (float32)
        scalar value
    neg_loss: tensor (float32)
        scalar value
    positive_labels: tensor (int32)
        ones for the number of positive ground truth samples
    negative_labels:
        zeros for the number of positive ground truth samples
    """
    # partition 0 collects entries where labels != 0 (positives),
    # partition 1 collects entries where labels == 0 (negatives)
    positive_predictions, negative_predictions = tf.dynamic_partition(
        nms_output, tf.cast(labels == 0, tf.int32), 2
    )
    positive_labels = tf.ones(tf.shape(positive_predictions))
    negative_labels = tf.zeros(tf.shape(negative_predictions))

    # calculate loss
    # BUG FIX: tf.keras losses are called as loss(y_true, y_pred); the original
    # passed the predictions as y_true and the labels as y_pred, which is wrong
    # for asymmetric losses like binary cross-entropy and inconsistent with the
    # loss_object(labels, scores) call order used elsewhere in this module.
    pos_loss = tf.reduce_sum(loss_object(positive_labels, positive_predictions))
    neg_loss = tf.reduce_sum(loss_object(negative_labels, negative_predictions))

    def zero_func():
        return 0.0

    # fall back to 0 when a group is empty to avoid reducing an empty tensor
    pos_loss = tf.cond(tf.size(positive_labels) > 0, lambda: pos_loss, zero_func)
    neg_loss = tf.cond(tf.size(negative_labels) > 0, lambda: neg_loss, zero_func)

    # update metrics (epsilon keeps the logged value strictly positive)
    standard[1].update_state(pos_loss + 1e-8)
    standard[2].update_state(neg_loss + 1e-8)

    return (pos_loss, neg_loss), (positive_labels, negative_labels)
def cluster_labels_indexes(scores, cluster_assignment) -> Tuple[tf.Tensor, tf.Tensor]:
    """
    calculate the labels for the predictions based on clusters and scores

    the 'cluster_assignment' relates predictions to a cluster for each ground truth
    for each cluster the prediction with the highest score is assigned a positive label (label = 1)
    the rest is assigned a negative label (label = 0)

    N: number of predictions

    Parameters
    ----------
    scores: tensor (float32)
        objectiveness scores corresponding to the predicted boxes after lnms processing
        shape: N x 1
    cluster_assignment: tensor (int32)
        cluster labels for each prediction
        shape: N x 1

    Returns
    -------
    labels: tensor (float32)
        1 at the index of each cluster's highest-scoring prediction, 0 elsewhere
        shape: N x 1
    indexes: tensor (int32)
        per-cluster index of the highest-scoring prediction
    """
    cluster_assignment = tf.expand_dims(cluster_assignment, axis=1)

    # find prediction index with highest objectiveness in cluster
    # NOTE(review): assumes scores are non-negative -- masking out other
    # clusters with 0 would otherwise not isolate the cluster maximum; confirm.
    def max_cluster_index_func(i) -> tf.int32:
        index = tf.cast(i, tf.int32)
        # zero out scores of predictions outside cluster `i`, then take argmax
        max_index = tf.argmax(
            tf.multiply(
                tf.cast(scores, tf.float32),
                tf.cast(tf.equal(cluster_assignment, tf.cast(index, tf.int32)), tf.float32),
            )
        )
        return tf.cast(max_index, tf.int32)

    # one winner index per cluster id (0 .. max cluster id)
    indexes = tf.map_fn(
        lambda x: max_cluster_index_func(x), tf.range(0, tf.reduce_max(cluster_assignment) + 1)
    )
    # scatter 1s at the winner positions into an N x 1 label tensor
    labels = tf.scatter_nd(indexes, tf.ones(tf.shape(indexes)), tf.shape(scores))
    return labels, indexes
def clustering_loss(
    nms_output: tf.Tensor,
    cluster_assignment: tf.Tensor,
    loss_object: tf.keras.losses.Loss,
    positive_weight: float,
    standard: List[tf.keras.metrics.Metric],
    boxes: tf.Tensor,
    rpn_positive: tf.Tensor,
    weighted_loss: bool = False,
    neg_pos_loss: bool = False,
    add_regression_param: int = 0,
) -> Tuple[tf.Tensor, tf.Tensor]:
    """
    clustering loss calculation

    the loss is calculated by:
    - for each cluster the prediction with the highest objectiveness score is stored
    - the index of the stored predictions is set to one in a labels vector
    - the values of the other indexes are 0
    - the loss is calculated by calculating the difference btw. the labels and the nms_output

    S: size of neighborhood
    N: number of predictions
    D: size of a single prediction
    G: number of ground truth boxes

    Parameters
    ----------
    loss_object:
        loss function for loss calculation between 'labels' and 'nms_output'
    nms_output: tensor (float32)
        objectiveness scores corresponding to the predicted boxes after lnms processing
        shape: N x 1 (column 0: score; columns 1.. : optional regression outputs)
    cluster_assignment: tensor (int32)
        cluster labels for each prediction
        shape: N x 1
    positive_weight: float
        weight applied to the positive labels ( == 1)
    standard: metric
        list of tensorflow metrics
        1, 2 should be positive and negative loss respectively if 'neg_pos_loss' set to true
    boxes: tensor (int32)
        ground truth boxes
    rpn_positive: tensor (float32)
        predicted boxes
    weighted_loss: bool
        if true, loss of positive labels is weighted by the difference in numbers of positive and negative
        labels
    neg_pos_loss: bool
        if true, the loss of the positive and the negative labels is calculated and logged in the metrics
    add_regression_param: int
        0 -> lnms only predicts a single obj. score
        1 -> lnms also regresses the center of the boxes
        2 -> lnms regresses the full boxes
    # TODO add weighting for regression vs score loss

    Returns
    -------
    loss: float
        loss value
    labels: tensor (float32)
        per-prediction 0/1 label tensor (1 marks each cluster's best prediction)
    """
    # column 0 of nms_output is the objectiveness score
    scores = tf.expand_dims(nms_output[:, 0], axis=1)
    labels, indeces = cluster_labels_indexes(scores, cluster_assignment)

    # calculate pos and neg loss (metric logging side effect only)
    if neg_pos_loss:
        _pos_neg_loss_calculation(scores, labels, loss_object, standard)
    if weighted_loss:
        # NOTE(review): unlike normal_loss/paper_loss this uses a fixed
        # positive_weight rather than the pos/neg imbalance ratio -- confirm.
        weight = labels * positive_weight + (1 - labels)
    else:
        weight = tf.ones(tf.shape(scores))

    if add_regression_param > 0:
        # regression outputs live in the columns after the score
        reg = nms_output[:, 1 : add_regression_param * 2 + 1]

        # distance (squashed through a sigmoid) between a cluster's ground
        # truth box and its best-scoring prediction
        def pos_prediction_dist_func(i) -> tf.float32:
            index = tf.cast(i, tf.int32)
            # zero out scores outside cluster `index`, then pick the maximum
            cluster_scores = tf.multiply(
                tf.cast(scores, tf.float32),
                tf.cast(tf.equal(cluster_assignment, tf.cast(index, tf.int32)), tf.float32),
            )
            max_index = tf.cast(tf.argmax(cluster_scores), tf.int32)[0]
            # /100 scales pixel-ish coordinate differences before the sigmoid
            dist = tf.math.sigmoid(
                (
                    boxes[index, : add_regression_param * 2]
                    - rpn_positive[max_index, : add_regression_param * 2]
                )
                / 100
            )
            return tf.cast(dist, tf.float32)

        distances = tf.map_fn(
            lambda x: pos_prediction_dist_func(x),
            tf.cast(tf.range(0, tf.reduce_max(cluster_assignment) + 1), tf.float32),
        )
        # scatter the per-cluster distances to the rows of their best predictions
        distance_vector = tf.scatter_nd(indeces, distances, tf.shape(reg))
        loss_score = loss_object(weight * labels, weight * scores)
        # regression loss only on the positive rows (labels masks `reg`)
        loss_reg = loss_object(distance_vector, labels * reg)
        return tf.reduce_sum(loss_score + loss_reg), labels
    else:
        loss = loss_object(weight * labels, weight * scores)
        return tf.reduce_sum(loss), labels
def normal_clustering_loss(
    nms_output: tf.Tensor,
    boxes: tf.Tensor,
    rpn_boxes_positive: tf.Tensor,
    cluster_assignment: tf.Tensor,
    loss_object: tf.keras.losses.Loss,
    positive_weight: float,
    standard: List[tf.keras.metrics.Metric],
    weighted_loss: bool = False,
    neg_pos_loss: bool = False,
    use_pos_neg_loss: bool = False,
    norm_loss_weight: float = 1,
    add_regression_param: int = 0,
    min_iou: float = 0.18,
) -> Tuple[float, tf.Tensor]:
    """Weighted combination of the normal and the clustering loss.

    loss = 'norm_loss_weight' * normal_loss + clustering_loss

    Parameters
    ----------
    nms_output: tensor (float32)
        objectiveness scores (column 0) plus optional regression outputs after
        lnms processing, shape: N x 1(+)
    boxes: tensor (float32)
        ground truth boxes, shape: G x 4
    rpn_boxes_positive: tensor (float32)
        predicted boxes, shape: N x 4
    cluster_assignment: tensor (int32)
        cluster label for each prediction, shape: N x 1
    loss_object:
        loss function applied between labels and scores
    positive_weight: float
        weight applied to the positive labels ( == 1)
    standard: metric
        list of tensorflow metrics; entries 1 and 2 receive the positive and
        negative loss when 'neg_pos_loss' is true
    weighted_loss: bool
        if true, positive labels are weighted by the pos/neg imbalance
    neg_pos_loss: bool
        if true, positive and negative losses are computed and logged
    use_pos_neg_loss: bool
        forward the weighted pos/neg sum from normal_loss instead of its
        plain loss (requires neg_pos_loss)
    norm_loss_weight: float
        weight of the normal loss in the combination
    add_regression_param: int
        0 -> score only, 1 -> also regress box centers, 2 -> full boxes
    min_iou: float
        minimum iou such that a box counts as a positive prediction

    Returns
    -------
    loss: float
        the combined loss
    indexes: tensor (float32)
        indexes of the values that correspond to positive anchors
    """
    # column 0 of nms_output holds the objectiveness score
    objectness = tf.expand_dims(nms_output[:, 0], axis=1)

    norm_part, indexes = normal_loss(
        loss_object=loss_object,
        boxes=boxes,
        rpn_boxes_positive=rpn_boxes_positive,
        scores=objectness,
        positive_weight=positive_weight,
        standard=standard,
        weighted_loss=weighted_loss,
        neg_pos_loss=neg_pos_loss,
        use_pos_neg_loss=use_pos_neg_loss,
        min_iou=min_iou,
    )
    cluster_part, _ = clustering_loss(
        nms_output=nms_output,
        cluster_assignment=cluster_assignment,
        loss_object=loss_object,
        positive_weight=positive_weight,
        standard=standard,
        boxes=boxes,
        rpn_positive=rpn_boxes_positive,
        weighted_loss=weighted_loss,
        neg_pos_loss=neg_pos_loss,
        add_regression_param=add_regression_param,
    )
    return norm_loss_weight * norm_part + cluster_part, indexes
def xor_loss(nms_output: tf.Tensor, cluster_assignment: tf.Tensor):
    """
    xor loss
    the loss is minimal if only one score of each cluster is one and the others are zero
    calculation for each cluster:
        - calculate cluster sum
        - subtract one and square result
    calculate for each prediction
        - subtract 1/2 from the score
        - square the result
        - subtract from previous result
    sum over all prediction losses
    Parameters
    ----------
    nms_output: tensor (float32)
        output scores for each prediction
    cluster_assignment: tensor (int32)
        assignment of each prediction to the corresponding cluster
    Returns
    -------
    loss: float
        calculated loss
    (second return value is always None; placeholder so callers can unpack
    the same way as the other loss functions in this file)
    """
    # TODO find error cause
    # TODO add optional neg pos loss calculation
    def cluster_sum(i) -> tf.float32:
        # Indices (from tf.where) of the predictions assigned to cluster i.
        pred_indexes = tf.where(tf.equal(tf.cast(cluster_assignment, tf.float32), tf.cast(i, tf.float32)))
        predictions = tf.gather_nd(nms_output, pred_indexes)
        # Squared deviation of the cluster's score sum from 1.
        sum_req = (tf.reduce_sum(predictions) - 1) ** 2
        indexes = tf.cast(pred_indexes, tf.int64)
        update_shape = tf.cast(tf.shape(cluster_assignment), tf.int64)
        def false_fn():
            # Scatter the cluster's penalty onto the slots of its members.
            return tf.scatter_nd(indexes, tf.ones(tf.shape(indexes)[0]) * sum_req, update_shape)
        # Empty clusters contribute an all-zero tensor instead of scattering.
        scattered_sum = tf.cond(tf.size(indexes) == 0, lambda: tf.zeros(update_shape), false_fn)
        return tf.squeeze(scattered_sum)
    # Clusters are assumed to be labeled 0..max contiguously — TODO confirm.
    number_clusters = tf.reduce_max(cluster_assignment) + 1
    number_predictions = tf.shape(cluster_assignment)[0]
    output_signature = tf.TensorSpec.from_tensor(tf.ones(number_predictions, dtype=tf.float32))
    cluster_sums = tf.map_fn(lambda x: cluster_sum(x), tf.range(0, number_clusters), dtype=output_signature)
    cluster_sums = tf.expand_dims(tf.reduce_sum(cluster_sums, axis=0), axis=1)
    # NOTE(review): cluster_sum() already returns squared deviations, so
    # squaring (cluster_sums - 1) again here does not match the docstring's
    # formula — possibly the "error cause" of the TODO above; confirm.
    loss = tf.reduce_sum((cluster_sums - 1) ** 2 - (nms_output - 0.5) ** 2, axis=0)
    return loss, None
| 33.600329
| 108
| 0.665084
|
4a01a805e1b76bf94f65e4dd91fd3940a90b18fa
| 2,054
|
py
|
Python
|
tetrahedron/vertex_effect.py
|
mayhem/led-tetrahedron
|
bed314d0db3c4fe355cd047434b04108e19265cf
|
[
"BSD-2-Clause"
] | null | null | null |
tetrahedron/vertex_effect.py
|
mayhem/led-tetrahedron
|
bed314d0db3c4fe355cd047434b04108e19265cf
|
[
"BSD-2-Clause"
] | null | null | null |
tetrahedron/vertex_effect.py
|
mayhem/led-tetrahedron
|
bed314d0db3c4fe355cd047434b04108e19265cf
|
[
"BSD-2-Clause"
] | null | null | null |
import math
from random import random, randint, seed
from math import fmod, sin, pi
from time import sleep, time
from colorsys import hsv_to_rgb, rgb_to_hsv, rgb_to_hsv
import undulating_effect
import gradient
import palette
import effect
"""
Vertexes:
0 - top
1 - tree 0
2 - tree 1 (tree + 1 clockwise)
3 - tree 2 (tree + 2 clockwise)
Segments:
3 - top -> tree 0
1 - top -> tree 1
0 - top -> tree 2
4 - tree 0 -> tree 1
5 - tree 1 -> tree 2
2 - tree 2 -> tree 0
"""
class VertexEffect(effect.Effect):
    """Color each tetrahedron segment with a gradient between the hues of
    its two endpoint vertices (see the module docstring for the vertex and
    segment numbering). The base hue drifts slowly on every loop() pass.
    """

    def __init__(self, led_art, name):
        effect.Effect.__init__(self, led_art, name)
        self.palettes = []
        self.point_distance = .25
        # Current base hue, kept in [0, 1); advanced by hue_increment per loop.
        self.hue = random()
        self.hue_increment = .005

    def setup(self, num_leds):
        # Number of LEDs per segment, used to size the rendered gradients.
        self.num_leds = num_leds

    def set_color(self, color):
        # This effect derives its own colors; external color requests are ignored.
        pass

    def make_palettes(self, hues):
        ''' pass in 4 hues in vertex order '''
        # Each segment palette runs from the hue of its start vertex (at 0.0)
        # to the hue of its end vertex (at 1.0), per the segment table above.
        return [  # segment
            [ (0.0, hues[0]), (1.0, hues[3]) ],  # 0
            [ (0.0, hues[0]), (1.0, hues[2]) ],  # 1
            [ (0.0, hues[1]), (1.0, hues[3]) ],  # 2
            [ (0.0, hues[0]), (1.0, hues[1]) ],  # 3
            [ (0.0, hues[1]), (1.0, hues[2]) ],  # 4
            [ (0.0, hues[2]), (1.0, hues[3]) ],  # 5
        ]

    def create_analogous_palette(self, hue):
        """Return 4 analogous colors spread randomly (up to .5) around *hue*."""
        spread = random() / 2.0
        return (palette.make_hsv(hue),
                palette.make_hsv(fmod(hue - spread + 1.0, 1.0)),
                palette.make_hsv(fmod(hue - (spread * 2) + 1.0, 1.0)),
                palette.make_hsv(fmod(hue + spread, 1.0)))

    def loop(self):
        hues = self.create_analogous_palette(self.hue)
        palettes = self.make_palettes(hues)
        for i, pal in enumerate(palettes):
            # Bitmask selecting segment i of the art piece.
            strip = 1 << i
            try:
                g = gradient.Gradient(self.num_leds, pal)
                g.render(self.led_art, strip)
            except ValueError:
                # Best effort: skip a segment whose palette fails to render.
                pass
        self.led_art.show()
        # Advance and wrap the hue so it stays in [0, 1) over long runs.
        self.hue = fmod(self.hue + self.hue_increment, 1.0)
        sleep(5)
| 25.04878
| 65
| 0.525803
|
4a01a842e58a26263176fd45f848183212152cef
| 1,082
|
py
|
Python
|
raspberry-pi/ble.py
|
ciffelia/airpapyrus
|
4e6642025d6b1e81210c63f3cae46e4e361804ea
|
[
"MIT"
] | null | null | null |
raspberry-pi/ble.py
|
ciffelia/airpapyrus
|
4e6642025d6b1e81210c63f3cae46e4e361804ea
|
[
"MIT"
] | null | null | null |
raspberry-pi/ble.py
|
ciffelia/airpapyrus
|
4e6642025d6b1e81210c63f3cae46e4e361804ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import struct
from collections import namedtuple
from bluepy.btle import Scanner
# Company identifier in the manufacturer-specific AD structure; "ffff" is
# the test company ID.
myCompanyId = "ffff"
# Binary layout of the advertised payload: three little-endian float32
# followed by two uint16, matching MeasurementValue's field order.
AdvertisePayload = struct.Struct("<fffHH")
# Decoded measurement record produced from AdvertisePayload.unpack().
MeasurementValue = namedtuple(
    "MeasurementValue", "temperature humidity pressure co2 tvoc"
)
def scan(timeout):
    """Scan BLE advertisements for *timeout* seconds.

    Returns the first airpapyrus reading found as ``(seq, MeasurementValue)``,
    or ``(None, None)`` when no matching advertisement is seen.
    """
    for dev in Scanner().scan(timeout):
        # AD type 0x09 (Complete Local Name) must identify our device.
        if dev.getValueText(0x09) != "airpapyrus":
            continue
        # AD type 0xFF (Manufacturer Specific Data) carries the payload;
        # its first four hex chars are the company ID.
        manufacturer_data = dev.getValueText(0xFF)
        if manufacturer_data is None or manufacturer_data[0:4] != myCompanyId:
            continue
        return parseAirpapyrusAdvertise(manufacturer_data)
    return None, None
def parseAirpapyrusAdvertise(advertise):
    """Split a hex-encoded airpapyrus advertisement into its sequence
    counter (characters 4:6, kept as a string) and the decoded payload."""
    seq, payload_hex = advertise[4:6], advertise[6:]
    fields = AdvertisePayload.unpack(bytes.fromhex(payload_hex))
    return seq, MeasurementValue._make(fields)
| 22.081633
| 79
| 0.672828
|
4a01a8f45ac4b8a50f423ec2b59318ce538b64b9
| 102,220
|
py
|
Python
|
setup.py
|
askervin/cpython
|
001fee14e0f2ba5f41fb733adc69d5965925a094
|
[
"CNRI-Python-GPL-Compatible"
] | 2
|
2019-06-14T19:02:40.000Z
|
2020-04-19T08:20:44.000Z
|
setup.py
|
askervin/cpython
|
001fee14e0f2ba5f41fb733adc69d5965925a094
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
setup.py
|
askervin/cpython
|
001fee14e0f2ba5f41fb733adc69d5965925a094
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2020-12-09T03:51:45.000Z
|
2020-12-09T03:51:45.000Z
|
# Autodetecting setup.py script for building the Python extensions
#
import sys, os, importlib.machinery, re, argparse
from glob import glob
import importlib._bootstrap
import importlib.util
import sysconfig
from distutils import log
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.build_scripts import build_scripts
from distutils.spawn import find_executable
cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ
# Set common compiler and linker flags derived from the Makefile,
# reserved for building the interpreter and the stdlib modules.
# See bpo-21121 and bpo-35257
def set_compiler_flags(compiler_flags, compiler_py_flags_nodist):
    """Append the *compiler_py_flags_nodist* Makefile variable to
    *compiler_flags* inside sysconfig's cached configuration.

    Variables missing from the configuration (sysconfig returns None) are
    treated as empty strings so a partially populated Makefile cannot abort
    the build with a TypeError on string concatenation.
    """
    flags = sysconfig.get_config_var(compiler_flags) or ''
    py_flags_nodist = sysconfig.get_config_var(compiler_py_flags_nodist) or ''
    sysconfig.get_config_vars()[compiler_flags] = flags + ' ' + py_flags_nodist
# Derive the effective interpreter/stdlib build flags once at import time.
set_compiler_flags('CFLAGS', 'PY_CFLAGS_NODIST')
set_compiler_flags('LDFLAGS', 'PY_LDFLAGS_NODIST')

class Dummy:
    """Hack for parallel build"""
    ProcessPoolExecutor = None

# Pre-register a stub whose ProcessPoolExecutor is None — presumably so the
# parallel build machinery falls back to threads; confirm against distutils'
# parallel build_ext support.
sys.modules['concurrent.futures.process'] = Dummy
def get_platform():
    """Return the platform we are building for, honoring cross builds."""
    # Cross compilation: the target platform comes from the environment.
    target = os.environ.get("_PYTHON_HOST_PLATFORM")
    if target is not None:
        return target
    # Collapse every osf1 variant to plain 'osf1'.
    if sys.platform.startswith('osf1'):
        return 'osf1'
    return sys.platform
# Platform we are building for (honors cross builds); resolved once here.
host_platform = get_platform()

# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS"))

# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
    """Add the directory 'dir' to the list 'dirlist' (after any relative
    directories) if:

    1) 'dir' is not already in 'dirlist'
    2) 'dir' actually exists, and is a directory.
    """
    if dir is None:
        return
    if not os.path.isdir(dir) or dir in dirlist:
        return
    # Insert right after the first relative entry so build-local (relative)
    # directories keep priority; with no relative entries, prepend.
    position = 0
    for index, entry in enumerate(dirlist):
        if not os.path.isabs(entry):
            position = index + 1
            break
    dirlist.insert(position, dir)
def sysroot_paths(make_vars, subdirs):
    """Get the paths of sysroot sub-directories.

    * make_vars: a sequence of names of variables of the Makefile where
      sysroot may be set.
    * subdirs: a sequence of names of subdirectories used as the location for
      headers or libraries.

    Only the first make variable carrying a --sysroot option is considered;
    returns the existing sysroot subdirectories, possibly none.
    """
    sysroot_re = re.compile(r'--sysroot=([^"]\S*|"[^"]+")')
    for var_name in make_vars:
        value = sysconfig.get_config_var(var_name)
        if value is None:
            continue
        match = sysroot_re.search(value)
        if match is None:
            continue
        sysroot = match.group(1).strip('"')
        found = []
        for subdir in subdirs:
            # Treat absolute subdirs as relative to the sysroot.
            if os.path.isabs(subdir):
                subdir = subdir[1:]
            candidate = os.path.join(sysroot, subdir)
            if os.path.isdir(candidate):
                found.append(candidate)
        return found
    return []
def macosx_sdk_root():
    """
    Return the directory of the current OSX SDK,
    or '/' if no SDK was specified.
    """
    match = re.search(r'-isysroot\s+(\S+)',
                      sysconfig.get_config_var('CFLAGS'))
    return '/' if match is None else match.group(1)
def is_macosx_sdk_path(path):
    """
    Returns True if 'path' can be located in an OSX SDK
    """
    # /usr/* counts unless it is under /usr/local; otherwise only the
    # /System/ and /Library/ trees are mirrored inside an SDK.
    if path.startswith('/usr/'):
        return not path.startswith('/usr/local')
    return path.startswith(('/System/', '/Library/'))
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
    file is found in one of them, no additional directives are needed.
    'paths' is a list of additional locations to check; if the file is
    found in one of them, the resulting list will contain the directory.
    """
    if host_platform == 'darwin':
        # Honor the MacOSX SDK setting when one was specified.
        # An SDK is a directory with the same structure as a real
        # system, but with only header files and libraries.
        sysroot = macosx_sdk_root()

    def candidate(directory):
        # On macOS, look for system paths inside the SDK instead.
        if host_platform == 'darwin' and is_macosx_sdk_path(directory):
            return os.path.join(sysroot, directory[1:], filename)
        return os.path.join(directory, filename)

    # A hit in a standard directory needs no extra -I/-L directives.
    for directory in std_dirs:
        if os.path.exists(candidate(directory)):
            return []

    # A hit in an additional directory is reported back to the caller.
    for directory in paths:
        if os.path.exists(candidate(directory)):
            return [directory]

    # Not found anywhere
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate *libname* via the compiler's library search.

    Returns [] when the library lives in one of *std_dirs* (no extra -L
    needed), [p] for the directory of *paths* that contains it, or None
    when the compiler cannot find it at all.
    """
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None

    if host_platform == 'darwin':
        sysroot = macosx_sdk_root()

    # Check whether the found file is in one of the standard directories
    dirname = os.path.dirname(result)
    for p in std_dirs:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)

        if host_platform == 'darwin' and is_macosx_sdk_path(p):
            # Note that, as of Xcode 7, Apple SDKs may contain textual stub
            # libraries with .tbd extensions rather than the normal .dylib
            # shared libraries installed in /.  The Apple compiler tool
            # chain handles this transparently but it can cause problems
            # for programs that are being built with an SDK and searching
            # for specific libraries.  Distutils find_library_file() now
            # knows to also search for and return .tbd files.  But callers
            # of find_library_file need to keep in mind that the base filename
            # of the returned SDK library file might have a different extension
            # from that of the library file installed on the running system,
            # for example:
            #   /Applications/Xcode.app/Contents/Developer/Platforms/
            #       MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
            #       usr/lib/libedit.tbd
            # vs
            #   /usr/lib/libedit.dylib
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ ]

        if p == dirname:
            return [ ]

    # Otherwise, it must have been in one of the additional directories,
    # so we have to figure out which one.
    for p in paths:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)

        if host_platform == 'darwin' and is_macosx_sdk_path(p):
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ p ]

        if p == dirname:
            return [p]
    else:
        # for/else: only reached if the loop exhausts without returning —
        # the compiler reported a hit outside every directory we searched.
        assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Returns whether the module 'modname' is present in the list
    of extensions 'extlist'."""
    # any() short-circuits instead of building a throwaway list just to
    # take its length; bool is a subclass of int, so truthiness-based
    # callers see no difference.
    return any(ext.name == modname for ext in extlist)
def find_module_file(module, dirlist):
    """Find a module in a set of possible folders. If it is not found
    return the unadorned filename"""
    # Named 'dirs' so the builtin list() is not shadowed.
    dirs = find_file(module, [], dirlist)
    if not dirs:
        return module
    if len(dirs) > 1:
        log.info("WARNING: multiple copies of %s found", module)
    return os.path.join(dirs[0], module)
class PyBuildExt(build_ext):
    def __init__(self, dist):
        build_ext.__init__(self, dist)
        # Extensions that failed to compile, and extensions that compiled
        # but could not be imported afterwards.
        self.failed = []
        self.failed_on_import = []
        # Honor "make -j": build the extension modules in parallel too.
        if '-j' in os.environ.get('MAKEFLAGS', ''):
            self.parallel = True
    def build_extensions(self):
        """Detect, fix up, and build the extension modules, then report
        which modules are missing, Makefile-built, disabled, or failed."""
        # Detect which modules should be compiled
        missing = self.detect_modules()

        # Remove modules that are present on the disabled list
        extensions = [ext for ext in self.extensions
                      if ext.name not in disabled_module_list]
        # move ctypes to the end, it depends on other modules
        ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
        if "_ctypes" in ext_map:
            ctypes = extensions.pop(ext_map["_ctypes"])
            extensions.append(ctypes)
        self.extensions = extensions

        # Fix up the autodetected modules, prefixing all the source files
        # with Modules/.
        srcdir = sysconfig.get_config_var('srcdir')
        if not srcdir:
            # Maybe running on Windows but not using CYGWIN?
            raise ValueError("No source directory; cannot proceed.")
        srcdir = os.path.abspath(srcdir)
        moddirlist = [os.path.join(srcdir, 'Modules')]

        # Fix up the paths for scripts, too
        self.distribution.scripts = [os.path.join(srcdir, filename)
                                     for filename in self.distribution.scripts]

        # Python header files
        headers = [sysconfig.get_config_h_filename()]
        headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))

        # The sysconfig variables built by makesetup that list the already
        # built modules and the disabled modules as configured by the Setup
        # files.
        sysconf_built = sysconfig.get_config_var('MODBUILT_NAMES').split()
        sysconf_dis = sysconfig.get_config_var('MODDISABLED_NAMES').split()

        mods_built = []
        mods_disabled = []
        for ext in self.extensions:
            ext.sources = [ find_module_file(filename, moddirlist)
                            for filename in ext.sources ]
            if ext.depends is not None:
                ext.depends = [find_module_file(filename, moddirlist)
                               for filename in ext.depends]
            else:
                ext.depends = []
            # re-compile extensions if a header file has been changed
            ext.depends.extend(headers)

            # If a module has already been built or has been disabled in the
            # Setup files, don't build it here.
            if ext.name in sysconf_built:
                mods_built.append(ext)
            if ext.name in sysconf_dis:
                mods_disabled.append(ext)

        mods_configured = mods_built + mods_disabled
        if mods_configured:
            self.extensions = [x for x in self.extensions if x not in
                               mods_configured]
            # Remove the shared libraries built by a previous build.
            for ext in mods_configured:
                fullpath = self.get_ext_fullpath(ext.name)
                if os.path.exists(fullpath):
                    os.unlink(fullpath)

        # When you run "make CC=altcc" or something similar, you really want
        # those environment variables passed into the setup.py phase.  Here's
        # a small set of useful ones.
        compiler = os.environ.get('CC')
        args = {}
        # unfortunately, distutils doesn't let us provide separate C and C++
        # compilers
        if compiler is not None:
            (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
            args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
        self.compiler.set_executables(**args)

        build_ext.build_extensions(self)

        # Verify each freshly built extension is actually importable.
        for ext in self.extensions:
            self.check_extension_import(ext)

        # Column width for the three-column report below.
        longest = max([len(e.name) for e in self.extensions], default=0)
        if self.failed or self.failed_on_import:
            all_failed = self.failed + self.failed_on_import
            longest = max(longest, max([len(name) for name in all_failed]))

        def print_three_column(lst):
            lst.sort(key=str.lower)
            # guarantee zip() doesn't drop anything
            while len(lst) % 3:
                lst.append("")
            for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
                print("%-*s %-*s %-*s" % (longest, e, longest, f,
                                          longest, g))

        if missing:
            print()
            print("Python build finished successfully!")
            print("The necessary bits to build these optional modules were not "
                  "found:")
            print_three_column(missing)
            print("To find the necessary bits, look in setup.py in"
                  " detect_modules() for the module's name.")
            print()

        if mods_built:
            print()
            print("The following modules found by detect_modules() in"
                  " setup.py, have been")
            print("built by the Makefile instead, as configured by the"
                  " Setup files:")
            print_three_column([ext.name for ext in mods_built])
            print()

        if mods_disabled:
            print()
            print("The following modules found by detect_modules() in"
                  " setup.py have not")
            print("been built, they are *disabled* in the Setup files:")
            print_three_column([ext.name for ext in mods_disabled])
            print()

        if self.failed:
            failed = self.failed[:]
            print()
            print("Failed to build these modules:")
            print_three_column(failed)
            print()

        if self.failed_on_import:
            failed = self.failed_on_import[:]
            print()
            print("Following modules built successfully"
                  " but were removed because they could not be imported:")
            print_three_column(failed)
            print()

        if any('_ssl' in l
               for l in (missing, self.failed, self.failed_on_import)):
            print()
            print("Could not build the ssl module!")
            print("Python requires an OpenSSL 1.0.2 or 1.1 compatible "
                  "libssl with X509_VERIFY_PARAM_set1_host().")
            print("LibreSSL 2.6.4 and earlier do not provide the necessary "
                  "APIs, https://github.com/libressl-portable/portable/issues/381")
            print()
def build_extension(self, ext):
if ext.name == '_ctypes':
if not self.configure_ctypes(ext):
self.failed.append(ext.name)
return
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsError) as why:
self.announce('WARNING: building of extension "%s" failed: %s' %
(ext.name, sys.exc_info()[1]))
self.failed.append(ext.name)
return
    def check_extension_import(self, ext):
        """Try to import a freshly built extension; on failure, record it
        in self.failed_on_import (ImportError, and the .so is renamed out
        of the way) or self.failed (any other exception)."""
        # Don't try to import an extension that has failed to compile
        if ext.name in self.failed:
            self.announce(
                'WARNING: skipping import check for failed build "%s"' %
                ext.name, level=1)
            return

        # Workaround for Mac OS X: The Carbon-based modules cannot be
        # reliably imported into a command-line Python
        if 'Carbon' in ext.extra_link_args:
            self.announce(
                'WARNING: skipping import check for Carbon-based "%s"' %
                ext.name)
            return

        if host_platform == 'darwin' and (
                sys.maxsize > 2**32 and '-arch' in ext.extra_link_args):
            # Don't bother doing an import check when an extension was
            # build with an explicit '-arch' flag on OSX. That's currently
            # only used to build 32-bit only extensions in a 4-way
            # universal build and loading 32-bit code into a 64-bit
            # process will fail.
            self.announce(
                'WARNING: skipping import check for "%s"' %
                ext.name)
            return

        # Workaround for Cygwin: Cygwin currently has fork issues when many
        # modules have been imported
        if host_platform == 'cygwin':
            self.announce('WARNING: skipping import check for Cygwin-based "%s"'
                % ext.name)
            return
        ext_filename = os.path.join(
            self.build_lib,
            self.get_ext_filename(self.get_ext_fullname(ext.name)))

        # If the build directory didn't exist when setup.py was
        # started, sys.path_importer_cache has a negative result
        # cached. Clear that cache before trying to import.
        sys.path_importer_cache.clear()

        # Don't try to load extensions for cross builds
        if cross_compiling:
            return

        loader = importlib.machinery.ExtensionFileLoader(ext.name, ext_filename)
        spec = importlib.util.spec_from_file_location(ext.name, ext_filename,
                                                      loader=loader)
        try:
            importlib._bootstrap._load(spec)
        except ImportError as why:
            self.failed_on_import.append(ext.name)
            self.announce('*** WARNING: renaming "%s" since importing it'
                          ' failed: %s' % (ext.name, why), level=3)
            assert not self.inplace
            # Rename the .so to <name>_failed<ext> so the broken module
            # cannot be picked up by a later import.
            basename, tail = os.path.splitext(ext_filename)
            newname = basename + "_failed" + tail
            if os.path.exists(newname):
                os.remove(newname)
            os.rename(ext_filename, newname)

        except:
            # Any non-ImportError during load counts as a build failure.
            exc_type, why, tb = sys.exc_info()
            self.announce('*** WARNING: importing extension "%s" '
                          'failed with %s: %s' % (ext.name, exc_type, why),
                          level=3)
            self.failed.append(ext.name)
    def add_multiarch_paths(self):
        """Add Debian/Ubuntu multiarch library and include directories to
        the compiler search paths.

        First asks the compiler itself (``$CC -print-multiarch``); if that
        yields nothing, falls back to ``dpkg-architecture``.
        """
        # Debian/Ubuntu multiarch support.
        # https://wiki.ubuntu.com/MultiarchSpec
        cc = sysconfig.get_config_var('CC')
        tmpfile = os.path.join(self.build_temp, 'multiarch')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        ret = os.system(
            '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
        multiarch_path_component = ''
        try:
            # os.system returns a wait status; '>> 8' extracts the exit code.
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    multiarch_path_component = fp.readline().strip()
        finally:
            os.unlink(tmpfile)

        if multiarch_path_component != '':
            add_dir_to_list(self.compiler.library_dirs,
                            '/usr/lib/' + multiarch_path_component)
            add_dir_to_list(self.compiler.include_dirs,
                            '/usr/include/' + multiarch_path_component)
            return

        if not find_executable('dpkg-architecture'):
            return
        opt = ''
        if cross_compiling:
            opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE')
        tmpfile = os.path.join(self.build_temp, 'multiarch')
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        ret = os.system(
            'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
            (opt, tmpfile))
        try:
            if ret >> 8 == 0:
                with open(tmpfile) as fp:
                    multiarch_path_component = fp.readline().strip()
                add_dir_to_list(self.compiler.library_dirs,
                                '/usr/lib/' + multiarch_path_component)
                add_dir_to_list(self.compiler.include_dirs,
                                '/usr/include/' + multiarch_path_component)
        finally:
            os.unlink(tmpfile)
def add_gcc_paths(self):
gcc = sysconfig.get_config_var('CC')
tmpfile = os.path.join(self.build_temp, 'gccpaths')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile))
is_gcc = False
in_incdirs = False
inc_dirs = []
lib_dirs = []
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
for line in fp.readlines():
if line.startswith("gcc version"):
is_gcc = True
elif line.startswith("#include <...>"):
in_incdirs = True
elif line.startswith("End of search list"):
in_incdirs = False
elif is_gcc and line.startswith("LIBRARY_PATH"):
for d in line.strip().split("=")[1].split(":"):
d = os.path.normpath(d)
if '/gcc/' not in d:
add_dir_to_list(self.compiler.library_dirs,
d)
elif is_gcc and in_incdirs and '/gcc/' not in line:
add_dir_to_list(self.compiler.include_dirs,
line.strip())
finally:
os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used, but the local build
# directories (i.e. '.' and 'Include') must be first. See issue
# 10520.
if not cross_compiling:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
# only change this for cross builds for 3.3, issues on Mageia
if cross_compiling:
self.add_gcc_paths()
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
parser = argparse.ArgumentParser()
parser.add_argument(arg_name, dest="dirs", action="append")
options, _ = parser.parse_known_args(env_val.split())
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if (not cross_compiling and
os.path.normpath(sys.base_prefix) != '/usr' and
not sysconfig.get_config_var('PYTHONFRAMEWORK')):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid # linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
system_lib_dirs = ['/lib64', '/usr/lib64', '/lib', '/usr/lib']
system_include_dirs = ['/usr/include']
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
if not cross_compiling:
lib_dirs = self.compiler.library_dirs + system_lib_dirs
inc_dirs = self.compiler.include_dirs + system_include_dirs
else:
# Add the sysroot paths. 'sysroot' is a compiler option used to
# set the logical path of the standard system headers and
# libraries.
lib_dirs = (self.compiler.library_dirs +
sysroot_paths(('LDFLAGS', 'CC'), system_lib_dirs))
inc_dirs = (self.compiler.include_dirs +
sysroot_paths(('CPPFLAGS', 'CFLAGS', 'CC'),
system_include_dirs))
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
with open(config_h) as file:
config_h_vars = sysconfig.parse_config_h(file)
srcdir = sysconfig.get_config_var('srcdir')
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if host_platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
#
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses
# directories with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# Context Variables
exts.append( Extension('_contextvars', ['_contextvarsmodule.c']) )
shared_math = 'Modules/_math.o'
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c'],
extra_objects=[shared_math],
depends=['_math.h', shared_math],
libraries=['m']) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c'],
extra_objects=[shared_math],
depends=['_math.h', shared_math],
libraries=['m']) )
# time libraries: librt may be needed for clock_gettime()
time_libs = []
lib = sysconfig.get_config_var('TIMEMODULE_LIB')
if lib:
time_libs.append(lib)
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=time_libs) )
# libm is needed by delta_new() that uses round() and by accum() that
# uses modf().
exts.append( Extension('_datetime', ['_datetimemodule.c'],
libraries=['m']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# C-optimized pickle replacement
exts.append( Extension("_pickle", ["_pickle.c"]) )
# atexit
exts.append( Extension("atexit", ["atexitmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"],
# pycore_accu.h requires Py_BUILD_CORE_BUILTIN
extra_compile_args=['-DPy_BUILD_CORE_BUILTIN']) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# Python PEP-3118 (buffer protocol) test module
exts.append( Extension('_testbuffer', ['_testbuffer.c']) )
# Test loading multiple modules from one compiled file (http://bugs.python.org/issue16421)
exts.append( Extension('_testimportmultiple', ['_testimportmultiple.c']) )
# Test multi-phase extension module init (PEP 489)
exts.append( Extension('_testmultiphase', ['_testmultiphase.c']) )
# profiler (_lsprof is for cProfile.py)
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c'],
depends=['unicodedata_db.h', 'unicodename_db.h']) )
# _opcode module
exts.append( Extension('_opcode', ['_opcode.c']) )
# asyncio speedups
exts.append( Extension("_asyncio", ["_asynciomodule.c"]) )
# _abc speedups
exts.append( Extension("_abc", ["_abc.c"]) )
# _queue module
exts.append( Extension("_queue", ["_queuemodule.c"]) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# Memory-mapped files (also works on Win32).
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
# Fuzz tests.
exts.append( Extension(
'_xxtestfuzz',
['_xxtestfuzz/_xxtestfuzz.c', '_xxtestfuzz/fuzzer.c'])
)
# Python interface to subinterpreter C-API.
exts.append(Extension('_xxsubinterpreters', ['_xxsubinterpretersmodule.c'],
define_macros=[('Py_BUILD_CORE', '')]))
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
#
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
#
# audioop needs libm for floor() in multiple functions.
exts.append( Extension('audioop', ['audioop.c'],
libraries=['m']) )
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Cannot use os.popen here in py3k.
tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Determine if readline is already linked against curses or tinfo.
if do_readline:
if cross_compiling:
ret = os.system("%s -d %s | grep '(NEEDED)' > %s" \
% (sysconfig.get_config_var('READELF'),
do_readline, tmpfile))
elif find_executable('ldd'):
ret = os.system("ldd %s > %s" % (do_readline, tmpfile))
else:
ret = 256
if ret >> 8 == 0:
with open(tmpfile) as fp:
for ln in fp:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
# termcap interface split out from ncurses
if 'tinfo' in ln:
readline_termcap_library = 'tinfo'
break
if os.path.exists(tmpfile):
os.unlink(tmpfile)
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if (dep_target and
(tuple(int(n) for n in dep_target.split('.')[0:2])
< (10, 5) ) ):
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('_crypt', ['_cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# POSIX subprocess module helper.
exts.append( Extension('_posixsubprocess', ['_posixsubprocess.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
ssl_ext, hashlib_ext = self._detect_openssl(inc_dirs, lib_dirs)
if ssl_ext is not None:
exts.append(ssl_ext)
else:
missing.append('_ssl')
if hashlib_ext is not None:
exts.append(hashlib_ext)
else:
missing.append('_hashlib')
# We always compile these even when OpenSSL is available (issue #14693).
# It's harmless and the object code is tiny (40-50 KiB per module,
# only loaded when actually used).
exts.append( Extension('_sha256', ['sha256module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha512', ['sha512module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_md5', ['md5module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha1', ['sha1module.c'],
depends=['hashlib.h']) )
blake2_deps = glob(os.path.join(os.getcwd(), srcdir,
'Modules/_blake2/impl/*'))
blake2_deps.append('hashlib.h')
exts.append( Extension('_blake2',
['_blake2/blake2module.c',
'_blake2/blake2b_impl.c',
'_blake2/blake2s_impl.c'],
depends=blake2_deps) )
sha3_deps = glob(os.path.join(os.getcwd(), srcdir,
'Modules/_sha3/kcp/*'))
sha3_deps.append('hashlib.h')
exts.append( Extension('_sha3',
['_sha3/sha3module.c'],
depends=sha3_deps))
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module dbm/__init__.py provides an
# implementation independent wrapper for these; dbm/dumb.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 3)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
if cross_compiling:
db_inc_paths = []
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print("db: looking for db.h in", f)
if os.path.exists(f):
with open(f, 'rb') as file:
f = file.read()
m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print("db.h:", db_ver, "patch", db_patch,
"being ignored (4.6.x must be >= 4.6.21)")
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print("db.h: found", db_ver, "in", d)
else:
# we already found a header for this library version
if db_setup_debug: print("db.h: ignoring", d)
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print("db.h: no version number version in", d)
db_found_vers = list(db_ver_inc_map.keys())
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if host_platform != 'darwin':
db_dirs_to_check = list(filter(os.path.isdir, db_dirs_to_check))
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print("db lib: ", dblib, "not found")
except db_found:
if db_setup_debug:
print("bsddb using BerkeleyDB lib:", db_ver, dblib)
print("bsddb lib dir:", dblib_dir, " inc dir:", db_incdir)
dblibs = [dblib]
# Only add the found library and include directories if they aren't
# already being searched. This avoids an explicit runtime library
# dependency.
if db_incdir in inc_dirs:
db_incs = None
else:
db_incs = [db_incdir]
if dblib_dir[0] in lib_dirs:
dblib_dir = None
else:
if db_setup_debug: print("db: no appropriate library found")
db_incs = None
dblibs = []
dblib_dir = None
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
if cross_compiling:
sqlite_inc_paths = []
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
for d_ in inc_dirs + sqlite_inc_paths:
d = d_
if host_platform == 'darwin' and is_macosx_sdk_path(d):
d = os.path.join(sysroot, d[1:])
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print("sqlite: found %s"%f)
with open(f) as file:
incf = file.read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print("%s/sqlite3.h: version %s"%(d, sqlite_version))
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print("%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION))
elif sqlite_setup_debug:
print("sqlite: %s had no SQLITE_VERSION"%(f,))
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if host_platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Enable support for loadable extensions in the sqlite3 module
# if --enable-loadable-sqlite-extensions configure option is used.
if '--enable-loadable-sqlite-extensions' not in sysconfig.get_config_var("CONFIG_ARGS"):
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if host_platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
include_dirs = ["Modules/_sqlite"]
# Only include the directory where sqlite was found if it does
# not already exist in set include directories, otherwise you
# can end up with a bad search path order.
if sqlite_incdir not in self.compiler.include_dirs:
include_dirs.append(sqlite_incdir)
# avoid a runtime library path for a system library dir
if sqlite_libdir and sqlite_libdir[0] in lib_dirs:
sqlite_libdir = None
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=include_dirs,
library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
dbm_setup_debug = False # verbose debug prints from this script?
dbm_order = ['gdbm']
# The standard Unix dbm module:
if host_platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others have -lgdbm_compat,
# others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
elif self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
if dbm_setup_debug: print("building dbm using ndbm")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if dblibs:
if dbm_setup_debug: print("building dbm using bdb")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('_dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('_gdbm', ['_gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('_gdbm')
# Unix-only modules
if host_platform != 'win32':
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
exts.append( Extension('resource', ['resource.c']) )
else:
missing.extend(['resource', 'termios'])
nis = self._detect_nis(inc_dirs, lib_dirs)
if nis is not None:
exts.append(nis)
else:
missing.append('nis')
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
curses_defines = []
curses_includes = []
panel_library = 'panel'
if curses_library == 'ncursesw':
curses_defines.append(('HAVE_NCURSESW', '1'))
if not cross_compiling:
curses_includes.append('/usr/include/ncursesw')
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
if host_platform == 'darwin':
# On OS X, there is no separate /usr/lib/libncursesw nor
# libpanelw. If we are here, we found a locally-supplied
# version of libncursesw. There should also be a
# libpanelw. _XOPEN_SOURCE defines are usually excluded
# for OS X but we need _XOPEN_SOURCE_EXTENDED here for
# ncurses wide char support
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
elif host_platform == 'darwin' and curses_library == 'ncurses':
# Building with the system-suppied combined libncurses/libpanel
curses_defines.append(('HAVE_NCURSESW', '1'))
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
if curses_library.startswith('ncurses'):
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = curses_libs) )
elif curses_library == 'curses' and host_platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
define_macros=curses_defines,
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h):
zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:])
with open(zlib_h) as fp:
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if host_platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if host_platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('_bz2', ['_bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('_bz2')
# LZMA compression support.
if self.compiler.find_library_file(lib_dirs, 'lzma'):
exts.append( Extension('_lzma', ['_lzmamodule.c'],
libraries = ['lzma']) )
else:
missing.append('_lzma')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
extra_compile_args = []
expat_lib = ['expat']
expat_sources = []
expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
# bpo-30947: Python uses best available entropy sources to
# call XML_SetHashSalt(), expat entropy sources are not needed
('XML_POOR_ENTROPY', '1'),
]
extra_compile_args = []
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
expat_depends = ['expat/ascii.h',
'expat/asciitab.h',
'expat/expat.h',
'expat/expat_config.h',
'expat/expat_external.h',
'expat/internal.h',
'expat/latin1tab.h',
'expat/utf8tab.h',
'expat/xmlrole.h',
'expat/xmltok.h',
'expat/xmltok_impl.h'
]
cc = sysconfig.get_config_var('CC').split()[0]
ret = os.system(
'"%s" -Werror -Wimplicit-fallthrough -E -xc /dev/null >/dev/null 2>&1' % cc)
if ret >> 8 == 0:
extra_compile_args.append('-Wno-implicit-fallthrough')
exts.append(Extension('pyexpat',
define_macros = define_macros,
extra_compile_args = extra_compile_args,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources,
depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
depends = ['pyexpat.c'] + expat_sources +
expat_depends,
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
# Stefan Krah's _decimal module
exts.append(self._decimal_ext())
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if host_platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif host_platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif host_platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif host_platform.startswith('openbsd'):
macros = dict()
libraries = []
elif host_platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if host_platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if (sysconfig.get_config_var('HAVE_SHM_OPEN') and
sysconfig.get_config_var('HAVE_SHM_UNLINK')):
posixshmem_srcs = [ '_multiprocessing/posixshmem.c',
]
libs = []
if sysconfig.get_config_var('SHM_NEEDS_LIBRT'):
# need to link with librt to get shm_open()
libs.append('rt')
exts.append( Extension('_posixshmem', posixshmem_srcs,
define_macros={},
libraries=libs,
include_dirs=["Modules/_multiprocessing"]))
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=list(macros.items()),
include_dirs=["Modules/_multiprocessing"]))
# End multiprocessing
# Platform-specific libraries
if host_platform.startswith(('linux', 'freebsd', 'gnukfreebsd')):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if host_platform == 'darwin':
exts.append(
Extension('_scproxy', ['_scproxy.c'],
extra_link_args=[
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation',
]))
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
# Build the _uuid module if possible
uuid_incs = find_file("uuid.h", inc_dirs, ["/usr/include/uuid"])
if uuid_incs is not None:
if self.compiler.find_library_file(lib_dirs, 'uuid'):
uuid_libs = ['uuid']
else:
uuid_libs = []
self.extensions.append(Extension('_uuid', ['_uuidmodule.c'],
libraries=uuid_libs,
include_dirs=uuid_incs))
else:
missing.append('_uuid')
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
if 'd' not in sysconfig.get_config_var('ABIFLAGS'):
ext = Extension('xxlimited', ['xxlimited.c'],
define_macros=[('Py_LIMITED_API', '0x03050000')])
self.extensions.append(ext)
return missing
def detect_tkinter_explicitly(self):
    """Build _tkinter from explicitly supplied Tcl/Tk locations.

    The Makefile "sharedmods" target exports _TCLTK_INCLUDES and
    _TCLTK_LIBS, populated from the configure options

        --with-tcltk-includes="-I/path/to/tclincludes \
                               -I/path/to/tkincludes"
        --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \
                           -L/path/to/tklibs -ltkm.n"

    or overridden directly on the make command line:

        make TCLTK_INCLUDES="..." TCLTK_LIBS="..."

    This is useful for building and testing tkinter against multiple
    Tcl/Tk versions.  A build of Tk depends on a particular build of
    Tcl, so both values must be given together and with care.

    Returns 1 when an explicit _tkinter extension was registered,
    0 to signal that the default configuration search should proceed.
    """
    includes = os.environ.get('_TCLTK_INCLUDES')
    libs = os.environ.get('_TCLTK_LIBS')
    if not includes or not libs:
        # Resume default configuration search.
        return 0

    self.extensions.append(
        Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                  define_macros=[('WITH_APPINIT', 1)],
                  extra_compile_args=includes.split(),
                  extra_link_args=libs.split(),
                  ))
    return 1
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
    """Build _tkinter against macOS Tcl/Tk framework bundles.

    Frameworks are different enough from plain UNIX libraries that the
    search logic cannot be shared with detect_tkinter().

    Returns 1 when the framework-based extension was configured and
    registered, 0 when no usable Tcl.framework/Tk.framework pair was
    found (the caller then resumes the normal X11-based search).
    """
    # The _tkinter module, using frameworks. Since frameworks are quite
    # different the UNIX search logic is not sharable.
    from os.path import join, exists
    framework_dirs = [
        '/Library/Frameworks',
        '/System/Library/Frameworks/',
        # BUGFIX: os.getenv('HOME') is None when HOME is unset (some
        # daemon/CI build environments), which made join() raise
        # TypeError; fall back to '/' in that case.
        # NOTE(review): because the second component is absolute,
        # join() yields '/Library/Frameworks' regardless of HOME —
        # presumably intended to be HOME + '/Library/Frameworks';
        # left as-is to preserve behavior, only the crash is fixed.
        join(os.getenv('HOME') or '/', '/Library/Frameworks')
    ]

    sysroot = macosx_sdk_root()

    # Find the directory that contains the Tcl.framework and Tk.framework
    # bundles.
    # XXX distutils should support -F!
    for F in framework_dirs:
        # both Tcl.framework and Tk.framework should be present
        for fw in 'Tcl', 'Tk':
            if is_macosx_sdk_path(F):
                if not exists(join(sysroot, F[1:], fw + '.framework')):
                    break
            else:
                if not exists(join(F, fw + '.framework')):
                    break
        else:
            # ok, F is now directory with both frameworks. Continue
            # building
            break
    else:
        # Tk and Tcl frameworks not found. Normal "unix" tkinter search
        # will now resume.
        return 0

    # For 8.4a2, we must add -I options that point inside the Tcl and Tk
    # frameworks. In later release we should hopefully be able to pass
    # the -F option to gcc, which specifies a framework lookup path.
    #
    # NOTE: F is the loop variable deliberately leaked from the search
    # above; it names the directory holding both frameworks.
    include_dirs = [
        join(F, fw + '.framework', H)
        for fw in ('Tcl', 'Tk')
        for H in ('Headers', 'Versions/Current/PrivateHeaders')
    ]

    # For 8.4a2, the X11 headers are not included. Rather than include a
    # complicated search, this is a hard-coded path. It could bail out
    # if X11 libs are not found...
    include_dirs.append('/usr/X11R6/include')
    frameworks = ['-framework', 'Tcl', '-framework', 'Tk']

    # All existing framework builds of Tcl/Tk don't support 64-bit
    # architectures.
    cflags = sysconfig.get_config_vars('CFLAGS')[0]
    archs = re.findall(r'-arch\s+(\w+)', cflags)

    tmpfile = os.path.join(self.build_temp, 'tk.arch')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)

    # Note: cannot use os.popen or subprocess here, that
    # requires extensions that are not available here.
    if is_macosx_sdk_path(F):
        os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(os.path.join(sysroot, F[1:]), tmpfile))
    else:
        os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(F, tmpfile))

    # Collect only the architectures that appear both in the Tk binary
    # and in the -arch flags we are compiling with.
    with open(tmpfile) as fp:
        detected_archs = []
        for ln in fp:
            a = ln.split()[-1]
            if a in archs:
                detected_archs.append(ln.split()[-1])
    os.unlink(tmpfile)

    for a in detected_archs:
        frameworks.append('-arch')
        frameworks.append(a)

    # frameworks[2:] drops the leading ['-framework', 'Tcl'] pair for
    # the compile step; the full list is used when linking.
    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)],
                    include_dirs = include_dirs,
                    libraries = [],
                    extra_compile_args = frameworks[2:],
                    extra_link_args = frameworks,
                    )
    self.extensions.append(ext)
    return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
    """Locate Tcl/Tk and register the _tkinter extension.

    Tries, in order: explicit --with-tcltk-* settings, the macOS
    framework build, and finally a conventional search of the library
    and include directories.  Appends the extension to self.extensions
    on success; returns early (leaving '_tkinter' unbuilt, which the
    caller reports as missing) when Tcl/Tk cannot be found.
    """
    # The _tkinter module.

    # Check whether --with-tcltk-includes and --with-tcltk-libs were
    # configured or passed into the make target. If so, use these values
    # to build tkinter and bypass the searches for Tcl and TK in standard
    # locations.
    if self.detect_tkinter_explicitly():
        return

    # Rather than complicate the code below, detecting and building
    # AquaTk is a separate method. Only one Tkinter will be built on
    # Darwin - either AquaTk, if it is found, or X11 based Tk.
    if (host_platform == 'darwin' and
        self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
        return

    # Assume we haven't found any of the libraries or include files
    # The versions with dots are used on Unix, and the versions without
    # dots on Windows, for detection by cygwin.
    tcllib = tklib = tcl_includes = tk_includes = None
    for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
                    '8.2', '82', '8.1', '81', '8.0', '80']:
        tklib = self.compiler.find_library_file(lib_dirs,
                                                'tk' + version)
        tcllib = self.compiler.find_library_file(lib_dirs,
                                                 'tcl' + version)
        if tklib and tcllib:
            # Exit the loop when we've found the Tcl/Tk libraries
            break
    # NOTE: the loop variable `version` deliberately leaks out of the
    # loop above; it is reused below to build include-subdir and
    # library names for the matched release.

    # Now check for the header files
    if tklib and tcllib:
        # Check for the include files on Debian and {Free,Open}BSD, where
        # they're put in /usr/include/{tcl,tk}X.Y
        dotversion = version
        if '.' not in dotversion and "bsd" in host_platform.lower():
            # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
            # but the include subdirs are named like .../include/tcl8.3.
            dotversion = dotversion[:-1] + '.' + dotversion[-1]
        tcl_include_sub = []
        tk_include_sub = []
        for dir in inc_dirs:
            tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
            tk_include_sub += [dir + os.sep + "tk" + dotversion]
        # tk.h may live next to tcl.h, so search the tcl subdirs too.
        tk_include_sub += tcl_include_sub
        tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
        tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)

    if (tcllib is None or tklib is None or
        tcl_includes is None or tk_includes is None):
        self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
        return

    # OK... everything seems to be present for Tcl/Tk.

    include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
    for dir in tcl_includes + tk_includes:
        if dir not in include_dirs:
            include_dirs.append(dir)

    # Check for various platform-specific directories
    if host_platform == 'sunos5':
        include_dirs.append('/usr/openwin/include')
        added_lib_dirs.append('/usr/openwin/lib')
    elif os.path.exists('/usr/X11R6/include'):
        include_dirs.append('/usr/X11R6/include')
        added_lib_dirs.append('/usr/X11R6/lib64')
        added_lib_dirs.append('/usr/X11R6/lib')
    elif os.path.exists('/usr/X11R5/include'):
        include_dirs.append('/usr/X11R5/include')
        added_lib_dirs.append('/usr/X11R5/lib')
    else:
        # Assume default location for X11
        include_dirs.append('/usr/X11/include')
        added_lib_dirs.append('/usr/X11/lib')

    # If Cygwin, then verify that X is installed before proceeding
    if host_platform == 'cygwin':
        x11_inc = find_file('X11/Xlib.h', [], include_dirs)
        if x11_inc is None:
            # X headers missing on Cygwin: silently skip _tkinter.
            return

    # Check for BLT extension
    if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                       'BLT8.0'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT8.0')
    elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                         'BLT'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT')

    # Add the Tcl/Tk libraries
    libs.append('tk'+ version)
    libs.append('tcl'+ version)

    if host_platform in ['aix3', 'aix4']:
        # AIX needs the binder library for runtime linking.
        libs.append('ld')

    # Finally, link with the X11 libraries (not appropriate on cygwin)
    if host_platform != "cygwin":
        libs.append('X11')

    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)] + defs,
                    include_dirs = include_dirs,
                    libraries = libs,
                    library_dirs = added_lib_dirs,
                    )
    self.extensions.append(ext)

    # XXX handle these, but how to detect?
    # *** Uncomment and edit for PIL (TkImaging) extension only:
    #       -DWITH_PIL -I../Extensions/Imaging/libImaging  tkImaging.c \
    # *** Uncomment and edit for TOGL extension only:
    #       -DWITH_TOGL togl.c \
    # *** Uncomment these for TOGL extension only:
    #       -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
    """Wire the bundled, preconfigured libffi into _ctypes on Darwin.

    macOS builds use the ready-made files under
    Modules/_ctypes/libffi_osx instead of running libffi's configure.
    Extends *ext* (the _ctypes Extension) in place with the libffi
    sources and include directories and teaches the compiler to accept
    preprocessed assembly.  Always returns True.
    """
    srcdir = sysconfig.get_config_var('srcdir')
    ffi_srcdir = os.path.abspath(
        os.path.join(srcdir, 'Modules', '_ctypes', 'libffi_osx'))

    ffi_files = [
        'ffi.c',
        'x86/darwin64.S',
        'x86/x86-darwin.S',
        'x86/x86-ffi_darwin.c',
        'x86/x86-ffi64.c',
        'powerpc/ppc-darwin.S',
        'powerpc/ppc-darwin_closure.S',
        'powerpc/ppc-ffi_darwin.c',
        'powerpc/ppc64-darwin_closure.S',
    ]
    sources = [os.path.join(ffi_srcdir, name) for name in ffi_files]

    # Add .S (preprocessed assembly) to C compiler source extensions.
    self.compiler.src_extensions.append('.S')

    ext.include_dirs.extend([
        os.path.join(ffi_srcdir, 'include'),
        os.path.join(ffi_srcdir, 'powerpc'),
    ])
    ext.sources.extend(sources)
    return True
def configure_ctypes(self, ext):
    """Finish configuring the _ctypes extension.

    When a system libffi was already detected (self.use_system_libffi)
    there is nothing left to do.  Otherwise the bundled libffi is only
    available on Darwin; everywhere else report failure.

    Returns True on success, False when no usable ffi support exists.
    """
    if self.use_system_libffi:
        return True
    if host_platform == 'darwin':
        return self.configure_ctypes_darwin(ext)
    print('INFO: Could not locate ffi libs and/or headers')
    return False
def detect_ctypes(self, inc_dirs, lib_dirs):
    """Register the _ctypes and _ctypes_test extensions.

    Always queues both extensions.  Afterwards it looks for a system
    libffi (header plus library); when found, _ctypes is switched over
    to it by extending the already-queued Extension object in place
    and setting self.use_system_libffi = True.
    """
    self.use_system_libffi = False
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    sources = ['_ctypes/_ctypes.c',
               '_ctypes/callbacks.c',
               '_ctypes/callproc.c',
               '_ctypes/stgdict.c',
               '_ctypes/cfield.c']
    depends = ['_ctypes/ctypes.h']

    if host_platform == 'darwin':
        # macOS uses the bundled closure allocator and dlfcn shim.
        sources.append('_ctypes/malloc_closure.c')
        sources.append('_ctypes/darwin/dlfcn_simple.c')
        extra_compile_args.append('-DMACOSX')
        include_dirs.append('_ctypes/darwin')
        # XXX Is this still needed?
        ##        extra_link_args.extend(['-read_only_relocs', 'warning'])

    elif host_platform == 'sunos5':
        # XXX This shouldn't be necessary; it appears that some
        # of the assembler code is non-PIC (i.e. it has relocations
        # when it shouldn't. The proper fix would be to rewrite
        # the assembler code to be PIC.
        # This only works with GCC; the Sun compiler likely refuses
        # this option. If you want to compile ctypes with the Sun
        # compiler, please research a proper solution, instead of
        # finding some -z option for the Sun compiler.
        extra_link_args.append('-mimpure-text')

    elif host_platform.startswith('hp-ux'):
        extra_link_args.append('-fPIC')

    ext = Extension('_ctypes',
                    include_dirs=include_dirs,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    libraries=[],
                    sources=sources,
                    depends=depends)
    # function my_sqrt() needs libm for sqrt()
    ext_test = Extension('_ctypes_test',
                         sources=['_ctypes/_ctypes_test.c'],
                         libraries=['m'])
    self.extensions.extend([ext, ext_test])

    if host_platform == 'darwin':
        if '--with-system-ffi' not in sysconfig.get_config_var("CONFIG_ARGS"):
            # Use the bundled libffi; configure_ctypes() handles it.
            return
        # OS X 10.5 comes with libffi.dylib; the include files are
        # in /usr/include/ffi
        inc_dirs.append('/usr/include/ffi')

    # Prefer the configure-provided include dir; fall back to searching
    # inc_dirs for ffi.h.
    ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
    if not ffi_inc or ffi_inc[0] == '':
        ffi_inc = find_file('ffi.h', [], inc_dirs)
    if ffi_inc is not None:
        ffi_h = ffi_inc[0] + '/ffi.h'
        if not os.path.exists(ffi_h):
            ffi_inc = None
            print('Header file {} does not exist'.format(ffi_h))
    ffi_lib = None
    if ffi_inc is not None:
        # Try the shared library first, then the PIC static build.
        for lib_name in ('ffi', 'ffi_pic'):
            if (self.compiler.find_library_file(lib_dirs, lib_name)):
                ffi_lib = lib_name
                break

    if ffi_inc and ffi_lib:
        # Both header and library found: use the system libffi.
        ext.include_dirs.extend(ffi_inc)
        ext.libraries.append(ffi_lib)
        self.use_system_libffi = True

    if sysconfig.get_config_var('HAVE_LIBDL'):
        # for dlopen, see bpo-32647
        ext.libraries.append('dl')
    def _decimal_ext(self):
        """Return the Extension object for the _decimal module.

        Depending on CONFIG_ARGS, either link against an installed system
        libmpdec or compile the bundled Modules/_decimal/libmpdec sources,
        selecting the libmpdec machine configuration that matches the
        compiler and word size of this build.
        """
        extra_compile_args = []
        undef_macros = []
        if '--with-system-libmpdec' in sysconfig.get_config_var("CONFIG_ARGS"):
            # Use the system-wide libmpdec (versioned soname) instead of the
            # bundled sources; only the C glue module is compiled.
            include_dirs = []
            libraries = [':libmpdec.so.2']
            sources = ['_decimal/_decimal.c']
            depends = ['_decimal/docstrings.h']
        else:
            srcdir = sysconfig.get_config_var('srcdir')
            include_dirs = [os.path.abspath(os.path.join(srcdir,
                                                         'Modules',
                                                         '_decimal',
                                                         'libmpdec'))]
            libraries = ['m']
            sources = [
              '_decimal/_decimal.c',
              '_decimal/libmpdec/basearith.c',
              '_decimal/libmpdec/constants.c',
              '_decimal/libmpdec/context.c',
              '_decimal/libmpdec/convolute.c',
              '_decimal/libmpdec/crt.c',
              '_decimal/libmpdec/difradix2.c',
              '_decimal/libmpdec/fnt.c',
              '_decimal/libmpdec/fourstep.c',
              '_decimal/libmpdec/io.c',
              '_decimal/libmpdec/memory.c',
              '_decimal/libmpdec/mpdecimal.c',
              '_decimal/libmpdec/numbertheory.c',
              '_decimal/libmpdec/sixstep.c',
              '_decimal/libmpdec/transpose.c',
              ]
            depends = [
              '_decimal/docstrings.h',
              '_decimal/libmpdec/basearith.h',
              '_decimal/libmpdec/bits.h',
              '_decimal/libmpdec/constants.h',
              '_decimal/libmpdec/convolute.h',
              '_decimal/libmpdec/crt.h',
              '_decimal/libmpdec/difradix2.h',
              '_decimal/libmpdec/fnt.h',
              '_decimal/libmpdec/fourstep.h',
              '_decimal/libmpdec/io.h',
              '_decimal/libmpdec/mpalloc.h',
              '_decimal/libmpdec/mpdecimal.h',
              '_decimal/libmpdec/numbertheory.h',
              '_decimal/libmpdec/sixstep.h',
              '_decimal/libmpdec/transpose.h',
              '_decimal/libmpdec/typearith.h',
              '_decimal/libmpdec/umodarith.h',
              ]
        # libmpdec machine configurations: each entry is the list of macros
        # that selects one word-size/assembler combination.
        config = {
          'x64': [('CONFIG_64','1'), ('ASM','1')],
          'uint128': [('CONFIG_64','1'), ('ANSI','1'), ('HAVE_UINT128_T','1')],
          'ansi64': [('CONFIG_64','1'), ('ANSI','1')],
          'ppro': [('CONFIG_32','1'), ('PPRO','1'), ('ASM','1')],
          'ansi32': [('CONFIG_32','1'), ('ANSI','1')],
          'ansi-legacy': [('CONFIG_32','1'), ('ANSI','1'),
                          ('LEGACY_COMPILER','1')],
          'universal': [('UNIVERSAL','1')]
        }
        cc = sysconfig.get_config_var('CC')
        sizeof_size_t = sysconfig.get_config_var('SIZEOF_SIZE_T')
        machine = os.environ.get('PYTHON_DECIMAL_WITH_MACHINE')
        if machine:
            # Override automatic configuration to facilitate testing.
            define_macros = config[machine]
        elif host_platform == 'darwin':
            # Universal here means: build with the same options Python
            # was built with.
            define_macros = config['universal']
        elif sizeof_size_t == 8:
            if sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X64'):
                define_macros = config['x64']
            elif sysconfig.get_config_var('HAVE_GCC_UINT128_T'):
                define_macros = config['uint128']
            else:
                define_macros = config['ansi64']
        elif sizeof_size_t == 4:
            ppro = sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X87')
            if ppro and ('gcc' in cc or 'clang' in cc) and \
                not 'sunos' in host_platform:
                # solaris: problems with register allocation.
                # icc >= 11.0 works as well.
                define_macros = config['ppro']
                extra_compile_args.append('-Wno-unknown-pragmas')
            else:
                define_macros = config['ansi32']
        else:
            raise DistutilsError("_decimal: unsupported architecture")
        # Workarounds for toolchain bugs:
        if sysconfig.get_config_var('HAVE_IPA_PURE_CONST_BUG'):
            # Some versions of gcc miscompile inline asm:
            # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46491
            # http://gcc.gnu.org/ml/gcc/2010-11/msg00366.html
            extra_compile_args.append('-fno-ipa-pure-const')
        if sysconfig.get_config_var('HAVE_GLIBC_MEMMOVE_BUG'):
            # _FORTIFY_SOURCE wrappers for memmove and bcopy are incorrect:
            # http://sourceware.org/ml/libc-alpha/2010-12/msg00009.html
            undef_macros.append('_FORTIFY_SOURCE')
        # Uncomment for extra functionality:
        #define_macros.append(('EXTRA_FUNCTIONALITY', 1))
        ext = Extension (
            '_decimal',
            include_dirs=include_dirs,
            libraries=libraries,
            define_macros=define_macros,
            undef_macros=undef_macros,
            extra_compile_args=extra_compile_args,
            sources=sources,
            depends=depends
        )
        return ext
    def _detect_openssl(self, inc_dirs, lib_dirs):
        """Detect OpenSSL and return the (ssl_ext, hashlib_ext) Extensions.

        Reads the OPENSSL_* config variables that configure recorded.
        Returns (None, None) when libraries or headers are missing;
        ssl_ext alone is None when HAVE_X509_VERIFY_PARAM_SET1_HOST is
        unset (OpenSSL too old for the _ssl module, _hashlib still builds).
        """
        config_vars = sysconfig.get_config_vars()
        def split_var(name, sep):
            # poor man's shlex, the re module is not available yet.
            value = config_vars.get(name)
            if not value:
                return ()
            # This trick works because ax_check_openssl uses --libs-only-L,
            # --libs-only-l, and --cflags-only-I.
            value = ' ' + value
            sep = ' ' + sep
            return [v.strip() for v in value.split(sep) if v.strip()]
        openssl_includes = split_var('OPENSSL_INCLUDES', '-I')
        openssl_libdirs = split_var('OPENSSL_LDFLAGS', '-L')
        openssl_libs = split_var('OPENSSL_LIBS', '-l')
        if not openssl_libs:
            # libssl and libcrypto not found
            return None, None
        # Find OpenSSL includes
        ssl_incs = find_file(
            'openssl/ssl.h', inc_dirs, openssl_includes
        )
        if ssl_incs is None:
            return None, None
        # OpenSSL 1.0.2 uses Kerberos for KRB5 ciphers
        krb5_h = find_file(
            'krb5.h', inc_dirs,
            ['/usr/kerberos/include']
        )
        if krb5_h:
            ssl_incs.extend(krb5_h)
        if config_vars.get("HAVE_X509_VERIFY_PARAM_SET1_HOST"):
            ssl_ext = Extension(
                '_ssl', ['_ssl.c'],
                include_dirs=openssl_includes,
                library_dirs=openssl_libdirs,
                libraries=openssl_libs,
                depends=['socketmodule.h']
            )
        else:
            ssl_ext = None
        hashlib_ext = Extension(
            '_hashlib', ['_hashopenssl.c'],
            depends=['hashlib.h'],
            include_dirs=openssl_includes,
            library_dirs=openssl_libdirs,
            libraries=openssl_libs,
        )
        return ssl_ext, hashlib_ext
    def _detect_nis(self, inc_dirs, lib_dirs):
        """Detect Sun RPC/NIS support and return the 'nis' Extension,
        or None on unsupported platforms or when headers are missing.
        """
        if host_platform in {'win32', 'cygwin', 'qnx6'}:
            return None
        libs = []
        library_dirs = []
        includes_dirs = []
        # bpo-32521: glibc has deprecated Sun RPC for some time. Fedora 28
        # moved headers and libraries to libtirpc and libnsl. The headers
        # are in tircp and nsl sub directories.
        rpcsvc_inc = find_file(
            'rpcsvc/yp_prot.h', inc_dirs,
            [os.path.join(inc_dir, 'nsl') for inc_dir in inc_dirs]
        )
        rpc_inc = find_file(
            'rpc/rpc.h', inc_dirs,
            [os.path.join(inc_dir, 'tirpc') for inc_dir in inc_dirs]
        )
        if rpcsvc_inc is None or rpc_inc is None:
            # not found
            return None
        includes_dirs.extend(rpcsvc_inc)
        includes_dirs.extend(rpc_inc)
        if self.compiler.find_library_file(lib_dirs, 'nsl'):
            libs.append('nsl')
        else:
            # libnsl-devel: check for libnsl in nsl/ subdirectory
            nsl_dirs = [os.path.join(lib_dir, 'nsl') for lib_dir in lib_dirs]
            libnsl = self.compiler.find_library_file(nsl_dirs, 'nsl')
            if libnsl is not None:
                library_dirs.append(os.path.dirname(libnsl))
                libs.append('nsl')
        if self.compiler.find_library_file(lib_dirs, 'tirpc'):
            libs.append('tirpc')
        return Extension(
            'nis', ['nismodule.c'],
            libraries=libs,
            library_dirs=library_dirs,
            include_dirs=includes_dirs
        )
class PyBuildInstall(install):
    """install subclass used when installing Python itself.

    Suppresses the "directory not on sys.path" warning (lib_dynload is
    deliberately outside sys.path during installation) and drops the
    install_egg_info sub-command: Python must not ship an egg-info file.
    """

    def initialize_options(self):
        install.initialize_options(self)
        self.warn_dir = 0

    # Same sub-commands as distutils' install, minus install_egg_info.
    sub_commands = [
        ('install_lib', install.has_lib),
        ('install_headers', install.has_headers),
        ('install_scripts', install.has_scripts),
        ('install_data', install.has_data),
    ]
class PyBuildInstallLib(install_lib):
    """install_lib subclass that normalizes access modes on what it installs.

    Regular files get mode 0o644; shared libraries and all directories get
    0o755.
    """
    # EXT_SUFFIX ends with SHLIB_SUFFIX, so this suffix check also matches
    # extension modules, not only plain shared libraries.
    shlib_suffix = sysconfig.get_config_var("SHLIB_SUFFIX")

    def install(self):
        outfiles = install_lib.install(self)
        self.set_file_modes(outfiles, 0o644, 0o755)
        self.set_dir_modes(self.install_dir, 0o755)
        return outfiles

    def set_file_modes(self, files, defaultMode, sharedLibMode):
        # chmod() may be missing (e.g. on some platforms); nothing to do then.
        if not (files and self.is_chmod_supported()):
            return
        for fname in files:
            if os.path.islink(fname):
                continue
            mode = sharedLibMode if fname.endswith(self.shlib_suffix) else defaultMode
            log.info("changing mode of %s to %o", fname, mode)
            if not self.dry_run:
                os.chmod(fname, mode)

    def set_dir_modes(self, dirname, mode):
        if not self.is_chmod_supported():
            return
        for dirpath, _dirnames, _fnames in os.walk(dirname):
            if os.path.islink(dirpath):
                continue
            log.info("changing mode of %s to %o", dirpath, mode)
            if not self.dry_run:
                os.chmod(dirpath, mode)

    def is_chmod_supported(self):
        return hasattr(os, 'chmod')
class PyBuildScripts(build_scripts):
    """build_scripts subclass that versions the installed script names.

    '2to3' is renamed with the full '-X.Y' suffix; every other script gets
    the minor-only '.Y' suffix (e.g. pydoc3 -> pydoc3.Y).
    """

    def copy_scripts(self):
        outfiles, updated_files = build_scripts.copy_scripts(self)
        fullversion = '-{0[0]}.{0[1]}'.format(sys.version_info)
        minoronly = '.{0[1]}'.format(sys.version_info)
        renamed, renamed_updated = [], []
        for oldpath in outfiles:
            suffix = fullversion if oldpath.endswith('2to3') else minoronly
            newpath = oldpath + suffix
            log.info('renaming %s to %s', oldpath, newpath)
            os.rename(oldpath, newpath)
            renamed.append(newpath)
            # Keep the "updated" bookkeeping pointing at the new names.
            if oldpath in updated_files:
                renamed_updated.append(newpath)
        return renamed, renamed_updated
# Long-description text used as PyPI metadata by the setup() call in main().
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
# Trove classifiers (PEP 301) used as PyPI metadata by the setup() call
# in main(); empty lines are filtered out there.
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
    """Build/install entry point: run distutils setup() with CPython's own
    metadata, the custom build/install command classes above, and one
    bootstrap extension so build_ext is always invoked."""
    # turn off warnings when deprecated modules are imported
    import warnings
    warnings.filterwarnings("ignore",category=DeprecationWarning)
    setup(# PyPI Metadata (PEP 301)
          name = "Python",
          version = sys.version.split()[0],
          url = "http://www.python.org/%d.%d" % sys.version_info[:2],
          maintainer = "Guido van Rossum and the Python community",
          maintainer_email = "python-dev@python.org",
          description = "A high-level object-oriented programming language",
          long_description = SUMMARY.strip(),
          license = "PSF license",
          classifiers = [x for x in CLASSIFIERS.split("\n") if x],
          platforms = ["Many"],
          # Build info
          cmdclass = {'build_ext': PyBuildExt,
                      'build_scripts': PyBuildScripts,
                      'install': PyBuildInstall,
                      'install_lib': PyBuildInstallLib},
          # The struct module is defined here, because build_ext won't be
          # called unless there's at least one extension module defined.
          ext_modules=[Extension('_struct', ['_struct.c'])],
          # If you change the scripts installed here, you also need to
          # check the PyBuildScripts command above, and change the links
          # created by the bininstall target in Makefile.pre.in
          scripts = ["Tools/scripts/pydoc3", "Tools/scripts/idle3",
                     "Tools/scripts/2to3"]
          )
# --install-platlib
if __name__ == '__main__':
    main()
| 43.094435
| 119
| 0.541381
|
4a01a967a09607db3cca27aca17d8e879a1d719d
| 4,742
|
py
|
Python
|
sdk/python/pulumi_rancher2/cluster_sync.py
|
mitchellmaler/pulumi-rancher2
|
e6ca44b58b5b10c12a4e628e61aa8d98330f0863
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_rancher2/cluster_sync.py
|
mitchellmaler/pulumi-rancher2
|
e6ca44b58b5b10c12a4e628e61aa8d98330f0863
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_rancher2/cluster_sync.py
|
mitchellmaler/pulumi-rancher2
|
e6ca44b58b5b10c12a4e628e61aa8d98330f0863
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class ClusterSync(pulumi.CustomResource):
    """Pulumi resource that waits on / reflects the sync state of a Rancher2
    cluster.  Generated by the Pulumi Terraform Bridge — do not hand-edit
    behavior; comments only.
    """
    cluster_id: pulumi.Output[str]
    """
    The cluster ID that is syncing (string)
    """
    default_project_id: pulumi.Output[str]
    """
    (Computed) Default project ID for the cluster sync (string)
    """
    kube_config: pulumi.Output[str]
    """
    (Computed) Kube Config generated for the cluster sync (string)
    """
    node_pool_ids: pulumi.Output[list]
    """
    The node pool IDs used by the cluster id (list)
    """
    # Sync state flag; left undocumented by the generated provider schema.
    synced: pulumi.Output[bool]
    system_project_id: pulumi.Output[str]
    """
    (Computed) System project ID for the cluster sync (string)
    """
    def __init__(__self__, resource_name, opts=None, cluster_id=None, node_pool_ids=None, synced=None, __props__=None, __name__=None, __opts__=None):
        """
        Create a ClusterSync resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cluster_id: The cluster ID that is syncing (string)
        :param pulumi.Input[list] node_pool_ids: The node pool IDs used by the cluster id (list)
        """
        # Legacy keyword aliases kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate inputs and seed the computed
            # output properties with None so the engine fills them in.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if cluster_id is None:
                raise TypeError("Missing required property 'cluster_id'")
            __props__['cluster_id'] = cluster_id
            __props__['node_pool_ids'] = node_pool_ids
            __props__['synced'] = synced
            __props__['default_project_id'] = None
            __props__['kube_config'] = None
            __props__['system_project_id'] = None
        super(ClusterSync, __self__).__init__(
            'rancher2:index/clusterSync:ClusterSync',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, cluster_id=None, default_project_id=None, kube_config=None, node_pool_ids=None, synced=None, system_project_id=None):
        """
        Get an existing ClusterSync resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cluster_id: The cluster ID that is syncing (string)
        :param pulumi.Input[str] default_project_id: (Computed) Default project ID for the cluster sync (string)
        :param pulumi.Input[str] kube_config: (Computed) Kube Config generated for the cluster sync (string)
        :param pulumi.Input[list] node_pool_ids: The node pool IDs used by the cluster id (list)
        :param pulumi.Input[str] system_project_id: (Computed) System project ID for the cluster sync (string)
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["cluster_id"] = cluster_id
        __props__["default_project_id"] = default_project_id
        __props__["kube_config"] = kube_config
        __props__["node_pool_ids"] = node_pool_ids
        __props__["synced"] = synced
        __props__["system_project_id"] = system_project_id
        return ClusterSync(resource_name, opts=opts, __props__=__props__)
    # Map the provider's camelCase property names to Python snake_case
    # attributes (and back) using the generated lookup tables.
    def translate_output_property(self, prop):
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.161905
| 159
| 0.674399
|
4a01aa76dda85489cdc72ed3064f50a91139321b
| 1,154
|
py
|
Python
|
mastering-flask/migrations/versions/43297e7634a2_add_roles_and_roles_users.py
|
zhchnchn/flask-repo
|
51aad8c9e80112d53a6455221bc94cc9a523e356
|
[
"Apache-2.0"
] | null | null | null |
mastering-flask/migrations/versions/43297e7634a2_add_roles_and_roles_users.py
|
zhchnchn/flask-repo
|
51aad8c9e80112d53a6455221bc94cc9a523e356
|
[
"Apache-2.0"
] | null | null | null |
mastering-flask/migrations/versions/43297e7634a2_add_roles_and_roles_users.py
|
zhchnchn/flask-repo
|
51aad8c9e80112d53a6455221bc94cc9a523e356
|
[
"Apache-2.0"
] | null | null | null |
"""add roles and roles_users
Revision ID: 43297e7634a2
Revises: 17aa4630e008
Create Date: 2018-01-11 17:44:01.234594
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '43297e7634a2'        # this migration's identifier
down_revision = '17aa4630e008'   # migration this one is applied on top of
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'roles' table and the user<->role association table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'roles',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=80), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
    )
    # Many-to-many link table; must be created after 'roles' because of
    # the foreign key.
    op.create_table(
        'roles_users',
        sa.Column('role_id', sa.Integer(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['role_id'], ['roles.id']),
        sa.ForeignKeyConstraint(['user_id'], ['users.id']),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by upgrade(), in reverse dependency order:
    the association table references 'roles', so it must go first."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('roles_users', 'roles'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| 27.47619
| 67
| 0.672444
|
4a01aade2b27280756c85a58b347a5dff3da4def
| 512
|
py
|
Python
|
urls.py
|
sideeffects/stats_core
|
333c3111bef466541d754c962db9817769b260cd
|
[
"MIT"
] | 1
|
2021-02-09T18:09:30.000Z
|
2021-02-09T18:09:30.000Z
|
urls.py
|
sideeffects/stats_core
|
333c3111bef466541d754c962db9817769b260cd
|
[
"MIT"
] | null | null | null |
urls.py
|
sideeffects/stats_core
|
333c3111bef466541d754c962db9817769b260cd
|
[
"MIT"
] | 1
|
2021-08-09T03:34:06.000Z
|
2021-08-09T03:34:06.000Z
|
# Root URLconf (legacy Django: uses the old patterns() API).
# Fall back to django.conf.urls.defaults for very old Django versions
# where django.conf.urls does not yet exist.
try:
    from django.conf.urls import patterns, include, url
except ImportError:
    from django.conf.urls.defaults import patterns, include, url
# To use admin
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    # Everything else is delegated to the stats_main app's URLconf.
    (r'', include('stats_main.urls')),
)
# Dotted path to the custom HTTP 500 handler view.
handler500 = 'stats_main.views.custom_500'
| 24.380952
| 66
| 0.693359
|
4a01ac03099abb7589739eaad18c62385d0f1d10
| 11,766
|
py
|
Python
|
snuba/query/expressions.py
|
fpacifici/snuba
|
cf732b71383c948f9387fbe64e9404ca71f8e9c5
|
[
"Apache-2.0"
] | null | null | null |
snuba/query/expressions.py
|
fpacifici/snuba
|
cf732b71383c948f9387fbe64e9404ca71f8e9c5
|
[
"Apache-2.0"
] | null | null | null |
snuba/query/expressions.py
|
fpacifici/snuba
|
cf732b71383c948f9387fbe64e9404ca71f8e9c5
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, replace
from datetime import date, datetime
from typing import (
Callable,
Generic,
Iterator,
Optional,
TypeVar,
Tuple,
Union,
)
# Result type produced by an ExpressionVisitor traversal.
TVisited = TypeVar("TVisited")
@dataclass(frozen=True)
class Expression(ABC):
    """
    A node in the Query AST. This can be a leaf or an intermediate node.
    It represents an expression that can be resolved to a value. This
    includes column names, function calls and boolean conditions (which are
    function calls themselves in the AST), literals, etc.
    All expressions can have an optional alias.
    """
    # TODO: Make it impossible to assign empty string as an alias.
    alias: Optional[str]
    @abstractmethod
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        """
        Transforms this expression through the function passed in input.
        This works almost like a map function over sequences though, contrarily to
        sequences, this acts on a subtree. The semantics of transform can be different
        between intermediate nodes and leaves, so each node class can implement it
        its own way.
        All expressions are frozen dataclasses. This means they are immutable and
        format will either return self or a new instance. It cannot transform the
        expression in place.
        """
        raise NotImplementedError
    @abstractmethod
    def __iter__(self) -> Iterator[Expression]:
        """
        Used to iterate over this expression and its children. The exact
        semantics depends on the structure of the expression.
        See the implementations for more details.
        """
        raise NotImplementedError
    @abstractmethod
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        """
        Accepts a visitor class to traverse the tree. The only role of this method is to
        call the right visit method on the visitor object. Requiring the implementation
        to call the method with the right type forces us to keep the visitor interface
        up to date every time we create a new subclass of Expression.
        """
        raise NotImplementedError
class ExpressionVisitor(ABC, Generic[TVisited]):
    """
    Implementation of a Visitor pattern to simplify traversal of the AST while preserving
    the structure and delegating the control of the traversal algorithm to the client.
    This pattern is generally used for evaluation or formatting. While the iteration
    defined above is for stateless use cases where the order of the nodes is not important.
    The original Visitor pattern does not foresee a return type for visit and accept
    methods, instead it relies on having the Visitor class stateful (any side effect a visit method
    could produce has to make changes to the state of the visitor object). This implementation
    allows the Visitor to define a return type which is generic.
    """
    # One visit_* method per concrete Expression subclass; each node's
    # accept() dispatches to its matching method below.
    @abstractmethod
    def visit_literal(self, exp: Literal) -> TVisited:
        raise NotImplementedError
    @abstractmethod
    def visit_column(self, exp: Column) -> TVisited:
        raise NotImplementedError
    @abstractmethod
    def visit_subscriptable_reference(self, exp: SubscriptableReference) -> TVisited:
        raise NotImplementedError
    @abstractmethod
    def visit_function_call(self, exp: FunctionCall) -> TVisited:
        raise NotImplementedError
    @abstractmethod
    def visit_curried_function_call(self, exp: CurriedFunctionCall) -> TVisited:
        raise NotImplementedError
    @abstractmethod
    def visit_argument(self, exp: Argument) -> TVisited:
        raise NotImplementedError
    @abstractmethod
    def visit_lambda(self, exp: Lambda) -> TVisited:
        raise NotImplementedError
# Scalar value types a Literal node may carry.
OptionalScalarType = Union[None, bool, str, float, int, date, datetime]
@dataclass(frozen=True)
class Literal(Expression):
    """
    A literal in the SQL expression
    """
    value: OptionalScalarType
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        # Leaf node: there are no children, apply func to this node only.
        return func(self)
    def __iter__(self) -> Iterator[Expression]:
        yield self
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        return visitor.visit_literal(self)
@dataclass(frozen=True)
class Column(Expression):
    """
    Represent a column in the schema of the dataset.
    """
    table_name: Optional[str]
    column_name: str
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        # Leaf node: there are no children, apply func to this node only.
        return func(self)
    def __iter__(self) -> Iterator[Expression]:
        yield self
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        return visitor.visit_column(self)
@dataclass(frozen=True)
class SubscriptableReference(Expression):
    """
    Accesses one entry of a subscriptable column (for example key based access on
    a mapping column like tags[key]).
    The only subscriptable column we support now in the query language is a key-value
    mapping, the key is required to be a literal (not any expression) and the subscriptable
    column cannot be the result of an expression itself (func(asd)[key] is not allowed).
    These constraints could be relaxed should we decided to support them in the query language.
    """
    column: Column
    key: Literal
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        return visitor.visit_subscriptable_reference(self)
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        # Transform the children first, then apply func to the rebuilt node,
        # consistent with FunctionCall.transform.
        transformed = replace(
            self, column=self.column.transform(func), key=self.key.transform(func),
        )
        return func(transformed)
    def __iter__(self) -> Iterator[Expression]:
        # Postfix traversal: children first, then this node.
        # Delegating with `yield from` (instead of manual for/yield loops)
        # stays correct independently of any future changes to the
        # children's __iter__ methods, as long as they remain Expressions.
        yield from self.column
        yield from self.key
        yield self
@dataclass(frozen=True)
class FunctionCall(Expression):
    """
    Represents an expression that resolves to a function call on Clickhouse.
    This class also represent conditions. Since Clickhouse supports both the conventional
    infix notation for condition and the functional one, we converge into one
    representation only in the AST to make query processing easier.
    A query processor would not have to care of processing both functional conditions
    and infix conditions.
    """
    function_name: str
    # This is a tuple with variable size and not a Sequence to enforce it is hashable
    parameters: Tuple[Expression, ...]
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        """
        Transforms the subtree starting from the children and then applying
        the transformation function to the root.
        This order is chosen to make the semantics of transform more meaningful,
        the transform operation will be performed on the children first (think
        about the parameters of a function call) and then to the node itself.
        The consequence of this is that, if the transformation function replaces
        the root with something else, with different children, we trust the
        transformation function and we do not run that same function over the
        new children.
        """
        # Generator expression instead of map(lambda ...): same result,
        # idiomatic and avoids the lambda indirection.
        transformed = replace(
            self,
            parameters=tuple(child.transform(func) for child in self.parameters),
        )
        return func(transformed)
    def __iter__(self) -> Iterator[Expression]:
        """
        Traverse the subtree in a postfix order.
        The order here is arbitrary, postfix is chosen to follow the same
        order we have in the transform method.
        """
        for child in self.parameters:
            yield from child
        yield self
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        return visitor.visit_function_call(self)
@dataclass(frozen=True)
class CurriedFunctionCall(Expression):
    """
    This function call represent a function with currying: f(x)(y).
    it means applying the function returned by f(x) to y.
    Clickhouse has a few of these functions, like topK(5)(col).
    We intentionally support only two groups of parameters to avoid an infinite
    number of parameters groups recursively.
    """
    # The function on left side of the expression.
    # for topK this would be topK(5)
    internal_function: FunctionCall
    # The parameters to apply to the result of internal_function.
    # This is a tuple with variable size and not a Sequence to enforce it is hashable
    parameters: Tuple[Expression, ...]
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        """
        Applies the transformation function to this expression following
        the same policy of FunctionCall. The only difference is that this
        one transforms the internal function before applying the function to the
        parameters.
        """
        # Generator expression instead of map(lambda ...): same result,
        # idiomatic and avoids the lambda indirection.
        transformed = replace(
            self,
            internal_function=self.internal_function.transform(func),
            parameters=tuple(child.transform(func) for child in self.parameters),
        )
        return func(transformed)
    def __iter__(self) -> Iterator[Expression]:
        """
        Traverse the subtree in a postfix order.
        """
        yield from self.internal_function
        for child in self.parameters:
            yield from child
        yield self
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        return visitor.visit_curried_function_call(self)
@dataclass(frozen=True)
class Argument(Expression):
    """
    A bound variable in a lambda expression. This is used to refer to variables
    declared in the lambda expression
    """
    name: str
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        # Leaf node: there are no children, apply func to this node only.
        return func(self)
    def __iter__(self) -> Iterator[Expression]:
        yield self
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        return visitor.visit_argument(self)
@dataclass(frozen=True)
class Lambda(Expression):
    """
    A lambda expression in the form (x,y,z -> transform(x,y,z))
    """
    # the parameters in the expressions. These are intentionally not expressions
    # since they are variable names and cannot have aliases
    # This is a tuple with variable size and not a Sequence to enforce it is hashable
    parameters: Tuple[str, ...]
    transformation: Expression
    def transform(self, func: Callable[[Expression], Expression]) -> Expression:
        """
        Applies the transformation to the inner expression but not to the parameters
        declaration.
        """
        transformed = replace(self, transformation=self.transformation.transform(func))
        return func(transformed)
    def __iter__(self) -> Iterator[Expression]:
        """
        Traverse the subtree in a postfix order.
        """
        # `yield from` replaces the manual for/yield loop; identical traversal.
        yield from self.transformation
        yield self
    def accept(self, visitor: ExpressionVisitor[TVisited]) -> TVisited:
        return visitor.visit_lambda(self)
| 35.439759
| 99
| 0.689784
|
4a01ac2b5efc2cd4437d2d701a55553ad2157703
| 2,903
|
py
|
Python
|
lib/scripts/mouse.py
|
pacifio/dart-autogui
|
45e99958b0a21ddb93d26638cea2e348e2e14efa
|
[
"MIT"
] | 2
|
2021-09-11T05:43:45.000Z
|
2021-09-11T07:06:21.000Z
|
lib/scripts/mouse.py
|
pacifio/dart-autogui
|
45e99958b0a21ddb93d26638cea2e348e2e14efa
|
[
"MIT"
] | null | null | null |
lib/scripts/mouse.py
|
pacifio/dart-autogui
|
45e99958b0a21ddb93d26638cea2e348e2e14efa
|
[
"MIT"
] | null | null | null |
import json
import sys
import pyautogui as auto
def map_tween(tween):
    """Return the pyautogui easing function for the given tween name.

    Unknown names fall back to linear easing, matching the original
    if/elif chain's final else branch.
    """
    # Dict dispatch instead of an if/elif ladder: one lookup, same mapping.
    return {
        "linear": auto.linear,
        "ease-in": auto.easeInQuad,
        "ease-out": auto.easeOutQuad,
        "ease-in-out": auto.easeInOutQuad,
        "bounce": auto.easeInBounce,
        "elastic": auto.easeInElastic,
    }.get(tween, auto.linear)
class Mouse:
    """Thin CLI-facing wrappers around pyautogui's mouse API.

    All methods are static: they hold no state and simply adapt the
    command-line dispatcher below to pyautogui calls.
    """
    @staticmethod
    def get_mouse_pos() -> str:
        """Return the current cursor position as a JSON string '{"x":..,"y":..}'.

        Note: the original annotation said ``tuple`` but the method has
        always returned a JSON string; the annotation is corrected here.
        """
        x, y = auto.position()
        return json.dumps({'x': x, 'y': y})
    @staticmethod
    def move_to(x: int = 0, y: int = 0, duration: int = 0, tween: str = "linear") -> None:
        """Move the cursor to the absolute position (x, y)."""
        auto.moveTo(x=x, y=y, duration=duration, tween=map_tween(tween))
    @staticmethod
    def move_rel(x: int = 0, y: int = 0, duration: int = 0) -> None:
        """Move the cursor by an (x, y) offset from its current position."""
        auto.moveRel(xOffset=x, yOffset=y, duration=duration)
    @staticmethod
    def drag_to(x: int = 0, y: int = 0, duration: int = 0, tween: str = "linear", button: str = "left") -> None:
        """Press, move to absolute (x, y), and release the given button."""
        auto.dragTo(x, y, duration, map_tween(tween), button)
    @staticmethod
    def drag_rel(x: int = 0, y: int = 0, duration: int = 0, tween: str = "linear", button: str = "left") -> None:
        """Press, move by an (x, y) offset, and release the given button.

        Bug fix: this previously called auto.dragTo, which treats x/y as
        absolute screen coordinates — making drag_rel behave identically
        to drag_to. auto.dragRel applies them as relative offsets.
        """
        auto.dragRel(x, y, duration, map_tween(tween), button)
    @staticmethod
    def click(x: int = 0, y: int = 0, clicks: int = 1, interval: int = 0, button: str = "left", duration: int = 0, tween: str = "linear"):
        """Click at (x, y) with the given button, count and timing."""
        auto.click(x, y, clicks, interval, button, duration, map_tween(tween))
    @staticmethod
    def default():
        """Fallback payload (empty JSON object) for unknown commands."""
        return json.dumps({})
if __name__ == "__main__":
    # CLI entry point: argv[1] selects the action; the remaining argv
    # entries are that action's positional parameters.
    argv = sys.argv
    action = argv[1]
    if action == "pos":
        print(Mouse.get_mouse_pos())
    elif action == "move_to":
        Mouse.move_to(int(argv[2]), int(argv[3]), int(argv[4]), str(argv[5]))
    elif action == "move_rel":
        Mouse.move_rel(int(argv[2]), int(argv[3]), int(argv[4]))
    elif action == "drag_to":
        Mouse.drag_to(int(argv[2]), int(argv[3]), int(argv[4]),
                      str(argv[5]), str(argv[6]))
    elif action == "drag_rel":
        Mouse.drag_rel(int(argv[2]), int(argv[3]), int(argv[4]),
                       str(argv[5]), str(argv[6]))
    elif action == "click":
        Mouse.click(int(argv[2]), int(argv[3]), int(argv[4]), int(argv[5]),
                    str(argv[6]), int(argv[7]), str(argv[8]))
    else:
        # Unknown command: emit an empty JSON object.
        print(Mouse.default())
| 32.988636
| 138
| 0.568722
|
4a01ac63094f5e0150887f9e201a35ff24119cf3
| 1,556
|
py
|
Python
|
universal/algos/rmr.py
|
mannmann2/universal-portfolios
|
7a9b563193353d93c9713761544da8750b0b06ab
|
[
"MIT"
] | 1
|
2022-01-06T14:47:02.000Z
|
2022-01-06T14:47:02.000Z
|
universal/algos/rmr.py
|
jmrichardson/universal-portfolios
|
f49455b01f74223707474047089f10fb5360da37
|
[
"MIT"
] | null | null | null |
universal/algos/rmr.py
|
jmrichardson/universal-portfolios
|
f49455b01f74223707474047089f10fb5360da37
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from .. import tools
from ..algo import Algo
from .olmar import OLMAR
def norm(x):
    """Return the L2 norm of a Series, or per-row L2 norms of a DataFrame.

    A Series is reduced over its single axis; a DataFrame is reduced
    across columns, yielding one norm per row.
    """
    axis = 0 if isinstance(x, pd.Series) else 1
    return np.sqrt((x ** 2).sum(axis=axis))
class RMR(OLMAR):
    """Robust Median Reversion. Strategy exploiting mean-reversion by robust
    L1-median estimator. Practically the same as OLMAR.

    Reference:
        Dingjiang Huang, Junlong Zhou, Bin Li, Steven C.H. Hoi, Shuigeng Zhou
        Robust Median Reversion Strategy for On-Line Portfolio Selection, 2013.
        http://ijcai.org/papers13/Papers/IJCAI13-296.pdf
    """

    PRICE_TYPE = "raw"
    REPLACE_MISSING = True

    def __init__(self, window=5, eps=10.0, tau=0.001):
        """
        :param window: Lookback window.
        :param eps: Constraint on return for new weights on last price (average of prices).
            x * w >= eps for new weights w.
        :param tau: Precision for finding median. Recommended value is around 0.001. Strongly
            affects algo speed.
        """
        super(RMR, self).__init__(window, eps)
        self.tau = tau

    def predict(self, x, history):
        """Predict next price relatives via the L1-median of recent prices.

        Runs a Weiszfeld-style fixed-point iteration: start from the
        arithmetic mean and iterate until the relative change of the
        estimate drops below ``tau``.
        """
        median = history.mean()
        prev = None
        while prev is None or norm(median - prev) / norm(prev) > self.tau:
            prev = median
            dist = norm(history - median)
            median = history.div(dist, axis=0).sum() / (1.0 / dist).sum()
        return median / x
if __name__ == "__main__":
    # Run a quick backtest of the strategy when executed as a script.
    tools.quickrun(RMR())
| 28.814815
| 93
| 0.602185
|
4a01ad7d8efb5e075b513c39a7131f93d2b371ad
| 12,490
|
py
|
Python
|
host/agent/agent.py
|
MSAdministrator/cuckoo-config
|
85f53164087bfcde79f0392b904903b0f5d00815
|
[
"MIT"
] | 4
|
2020-04-18T19:10:36.000Z
|
2021-09-03T09:07:27.000Z
|
host/agent/agent.py
|
MSAdministrator/cuckoo-config
|
85f53164087bfcde79f0392b904903b0f5d00815
|
[
"MIT"
] | null | null | null |
host/agent/agent.py
|
MSAdministrator/cuckoo-config
|
85f53164087bfcde79f0392b904903b0f5d00815
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2015-2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import argparse
import cgi
import io
import json
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import traceback
import zipfile
import SimpleHTTPServer
import SocketServer
AGENT_VERSION = "0.8"
# Capabilities advertised to the Cuckoo host via the "/" route.
AGENT_FEATURES = [
    "execpy", "pinning", "logs", "largefile", "unicodepath",
]
# Capture everything written to stdout/stderr in memory so it can be
# returned to the host through the /logs route.
sys.stdout = io.BytesIO()
sys.stderr = io.BytesIO()
class MiniHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """HTTP handler that dispatches GET/POST requests to MiniHTTPServer.

    Populates the module-level `request` object with the client address
    and parsed form/file data before delegating to the owning server's
    route table via ``self.httpd.handle(self)``.
    """
    server_version = "Cuckoo Agent"

    def do_GET(self):
        # Record who is talking to us and reset per-request state.
        request.client_ip, request.client_port = self.client_address
        request.form = {}
        request.files = {}

        # Once the Agent is pinned (see /pinning), only requests from the
        # pinned client IP are served.
        if "client_ip" not in state or request.client_ip == state["client_ip"]:
            self.httpd.handle(self)

    def do_POST(self):
        # Minimal CGI environ so FieldStorage can parse the POST body.
        environ = {
            "REQUEST_METHOD": "POST",
            "CONTENT_TYPE": self.headers.get("Content-Type"),
        }
        form = cgi.FieldStorage(fp=self.rfile,
                                headers=self.headers,
                                environ=environ)
        request.form = {}
        request.files = {}
        # Another pretty fancy workaround. Since we provide backwards
        # compatibility with the Old Agent we will get an xmlrpc request
        # from the analyzer when the analysis has finished. Now xmlrpc being
        # xmlrpc we're getting text/xml as content-type which cgi does not
        # handle. This check detects when there is no available data rather
        # than getting a hard exception trying to do so.
        if form.list:
            for key in form.keys():
                value = form[key]
                if value.filename:
                    # Uploaded file: keep the file-like object.
                    request.files[key] = value.file
                else:
                    # Plain field: decode the bytes into unicode.
                    request.form[key] = value.value.decode("utf8")
        if "client_ip" not in state or request.client_ip == state["client_ip"]:
            self.httpd.handle(self)
class MiniHTTPServer(object):
    """Minimal Flask-like HTTP server with a decorator-based route table."""

    def __init__(self):
        self.handler = MiniHTTPRequestHandler

        # Reference back to the server.
        self.handler.httpd = self

        # Routes are (compiled regex, view function) pairs per HTTP method.
        self.routes = {
            "GET": [],
            "POST": [],
        }

    def run(self, host="0.0.0.0", port=8000):
        """Bind to host:port and serve forever (blocking)."""
        # Bug fix: allow_reuse_address must be set on the class *before*
        # the server is instantiated — TCPServer.__init__ binds the socket,
        # so the old instance-attribute assignment afterwards had no effect
        # and restarting the Agent could fail with EADDRINUSE.
        SocketServer.TCPServer.allow_reuse_address = True
        self.s = SocketServer.TCPServer((host, port), self.handler)
        self.s.serve_forever()

    def route(self, path, methods=["GET"]):
        """Decorator registering a view function for `path` on `methods`."""
        def register(fn):
            for method in methods:
                # Anchor the pattern so "/status" doesn't match "/status2".
                self.routes[method].append((re.compile(path + "$"), fn))
            return fn
        return register

    def handle(self, obj):
        """Dispatch a request to the first matching route and write the reply."""
        for route, fn in self.routes[obj.command]:
            if route.match(obj.path):
                ret = fn()
                break
        else:
            ret = json_error(404, message="Route not found")

        ret.init()
        obj.send_response(ret.status_code)
        ret.headers(obj)
        obj.end_headers()

        if isinstance(ret, jsonify):
            obj.wfile.write(ret.json())
        elif isinstance(ret, send_file):
            ret.write(obj.wfile)

    def shutdown(self):
        # BaseServer also features a .shutdown() method, but you can't use
        # that from the same thread as that will deadlock the whole thing.
        self.s._BaseServer__shutdown_request = True
class jsonify(object):
    """Wrapper that mimics Flask.jsonify: holds keyword values, emits JSON."""

    def __init__(self, **kwargs):
        # Default to HTTP 200; callers override status_code for errors.
        self.status_code = 200
        self.values = kwargs

    def init(self):
        # Nothing to prepare for an in-memory JSON response.
        pass

    def json(self):
        """Serialize the stored values as a JSON document."""
        return json.dumps(self.values)

    def headers(self, obj):
        # JSON responses add no extra headers.
        pass
class send_file(object):
    """Wrapper that mimics Flask.send_file: streams a file to the client."""

    def __init__(self, path):
        self.path = path
        self.status_code = 200

    def init(self):
        """Resolve the file size, downgrading to a 404 if it doesn't exist."""
        if os.path.isfile(self.path):
            self.length = os.path.getsize(self.path)
        else:
            self.status_code = 404
            self.length = 0

    def write(self, sock):
        """Copy the file contents onto `sock` in bounded-size chunks."""
        if not self.length:
            return

        # Stream 1 MB at a time to bound memory usage for large files.
        with open(self.path, "rb") as f:
            while True:
                chunk = f.read(1024 * 1024)
                if not chunk:
                    break
                sock.write(chunk)

    def headers(self, obj):
        obj.send_header("Content-Length", self.length)
class request(object):
    """Module-level stand-in for Flask's request object.

    The fields below are (re)populated by MiniHTTPRequestHandler for
    every incoming request.
    """
    # Parsed form fields (key -> decoded text value).
    form = {}
    # Uploaded files (key -> file-like object).
    files = {}
    # Address of the client currently being served.
    client_ip = None
    client_port = None

    # Minimal werkzeug-compatible environ so /kill can stop the server.
    environ = {
        "werkzeug.server.shutdown": lambda: app.shutdown(),
    }
# Singleton server instance, and the shared analysis state dict holding
# "status", "description" and (once pinned) "client_ip".
app = MiniHTTPServer()
state = {}
def json_error(error_code, message):
    """Build a JSON error response carrying the given HTTP status code."""
    response = jsonify(message=message, error_code=error_code)
    response.status_code = error_code
    return response
def json_exception(message):
    """Build a 500 response that includes the current exception traceback."""
    response = jsonify(message=message, error_code=500,
                       traceback=traceback.format_exc())
    response.status_code = 500
    return response
def json_success(message, **kwargs):
    """Build a 200 JSON response with a message plus any extra fields."""
    return jsonify(message=message, **kwargs)
@app.route("/")
def get_index():
    """Report the Agent version and the feature set it supports."""
    return json_success("Cuckoo Agent!",
                        version=AGENT_VERSION,
                        features=AGENT_FEATURES)
@app.route("/status")
def get_status():
    """Return the current analysis status and its description."""
    return json_success(
        "Analysis status",
        status=state.get("status"),
        description=state.get("description"),
    )
@app.route("/status", methods=["POST"])
def put_status():
    """Store a new analysis status; the "status" form field is mandatory."""
    if "status" not in request.form:
        return json_error(400, "No status has been provided")

    state["status"] = request.form["status"]
    state["description"] = request.form.get("description")

    return json_success("Analysis status updated")
@app.route("/logs")
def get_logs():
    """Return everything the Agent has written to stdout/stderr so far."""
    return json_success("Agent logs",
                        stdout=sys.stdout.getvalue(),
                        stderr=sys.stderr.getvalue())
@app.route("/system")
def get_system():
    """Report the operating system name (e.g. "Windows", "Linux")."""
    return json_success("System", system=platform.system())
@app.route("/environ")
def get_environ():
    """Report the Agent process' environment variables."""
    return json_success("Environment variables", environ=dict(os.environ))
@app.route("/path")
def get_path():
    """Report the absolute filesystem path of this agent script."""
    return json_success("Agent path", filepath=os.path.abspath(__file__))
@app.route("/mkdir", methods=["POST"])
def do_mkdir():
    """Create a directory (and any missing parents) at "dirpath"."""
    if "dirpath" not in request.form:
        return json_error(400, "No dirpath has been provided")

    # 0o777 spelling is valid on Python 2.6+ as well as Python 3; the old
    # 0777 literal is a hard syntax error on Python 3.
    mode = int(request.form.get("mode", 0o777))

    try:
        os.makedirs(request.form["dirpath"], mode=mode)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed; failure details go back via the traceback.
        return json_exception("Error creating directory")

    return json_success("Successfully created directory")
@app.route("/mktemp", methods=["GET", "POST"])
def do_mktemp():
    """Create a temporary file and return its path to the caller."""
    suffix = request.form.get("suffix", "")
    prefix = request.form.get("prefix", "tmp")
    dirpath = request.form.get("dirpath")

    try:
        fd, filepath = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                        dir=dirpath)
    except:
        return json_exception("Error creating temporary file")

    # mkstemp leaves the descriptor open; the caller only needs the path.
    os.close(fd)

    return json_success("Successfully created temporary file",
                        filepath=filepath)
@app.route("/mkdtemp", methods=["GET", "POST"])
def do_mkdtemp():
    """Create a temporary directory and return its path to the caller."""
    suffix = request.form.get("suffix", "")
    prefix = request.form.get("prefix", "tmp")
    dirpath = request.form.get("dirpath")

    try:
        dirpath = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dirpath)
    except:
        return json_exception("Error creating temporary directory")

    return json_success("Successfully created temporary directory",
                        dirpath=dirpath)
@app.route("/store", methods=["POST"])
def do_store():
    """Write an uploaded file to "filepath" on the guest's disk."""
    if "filepath" not in request.form:
        return json_error(400, "No filepath has been provided")

    if "file" not in request.files:
        return json_error(400, "No file has been provided")

    try:
        # Copy in 10 MB chunks so large samples don't blow up memory.
        with open(request.form["filepath"], "wb") as f:
            shutil.copyfileobj(request.files["file"], f, 10*1024*1024)
    except:
        return json_exception("Error storing file")

    return json_success("Successfully stored file")
@app.route("/retrieve", methods=["POST"])
def do_retrieve():
    """Stream the file at "filepath" back to the client."""
    if "filepath" not in request.form:
        return json_error(400, "No filepath has been provided")

    return send_file(request.form["filepath"])
@app.route("/extract", methods=["POST"])
def do_extract():
    """Extract an uploaded zip archive into "dirpath"."""
    if "dirpath" not in request.form:
        return json_error(400, "No dirpath has been provided")

    if "zipfile" not in request.files:
        return json_error(400, "No zip file has been provided")

    try:
        with zipfile.ZipFile(request.files["zipfile"], "r") as archive:
            archive.extractall(request.form["dirpath"])
    except:
        return json_exception("Error extracting zip file")

    return json_success("Successfully extracted zip file")
@app.route("/remove", methods=["POST"])
def do_remove():
    """Delete the file or directory tree at "path"."""
    if "path" not in request.form:
        return json_error(400, "No path has been provided")

    try:
        if os.path.isdir(request.form["path"]):
            # Mark all files as writable (clears read-only attributes,
            # relevant on Windows) so rmtree can delete them.
            for dirpath, _, filenames in os.walk(request.form["path"]):
                for filename in filenames:
                    os.chmod(os.path.join(dirpath, filename), stat.S_IWRITE)

            shutil.rmtree(request.form["path"])
            message = "Successfully deleted directory"
        elif os.path.isfile(request.form["path"]):
            # Same read-only workaround for a single file.
            os.chmod(request.form["path"], stat.S_IWRITE)
            os.remove(request.form["path"])
            message = "Successfully deleted file"
        else:
            return json_error(404, "Path provided does not exist")
    except:
        return json_exception("Error removing file or directory")

    return json_success(message)
@app.route("/execute", methods=["POST"])
def do_execute():
    """Execute a command, optionally asynchronously and/or through a shell."""
    if "command" not in request.form:
        return json_error(400, "No command has been provided")

    # Execute the command asynchronously? As a shell command?
    # Renamed from "async": that identifier became a reserved keyword in
    # Python 3.7, which made this file unparseable there. Behavior is
    # unchanged on Python 2.
    run_async = "async" in request.form
    shell = "shell" in request.form

    cwd = request.form.get("cwd")
    stdout = stderr = None

    try:
        if run_async:
            # Fire and forget; output is not captured.
            subprocess.Popen(request.form["command"], shell=shell, cwd=cwd)
        else:
            p = subprocess.Popen(
                request.form["command"], shell=shell, cwd=cwd,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            stdout, stderr = p.communicate()
    except Exception:
        return json_exception("Error executing command")

    return json_success("Successfully executed command",
                        stdout=stdout, stderr=stderr)
@app.route("/execpy", methods=["POST"])
def do_execpy():
    """Execute a Python file using the Agent's own interpreter."""
    if "filepath" not in request.form:
        return json_error(400, "No Python file has been provided")

    # Execute the command asynchronously?
    # Renamed from "async": reserved keyword since Python 3.7, which made
    # this file unparseable there. Behavior is unchanged on Python 2.
    run_async = "async" in request.form
    cwd = request.form.get("cwd")
    stdout = stderr = None

    args = [
        sys.executable,
        request.form["filepath"],
    ]

    try:
        if run_async:
            # Fire and forget; output is not captured.
            subprocess.Popen(args, cwd=cwd)
        else:
            p = subprocess.Popen(args, cwd=cwd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
    except Exception:
        return json_exception("Error executing command")

    return json_success("Successfully executed command",
                        stdout=stdout, stderr=stderr)
@app.route("/pinning")
def do_pinning():
    """Pin the Agent to the requesting client's IP; only one pin allowed."""
    if "client_ip" in state:
        return json_error(500, "Agent has already been pinned to an IP!")

    state["client_ip"] = request.client_ip
    return json_success("Successfully pinned Agent",
                        client_ip=request.client_ip)
@app.route("/kill")
def do_kill():
    """Shut the Agent down via the werkzeug-compatible shutdown hook."""
    shutdown = request.environ.get("werkzeug.server.shutdown")
    if shutdown is None:
        return json_error(500, "Not running with the Werkzeug server")

    shutdown()
    return json_success("Quit the Cuckoo Agent")
if __name__ == "__main__":
    # Optional positional arguments: listen host and port.
    parser = argparse.ArgumentParser()
    parser.add_argument("host", nargs="?", default="0.0.0.0")
    parser.add_argument("port", nargs="?", default="8000")
    arguments = parser.parse_args()

    app.run(host=arguments.host, port=int(arguments.port))
| 29.738095
| 82
| 0.617374
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.