Dataset schema (one row per source file):

| column | type | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
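For orientation, here is a minimal sketch of how a shard with this schema could be inspected after loading into pandas; the file name and the column choices below are placeholders for illustration, not part of the dataset description above.

import pandas as pd

# Hypothetical shard file name; the real storage layout is not specified here.
df = pd.read_parquet("code_shard.parquet")

# Every row shown in this dump is a Python file, so ext/lang are effectively constant.
python_rows = df[df["ext"] == "py"]

row = python_rows.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(len(row["content"]), len(row["content_no_comment"]))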
hexsha: 790aadb441e4de5e6bce99cf2422a6b0a6a9d785 | size: 3,325 | ext: py | lang: Python
max_stars_repo: jmaces/robust-nets @ 25d49302f9fa5fcc9ded2727de75e96e25243d09, path tvsynth/script_train_unet_it_tikh_jitter.py, licenses ["MIT"], stars 14 (2020-11-10T07:37:23.000Z to 2022-03-21T15:19:22.000Z)
max_issues_repo: jmaces/robust-nets @ 25d49302f9fa5fcc9ded2727de75e96e25243d09, path tvsynth/script_train_unet_it_tikh_jitter.py, licenses ["MIT"], issues null
max_forks_repo: jmaces/robust-nets @ 25d49302f9fa5fcc9ded2727de75e96e25243d09, path tvsynth/script_train_unet_it_tikh_jitter.py, licenses ["MIT"], forks 2 (2021-03-13T14:39:36.000Z to 2022-02-17T06:44:29.000Z)
content:
import os
import matplotlib as mpl
import torch
from data_management import Jitter, load_dataset
from networks import IterativeNet, UNet
from operators import TVAnalysis, get_tikhonov_matrix
# --- load configuration -----
import config # isort:skip
# ----- general setup -----
mpl.use("agg")
device = torch.device("cuda:0")
# ----- operators -----
OpA = config.meas_op(config.m, config.n, device=device, **config.meas_params)
OpTV = TVAnalysis(config.n, device=device)
# ----- build linear inverter ------
reg_fac = 2e-2
inverter = torch.nn.Linear(OpA.m, OpA.n, bias=False)
inverter.weight.requires_grad = False
inverter.weight.data = get_tikhonov_matrix(OpA, OpTV, reg_fac)
# ----- network configuration -----
subnet_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"base_features": 64,
}
subnet = UNet
it_net_params = {
"operator": OpA,
"inverter": inverter,
"num_iter": 8,
"lam": 8 * [0.1],
"lam_learnable": True,
"final_dc": True,
}
# ----- training setup ------
mse_loss = torch.nn.MSELoss(reduction="sum")
def loss_func(pred, tar):
return mse_loss(pred, tar) / pred.shape[0]
train_phases = 2
train_params = {
"num_epochs": [100, 5],
"batch_size": [40, 40],
"loss_func": loss_func,
"save_path": [
os.path.join(
config.RESULTS_PATH,
"unet_it_tikh_jitter_"
"train_phase_{}".format((i + 1) % (train_phases + 1)),
)
for i in range(train_phases + 1)
],
"save_epochs": 1,
"optimizer": torch.optim.Adam,
"optimizer_params": [
{"lr": 5e-5, "eps": 1e-5, "weight_decay": 5e-4},
{"lr": 2e-5, "eps": 1e-5, "weight_decay": 5e-4},
],
"scheduler": torch.optim.lr_scheduler.StepLR,
"scheduler_params": {"step_size": 1, "gamma": 1.0},
"acc_steps": [1, 200],
"train_transform": Jitter(2e0, 0.0, 1.0),
"val_transform": None,
}
# -----data prep -----
X_train, C_train, Y_train = [
tmp.unsqueeze(-2).to(device)
for tmp in load_dataset(config.set_params["path"], subset="train")
]
X_val, C_val, Y_val = [
tmp.unsqueeze(-2).to(device)
for tmp in load_dataset(config.set_params["path"], subset="val")
]
# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
with open(
os.path.join(train_params["save_path"][-1], "hyperparameters.txt"), "w"
) as file:
for key, value in subnet_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in it_net_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_params.items():
file.write(key + ": " + str(value) + "\n")
file.write("train_phases" + ": " + str(train_phases) + "\n")
# ------ construct network and train -----
subnet = subnet(**subnet_params).to(device)
it_net = IterativeNet(subnet, **it_net_params).to(device)
for i in range(train_phases):
train_params_cur = {}
for key, value in train_params.items():
train_params_cur[key] = (
value[i] if isinstance(value, (tuple, list)) else value
)
print("Phase {}:".format(i + 1))
for key, value in train_params_cur.items():
print(key + ": " + str(value))
it_net.train_on((Y_train, X_train), (Y_val, X_val), **train_params_cur)
avg_line_length: 26.814516 | max_line_length: 77 | alphanum_fraction: 0.616241
content_no_comment:
import os
import matplotlib as mpl
import torch
from data_management import Jitter, load_dataset
from networks import IterativeNet, UNet
from operators import TVAnalysis, get_tikhonov_matrix
import config
mpl.use("agg")
device = torch.device("cuda:0")
OpA = config.meas_op(config.m, config.n, device=device, **config.meas_params)
OpTV = TVAnalysis(config.n, device=device)
reg_fac = 2e-2
inverter = torch.nn.Linear(OpA.m, OpA.n, bias=False)
inverter.weight.requires_grad = False
inverter.weight.data = get_tikhonov_matrix(OpA, OpTV, reg_fac)
subnet_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"base_features": 64,
}
subnet = UNet
it_net_params = {
"operator": OpA,
"inverter": inverter,
"num_iter": 8,
"lam": 8 * [0.1],
"lam_learnable": True,
"final_dc": True,
}
mse_loss = torch.nn.MSELoss(reduction="sum")
def loss_func(pred, tar):
return mse_loss(pred, tar) / pred.shape[0]
train_phases = 2
train_params = {
"num_epochs": [100, 5],
"batch_size": [40, 40],
"loss_func": loss_func,
"save_path": [
os.path.join(
config.RESULTS_PATH,
"unet_it_tikh_jitter_"
"train_phase_{}".format((i + 1) % (train_phases + 1)),
)
for i in range(train_phases + 1)
],
"save_epochs": 1,
"optimizer": torch.optim.Adam,
"optimizer_params": [
{"lr": 5e-5, "eps": 1e-5, "weight_decay": 5e-4},
{"lr": 2e-5, "eps": 1e-5, "weight_decay": 5e-4},
],
"scheduler": torch.optim.lr_scheduler.StepLR,
"scheduler_params": {"step_size": 1, "gamma": 1.0},
"acc_steps": [1, 200],
"train_transform": Jitter(2e0, 0.0, 1.0),
"val_transform": None,
}
X_train, C_train, Y_train = [
tmp.unsqueeze(-2).to(device)
for tmp in load_dataset(config.set_params["path"], subset="train")
]
X_val, C_val, Y_val = [
tmp.unsqueeze(-2).to(device)
for tmp in load_dataset(config.set_params["path"], subset="val")
]
os.makedirs(train_params["save_path"][-1], exist_ok=True)
with open(
os.path.join(train_params["save_path"][-1], "hyperparameters.txt"), "w"
) as file:
for key, value in subnet_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in it_net_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_params.items():
file.write(key + ": " + str(value) + "\n")
file.write("train_phases" + ": " + str(train_phases) + "\n")
subnet = subnet(**subnet_params).to(device)
it_net = IterativeNet(subnet, **it_net_params).to(device)
for i in range(train_phases):
train_params_cur = {}
for key, value in train_params.items():
train_params_cur[key] = (
value[i] if isinstance(value, (tuple, list)) else value
)
print("Phase {}:".format(i + 1))
for key, value in train_params_cur.items():
print(key + ": " + str(value))
it_net.train_on((Y_train, X_train), (Y_val, X_val), **train_params_cur)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790aadc72c81025abce53266b5ee19b90d22938d | size: 20,731 | ext: py | lang: Python
max_stars_repo: sethvargo/datasette @ 30e5f0e67c38054a8087a2a4eae3fc4d1779af90, path tests/test_utils.py, licenses ["Apache-2.0"], stars null
max_issues_repo: sethvargo/datasette @ 30e5f0e67c38054a8087a2a4eae3fc4d1779af90, path tests/test_utils.py, licenses ["Apache-2.0"], issues null
max_forks_repo: sethvargo/datasette @ 30e5f0e67c38054a8087a2a4eae3fc4d1779af90, path tests/test_utils.py, licenses ["Apache-2.0"], forks null
content:
"""
Tests for various datasette helper functions.
"""
from datasette.app import Datasette
from datasette import utils
from datasette.utils.asgi import Request
from datasette.utils.sqlite import sqlite3
import json
import os
import pathlib
import pytest
import tempfile
from unittest.mock import patch
@pytest.mark.parametrize(
"path,expected",
[
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("123,433,112", ["123", "433", "112"]),
("123~2C433,112", ["123,433", "112"]),
("123~2F433~2F112", ["123/433/112"]),
],
)
def test_urlsafe_components(path, expected):
assert expected == utils.urlsafe_components(path)
@pytest.mark.parametrize(
"path,added_args,expected",
[
("/foo", {"bar": 1}, "/foo?bar=1"),
("/foo?bar=1", {"baz": 2}, "/foo?bar=1&baz=2"),
("/foo?bar=1&bar=2", {"baz": 3}, "/foo?bar=1&bar=2&baz=3"),
("/foo?bar=1", {"bar": None}, "/foo"),
# Test order is preserved
(
"/?_facet=prim_state&_facet=area_name",
(("prim_state", "GA"),),
"/?_facet=prim_state&_facet=area_name&prim_state=GA",
),
(
"/?_facet=state&_facet=city&state=MI",
(("city", "Detroit"),),
"/?_facet=state&_facet=city&state=MI&city=Detroit",
),
(
"/?_facet=state&_facet=city",
(("_facet", "planet_int"),),
"/?_facet=state&_facet=city&_facet=planet_int",
),
],
)
def test_path_with_added_args(path, added_args, expected):
request = Request.fake(path)
actual = utils.path_with_added_args(request, added_args)
assert expected == actual
@pytest.mark.parametrize(
"path,args,expected",
[
("/foo?bar=1", {"bar"}, "/foo"),
("/foo?bar=1&baz=2", {"bar"}, "/foo?baz=2"),
("/foo?bar=1&bar=2&bar=3", {"bar": "2"}, "/foo?bar=1&bar=3"),
],
)
def test_path_with_removed_args(path, args, expected):
request = Request.fake(path)
actual = utils.path_with_removed_args(request, args)
assert expected == actual
# Run the test again but this time use the path= argument
request = Request.fake("/")
actual = utils.path_with_removed_args(request, args, path=path)
assert expected == actual
@pytest.mark.parametrize(
"path,args,expected",
[
("/foo?bar=1", {"bar": 2}, "/foo?bar=2"),
("/foo?bar=1&baz=2", {"bar": None}, "/foo?baz=2"),
],
)
def test_path_with_replaced_args(path, args, expected):
request = Request.fake(path)
actual = utils.path_with_replaced_args(request, args)
assert expected == actual
@pytest.mark.parametrize(
"row,pks,expected_path",
[
({"A": "foo", "B": "bar"}, ["A", "B"], "foo,bar"),
({"A": "f,o", "B": "bar"}, ["A", "B"], "f~2Co,bar"),
({"A": 123}, ["A"], "123"),
(
utils.CustomRow(
["searchable_id", "tag"],
[
("searchable_id", {"value": 1, "label": "1"}),
("tag", {"value": "feline", "label": "feline"}),
],
),
["searchable_id", "tag"],
"1,feline",
),
],
)
def test_path_from_row_pks(row, pks, expected_path):
actual_path = utils.path_from_row_pks(row, pks, False)
assert expected_path == actual_path
@pytest.mark.parametrize(
"obj,expected",
[
(
{
"Description": "Soft drinks",
"Picture": b"\x15\x1c\x02\xc7\xad\x05\xfe",
"CategoryID": 1,
},
"""
{"CategoryID": 1, "Description": "Soft drinks", "Picture": {"$base64": true, "encoded": "FRwCx60F/g=="}}
""".strip(),
)
],
)
def test_custom_json_encoder(obj, expected):
actual = json.dumps(obj, cls=utils.CustomJSONEncoder, sort_keys=True)
assert expected == actual
@pytest.mark.parametrize(
"bad_sql",
[
"update blah;",
"-- sql comment to skip\nupdate blah;",
"update blah set some_column='# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
"PRAGMA case_sensitive_like = true",
"SELECT * FROM pragma_not_on_allow_list('idx52')",
],
)
def test_validate_sql_select_bad(bad_sql):
with pytest.raises(utils.InvalidSql):
utils.validate_sql_select(bad_sql)
@pytest.mark.parametrize(
"good_sql",
[
"select count(*) from airports",
"select foo from bar",
"--sql comment to skip\nselect foo from bar",
"select '# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
"select 1 + 1",
"explain select 1 + 1",
"explain\nselect 1 + 1",
"explain query plan select 1 + 1",
"explain query plan\nselect 1 + 1",
"SELECT\nblah FROM foo",
"WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"explain WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"explain query plan WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"SELECT * FROM pragma_index_info('idx52')",
"select * from pragma_table_xinfo('table')",
],
)
def test_validate_sql_select_good(good_sql):
utils.validate_sql_select(good_sql)
@pytest.mark.parametrize("open_quote,close_quote", [('"', '"'), ("[", "]")])
def test_detect_fts(open_quote, close_quote):
sql = """
CREATE TABLE "Dumb_Table" (
"TreeID" INTEGER,
"qSpecies" TEXT
);
CREATE TABLE "Street_Tree_List" (
"TreeID" INTEGER,
"qSpecies" TEXT,
"qAddress" TEXT,
"SiteOrder" INTEGER,
"qSiteInfo" TEXT,
"PlantType" TEXT,
"qCaretaker" TEXT
);
CREATE VIEW Test_View AS SELECT * FROM Dumb_Table;
CREATE VIRTUAL TABLE {open}Street_Tree_List_fts{close} USING FTS4 ("qAddress", "qCaretaker", "qSpecies", content={open}Street_Tree_List{close});
CREATE VIRTUAL TABLE r USING rtree(a, b, c);
""".format(
open=open_quote, close=close_quote
)
conn = utils.sqlite3.connect(":memory:")
conn.executescript(sql)
assert None is utils.detect_fts(conn, "Dumb_Table")
assert None is utils.detect_fts(conn, "Test_View")
assert None is utils.detect_fts(conn, "r")
assert "Street_Tree_List_fts" == utils.detect_fts(conn, "Street_Tree_List")
@pytest.mark.parametrize("table", ("regular", "has'single quote"))
def test_detect_fts_different_table_names(table):
sql = """
CREATE TABLE [{table}] (
"TreeID" INTEGER,
"qSpecies" TEXT
);
CREATE VIRTUAL TABLE [{table}_fts] USING FTS4 ("qSpecies", content="{table}");
""".format(
table=table
)
conn = utils.sqlite3.connect(":memory:")
conn.executescript(sql)
assert "{table}_fts".format(table=table) == utils.detect_fts(conn, table)
@pytest.mark.parametrize(
"url,expected",
[
("http://www.google.com/", True),
("https://example.com/", True),
("www.google.com", False),
("http://www.google.com/ is a search engine", False),
],
)
def test_is_url(url, expected):
assert expected == utils.is_url(url)
@pytest.mark.parametrize(
"s,expected",
[
("simple", "simple"),
("MixedCase", "MixedCase"),
("-no-leading-hyphens", "no-leading-hyphens-65bea6"),
("_no-leading-underscores", "no-leading-underscores-b921bc"),
("no spaces", "no-spaces-7088d7"),
("-", "336d5e"),
("no $ characters", "no--characters-59e024"),
],
)
def test_to_css_class(s, expected):
assert expected == utils.to_css_class(s)
def test_temporary_docker_directory_uses_hard_link():
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with open("hello", "w") as fp:
fp.write("world")
# Default usage of this should use symlink
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options=None,
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note=None,
secret="secret",
) as temp_docker:
hello = os.path.join(temp_docker, "hello")
with open(hello) as fp:
assert "world" == fp.read()
# It should be a hard link
assert 2 == os.stat(hello).st_nlink
@patch("os.link")
def test_temporary_docker_directory_uses_copy_if_hard_link_fails(mock_link):
# Copy instead if os.link raises OSError (normally due to different device)
mock_link.side_effect = OSError
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with open("hello", "w") as fp:
fp.write("world")
# Default usage of this should use symlink
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options=None,
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note=None,
secret=None,
) as temp_docker:
hello = os.path.join(temp_docker, "hello")
with open(hello) as fp:
assert "world" == fp.read()
# It should be a copy, not a hard link
assert 1 == os.stat(hello).st_nlink
def test_temporary_docker_directory_quotes_args():
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with open("hello", "w") as fp:
fp.write("world")
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options="--$HOME",
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note="$PWD",
secret="secret",
) as temp_docker:
df = os.path.join(temp_docker, "Dockerfile")
with open(df) as fp:
df_contents = fp.read()
assert "'$PWD'" in df_contents
assert "'--$HOME'" in df_contents
assert "ENV DATASETTE_SECRET 'secret'" in df_contents
def test_compound_keys_after_sql():
assert "((a > :p0))" == utils.compound_keys_after_sql(["a"])
assert """
((a > :p0)
or
(a = :p0 and b > :p1))
""".strip() == utils.compound_keys_after_sql(
["a", "b"]
)
assert """
((a > :p0)
or
(a = :p0 and b > :p1)
or
(a = :p0 and b = :p1 and c > :p2))
""".strip() == utils.compound_keys_after_sql(
["a", "b", "c"]
)
async def table_exists(table):
return table == "exists.csv"
@pytest.mark.asyncio
@pytest.mark.parametrize(
"table_and_format,expected_table,expected_format",
[
("blah", "blah", None),
("blah.csv", "blah", "csv"),
("blah.json", "blah", "json"),
("blah.baz", "blah.baz", None),
("exists.csv", "exists.csv", None),
],
)
async def test_resolve_table_and_format(
table_and_format, expected_table, expected_format
):
actual_table, actual_format = await utils.resolve_table_and_format(
table_and_format, table_exists, ["json"]
)
assert expected_table == actual_table
assert expected_format == actual_format
def test_table_columns():
conn = sqlite3.connect(":memory:")
conn.executescript(
"""
create table places (id integer primary key, name text, bob integer)
"""
)
assert ["id", "name", "bob"] == utils.table_columns(conn, "places")
@pytest.mark.parametrize(
"path,format,extra_qs,expected",
[
("/foo?sql=select+1", "csv", {}, "/foo.csv?sql=select+1"),
("/foo?sql=select+1", "json", {}, "/foo.json?sql=select+1"),
("/foo/bar", "json", {}, "/foo/bar.json"),
("/foo/bar", "csv", {}, "/foo/bar.csv"),
("/foo/bar", "csv", {"_dl": 1}, "/foo/bar.csv?_dl=1"),
(
"/sf-trees/Street_Tree_List?_search=cherry&_size=1000",
"csv",
{"_dl": 1},
"/sf-trees/Street_Tree_List.csv?_search=cherry&_size=1000&_dl=1",
),
],
)
def test_path_with_format(path, format, extra_qs, expected):
request = Request.fake(path)
actual = utils.path_with_format(request=request, format=format, extra_qs=extra_qs)
assert expected == actual
@pytest.mark.parametrize(
"bytes,expected",
[
(120, "120 bytes"),
(1024, "1.0 KB"),
(1024 * 1024, "1.0 MB"),
(1024 * 1024 * 1024, "1.0 GB"),
(1024 * 1024 * 1024 * 1.3, "1.3 GB"),
(1024 * 1024 * 1024 * 1024, "1.0 TB"),
],
)
def test_format_bytes(bytes, expected):
assert expected == utils.format_bytes(bytes)
@pytest.mark.parametrize(
"query,expected",
[
("dog", '"dog"'),
("cat,", '"cat,"'),
("cat dog", '"cat" "dog"'),
# If a phrase is already double quoted, leave it so
('"cat dog"', '"cat dog"'),
('"cat dog" fish', '"cat dog" "fish"'),
# Sensibly handle unbalanced double quotes
('cat"', '"cat"'),
('"cat dog" "fish', '"cat dog" "fish"'),
],
)
def test_escape_fts(query, expected):
assert expected == utils.escape_fts(query)
@pytest.mark.parametrize(
"input,expected",
[
("dog", "dog"),
('dateutil_parse("1/2/2020")', r"dateutil_parse(\0000221/2/2020\000022)"),
("this\r\nand\r\nthat", r"this\00000Aand\00000Athat"),
],
)
def test_escape_css_string(input, expected):
assert expected == utils.escape_css_string(input)
def test_check_connection_spatialite_raises():
path = str(pathlib.Path(__file__).parent / "spatialite.db")
conn = sqlite3.connect(path)
with pytest.raises(utils.SpatialiteConnectionProblem):
utils.check_connection(conn)
def test_check_connection_passes():
conn = sqlite3.connect(":memory:")
utils.check_connection(conn)
def test_call_with_supported_arguments():
def foo(a, b):
return f"{a}+{b}"
assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2)
assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2, c=3)
with pytest.raises(TypeError):
utils.call_with_supported_arguments(foo, a=1)
@pytest.mark.parametrize(
"data,should_raise",
[
([["foo", "bar"], ["foo", "baz"]], False),
([("foo", "bar"), ("foo", "baz")], False),
((["foo", "bar"], ["foo", "baz"]), False),
([["foo", "bar"], ["foo", "baz", "bax"]], True),
({"foo": ["bar", "baz"]}, False),
({"foo": ("bar", "baz")}, False),
({"foo": "bar"}, True),
],
)
def test_multi_params(data, should_raise):
if should_raise:
with pytest.raises(AssertionError):
utils.MultiParams(data)
return
p1 = utils.MultiParams(data)
assert "bar" == p1["foo"]
assert ["bar", "baz"] == list(p1.getlist("foo"))
@pytest.mark.parametrize(
"actor,allow,expected",
[
# Default is to allow:
(None, None, True),
# {} means deny-all:
(None, {}, False),
({"id": "root"}, {}, False),
# true means allow-all
({"id": "root"}, True, True),
(None, True, True),
# false means deny-all
({"id": "root"}, False, False),
(None, False, False),
# Special case for "unauthenticated": true
(None, {"unauthenticated": True}, True),
(None, {"unauthenticated": False}, False),
# Match on just one property:
(None, {"id": "root"}, False),
({"id": "root"}, None, True),
({"id": "simon", "staff": True}, {"staff": True}, True),
({"id": "simon", "staff": False}, {"staff": True}, False),
# Special "*" value for any key:
({"id": "root"}, {"id": "*"}, True),
({}, {"id": "*"}, False),
({"name": "root"}, {"id": "*"}, False),
# Supports single strings or list of values:
({"id": "root"}, {"id": "bob"}, False),
({"id": "root"}, {"id": ["bob"]}, False),
({"id": "root"}, {"id": "root"}, True),
({"id": "root"}, {"id": ["root"]}, True),
# Any matching role will work:
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["staff"]}, True),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev"]}, True),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["otter"]}, False),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev", "otter"]}, True),
({"id": "garry", "roles": []}, {"roles": ["staff"]}, False),
({"id": "garry"}, {"roles": ["staff"]}, False),
# Any single matching key works:
({"id": "root"}, {"bot_id": "my-bot", "id": ["root"]}, True),
],
)
def test_actor_matches_allow(actor, allow, expected):
assert expected == utils.actor_matches_allow(actor, allow)
@pytest.mark.parametrize(
"config,expected",
[
({"foo": "bar"}, {"foo": "bar"}),
({"$env": "FOO"}, "x"),
({"k": {"$env": "FOO"}}, {"k": "x"}),
([{"k": {"$env": "FOO"}}, {"z": {"$env": "FOO"}}], [{"k": "x"}, {"z": "x"}]),
({"k": [{"in_a_list": {"$env": "FOO"}}]}, {"k": [{"in_a_list": "x"}]}),
],
)
def test_resolve_env_secrets(config, expected):
assert expected == utils.resolve_env_secrets(config, {"FOO": "x"})
@pytest.mark.parametrize(
"actor,expected",
[
({"id": "blah"}, "blah"),
({"id": "blah", "login": "l"}, "l"),
({"id": "blah", "login": "l"}, "l"),
({"id": "blah", "login": "l", "username": "u"}, "u"),
({"login": "l", "name": "n"}, "n"),
(
{"id": "blah", "login": "l", "username": "u", "name": "n", "display": "d"},
"d",
),
({"weird": "shape"}, "{'weird': 'shape'}"),
],
)
def test_display_actor(actor, expected):
assert expected == utils.display_actor(actor)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"dbs,expected_path",
[
(["one_table"], "/one/one"),
(["two_tables"], "/two"),
(["one_table", "two_tables"], "/"),
],
)
async def test_initial_path_for_datasette(tmp_path_factory, dbs, expected_path):
db_dir = tmp_path_factory.mktemp("dbs")
one_table = str(db_dir / "one.db")
sqlite3.connect(one_table).execute("create table one (id integer primary key)")
two_tables = str(db_dir / "two.db")
sqlite3.connect(two_tables).execute("create table two (id integer primary key)")
sqlite3.connect(two_tables).execute("create table three (id integer primary key)")
datasette = Datasette(
[{"one_table": one_table, "two_tables": two_tables}[db] for db in dbs]
)
path = await utils.initial_path_for_datasette(datasette)
assert path == expected_path
@pytest.mark.parametrize(
"content,expected",
(
("title: Hello", {"title": "Hello"}),
('{"title": "Hello"}', {"title": "Hello"}),
("{{ this }} is {{ bad }}", None),
),
)
def test_parse_metadata(content, expected):
if expected is None:
with pytest.raises(utils.BadMetadataError):
utils.parse_metadata(content)
else:
assert utils.parse_metadata(content) == expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
"sql,expected",
(
("select 1", []),
("select 1 + :one", ["one"]),
("select 1 + :one + :two", ["one", "two"]),
("select 'bob' || '0:00' || :cat", ["cat"]),
("select this is invalid :one, :two, :three", ["one", "two", "three"]),
),
)
async def test_derive_named_parameters(sql, expected):
ds = Datasette([], memory=True)
db = ds.get_database("_memory")
params = await utils.derive_named_parameters(db, sql)
assert params == expected
@pytest.mark.parametrize(
"original,expected",
(
("abc", "abc"),
("/foo/bar", "~2Ffoo~2Fbar"),
("/-/bar", "~2F-~2Fbar"),
("-/db-/table.csv", "-~2Fdb-~2Ftable~2Ecsv"),
(r"%~-/", "~25~7E-~2F"),
("~25~7E~2D~2F", "~7E25~7E7E~7E2D~7E2F"),
),
)
def test_tilde_encoding(original, expected):
actual = utils.tilde_encode(original)
assert actual == expected
# And test round-trip
assert original == utils.tilde_decode(actual)
avg_line_length: 31.74732 | max_line_length: 176 | alphanum_fraction: 0.556268
content_no_comment:
from datasette.app import Datasette
from datasette import utils
from datasette.utils.asgi import Request
from datasette.utils.sqlite import sqlite3
import json
import os
import pathlib
import pytest
import tempfile
from unittest.mock import patch
@pytest.mark.parametrize(
"path,expected",
[
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("123,433,112", ["123", "433", "112"]),
("123~2C433,112", ["123,433", "112"]),
("123~2F433~2F112", ["123/433/112"]),
],
)
def test_urlsafe_components(path, expected):
assert expected == utils.urlsafe_components(path)
@pytest.mark.parametrize(
"path,added_args,expected",
[
("/foo", {"bar": 1}, "/foo?bar=1"),
("/foo?bar=1", {"baz": 2}, "/foo?bar=1&baz=2"),
("/foo?bar=1&bar=2", {"baz": 3}, "/foo?bar=1&bar=2&baz=3"),
("/foo?bar=1", {"bar": None}, "/foo"),
(
"/?_facet=prim_state&_facet=area_name",
(("prim_state", "GA"),),
"/?_facet=prim_state&_facet=area_name&prim_state=GA",
),
(
"/?_facet=state&_facet=city&state=MI",
(("city", "Detroit"),),
"/?_facet=state&_facet=city&state=MI&city=Detroit",
),
(
"/?_facet=state&_facet=city",
(("_facet", "planet_int"),),
"/?_facet=state&_facet=city&_facet=planet_int",
),
],
)
def test_path_with_added_args(path, added_args, expected):
request = Request.fake(path)
actual = utils.path_with_added_args(request, added_args)
assert expected == actual
@pytest.mark.parametrize(
"path,args,expected",
[
("/foo?bar=1", {"bar"}, "/foo"),
("/foo?bar=1&baz=2", {"bar"}, "/foo?baz=2"),
("/foo?bar=1&bar=2&bar=3", {"bar": "2"}, "/foo?bar=1&bar=3"),
],
)
def test_path_with_removed_args(path, args, expected):
request = Request.fake(path)
actual = utils.path_with_removed_args(request, args)
assert expected == actual
request = Request.fake("/")
actual = utils.path_with_removed_args(request, args, path=path)
assert expected == actual
@pytest.mark.parametrize(
"path,args,expected",
[
("/foo?bar=1", {"bar": 2}, "/foo?bar=2"),
("/foo?bar=1&baz=2", {"bar": None}, "/foo?baz=2"),
],
)
def test_path_with_replaced_args(path, args, expected):
request = Request.fake(path)
actual = utils.path_with_replaced_args(request, args)
assert expected == actual
@pytest.mark.parametrize(
"row,pks,expected_path",
[
({"A": "foo", "B": "bar"}, ["A", "B"], "foo,bar"),
({"A": "f,o", "B": "bar"}, ["A", "B"], "f~2Co,bar"),
({"A": 123}, ["A"], "123"),
(
utils.CustomRow(
["searchable_id", "tag"],
[
("searchable_id", {"value": 1, "label": "1"}),
("tag", {"value": "feline", "label": "feline"}),
],
),
["searchable_id", "tag"],
"1,feline",
),
],
)
def test_path_from_row_pks(row, pks, expected_path):
actual_path = utils.path_from_row_pks(row, pks, False)
assert expected_path == actual_path
@pytest.mark.parametrize(
"obj,expected",
[
(
{
"Description": "Soft drinks",
"Picture": b"\x15\x1c\x02\xc7\xad\x05\xfe",
"CategoryID": 1,
},
"""
{"CategoryID": 1, "Description": "Soft drinks", "Picture": {"$base64": true, "encoded": "FRwCx60F/g=="}}
""".strip(),
)
],
)
def test_custom_json_encoder(obj, expected):
actual = json.dumps(obj, cls=utils.CustomJSONEncoder, sort_keys=True)
assert expected == actual
@pytest.mark.parametrize(
"bad_sql",
[
"update blah;",
"-- sql comment to skip\nupdate blah;",
"update blah set some_column='# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
"PRAGMA case_sensitive_like = true",
"SELECT * FROM pragma_not_on_allow_list('idx52')",
],
)
def test_validate_sql_select_bad(bad_sql):
with pytest.raises(utils.InvalidSql):
utils.validate_sql_select(bad_sql)
@pytest.mark.parametrize(
"good_sql",
[
"select count(*) from airports",
"select foo from bar",
"--sql comment to skip\nselect foo from bar",
"select '# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
"select 1 + 1",
"explain select 1 + 1",
"explain\nselect 1 + 1",
"explain query plan select 1 + 1",
"explain query plan\nselect 1 + 1",
"SELECT\nblah FROM foo",
"WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"explain WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"explain query plan WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"SELECT * FROM pragma_index_info('idx52')",
"select * from pragma_table_xinfo('table')",
],
)
def test_validate_sql_select_good(good_sql):
utils.validate_sql_select(good_sql)
@pytest.mark.parametrize("open_quote,close_quote", [('"', '"'), ("[", "]")])
def test_detect_fts(open_quote, close_quote):
sql = """
CREATE TABLE "Dumb_Table" (
"TreeID" INTEGER,
"qSpecies" TEXT
);
CREATE TABLE "Street_Tree_List" (
"TreeID" INTEGER,
"qSpecies" TEXT,
"qAddress" TEXT,
"SiteOrder" INTEGER,
"qSiteInfo" TEXT,
"PlantType" TEXT,
"qCaretaker" TEXT
);
CREATE VIEW Test_View AS SELECT * FROM Dumb_Table;
CREATE VIRTUAL TABLE {open}Street_Tree_List_fts{close} USING FTS4 ("qAddress", "qCaretaker", "qSpecies", content={open}Street_Tree_List{close});
CREATE VIRTUAL TABLE r USING rtree(a, b, c);
""".format(
open=open_quote, close=close_quote
)
conn = utils.sqlite3.connect(":memory:")
conn.executescript(sql)
assert None is utils.detect_fts(conn, "Dumb_Table")
assert None is utils.detect_fts(conn, "Test_View")
assert None is utils.detect_fts(conn, "r")
assert "Street_Tree_List_fts" == utils.detect_fts(conn, "Street_Tree_List")
@pytest.mark.parametrize("table", ("regular", "has'single quote"))
def test_detect_fts_different_table_names(table):
sql = """
CREATE TABLE [{table}] (
"TreeID" INTEGER,
"qSpecies" TEXT
);
CREATE VIRTUAL TABLE [{table}_fts] USING FTS4 ("qSpecies", content="{table}");
""".format(
table=table
)
conn = utils.sqlite3.connect(":memory:")
conn.executescript(sql)
assert "{table}_fts".format(table=table) == utils.detect_fts(conn, table)
@pytest.mark.parametrize(
"url,expected",
[
("http://www.google.com/", True),
("https://example.com/", True),
("www.google.com", False),
("http://www.google.com/ is a search engine", False),
],
)
def test_is_url(url, expected):
assert expected == utils.is_url(url)
@pytest.mark.parametrize(
"s,expected",
[
("simple", "simple"),
("MixedCase", "MixedCase"),
("-no-leading-hyphens", "no-leading-hyphens-65bea6"),
("_no-leading-underscores", "no-leading-underscores-b921bc"),
("no spaces", "no-spaces-7088d7"),
("-", "336d5e"),
("no $ characters", "no--characters-59e024"),
],
)
def test_to_css_class(s, expected):
assert expected == utils.to_css_class(s)
def test_temporary_docker_directory_uses_hard_link():
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with open("hello", "w") as fp:
fp.write("world")
# Default usage of this should use symlink
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options=None,
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note=None,
secret="secret",
) as temp_docker:
hello = os.path.join(temp_docker, "hello")
with open(hello) as fp:
assert "world" == fp.read()
# It should be a hard link
assert 2 == os.stat(hello).st_nlink
@patch("os.link")
def test_temporary_docker_directory_uses_copy_if_hard_link_fails(mock_link):
# Copy instead if os.link raises OSError (normally due to different device)
mock_link.side_effect = OSError
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with open("hello", "w") as fp:
fp.write("world")
# Default usage of this should use symlink
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options=None,
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note=None,
secret=None,
) as temp_docker:
hello = os.path.join(temp_docker, "hello")
with open(hello) as fp:
assert "world" == fp.read()
# It should be a copy, not a hard link
assert 1 == os.stat(hello).st_nlink
def test_temporary_docker_directory_quotes_args():
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with open("hello", "w") as fp:
fp.write("world")
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options="--$HOME",
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note="$PWD",
secret="secret",
) as temp_docker:
df = os.path.join(temp_docker, "Dockerfile")
with open(df) as fp:
df_contents = fp.read()
assert "'$PWD'" in df_contents
assert "'--$HOME'" in df_contents
assert "ENV DATASETTE_SECRET 'secret'" in df_contents
def test_compound_keys_after_sql():
assert "((a > :p0))" == utils.compound_keys_after_sql(["a"])
assert """
((a > :p0)
or
(a = :p0 and b > :p1))
""".strip() == utils.compound_keys_after_sql(
["a", "b"]
)
assert """
((a > :p0)
or
(a = :p0 and b > :p1)
or
(a = :p0 and b = :p1 and c > :p2))
""".strip() == utils.compound_keys_after_sql(
["a", "b", "c"]
)
async def table_exists(table):
return table == "exists.csv"
@pytest.mark.asyncio
@pytest.mark.parametrize(
"table_and_format,expected_table,expected_format",
[
("blah", "blah", None),
("blah.csv", "blah", "csv"),
("blah.json", "blah", "json"),
("blah.baz", "blah.baz", None),
("exists.csv", "exists.csv", None),
],
)
async def test_resolve_table_and_format(
table_and_format, expected_table, expected_format
):
actual_table, actual_format = await utils.resolve_table_and_format(
table_and_format, table_exists, ["json"]
)
assert expected_table == actual_table
assert expected_format == actual_format
def test_table_columns():
conn = sqlite3.connect(":memory:")
conn.executescript(
"""
create table places (id integer primary key, name text, bob integer)
"""
)
assert ["id", "name", "bob"] == utils.table_columns(conn, "places")
@pytest.mark.parametrize(
"path,format,extra_qs,expected",
[
("/foo?sql=select+1", "csv", {}, "/foo.csv?sql=select+1"),
("/foo?sql=select+1", "json", {}, "/foo.json?sql=select+1"),
("/foo/bar", "json", {}, "/foo/bar.json"),
("/foo/bar", "csv", {}, "/foo/bar.csv"),
("/foo/bar", "csv", {"_dl": 1}, "/foo/bar.csv?_dl=1"),
(
"/sf-trees/Street_Tree_List?_search=cherry&_size=1000",
"csv",
{"_dl": 1},
"/sf-trees/Street_Tree_List.csv?_search=cherry&_size=1000&_dl=1",
),
],
)
def test_path_with_format(path, format, extra_qs, expected):
request = Request.fake(path)
actual = utils.path_with_format(request=request, format=format, extra_qs=extra_qs)
assert expected == actual
@pytest.mark.parametrize(
"bytes,expected",
[
(120, "120 bytes"),
(1024, "1.0 KB"),
(1024 * 1024, "1.0 MB"),
(1024 * 1024 * 1024, "1.0 GB"),
(1024 * 1024 * 1024 * 1.3, "1.3 GB"),
(1024 * 1024 * 1024 * 1024, "1.0 TB"),
],
)
def test_format_bytes(bytes, expected):
assert expected == utils.format_bytes(bytes)
@pytest.mark.parametrize(
"query,expected",
[
("dog", '"dog"'),
("cat,", '"cat,"'),
("cat dog", '"cat" "dog"'),
# If a phrase is already double quoted, leave it so
('"cat dog"', '"cat dog"'),
('"cat dog" fish', '"cat dog" "fish"'),
# Sensibly handle unbalanced double quotes
('cat"', '"cat"'),
('"cat dog" "fish', '"cat dog" "fish"'),
],
)
def test_escape_fts(query, expected):
assert expected == utils.escape_fts(query)
@pytest.mark.parametrize(
"input,expected",
[
("dog", "dog"),
('dateutil_parse("1/2/2020")', r"dateutil_parse(\0000221/2/2020\000022)"),
("this\r\nand\r\nthat", r"this\00000Aand\00000Athat"),
],
)
def test_escape_css_string(input, expected):
assert expected == utils.escape_css_string(input)
def test_check_connection_spatialite_raises():
path = str(pathlib.Path(__file__).parent / "spatialite.db")
conn = sqlite3.connect(path)
with pytest.raises(utils.SpatialiteConnectionProblem):
utils.check_connection(conn)
def test_check_connection_passes():
conn = sqlite3.connect(":memory:")
utils.check_connection(conn)
def test_call_with_supported_arguments():
def foo(a, b):
return f"{a}+{b}"
assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2)
assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2, c=3)
with pytest.raises(TypeError):
utils.call_with_supported_arguments(foo, a=1)
@pytest.mark.parametrize(
"data,should_raise",
[
([["foo", "bar"], ["foo", "baz"]], False),
([("foo", "bar"), ("foo", "baz")], False),
((["foo", "bar"], ["foo", "baz"]), False),
([["foo", "bar"], ["foo", "baz", "bax"]], True),
({"foo": ["bar", "baz"]}, False),
({"foo": ("bar", "baz")}, False),
({"foo": "bar"}, True),
],
)
def test_multi_params(data, should_raise):
if should_raise:
with pytest.raises(AssertionError):
utils.MultiParams(data)
return
p1 = utils.MultiParams(data)
assert "bar" == p1["foo"]
assert ["bar", "baz"] == list(p1.getlist("foo"))
@pytest.mark.parametrize(
"actor,allow,expected",
[
# Default is to allow:
(None, None, True),
# {} means deny-all:
(None, {}, False),
({"id": "root"}, {}, False),
# true means allow-all
({"id": "root"}, True, True),
(None, True, True),
# false means deny-all
({"id": "root"}, False, False),
(None, False, False),
# Special case for "unauthenticated": true
(None, {"unauthenticated": True}, True),
(None, {"unauthenticated": False}, False),
# Match on just one property:
(None, {"id": "root"}, False),
({"id": "root"}, None, True),
({"id": "simon", "staff": True}, {"staff": True}, True),
({"id": "simon", "staff": False}, {"staff": True}, False),
# Special "*" value for any key:
({"id": "root"}, {"id": "*"}, True),
({}, {"id": "*"}, False),
({"name": "root"}, {"id": "*"}, False),
# Supports single strings or list of values:
({"id": "root"}, {"id": "bob"}, False),
({"id": "root"}, {"id": ["bob"]}, False),
({"id": "root"}, {"id": "root"}, True),
({"id": "root"}, {"id": ["root"]}, True),
# Any matching role will work:
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["staff"]}, True),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev"]}, True),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["otter"]}, False),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev", "otter"]}, True),
({"id": "garry", "roles": []}, {"roles": ["staff"]}, False),
({"id": "garry"}, {"roles": ["staff"]}, False),
# Any single matching key works:
({"id": "root"}, {"bot_id": "my-bot", "id": ["root"]}, True),
],
)
def test_actor_matches_allow(actor, allow, expected):
assert expected == utils.actor_matches_allow(actor, allow)
@pytest.mark.parametrize(
"config,expected",
[
({"foo": "bar"}, {"foo": "bar"}),
({"$env": "FOO"}, "x"),
({"k": {"$env": "FOO"}}, {"k": "x"}),
([{"k": {"$env": "FOO"}}, {"z": {"$env": "FOO"}}], [{"k": "x"}, {"z": "x"}]),
({"k": [{"in_a_list": {"$env": "FOO"}}]}, {"k": [{"in_a_list": "x"}]}),
],
)
def test_resolve_env_secrets(config, expected):
assert expected == utils.resolve_env_secrets(config, {"FOO": "x"})
@pytest.mark.parametrize(
"actor,expected",
[
({"id": "blah"}, "blah"),
({"id": "blah", "login": "l"}, "l"),
({"id": "blah", "login": "l"}, "l"),
({"id": "blah", "login": "l", "username": "u"}, "u"),
({"login": "l", "name": "n"}, "n"),
(
{"id": "blah", "login": "l", "username": "u", "name": "n", "display": "d"},
"d",
),
({"weird": "shape"}, "{'weird': 'shape'}"),
],
)
def test_display_actor(actor, expected):
assert expected == utils.display_actor(actor)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"dbs,expected_path",
[
(["one_table"], "/one/one"),
(["two_tables"], "/two"),
(["one_table", "two_tables"], "/"),
],
)
async def test_initial_path_for_datasette(tmp_path_factory, dbs, expected_path):
db_dir = tmp_path_factory.mktemp("dbs")
one_table = str(db_dir / "one.db")
sqlite3.connect(one_table).execute("create table one (id integer primary key)")
two_tables = str(db_dir / "two.db")
sqlite3.connect(two_tables).execute("create table two (id integer primary key)")
sqlite3.connect(two_tables).execute("create table three (id integer primary key)")
datasette = Datasette(
[{"one_table": one_table, "two_tables": two_tables}[db] for db in dbs]
)
path = await utils.initial_path_for_datasette(datasette)
assert path == expected_path
@pytest.mark.parametrize(
"content,expected",
(
("title: Hello", {"title": "Hello"}),
('{"title": "Hello"}', {"title": "Hello"}),
("{{ this }} is {{ bad }}", None),
),
)
def test_parse_metadata(content, expected):
if expected is None:
with pytest.raises(utils.BadMetadataError):
utils.parse_metadata(content)
else:
assert utils.parse_metadata(content) == expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
"sql,expected",
(
("select 1", []),
("select 1 + :one", ["one"]),
("select 1 + :one + :two", ["one", "two"]),
("select 'bob' || '0:00' || :cat", ["cat"]),
("select this is invalid :one, :two, :three", ["one", "two", "three"]),
),
)
async def test_derive_named_parameters(sql, expected):
ds = Datasette([], memory=True)
db = ds.get_database("_memory")
params = await utils.derive_named_parameters(db, sql)
assert params == expected
@pytest.mark.parametrize(
"original,expected",
(
("abc", "abc"),
("/foo/bar", "~2Ffoo~2Fbar"),
("/-/bar", "~2F-~2Fbar"),
("-/db-/table.csv", "-~2Fdb-~2Ftable~2Ecsv"),
(r"%~-/", "~25~7E-~2F"),
("~25~7E~2D~2F", "~7E25~7E7E~7E2D~7E2F"),
),
)
def test_tilde_encoding(original, expected):
actual = utils.tilde_encode(original)
assert actual == expected
# And test round-trip
assert original == utils.tilde_decode(actual)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790aae41bd5da16a5e05e117c4ed51e6e62bfed5 | size: 4,682 | ext: py | lang: Python
max_stars_repo: robashaw/basisopt @ c02fd307bc72c576ed298ea14648818b237d2f30, path basisopt/opt/eventemper.py, licenses ["MIT"], stars 4 (2022-03-23T09:22:54.000Z to 2022-03-26T13:18:45.000Z)
max_issues_repo: robashaw/basisopt @ c02fd307bc72c576ed298ea14648818b237d2f30, path basisopt/opt/eventemper.py, licenses ["MIT"], issues null
max_forks_repo: robashaw/basisopt @ c02fd307bc72c576ed298ea14648818b237d2f30, path basisopt/opt/eventemper.py, licenses ["MIT"], forks 1 (2022-03-23T09:22:54.000Z to 2022-03-23T09:22:54.000Z)
content:
import numpy as np
from mendeleev import element as md_element
from basisopt import api, data
from basisopt.exceptions import PropertyNotAvailable
from basisopt.basis import even_temper_expansion
from basisopt.basis.guesses import null_guess
from .preconditioners import unit
from .strategies import Strategy
_INITIAL_GUESS = (0.3, 2.0, 8)
class EvenTemperedStrategy(Strategy):
""" Implements a strategy for an even tempered basis set, where each angular
momentum shell is described by three parameters: (c, x, n)
Each exponent in that shell is then given by
y_k = c*(x**k) for k=0,...,n
--------------------------- ALGORITHM ----------------------------
Evaluate: energy (can change to any RMSE-compatible property)
Loss: root-mean-square error
Guess: null, uses _INITIAL_GUESS above
Pre-conditioner: None
Initialisation:
- Find minimum no. of shells needed
- max_l >= min_l
- generate initial parameters for each shell
First run:
- optimize parameters for each shell once, sequentially
Next shell in list not marked finished:
- re-optimise
- below threshold or n=max_n: mark finished
- above threshold: increment n
Repeat until all shells are marked finished.
Uses iteration, limited by two parameters:
max_n: max number of exponents in shell
target: threshold for objective function
------------------------------------------------------------------
Additional attributes:
shells (list): list of (c, x, n) parameter tuples
shell_done (list): list of flags for whether shell is finished (0) or not (1)
target (float): threshold for optimization delta
max_n (int): maximum number of primitives in shell expansion
max_l (int): maximum angular momentum shell to do;
if -1, does minimal configuration
first_run (bool): setting to True restarts optimization from beginning
last_objective (var): last value of objective function
"""
def __init__(self, eval_type='energy', target=1e-5, max_n=18, max_l=-1):
Strategy.__init__(self, eval_type=eval_type, pre=unit)
self.name = 'EvenTemper'
self.shells = []
self.shell_done = []
self.last_objective = 0
self.target = target
self.guess = null_guess
self.guess_params = {}
self.max_n = max_n
self.max_l = max_l
self.first_run = True
def set_basis_shells(self, basis, element):
"""Expands parameters into a basis set"""
basis[element] = even_temper_expansion(self.shells)
def initialise(self, basis, element):
if self.max_l < 0:
el = md_element(element.title())
l_list = [l for (n, l) in el.ec.conf.keys()]
min_l = len(set(l_list))
self.max_l = max(min_l, self.max_l)
self.shells = [_INITIAL_GUESS] * self.max_l
self.shell_done = [1] * self.max_l
self.set_basis_shells(basis, element)
self.last_objective = 0
def get_active(self, basis, element):
(c, x, _) = self.shells[self._step]
return np.array([c, x])
def set_active(self, values, basis, element):
(c, x, n) = self.shells[self._step]
c = max(values[0], 1e-5)
x = max(values[1], 1.01)
self.shells[self._step] = (c, x, n)
self.set_basis_shells(basis, element)
def next(self, basis, element, objective):
delta_objective = np.abs(self.last_objective - objective)
self.last_objective = objective
carry_on = True
if self.first_run:
self._step = self._step + 1
if self._step == self.max_l:
self.first_run = False
self._step = 0
(c, x, n) = self.shells[self._step]
self.shells[self._step] = (c, x, min(n+1, self.max_n))
else:
if delta_objective < self.target:
self.shell_done[self._step] = 0
self._step = (self._step + 1) % self.max_l
(c, x, n) = self.shells[self._step]
if n == self.max_n:
self.shell_done[self._step] = 0
elif self.shell_done[self._step] != 0:
self.shells[self._step] = (c, x, n+1)
carry_on = np.sum(self.shell_done) != 0
return carry_on
avg_line_length: 37.758065 | max_line_length: 89 | alphanum_fraction: 0.566852
content_no_comment:
import numpy as np
from mendeleev import element as md_element
from basisopt import api, data
from basisopt.exceptions import PropertyNotAvailable
from basisopt.basis import even_temper_expansion
from basisopt.basis.guesses import null_guess
from .preconditioners import unit
from .strategies import Strategy
_INITIAL_GUESS = (0.3, 2.0, 8)
class EvenTemperedStrategy(Strategy):
def __init__(self, eval_type='energy', target=1e-5, max_n=18, max_l=-1):
Strategy.__init__(self, eval_type=eval_type, pre=unit)
self.name = 'EvenTemper'
self.shells = []
self.shell_done = []
self.last_objective = 0
self.target = target
self.guess = null_guess
self.guess_params = {}
self.max_n = max_n
self.max_l = max_l
self.first_run = True
def set_basis_shells(self, basis, element):
basis[element] = even_temper_expansion(self.shells)
def initialise(self, basis, element):
if self.max_l < 0:
el = md_element(element.title())
l_list = [l for (n, l) in el.ec.conf.keys()]
min_l = len(set(l_list))
self.max_l = max(min_l, self.max_l)
self.shells = [_INITIAL_GUESS] * self.max_l
self.shell_done = [1] * self.max_l
self.set_basis_shells(basis, element)
self.last_objective = 0
def get_active(self, basis, element):
(c, x, _) = self.shells[self._step]
return np.array([c, x])
def set_active(self, values, basis, element):
(c, x, n) = self.shells[self._step]
c = max(values[0], 1e-5)
x = max(values[1], 1.01)
self.shells[self._step] = (c, x, n)
self.set_basis_shells(basis, element)
def next(self, basis, element, objective):
delta_objective = np.abs(self.last_objective - objective)
self.last_objective = objective
carry_on = True
if self.first_run:
self._step = self._step + 1
if self._step == self.max_l:
self.first_run = False
self._step = 0
(c, x, n) = self.shells[self._step]
self.shells[self._step] = (c, x, min(n+1, self.max_n))
else:
if delta_objective < self.target:
self.shell_done[self._step] = 0
self._step = (self._step + 1) % self.max_l
(c, x, n) = self.shells[self._step]
if n == self.max_n:
self.shell_done[self._step] = 0
elif self.shell_done[self._step] != 0:
self.shells[self._step] = (c, x, n+1)
carry_on = np.sum(self.shell_done) != 0
return carry_on
is_comment_constant_removed: true | is_sharp_comment_removed: true
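The eventemper.py record above describes each even-tempered shell by three parameters (c, x, n), with exponents y_k = c * x**k for k = 0, ..., n. A minimal stand-alone sketch of that expansion rule; the helper name below is illustrative and is not basisopt's own even_temper_expansion.

def even_tempered_exponents(c, x, n):
    # y_k = c * x**k for k = 0, ..., n (reading the docstring's indexing literally)
    return [c * (x ** k) for k in range(n + 1)]

# With the module's _INITIAL_GUESS = (0.3, 2.0, 8):
print(even_tempered_exponents(0.3, 2.0, 8))
# [0.3, 0.6, 1.2, 2.4, 4.8, 9.6, 19.2, 38.4, 76.8]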
hexsha: 790aaf424bd9657c1e4b6172ed739772d3cabb0d | size: 219 | ext: py | lang: Python
max_stars_repo: yash-hash/Profiles-rest-api @ 669961dd1a6a557622c6f856101d390a0b944794, path profiles_api/admin.py, licenses ["MIT"], stars null
max_issues_repo: yash-hash/Profiles-rest-api @ 669961dd1a6a557622c6f856101d390a0b944794, path profiles_api/admin.py, licenses ["MIT"], issues null
max_forks_repo: yash-hash/Profiles-rest-api @ 669961dd1a6a557622c6f856101d390a0b944794, path profiles_api/admin.py, licenses ["MIT"], forks null
content:
from django.contrib import admin
from profiles_api import models
# Register your models here.
admin.site.register(models.UserProfile) # registers the model on the admin site
admin.site.register(models.ProfileFeedItem)
avg_line_length: 31.285714 | max_line_length: 79 | alphanum_fraction: 0.826484
content_no_comment:
from django.contrib import admin
from profiles_api import models
admin.site.register(models.UserProfile)
admin.site.register(models.ProfileFeedItem)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790aaf4ad87a1b3d6110aa1152c9fa874d1b4c13 | size: 1,001 | ext: py | lang: Python
max_stars_repo: liuh127/Two-branch-dehazing @ 00f7612d38828688123b13323b33d48d08fbd3c0, path perceptual.py, licenses ["MIT"], stars 15 (2021-04-21T01:19:29.000Z to 2021-08-16T14:34:53.000Z)
max_issues_repo: sagnik3141/DW-GAN-Dehazing @ 2861089977876e2809f094b19dc529200af54f00, path perceptual.py, licenses ["MIT"], issues 6 (2021-04-22T03:28:04.000Z to 2021-09-21T13:18:06.000Z)
max_forks_repo: sagnik3141/DW-GAN-Dehazing @ 2861089977876e2809f094b19dc529200af54f00, path perceptual.py, licenses ["MIT"], forks 3 (2021-04-22T16:34:27.000Z to 2021-06-28T06:21:12.000Z)
content:
# --- Imports --- #
import torch
import torch.nn.functional as F
# --- Perceptual loss network --- #
class LossNetwork(torch.nn.Module):
def __init__(self, vgg_model):
super(LossNetwork, self).__init__()
self.vgg_layers = vgg_model
self.layer_name_mapping = {
'3': "relu1_2",
'8': "relu2_2",
'15': "relu3_3"
}
def output_features(self, x):
output = {}
for name, module in self.vgg_layers._modules.items():
x = module(x)
if name in self.layer_name_mapping:
output[self.layer_name_mapping[name]] = x
return list(output.values())
def forward(self, dehaze, gt):
loss = []
dehaze_features = self.output_features(dehaze)
gt_features = self.output_features(gt)
for dehaze_feature, gt_feature in zip(dehaze_features, gt_features):
loss.append(F.mse_loss(dehaze_feature, gt_feature))
return sum(loss)/len(loss)
avg_line_length: 33.366667 | max_line_length: 76 | alphanum_fraction: 0.598402
content_no_comment:
import torch
import torch.nn.functional as F
class LossNetwork(torch.nn.Module):
def __init__(self, vgg_model):
super(LossNetwork, self).__init__()
self.vgg_layers = vgg_model
self.layer_name_mapping = {
'3': "relu1_2",
'8': "relu2_2",
'15': "relu3_3"
}
def output_features(self, x):
output = {}
for name, module in self.vgg_layers._modules.items():
x = module(x)
if name in self.layer_name_mapping:
output[self.layer_name_mapping[name]] = x
return list(output.values())
def forward(self, dehaze, gt):
loss = []
dehaze_features = self.output_features(dehaze)
gt_features = self.output_features(gt)
for dehaze_feature, gt_feature in zip(dehaze_features, gt_features):
loss.append(F.mse_loss(dehaze_feature, gt_feature))
return sum(loss)/len(loss)
is_comment_constant_removed: true | is_sharp_comment_removed: true
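The perceptual.py record above defines LossNetwork, which pulls features from three VGG layers and averages the MSE between the corresponding feature maps. A hedged usage sketch, assuming torchvision's VGG16 feature stack exposes submodules named '3', '8', '15' as expected by layer_name_mapping; the tensor shapes and frozen-weights handling are illustrative assumptions.

import torch
from torchvision import models

# Assumption: the VGG16 convolutional stack matches the layer names used above.
vgg = models.vgg16(pretrained=True).features
for param in vgg.parameters():
    param.requires_grad = False  # the loss network is used frozen

loss_network = LossNetwork(vgg).eval()
dehaze = torch.rand(1, 3, 256, 256)
gt = torch.rand(1, 3, 256, 256)
perceptual_loss = loss_network(dehaze, gt)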
hexsha: 790ab10c114ed809a1b80f3e101c2509b9257268 | size: 332 | ext: py | lang: Python
max_stars_repo: 5225225/bar @ cc72eb45f21ac2b2e070c6d9f66b306ed51aef35, path modules/timeblock.py, licenses ["MIT"], stars 1 (2015-09-05T17:07:59.000Z to 2015-09-05T17:07:59.000Z)
max_issues_repo: 5225225/bar @ cc72eb45f21ac2b2e070c6d9f66b306ed51aef35, path modules/timeblock.py, licenses ["MIT"], issues null
max_forks_repo: 5225225/bar @ cc72eb45f21ac2b2e070c6d9f66b306ed51aef35, path modules/timeblock.py, licenses ["MIT"], forks 2 (2015-09-05T17:08:02.000Z to 2019-02-22T21:14:08.000Z)
content:
import linelib
import datetime
import signal
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
while True:
linelib.sendblock("date", {"full_text": datetime.datetime.now().strftime(
"%Y-%m-%e %H:%M:%S"
)})
linelib.sendPID("date")
linelib.waitsig(1)
avg_line_length: 18.444444 | max_line_length: 77 | alphanum_fraction: 0.671687
content_no_comment:
import linelib
import datetime
import signal
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
while True:
linelib.sendblock("date", {"full_text": datetime.datetime.now().strftime(
"%Y-%m-%e %H:%M:%S"
)})
linelib.sendPID("date")
linelib.waitsig(1)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ab1a20b5574df9d37d70dbe96e1b795d53953 | size: 103 | ext: py | lang: Python
max_stars_repo: qrsforever/workspace @ 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f, path python/learn/base/module/l1/pack/big/b1.py, licenses ["MIT"], stars 2 (2017-06-07T03:20:42.000Z to 2020-01-07T09:14:26.000Z)
max_issues_repo: qrsforever/workspace @ 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f, path python/learn/base/module/l1/pack/big/b1.py, licenses ["MIT"], issues null
max_forks_repo: qrsforever/workspace @ 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f, path python/learn/base/module/l1/pack/big/b1.py, licenses ["MIT"], forks null
content:
#!/usr/bin/python2.7
print "run here: pack/big/b1.py"
def b1_fun(): print "function: pack/big/b1.py"
avg_line_length: 17.166667 | max_line_length: 46 | alphanum_fraction: 0.679612
content_no_comment:
print "run here: pack/big/b1.py"
def b1_fun(): print "function: pack/big/b1.py"
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 790ab21527b6740f6f32ad4c5cfd07ec514bd8ee | size: 858 | ext: py | lang: Python
max_stars_repo: datarootsio/notion-dbs-data-quality @ 5d2c03d6754b5b7242d69557803ed40d1c27d29a, path src/build_data_source.py, licenses ["MIT"], stars 5 (2021-12-16T04:54:39.000Z to 2022-03-11T13:49:05.000Z)
max_issues_repo: datarootsio/notion-dbs-data-quality @ 5d2c03d6754b5b7242d69557803ed40d1c27d29a, path src/build_data_source.py, licenses ["MIT"], issues null
max_forks_repo: datarootsio/notion-dbs-data-quality @ 5d2c03d6754b5b7242d69557803ed40d1c27d29a, path src/build_data_source.py, licenses ["MIT"], forks null
content:
from ruamel import yaml
import great_expectations as ge
if __name__ == "__main__":
context = ge.get_context()
datasource_config = {
"name": "my_notion_pandas_data_source",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"my_notion_pandas_data_connector": {
"class_name": "RuntimeDataConnector",
"module_name": "great_expectations.datasource.data_connector",
"batch_identifiers": ["default_identifier_name"],
},
},
}
context.test_yaml_config(yaml.dump(datasource_config))
context.add_datasource(**datasource_config)
avg_line_length: 31.777778 | max_line_length: 78 | alphanum_fraction: 0.630536
content_no_comment:
from ruamel import yaml
import great_expectations as ge
if __name__ == "__main__":
context = ge.get_context()
datasource_config = {
"name": "my_notion_pandas_data_source",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"my_notion_pandas_data_connector": {
"class_name": "RuntimeDataConnector",
"module_name": "great_expectations.datasource.data_connector",
"batch_identifiers": ["default_identifier_name"],
},
},
}
context.test_yaml_config(yaml.dump(datasource_config))
context.add_datasource(**datasource_config)
is_comment_constant_removed: true | is_sharp_comment_removed: true
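The build_data_source.py record above registers a Pandas datasource with a RuntimeDataConnector. A hedged sketch of how a runtime batch might then be validated, assuming the Great Expectations v3 batch-request API; the asset name, suite name, and DataFrame below are placeholders, and the expectation suite is assumed to already exist in the context created by the script.

import pandas as pd
from great_expectations.core.batch import RuntimeBatchRequest

# Placeholder data; in the project this would come from the Notion databases.
df = pd.DataFrame({"name": ["a", "b"], "status": ["done", None]})

batch_request = RuntimeBatchRequest(
    datasource_name="my_notion_pandas_data_source",
    data_connector_name="my_notion_pandas_data_connector",
    data_asset_name="notion_rows",  # hypothetical label
    runtime_parameters={"batch_data": df},
    batch_identifiers={"default_identifier_name": "default"},
)

validator = context.get_validator(
    batch_request=batch_request,
    expectation_suite_name="notion.quality",  # assumed to exist already
)
validator.expect_column_values_to_not_be_null("status")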
hexsha: 790ab284d48ac4a8577668518e214ac28d1d4e82 | size: 6,649 | ext: py | lang: Python
max_stars_repo: michaelcrain/pdfquery @ 89ee378af2e0d97f75d7798ec91a86654b154265, path tests/tests.py, licenses ["MIT"], stars 2 (2017-07-13T19:37:36.000Z to 2021-08-25T04:47:35.000Z)
max_issues_repo: michaelcrain/pdfquery @ 89ee378af2e0d97f75d7798ec91a86654b154265, path tests/tests.py, licenses ["MIT"], issues null
max_forks_repo: michaelcrain/pdfquery @ 89ee378af2e0d97f75d7798ec91a86654b154265, path tests/tests.py, licenses ["MIT"], forks null
content:
# to run:
# pip install unittest2
# unit2 discover
#
# to debug:
# pip install nose
# nosetests --pdb
import StringIO
import sys
import pdfquery
import unittest2
from pdfquery.cache import FileCache
class TestPDFQuery(unittest2.TestCase):
"""
Various tests based on the IRS_1040A sample doc.
"""
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/IRS_1040A.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
"""
Test that converted XML hasn't changed from saved version.
"""
# get current XML for sample file
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# get previous XML
# this varies by Python version, because the float handling isn't quite
# the same
comparison_file = "tests/saved_output/IRS_1040A_output%s.xml" % (
"_python_2.6" if sys.version_info[0] == 2 and sys.version_info[1] < 7 else "")
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s to "
"tests/failed_output.xml." % comparison_file)
def test_selectors(self):
"""
Test the :contains and :in_bbox selectors.
"""
label = self.pdf.pq('LTTextLineHorizontal:contains("Your first name '
'and initial")')
self.assertEqual(len(label), 1)
left_corner = float(label.attr('x0'))
self.assertEqual(left_corner, 143.651)
bottom_corner = float(label.attr('y0'))
self.assertEqual(bottom_corner, 714.694)
name = self.pdf.pq('LTTextLineHorizontal:in_bbox("%s, %s, %s, %s")' %
(left_corner,
bottom_corner - 30,
left_corner + 150,
bottom_corner)
).text()
self.assertEqual(name, "John E.")
def test_extract(self):
"""
Test the extract() function.
"""
values = self.pdf.extract([
('with_parent', 'LTPage[pageid="1"]'),
('with_formatter', 'text'),
('last_name', 'LTTextLineHorizontal:in_bbox("315,680,395,700")'),
('spouse', 'LTTextLineHorizontal:in_bbox("170,650,220,680")'),
('with_parent', 'LTPage[pageid="2"]'),
('oath', 'LTTextLineHorizontal:contains("perjury")',
lambda match: match.text()[:30] + "..."),
('year', 'LTTextLineHorizontal:contains("Form 1040A (")',
lambda match: int(match.text()[-5:-1]))
])
self.assertDictEqual(values, {
'last_name': 'Michaels',
'spouse': 'Susan R.',
'oath': u'Under penalties of perjury, I ...',
'year': 2007
})
def test_page_numbers(self):
self.assertEqual(self.pdf.tree.getroot()[0].get('page_label'), '1')
class TestDocInfo(unittest2.TestCase):
def test_docinfo(self):
doc_info_results = [
["tests/samples/bug11.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Title': u'\u262d\U0001f61c\U0001f4a9Unicode is fun!',
'Author': 'Russkel', 'Creator': 'Firefox',
'ModDate': "D:20140528141914+08'00'",
'CreationDate': 'D:20140528061106Z', 'Subject': ''}],
["tests/samples/bug15.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Author': 'Brepols Publishers',
'Creator': 'PDFsharp 1.2.1269-g (www.pdfsharp.com)',
'AAPL_Keywords': "[u'Brepols', u'Publishers', u'CTLO']",
'Title': 'Exporter',
'ModDate': "D:20140614192741Z00'00'",
'Keywords': 'Brepols, Publishers, CTLO',
'CreationDate': "D:20140614192741Z00'00'",
'Subject': 'Extrait de la Library of Latin Texts - Series A'}],
["tests/samples/bug17.pdf",
{'CreationDate': 'D:20140328164512Z',
'Creator': 'Adobe InDesign CC (Macintosh)',
'ModDate': 'D:20140328164513Z',
'Producer': 'Adobe PDF Library 10.0.1', 'Trapped': '/False'}]
]
for file_path, expected_results in doc_info_results:
pdf = pdfquery.PDFQuery(file_path)
pdf.load(None)
self.assertDictEqual(
dict(pdf.tree.getroot().attrib),
expected_results
)
class TestUnicode(unittest2.TestCase):
def test_unicode_text(self):
pdf = pdfquery.PDFQuery("tests/samples/bug18.pdf")
pdf.load()
self.assertEqual(
pdf.pq('LTTextLineHorizontal:contains("Hop Hing Oils")').text(),
(u'5 Hop Hing Oils and Fats (Hong Kong) Ltd \uf06c '
u'\u7279\u5bf6\u7cbe\u88fd\u8c6c\u6cb9')
)
class TestAnnotations(unittest2.TestCase):
"""
Ensure that annotations such as links are getting added to the PDFs
properly, as discussed in issue #28.
"""
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/bug28.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
"""
Test that converted XML hasn't changed from saved version.
"""
# get current XML for sample file
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# get previous XML
comparison_file = 'tests/saved_output/bug28.xml'
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s "
"to tests/failed_output.xml." % comparison_file)
if __name__ == '__main__':
unittest2.main()
| 34.630208
| 90
| 0.563393
|
import StringIO
import sys
import pdfquery
import unittest2
from pdfquery.cache import FileCache
class TestPDFQuery(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/IRS_1040A.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# the same
comparison_file = "tests/saved_output/IRS_1040A_output%s.xml" % (
"_python_2.6" if sys.version_info[0] == 2 and sys.version_info[1] < 7 else "")
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s to "
"tests/failed_output.xml." % comparison_file)
def test_selectors(self):
label = self.pdf.pq('LTTextLineHorizontal:contains("Your first name '
'and initial")')
self.assertEqual(len(label), 1)
left_corner = float(label.attr('x0'))
self.assertEqual(left_corner, 143.651)
bottom_corner = float(label.attr('y0'))
self.assertEqual(bottom_corner, 714.694)
name = self.pdf.pq('LTTextLineHorizontal:in_bbox("%s, %s, %s, %s")' %
(left_corner,
bottom_corner - 30,
left_corner + 150,
bottom_corner)
).text()
self.assertEqual(name, "John E.")
def test_extract(self):
values = self.pdf.extract([
('with_parent', 'LTPage[pageid="1"]'),
('with_formatter', 'text'),
('last_name', 'LTTextLineHorizontal:in_bbox("315,680,395,700")'),
('spouse', 'LTTextLineHorizontal:in_bbox("170,650,220,680")'),
('with_parent', 'LTPage[pageid="2"]'),
('oath', 'LTTextLineHorizontal:contains("perjury")',
lambda match: match.text()[:30] + "..."),
('year', 'LTTextLineHorizontal:contains("Form 1040A (")',
lambda match: int(match.text()[-5:-1]))
])
self.assertDictEqual(values, {
'last_name': 'Michaels',
'spouse': 'Susan R.',
'oath': u'Under penalties of perjury, I ...',
'year': 2007
})
def test_page_numbers(self):
self.assertEqual(self.pdf.tree.getroot()[0].get('page_label'), '1')
class TestDocInfo(unittest2.TestCase):
def test_docinfo(self):
doc_info_results = [
["tests/samples/bug11.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Title': u'\u262d\U0001f61c\U0001f4a9Unicode is fun!',
'Author': 'Russkel', 'Creator': 'Firefox',
'ModDate': "D:20140528141914+08'00'",
'CreationDate': 'D:20140528061106Z', 'Subject': ''}],
["tests/samples/bug15.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Author': 'Brepols Publishers',
'Creator': 'PDFsharp 1.2.1269-g (www.pdfsharp.com)',
'AAPL_Keywords': "[u'Brepols', u'Publishers', u'CTLO']",
'Title': 'Exporter',
'ModDate': "D:20140614192741Z00'00'",
'Keywords': 'Brepols, Publishers, CTLO',
'CreationDate': "D:20140614192741Z00'00'",
'Subject': 'Extrait de la Library of Latin Texts - Series A'}],
["tests/samples/bug17.pdf",
{'CreationDate': 'D:20140328164512Z',
'Creator': 'Adobe InDesign CC (Macintosh)',
'ModDate': 'D:20140328164513Z',
'Producer': 'Adobe PDF Library 10.0.1', 'Trapped': '/False'}]
]
for file_path, expected_results in doc_info_results:
pdf = pdfquery.PDFQuery(file_path)
pdf.load(None)
self.assertDictEqual(
dict(pdf.tree.getroot().attrib),
expected_results
)
class TestUnicode(unittest2.TestCase):
def test_unicode_text(self):
pdf = pdfquery.PDFQuery("tests/samples/bug18.pdf")
pdf.load()
self.assertEqual(
pdf.pq('LTTextLineHorizontal:contains("Hop Hing Oils")').text(),
(u'5 Hop Hing Oils and Fats (Hong Kong) Ltd \uf06c '
u'\u7279\u5bf6\u7cbe\u88fd\u8c6c\u6cb9')
)
class TestAnnotations(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/bug28.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
# get current XML for sample file
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# get previous XML
comparison_file = 'tests/saved_output/bug28.xml'
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s "
"to tests/failed_output.xml." % comparison_file)
if __name__ == '__main__':
unittest2.main()
| true
| true
|
790ab2b565b507b84e4f23fb2640510b6fe33567
| 1,595
|
py
|
Python
|
ex3.py
|
Felipe-Gs/Dupla-2-F
|
d19854fdc6a3ddc0e59eae99bfff49c20cc434f3
|
[
"MIT"
] | 2
|
2021-10-15T18:55:15.000Z
|
2022-03-20T02:30:11.000Z
|
ex3.py
|
Felipe-Gs/Dupla-2-F
|
d19854fdc6a3ddc0e59eae99bfff49c20cc434f3
|
[
"MIT"
] | null | null | null |
ex3.py
|
Felipe-Gs/Dupla-2-F
|
d19854fdc6a3ddc0e59eae99bfff49c20cc434f3
|
[
"MIT"
] | null | null | null |
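# Reads N people and reports whether each one is eligible for mandatory
# military service (requires age >= 18, sex 'M', and health 'BOA').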
class Pessoa():
def criar_lista(self, n, id_ade, sexo, saude):
lista = []
qunt = int(input('quantas pessoas são: '))
for c in range(qunt):
n = input('digite seu nome: ')
lista.append(n)
idade = int(input('digite sua idade: '))
while idade < 0:
print('idade invalida, digite novamente:')
idade = int(input('digite sua idade: '))
sexo = input('digite seu sexo: F ou M ').upper()
while sexo != 'F' and sexo != 'M':
print('sexo invalido, digite novamente: ')
sexo = input('digite seu sexo: F ou M ').upper()
saude = input('diga como está sua saúde: boa ou ruim ').upper()
while saude != 'BOA' and saude != 'RUIM':
print('opção de saude invalida. Digite novamente')
saude = input('diga como está sua saúde: boa ou ruim ').upper()
if idade < 18:
print('voce nao está apta a cumprir o serviço militar obrigatório!')
continue
elif sexo == 'F':
print('voce nao está apta a cumprir o serviço militar obrigatório!')
continue
elif saude == 'RUIM':
print('voce nao está apta a cumprir o serviço militar obrigatório! Cuide da sua saude primeiro.')
continue
else:
print('parabens, voce ESTÁ apta a cumprir o serviço militar obrigatório!')
pessoa = Pessoa()
pessoa.criar_lista(None, int, None, None)
| 39.875
| 113
| 0.523511
|
class Pessoa():
def criar_lista(self, n, id_ade, sexo, saude):
lista = []
qunt = int(input('quantas pessoas são: '))
for c in range(qunt):
n = input('digite seu nome: ')
lista.append(n)
idade = int(input('digite sua idade: '))
while idade < 0:
print('idade invalida, digite novamente:')
idade = int(input('digite sua idade: '))
sexo = input('digite seu sexo: F ou M ').upper()
while sexo != 'F' and sexo != 'M':
print('sexo invalido, digite novamente: ')
sexo = input('digite seu sexo: F ou M ').upper()
saude = input('diga como está sua saúde: boa ou ruim ').upper()
while saude != 'BOA' and saude != 'RUIM':
print('opção de saude invalida. Digite novamente')
saude = input('diga como está sua saúde: boa ou ruim ').upper()
if idade < 18:
print('voce nao está apta a cumprir o serviço militar obrigatório!')
continue
elif sexo == 'F':
print('voce nao está apta a cumprir o serviço militar obrigatório!')
continue
elif saude == 'RUIM':
print('voce nao está apta a cumprir o serviço militar obrigatório! Cuide da sua saude primeiro.')
continue
else:
print('parabens, voce ESTÁ apta a cumprir o serviço militar obrigatório!')
pessoa = Pessoa()
pessoa.criar_lista(None, int, None, None)
| true
| true
|
790ab2e897bf9666d58e716d0b143db895baaf53
| 655
|
py
|
Python
|
tests/text_classification_nonsync.py
|
anthliu/turkit2
|
f8033d5d5b1be4afe838d5f2f49bff532a656119
|
[
"MIT"
] | 1
|
2021-01-18T20:11:04.000Z
|
2021-01-18T20:11:04.000Z
|
tests/text_classification_nonsync.py
|
anthliu/turkit2
|
f8033d5d5b1be4afe838d5f2f49bff532a656119
|
[
"MIT"
] | null | null | null |
tests/text_classification_nonsync.py
|
anthliu/turkit2
|
f8033d5d5b1be4afe838d5f2f49bff532a656119
|
[
"MIT"
] | null | null | null |
import uuid
from turkit2.common import TextClassification
from turkit2.qualifications import Unique, Locale, AcceptRate
from utils import get_client
client = get_client()
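# gate the HIT behind locale and approval-rate worker qualifications, then post
# each document as its own binary (positive/negative) classification task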
quals = [Locale(), AcceptRate()]
task = TextClassification(client, 'Test3', '0.01', 'test test', 600, 6000, ['positive', 'negative'], question='Which class does this text match, positive or negative?', qualifications=quals)
documents = [f'test{i}' for i in range(5)]
def proc(text):
for answer, assignment in task.ask(verbosity=100, text=text):
print(answer)
print(assignment)
def main():
tasks = []
for text in documents:
proc(text)
main()
| 26.2
| 190
| 0.700763
|
import uuid
from turkit2.common import TextClassification
from turkit2.qualifications import Unique, Locale, AcceptRate
from utils import get_client
client = get_client()
quals = [Locale(), AcceptRate()]
task = TextClassification(client, 'Test3', '0.01', 'test test', 600, 6000, ['positive', 'negative'], question='Which class does this text match, positive or negative?', qualifications=quals)
documents = [f'test{i}' for i in range(5)]
def proc(text):
for answer, assignment in task.ask(verbosity=100, text=text):
print(answer)
print(assignment)
def main():
tasks = []
for text in documents:
proc(text)
main()
| true
| true
|
790ab2edb90fd0066f6d5b94210ace24210702f9
| 1,289
|
py
|
Python
|
gwells/models/ScreenIntakeMethodCode.py
|
fieranmason/gwells
|
867d56bbe52343054862c72ac6285058a6556a58
|
[
"Apache-2.0"
] | 1
|
2020-01-29T22:42:40.000Z
|
2020-01-29T22:42:40.000Z
|
gwells/models/ScreenIntakeMethodCode.py
|
fieranmason/gwells
|
867d56bbe52343054862c72ac6285058a6556a58
|
[
"Apache-2.0"
] | 1
|
2018-05-02T05:28:33.000Z
|
2018-05-09T15:58:07.000Z
|
gwells/models/ScreenIntakeMethodCode.py
|
fieranmason/gwells
|
867d56bbe52343054862c72ac6285058a6556a58
|
[
"Apache-2.0"
] | 1
|
2018-05-02T23:56:48.000Z
|
2018-05-02T23:56:48.000Z
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .AuditModel import AuditModel
from django.db import models
class ScreenIntakeMethodCode(AuditModel):
"""
Refers to the type of intake mechanism for a well screen, i.e. Screen, Open Bottom, Uncased Hole.
"""
screen_intake_code = models.CharField(primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
display_order = models.PositiveIntegerField()
effective_date = models.DateTimeField(blank=True, null=True)
expiry_date = models.DateTimeField(blank=True, null=True)
class Meta:
db_table = 'screen_intake_method_code'
ordering = ['display_order', 'description']
def __str__(self):
return self.description
| 37.911765
| 102
| 0.728472
|
from .AuditModel import AuditModel
from django.db import models
class ScreenIntakeMethodCode(AuditModel):
screen_intake_code = models.CharField(primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
display_order = models.PositiveIntegerField()
effective_date = models.DateTimeField(blank=True, null=True)
expiry_date = models.DateTimeField(blank=True, null=True)
class Meta:
db_table = 'screen_intake_method_code'
ordering = ['display_order', 'description']
def __str__(self):
return self.description
| true
| true
|
790ab3a508398cd01d952faf3eef6c96f6f2fb18
| 26,681
|
py
|
Python
|
pysnmp/ASCEND-MIBSYS1-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/ASCEND-MIBSYS1-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/ASCEND-MIBSYS1-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module ASCEND-MIBSYS1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBSYS1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:12:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
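# Note: like all pysmi-generated modules, this file is executed by pysnmp's
# MibBuilder, which supplies the `mibBuilder` symbol referenced below.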
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType, MibIdentifier, ModuleIdentity, ObjectIdentity, Gauge32, Unsigned32, Integer32, iso, Counter64, Bits, Counter32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType", "MibIdentifier", "ModuleIdentity", "ObjectIdentity", "Gauge32", "Unsigned32", "Integer32", "iso", "Counter64", "Bits", "Counter32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
pass
mibsystemProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 125))
mibsystemProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 1), )
if mibBuilder.loadTexts: mibsystemProfileTable.setStatus('mandatory')
mibsystemProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1), ).setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-Index-o"))
if mibBuilder.loadTexts: mibsystemProfileEntry.setStatus('mandatory')
systemProfile_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 1), Integer32()).setLabel("systemProfile-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_Index_o.setStatus('mandatory')
systemProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 2), DisplayString()).setLabel("systemProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Name.setStatus('mandatory')
systemProfile_Contact = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 3), DisplayString()).setLabel("systemProfile-Contact").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Contact.setStatus('mandatory')
systemProfile_Location = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 4), DisplayString()).setLabel("systemProfile-Location").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Location.setStatus('mandatory')
systemProfile_TermRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("n-300Bps", 1), ("n-1200Bps", 2), ("n-2400Bps", 3), ("n-4800Bps", 4), ("n-9600Bps", 5), ("n-19200Bps", 6), ("n-38400Bps", 7), ("n-57600Bps", 8), ("n-115200Bps", 9)))).setLabel("systemProfile-TermRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TermRate.setStatus('mandatory')
systemProfile_Console = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standard", 1), ("limited", 2), ("mif", 3)))).setLabel("systemProfile-Console").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Console.setStatus('mandatory')
systemProfile_ConsoleSecurity = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("consoleSecurityNone", 1), ("consoleSecurityProfile", 2), ("consoleSecurityAuthSetting", 3)))).setLabel("systemProfile-ConsoleSecurity").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ConsoleSecurity.setStatus('mandatory')
systemProfile_SystemRmtMgmt = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SystemRmtMgmt").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SystemRmtMgmt.setStatus('mandatory')
systemProfile_SubAddressMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noSubaddress", 1), ("routingSubaddress", 2), ("termselSubaddress", 3)))).setLabel("systemProfile-SubAddressMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SubAddressMode.setStatus('mandatory')
systemProfile_SerialSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 10), Integer32()).setLabel("systemProfile-SerialSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SerialSubaddress.setStatus('mandatory')
systemProfile_LanSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 11), Integer32()).setLabel("systemProfile-LanSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_LanSubaddress.setStatus('mandatory')
systemProfile_DmSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 12), Integer32()).setLabel("systemProfile-DmSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DmSubaddress.setStatus('mandatory')
systemProfile_V110Subaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 13), Integer32()).setLabel("systemProfile-V110Subaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_V110Subaddress.setStatus('mandatory')
systemProfile_UseTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-UseTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UseTrunkGroups.setStatus('mandatory')
systemProfile_NumDigitsTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 15), Integer32()).setLabel("systemProfile-NumDigitsTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NumDigitsTrunkGroups.setStatus('mandatory')
systemProfile_AutoLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-AutoLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AutoLogout.setStatus('mandatory')
systemProfile_IdleLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 17), Integer32()).setLabel("systemProfile-IdleLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IdleLogout.setStatus('mandatory')
systemProfile_P50SwitchUsage = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("switchUnused", 1), ("switchSerialWan", 2), ("switchNumberOfUses", 3)))).setLabel("systemProfile-P50SwitchUsage").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_P50SwitchUsage.setStatus('mandatory')
systemProfile_oDS0MinRst = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("daily", 2), ("monthly", 3)))).setLabel("systemProfile-oDS0MinRst").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_oDS0MinRst.setStatus('mandatory')
systemProfile_MaxSystemDS0Mins = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 20), Integer32()).setLabel("systemProfile-MaxSystemDS0Mins").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxSystemDS0Mins.setStatus('mandatory')
systemProfile_MaxDialoutTime = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 21), Integer32()).setLabel("systemProfile-MaxDialoutTime").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxDialoutTime.setStatus('mandatory')
systemProfile_ParallelDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 22), Integer32()).setLabel("systemProfile-ParallelDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ParallelDialing.setStatus('mandatory')
systemProfile_SingleFileIncoming = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SingleFileIncoming").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SingleFileIncoming.setStatus('mandatory')
systemProfile_DelayDualPortDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-DelayDualPortDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DelayDualPortDialing.setStatus('mandatory')
systemProfile_EditNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 25), DisplayString()).setLabel("systemProfile-EditNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_EditNumber.setStatus('mandatory')
systemProfile_AnalogEncoding = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("uLaw", 1), ("aLaw", 2)))).setLabel("systemProfile-AnalogEncoding").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AnalogEncoding.setStatus('mandatory')
systemProfile_SessionidBase = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 27), Integer32()).setLabel("systemProfile-SessionidBase").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SessionidBase.setStatus('mandatory')
systemProfile_TOnline = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnline").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnline.setStatus('mandatory')
systemProfile_TOnlineMostAvailChan = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnlineMostAvailChan").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnlineMostAvailChan.setStatus('mandatory')
systemProfile_T302Timer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 30), Integer32()).setLabel("systemProfile-T302Timer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_T302Timer.setStatus('mandatory')
systemProfile_CallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-CallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallRoutingSortMethod.setStatus('mandatory')
systemProfile_DigitalCallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-DigitalCallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DigitalCallRoutingSortMethod.setStatus('mandatory')
systemProfile_ExactMatchCallRouting = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-ExactMatchCallRouting").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ExactMatchCallRouting.setStatus('mandatory')
systemProfile_ShelfControllerType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standalone", 1), ("master", 2), ("slave", 3)))).setLabel("systemProfile-ShelfControllerType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ShelfControllerType.setStatus('mandatory')
systemProfile_MasterShelfController = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 35), Integer32()).setLabel("systemProfile-MasterShelfController").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MasterShelfController.setStatus('mandatory')
systemProfile_NewNasPortIdFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-NewNasPortIdFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NewNasPortIdFormat.setStatus('mandatory')
systemProfile_NasPortFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 59), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notApplicable", 1), ("n-2455", 2), ("n-655", 3), ("n-122", 4), ("n-1233", 5)))).setLabel("systemProfile-NasPortFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NasPortFormat.setStatus('mandatory')
systemProfile_ModemPriTypeOfNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 37), Integer32()).setLabel("systemProfile-ModemPriTypeOfNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriTypeOfNumber.setStatus('mandatory')
systemProfile_ModemPriNumberingPlanId = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 38), Integer32()).setLabel("systemProfile-ModemPriNumberingPlanId").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriNumberingPlanId.setStatus('mandatory')
systemProfile_WanInterface = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 39), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("wanT1", 1), ("wanSwan", 2)))).setLabel("systemProfile-WanInterface").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_WanInterface.setStatus('mandatory')
systemProfile_PermConnUpdMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("all", 1), ("changed", 2)))).setLabel("systemProfile-PermConnUpdMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PermConnUpdMode.setStatus('mandatory')
systemProfile_UserstatFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 41), DisplayString()).setLabel("systemProfile-UserstatFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UserstatFormat.setStatus('mandatory')
systemProfile_ControlBusType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dpram", 1), ("pbus", 2)))).setLabel("systemProfile-ControlBusType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ControlBusType.setStatus('mandatory')
systemProfile_BootSrVersion = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 43), DisplayString()).setLabel("systemProfile-BootSrVersion").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_BootSrVersion.setStatus('mandatory')
systemProfile_SysModemProfile_oATAnswerString = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 44), DisplayString()).setLabel("systemProfile-SysModemProfile-oATAnswerString").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SysModemProfile_oATAnswerString.setStatus('mandatory')
systemProfile_CallByCall = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 45), Integer32()).setLabel("systemProfile-CallByCall").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallByCall.setStatus('mandatory')
systemProfile_Country = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 46), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 23, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))).clone(namedValues=NamedValues(("argentina", 2), ("australia", 3), ("belgium", 4), ("brazil", 23), ("china", 5), ("costaRica", 6), ("finland", 7), ("france", 8), ("germany", 9), ("hongKong", 10), ("italy", 11), ("japan", 12), ("korea", 13), ("mexico", 14), ("netherlands", 15), ("newZealand", 16), ("singapore", 17), ("spain", 18), ("sweden", 19), ("switzerland", 20), ("uk", 21), ("us", 22)))).setLabel("systemProfile-Country").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Country.setStatus('mandatory')
systemProfile_PotsDigitTimeout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 47), Integer32()).setLabel("systemProfile-PotsDigitTimeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PotsDigitTimeout.setStatus('mandatory')
systemProfile_System8kClock = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 48), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5))).clone(namedValues=NamedValues(("controller", 2), ("limOrTrunkModule", 3), ("bits", 4), ("ami8k", 5)))).setLabel("systemProfile-System8kClock").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_System8kClock.setStatus('mandatory')
systemProfile_SupportDbcs = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 49), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SupportDbcs").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SupportDbcs.setStatus('mandatory')
systemProfile_IncCallDistrib = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 50), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("firstAvailable", 2), ("fairShare", 3)))).setLabel("systemProfile-IncCallDistrib").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IncCallDistrib.setStatus('mandatory')
systemProfile_IgnoreLineup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 51), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-IgnoreLineup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IgnoreLineup.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile1 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 53), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile1").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile1.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile2 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 54), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile2").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile2.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile3 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 55), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile3").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile3.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile4 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 56), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile4").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile4.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile5 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 57), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile5").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile5.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile6 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 58), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile6").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile6.setStatus('mandatory')
systemProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 52), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("systemProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Action_o.setStatus('mandatory')
mibsystemProfile_StatusNumberTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 2), ).setLabel("mibsystemProfile-StatusNumberTable")
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberTable.setStatus('mandatory')
mibsystemProfile_StatusNumberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1), ).setLabel("mibsystemProfile-StatusNumberEntry").setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index-o"), (0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index1-o"))
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberEntry.setStatus('mandatory')
systemProfile_StatusNumber_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 1), Integer32()).setLabel("systemProfile-StatusNumber-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index_o.setStatus('mandatory')
systemProfile_StatusNumber_Index1_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 2), Integer32()).setLabel("systemProfile-StatusNumber-Index1-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index1_o.setStatus('mandatory')
systemProfile_StatusNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 3), DisplayString()).setLabel("systemProfile-StatusNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_StatusNumber.setStatus('mandatory')
mibBuilder.exportSymbols("ASCEND-MIBSYS1-MIB", systemProfile_CallRoutingSortMethod=systemProfile_CallRoutingSortMethod, systemProfile_MasterShelfController=systemProfile_MasterShelfController, systemProfile_PotsDigitTimeout=systemProfile_PotsDigitTimeout, systemProfile_JamFileComponents_JamFile6=systemProfile_JamFileComponents_JamFile6, systemProfile_Index_o=systemProfile_Index_o, systemProfile_MaxDialoutTime=systemProfile_MaxDialoutTime, mibsystemProfileEntry=mibsystemProfileEntry, systemProfile_SessionidBase=systemProfile_SessionidBase, systemProfile_ExactMatchCallRouting=systemProfile_ExactMatchCallRouting, systemProfile_CallByCall=systemProfile_CallByCall, systemProfile_AutoLogout=systemProfile_AutoLogout, DisplayString=DisplayString, systemProfile_UserstatFormat=systemProfile_UserstatFormat, systemProfile_IdleLogout=systemProfile_IdleLogout, systemProfile_EditNumber=systemProfile_EditNumber, systemProfile_P50SwitchUsage=systemProfile_P50SwitchUsage, systemProfile_DigitalCallRoutingSortMethod=systemProfile_DigitalCallRoutingSortMethod, systemProfile_JamFileComponents_JamFile1=systemProfile_JamFileComponents_JamFile1, systemProfile_IncCallDistrib=systemProfile_IncCallDistrib, mibsystemProfile_StatusNumberTable=mibsystemProfile_StatusNumberTable, systemProfile_ParallelDialing=systemProfile_ParallelDialing, systemProfile_SystemRmtMgmt=systemProfile_SystemRmtMgmt, systemProfile_AnalogEncoding=systemProfile_AnalogEncoding, systemProfile_ControlBusType=systemProfile_ControlBusType, systemProfile_Name=systemProfile_Name, systemProfile_IgnoreLineup=systemProfile_IgnoreLineup, systemProfile_JamFileComponents_JamFile2=systemProfile_JamFileComponents_JamFile2, systemProfile_Console=systemProfile_Console, systemProfile_SubAddressMode=systemProfile_SubAddressMode, systemProfile_NumDigitsTrunkGroups=systemProfile_NumDigitsTrunkGroups, systemProfile_Contact=systemProfile_Contact, systemProfile_ModemPriNumberingPlanId=systemProfile_ModemPriNumberingPlanId, systemProfile_BootSrVersion=systemProfile_BootSrVersion, systemProfile_DmSubaddress=systemProfile_DmSubaddress, systemProfile_V110Subaddress=systemProfile_V110Subaddress, mibsystemProfileTable=mibsystemProfileTable, systemProfile_Location=systemProfile_Location, systemProfile_oDS0MinRst=systemProfile_oDS0MinRst, systemProfile_JamFileComponents_JamFile3=systemProfile_JamFileComponents_JamFile3, systemProfile_StatusNumber=systemProfile_StatusNumber, systemProfile_UseTrunkGroups=systemProfile_UseTrunkGroups, systemProfile_TermRate=systemProfile_TermRate, mibsystemProfile=mibsystemProfile, mibsystemProfile_StatusNumberEntry=mibsystemProfile_StatusNumberEntry, systemProfile_ShelfControllerType=systemProfile_ShelfControllerType, systemProfile_WanInterface=systemProfile_WanInterface, systemProfile_PermConnUpdMode=systemProfile_PermConnUpdMode, systemProfile_NasPortFormat=systemProfile_NasPortFormat, systemProfile_ModemPriTypeOfNumber=systemProfile_ModemPriTypeOfNumber, systemProfile_SupportDbcs=systemProfile_SupportDbcs, systemProfile_DelayDualPortDialing=systemProfile_DelayDualPortDialing, systemProfile_TOnline=systemProfile_TOnline, systemProfile_SerialSubaddress=systemProfile_SerialSubaddress, systemProfile_JamFileComponents_JamFile5=systemProfile_JamFileComponents_JamFile5, systemProfile_T302Timer=systemProfile_T302Timer, systemProfile_LanSubaddress=systemProfile_LanSubaddress, systemProfile_SingleFileIncoming=systemProfile_SingleFileIncoming, systemProfile_NewNasPortIdFormat=systemProfile_NewNasPortIdFormat, 
systemProfile_Country=systemProfile_Country, systemProfile_SysModemProfile_oATAnswerString=systemProfile_SysModemProfile_oATAnswerString, systemProfile_System8kClock=systemProfile_System8kClock, systemProfile_Action_o=systemProfile_Action_o, systemProfile_MaxSystemDS0Mins=systemProfile_MaxSystemDS0Mins, systemProfile_JamFileComponents_JamFile4=systemProfile_JamFileComponents_JamFile4, systemProfile_ConsoleSecurity=systemProfile_ConsoleSecurity, systemProfile_TOnlineMostAvailChan=systemProfile_TOnlineMostAvailChan, systemProfile_StatusNumber_Index_o=systemProfile_StatusNumber_Index_o, systemProfile_StatusNumber_Index1_o=systemProfile_StatusNumber_Index1_o)
| 175.532895
| 4,174
| 0.793973
|
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType, MibIdentifier, ModuleIdentity, ObjectIdentity, Gauge32, Unsigned32, Integer32, iso, Counter64, Bits, Counter32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType", "MibIdentifier", "ModuleIdentity", "ObjectIdentity", "Gauge32", "Unsigned32", "Integer32", "iso", "Counter64", "Bits", "Counter32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
pass
mibsystemProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 125))
mibsystemProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 1), )
if mibBuilder.loadTexts: mibsystemProfileTable.setStatus('mandatory')
mibsystemProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1), ).setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-Index-o"))
if mibBuilder.loadTexts: mibsystemProfileEntry.setStatus('mandatory')
systemProfile_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 1), Integer32()).setLabel("systemProfile-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_Index_o.setStatus('mandatory')
systemProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 2), DisplayString()).setLabel("systemProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Name.setStatus('mandatory')
systemProfile_Contact = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 3), DisplayString()).setLabel("systemProfile-Contact").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Contact.setStatus('mandatory')
systemProfile_Location = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 4), DisplayString()).setLabel("systemProfile-Location").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Location.setStatus('mandatory')
systemProfile_TermRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("n-300Bps", 1), ("n-1200Bps", 2), ("n-2400Bps", 3), ("n-4800Bps", 4), ("n-9600Bps", 5), ("n-19200Bps", 6), ("n-38400Bps", 7), ("n-57600Bps", 8), ("n-115200Bps", 9)))).setLabel("systemProfile-TermRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TermRate.setStatus('mandatory')
systemProfile_Console = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standard", 1), ("limited", 2), ("mif", 3)))).setLabel("systemProfile-Console").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Console.setStatus('mandatory')
systemProfile_ConsoleSecurity = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("consoleSecurityNone", 1), ("consoleSecurityProfile", 2), ("consoleSecurityAuthSetting", 3)))).setLabel("systemProfile-ConsoleSecurity").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ConsoleSecurity.setStatus('mandatory')
systemProfile_SystemRmtMgmt = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SystemRmtMgmt").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SystemRmtMgmt.setStatus('mandatory')
systemProfile_SubAddressMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noSubaddress", 1), ("routingSubaddress", 2), ("termselSubaddress", 3)))).setLabel("systemProfile-SubAddressMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SubAddressMode.setStatus('mandatory')
systemProfile_SerialSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 10), Integer32()).setLabel("systemProfile-SerialSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SerialSubaddress.setStatus('mandatory')
systemProfile_LanSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 11), Integer32()).setLabel("systemProfile-LanSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_LanSubaddress.setStatus('mandatory')
systemProfile_DmSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 12), Integer32()).setLabel("systemProfile-DmSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DmSubaddress.setStatus('mandatory')
systemProfile_V110Subaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 13), Integer32()).setLabel("systemProfile-V110Subaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_V110Subaddress.setStatus('mandatory')
systemProfile_UseTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-UseTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UseTrunkGroups.setStatus('mandatory')
systemProfile_NumDigitsTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 15), Integer32()).setLabel("systemProfile-NumDigitsTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NumDigitsTrunkGroups.setStatus('mandatory')
systemProfile_AutoLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-AutoLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AutoLogout.setStatus('mandatory')
systemProfile_IdleLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 17), Integer32()).setLabel("systemProfile-IdleLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IdleLogout.setStatus('mandatory')
systemProfile_P50SwitchUsage = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("switchUnused", 1), ("switchSerialWan", 2), ("switchNumberOfUses", 3)))).setLabel("systemProfile-P50SwitchUsage").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_P50SwitchUsage.setStatus('mandatory')
systemProfile_oDS0MinRst = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("daily", 2), ("monthly", 3)))).setLabel("systemProfile-oDS0MinRst").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_oDS0MinRst.setStatus('mandatory')
systemProfile_MaxSystemDS0Mins = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 20), Integer32()).setLabel("systemProfile-MaxSystemDS0Mins").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxSystemDS0Mins.setStatus('mandatory')
systemProfile_MaxDialoutTime = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 21), Integer32()).setLabel("systemProfile-MaxDialoutTime").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxDialoutTime.setStatus('mandatory')
systemProfile_ParallelDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 22), Integer32()).setLabel("systemProfile-ParallelDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ParallelDialing.setStatus('mandatory')
systemProfile_SingleFileIncoming = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SingleFileIncoming").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SingleFileIncoming.setStatus('mandatory')
systemProfile_DelayDualPortDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-DelayDualPortDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DelayDualPortDialing.setStatus('mandatory')
systemProfile_EditNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 25), DisplayString()).setLabel("systemProfile-EditNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_EditNumber.setStatus('mandatory')
systemProfile_AnalogEncoding = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("uLaw", 1), ("aLaw", 2)))).setLabel("systemProfile-AnalogEncoding").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AnalogEncoding.setStatus('mandatory')
systemProfile_SessionidBase = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 27), Integer32()).setLabel("systemProfile-SessionidBase").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SessionidBase.setStatus('mandatory')
systemProfile_TOnline = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnline").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnline.setStatus('mandatory')
systemProfile_TOnlineMostAvailChan = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnlineMostAvailChan").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnlineMostAvailChan.setStatus('mandatory')
systemProfile_T302Timer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 30), Integer32()).setLabel("systemProfile-T302Timer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_T302Timer.setStatus('mandatory')
systemProfile_CallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-CallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallRoutingSortMethod.setStatus('mandatory')
systemProfile_DigitalCallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-DigitalCallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DigitalCallRoutingSortMethod.setStatus('mandatory')
systemProfile_ExactMatchCallRouting = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-ExactMatchCallRouting").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ExactMatchCallRouting.setStatus('mandatory')
systemProfile_ShelfControllerType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standalone", 1), ("master", 2), ("slave", 3)))).setLabel("systemProfile-ShelfControllerType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ShelfControllerType.setStatus('mandatory')
systemProfile_MasterShelfController = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 35), Integer32()).setLabel("systemProfile-MasterShelfController").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MasterShelfController.setStatus('mandatory')
systemProfile_NewNasPortIdFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-NewNasPortIdFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NewNasPortIdFormat.setStatus('mandatory')
systemProfile_NasPortFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 59), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notApplicable", 1), ("n-2455", 2), ("n-655", 3), ("n-122", 4), ("n-1233", 5)))).setLabel("systemProfile-NasPortFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NasPortFormat.setStatus('mandatory')
systemProfile_ModemPriTypeOfNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 37), Integer32()).setLabel("systemProfile-ModemPriTypeOfNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriTypeOfNumber.setStatus('mandatory')
systemProfile_ModemPriNumberingPlanId = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 38), Integer32()).setLabel("systemProfile-ModemPriNumberingPlanId").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriNumberingPlanId.setStatus('mandatory')
systemProfile_WanInterface = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 39), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("wanT1", 1), ("wanSwan", 2)))).setLabel("systemProfile-WanInterface").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_WanInterface.setStatus('mandatory')
systemProfile_PermConnUpdMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("all", 1), ("changed", 2)))).setLabel("systemProfile-PermConnUpdMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PermConnUpdMode.setStatus('mandatory')
systemProfile_UserstatFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 41), DisplayString()).setLabel("systemProfile-UserstatFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UserstatFormat.setStatus('mandatory')
systemProfile_ControlBusType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dpram", 1), ("pbus", 2)))).setLabel("systemProfile-ControlBusType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ControlBusType.setStatus('mandatory')
systemProfile_BootSrVersion = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 43), DisplayString()).setLabel("systemProfile-BootSrVersion").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_BootSrVersion.setStatus('mandatory')
systemProfile_SysModemProfile_oATAnswerString = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 44), DisplayString()).setLabel("systemProfile-SysModemProfile-oATAnswerString").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SysModemProfile_oATAnswerString.setStatus('mandatory')
systemProfile_CallByCall = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 45), Integer32()).setLabel("systemProfile-CallByCall").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallByCall.setStatus('mandatory')
systemProfile_Country = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 46), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 23, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))).clone(namedValues=NamedValues(("argentina", 2), ("australia", 3), ("belgium", 4), ("brazil", 23), ("china", 5), ("costaRica", 6), ("finland", 7), ("france", 8), ("germany", 9), ("hongKong", 10), ("italy", 11), ("japan", 12), ("korea", 13), ("mexico", 14), ("netherlands", 15), ("newZealand", 16), ("singapore", 17), ("spain", 18), ("sweden", 19), ("switzerland", 20), ("uk", 21), ("us", 22)))).setLabel("systemProfile-Country").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Country.setStatus('mandatory')
systemProfile_PotsDigitTimeout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 47), Integer32()).setLabel("systemProfile-PotsDigitTimeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PotsDigitTimeout.setStatus('mandatory')
systemProfile_System8kClock = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 48), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5))).clone(namedValues=NamedValues(("controller", 2), ("limOrTrunkModule", 3), ("bits", 4), ("ami8k", 5)))).setLabel("systemProfile-System8kClock").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_System8kClock.setStatus('mandatory')
systemProfile_SupportDbcs = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 49), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SupportDbcs").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SupportDbcs.setStatus('mandatory')
systemProfile_IncCallDistrib = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 50), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("firstAvailable", 2), ("fairShare", 3)))).setLabel("systemProfile-IncCallDistrib").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IncCallDistrib.setStatus('mandatory')
systemProfile_IgnoreLineup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 51), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-IgnoreLineup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IgnoreLineup.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile1 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 53), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile1").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile1.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile2 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 54), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile2").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile2.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile3 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 55), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile3").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile3.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile4 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 56), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile4").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile4.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile5 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 57), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile5").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile5.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile6 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 58), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile6").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile6.setStatus('mandatory')
systemProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 52), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("systemProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Action_o.setStatus('mandatory')
mibsystemProfile_StatusNumberTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 2), ).setLabel("mibsystemProfile-StatusNumberTable")
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberTable.setStatus('mandatory')
mibsystemProfile_StatusNumberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1), ).setLabel("mibsystemProfile-StatusNumberEntry").setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index-o"), (0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index1-o"))
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberEntry.setStatus('mandatory')
systemProfile_StatusNumber_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 1), Integer32()).setLabel("systemProfile-StatusNumber-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index_o.setStatus('mandatory')
systemProfile_StatusNumber_Index1_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 2), Integer32()).setLabel("systemProfile-StatusNumber-Index1-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index1_o.setStatus('mandatory')
systemProfile_StatusNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 3), DisplayString()).setLabel("systemProfile-StatusNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_StatusNumber.setStatus('mandatory')
mibBuilder.exportSymbols("ASCEND-MIBSYS1-MIB", systemProfile_CallRoutingSortMethod=systemProfile_CallRoutingSortMethod, systemProfile_MasterShelfController=systemProfile_MasterShelfController, systemProfile_PotsDigitTimeout=systemProfile_PotsDigitTimeout, systemProfile_JamFileComponents_JamFile6=systemProfile_JamFileComponents_JamFile6, systemProfile_Index_o=systemProfile_Index_o, systemProfile_MaxDialoutTime=systemProfile_MaxDialoutTime, mibsystemProfileEntry=mibsystemProfileEntry, systemProfile_SessionidBase=systemProfile_SessionidBase, systemProfile_ExactMatchCallRouting=systemProfile_ExactMatchCallRouting, systemProfile_CallByCall=systemProfile_CallByCall, systemProfile_AutoLogout=systemProfile_AutoLogout, DisplayString=DisplayString, systemProfile_UserstatFormat=systemProfile_UserstatFormat, systemProfile_IdleLogout=systemProfile_IdleLogout, systemProfile_EditNumber=systemProfile_EditNumber, systemProfile_P50SwitchUsage=systemProfile_P50SwitchUsage, systemProfile_DigitalCallRoutingSortMethod=systemProfile_DigitalCallRoutingSortMethod, systemProfile_JamFileComponents_JamFile1=systemProfile_JamFileComponents_JamFile1, systemProfile_IncCallDistrib=systemProfile_IncCallDistrib, mibsystemProfile_StatusNumberTable=mibsystemProfile_StatusNumberTable, systemProfile_ParallelDialing=systemProfile_ParallelDialing, systemProfile_SystemRmtMgmt=systemProfile_SystemRmtMgmt, systemProfile_AnalogEncoding=systemProfile_AnalogEncoding, systemProfile_ControlBusType=systemProfile_ControlBusType, systemProfile_Name=systemProfile_Name, systemProfile_IgnoreLineup=systemProfile_IgnoreLineup, systemProfile_JamFileComponents_JamFile2=systemProfile_JamFileComponents_JamFile2, systemProfile_Console=systemProfile_Console, systemProfile_SubAddressMode=systemProfile_SubAddressMode, systemProfile_NumDigitsTrunkGroups=systemProfile_NumDigitsTrunkGroups, systemProfile_Contact=systemProfile_Contact, systemProfile_ModemPriNumberingPlanId=systemProfile_ModemPriNumberingPlanId, systemProfile_BootSrVersion=systemProfile_BootSrVersion, systemProfile_DmSubaddress=systemProfile_DmSubaddress, systemProfile_V110Subaddress=systemProfile_V110Subaddress, mibsystemProfileTable=mibsystemProfileTable, systemProfile_Location=systemProfile_Location, systemProfile_oDS0MinRst=systemProfile_oDS0MinRst, systemProfile_JamFileComponents_JamFile3=systemProfile_JamFileComponents_JamFile3, systemProfile_StatusNumber=systemProfile_StatusNumber, systemProfile_UseTrunkGroups=systemProfile_UseTrunkGroups, systemProfile_TermRate=systemProfile_TermRate, mibsystemProfile=mibsystemProfile, mibsystemProfile_StatusNumberEntry=mibsystemProfile_StatusNumberEntry, systemProfile_ShelfControllerType=systemProfile_ShelfControllerType, systemProfile_WanInterface=systemProfile_WanInterface, systemProfile_PermConnUpdMode=systemProfile_PermConnUpdMode, systemProfile_NasPortFormat=systemProfile_NasPortFormat, systemProfile_ModemPriTypeOfNumber=systemProfile_ModemPriTypeOfNumber, systemProfile_SupportDbcs=systemProfile_SupportDbcs, systemProfile_DelayDualPortDialing=systemProfile_DelayDualPortDialing, systemProfile_TOnline=systemProfile_TOnline, systemProfile_SerialSubaddress=systemProfile_SerialSubaddress, systemProfile_JamFileComponents_JamFile5=systemProfile_JamFileComponents_JamFile5, systemProfile_T302Timer=systemProfile_T302Timer, systemProfile_LanSubaddress=systemProfile_LanSubaddress, systemProfile_SingleFileIncoming=systemProfile_SingleFileIncoming, systemProfile_NewNasPortIdFormat=systemProfile_NewNasPortIdFormat, 
systemProfile_Country=systemProfile_Country, systemProfile_SysModemProfile_oATAnswerString=systemProfile_SysModemProfile_oATAnswerString, systemProfile_System8kClock=systemProfile_System8kClock, systemProfile_Action_o=systemProfile_Action_o, systemProfile_MaxSystemDS0Mins=systemProfile_MaxSystemDS0Mins, systemProfile_JamFileComponents_JamFile4=systemProfile_JamFileComponents_JamFile4, systemProfile_ConsoleSecurity=systemProfile_ConsoleSecurity, systemProfile_TOnlineMostAvailChan=systemProfile_TOnlineMostAvailChan, systemProfile_StatusNumber_Index_o=systemProfile_StatusNumber_Index_o, systemProfile_StatusNumber_Index1_o=systemProfile_StatusNumber_Index1_o)
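# --- Illustrative sketch (not part of the generated MIB module above) ---
# The scalars defined above sit under the Ascend enterprise subtree
# 1.3.6.1.4.1.529.23.125. A hedged example of reading one of them
# (systemProfile-Country, OID ...1.1.46, scalar instance .0) with the pysnmp
# high-level API; the agent address and community string are placeholders.
from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, getCmd)

def read_system_profile_country(host="192.0.2.1", community="public"):
    error_indication, error_status, error_index, var_binds = next(
        getCmd(SnmpEngine(),
               CommunityData(community),
               UdpTransportTarget((host, 161)),
               ContextData(),
               ObjectType(ObjectIdentity("1.3.6.1.4.1.529.23.125.1.1.46.0")))
    )
    if error_indication:
        raise RuntimeError(error_indication)
    return var_binds  # e.g. a single (OID, Integer32) pair such as us(22)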
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 790ab49fa3eb6a146fbbcdcd2af46fefaf17821d | size: 24,781 | ext: py | lang: Python
max_stars_repo_path: tb_rest_client/api/api_pe/widgets_bundle_controller_api.py | max_stars_repo_name: samson0v/python_tb_rest_client | max_stars_repo_head_hexsha: 08ff7898740f7cec2170e85d5c3c89e222e967f7 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 30 | max_stars_repo_stars_event_min_datetime: 2020-06-19T06:42:50.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-23T21:16:36.000Z
max_issues_repo_path: tb_rest_client/api/api_pe/widgets_bundle_controller_api.py | max_issues_repo_name: samson0v/python_tb_rest_client | max_issues_repo_head_hexsha: 08ff7898740f7cec2170e85d5c3c89e222e967f7 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 25 | max_issues_repo_issues_event_min_datetime: 2021-08-30T01:17:27.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-16T14:10:14.000Z
max_forks_repo_path: tb_rest_client/api/api_pe/widgets_bundle_controller_api.py | max_forks_repo_name: samson0v/python_tb_rest_client | max_forks_repo_head_hexsha: 08ff7898740f7cec2170e85d5c3c89e222e967f7 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 23 | max_forks_repo_forks_event_min_datetime: 2020-07-06T13:41:54.000Z | max_forks_repo_forks_event_max_datetime: 2021-08-23T21:04:50.000Z
content:
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class WidgetsBundleControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_widgets_bundle_using_delete(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Delete widgets bundle (deleteWidgetsBundle) # noqa: E501
Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_widgets_bundle_using_delete(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
else:
(data) = self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
return data
def delete_widgets_bundle_using_delete_with_http_info(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Delete widgets bundle (deleteWidgetsBundle) # noqa: E501
Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['widgets_bundle_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_widgets_bundle_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'widgets_bundle_id' is set
if ('widgets_bundle_id' not in params or
params['widgets_bundle_id'] is None):
raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `delete_widgets_bundle_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'widgets_bundle_id' in params:
path_params['widgetsBundleId'] = params['widgets_bundle_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundle/{widgetsBundleId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundle_by_id_using_get(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Get Widget Bundle (getWidgetsBundleById) # noqa: E501
Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundle_by_id_using_get(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
return data
def get_widgets_bundle_by_id_using_get_with_http_info(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Get Widget Bundle (getWidgetsBundleById) # noqa: E501
Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['widgets_bundle_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundle_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'widgets_bundle_id' is set
if ('widgets_bundle_id' not in params or
params['widgets_bundle_id'] is None):
raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `get_widgets_bundle_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'widgets_bundle_id' in params:
path_params['widgetsBundleId'] = params['widgets_bundle_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundle/{widgetsBundleId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WidgetsBundle', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundles_using_get(self, **kwargs): # noqa: E501
"""Get all Widget Bundles (getWidgetsBundles) # noqa: E501
        Returns an array of Widget Bundle objects that are available for the current user. Widget Bundle represents a group (bundle) of widgets. Widgets are grouped into bundles by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[WidgetsBundle]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundles_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_widgets_bundles_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Get all Widget Bundles (getWidgetsBundles) # noqa: E501
        Returns an array of Widget Bundle objects that are available for the current user. Widget Bundle represents a group (bundle) of widgets. Widgets are grouped into bundles by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[WidgetsBundle]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundles_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[WidgetsBundle]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundles_using_get1(self, page_size, page, **kwargs): # noqa: E501
"""Get Widget Bundles (getWidgetsBundles) # noqa: E501
Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get1(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page_size: Maximum number of entities in one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataWidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_widgets_bundles_using_get1_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""Get Widget Bundles (getWidgetsBundles) # noqa: E501
Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get1_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page_size: Maximum number of entities in one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataWidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundles_using_get1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_widgets_bundles_using_get1`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_widgets_bundles_using_get1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundles{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataWidgetsBundle', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_widgets_bundle_using_post(self, **kwargs): # noqa: E501
"""Create Or Update Widget Bundle (saveWidgetsBundle) # noqa: E501
Create or update the Widget Bundle. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. When creating the bundle, platform generates Widget Bundle Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Widget Bundle Id will be present in the response. Specify existing Widget Bundle id to update the Widget Bundle. Referencing non-existing Widget Bundle Id will cause 'Not Found' error. Widget Bundle alias is unique in the scope of tenant. Special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create bundle request is sent by user with 'SYS_ADMIN' authority. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_widgets_bundle_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param WidgetsBundle body:
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_widgets_bundle_using_post_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.save_widgets_bundle_using_post_with_http_info(**kwargs) # noqa: E501
return data
def save_widgets_bundle_using_post_with_http_info(self, **kwargs): # noqa: E501
"""Create Or Update Widget Bundle (saveWidgetsBundle) # noqa: E501
Create or update the Widget Bundle. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. When creating the bundle, platform generates Widget Bundle Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Widget Bundle Id will be present in the response. Specify existing Widget Bundle id to update the Widget Bundle. Referencing non-existing Widget Bundle Id will cause 'Not Found' error. Widget Bundle alias is unique in the scope of tenant. Special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create bundle request is sent by user with 'SYS_ADMIN' authority. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_widgets_bundle_using_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param WidgetsBundle body:
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_widgets_bundle_using_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/widgetsBundle', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WidgetsBundle', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
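# --- Illustrative usage sketch (not part of the generated client above) ---
# A hedged example of driving WidgetsBundleControllerApi as the docstrings
# describe: synchronous calls by default, async_req=True for a background
# request, and pageSize/page/textSearch parameters for the paginated listing.
# The bundle id below is the placeholder UUID from the docstrings; a real
# client would also need the ThingsBoard host and an X-Authorization token
# configured on the underlying ApiClient, which is elided here.
if __name__ == "__main__":
    api = WidgetsBundleControllerApi(ApiClient())

    # Paginated listing: first page of up to 10 bundles whose title starts with "chart".
    page = api.get_widgets_bundles_using_get1(page_size=10, page=0, text_search="chart")

    # Fetch a single bundle by id (placeholder UUID), then delete it.
    bundle = api.get_widgets_bundle_by_id_using_get("784f394c-42b6-435a-983c-b7beff2784f9")
    api.delete_widgets_bundle_using_delete("784f394c-42b6-435a-983c-b7beff2784f9")

    # The same listing as an asynchronous request returning a thread-like object.
    thread = api.get_widgets_bundles_using_get(async_req=True)
    bundles = thread.get()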
avg_line_length: 47.564299 | max_line_length: 824 | alphanum_fraction: 0.655179
content_no_comment:
from __future__ import absolute_import
import re
import six
from tb_rest_client.api_client import ApiClient
class WidgetsBundleControllerApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_widgets_bundle_using_delete(self, widgets_bundle_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs)
else:
(data) = self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs)
return data
def delete_widgets_bundle_using_delete_with_http_info(self, widgets_bundle_id, **kwargs):
all_params = ['widgets_bundle_id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_widgets_bundle_using_delete" % key
)
params[key] = val
del params['kwargs']
if ('widgets_bundle_id' not in params or
params['widgets_bundle_id'] is None):
raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `delete_widgets_bundle_using_delete`")
collection_formats = {}
path_params = {}
if 'widgets_bundle_id' in params:
path_params['widgetsBundleId'] = params['widgets_bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['X-Authorization']
return self.api_client.call_api(
'/api/widgetsBundle/{widgetsBundleId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundle_by_id_using_get(self, widgets_bundle_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs)
else:
(data) = self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs)
return data
def get_widgets_bundle_by_id_using_get_with_http_info(self, widgets_bundle_id, **kwargs):
all_params = ['widgets_bundle_id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundle_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
if ('widgets_bundle_id' not in params or
params['widgets_bundle_id'] is None):
raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `get_widgets_bundle_by_id_using_get`")
collection_formats = {}
path_params = {}
if 'widgets_bundle_id' in params:
path_params['widgetsBundleId'] = params['widgets_bundle_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['X-Authorization']
return self.api_client.call_api(
'/api/widgetsBundle/{widgetsBundleId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WidgetsBundle',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundles_using_get(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get_with_http_info(**kwargs)
else:
(data) = self.get_widgets_bundles_using_get_with_http_info(**kwargs)
return data
def get_widgets_bundles_using_get_with_http_info(self, **kwargs):
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundles_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['X-Authorization']
return self.api_client.call_api(
'/api/widgetsBundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[WidgetsBundle]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_widgets_bundles_using_get1(self, page_size, page, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs)
else:
(data) = self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs)
return data
def get_widgets_bundles_using_get1_with_http_info(self, page_size, page, **kwargs):
all_params = ['page_size', 'page', 'text_search', 'sort_property', 'sort_order']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_widgets_bundles_using_get1" % key
)
params[key] = val
del params['kwargs']
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_widgets_bundles_using_get1`")
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_widgets_bundles_using_get1`")
collection_formats = {}
path_params = {}
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'page' in params:
query_params.append(('page', params['page']))
if 'text_search' in params:
query_params.append(('textSearch', params['text_search']))
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property']))
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['X-Authorization']
return self.api_client.call_api(
'/api/widgetsBundles{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataWidgetsBundle',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_widgets_bundle_using_post(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_widgets_bundle_using_post_with_http_info(**kwargs)
else:
(data) = self.save_widgets_bundle_using_post_with_http_info(**kwargs)
return data
def save_widgets_bundle_using_post_with_http_info(self, **kwargs):
all_params = ['body']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_widgets_bundle_using_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['X-Authorization']
return self.api_client.call_api(
'/api/widgetsBundle', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WidgetsBundle',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 790ab4eb85e8bcfb94fe52b0ec8e99e61f4cf09f | size: 667 | ext: py | lang: Python
max_stars_repo_path: setup.py | max_stars_repo_name: Shimwell/inference-tools | max_stars_repo_head_hexsha: 755b68bcf2e8c40414f4379afd80c2d7574583ed | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: setup.py | max_issues_repo_name: Shimwell/inference-tools | max_issues_repo_head_hexsha: 755b68bcf2e8c40414f4379afd80c2d7574583ed | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py | max_forks_repo_name: Shimwell/inference-tools | max_forks_repo_head_hexsha: 755b68bcf2e8c40414f4379afd80c2d7574583ed | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="inference-tools",
version="0.5.2",
author="Chris Bowman",
author_email="chris.bowman.physics@gmail.com",
description="A collection of python tools for Bayesian data analysis",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/C-bowman/inference-tools",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
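# --- Illustrative sketch (not part of setup.py above) ---
# A hedged example of inspecting the distribution produced by the setup() call
# above; it assumes the package has already been installed into the current
# environment (for example with `pip install .` from the repository root).
from importlib.metadata import version, metadata  # standard library on Python 3.8+

print(version("inference-tools"))              # expected "0.5.2", matching setup() above
print(metadata("inference-tools")["Summary"])  # "A collection of python tools for Bayesian data analysis"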
avg_line_length: 31.761905 | max_line_length: 74 | alphanum_fraction: 0.677661
content_no_comment:
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="inference-tools",
version="0.5.2",
author="Chris Bowman",
author_email="chris.bowman.physics@gmail.com",
description="A collection of python tools for Bayesian data analysis",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/C-bowman/inference-tools",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 790ab52c56461635760ab79d3119898270aad495 | size: 55,099 | ext: py | lang: Python
max_stars_repo_path: release/scripts/addons/io_scene_gltf2/io/com/gltf2_io.py | max_stars_repo_name: naetherm/Bforartists | max_stars_repo_head_hexsha: 4d78856b76544b9eeb49e7dd388b4cf41d58d7e4 | max_stars_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2019-09-16T10:29:19.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-11T14:43:18.000Z
max_issues_repo_path: release/scripts/addons/io_scene_gltf2/io/com/gltf2_io.py | max_issues_repo_name: naetherm/Bforartists | max_issues_repo_head_hexsha: 4d78856b76544b9eeb49e7dd388b4cf41d58d7e4 | max_issues_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: release/scripts/addons/io_scene_gltf2/io/com/gltf2_io.py | max_forks_repo_name: naetherm/Bforartists | max_forks_repo_head_hexsha: 4d78856b76544b9eeb49e7dd388b4cf41d58d7e4 | max_forks_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: Generated from latest glTF 2.0 JSON Scheme specs using quicktype (https://github.com/quicktype/quicktype)
# command used:
# quicktype --src glTF.schema.json --src-lang schema -t gltf --lang python --python-version 3.5
# TODO: add __slots__ to all classes by extending the generator
# TODO: REMOVE traceback import
import sys
import traceback
from io_scene_gltf2.io.com import gltf2_io_debug
def from_int(x):
assert isinstance(x, int) and not isinstance(x, bool)
return x
def from_none(x):
assert x is None
return x
def from_union(fs, x):
tracebacks = []
for f in fs:
try:
return f(x)
except AssertionError:
_, _, tb = sys.exc_info()
tracebacks.append(tb)
for tb in tracebacks:
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
for tbi in tb_info:
filename, line, func, text = tbi
gltf2_io_debug.print_console('ERROR', 'An error occurred on line {} in statement {}'.format(line, text))
assert False
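# --- Illustrative sketch (not part of the generated module) ---
# How the from_* combinators above compose: from_union tries each converter in
# turn and returns the result of the first one whose assertions pass, so
# optional glTF fields are typically decoded with [converter, from_none].
def _example_from_union_usage():
    assert from_union([from_int, from_none], 7) == 7          # accepted by from_int
    assert from_union([from_int, from_none], None) is None    # falls through to from_none
    assert from_union([from_str, from_none], "SCALAR") == "SCALAR"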
def from_dict(f, x):
assert isinstance(x, dict)
return {k: f(v) for (k, v) in x.items()}
def to_class(c, x):
assert isinstance(x, c)
return x.to_dict()
def from_list(f, x):
assert isinstance(x, list)
return [f(y) for y in x]
def from_float(x):
assert isinstance(x, (float, int)) and not isinstance(x, bool)
return float(x)
def from_str(x):
assert isinstance(x, str)
return x
def from_bool(x):
assert isinstance(x, bool)
return x
def to_float(x):
assert isinstance(x, float)
return x
class AccessorSparseIndices:
"""Index array of size `count` that points to those accessor attributes that deviate from
their initialization value. Indices must strictly increase.
Indices of those attributes that deviate from their initialization value.
"""
def __init__(self, buffer_view, byte_offset, component_type, extensions, extras):
self.buffer_view = buffer_view
self.byte_offset = byte_offset
self.component_type = component_type
self.extensions = extensions
self.extras = extras
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_int(obj.get("bufferView"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
component_type = from_int(obj.get("componentType"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
return AccessorSparseIndices(buffer_view, byte_offset, component_type, extensions, extras)
def to_dict(self):
result = {}
result["bufferView"] = from_int(self.buffer_view)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["componentType"] = from_int(self.component_type)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
return result
class AccessorSparseValues:
"""Array of size `count` times number of components, storing the displaced accessor
attributes pointed by `indices`. Substituted values must have the same `componentType`
and number of components as the base accessor.
Array of size `accessor.sparse.count` times number of components storing the displaced
accessor attributes pointed by `accessor.sparse.indices`.
"""
def __init__(self, buffer_view, byte_offset, extensions, extras):
self.buffer_view = buffer_view
self.byte_offset = byte_offset
self.extensions = extensions
self.extras = extras
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_int(obj.get("bufferView"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
return AccessorSparseValues(buffer_view, byte_offset, extensions, extras)
def to_dict(self):
result = {}
result["bufferView"] = from_int(self.buffer_view)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
return result
class AccessorSparse:
"""Sparse storage of attributes that deviate from their initialization value."""
def __init__(self, count, extensions, extras, indices, values):
self.count = count
self.extensions = extensions
self.extras = extras
self.indices = indices
self.values = values
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
count = from_int(obj.get("count"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
indices = AccessorSparseIndices.from_dict(obj.get("indices"))
values = AccessorSparseValues.from_dict(obj.get("values"))
return AccessorSparse(count, extensions, extras, indices, values)
def to_dict(self):
result = {}
result["count"] = from_int(self.count)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["indices"] = to_class(AccessorSparseIndices, self.indices)
result["values"] = to_class(AccessorSparseValues, self.values)
return result
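# --- Illustrative sketch (not part of the generated module) ---
# Decoding a minimal sparse block with the classes above. The bufferView
# indices and componentType are placeholders (5123 is UNSIGNED_SHORT); a real
# sparse accessor would reference actual buffer views in the glTF document.
def _example_accessor_sparse_roundtrip():
    raw = {
        "count": 2,
        "indices": {"bufferView": 1, "componentType": 5123},
        "values": {"bufferView": 2},
    }
    sparse = AccessorSparse.from_dict(raw)
    return sparse.to_dict()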
class Accessor:
"""A typed view into a bufferView. A bufferView contains raw binary data. An accessor
provides a typed view into a bufferView or a subset of a bufferView similar to how
WebGL's `vertexAttribPointer()` defines an attribute in a buffer.
"""
def __init__(self, buffer_view, byte_offset, component_type, count, extensions, extras, max, min, name, normalized,
sparse, type):
self.buffer_view = buffer_view
self.byte_offset = byte_offset
self.component_type = component_type
self.count = count
self.extensions = extensions
self.extras = extras
self.max = max
self.min = min
self.name = name
self.normalized = normalized
self.sparse = sparse
self.type = type
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_union([from_int, from_none], obj.get("bufferView"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
component_type = from_int(obj.get("componentType"))
count = from_int(obj.get("count"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
max = from_union([lambda x: from_list(from_float, x), from_none], obj.get("max"))
min = from_union([lambda x: from_list(from_float, x), from_none], obj.get("min"))
name = from_union([from_str, from_none], obj.get("name"))
normalized = from_union([from_bool, from_none], obj.get("normalized"))
sparse = from_union([AccessorSparse.from_dict, from_none], obj.get("sparse"))
type = from_str(obj.get("type"))
return Accessor(buffer_view, byte_offset, component_type, count, extensions, extras, max, min, name, normalized,
sparse, type)
def to_dict(self):
result = {}
result["bufferView"] = from_union([from_int, from_none], self.buffer_view)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["componentType"] = from_int(self.component_type)
result["count"] = from_int(self.count)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["max"] = from_union([lambda x: from_list(to_float, x), from_none], self.max)
result["min"] = from_union([lambda x: from_list(to_float, x), from_none], self.min)
result["name"] = from_union([from_str, from_none], self.name)
result["normalized"] = from_union([from_bool, from_none], self.normalized)
result["sparse"] = from_union([lambda x: to_class(AccessorSparse, x), from_none], self.sparse)
result["type"] = from_str(self.type)
return result
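# --- Illustrative sketch (not part of the generated module) ---
# Round-tripping a minimal accessor through Accessor.from_dict / to_dict. The
# field values are placeholders: componentType 5126 is FLOAT and "VEC3" is the
# element type, as in a typical position-attribute accessor.
def _example_accessor_roundtrip():
    raw = {"bufferView": 0, "componentType": 5126, "count": 3, "type": "VEC3"}
    accessor = Accessor.from_dict(raw)
    # Optional fields that were absent come back as None entries in the dict.
    return accessor.to_dict()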
class AnimationChannelTarget:
"""The index of the node and TRS property to target.
The index of the node and TRS property that an animation channel targets.
"""
def __init__(self, extensions, extras, node, path):
self.extensions = extensions
self.extras = extras
self.node = node
self.path = path
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
node = from_union([from_int, from_none], obj.get("node"))
path = from_str(obj.get("path"))
return AnimationChannelTarget(extensions, extras, node, path)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["node"] = from_union([from_int, from_none], self.node)
result["path"] = from_str(self.path)
return result
class AnimationChannel:
"""Targets an animation's sampler at a node's property."""
def __init__(self, extensions, extras, sampler, target):
self.extensions = extensions
self.extras = extras
self.sampler = sampler
self.target = target
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
sampler = from_int(obj.get("sampler"))
target = AnimationChannelTarget.from_dict(obj.get("target"))
return AnimationChannel(extensions, extras, sampler, target)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["sampler"] = from_int(self.sampler)
result["target"] = to_class(AnimationChannelTarget, self.target)
return result
class AnimationSampler:
"""Combines input and output accessors with an interpolation algorithm to define a keyframe
graph (but not its target).
"""
def __init__(self, extensions, extras, input, interpolation, output):
self.extensions = extensions
self.extras = extras
self.input = input
self.interpolation = interpolation
self.output = output
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
input = from_int(obj.get("input"))
interpolation = from_union([from_str, from_none], obj.get("interpolation"))
output = from_int(obj.get("output"))
return AnimationSampler(extensions, extras, input, interpolation, output)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["input"] = from_int(self.input)
result["interpolation"] = from_union([from_str, from_none], self.interpolation)
result["output"] = from_int(self.output)
return result
class Animation:
"""A keyframe animation."""
def __init__(self, channels, extensions, extras, name, samplers):
self.channels = channels
self.extensions = extensions
self.extras = extras
self.name = name
self.samplers = samplers
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
channels = from_list(AnimationChannel.from_dict, obj.get("channels"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
samplers = from_list(AnimationSampler.from_dict, obj.get("samplers"))
return Animation(channels, extensions, extras, name, samplers)
def to_dict(self):
result = {}
result["channels"] = from_list(lambda x: to_class(AnimationChannel, x), self.channels)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["samplers"] = from_list(lambda x: to_class(AnimationSampler, x), self.samplers)
return result
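# Illustrative sketch, not part of the generated module: a one-channel
# animation whose sampler reads keyframe times from accessor 0 and rotation
# values from accessor 1, targeting node 3's "rotation" property. All indices
# are hypothetical.
def _example_animation():
    anim = Animation.from_dict({
        "name": "spin",
        "channels": [{"sampler": 0, "target": {"node": 3, "path": "rotation"}}],
        "samplers": [{"input": 0, "interpolation": "LINEAR", "output": 1}],
    })
    return anim.to_dict()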
class Asset:
"""Metadata about the glTF asset."""
def __init__(self, copyright, extensions, extras, generator, min_version, version):
self.copyright = copyright
self.extensions = extensions
self.extras = extras
self.generator = generator
self.min_version = min_version
self.version = version
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
copyright = from_union([from_str, from_none], obj.get("copyright"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
generator = from_union([from_str, from_none], obj.get("generator"))
min_version = from_union([from_str, from_none], obj.get("minVersion"))
version = from_str(obj.get("version"))
return Asset(copyright, extensions, extras, generator, min_version, version)
def to_dict(self):
result = {}
result["copyright"] = from_union([from_str, from_none], self.copyright)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["generator"] = from_union([from_str, from_none], self.generator)
result["minVersion"] = from_union([from_str, from_none], self.min_version)
result["version"] = from_str(self.version)
return result
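# Illustrative sketch, not part of the generated module: the asset block every
# glTF file must carry. "version" is the only required field; the generator
# string is a hypothetical example.
def _example_asset():
    asset = Asset.from_dict({"version": "2.0", "generator": "example-exporter"})
    return asset.to_dict()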
class BufferView:
"""A view into a buffer generally representing a subset of the buffer."""
def __init__(self, buffer, byte_length, byte_offset, byte_stride, extensions, extras, name, target):
self.buffer = buffer
self.byte_length = byte_length
self.byte_offset = byte_offset
self.byte_stride = byte_stride
self.extensions = extensions
self.extras = extras
self.name = name
self.target = target
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer = from_int(obj.get("buffer"))
byte_length = from_int(obj.get("byteLength"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
byte_stride = from_union([from_int, from_none], obj.get("byteStride"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
target = from_union([from_int, from_none], obj.get("target"))
return BufferView(buffer, byte_length, byte_offset, byte_stride, extensions, extras, name, target)
def to_dict(self):
result = {}
result["buffer"] = from_int(self.buffer)
result["byteLength"] = from_int(self.byte_length)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["byteStride"] = from_union([from_int, from_none], self.byte_stride)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["target"] = from_union([from_int, from_none], self.target)
return result
class Buffer:
"""A buffer points to binary geometry, animation, or skins."""
def __init__(self, byte_length, extensions, extras, name, uri):
self.byte_length = byte_length
self.extensions = extensions
self.extras = extras
self.name = name
self.uri = uri
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
byte_length = from_int(obj.get("byteLength"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
uri = from_union([from_str, from_none], obj.get("uri"))
return Buffer(byte_length, extensions, extras, name, uri)
def to_dict(self):
result = {}
result["byteLength"] = from_int(self.byte_length)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["uri"] = from_union([from_str, from_none], self.uri)
return result
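# Illustrative sketch, not part of the generated module: a 12-byte buffer
# embedded as a base64 data URI (three little-endian float32 zeros) and a
# buffer view covering all of it. The target 34962 is GL's ARRAY_BUFFER; the
# indices are hypothetical.
def _example_buffer_and_view():
    buffer = Buffer.from_dict({
        "byteLength": 12,
        "uri": "data:application/octet-stream;base64,AAAAAAAAAAAAAAAA",
    })
    view = BufferView.from_dict(
        {"buffer": 0, "byteOffset": 0, "byteLength": 12, "target": 34962})
    return buffer.to_dict(), view.to_dict()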
class CameraOrthographic:
"""An orthographic camera containing properties to create an orthographic projection matrix."""
def __init__(self, extensions, extras, xmag, ymag, zfar, znear):
self.extensions = extensions
self.extras = extras
self.xmag = xmag
self.ymag = ymag
self.zfar = zfar
self.znear = znear
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
xmag = from_float(obj.get("xmag"))
ymag = from_float(obj.get("ymag"))
zfar = from_float(obj.get("zfar"))
znear = from_float(obj.get("znear"))
return CameraOrthographic(extensions, extras, xmag, ymag, zfar, znear)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["xmag"] = to_float(self.xmag)
result["ymag"] = to_float(self.ymag)
result["zfar"] = to_float(self.zfar)
result["znear"] = to_float(self.znear)
return result
class CameraPerspective:
"""A perspective camera containing properties to create a perspective projection matrix."""
def __init__(self, aspect_ratio, extensions, extras, yfov, zfar, znear):
self.aspect_ratio = aspect_ratio
self.extensions = extensions
self.extras = extras
self.yfov = yfov
self.zfar = zfar
self.znear = znear
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
aspect_ratio = from_union([from_float, from_none], obj.get("aspectRatio"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
yfov = from_float(obj.get("yfov"))
zfar = from_union([from_float, from_none], obj.get("zfar"))
znear = from_float(obj.get("znear"))
return CameraPerspective(aspect_ratio, extensions, extras, yfov, zfar, znear)
def to_dict(self):
result = {}
result["aspectRatio"] = from_union([to_float, from_none], self.aspect_ratio)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["yfov"] = to_float(self.yfov)
result["zfar"] = from_union([to_float, from_none], self.zfar)
result["znear"] = to_float(self.znear)
return result
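# Illustrative sketch, not part of the generated module: building the
# column-major projection matrix commonly associated with these parameters
# (OpenGL-style clip space, finite far plane). The normative formula lives in
# the glTF specification; this follows the usual convention and the argument
# defaults are hypothetical.
def _example_perspective_matrix(yfov=0.8, aspect_ratio=1.5, znear=0.1, zfar=100.0):
    import math
    cam = CameraPerspective.from_dict(
        {"aspectRatio": aspect_ratio, "yfov": yfov, "zfar": zfar, "znear": znear})
    t = math.tan(cam.yfov / 2.0)
    a, n, f = cam.aspect_ratio, cam.znear, cam.zfar
    return [
        1.0 / (a * t), 0.0, 0.0, 0.0,
        0.0, 1.0 / t, 0.0, 0.0,
        0.0, 0.0, (f + n) / (n - f), -1.0,
        0.0, 0.0, 2.0 * f * n / (n - f), 0.0,
    ]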
class Camera:
"""A camera's projection. A node can reference a camera to apply a transform to place the
camera in the scene.
"""
def __init__(self, extensions, extras, name, orthographic, perspective, type):
self.extensions = extensions
self.extras = extras
self.name = name
self.orthographic = orthographic
self.perspective = perspective
self.type = type
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
orthographic = from_union([CameraOrthographic.from_dict, from_none], obj.get("orthographic"))
perspective = from_union([CameraPerspective.from_dict, from_none], obj.get("perspective"))
type = from_str(obj.get("type"))
return Camera(extensions, extras, name, orthographic, perspective, type)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["orthographic"] = from_union([lambda x: to_class(CameraOrthographic, x), from_none], self.orthographic)
result["perspective"] = from_union([lambda x: to_class(CameraPerspective, x), from_none], self.perspective)
result["type"] = from_str(self.type)
return result
class Image:
"""Image data used to create a texture. Image can be referenced by URI or `bufferView`
index. `mimeType` is required in the latter case.
"""
def __init__(self, buffer_view, extensions, extras, mime_type, name, uri):
self.buffer_view = buffer_view
self.extensions = extensions
self.extras = extras
self.mime_type = mime_type
self.name = name
self.uri = uri
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_union([from_int, from_none], obj.get("bufferView"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
mime_type = from_union([from_str, from_none], obj.get("mimeType"))
name = from_union([from_str, from_none], obj.get("name"))
uri = from_union([from_str, from_none], obj.get("uri"))
return Image(buffer_view, extensions, extras, mime_type, name, uri)
def to_dict(self):
result = {}
result["bufferView"] = from_union([from_int, from_none], self.buffer_view)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["mimeType"] = from_union([from_str, from_none], self.mime_type)
result["name"] = from_union([from_str, from_none], self.name)
result["uri"] = from_union([from_str, from_none], self.uri)
return result
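# Illustrative sketch, not part of the generated module: the two ways an image
# can be referenced -- by URI, or by bufferView index plus a mimeType (required
# in that case). The file name and index are hypothetical.
def _example_images():
    external = Image.from_dict({"uri": "textures/albedo.png"})
    embedded = Image.from_dict({"bufferView": 4, "mimeType": "image/png"})
    return external.to_dict(), embedded.to_dict()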
class TextureInfo:
"""The emissive map texture.
The base color texture.
The metallic-roughness texture.
Reference to a texture.
"""
def __init__(self, extensions, extras, index, tex_coord):
self.extensions = extensions
self.extras = extras
self.index = index
self.tex_coord = tex_coord
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
index = from_int(obj.get("index"))
tex_coord = from_union([from_int, from_none], obj.get("texCoord"))
return TextureInfo(extensions, extras, index, tex_coord)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["index"] = from_int(self.index)
result["texCoord"] = from_union([from_int, from_none], self.tex_coord)
return result
class MaterialNormalTextureInfoClass:
"""The normal map texture.
Reference to a texture.
"""
def __init__(self, extensions, extras, index, scale, tex_coord):
self.extensions = extensions
self.extras = extras
self.index = index
self.scale = scale
self.tex_coord = tex_coord
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
index = from_int(obj.get("index"))
scale = from_union([from_float, from_none], obj.get("scale"))
tex_coord = from_union([from_int, from_none], obj.get("texCoord"))
return MaterialNormalTextureInfoClass(extensions, extras, index, scale, tex_coord)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["index"] = from_int(self.index)
result["scale"] = from_union([to_float, from_none], self.scale)
result["texCoord"] = from_union([from_int, from_none], self.tex_coord)
return result
class MaterialOcclusionTextureInfoClass:
"""The occlusion map texture.
Reference to a texture.
"""
def __init__(self, extensions, extras, index, strength, tex_coord):
self.extensions = extensions
self.extras = extras
self.index = index
self.strength = strength
self.tex_coord = tex_coord
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
index = from_int(obj.get("index"))
strength = from_union([from_float, from_none], obj.get("strength"))
tex_coord = from_union([from_int, from_none], obj.get("texCoord"))
return MaterialOcclusionTextureInfoClass(extensions, extras, index, strength, tex_coord)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["index"] = from_int(self.index)
result["strength"] = from_union([to_float, from_none], self.strength)
result["texCoord"] = from_union([from_int, from_none], self.tex_coord)
return result
class MaterialPBRMetallicRoughness:
"""A set of parameter values that are used to define the metallic-roughness material model
from Physically-Based Rendering (PBR) methodology. When not specified, all the default
values of `pbrMetallicRoughness` apply.
A set of parameter values that are used to define the metallic-roughness material model
from Physically-Based Rendering (PBR) methodology.
"""
def __init__(self, base_color_factor, base_color_texture, extensions, extras, metallic_factor,
metallic_roughness_texture, roughness_factor):
self.base_color_factor = base_color_factor
self.base_color_texture = base_color_texture
self.extensions = extensions
self.extras = extras
self.metallic_factor = metallic_factor
self.metallic_roughness_texture = metallic_roughness_texture
self.roughness_factor = roughness_factor
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
base_color_factor = from_union([lambda x: from_list(from_float, x), from_none], obj.get("baseColorFactor"))
base_color_texture = from_union([TextureInfo.from_dict, from_none], obj.get("baseColorTexture"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
metallic_factor = from_union([from_float, from_none], obj.get("metallicFactor"))
metallic_roughness_texture = from_union([TextureInfo.from_dict, from_none], obj.get("metallicRoughnessTexture"))
roughness_factor = from_union([from_float, from_none], obj.get("roughnessFactor"))
return MaterialPBRMetallicRoughness(base_color_factor, base_color_texture, extensions, extras, metallic_factor,
metallic_roughness_texture, roughness_factor)
def to_dict(self):
result = {}
result["baseColorFactor"] = from_union([lambda x: from_list(to_float, x), from_none], self.base_color_factor)
result["baseColorTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none],
self.base_color_texture)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["metallicFactor"] = from_union([to_float, from_none], self.metallic_factor)
result["metallicRoughnessTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none],
self.metallic_roughness_texture)
result["roughnessFactor"] = from_union([to_float, from_none], self.roughness_factor)
return result
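# Illustrative sketch, not part of the generated module: a metallic-roughness
# block with a constant base color and no textures; the factor values are
# hypothetical (omitted factors take their spec defaults).
def _example_pbr_metallic_roughness():
    pbr = MaterialPBRMetallicRoughness.from_dict({
        "baseColorFactor": [0.8, 0.1, 0.1, 1.0],
        "metallicFactor": 0.0,
        "roughnessFactor": 0.6,
    })
    return pbr.to_dict()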
class Material:
"""The material appearance of a primitive."""
def __init__(self, alpha_cutoff, alpha_mode, double_sided, emissive_factor, emissive_texture, extensions, extras,
name, normal_texture, occlusion_texture, pbr_metallic_roughness):
self.alpha_cutoff = alpha_cutoff
self.alpha_mode = alpha_mode
self.double_sided = double_sided
self.emissive_factor = emissive_factor
self.emissive_texture = emissive_texture
self.extensions = extensions
self.extras = extras
self.name = name
self.normal_texture = normal_texture
self.occlusion_texture = occlusion_texture
self.pbr_metallic_roughness = pbr_metallic_roughness
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
alpha_cutoff = from_union([from_float, from_none], obj.get("alphaCutoff"))
alpha_mode = from_union([from_str, from_none], obj.get("alphaMode"))
double_sided = from_union([from_bool, from_none], obj.get("doubleSided"))
emissive_factor = from_union([lambda x: from_list(from_float, x), from_none], obj.get("emissiveFactor"))
emissive_texture = from_union([TextureInfo.from_dict, from_none], obj.get("emissiveTexture"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
normal_texture = from_union([MaterialNormalTextureInfoClass.from_dict, from_none], obj.get("normalTexture"))
occlusion_texture = from_union([MaterialOcclusionTextureInfoClass.from_dict, from_none],
obj.get("occlusionTexture"))
pbr_metallic_roughness = from_union([MaterialPBRMetallicRoughness.from_dict, from_none],
obj.get("pbrMetallicRoughness"))
return Material(alpha_cutoff, alpha_mode, double_sided, emissive_factor, emissive_texture, extensions, extras,
name, normal_texture, occlusion_texture, pbr_metallic_roughness)
def to_dict(self):
result = {}
result["alphaCutoff"] = from_union([to_float, from_none], self.alpha_cutoff)
result["alphaMode"] = from_union([from_str, from_none], self.alpha_mode)
result["doubleSided"] = from_union([from_bool, from_none], self.double_sided)
result["emissiveFactor"] = from_union([lambda x: from_list(to_float, x), from_none], self.emissive_factor)
result["emissiveTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none], self.emissive_texture)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["normalTexture"] = from_union([lambda x: to_class(MaterialNormalTextureInfoClass, x), from_none],
self.normal_texture)
result["occlusionTexture"] = from_union([lambda x: to_class(MaterialOcclusionTextureInfoClass, x), from_none],
self.occlusion_texture)
result["pbrMetallicRoughness"] = from_union([lambda x: to_class(MaterialPBRMetallicRoughness, x), from_none],
self.pbr_metallic_roughness)
return result
class MeshPrimitive:
"""Geometry to be rendered with the given material."""
def __init__(self, attributes, extensions, extras, indices, material, mode, targets):
self.attributes = attributes
self.extensions = extensions
self.extras = extras
self.indices = indices
self.material = material
self.mode = mode
self.targets = targets
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
attributes = from_dict(from_int, obj.get("attributes"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
indices = from_union([from_int, from_none], obj.get("indices"))
material = from_union([from_int, from_none], obj.get("material"))
mode = from_union([from_int, from_none], obj.get("mode"))
targets = from_union([lambda x: from_list(lambda x: from_dict(from_int, x), x), from_none], obj.get("targets"))
return MeshPrimitive(attributes, extensions, extras, indices, material, mode, targets)
def to_dict(self):
result = {}
result["attributes"] = from_dict(from_int, self.attributes)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["indices"] = from_union([from_int, from_none], self.indices)
result["material"] = from_union([from_int, from_none], self.material)
result["mode"] = from_union([from_int, from_none], self.mode)
result["targets"] = from_union([lambda x: from_list(lambda x: from_dict(from_int, x), x), from_none],
self.targets)
return result
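# Illustrative sketch, not part of the generated module: a triangle primitive
# (mode 4, GL TRIANGLES) whose attributes map semantic names to accessor
# indices; all indices are hypothetical.
def _example_mesh_primitive():
    prim = MeshPrimitive.from_dict({
        "attributes": {"POSITION": 0, "NORMAL": 1, "TEXCOORD_0": 2},
        "indices": 3,
        "material": 0,
        "mode": 4,
    })
    return prim.to_dict()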
class Mesh:
"""A set of primitives to be rendered. A node can contain one mesh. A node's transform
places the mesh in the scene.
"""
def __init__(self, extensions, extras, name, primitives, weights):
self.extensions = extensions
self.extras = extras
self.name = name
self.primitives = primitives
self.weights = weights
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
primitives = from_list(MeshPrimitive.from_dict, obj.get("primitives"))
weights = from_union([lambda x: from_list(from_float, x), from_none], obj.get("weights"))
return Mesh(extensions, extras, name, primitives, weights)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["primitives"] = from_list(lambda x: to_class(MeshPrimitive, x), self.primitives)
result["weights"] = from_union([lambda x: from_list(to_float, x), from_none], self.weights)
return result
class Node:
"""A node in the node hierarchy. When the node contains `skin`, all `mesh.primitives` must
contain `JOINTS_0` and `WEIGHTS_0` attributes. A node can have either a `matrix` or any
combination of `translation`/`rotation`/`scale` (TRS) properties. TRS properties are
converted to matrices and postmultiplied in the `T * R * S` order to compose the
transformation matrix; first the scale is applied to the vertices, then the rotation, and
then the translation. If none are provided, the transform is the identity. When a node is
targeted for animation (referenced by an animation.channel.target), only TRS properties
may be present; `matrix` will not be present.
"""
def __init__(self, camera, children, extensions, extras, matrix, mesh, name, rotation, scale, skin, translation,
weights):
self.camera = camera
self.children = children
self.extensions = extensions
self.extras = extras
self.matrix = matrix
self.mesh = mesh
self.name = name
self.rotation = rotation
self.scale = scale
self.skin = skin
self.translation = translation
self.weights = weights
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
camera = from_union([from_int, from_none], obj.get("camera"))
children = from_union([lambda x: from_list(from_int, x), from_none], obj.get("children"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
matrix = from_union([lambda x: from_list(from_float, x), from_none], obj.get("matrix"))
mesh = from_union([from_int, from_none], obj.get("mesh"))
name = from_union([from_str, from_none], obj.get("name"))
rotation = from_union([lambda x: from_list(from_float, x), from_none], obj.get("rotation"))
scale = from_union([lambda x: from_list(from_float, x), from_none], obj.get("scale"))
skin = from_union([from_int, from_none], obj.get("skin"))
translation = from_union([lambda x: from_list(from_float, x), from_none], obj.get("translation"))
weights = from_union([lambda x: from_list(from_float, x), from_none], obj.get("weights"))
return Node(camera, children, extensions, extras, matrix, mesh, name, rotation, scale, skin, translation,
weights)
def to_dict(self):
result = {}
result["camera"] = from_union([from_int, from_none], self.camera)
result["children"] = from_union([lambda x: from_list(from_int, x), from_none], self.children)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["matrix"] = from_union([lambda x: from_list(to_float, x), from_none], self.matrix)
result["mesh"] = from_union([from_int, from_none], self.mesh)
result["name"] = from_union([from_str, from_none], self.name)
result["rotation"] = from_union([lambda x: from_list(to_float, x), from_none], self.rotation)
result["scale"] = from_union([lambda x: from_list(to_float, x), from_none], self.scale)
result["skin"] = from_union([from_int, from_none], self.skin)
result["translation"] = from_union([lambda x: from_list(to_float, x), from_none], self.translation)
result["weights"] = from_union([lambda x: from_list(to_float, x), from_none], self.weights)
return result
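# Illustrative sketch, not part of the generated module: a node positioned with
# separate TRS properties (hypothetical values). As described in the docstring
# above, an equivalent `matrix` would be the product T * R * S, and an animated
# node may only use the TRS form.
def _example_node_trs():
    node = Node.from_dict({
        "name": "example",
        "mesh": 0,
        "translation": [0.0, 1.0, 0.0],
        "rotation": [0.0, 0.0, 0.0, 1.0],  # identity quaternion, stored as (x, y, z, w)
        "scale": [1.0, 1.0, 1.0],
    })
    return node.to_dict()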
class Sampler:
"""Texture sampler properties for filtering and wrapping modes."""
def __init__(self, extensions, extras, mag_filter, min_filter, name, wrap_s, wrap_t):
self.extensions = extensions
self.extras = extras
self.mag_filter = mag_filter
self.min_filter = min_filter
self.name = name
self.wrap_s = wrap_s
self.wrap_t = wrap_t
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
mag_filter = from_union([from_int, from_none], obj.get("magFilter"))
min_filter = from_union([from_int, from_none], obj.get("minFilter"))
name = from_union([from_str, from_none], obj.get("name"))
wrap_s = from_union([from_int, from_none], obj.get("wrapS"))
wrap_t = from_union([from_int, from_none], obj.get("wrapT"))
return Sampler(extensions, extras, mag_filter, min_filter, name, wrap_s, wrap_t)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["magFilter"] = from_union([from_int, from_none], self.mag_filter)
result["minFilter"] = from_union([from_int, from_none], self.min_filter)
result["name"] = from_union([from_str, from_none], self.name)
result["wrapS"] = from_union([from_int, from_none], self.wrap_s)
result["wrapT"] = from_union([from_int, from_none], self.wrap_t)
return result
class Scene:
"""The root nodes of a scene."""
def __init__(self, extensions, extras, name, nodes):
self.extensions = extensions
self.extras = extras
self.name = name
self.nodes = nodes
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
nodes = from_union([lambda x: from_list(from_int, x), from_none], obj.get("nodes"))
return Scene(extensions, extras, name, nodes)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["nodes"] = from_union([lambda x: from_list(from_int, x), from_none], self.nodes)
return result
class Skin:
"""Joints and matrices defining a skin."""
def __init__(self, extensions, extras, inverse_bind_matrices, joints, name, skeleton):
self.extensions = extensions
self.extras = extras
self.inverse_bind_matrices = inverse_bind_matrices
self.joints = joints
self.name = name
self.skeleton = skeleton
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
inverse_bind_matrices = from_union([from_int, from_none], obj.get("inverseBindMatrices"))
joints = from_list(from_int, obj.get("joints"))
name = from_union([from_str, from_none], obj.get("name"))
skeleton = from_union([from_int, from_none], obj.get("skeleton"))
return Skin(extensions, extras, inverse_bind_matrices, joints, name, skeleton)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["inverseBindMatrices"] = from_union([from_int, from_none], self.inverse_bind_matrices)
result["joints"] = from_list(from_int, self.joints)
result["name"] = from_union([from_str, from_none], self.name)
result["skeleton"] = from_union([from_int, from_none], self.skeleton)
return result
class Texture:
"""A texture and its sampler."""
def __init__(self, extensions, extras, name, sampler, source):
self.extensions = extensions
self.extras = extras
self.name = name
self.sampler = sampler
self.source = source
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
sampler = from_union([from_int, from_none], obj.get("sampler"))
source = from_int(obj.get("source"))
return Texture(extensions, extras, name, sampler, source)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["sampler"] = from_union([from_int, from_none], self.sampler)
result["source"] = from_int(self.source) # most viewers can't handle missing sources
return result
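# Illustrative sketch, not part of the generated module: a texture combining
# image 0 with sampler 0. The sampler uses linear filtering (9729) and repeat
# wrapping (10497); all indices are hypothetical.
def _example_texture():
    sampler = Sampler.from_dict(
        {"magFilter": 9729, "minFilter": 9729, "wrapS": 10497, "wrapT": 10497})
    texture = Texture.from_dict({"sampler": 0, "source": 0})
    return sampler.to_dict(), texture.to_dict()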
class Gltf:
"""The root object for a glTF asset."""
def __init__(self, accessors, animations, asset, buffers, buffer_views, cameras, extensions, extensions_required,
extensions_used, extras, images, materials, meshes, nodes, samplers, scene, scenes, skins, textures):
self.accessors = accessors
self.animations = animations
self.asset = asset
self.buffers = buffers
self.buffer_views = buffer_views
self.cameras = cameras
self.extensions = extensions
self.extensions_required = extensions_required
self.extensions_used = extensions_used
self.extras = extras
self.images = images
self.materials = materials
self.meshes = meshes
self.nodes = nodes
self.samplers = samplers
self.scene = scene
self.scenes = scenes
self.skins = skins
self.textures = textures
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
accessors = from_union([lambda x: from_list(Accessor.from_dict, x), from_none], obj.get("accessors"))
animations = from_union([lambda x: from_list(Animation.from_dict, x), from_none], obj.get("animations"))
asset = Asset.from_dict(obj.get("asset"))
buffers = from_union([lambda x: from_list(Buffer.from_dict, x), from_none], obj.get("buffers"))
buffer_views = from_union([lambda x: from_list(BufferView.from_dict, x), from_none], obj.get("bufferViews"))
cameras = from_union([lambda x: from_list(Camera.from_dict, x), from_none], obj.get("cameras"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extensions_required = from_union([lambda x: from_list(from_str, x), from_none], obj.get("extensionsRequired"))
extensions_used = from_union([lambda x: from_list(from_str, x), from_none], obj.get("extensionsUsed"))
extras = obj.get("extras")
images = from_union([lambda x: from_list(Image.from_dict, x), from_none], obj.get("images"))
materials = from_union([lambda x: from_list(Material.from_dict, x), from_none], obj.get("materials"))
meshes = from_union([lambda x: from_list(Mesh.from_dict, x), from_none], obj.get("meshes"))
nodes = from_union([lambda x: from_list(Node.from_dict, x), from_none], obj.get("nodes"))
samplers = from_union([lambda x: from_list(Sampler.from_dict, x), from_none], obj.get("samplers"))
scene = from_union([from_int, from_none], obj.get("scene"))
scenes = from_union([lambda x: from_list(Scene.from_dict, x), from_none], obj.get("scenes"))
skins = from_union([lambda x: from_list(Skin.from_dict, x), from_none], obj.get("skins"))
textures = from_union([lambda x: from_list(Texture.from_dict, x), from_none], obj.get("textures"))
return Gltf(accessors, animations, asset, buffers, buffer_views, cameras, extensions, extensions_required,
extensions_used, extras, images, materials, meshes, nodes, samplers, scene, scenes, skins, textures)
def to_dict(self):
result = {}
result["accessors"] = from_union([lambda x: from_list(lambda x: to_class(Accessor, x), x), from_none],
self.accessors)
result["animations"] = from_union([lambda x: from_list(lambda x: to_class(Animation, x), x), from_none],
self.animations)
result["asset"] = to_class(Asset, self.asset)
result["buffers"] = from_union([lambda x: from_list(lambda x: to_class(Buffer, x), x), from_none], self.buffers)
result["bufferViews"] = from_union([lambda x: from_list(lambda x: to_class(BufferView, x), x), from_none],
self.buffer_views)
result["cameras"] = from_union([lambda x: from_list(lambda x: to_class(Camera, x), x), from_none], self.cameras)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extensionsRequired"] = from_union([lambda x: from_list(from_str, x), from_none],
self.extensions_required)
result["extensionsUsed"] = from_union([lambda x: from_list(from_str, x), from_none], self.extensions_used)
result["extras"] = self.extras
result["images"] = from_union([lambda x: from_list(lambda x: to_class(Image, x), x), from_none], self.images)
result["materials"] = from_union([lambda x: from_list(lambda x: to_class(Material, x), x), from_none],
self.materials)
result["meshes"] = from_union([lambda x: from_list(lambda x: to_class(Mesh, x), x), from_none], self.meshes)
result["nodes"] = from_union([lambda x: from_list(lambda x: to_class(Node, x), x), from_none], self.nodes)
result["samplers"] = from_union([lambda x: from_list(lambda x: to_class(Sampler, x), x), from_none],
self.samplers)
result["scene"] = from_union([from_int, from_none], self.scene)
result["scenes"] = from_union([lambda x: from_list(lambda x: to_class(Scene, x), x), from_none], self.scenes)
result["skins"] = from_union([lambda x: from_list(lambda x: to_class(Skin, x), x), from_none], self.skins)
result["textures"] = from_union([lambda x: from_list(lambda x: to_class(Texture, x), x), from_none],
self.textures)
return result
def gltf_from_dict(s):
return Gltf.from_dict(s)
def gltf_to_dict(x):
return to_class(Gltf, x)
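# Illustrative sketch, not part of the generated module: the smallest useful
# document -- one scene containing one empty node -- pushed through
# gltf_from_dict and back out through gltf_to_dict.
def _example_minimal_gltf_roundtrip():
    doc = gltf_from_dict({
        "asset": {"version": "2.0"},
        "scene": 0,
        "scenes": [{"nodes": [0]}],
        "nodes": [{"name": "root"}],
    })
    return gltf_to_dict(doc)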
import sys
import traceback
from io_scene_gltf2.io.com import gltf2_io_debug
def from_int(x):
assert isinstance(x, int) and not isinstance(x, bool)
return x
def from_none(x):
assert x is None
return x
def from_union(fs, x):
    # Try each converter in turn and return the result of the first one that
    # accepts x. If none of them does, dump the collected tracebacks through
    # the add-on's debug logger before failing, so the offending glTF property
    # can be located.
    tracebacks = []
    for f in fs:
        try:
            return f(x)
        except AssertionError:
            _, _, tb = sys.exc_info()
            tracebacks.append(tb)
    for tb in tracebacks:
        traceback.print_tb(tb)
        tb_info = traceback.extract_tb(tb)
        for tbi in tb_info:
            filename, line, func, text = tbi
            gltf2_io_debug.print_console('ERROR', 'An error occurred on line {} in statement {}'.format(line, text))
    assert False
def from_dict(f, x):
assert isinstance(x, dict)
return {k: f(v) for (k, v) in x.items()}
def to_class(c, x):
assert isinstance(x, c)
return x.to_dict()
def from_list(f, x):
assert isinstance(x, list)
return [f(y) for y in x]
def from_float(x):
assert isinstance(x, (float, int)) and not isinstance(x, bool)
return float(x)
def from_str(x):
assert isinstance(x, str)
return x
def from_bool(x):
assert isinstance(x, bool)
return x
def to_float(x):
assert isinstance(x, float)
return x
class AccessorSparseIndices:
def __init__(self, buffer_view, byte_offset, component_type, extensions, extras):
self.buffer_view = buffer_view
self.byte_offset = byte_offset
self.component_type = component_type
self.extensions = extensions
self.extras = extras
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_int(obj.get("bufferView"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
component_type = from_int(obj.get("componentType"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
return AccessorSparseIndices(buffer_view, byte_offset, component_type, extensions, extras)
def to_dict(self):
result = {}
result["bufferView"] = from_int(self.buffer_view)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["componentType"] = from_int(self.component_type)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
return result
class AccessorSparseValues:
def __init__(self, buffer_view, byte_offset, extensions, extras):
self.buffer_view = buffer_view
self.byte_offset = byte_offset
self.extensions = extensions
self.extras = extras
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_int(obj.get("bufferView"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
return AccessorSparseValues(buffer_view, byte_offset, extensions, extras)
def to_dict(self):
result = {}
result["bufferView"] = from_int(self.buffer_view)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
return result
class AccessorSparse:
def __init__(self, count, extensions, extras, indices, values):
self.count = count
self.extensions = extensions
self.extras = extras
self.indices = indices
self.values = values
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
count = from_int(obj.get("count"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
indices = AccessorSparseIndices.from_dict(obj.get("indices"))
values = AccessorSparseValues.from_dict(obj.get("values"))
return AccessorSparse(count, extensions, extras, indices, values)
def to_dict(self):
result = {}
result["count"] = from_int(self.count)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["indices"] = to_class(AccessorSparseIndices, self.indices)
result["values"] = to_class(AccessorSparseValues, self.values)
return result
class Accessor:
def __init__(self, buffer_view, byte_offset, component_type, count, extensions, extras, max, min, name, normalized,
sparse, type):
self.buffer_view = buffer_view
self.byte_offset = byte_offset
self.component_type = component_type
self.count = count
self.extensions = extensions
self.extras = extras
self.max = max
self.min = min
self.name = name
self.normalized = normalized
self.sparse = sparse
self.type = type
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_union([from_int, from_none], obj.get("bufferView"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
component_type = from_int(obj.get("componentType"))
count = from_int(obj.get("count"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
max = from_union([lambda x: from_list(from_float, x), from_none], obj.get("max"))
min = from_union([lambda x: from_list(from_float, x), from_none], obj.get("min"))
name = from_union([from_str, from_none], obj.get("name"))
normalized = from_union([from_bool, from_none], obj.get("normalized"))
sparse = from_union([AccessorSparse.from_dict, from_none], obj.get("sparse"))
type = from_str(obj.get("type"))
return Accessor(buffer_view, byte_offset, component_type, count, extensions, extras, max, min, name, normalized,
sparse, type)
def to_dict(self):
result = {}
result["bufferView"] = from_union([from_int, from_none], self.buffer_view)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["componentType"] = from_int(self.component_type)
result["count"] = from_int(self.count)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["max"] = from_union([lambda x: from_list(to_float, x), from_none], self.max)
result["min"] = from_union([lambda x: from_list(to_float, x), from_none], self.min)
result["name"] = from_union([from_str, from_none], self.name)
result["normalized"] = from_union([from_bool, from_none], self.normalized)
result["sparse"] = from_union([lambda x: to_class(AccessorSparse, x), from_none], self.sparse)
result["type"] = from_str(self.type)
return result
class AnimationChannelTarget:
def __init__(self, extensions, extras, node, path):
self.extensions = extensions
self.extras = extras
self.node = node
self.path = path
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
node = from_union([from_int, from_none], obj.get("node"))
path = from_str(obj.get("path"))
return AnimationChannelTarget(extensions, extras, node, path)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["node"] = from_union([from_int, from_none], self.node)
result["path"] = from_str(self.path)
return result
class AnimationChannel:
def __init__(self, extensions, extras, sampler, target):
self.extensions = extensions
self.extras = extras
self.sampler = sampler
self.target = target
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
sampler = from_int(obj.get("sampler"))
target = AnimationChannelTarget.from_dict(obj.get("target"))
return AnimationChannel(extensions, extras, sampler, target)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["sampler"] = from_int(self.sampler)
result["target"] = to_class(AnimationChannelTarget, self.target)
return result
class AnimationSampler:
def __init__(self, extensions, extras, input, interpolation, output):
self.extensions = extensions
self.extras = extras
self.input = input
self.interpolation = interpolation
self.output = output
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
input = from_int(obj.get("input"))
interpolation = from_union([from_str, from_none], obj.get("interpolation"))
output = from_int(obj.get("output"))
return AnimationSampler(extensions, extras, input, interpolation, output)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["input"] = from_int(self.input)
result["interpolation"] = from_union([from_str, from_none], self.interpolation)
result["output"] = from_int(self.output)
return result
class Animation:
def __init__(self, channels, extensions, extras, name, samplers):
self.channels = channels
self.extensions = extensions
self.extras = extras
self.name = name
self.samplers = samplers
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
channels = from_list(AnimationChannel.from_dict, obj.get("channels"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
samplers = from_list(AnimationSampler.from_dict, obj.get("samplers"))
return Animation(channels, extensions, extras, name, samplers)
def to_dict(self):
result = {}
result["channels"] = from_list(lambda x: to_class(AnimationChannel, x), self.channels)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["samplers"] = from_list(lambda x: to_class(AnimationSampler, x), self.samplers)
return result
class Asset:
def __init__(self, copyright, extensions, extras, generator, min_version, version):
self.copyright = copyright
self.extensions = extensions
self.extras = extras
self.generator = generator
self.min_version = min_version
self.version = version
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
copyright = from_union([from_str, from_none], obj.get("copyright"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
generator = from_union([from_str, from_none], obj.get("generator"))
min_version = from_union([from_str, from_none], obj.get("minVersion"))
version = from_str(obj.get("version"))
return Asset(copyright, extensions, extras, generator, min_version, version)
def to_dict(self):
result = {}
result["copyright"] = from_union([from_str, from_none], self.copyright)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["generator"] = from_union([from_str, from_none], self.generator)
result["minVersion"] = from_union([from_str, from_none], self.min_version)
result["version"] = from_str(self.version)
return result
class BufferView:
def __init__(self, buffer, byte_length, byte_offset, byte_stride, extensions, extras, name, target):
self.buffer = buffer
self.byte_length = byte_length
self.byte_offset = byte_offset
self.byte_stride = byte_stride
self.extensions = extensions
self.extras = extras
self.name = name
self.target = target
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer = from_int(obj.get("buffer"))
byte_length = from_int(obj.get("byteLength"))
byte_offset = from_union([from_int, from_none], obj.get("byteOffset"))
byte_stride = from_union([from_int, from_none], obj.get("byteStride"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
target = from_union([from_int, from_none], obj.get("target"))
return BufferView(buffer, byte_length, byte_offset, byte_stride, extensions, extras, name, target)
def to_dict(self):
result = {}
result["buffer"] = from_int(self.buffer)
result["byteLength"] = from_int(self.byte_length)
result["byteOffset"] = from_union([from_int, from_none], self.byte_offset)
result["byteStride"] = from_union([from_int, from_none], self.byte_stride)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["target"] = from_union([from_int, from_none], self.target)
return result
class Buffer:
def __init__(self, byte_length, extensions, extras, name, uri):
self.byte_length = byte_length
self.extensions = extensions
self.extras = extras
self.name = name
self.uri = uri
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
byte_length = from_int(obj.get("byteLength"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
uri = from_union([from_str, from_none], obj.get("uri"))
return Buffer(byte_length, extensions, extras, name, uri)
def to_dict(self):
result = {}
result["byteLength"] = from_int(self.byte_length)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["uri"] = from_union([from_str, from_none], self.uri)
return result
class CameraOrthographic:
def __init__(self, extensions, extras, xmag, ymag, zfar, znear):
self.extensions = extensions
self.extras = extras
self.xmag = xmag
self.ymag = ymag
self.zfar = zfar
self.znear = znear
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
xmag = from_float(obj.get("xmag"))
ymag = from_float(obj.get("ymag"))
zfar = from_float(obj.get("zfar"))
znear = from_float(obj.get("znear"))
return CameraOrthographic(extensions, extras, xmag, ymag, zfar, znear)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["xmag"] = to_float(self.xmag)
result["ymag"] = to_float(self.ymag)
result["zfar"] = to_float(self.zfar)
result["znear"] = to_float(self.znear)
return result
class CameraPerspective:
def __init__(self, aspect_ratio, extensions, extras, yfov, zfar, znear):
self.aspect_ratio = aspect_ratio
self.extensions = extensions
self.extras = extras
self.yfov = yfov
self.zfar = zfar
self.znear = znear
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
aspect_ratio = from_union([from_float, from_none], obj.get("aspectRatio"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
yfov = from_float(obj.get("yfov"))
zfar = from_union([from_float, from_none], obj.get("zfar"))
znear = from_float(obj.get("znear"))
return CameraPerspective(aspect_ratio, extensions, extras, yfov, zfar, znear)
def to_dict(self):
result = {}
result["aspectRatio"] = from_union([to_float, from_none], self.aspect_ratio)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["yfov"] = to_float(self.yfov)
result["zfar"] = from_union([to_float, from_none], self.zfar)
result["znear"] = to_float(self.znear)
return result
class Camera:
def __init__(self, extensions, extras, name, orthographic, perspective, type):
self.extensions = extensions
self.extras = extras
self.name = name
self.orthographic = orthographic
self.perspective = perspective
self.type = type
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
orthographic = from_union([CameraOrthographic.from_dict, from_none], obj.get("orthographic"))
perspective = from_union([CameraPerspective.from_dict, from_none], obj.get("perspective"))
type = from_str(obj.get("type"))
return Camera(extensions, extras, name, orthographic, perspective, type)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["orthographic"] = from_union([lambda x: to_class(CameraOrthographic, x), from_none], self.orthographic)
result["perspective"] = from_union([lambda x: to_class(CameraPerspective, x), from_none], self.perspective)
result["type"] = from_str(self.type)
return result
class Image:
def __init__(self, buffer_view, extensions, extras, mime_type, name, uri):
self.buffer_view = buffer_view
self.extensions = extensions
self.extras = extras
self.mime_type = mime_type
self.name = name
self.uri = uri
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
buffer_view = from_union([from_int, from_none], obj.get("bufferView"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
mime_type = from_union([from_str, from_none], obj.get("mimeType"))
name = from_union([from_str, from_none], obj.get("name"))
uri = from_union([from_str, from_none], obj.get("uri"))
return Image(buffer_view, extensions, extras, mime_type, name, uri)
def to_dict(self):
result = {}
result["bufferView"] = from_union([from_int, from_none], self.buffer_view)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["mimeType"] = from_union([from_str, from_none], self.mime_type)
result["name"] = from_union([from_str, from_none], self.name)
result["uri"] = from_union([from_str, from_none], self.uri)
return result
class TextureInfo:
def __init__(self, extensions, extras, index, tex_coord):
self.extensions = extensions
self.extras = extras
self.index = index
self.tex_coord = tex_coord
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
index = from_int(obj.get("index"))
tex_coord = from_union([from_int, from_none], obj.get("texCoord"))
return TextureInfo(extensions, extras, index, tex_coord)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["index"] = from_int(self.index)
result["texCoord"] = from_union([from_int, from_none], self.tex_coord)
return result
class MaterialNormalTextureInfoClass:
def __init__(self, extensions, extras, index, scale, tex_coord):
self.extensions = extensions
self.extras = extras
self.index = index
self.scale = scale
self.tex_coord = tex_coord
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
index = from_int(obj.get("index"))
scale = from_union([from_float, from_none], obj.get("scale"))
tex_coord = from_union([from_int, from_none], obj.get("texCoord"))
return MaterialNormalTextureInfoClass(extensions, extras, index, scale, tex_coord)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["index"] = from_int(self.index)
result["scale"] = from_union([to_float, from_none], self.scale)
result["texCoord"] = from_union([from_int, from_none], self.tex_coord)
return result
class MaterialOcclusionTextureInfoClass:
def __init__(self, extensions, extras, index, strength, tex_coord):
self.extensions = extensions
self.extras = extras
self.index = index
self.strength = strength
self.tex_coord = tex_coord
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
index = from_int(obj.get("index"))
strength = from_union([from_float, from_none], obj.get("strength"))
tex_coord = from_union([from_int, from_none], obj.get("texCoord"))
return MaterialOcclusionTextureInfoClass(extensions, extras, index, strength, tex_coord)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["index"] = from_int(self.index)
result["strength"] = from_union([to_float, from_none], self.strength)
result["texCoord"] = from_union([from_int, from_none], self.tex_coord)
return result
class MaterialPBRMetallicRoughness:
def __init__(self, base_color_factor, base_color_texture, extensions, extras, metallic_factor,
metallic_roughness_texture, roughness_factor):
self.base_color_factor = base_color_factor
self.base_color_texture = base_color_texture
self.extensions = extensions
self.extras = extras
self.metallic_factor = metallic_factor
self.metallic_roughness_texture = metallic_roughness_texture
self.roughness_factor = roughness_factor
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
base_color_factor = from_union([lambda x: from_list(from_float, x), from_none], obj.get("baseColorFactor"))
base_color_texture = from_union([TextureInfo.from_dict, from_none], obj.get("baseColorTexture"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
metallic_factor = from_union([from_float, from_none], obj.get("metallicFactor"))
metallic_roughness_texture = from_union([TextureInfo.from_dict, from_none], obj.get("metallicRoughnessTexture"))
roughness_factor = from_union([from_float, from_none], obj.get("roughnessFactor"))
return MaterialPBRMetallicRoughness(base_color_factor, base_color_texture, extensions, extras, metallic_factor,
metallic_roughness_texture, roughness_factor)
def to_dict(self):
result = {}
result["baseColorFactor"] = from_union([lambda x: from_list(to_float, x), from_none], self.base_color_factor)
result["baseColorTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none],
self.base_color_texture)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["metallicFactor"] = from_union([to_float, from_none], self.metallic_factor)
result["metallicRoughnessTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none],
self.metallic_roughness_texture)
result["roughnessFactor"] = from_union([to_float, from_none], self.roughness_factor)
return result
class Material:
def __init__(self, alpha_cutoff, alpha_mode, double_sided, emissive_factor, emissive_texture, extensions, extras,
name, normal_texture, occlusion_texture, pbr_metallic_roughness):
self.alpha_cutoff = alpha_cutoff
self.alpha_mode = alpha_mode
self.double_sided = double_sided
self.emissive_factor = emissive_factor
self.emissive_texture = emissive_texture
self.extensions = extensions
self.extras = extras
self.name = name
self.normal_texture = normal_texture
self.occlusion_texture = occlusion_texture
self.pbr_metallic_roughness = pbr_metallic_roughness
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
alpha_cutoff = from_union([from_float, from_none], obj.get("alphaCutoff"))
alpha_mode = from_union([from_str, from_none], obj.get("alphaMode"))
double_sided = from_union([from_bool, from_none], obj.get("doubleSided"))
emissive_factor = from_union([lambda x: from_list(from_float, x), from_none], obj.get("emissiveFactor"))
emissive_texture = from_union([TextureInfo.from_dict, from_none], obj.get("emissiveTexture"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
normal_texture = from_union([MaterialNormalTextureInfoClass.from_dict, from_none], obj.get("normalTexture"))
occlusion_texture = from_union([MaterialOcclusionTextureInfoClass.from_dict, from_none],
obj.get("occlusionTexture"))
pbr_metallic_roughness = from_union([MaterialPBRMetallicRoughness.from_dict, from_none],
obj.get("pbrMetallicRoughness"))
return Material(alpha_cutoff, alpha_mode, double_sided, emissive_factor, emissive_texture, extensions, extras,
name, normal_texture, occlusion_texture, pbr_metallic_roughness)
def to_dict(self):
result = {}
result["alphaCutoff"] = from_union([to_float, from_none], self.alpha_cutoff)
result["alphaMode"] = from_union([from_str, from_none], self.alpha_mode)
result["doubleSided"] = from_union([from_bool, from_none], self.double_sided)
result["emissiveFactor"] = from_union([lambda x: from_list(to_float, x), from_none], self.emissive_factor)
result["emissiveTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none], self.emissive_texture)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["normalTexture"] = from_union([lambda x: to_class(MaterialNormalTextureInfoClass, x), from_none],
self.normal_texture)
result["occlusionTexture"] = from_union([lambda x: to_class(MaterialOcclusionTextureInfoClass, x), from_none],
self.occlusion_texture)
result["pbrMetallicRoughness"] = from_union([lambda x: to_class(MaterialPBRMetallicRoughness, x), from_none],
self.pbr_metallic_roughness)
return result
class MeshPrimitive:
def __init__(self, attributes, extensions, extras, indices, material, mode, targets):
self.attributes = attributes
self.extensions = extensions
self.extras = extras
self.indices = indices
self.material = material
self.mode = mode
self.targets = targets
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
attributes = from_dict(from_int, obj.get("attributes"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
indices = from_union([from_int, from_none], obj.get("indices"))
material = from_union([from_int, from_none], obj.get("material"))
mode = from_union([from_int, from_none], obj.get("mode"))
targets = from_union([lambda x: from_list(lambda x: from_dict(from_int, x), x), from_none], obj.get("targets"))
return MeshPrimitive(attributes, extensions, extras, indices, material, mode, targets)
def to_dict(self):
result = {}
result["attributes"] = from_dict(from_int, self.attributes)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["indices"] = from_union([from_int, from_none], self.indices)
result["material"] = from_union([from_int, from_none], self.material)
result["mode"] = from_union([from_int, from_none], self.mode)
result["targets"] = from_union([lambda x: from_list(lambda x: from_dict(from_int, x), x), from_none],
self.targets)
return result
class Mesh:
def __init__(self, extensions, extras, name, primitives, weights):
self.extensions = extensions
self.extras = extras
self.name = name
self.primitives = primitives
self.weights = weights
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
primitives = from_list(MeshPrimitive.from_dict, obj.get("primitives"))
weights = from_union([lambda x: from_list(from_float, x), from_none], obj.get("weights"))
return Mesh(extensions, extras, name, primitives, weights)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["primitives"] = from_list(lambda x: to_class(MeshPrimitive, x), self.primitives)
result["weights"] = from_union([lambda x: from_list(to_float, x), from_none], self.weights)
return result
class Node:
def __init__(self, camera, children, extensions, extras, matrix, mesh, name, rotation, scale, skin, translation,
weights):
self.camera = camera
self.children = children
self.extensions = extensions
self.extras = extras
self.matrix = matrix
self.mesh = mesh
self.name = name
self.rotation = rotation
self.scale = scale
self.skin = skin
self.translation = translation
self.weights = weights
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
camera = from_union([from_int, from_none], obj.get("camera"))
children = from_union([lambda x: from_list(from_int, x), from_none], obj.get("children"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
matrix = from_union([lambda x: from_list(from_float, x), from_none], obj.get("matrix"))
mesh = from_union([from_int, from_none], obj.get("mesh"))
name = from_union([from_str, from_none], obj.get("name"))
rotation = from_union([lambda x: from_list(from_float, x), from_none], obj.get("rotation"))
scale = from_union([lambda x: from_list(from_float, x), from_none], obj.get("scale"))
skin = from_union([from_int, from_none], obj.get("skin"))
translation = from_union([lambda x: from_list(from_float, x), from_none], obj.get("translation"))
weights = from_union([lambda x: from_list(from_float, x), from_none], obj.get("weights"))
return Node(camera, children, extensions, extras, matrix, mesh, name, rotation, scale, skin, translation,
weights)
def to_dict(self):
result = {}
result["camera"] = from_union([from_int, from_none], self.camera)
result["children"] = from_union([lambda x: from_list(from_int, x), from_none], self.children)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["matrix"] = from_union([lambda x: from_list(to_float, x), from_none], self.matrix)
result["mesh"] = from_union([from_int, from_none], self.mesh)
result["name"] = from_union([from_str, from_none], self.name)
result["rotation"] = from_union([lambda x: from_list(to_float, x), from_none], self.rotation)
result["scale"] = from_union([lambda x: from_list(to_float, x), from_none], self.scale)
result["skin"] = from_union([from_int, from_none], self.skin)
result["translation"] = from_union([lambda x: from_list(to_float, x), from_none], self.translation)
result["weights"] = from_union([lambda x: from_list(to_float, x), from_none], self.weights)
return result
class Sampler:
def __init__(self, extensions, extras, mag_filter, min_filter, name, wrap_s, wrap_t):
self.extensions = extensions
self.extras = extras
self.mag_filter = mag_filter
self.min_filter = min_filter
self.name = name
self.wrap_s = wrap_s
self.wrap_t = wrap_t
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
mag_filter = from_union([from_int, from_none], obj.get("magFilter"))
min_filter = from_union([from_int, from_none], obj.get("minFilter"))
name = from_union([from_str, from_none], obj.get("name"))
wrap_s = from_union([from_int, from_none], obj.get("wrapS"))
wrap_t = from_union([from_int, from_none], obj.get("wrapT"))
return Sampler(extensions, extras, mag_filter, min_filter, name, wrap_s, wrap_t)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["magFilter"] = from_union([from_int, from_none], self.mag_filter)
result["minFilter"] = from_union([from_int, from_none], self.min_filter)
result["name"] = from_union([from_str, from_none], self.name)
result["wrapS"] = from_union([from_int, from_none], self.wrap_s)
result["wrapT"] = from_union([from_int, from_none], self.wrap_t)
return result
class Scene:
def __init__(self, extensions, extras, name, nodes):
self.extensions = extensions
self.extras = extras
self.name = name
self.nodes = nodes
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
nodes = from_union([lambda x: from_list(from_int, x), from_none], obj.get("nodes"))
return Scene(extensions, extras, name, nodes)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["nodes"] = from_union([lambda x: from_list(from_int, x), from_none], self.nodes)
return result
class Skin:
def __init__(self, extensions, extras, inverse_bind_matrices, joints, name, skeleton):
self.extensions = extensions
self.extras = extras
self.inverse_bind_matrices = inverse_bind_matrices
self.joints = joints
self.name = name
self.skeleton = skeleton
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
inverse_bind_matrices = from_union([from_int, from_none], obj.get("inverseBindMatrices"))
joints = from_list(from_int, obj.get("joints"))
name = from_union([from_str, from_none], obj.get("name"))
skeleton = from_union([from_int, from_none], obj.get("skeleton"))
return Skin(extensions, extras, inverse_bind_matrices, joints, name, skeleton)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["inverseBindMatrices"] = from_union([from_int, from_none], self.inverse_bind_matrices)
result["joints"] = from_list(from_int, self.joints)
result["name"] = from_union([from_str, from_none], self.name)
result["skeleton"] = from_union([from_int, from_none], self.skeleton)
return result
class Texture:
def __init__(self, extensions, extras, name, sampler, source):
self.extensions = extensions
self.extras = extras
self.name = name
self.sampler = sampler
self.source = source
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extras = obj.get("extras")
name = from_union([from_str, from_none], obj.get("name"))
sampler = from_union([from_int, from_none], obj.get("sampler"))
source = from_int(obj.get("source"))
return Texture(extensions, extras, name, sampler, source)
def to_dict(self):
result = {}
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extras"] = self.extras
result["name"] = from_union([from_str, from_none], self.name)
result["sampler"] = from_union([from_int, from_none], self.sampler)
result["source"] = from_int(self.source)
return result
class Gltf:
def __init__(self, accessors, animations, asset, buffers, buffer_views, cameras, extensions, extensions_required,
extensions_used, extras, images, materials, meshes, nodes, samplers, scene, scenes, skins, textures):
self.accessors = accessors
self.animations = animations
self.asset = asset
self.buffers = buffers
self.buffer_views = buffer_views
self.cameras = cameras
self.extensions = extensions
self.extensions_required = extensions_required
self.extensions_used = extensions_used
self.extras = extras
self.images = images
self.materials = materials
self.meshes = meshes
self.nodes = nodes
self.samplers = samplers
self.scene = scene
self.scenes = scenes
self.skins = skins
self.textures = textures
@staticmethod
def from_dict(obj):
assert isinstance(obj, dict)
accessors = from_union([lambda x: from_list(Accessor.from_dict, x), from_none], obj.get("accessors"))
animations = from_union([lambda x: from_list(Animation.from_dict, x), from_none], obj.get("animations"))
asset = Asset.from_dict(obj.get("asset"))
buffers = from_union([lambda x: from_list(Buffer.from_dict, x), from_none], obj.get("buffers"))
buffer_views = from_union([lambda x: from_list(BufferView.from_dict, x), from_none], obj.get("bufferViews"))
cameras = from_union([lambda x: from_list(Camera.from_dict, x), from_none], obj.get("cameras"))
extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
obj.get("extensions"))
extensions_required = from_union([lambda x: from_list(from_str, x), from_none], obj.get("extensionsRequired"))
extensions_used = from_union([lambda x: from_list(from_str, x), from_none], obj.get("extensionsUsed"))
extras = obj.get("extras")
images = from_union([lambda x: from_list(Image.from_dict, x), from_none], obj.get("images"))
materials = from_union([lambda x: from_list(Material.from_dict, x), from_none], obj.get("materials"))
meshes = from_union([lambda x: from_list(Mesh.from_dict, x), from_none], obj.get("meshes"))
nodes = from_union([lambda x: from_list(Node.from_dict, x), from_none], obj.get("nodes"))
samplers = from_union([lambda x: from_list(Sampler.from_dict, x), from_none], obj.get("samplers"))
scene = from_union([from_int, from_none], obj.get("scene"))
scenes = from_union([lambda x: from_list(Scene.from_dict, x), from_none], obj.get("scenes"))
skins = from_union([lambda x: from_list(Skin.from_dict, x), from_none], obj.get("skins"))
textures = from_union([lambda x: from_list(Texture.from_dict, x), from_none], obj.get("textures"))
return Gltf(accessors, animations, asset, buffers, buffer_views, cameras, extensions, extensions_required,
extensions_used, extras, images, materials, meshes, nodes, samplers, scene, scenes, skins, textures)
def to_dict(self):
result = {}
result["accessors"] = from_union([lambda x: from_list(lambda x: to_class(Accessor, x), x), from_none],
self.accessors)
result["animations"] = from_union([lambda x: from_list(lambda x: to_class(Animation, x), x), from_none],
self.animations)
result["asset"] = to_class(Asset, self.asset)
result["buffers"] = from_union([lambda x: from_list(lambda x: to_class(Buffer, x), x), from_none], self.buffers)
result["bufferViews"] = from_union([lambda x: from_list(lambda x: to_class(BufferView, x), x), from_none],
self.buffer_views)
result["cameras"] = from_union([lambda x: from_list(lambda x: to_class(Camera, x), x), from_none], self.cameras)
result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none],
self.extensions)
result["extensionsRequired"] = from_union([lambda x: from_list(from_str, x), from_none],
self.extensions_required)
result["extensionsUsed"] = from_union([lambda x: from_list(from_str, x), from_none], self.extensions_used)
result["extras"] = self.extras
result["images"] = from_union([lambda x: from_list(lambda x: to_class(Image, x), x), from_none], self.images)
result["materials"] = from_union([lambda x: from_list(lambda x: to_class(Material, x), x), from_none],
self.materials)
result["meshes"] = from_union([lambda x: from_list(lambda x: to_class(Mesh, x), x), from_none], self.meshes)
result["nodes"] = from_union([lambda x: from_list(lambda x: to_class(Node, x), x), from_none], self.nodes)
result["samplers"] = from_union([lambda x: from_list(lambda x: to_class(Sampler, x), x), from_none],
self.samplers)
result["scene"] = from_union([from_int, from_none], self.scene)
result["scenes"] = from_union([lambda x: from_list(lambda x: to_class(Scene, x), x), from_none], self.scenes)
result["skins"] = from_union([lambda x: from_list(lambda x: to_class(Skin, x), x), from_none], self.skins)
result["textures"] = from_union([lambda x: from_list(lambda x: to_class(Texture, x), x), from_none],
self.textures)
return result
def gltf_from_dict(s):
return Gltf.from_dict(s)
def gltf_to_dict(x):
return to_class(Gltf, x)
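# --- Usage sketch (editor's addition, hedged) ---
# A minimal example of round-tripping a glTF 2.0 JSON document through the
# generated classes above. The path "scene.gltf" is a placeholder, and the
# sketch assumes the casting helpers used throughout this module (from_union,
# from_list, to_class, ...) are defined earlier in the file.
def _example_roundtrip(path="scene.gltf"):
    import json
    with open(path, "r") as fp:
        gltf = gltf_from_dict(json.load(fp))
    # Serializing back should reproduce a dict equivalent to the input JSON.
    return gltf_to_dict(gltf)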
| true
| true
|
790ab56e397cbd847361a9ed6c5e792aaeed171e
| 3,047
|
py
|
Python
|
vehicle_tracker/visual.py
|
mondrasovic/multi_object_tracking_demo
|
d4ec6af4e3bca9d47628358967a05890071407ee
|
[
"MIT"
] | null | null | null |
vehicle_tracker/visual.py
|
mondrasovic/multi_object_tracking_demo
|
d4ec6af4e3bca9d47628358967a05890071407ee
|
[
"MIT"
] | null | null | null |
vehicle_tracker/visual.py
|
mondrasovic/multi_object_tracking_demo
|
d4ec6af4e3bca9d47628358967a05890071407ee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Milan Ondrasovic <milan.ondrasovic@gmail.com>
import random
from typing import Sequence, Tuple, Dict, cast
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tracking import TrackedDetection
ColorT = Tuple[int, int, int]
PointT = Tuple[int, int]
def labeled_rectangle(
image: np.ndarray, start_point: PointT, end_point: PointT, label: str,
rect_color: ColorT, label_color: ColorT, alpha: float = 0.85):
(x1, y1), (x2, y2) = start_point, end_point
roi = image[y1:y2, x1:x2]
rect = np.ones_like(roi) * 255
image[y1:y2, x1:x2] = cv.addWeighted(roi, alpha, rect, 1 - alpha, 0)
font_face = cv.FONT_HERSHEY_COMPLEX_SMALL
font_scale = 1
font_thickness = 3
(text_width, text_height), baseline = cv.getTextSize(
label, font_face, font_scale, font_thickness)
text_rect_end = (
start_point[0] + text_width, start_point[1] + text_height + baseline)
cv.rectangle(image, start_point, text_rect_end, rect_color, -1)
# TODO Somehow calculate the shift.
text_start_point = (start_point[0] + 1, start_point[1] + text_height + 3)
cv.putText(
image, label, text_start_point, font_face, font_scale, label_color,
font_thickness, cv.LINE_AA)
cv.putText(
image, label, text_start_point, font_face, font_scale, (255, 255, 255),
max(1, font_thickness - 2), cv.LINE_AA)
cv.rectangle(image, start_point, end_point, rect_color, 2, cv.LINE_AA)
class TrackingVisualizer:
def __init__(self, n_colors: int) -> None:
assert n_colors > 0
self.colors: Sequence[ColorT] = self.init_colors(n_colors, True)
self.track_color: Dict[int, ColorT] = {}
def draw_tracks(
self, image: np.ndarray,
tracks: Sequence[TrackedDetection]) -> None:
for track in tracks:
text = str(track.track_id)
text_color = self._get_text_color()
annotation_color = self._get_annotation_color(track)
labeled_rectangle(
image, track.box.top_left, track.box.bottom_right, text,
annotation_color, text_color)
def _get_text_color(self) -> ColorT:
return (16, 16, 16)
def _get_annotation_color(self, track: TrackedDetection) -> ColorT:
color = self.track_color.get(track.track_id)
if color is not None:
return color
color_pos = len(self.track_color) % len(self.colors)
color = self.colors[color_pos]
self.track_color[track.track_id] = color
return cast(ColorT, color)
@staticmethod
def init_colors(n_colors: int, randomize: bool = False) -> Sequence[ColorT]:
color_map = plt.cm.get_cmap('Spectral', n_colors)
colors = [
tuple(int(round(c * 255)) for c in color_map(i)[:3])
for i in range(n_colors)]
if randomize:
random.shuffle(colors)
return cast(Sequence[ColorT], colors)
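# --- Usage sketch (editor's addition, hedged) ---
# A minimal example of annotating one frame. It assumes `tracks` is a sequence
# of TrackedDetection objects (see tracking.py) whose `box` exposes `top_left`
# and `bottom_right` pixel coordinates; the frame and tracks themselves are
# placeholders supplied by the caller, not real data.
def _example_draw(frame: np.ndarray, tracks: Sequence[TrackedDetection]) -> np.ndarray:
    visualizer = TrackingVisualizer(n_colors=16)
    visualizer.draw_tracks(frame, tracks)  # draws boxes and track ids in place
    return frame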
| 34.235955
| 80
| 0.644568
|
import random
from typing import Sequence, Tuple, Dict, cast
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tracking import TrackedDetection
ColorT = Tuple[int, int, int]
PointT = Tuple[int, int]
def labeled_rectangle(
image: np.ndarray, start_point: PointT, end_point: PointT, label: str,
rect_color: ColorT, label_color: ColorT, alpha: float = 0.85):
(x1, y1), (x2, y2) = start_point, end_point
roi = image[y1:y2, x1:x2]
rect = np.ones_like(roi) * 255
image[y1:y2, x1:x2] = cv.addWeighted(roi, alpha, rect, 1 - alpha, 0)
font_face = cv.FONT_HERSHEY_COMPLEX_SMALL
font_scale = 1
font_thickness = 3
(text_width, text_height), baseline = cv.getTextSize(
label, font_face, font_scale, font_thickness)
text_rect_end = (
start_point[0] + text_width, start_point[1] + text_height + baseline)
cv.rectangle(image, start_point, text_rect_end, rect_color, -1)
text_start_point = (start_point[0] + 1, start_point[1] + text_height + 3)
cv.putText(
image, label, text_start_point, font_face, font_scale, label_color,
font_thickness, cv.LINE_AA)
cv.putText(
image, label, text_start_point, font_face, font_scale, (255, 255, 255),
max(1, font_thickness - 2), cv.LINE_AA)
cv.rectangle(image, start_point, end_point, rect_color, 2, cv.LINE_AA)
class TrackingVisualizer:
def __init__(self, n_colors: int) -> None:
assert n_colors > 0
self.colors: Sequence[ColorT] = self.init_colors(n_colors, True)
self.track_color: Dict[int, ColorT] = {}
def draw_tracks(
self, image: np.ndarray,
tracks: Sequence[TrackedDetection]) -> None:
for track in tracks:
text = str(track.track_id)
text_color = self._get_text_color()
annotation_color = self._get_annotation_color(track)
labeled_rectangle(
image, track.box.top_left, track.box.bottom_right, text,
annotation_color, text_color)
def _get_text_color(self) -> ColorT:
return (16, 16, 16)
def _get_annotation_color(self, track: TrackedDetection) -> ColorT:
color = self.track_color.get(track.track_id)
if color is not None:
return color
color_pos = len(self.track_color) % len(self.colors)
color = self.colors[color_pos]
self.track_color[track.track_id] = color
return cast(ColorT, color)
@staticmethod
def init_colors(n_colors: int, randomize: bool = False) -> Sequence[ColorT]:
color_map = plt.cm.get_cmap('Spectral', n_colors)
colors = [
tuple(int(round(c * 255)) for c in color_map(i)[:3])
for i in range(n_colors)]
if randomize:
random.shuffle(colors)
return cast(Sequence[ColorT], colors)
| true
| true
|
790ab5f854de84d04e04ba888f76a7c8528ef2a9
| 12,970
|
py
|
Python
|
preliminary_contest/nezha_pretrain/tokenization.py
|
YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3
|
a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f
|
[
"Apache-2.0"
] | 22
|
2021-06-04T13:01:08.000Z
|
2022-02-18T13:19:46.000Z
|
preliminary_contest/nezha_pretrain/tokenization.py
|
YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3
|
a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f
|
[
"Apache-2.0"
] | null | null | null |
preliminary_contest/nezha_pretrain/tokenization.py
|
YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3
|
a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f
|
[
"Apache-2.0"
] | 2
|
2021-06-06T09:41:08.000Z
|
2021-06-09T01:05:10.000Z
|
# coding=utf-8
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
import re
import warnings
warnings.filterwarnings('ignore')
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
        raise ValueError(
            "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
            "However, `%s` seems to be a %s model, so you "
            "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
            "how the model was pre-trained. If this error is wrong, please "
            "just comment out this check." % (actual_flag, init_checkpoint,
                                              model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
    """Runs end-to-end tokenization."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
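# --- Usage sketch (editor's addition, hedged) ---
# A minimal example of end-to-end tokenization. "vocab.txt" is a placeholder
# path to a BERT-style WordPiece vocabulary; with a suitable vocabulary the
# docstring example above applies, e.g. "unaffable" -> ["un", "##aff", "##able"],
# while uncovered pieces fall back to "[UNK]".
def _example_tokenize(vocab_file="vocab.txt", text="unaffable"):
    tokenizer = FullTokenizer(vocab_file=vocab_file, do_lower_case=True)
    tokens = tokenizer.tokenize(text)
    return tokens, tokenizer.convert_tokens_to_ids(tokens)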
| 33.427835
| 84
| 0.590902
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
import re
import warnings
warnings.filterwarnings('ignore')
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
        raise ValueError(
            "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
            "However, `%s` seems to be a %s model, so you "
            "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
            "how the model was pre-trained. If this error is wrong, please "
            "just comment out this check." % (actual_flag, init_checkpoint,
                                              model_name, case_name, opposite_flag))
def convert_to_unicode(text):
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
def __init__(self, do_lower_case=True):
self.do_lower_case = do_lower_case
def tokenize(self, text):
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| true
| true
|
790ab6072cf551b86578de970ab717b64acd034b
| 717
|
py
|
Python
|
rpython/jit/backend/arm/test/test_ztranslation_external_exception.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/jit/backend/arm/test/test_ztranslation_external_exception.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/jit/backend/arm/test/test_ztranslation_external_exception.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest
from rpython.translator.translator import TranslationContext
from rpython.config.translationoption import DEFL_GC
from rpython.jit.backend.arm.test.support import skip_unless_run_slow_tests
skip_unless_run_slow_tests()
class TestTranslationRemoveTypePtrARM(TranslationRemoveTypePtrTest):
def _get_TranslationContext(self):
t = TranslationContext()
t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark'
t.config.translation.gcrootfinder = 'shadowstack'
t.config.translation.list_comprehension_operations = True
t.config.translation.gcremovetypeptr = True
return t
| 47.8
| 93
| 0.797768
|
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest
from rpython.translator.translator import TranslationContext
from rpython.config.translationoption import DEFL_GC
from rpython.jit.backend.arm.test.support import skip_unless_run_slow_tests
skip_unless_run_slow_tests()
class TestTranslationRemoveTypePtrARM(TranslationRemoveTypePtrTest):
def _get_TranslationContext(self):
t = TranslationContext()
t.config.translation.gc = DEFL_GC
t.config.translation.gcrootfinder = 'shadowstack'
t.config.translation.list_comprehension_operations = True
t.config.translation.gcremovetypeptr = True
return t
| true
| true
|
790ab627a508221249228239edbb5d8c2e7c1a25
| 10,628
|
py
|
Python
|
src/model/lstm_crf/main.py
|
vikasbahirwani/SequenceTagging
|
b4e0dc2a71f869a27ada003c9276fd1f269e230d
|
[
"Apache-2.0"
] | null | null | null |
src/model/lstm_crf/main.py
|
vikasbahirwani/SequenceTagging
|
b4e0dc2a71f869a27ada003c9276fd1f269e230d
|
[
"Apache-2.0"
] | null | null | null |
src/model/lstm_crf/main.py
|
vikasbahirwani/SequenceTagging
|
b4e0dc2a71f869a27ada003c9276fd1f269e230d
|
[
"Apache-2.0"
] | null | null | null |
# reimplementation of https://github.com/guillaumegenthial/tf_ner/blob/master/models/lstm_crf/main.py
import functools
import json
import logging
from pathlib import Path
import sys
import numpy as np
import tensorflow as tf
# tf.enable_eager_execution()
from tf_metrics import precision, recall, f1
DATADIR = "../../../data/conll/"
# Setup Logging
Path('results').mkdir(exist_ok=True)
tf.logging.set_verbosity(logging.INFO)
handlers = [ logging.FileHandler('results/main.log'), logging.StreamHandler(sys.stdout)]
logging.getLogger('tensorflow').handlers = handlers
# Data Pipeline
def parse_fn(line_words, line_tags):
"""Encodes words into bytes for tensor
:param line_words: one line with words (aka sentences) with space between each word/token
:param line_tags: one line of tags (one tag per word in line_words)
:return: (list of encoded words, len(words)), list of encoded tags
"""
words = [w.encode() for w in line_words.strip().split()]
tags = [t.encode() for t in line_tags.strip().split()]
    assert len(words) == len(tags), "Number of words {} and number of tags {} must be the same".format(len(words), len(tags))
return (words, len(words)), tags
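# Worked example (editor's addition): for the input pair
#   line_words = "John lives in London"
#   line_tags  = "B-PER O O B-LOC"
# parse_fn returns (([b'John', b'lives', b'in', b'London'], 4),
#                   [b'B-PER', b'O', b'O', b'B-LOC']).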
def generator_fn(words_file, tags_file):
"""Enumerator to enumerate through words_file and associated tags_file one line at a time
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
    :return: enumerator that enumerates over the format (words, len(words)), tags one line at a time from input files.
"""
with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:
for line_words, line_tags in zip(f_words, f_tags):
yield parse_fn(line_words, line_tags)
def input_fn(words_file, tags_file, params = None, shuffle_and_repeat = False):
"""Creates tensorflow dataset using the generator_fn
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
:param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'
:param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)
:return: instance of tf.data.Dataset
"""
params = params if params is not None else {}
# shapes are analogous to (list of encoded words, len(words)), list of encoded tags
shapes = (([None], ()), [None])
types = ((tf.string, tf.int32), tf.string)
defaults = (('<pad>', 0), 'O')
generator = functools.partial(generator_fn, words_file, tags_file)
dataset = tf.data.Dataset.from_generator(generator, output_shapes = shapes, output_types = types)
if shuffle_and_repeat:
dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])
    dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)
return dataset
def model_fn(features, labels, mode, params):
"""
:param features: words from sentence and number of words per sentence
:param labels: One tag per word
:param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL
:param params: dictionary of hyper parameters for the model
:return:
"""
# For serving, features are a bit different
if isinstance(features, dict):
features = features['words'], features['nwords']
# Read vocab_words_file, vocab_tags_file, features
words, nwords = features
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets = params['num_oov_buckets'])
'''
If the file contains the following:
B-LOC
B-PER
O
I-LOC
then indices = [0, 1, 3] and num_tags = 4
Open Question: The special treatment of tag indices is probably needed for microavg metrics. Why though?
'''
with Path(params['vocab_tags_file']).open('r') as f:
indices = [idx for idx, tag in enumerate(f) if tag.strip() != 'O']
num_tags = len(indices) + 1
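    # Editor's note (hedged): `indices` holds the ids of the non-'O' tags only,
    # so the tf_metrics precision/recall/f1 computed below are averaged over the
    # entity classes and ignore 'O' as a positive class; 'O' is still one of the
    # `num_tags` labels seen by the CRF.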
# Word Embeddings
# remember - as per the parse function "words" is a python list of
word_ids = vocab_words.lookup(words)
glove = np.load(params['glove'])['embeddings']
glove = np.vstack([glove, [[0.]*params['dim']]])
variable = tf.Variable(glove, dtype=tf.float32, trainable=False)
embeddings = tf.nn.embedding_lookup(variable, word_ids)
dropout = params['dropout']
embeddings = tf.layers.dropout(embeddings, rate = dropout, training = training)
# LSTM CRF
time_major = tf.transpose(embeddings, perm = [1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
"""
Any LSTM Cell returns two things: Cell Output (h) and Cell State (c)
Following this, lstm_fw or lstm_bw each return a pair containing:
Cell Output: A 3-D tensor of shape [time_len, batch_size, output_size]
Final state: a tuple (cell_state, output) produced by the last LSTM Cell in the sequence.
"""
output_fw,_ = lstm_cell_fw(time_major, dtype = tf.float32, sequence_length = nwords)
output_bw,_ = lstm_cell_bw(time_major, dtype = tf.float32, sequence_length = nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.transpose(output, perm=[1, 0, 2])
output = tf.layers.dropout(output, rate=dropout, training=training)
    # CRF
logits = tf.layers.dense(output, num_tags)
crf_params = tf.get_variable('crf', shape = [num_tags, num_tags], dtype = tf.float32)
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords) # pred_ids = A [batch_size, max_seq_len] matrix, with dtype tf.int32.
# Prediction mode
if mode == tf.estimator.ModeKeys.PREDICT:
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
predictions = {'pred_ids': pred_ids, 'tags': pred_strings}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Loss
vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])
label_ids = vocab_tags.lookup(labels)
"""
logits are the same thing as unary potentials,
checkout https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html look for scores s[i]
"""
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)
loss = tf.reduce_mean(-log_likelihood)
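    # i.e. minimize the average negative log-likelihood of the gold tag sequences under the CRF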
# metrics
weights = tf.sequence_mask(nwords)
metrics = {
'acc': tf.metrics.accuracy(label_ids, pred_ids, weights),
'precision': precision(label_ids, pred_ids, num_tags, indices, weights), # indices indicate non-null classes
'recall': recall(label_ids, pred_ids, num_tags, indices, weights),
'f1': f1(label_ids, pred_ids, num_tags, indices, weights),
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
# Evaluation Mode or training mode
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss = loss, eval_metric_ops = metrics )
elif mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode, loss = loss, train_op = train_op)
def fwords(name):
return str(Path(DATADIR, '{}.words.txt'.format(name)))
def ftags(name):
return str(Path(DATADIR, '{}.tags.txt'.format(name)))
# Write predictions to file
def write_predictions(name, estimator):
Path('results/score').mkdir(parents=True, exist_ok=True)
with Path('results/score/{}.preds.txt'.format(name)).open('wb') as f:
test_inpf = functools.partial(input_fn, fwords(name), ftags(name))
golds_gen = generator_fn(fwords(name), ftags(name))
preds_gen = estimator.predict(test_inpf)
for golds, preds in zip(golds_gen, preds_gen):
((words, _), tags) = golds
for word, tag, tag_pred in zip(words, tags, preds['tags']):
f.write(b' '.join([word, tag, tag_pred]) + b'\n')
f.write(b'\n')
if __name__ == '__main__':
# Params
params = {
'dim': 300,
'dropout': 0.5,
'num_oov_buckets': 1,
'epochs': 25,
'batch_size': 20,
'buffer': 15000,
'lstm_size': 100,
'vocab_words_file': str(Path(DATADIR, 'vocab.words.txt')),
'vocab_chars_file': str(Path(DATADIR, 'vocab.chars.txt')),
'vocab_tags_file': str(Path(DATADIR, 'vocab.tags.txt')),
'glove': str(Path(DATADIR, 'glove.npz'))
}
with Path('results/params.json').open('w') as f:
json.dump(params, f, indent=4, sort_keys=True)
print('Done writing params to disk')
# Run configuration and estimator
cfg = tf.estimator.RunConfig(save_checkpoints_secs=120)
estimator = tf.estimator.Estimator(model_fn, 'results/model', cfg, params)
print('Done creating estimator spec')
# Defining our input functions
train_inpf = functools.partial(input_fn, fwords('train'), ftags('train'), params, shuffle_and_repeat=True)
eval_inpf = functools.partial(input_fn, fwords('testa'), ftags('testa'))
# Create an early stopping hook
Path(estimator.eval_dir()).mkdir(parents=True, exist_ok=True)
"""
Ref: https://stackoverflow.com/questions/47137061/early-stopping-with-tf-estimator-how
The parameters for stop_if_no_decrease_hook are as follows:
tf.contrib.estimator.stop_if_no_decrease_hook(
estimator,
metric_name='loss',
max_steps_without_decrease=1000,
min_steps=100)
"""
hook = tf.contrib.estimator.stop_if_no_increase_hook(estimator, 'f1', 500, min_steps=8000, run_every_secs=120)
train_spec = tf.estimator.TrainSpec(input_fn = train_inpf, hooks = [hook])
eval_spec = tf.estimator.EvalSpec(input_fn = eval_inpf, throttle_secs = 120) # Evaluate every 120 seconds
print('Done creating train and eval spec')
# Train with early stopping
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
print('Done training and evaluation')
for name in ['train', 'testa', 'testb']:
write_predictions(name, estimator)
| 38.647273
| 142
| 0.691664
|
import functools
import json
import logging
from pathlib import Path
import sys
import numpy as np
import tensorflow as tf
from tf_metrics import precision, recall, f1
DATADIR = "../../../data/conll/"
Path('results').mkdir(exist_ok=True)
tf.logging.set_verbosity(logging.INFO)
handlers = [ logging.FileHandler('results/main.log'), logging.StreamHandler(sys.stdout)]
logging.getLogger('tensorflow').handlers = handlers
def parse_fn(line_words, line_tags):
words = [w.encode() for w in line_words.strip().split()]
tags = [t.encode() for t in line_tags.strip().split()]
    assert len(words) == len(tags), "Number of words {} and number of tags {} must be the same".format(len(words), len(tags))
return (words, len(words)), tags
def generator_fn(words_file, tags_file):
with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:
for line_words, line_tags in zip(f_words, f_tags):
yield parse_fn(line_words, line_tags)
def input_fn(words_file, tags_file, params = None, shuffle_and_repeat = False):
params = params if params is not None else {}
shapes = (([None], ()), [None])
types = ((tf.string, tf.int32), tf.string)
defaults = (('<pad>', 0), 'O')
generator = functools.partial(generator_fn, words_file, tags_file)
dataset = tf.data.Dataset.from_generator(generator, output_shapes = shapes, output_types = types)
if shuffle_and_repeat:
dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])
dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)\
return dataset
def model_fn(features, labels, mode, params):
if isinstance(features, dict):
features = features['words'], features['nwords']
words, nwords = features
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets = params['num_oov_buckets'])
with Path(params['vocab_tags_file']).open('r') as f:
indices = [idx for idx, tag in enumerate(f) if tag.strip() != 'O']
num_tags = len(indices) + 1
word_ids = vocab_words.lookup(words)
glove = np.load(params['glove'])['embeddings']
glove = np.vstack([glove, [[0.]*params['dim']]])
variable = tf.Variable(glove, dtype=tf.float32, trainable=False)
embeddings = tf.nn.embedding_lookup(variable, word_ids)
dropout = params['dropout']
embeddings = tf.layers.dropout(embeddings, rate = dropout, training = training)
time_major = tf.transpose(embeddings, perm = [1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
output_fw,_ = lstm_cell_fw(time_major, dtype = tf.float32, sequence_length = nwords)
output_bw,_ = lstm_cell_bw(time_major, dtype = tf.float32, sequence_length = nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.transpose(output, perm=[1, 0, 2])
output = tf.layers.dropout(output, rate=dropout, training=training)
logits = tf.layers.dense(output, num_tags)
crf_params = tf.get_variable('crf', shape = [num_tags, num_tags], dtype = tf.float32)
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
if mode == tf.estimator.ModeKeys.PREDICT:
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
predictions = {'pred_ids': pred_ids, 'tags': pred_strings}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])
label_ids = vocab_tags.lookup(labels)
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)
loss = tf.reduce_mean(-log_likelihood)
weights = tf.sequence_mask(nwords)
metrics = {
'acc': tf.metrics.accuracy(label_ids, pred_ids, weights),
'precision': precision(label_ids, pred_ids, num_tags, indices, weights),
'recall': recall(label_ids, pred_ids, num_tags, indices, weights),
'f1': f1(label_ids, pred_ids, num_tags, indices, weights),
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss = loss, eval_metric_ops = metrics )
elif mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode, loss = loss, train_op = train_op)
def fwords(name):
return str(Path(DATADIR, '{}.words.txt'.format(name)))
def ftags(name):
return str(Path(DATADIR, '{}.tags.txt'.format(name)))
def write_predictions(name, estimator):
Path('results/score').mkdir(parents=True, exist_ok=True)
with Path('results/score/{}.preds.txt'.format(name)).open('wb') as f:
test_inpf = functools.partial(input_fn, fwords(name), ftags(name))
golds_gen = generator_fn(fwords(name), ftags(name))
preds_gen = estimator.predict(test_inpf)
for golds, preds in zip(golds_gen, preds_gen):
((words, _), tags) = golds
for word, tag, tag_pred in zip(words, tags, preds['tags']):
f.write(b' '.join([word, tag, tag_pred]) + b'\n')
f.write(b'\n')
if __name__ == '__main__':
params = {
'dim': 300,
'dropout': 0.5,
'num_oov_buckets': 1,
'epochs': 25,
'batch_size': 20,
'buffer': 15000,
'lstm_size': 100,
'vocab_words_file': str(Path(DATADIR, 'vocab.words.txt')),
'vocab_chars_file': str(Path(DATADIR, 'vocab.chars.txt')),
'vocab_tags_file': str(Path(DATADIR, 'vocab.tags.txt')),
'glove': str(Path(DATADIR, 'glove.npz'))
}
with Path('results/params.json').open('w') as f:
json.dump(params, f, indent=4, sort_keys=True)
print('Done writing params to disk')
cfg = tf.estimator.RunConfig(save_checkpoints_secs=120)
estimator = tf.estimator.Estimator(model_fn, 'results/model', cfg, params)
print('Done creating estimator spec')
train_inpf = functools.partial(input_fn, fwords('train'), ftags('train'), params, shuffle_and_repeat=True)
eval_inpf = functools.partial(input_fn, fwords('testa'), ftags('testa'))
Path(estimator.eval_dir()).mkdir(parents=True, exist_ok=True)
hook = tf.contrib.estimator.stop_if_no_increase_hook(estimator, 'f1', 500, min_steps=8000, run_every_secs=120)
train_spec = tf.estimator.TrainSpec(input_fn = train_inpf, hooks = [hook])
eval_spec = tf.estimator.EvalSpec(input_fn = eval_inpf, throttle_secs = 120)
print('Done creating train and eval spec')
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
print('Done training and evaluation')
for name in ['train', 'testa', 'testb']:
write_predictions(name, estimator)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ab6b54ec2b2575ba95b4db6a86451e8773e78 | size: 8,145 | ext: py | lang: Python
repo_path: tinygrad/ops_cpu.py | repo_name: dredwardhyde/tinygrad-universal | repo_head_hexsha: aeb28fc42fb40e9848613ce81811a727fee6f313 | licenses: ["Apache-2.0"]
max_stars_count: 1 | stars_events: 2021-02-24T17:38:03.000Z to 2021-02-24T17:38:03.000Z
max_issues_count: null | max_forks_count: null
import numpy as np
from .tensor import Function
# ************* unary ops *************
class ReLU(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return np.maximum(input, 0)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output * (input >= 0)
class Log(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return np.log(input)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return grad_output / input
class Exp(Function):
@staticmethod
def forward(ctx, input):
ret = np.exp(input)
ctx.save_for_backward(ret)
return ret
@staticmethod
def backward(ctx, grad_output):
ret, = ctx.saved_tensors
return grad_output * ret
# ************* reduce ops *************
class Sum(Function):
@staticmethod
def forward(ctx, input, axis=None):
ctx.save_for_backward(input, axis)
return np.array([input.sum()]) if axis is None else input.sum(axis=axis)
@staticmethod
def backward(ctx, grad_output):
input, axis = ctx.saved_tensors
axis = [axis] if type(axis) is int else axis
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
return grad_output.reshape(shape) + np.zeros_like(input)
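# Sum.backward example: an input of shape (3, 4) summed over axis=1 yields grad_output of
# shape (3,), which is reshaped to (3, 1) and broadcast back to the full (3, 4) input shape.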
class Max(Function):
@staticmethod
def forward(ctx, inp, axis=None):
axis = [axis] if type(axis) == int else axis
ret = np.amax(inp, axis=None if axis is None else tuple(axis), keepdims=True)
ctx.save_for_backward(inp, axis, ret)
if axis is not None:
ret = ret.reshape([inp.shape[i] for i in range(len(inp.shape)) if i not in axis])
return ret
@staticmethod
def backward(ctx, grad_output):
input, axis, ret = ctx.saved_tensors
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
ret2 = (input == ret.reshape(shape))
div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)
return ret2 * grad_output.reshape(shape) / div
# ************* binary ops *************
def unbroadcast(out, in_sh):
# The adjoint of broadcasting is summation: sum over every axis where in_sh[i] == 1 but out.shape[i] > 1.
sum_axis = tuple([i for i in range(len(in_sh)) if in_sh[i] == 1 and out.shape[i] > 1]) if in_sh != (1,) else None
return out.sum(axis=sum_axis).reshape(in_sh)
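# Example: out.shape == (4, 3) against in_sh == (1, 3) sums over axis 0 and reshapes back to
# (1, 3); the special case in_sh == (1,) sums everything down to a single element.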
class Add(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return x + y
@staticmethod
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(grad_output, shape_y)
class Sub(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return x - y
@staticmethod
def backward(ctx, grad_output):
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_output, shape_x), unbroadcast(-grad_output, shape_y)
class Mul(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return x * y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return unbroadcast(y * grad_output, x.shape), unbroadcast(x * grad_output, y.shape)
class Pow(Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return x ** y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return unbroadcast(y * (x ** (y - 1.0)) * grad_output, x.shape), \
unbroadcast((x ** y) * np.log(x) * grad_output, y.shape)
# ************* movement ops *************
class Reshape(Function):
@staticmethod
def forward(ctx, x, shape):
ctx.save_for_backward(x.shape)
return x.reshape(shape)
@staticmethod
def backward(ctx, grad_output):
in_shape, = ctx.saved_tensors
return grad_output.reshape(in_shape)
class Transpose(Function):
@staticmethod
def forward(ctx, x, order):
ctx.save_for_backward(order)
return np.transpose(x, order)
@staticmethod
def backward(ctx, x):
return np.transpose(x, np.argsort(ctx.order))
def inner_slice(x, arg):
padding = [(max(0, -p[0]), max(0, p[1] - x.shape[i])) for i, p in enumerate(arg)]
x = np.pad(x, padding)
slicee = [(p[0] + padding[i][0], p[1] + padding[i][0]) for i, p in enumerate(arg)]
return x[tuple([slice(x[0], x[1], None) for x in slicee])]
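# inner_slice pads before slicing, so ranges that reach past the array are zero-filled:
# e.g. inner_slice(np.arange(3.), [(-1, 4)]) returns [0., 0., 1., 2., 0.].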
class Slice(Function):
@staticmethod
def forward(ctx, x, arg=None):
ctx.save_for_backward(x.shape)
return inner_slice(x, arg)
@staticmethod
def backward(ctx, grad_output):
shape, = ctx.saved_tensors
narg = [(0 - p[0], grad_output.shape[i] + (shape[i] - p[1])) for i, p in enumerate(ctx.arg)]
return inner_slice(grad_output, narg)
# ************* processing ops *************
class Matmul(Function):
@staticmethod
def forward(ctx, input, weight):
ctx.save_for_backward(input, weight)
return input @ weight
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
grad_input = grad_output @ np.swapaxes(weight, -2, -1)
grad_weight = np.swapaxes(input, -2, -1) @ grad_output
return grad_input, grad_weight
class Conv2D(Function):
@staticmethod
def forward(ctx, x, w, stride=1, groups=1):
if type(ctx.stride) == int:
ctx.stride = (ctx.stride, ctx.stride)
cout, cin, H, W = w.shape
ys, xs = ctx.stride
bs, cin_ = x.shape[0], x.shape[1]
oy, ox = (x.shape[2] - (H - ys)) // ys, (x.shape[3] - (W - xs)) // xs
assert cin * ctx.groups == cin_
assert cout % ctx.groups == 0
rcout = cout // ctx.groups
gx = x.reshape(bs, ctx.groups, cin, x.shape[2], x.shape[3])
tx = np.lib.stride_tricks.as_strided(gx,
shape=(bs, ctx.groups, cin, oy, ox, H, W),
strides=(*gx.strides[0:3], gx.strides[3] * ys, gx.strides[4] * xs,
*gx.strides[3:5]),
writeable=False,
)
tw = w.reshape(ctx.groups, rcout, cin, H, W)
ctx.save_for_backward(tx, tw, x.shape)
ret = np.zeros((bs, ctx.groups, oy, ox, rcout), dtype=x.dtype)
for g in range(ctx.groups):
# ijYXyx,kjyx -> iYXk ->ikYX
ret[:, g] += np.tensordot(tx[:, g], tw[g], ((1, 4, 5), (1, 2, 3)))
return np.moveaxis(ret, 4, 2).reshape(bs, cout, oy, ox)
@staticmethod
def backward(ctx, grad_output):
bs, _, oy, ox = grad_output.shape
tx, tw, x_shape = ctx.saved_tensors
_, rcout, cin, H, W = tw.shape
ys, xs = ctx.stride
OY, OX = x_shape[2:4]
ggg = grad_output.reshape(bs, ctx.groups, rcout, oy, ox)
gdw = np.zeros((ctx.groups, rcout, cin, H, W), dtype=tx.dtype)
for g in range(ctx.groups):
# 'ikYX,ijYXyx -> kjyx'
gdw[g] += np.tensordot(ggg[:, g], tx[:, g], ((0, 2, 3), (0, 2, 3)))
# needs to be optimized
gdx = np.zeros((bs, ctx.groups, cin, OY, OX), dtype=tx.dtype)
for k in range(oy * ox):
Y, X = k // ox, k % ox
iY, iX = Y * ys, X * xs
# gdx[:,:,: , iY:iY+H, iX:iX+W] += np.einsum('igk,gkjyx->igjyx', ggg[:,:,:,Y,X], tw)
for g in range(ctx.groups):
tg = np.dot(ggg[:, g, :, Y, X].reshape(bs, -1), tw[g].reshape(rcout, -1))
gdx[:, g, :, iY:iY + H, iX:iX + W] += tg.reshape((bs, cin, H, W))
return gdx.reshape((bs, ctx.groups * cin, OY, OX)), gdw.reshape((ctx.groups * rcout, cin, H, W))
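As an aside, a minimal self-contained NumPy sketch of the as_strided sliding-window view that Conv2D.forward builds; the 1x1x4x4 input and 2x2 window below are made-up values chosen only to keep the shapes small (stride 1, single group).
import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)   # bs=1, cin=1, 4x4 image
H = W = 2                                                  # window height/width
oy = ox = 3                                                # output size for stride 1: 4 - 2 + 1
windows = np.lib.stride_tricks.as_strided(
    x,
    shape=(1, 1, oy, ox, H, W),
    strides=(*x.strides[0:2], x.strides[2], x.strides[3], x.strides[2], x.strides[3]),
    writeable=False,
)
# windows[0, 0, i, j] is the HxW patch whose top-left corner sits at x[0, 0, i, j]
assert np.array_equal(windows[0, 0, 1, 2], x[0, 0, 1:3, 2:4])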
avg_line_length: 32.193676 | max_line_length: 117 | alphanum_fraction: 0.570166
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ab7ee606d609170fba5d1c33ff6a2d559bc20 | size: 36,190 | ext: py | lang: Python
repo_path: Lib/test/test_tracemalloc.py | repo_name: KinkowanWinter/Transcendental-Number-Utilization | repo_head_hexsha: 5f6d1d32850ad2cd2d03cc6f796d32ba7876fc39 | licenses: ["PSF-2.0"]
max_stars_count: 486 | stars_events: 2016-05-28T18:51:54.000Z to 2022-03-20T17:30:31.000Z
max_issues_count: 40 | issues_events: 2016-05-29T00:24:56.000Z to 2020-07-13T11:56:58.000Z
max_forks_count: 46 | forks_events: 2016-05-28T18:52:03.000Z to 2021-06-01T07:57:51.000Z
import contextlib
import os
import sys
import tracemalloc
import unittest
from unittest.mock import patch
from test.support.script_helper import (assert_python_ok, assert_python_failure,
interpreter_requires_environment)
from test import support
try:
import threading
except ImportError:
threading = None
try:
import _testcapi
except ImportError:
_testcapi = None
EMPTY_STRING_SIZE = sys.getsizeof(b'')
def get_frames(nframe, lineno_delta):
frames = []
frame = sys._getframe(1)
for index in range(nframe):
code = frame.f_code
lineno = frame.f_lineno + lineno_delta
frames.append((code.co_filename, lineno))
lineno_delta = 0
frame = frame.f_back
if frame is None:
break
return tuple(frames)
def allocate_bytes(size):
nframe = tracemalloc.get_traceback_limit()
bytes_len = (size - EMPTY_STRING_SIZE)
frames = get_frames(nframe, 1)
data = b'x' * bytes_len
return data, tracemalloc.Traceback(frames)
def create_snapshots():
traceback_limit = 2
# _tracemalloc._get_traces() returns a list of (domain, size,
# traceback_frames) tuples. traceback_frames is a tuple of (filename,
# line_number) tuples.
raw_traces = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
]
snapshot = tracemalloc.Snapshot(raw_traces, traceback_limit)
raw_traces2 = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 2, (('a.py', 5), ('b.py', 4))),
(2, 5000, (('a.py', 5), ('b.py', 4))),
(4, 400, (('c.py', 578),)),
]
snapshot2 = tracemalloc.Snapshot(raw_traces2, traceback_limit)
return (snapshot, snapshot2)
def frame(filename, lineno):
return tracemalloc._Frame((filename, lineno))
def traceback(*frames):
return tracemalloc.Traceback(frames)
def traceback_lineno(filename, lineno):
return traceback((filename, lineno))
def traceback_filename(filename):
return traceback_lineno(filename, 0)
class TestTracemallocEnabled(unittest.TestCase):
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
tracemalloc.start(1)
def tearDown(self):
tracemalloc.stop()
def test_get_tracemalloc_memory(self):
data = [allocate_bytes(123) for count in range(1000)]
size = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size, 0)
tracemalloc.clear_traces()
size2 = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size2, 0)
self.assertLessEqual(size2, size)
def test_get_object_traceback(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(traceback, obj_traceback)
def test_set_traceback_limit(self):
obj_size = 10
tracemalloc.stop()
self.assertRaises(ValueError, tracemalloc.start, -1)
tracemalloc.stop()
tracemalloc.start(10)
obj2, obj2_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj2)
self.assertEqual(len(traceback), 10)
self.assertEqual(traceback, obj2_traceback)
tracemalloc.stop()
tracemalloc.start(1)
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(len(traceback), 1)
self.assertEqual(traceback, obj_traceback)
def find_trace(self, traces, traceback):
for trace in traces:
if trace[2] == traceback._frames:
return trace
self.fail("trace not found")
def test_get_traces(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traces = tracemalloc._get_traces()
trace = self.find_trace(traces, obj_traceback)
self.assertIsInstance(trace, tuple)
domain, size, traceback = trace
self.assertEqual(size, obj_size)
self.assertEqual(traceback, obj_traceback._frames)
tracemalloc.stop()
self.assertEqual(tracemalloc._get_traces(), [])
def test_get_traces_intern_traceback(self):
# dummy wrappers to get more useful and identical frames in the traceback
def allocate_bytes2(size):
return allocate_bytes(size)
def allocate_bytes3(size):
return allocate_bytes2(size)
def allocate_bytes4(size):
return allocate_bytes3(size)
# Ensure that two identical tracebacks are not duplicated
tracemalloc.stop()
tracemalloc.start(4)
obj_size = 123
obj1, obj1_traceback = allocate_bytes4(obj_size)
obj2, obj2_traceback = allocate_bytes4(obj_size)
traces = tracemalloc._get_traces()
trace1 = self.find_trace(traces, obj1_traceback)
trace2 = self.find_trace(traces, obj2_traceback)
domain1, size1, traceback1 = trace1
domain2, size2, traceback2 = trace2
self.assertIs(traceback2, traceback1)
def test_get_traced_memory(self):
# Python allocates some internal objects, so the test must tolerate
# a small difference between the expected size and the real usage
max_error = 2048
# allocate one object
obj_size = 1024 * 1024
tracemalloc.clear_traces()
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
self.assertGreaterEqual(peak_size, size)
self.assertLessEqual(size - obj_size, max_error)
self.assertLessEqual(peak_size - size, max_error)
# destroy the object
obj = None
size2, peak_size2 = tracemalloc.get_traced_memory()
self.assertLess(size2, size)
self.assertGreaterEqual(size - size2, obj_size - max_error)
self.assertGreaterEqual(peak_size2, peak_size)
# clear_traces() must reset traced memory counters
tracemalloc.clear_traces()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
# allocate another object
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
# stop() also resets traced memory counters
tracemalloc.stop()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
def test_clear_traces(self):
obj, obj_traceback = allocate_bytes(123)
traceback = tracemalloc.get_object_traceback(obj)
self.assertIsNotNone(traceback)
tracemalloc.clear_traces()
traceback2 = tracemalloc.get_object_traceback(obj)
self.assertIsNone(traceback2)
def test_is_tracing(self):
tracemalloc.stop()
self.assertFalse(tracemalloc.is_tracing())
tracemalloc.start()
self.assertTrue(tracemalloc.is_tracing())
def test_snapshot(self):
obj, source = allocate_bytes(123)
# take a snapshot
snapshot = tracemalloc.take_snapshot()
# write on disk
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
# load from disk
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.traces, snapshot.traces)
# tracemalloc must be tracing memory allocations to take a snapshot
tracemalloc.stop()
with self.assertRaises(RuntimeError) as cm:
tracemalloc.take_snapshot()
self.assertEqual(str(cm.exception),
"the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
def test_snapshot_save_attr(self):
# take a snapshot with a new attribute
snapshot = tracemalloc.take_snapshot()
snapshot.test_attr = "new"
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
# load() should recreate the attribute
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.test_attr, "new")
def fork_child(self):
if not tracemalloc.is_tracing():
return 2
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
if traceback is None:
return 3
# everything is fine
return 0
@unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
def test_fork(self):
# check that tracemalloc is still working after fork
pid = os.fork()
if not pid:
# child
exitcode = 1
try:
exitcode = self.fork_child()
finally:
os._exit(exitcode)
else:
pid2, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
exitcode = os.WEXITSTATUS(status)
self.assertEqual(exitcode, 0)
class TestSnapshot(unittest.TestCase):
maxDiff = 4000
def test_create_snapshot(self):
raw_traces = [(0, 5, (('a.py', 2),))]
with contextlib.ExitStack() as stack:
stack.enter_context(patch.object(tracemalloc, 'is_tracing',
return_value=True))
stack.enter_context(patch.object(tracemalloc, 'get_traceback_limit',
return_value=5))
stack.enter_context(patch.object(tracemalloc, '_get_traces',
return_value=raw_traces))
snapshot = tracemalloc.take_snapshot()
self.assertEqual(snapshot.traceback_limit, 5)
self.assertEqual(len(snapshot.traces), 1)
trace = snapshot.traces[0]
self.assertEqual(trace.size, 5)
self.assertEqual(len(trace.traceback), 1)
self.assertEqual(trace.traceback[0].filename, 'a.py')
self.assertEqual(trace.traceback[0].lineno, 2)
def test_filter_traces(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "b.py")
filter2 = tracemalloc.Filter(True, "a.py", 2)
filter3 = tracemalloc.Filter(True, "a.py", 5)
original_traces = list(snapshot.traces._traces)
# exclude b.py
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(3, 7, (('<unknown>', 0),)),
])
# filter_traces() must not touch the original snapshot
self.assertEqual(snapshot.traces._traces, original_traces)
# only include two lines of a.py
snapshot4 = snapshot3.filter_traces((filter2, filter3))
self.assertEqual(snapshot4.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
])
# No filter: just duplicate the snapshot
snapshot5 = snapshot.filter_traces(())
self.assertIsNot(snapshot5, snapshot)
self.assertIsNot(snapshot5.traces, snapshot.traces)
self.assertEqual(snapshot5.traces, snapshot.traces)
self.assertRaises(TypeError, snapshot.filter_traces, filter1)
def test_filter_traces_domain(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "a.py", domain=1)
filter2 = tracemalloc.Filter(True, "a.py", domain=1)
original_traces = list(snapshot.traces._traces)
# exclude a.py of domain 1
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
])
# applying the same exclusion filter a second time yields the same result (filter2, the inclusive variant, is left unused here)
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
])
def test_filter_traces_domain_filter(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.DomainFilter(False, domain=3)
filter2 = tracemalloc.DomainFilter(True, domain=3)
# exclude domain 2
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
])
# include domain 2
snapshot3 = snapshot.filter_traces((filter2,))
self.assertEqual(snapshot3.traces._traces, [
(3, 7, (('<unknown>', 0),)),
])
def test_snapshot_group_by_line(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_lineno('<unknown>', 0)
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_c_578 = traceback_lineno('c.py', 578)
# stats per file and line
stats1 = snapshot.statistics('lineno')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
# stats per file and line (2)
stats2 = snapshot2.statistics('lineno')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a_5, 5002, 2),
tracemalloc.Statistic(tb_c_578, 400, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
])
# stats diff per file and line
statistics = snapshot2.compare_to(snapshot, 'lineno')
self.assertEqual(statistics, [
tracemalloc.StatisticDiff(tb_a_5, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb_c_578, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b_1, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb_a_2, 30, 0, 3, 0),
])
def test_snapshot_group_by_file(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_c = traceback_filename('c.py')
# stats per file
stats1 = snapshot.statistics('filename')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b, 66, 1),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# stats per file (2)
stats2 = snapshot2.statistics('filename')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a, 5032, 5),
tracemalloc.Statistic(tb_c, 400, 1),
])
# stats diff per file
diff = snapshot2.compare_to(snapshot, 'filename')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb_a, 5032, 5000, 5, 1),
tracemalloc.StatisticDiff(tb_c, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
])
def test_snapshot_group_by_traceback(self):
snapshot, snapshot2 = create_snapshots()
# stats per file
tb1 = traceback(('a.py', 2), ('b.py', 4))
tb2 = traceback(('a.py', 5), ('b.py', 4))
tb3 = traceback(('b.py', 1))
tb4 = traceback(('<unknown>', 0))
stats1 = snapshot.statistics('traceback')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb3, 66, 1),
tracemalloc.Statistic(tb1, 30, 3),
tracemalloc.Statistic(tb4, 7, 1),
tracemalloc.Statistic(tb2, 2, 1),
])
# stats per file (2)
tb5 = traceback(('c.py', 578))
stats2 = snapshot2.statistics('traceback')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb2, 5002, 2),
tracemalloc.Statistic(tb5, 400, 1),
tracemalloc.Statistic(tb1, 30, 3),
])
# stats diff per file
diff = snapshot2.compare_to(snapshot, 'traceback')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb2, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb5, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb3, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb4, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb1, 30, 0, 3, 0),
])
self.assertRaises(ValueError,
snapshot.statistics, 'traceback', cumulative=True)
def test_snapshot_group_by_cumulative(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_b_4 = traceback_lineno('b.py', 4)
# per file
stats = snapshot.statistics('filename', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b, 98, 5),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
# per line
stats = snapshot.statistics('lineno', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_b_4, 32, 4),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
def test_trace_format(self):
snapshot, snapshot2 = create_snapshots()
trace = snapshot.traces[0]
self.assertEqual(str(trace), 'a.py:2: 10 B')
traceback = trace.traceback
self.assertEqual(str(traceback), 'a.py:2')
frame = traceback[0]
self.assertEqual(str(frame), 'a.py:2')
def test_statistic_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot.statistics('lineno')
stat = stats[0]
self.assertEqual(str(stat),
'b.py:1: size=66 B, count=1, average=66 B')
def test_statistic_diff_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot2.compare_to(snapshot, 'lineno')
stat = stats[0]
self.assertEqual(str(stat),
'a.py:5: size=5002 B (+5000 B), count=2 (+1), average=2501 B')
def test_slices(self):
snapshot, snapshot2 = create_snapshots()
self.assertEqual(snapshot.traces[:2],
(snapshot.traces[0], snapshot.traces[1]))
traceback = snapshot.traces[0].traceback
self.assertEqual(traceback[:2],
(traceback[0], traceback[1]))
def test_format_traceback(self):
snapshot, snapshot2 = create_snapshots()
def getline(filename, lineno):
return ' <%s, %s>' % (filename, lineno)
with unittest.mock.patch('tracemalloc.linecache.getline',
side_effect=getline):
tb = snapshot.traces[0].traceback
self.assertEqual(tb.format(),
[' File "a.py", line 2',
' <a.py, 2>',
' File "b.py", line 4',
' <b.py, 4>'])
self.assertEqual(tb.format(limit=1),
[' File "a.py", line 2',
' <a.py, 2>'])
self.assertEqual(tb.format(limit=-1),
[])
class TestFilters(unittest.TestCase):
maxDiff = 2048
def test_filter_attributes(self):
# test default values
f = tracemalloc.Filter(True, "abc")
self.assertEqual(f.inclusive, True)
self.assertEqual(f.filename_pattern, "abc")
self.assertIsNone(f.lineno)
self.assertEqual(f.all_frames, False)
# test custom values
f = tracemalloc.Filter(False, "test.py", 123, True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# parameters passed by keyword
f = tracemalloc.Filter(inclusive=False, filename_pattern="test.py", lineno=123, all_frames=True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
# read-only attribute
self.assertRaises(AttributeError, setattr, f, "filename_pattern", "abc")
def test_filter_match(self):
# filter without line number
f = tracemalloc.Filter(True, "abc")
self.assertTrue(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc")
self.assertFalse(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number > 0
f = tracemalloc.Filter(True, "abc", 5)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 5)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
# filter with line number 0
f = tracemalloc.Filter(True, "abc", 0)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 0)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
def test_filter_match_filename(self):
def fnmatch(inclusive, filename, pattern):
f = tracemalloc.Filter(inclusive, pattern)
return f._match_frame(filename, 0)
self.assertTrue(fnmatch(True, "abc", "abc"))
self.assertFalse(fnmatch(True, "12356", "abc"))
self.assertFalse(fnmatch(True, "<unknown>", "abc"))
self.assertFalse(fnmatch(False, "abc", "abc"))
self.assertTrue(fnmatch(False, "12356", "abc"))
self.assertTrue(fnmatch(False, "<unknown>", "abc"))
def test_filter_match_filename_joker(self):
def fnmatch(filename, pattern):
filter = tracemalloc.Filter(True, pattern)
return filter._match_frame(filename, 0)
# empty string
self.assertFalse(fnmatch('abc', ''))
self.assertFalse(fnmatch('', 'abc'))
self.assertTrue(fnmatch('', ''))
self.assertTrue(fnmatch('', '*'))
# no *
self.assertTrue(fnmatch('abc', 'abc'))
self.assertFalse(fnmatch('abc', 'abcd'))
self.assertFalse(fnmatch('abc', 'def'))
# a*
self.assertTrue(fnmatch('abc', 'a*'))
self.assertTrue(fnmatch('abc', 'abc*'))
self.assertFalse(fnmatch('abc', 'b*'))
self.assertFalse(fnmatch('abc', 'abcd*'))
# a*b
self.assertTrue(fnmatch('abc', 'a*c'))
self.assertTrue(fnmatch('abcdcx', 'a*cx'))
self.assertFalse(fnmatch('abb', 'a*c'))
self.assertFalse(fnmatch('abcdce', 'a*cx'))
# a*b*c
self.assertTrue(fnmatch('abcde', 'a*c*e'))
self.assertTrue(fnmatch('abcbdefeg', 'a*bd*eg'))
self.assertFalse(fnmatch('abcdd', 'a*c*e'))
self.assertFalse(fnmatch('abcbdefef', 'a*bd*eg'))
# replace .pyc suffix with .py
self.assertTrue(fnmatch('a.pyc', 'a.py'))
self.assertTrue(fnmatch('a.py', 'a.pyc'))
if os.name == 'nt':
# case insensitive
self.assertTrue(fnmatch('aBC', 'ABc'))
self.assertTrue(fnmatch('aBcDe', 'Ab*dE'))
self.assertTrue(fnmatch('a.pyc', 'a.PY'))
self.assertTrue(fnmatch('a.py', 'a.PYC'))
else:
# case sensitive
self.assertFalse(fnmatch('aBC', 'ABc'))
self.assertFalse(fnmatch('aBcDe', 'Ab*dE'))
self.assertFalse(fnmatch('a.pyc', 'a.PY'))
self.assertFalse(fnmatch('a.py', 'a.PYC'))
if os.name == 'nt':
# normalize alternate separator "/" to the standard separator "\"
self.assertTrue(fnmatch(r'a/b', r'a\b'))
self.assertTrue(fnmatch(r'a\b', r'a/b'))
self.assertTrue(fnmatch(r'a/b\c', r'a\b/c'))
self.assertTrue(fnmatch(r'a/b/c', r'a\b\c'))
else:
# there is no alternate separator
self.assertFalse(fnmatch(r'a/b', r'a\b'))
self.assertFalse(fnmatch(r'a\b', r'a/b'))
self.assertFalse(fnmatch(r'a/b\c', r'a\b/c'))
self.assertFalse(fnmatch(r'a/b/c', r'a\b\c'))
# as of 3.5, .pyo is no longer munged to .py
self.assertFalse(fnmatch('a.pyo', 'a.py'))
def test_filter_match_trace(self):
t1 = (("a.py", 2), ("b.py", 3))
t2 = (("b.py", 4), ("b.py", 5))
t3 = (("c.py", 5), ('<unknown>', 0))
unknown = (('<unknown>', 0),)
f = tracemalloc.Filter(True, "b.py", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "b.py", all_frames=False)
self.assertFalse(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "<unknown>", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
class TestCommandLine(unittest.TestCase):
def test_env_var_disabled_by_default(self):
# not tracing by default
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
@unittest.skipIf(interpreter_requires_environment(),
'Cannot run -E tests when PYTHON env vars are required.')
def test_env_var_ignored_with_E(self):
"""PYTHON* environment variables must be ignored when -E is present."""
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
def test_env_var_enabled_at_startup(self):
# tracing at startup
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'True')
def test_env_limit(self):
# start and set the number of frames
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='10')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'10')
def test_env_var_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
ok, stdout, stderr = assert_python_failure(
'-c', 'pass',
PYTHONTRACEMALLOC=str(nframe))
self.assertIn(b'PYTHONTRACEMALLOC: invalid '
b'number of frames',
stderr)
def test_sys_xoptions(self):
for xoptions, nframe in (
('tracemalloc', 1),
('tracemalloc=1', 1),
('tracemalloc=15', 15),
):
with self.subTest(xoptions=xoptions, nframe=nframe):
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-X', xoptions, '-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, str(nframe).encode('ascii'))
def test_sys_xoptions_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
args = ('-X', 'tracemalloc=%s' % nframe, '-c', 'pass')
ok, stdout, stderr = assert_python_failure(*args)
self.assertIn(b'-X tracemalloc=NFRAME: invalid '
b'number of frames',
stderr)
def test_pymem_alloc0(self):
# Issue #21639: Check that PyMem_Malloc(0) with tracemalloc enabled
# does not crash.
code = 'import _testcapi; _testcapi.test_pymem_alloc0(); 1'
assert_python_ok('-X', 'tracemalloc', '-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
class TestCAPI(unittest.TestCase):
maxDiff = 80 * 20
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
self.domain = 5
self.size = 123
self.obj = allocate_bytes(self.size)[0]
# for the type "object", id(obj) is the address of its memory block.
# This type is not tracked by the garbage collector
self.ptr = id(self.obj)
def tearDown(self):
tracemalloc.stop()
def get_traceback(self):
frames = _testcapi.tracemalloc_get_traceback(self.domain, self.ptr)
if frames is not None:
return tracemalloc.Traceback(frames)
else:
return None
def track(self, release_gil=False, nframe=1):
frames = get_frames(nframe, 2)
_testcapi.tracemalloc_track(self.domain, self.ptr, self.size,
release_gil)
return frames
def untrack(self):
_testcapi.tracemalloc_untrack(self.domain, self.ptr)
def get_traced_memory(self):
# Get the traced size in the domain
snapshot = tracemalloc.take_snapshot()
domain_filter = tracemalloc.DomainFilter(True, self.domain)
snapshot = snapshot.filter_traces([domain_filter])
return sum(trace.size for trace in snapshot.traces)
def check_track(self, release_gil):
nframe = 5
tracemalloc.start(nframe)
size = tracemalloc.get_traced_memory()[0]
frames = self.track(release_gil, nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
self.assertEqual(self.get_traced_memory(), self.size)
def test_track(self):
self.check_track(False)
def test_track_without_gil(self):
# check that calling _PyTraceMalloc_Track() without holding the GIL
# works too
self.check_track(True)
def test_track_already_tracked(self):
nframe = 5
tracemalloc.start(nframe)
# track a first time
self.track()
# calling _PyTraceMalloc_Track() must remove the old trace and add
# a new trace with the new traceback
frames = self.track(nframe=nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
def test_untrack(self):
tracemalloc.start()
self.track()
self.assertIsNotNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), self.size)
# untrack must remove the trace
self.untrack()
self.assertIsNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), 0)
# calling _PyTraceMalloc_Untrack() multiple times must not crash
self.untrack()
self.untrack()
def test_stop_track(self):
tracemalloc.start()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.track()
self.assertIsNone(self.get_traceback())
def test_stop_untrack(self):
tracemalloc.start()
self.track()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.untrack()
def test_main():
support.run_unittest(
TestTracemallocEnabled,
TestSnapshot,
TestFilters,
TestCommandLine,
TestCAPI,
)
if __name__ == "__main__":
test_main()
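For orientation, a minimal standalone sketch of the workflow these tests exercise (start tracing, allocate, take a snapshot, group statistics by line); the 256 KiB allocation below is an arbitrary example.
import tracemalloc

tracemalloc.start(5)                 # keep up to 5 frames per allocation traceback
payload = b'x' * (256 * 1024)        # arbitrary allocation to be traced
snapshot = tracemalloc.take_snapshot()
for stat in snapshot.statistics('lineno')[:3]:
    print(stat)                      # e.g. "sketch.py:5: size=256 KiB, count=1, average=256 KiB"
tracemalloc.stop()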
avg_line_length: 36.408451 | max_line_length: 104 | alphanum_fraction: 0.593645
import contextlib
import os
import sys
import tracemalloc
import unittest
from unittest.mock import patch
from test.support.script_helper import (assert_python_ok, assert_python_failure,
interpreter_requires_environment)
from test import support
try:
import threading
except ImportError:
threading = None
try:
import _testcapi
except ImportError:
_testcapi = None
EMPTY_STRING_SIZE = sys.getsizeof(b'')
def get_frames(nframe, lineno_delta):
frames = []
frame = sys._getframe(1)
for index in range(nframe):
code = frame.f_code
lineno = frame.f_lineno + lineno_delta
frames.append((code.co_filename, lineno))
lineno_delta = 0
frame = frame.f_back
if frame is None:
break
return tuple(frames)
def allocate_bytes(size):
nframe = tracemalloc.get_traceback_limit()
bytes_len = (size - EMPTY_STRING_SIZE)
frames = get_frames(nframe, 1)
data = b'x' * bytes_len
return data, tracemalloc.Traceback(frames)
def create_snapshots():
traceback_limit = 2
raw_traces = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
]
snapshot = tracemalloc.Snapshot(raw_traces, traceback_limit)
raw_traces2 = [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 2, (('a.py', 5), ('b.py', 4))),
(2, 5000, (('a.py', 5), ('b.py', 4))),
(4, 400, (('c.py', 578),)),
]
snapshot2 = tracemalloc.Snapshot(raw_traces2, traceback_limit)
return (snapshot, snapshot2)
def frame(filename, lineno):
return tracemalloc._Frame((filename, lineno))
def traceback(*frames):
return tracemalloc.Traceback(frames)
def traceback_lineno(filename, lineno):
return traceback((filename, lineno))
def traceback_filename(filename):
return traceback_lineno(filename, 0)
class TestTracemallocEnabled(unittest.TestCase):
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
tracemalloc.start(1)
def tearDown(self):
tracemalloc.stop()
def test_get_tracemalloc_memory(self):
data = [allocate_bytes(123) for count in range(1000)]
size = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size, 0)
tracemalloc.clear_traces()
size2 = tracemalloc.get_tracemalloc_memory()
self.assertGreaterEqual(size2, 0)
self.assertLessEqual(size2, size)
def test_get_object_traceback(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(traceback, obj_traceback)
def test_set_traceback_limit(self):
obj_size = 10
tracemalloc.stop()
self.assertRaises(ValueError, tracemalloc.start, -1)
tracemalloc.stop()
tracemalloc.start(10)
obj2, obj2_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj2)
self.assertEqual(len(traceback), 10)
self.assertEqual(traceback, obj2_traceback)
tracemalloc.stop()
tracemalloc.start(1)
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
self.assertEqual(len(traceback), 1)
self.assertEqual(traceback, obj_traceback)
def find_trace(self, traces, traceback):
for trace in traces:
if trace[2] == traceback._frames:
return trace
self.fail("trace not found")
def test_get_traces(self):
tracemalloc.clear_traces()
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traces = tracemalloc._get_traces()
trace = self.find_trace(traces, obj_traceback)
self.assertIsInstance(trace, tuple)
domain, size, traceback = trace
self.assertEqual(size, obj_size)
self.assertEqual(traceback, obj_traceback._frames)
tracemalloc.stop()
self.assertEqual(tracemalloc._get_traces(), [])
def test_get_traces_intern_traceback(self):
def allocate_bytes2(size):
return allocate_bytes(size)
def allocate_bytes3(size):
return allocate_bytes2(size)
def allocate_bytes4(size):
return allocate_bytes3(size)
tracemalloc.stop()
tracemalloc.start(4)
obj_size = 123
obj1, obj1_traceback = allocate_bytes4(obj_size)
obj2, obj2_traceback = allocate_bytes4(obj_size)
traces = tracemalloc._get_traces()
trace1 = self.find_trace(traces, obj1_traceback)
trace2 = self.find_trace(traces, obj2_traceback)
domain1, size1, traceback1 = trace1
domain2, size2, traceback2 = trace2
self.assertIs(traceback2, traceback1)
def test_get_traced_memory(self):
max_error = 2048
obj_size = 1024 * 1024
tracemalloc.clear_traces()
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
self.assertGreaterEqual(peak_size, size)
self.assertLessEqual(size - obj_size, max_error)
self.assertLessEqual(peak_size - size, max_error)
obj = None
size2, peak_size2 = tracemalloc.get_traced_memory()
self.assertLess(size2, size)
self.assertGreaterEqual(size - size2, obj_size - max_error)
self.assertGreaterEqual(peak_size2, peak_size)
tracemalloc.clear_traces()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
obj, obj_traceback = allocate_bytes(obj_size)
size, peak_size = tracemalloc.get_traced_memory()
self.assertGreaterEqual(size, obj_size)
tracemalloc.stop()
self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
def test_clear_traces(self):
obj, obj_traceback = allocate_bytes(123)
traceback = tracemalloc.get_object_traceback(obj)
self.assertIsNotNone(traceback)
tracemalloc.clear_traces()
traceback2 = tracemalloc.get_object_traceback(obj)
self.assertIsNone(traceback2)
def test_is_tracing(self):
tracemalloc.stop()
self.assertFalse(tracemalloc.is_tracing())
tracemalloc.start()
self.assertTrue(tracemalloc.is_tracing())
def test_snapshot(self):
obj, source = allocate_bytes(123)
snapshot = tracemalloc.take_snapshot()
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.traces, snapshot.traces)
tracemalloc.stop()
with self.assertRaises(RuntimeError) as cm:
tracemalloc.take_snapshot()
self.assertEqual(str(cm.exception),
"the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
def test_snapshot_save_attr(self):
snapshot = tracemalloc.take_snapshot()
snapshot.test_attr = "new"
snapshot.dump(support.TESTFN)
self.addCleanup(support.unlink, support.TESTFN)
snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
self.assertEqual(snapshot2.test_attr, "new")
def fork_child(self):
if not tracemalloc.is_tracing():
return 2
obj_size = 12345
obj, obj_traceback = allocate_bytes(obj_size)
traceback = tracemalloc.get_object_traceback(obj)
if traceback is None:
return 3
return 0
@unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
def test_fork(self):
pid = os.fork()
if not pid:
exitcode = 1
try:
exitcode = self.fork_child()
finally:
os._exit(exitcode)
else:
pid2, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
exitcode = os.WEXITSTATUS(status)
self.assertEqual(exitcode, 0)
class TestSnapshot(unittest.TestCase):
maxDiff = 4000
def test_create_snapshot(self):
raw_traces = [(0, 5, (('a.py', 2),))]
with contextlib.ExitStack() as stack:
stack.enter_context(patch.object(tracemalloc, 'is_tracing',
return_value=True))
stack.enter_context(patch.object(tracemalloc, 'get_traceback_limit',
return_value=5))
stack.enter_context(patch.object(tracemalloc, '_get_traces',
return_value=raw_traces))
snapshot = tracemalloc.take_snapshot()
self.assertEqual(snapshot.traceback_limit, 5)
self.assertEqual(len(snapshot.traces), 1)
trace = snapshot.traces[0]
self.assertEqual(trace.size, 5)
self.assertEqual(len(trace.traceback), 1)
self.assertEqual(trace.traceback[0].filename, 'a.py')
self.assertEqual(trace.traceback[0].lineno, 2)
def test_filter_traces(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "b.py")
filter2 = tracemalloc.Filter(True, "a.py", 2)
filter3 = tracemalloc.Filter(True, "a.py", 5)
original_traces = list(snapshot.traces._traces)
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(3, 7, (('<unknown>', 0),)),
])
self.assertEqual(snapshot.traces._traces, original_traces)
snapshot4 = snapshot3.filter_traces((filter2, filter3))
self.assertEqual(snapshot4.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
])
snapshot5 = snapshot.filter_traces(())
self.assertIsNot(snapshot5, snapshot)
self.assertIsNot(snapshot5.traces, snapshot.traces)
self.assertEqual(snapshot5.traces, snapshot.traces)
self.assertRaises(TypeError, snapshot.filter_traces, filter1)
def test_filter_traces_domain(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.Filter(False, "a.py", domain=1)
filter2 = tracemalloc.Filter(True, "a.py", domain=1)
original_traces = list(snapshot.traces._traces)
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
])
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
(3, 7, (('<unknown>', 0),)),
])
def test_filter_traces_domain_filter(self):
snapshot, snapshot2 = create_snapshots()
filter1 = tracemalloc.DomainFilter(False, domain=3)
filter2 = tracemalloc.DomainFilter(True, domain=3)
snapshot3 = snapshot.filter_traces((filter1,))
self.assertEqual(snapshot3.traces._traces, [
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(0, 10, (('a.py', 2), ('b.py', 4))),
(1, 2, (('a.py', 5), ('b.py', 4))),
(2, 66, (('b.py', 1),)),
])
snapshot3 = snapshot.filter_traces((filter2,))
self.assertEqual(snapshot3.traces._traces, [
(3, 7, (('<unknown>', 0),)),
])
def test_snapshot_group_by_line(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_lineno('<unknown>', 0)
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_c_578 = traceback_lineno('c.py', 578)
stats1 = snapshot.statistics('lineno')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
stats2 = snapshot2.statistics('lineno')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a_5, 5002, 2),
tracemalloc.Statistic(tb_c_578, 400, 1),
tracemalloc.Statistic(tb_a_2, 30, 3),
])
statistics = snapshot2.compare_to(snapshot, 'lineno')
self.assertEqual(statistics, [
tracemalloc.StatisticDiff(tb_a_5, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb_c_578, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b_1, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb_a_2, 30, 0, 3, 0),
])
def test_snapshot_group_by_file(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_c = traceback_filename('c.py')
stats1 = snapshot.statistics('filename')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb_b, 66, 1),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
stats2 = snapshot2.statistics('filename')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb_a, 5032, 5),
tracemalloc.Statistic(tb_c, 400, 1),
])
diff = snapshot2.compare_to(snapshot, 'filename')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb_a, 5032, 5000, 5, 1),
tracemalloc.StatisticDiff(tb_c, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb_b, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
])
def test_snapshot_group_by_traceback(self):
snapshot, snapshot2 = create_snapshots()
tb1 = traceback(('a.py', 2), ('b.py', 4))
tb2 = traceback(('a.py', 5), ('b.py', 4))
tb3 = traceback(('b.py', 1))
tb4 = traceback(('<unknown>', 0))
stats1 = snapshot.statistics('traceback')
self.assertEqual(stats1, [
tracemalloc.Statistic(tb3, 66, 1),
tracemalloc.Statistic(tb1, 30, 3),
tracemalloc.Statistic(tb4, 7, 1),
tracemalloc.Statistic(tb2, 2, 1),
])
tb5 = traceback(('c.py', 578))
stats2 = snapshot2.statistics('traceback')
self.assertEqual(stats2, [
tracemalloc.Statistic(tb2, 5002, 2),
tracemalloc.Statistic(tb5, 400, 1),
tracemalloc.Statistic(tb1, 30, 3),
])
diff = snapshot2.compare_to(snapshot, 'traceback')
self.assertEqual(diff, [
tracemalloc.StatisticDiff(tb2, 5002, 5000, 2, 1),
tracemalloc.StatisticDiff(tb5, 400, 400, 1, 1),
tracemalloc.StatisticDiff(tb3, 0, -66, 0, -1),
tracemalloc.StatisticDiff(tb4, 0, -7, 0, -1),
tracemalloc.StatisticDiff(tb1, 30, 0, 3, 0),
])
self.assertRaises(ValueError,
snapshot.statistics, 'traceback', cumulative=True)
def test_snapshot_group_by_cumulative(self):
snapshot, snapshot2 = create_snapshots()
tb_0 = traceback_filename('<unknown>')
tb_a = traceback_filename('a.py')
tb_b = traceback_filename('b.py')
tb_a_2 = traceback_lineno('a.py', 2)
tb_a_5 = traceback_lineno('a.py', 5)
tb_b_1 = traceback_lineno('b.py', 1)
tb_b_4 = traceback_lineno('b.py', 4)
stats = snapshot.statistics('filename', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b, 98, 5),
tracemalloc.Statistic(tb_a, 32, 4),
tracemalloc.Statistic(tb_0, 7, 1),
])
stats = snapshot.statistics('lineno', True)
self.assertEqual(stats, [
tracemalloc.Statistic(tb_b_1, 66, 1),
tracemalloc.Statistic(tb_b_4, 32, 4),
tracemalloc.Statistic(tb_a_2, 30, 3),
tracemalloc.Statistic(tb_0, 7, 1),
tracemalloc.Statistic(tb_a_5, 2, 1),
])
def test_trace_format(self):
snapshot, snapshot2 = create_snapshots()
trace = snapshot.traces[0]
self.assertEqual(str(trace), 'a.py:2: 10 B')
traceback = trace.traceback
self.assertEqual(str(traceback), 'a.py:2')
frame = traceback[0]
self.assertEqual(str(frame), 'a.py:2')
def test_statistic_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot.statistics('lineno')
stat = stats[0]
self.assertEqual(str(stat),
'b.py:1: size=66 B, count=1, average=66 B')
def test_statistic_diff_format(self):
snapshot, snapshot2 = create_snapshots()
stats = snapshot2.compare_to(snapshot, 'lineno')
stat = stats[0]
self.assertEqual(str(stat),
'a.py:5: size=5002 B (+5000 B), count=2 (+1), average=2501 B')
def test_slices(self):
snapshot, snapshot2 = create_snapshots()
self.assertEqual(snapshot.traces[:2],
(snapshot.traces[0], snapshot.traces[1]))
traceback = snapshot.traces[0].traceback
self.assertEqual(traceback[:2],
(traceback[0], traceback[1]))
def test_format_traceback(self):
snapshot, snapshot2 = create_snapshots()
def getline(filename, lineno):
return ' <%s, %s>' % (filename, lineno)
with unittest.mock.patch('tracemalloc.linecache.getline',
side_effect=getline):
tb = snapshot.traces[0].traceback
self.assertEqual(tb.format(),
[' File "a.py", line 2',
' <a.py, 2>',
' File "b.py", line 4',
' <b.py, 4>'])
self.assertEqual(tb.format(limit=1),
[' File "a.py", line 2',
' <a.py, 2>'])
self.assertEqual(tb.format(limit=-1),
[])
class TestFilters(unittest.TestCase):
maxDiff = 2048
def test_filter_attributes(self):
f = tracemalloc.Filter(True, "abc")
self.assertEqual(f.inclusive, True)
self.assertEqual(f.filename_pattern, "abc")
self.assertIsNone(f.lineno)
self.assertEqual(f.all_frames, False)
f = tracemalloc.Filter(False, "test.py", 123, True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
f = tracemalloc.Filter(inclusive=False, filename_pattern="test.py", lineno=123, all_frames=True)
self.assertEqual(f.inclusive, False)
self.assertEqual(f.filename_pattern, "test.py")
self.assertEqual(f.lineno, 123)
self.assertEqual(f.all_frames, True)
self.assertRaises(AttributeError, setattr, f, "filename_pattern", "abc")
def test_filter_match(self):
f = tracemalloc.Filter(True, "abc")
self.assertTrue(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc")
self.assertFalse(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
f = tracemalloc.Filter(True, "abc", 5)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 5)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
f = tracemalloc.Filter(True, "abc", 0)
self.assertTrue(f._match_frame("abc", 0))
self.assertFalse(f._match_frame("abc", 5))
self.assertFalse(f._match_frame("abc", 10))
self.assertFalse(f._match_frame("12356", 0))
self.assertFalse(f._match_frame("12356", 5))
self.assertFalse(f._match_frame("12356", 10))
f = tracemalloc.Filter(False, "abc", 0)
self.assertFalse(f._match_frame("abc", 0))
self.assertTrue(f._match_frame("abc", 5))
self.assertTrue(f._match_frame("abc", 10))
self.assertTrue(f._match_frame("12356", 0))
self.assertTrue(f._match_frame("12356", 5))
self.assertTrue(f._match_frame("12356", 10))
def test_filter_match_filename(self):
def fnmatch(inclusive, filename, pattern):
f = tracemalloc.Filter(inclusive, pattern)
return f._match_frame(filename, 0)
self.assertTrue(fnmatch(True, "abc", "abc"))
self.assertFalse(fnmatch(True, "12356", "abc"))
self.assertFalse(fnmatch(True, "<unknown>", "abc"))
self.assertFalse(fnmatch(False, "abc", "abc"))
self.assertTrue(fnmatch(False, "12356", "abc"))
self.assertTrue(fnmatch(False, "<unknown>", "abc"))
def test_filter_match_filename_joker(self):
def fnmatch(filename, pattern):
filter = tracemalloc.Filter(True, pattern)
return filter._match_frame(filename, 0)
self.assertFalse(fnmatch('abc', ''))
self.assertFalse(fnmatch('', 'abc'))
self.assertTrue(fnmatch('', ''))
self.assertTrue(fnmatch('', '*'))
self.assertTrue(fnmatch('abc', 'abc'))
self.assertFalse(fnmatch('abc', 'abcd'))
self.assertFalse(fnmatch('abc', 'def'))
self.assertTrue(fnmatch('abc', 'a*'))
self.assertTrue(fnmatch('abc', 'abc*'))
self.assertFalse(fnmatch('abc', 'b*'))
self.assertFalse(fnmatch('abc', 'abcd*'))
self.assertTrue(fnmatch('abc', 'a*c'))
self.assertTrue(fnmatch('abcdcx', 'a*cx'))
self.assertFalse(fnmatch('abb', 'a*c'))
self.assertFalse(fnmatch('abcdce', 'a*cx'))
self.assertTrue(fnmatch('abcde', 'a*c*e'))
self.assertTrue(fnmatch('abcbdefeg', 'a*bd*eg'))
self.assertFalse(fnmatch('abcdd', 'a*c*e'))
self.assertFalse(fnmatch('abcbdefef', 'a*bd*eg'))
self.assertTrue(fnmatch('a.pyc', 'a.py'))
self.assertTrue(fnmatch('a.py', 'a.pyc'))
if os.name == 'nt':
self.assertTrue(fnmatch('aBC', 'ABc'))
self.assertTrue(fnmatch('aBcDe', 'Ab*dE'))
self.assertTrue(fnmatch('a.pyc', 'a.PY'))
self.assertTrue(fnmatch('a.py', 'a.PYC'))
else:
self.assertFalse(fnmatch('aBC', 'ABc'))
self.assertFalse(fnmatch('aBcDe', 'Ab*dE'))
self.assertFalse(fnmatch('a.pyc', 'a.PY'))
self.assertFalse(fnmatch('a.py', 'a.PYC'))
if os.name == 'nt':
self.assertTrue(fnmatch(r'a/b', r'a\b'))
self.assertTrue(fnmatch(r'a\b', r'a/b'))
self.assertTrue(fnmatch(r'a/b\c', r'a\b/c'))
self.assertTrue(fnmatch(r'a/b/c', r'a\b\c'))
else:
self.assertFalse(fnmatch(r'a/b', r'a\b'))
self.assertFalse(fnmatch(r'a\b', r'a/b'))
self.assertFalse(fnmatch(r'a/b\c', r'a\b/c'))
self.assertFalse(fnmatch(r'a/b/c', r'a\b\c'))
self.assertFalse(fnmatch('a.pyo', 'a.py'))
def test_filter_match_trace(self):
t1 = (("a.py", 2), ("b.py", 3))
t2 = (("b.py", 4), ("b.py", 5))
t3 = (("c.py", 5), ('<unknown>', 0))
unknown = (('<unknown>', 0),)
f = tracemalloc.Filter(True, "b.py", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "b.py", all_frames=False)
self.assertFalse(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "b.py", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=False)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
f = tracemalloc.Filter(True, "<unknown>", all_frames=True)
self.assertFalse(f._match_traceback(t1))
self.assertFalse(f._match_traceback(t2))
self.assertTrue(f._match_traceback(t3))
self.assertTrue(f._match_traceback(unknown))
f = tracemalloc.Filter(False, "<unknown>", all_frames=True)
self.assertTrue(f._match_traceback(t1))
self.assertTrue(f._match_traceback(t2))
self.assertFalse(f._match_traceback(t3))
self.assertFalse(f._match_traceback(unknown))
class TestCommandLine(unittest.TestCase):
def test_env_var_disabled_by_default(self):
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
@unittest.skipIf(interpreter_requires_environment(),
'Cannot run -E tests when PYTHON env vars are required.')
def test_env_var_ignored_with_E(self):
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'False')
def test_env_var_enabled_at_startup(self):
code = 'import tracemalloc; print(tracemalloc.is_tracing())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='1')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'True')
def test_env_limit(self):
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='10')
stdout = stdout.rstrip()
self.assertEqual(stdout, b'10')
def test_env_var_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
ok, stdout, stderr = assert_python_failure(
'-c', 'pass',
PYTHONTRACEMALLOC=str(nframe))
self.assertIn(b'PYTHONTRACEMALLOC: invalid '
b'number of frames',
stderr)
def test_sys_xoptions(self):
for xoptions, nframe in (
('tracemalloc', 1),
('tracemalloc=1', 1),
('tracemalloc=15', 15),
):
with self.subTest(xoptions=xoptions, nframe=nframe):
code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
ok, stdout, stderr = assert_python_ok('-X', xoptions, '-c', code)
stdout = stdout.rstrip()
self.assertEqual(stdout, str(nframe).encode('ascii'))
def test_sys_xoptions_invalid(self):
for nframe in (-1, 0, 2**30):
with self.subTest(nframe=nframe):
with support.SuppressCrashReport():
args = ('-X', 'tracemalloc=%s' % nframe, '-c', 'pass')
ok, stdout, stderr = assert_python_failure(*args)
self.assertIn(b'-X tracemalloc=NFRAME: invalid '
b'number of frames',
stderr)
def test_pymem_alloc0(self):
        code = 'import _testcapi; _testcapi.test_pymem_alloc0(); 1'
assert_python_ok('-X', 'tracemalloc', '-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
class TestCAPI(unittest.TestCase):
maxDiff = 80 * 20
def setUp(self):
if tracemalloc.is_tracing():
self.skipTest("tracemalloc must be stopped before the test")
self.domain = 5
self.size = 123
self.obj = allocate_bytes(self.size)[0]
self.ptr = id(self.obj)
def tearDown(self):
tracemalloc.stop()
def get_traceback(self):
frames = _testcapi.tracemalloc_get_traceback(self.domain, self.ptr)
if frames is not None:
return tracemalloc.Traceback(frames)
else:
return None
def track(self, release_gil=False, nframe=1):
frames = get_frames(nframe, 2)
_testcapi.tracemalloc_track(self.domain, self.ptr, self.size,
release_gil)
return frames
def untrack(self):
_testcapi.tracemalloc_untrack(self.domain, self.ptr)
def get_traced_memory(self):
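        # Sum the sizes of all traces recorded in this test's custom domain,
        # using a DomainFilter so allocations from other domains (e.g. ordinary
        # Python objects) are ignored.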
snapshot = tracemalloc.take_snapshot()
domain_filter = tracemalloc.DomainFilter(True, self.domain)
snapshot = snapshot.filter_traces([domain_filter])
return sum(trace.size for trace in snapshot.traces)
def check_track(self, release_gil):
nframe = 5
tracemalloc.start(nframe)
size = tracemalloc.get_traced_memory()[0]
frames = self.track(release_gil, nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
self.assertEqual(self.get_traced_memory(), self.size)
def test_track(self):
self.check_track(False)
def test_track_without_gil(self):
self.check_track(True)
def test_track_already_tracked(self):
nframe = 5
tracemalloc.start(nframe)
self.track()
frames = self.track(nframe=nframe)
self.assertEqual(self.get_traceback(),
tracemalloc.Traceback(frames))
def test_untrack(self):
tracemalloc.start()
self.track()
self.assertIsNotNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), self.size)
self.untrack()
self.assertIsNone(self.get_traceback())
self.assertEqual(self.get_traced_memory(), 0)
self.untrack()
self.untrack()
def test_stop_track(self):
tracemalloc.start()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.track()
self.assertIsNone(self.get_traceback())
def test_stop_untrack(self):
tracemalloc.start()
self.track()
tracemalloc.stop()
with self.assertRaises(RuntimeError):
self.untrack()
def test_main():
support.run_unittest(
TestTracemallocEnabled,
TestSnapshot,
TestFilters,
TestCommandLine,
TestCAPI,
)
if __name__ == "__main__":
test_main()
| true
| true
|
790ab8bc8dcaac92b2ae1d09617b8388b07d33b3
| 2,018
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/es/models/InstanceSpec.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/es/models/InstanceSpec.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/es/models/InstanceSpec.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class InstanceSpec(object):
def __init__(self, vpcId, subnetId, instanceVersion, instanceName, azId, instanceClass, ipVersion=None, dedicatedMaster=None, coordinating=None, autoSnapshot=None, authConfig=None):
"""
        :param vpcId: ID of the VPC (private network)
        :param subnetId: ID of the subnet
        :param instanceVersion: Elasticsearch version; 5.6.9 and 6.5.4 are currently supported
        :param instanceName: ES cluster name; must not be empty, may only contain letters, digits, underscores and hyphens, must start with a letter, and must not exceed 32 characters
        :param azId: availability zone; see https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions for the zone codes
        :param instanceClass: instance specification; see https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications for the specification codes
        :param ipVersion: (Optional) whether IPv6 is supported; set to v4&v6 to enable, leave empty if not supported
        :param dedicatedMaster: (Optional) whether dedicated master nodes are included; defaults to false
        :param coordinating: (Optional) whether coordinating nodes are included; defaults to false
        :param autoSnapshot: (Optional) automatic snapshot settings.
        :param authConfig: (Optional) authentication settings for the ES data plane
"""
self.vpcId = vpcId
self.subnetId = subnetId
self.instanceVersion = instanceVersion
self.instanceName = instanceName
self.azId = azId
self.instanceClass = instanceClass
self.ipVersion = ipVersion
self.dedicatedMaster = dedicatedMaster
self.coordinating = coordinating
self.autoSnapshot = autoSnapshot
self.authConfig = authConfig
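
# Illustrative usage sketch (editor addition, not part of the generated SDK
# file). The IDs, zone code and instance class below are placeholders, not
# values taken from the JD Cloud documentation:
#
#     spec = InstanceSpec(
#         vpcId="vpc-xxxxxxxx",
#         subnetId="subnet-xxxxxxxx",
#         instanceVersion="6.5.4",
#         instanceName="my-es-cluster",
#         azId="cn-north-1a",
#         instanceClass="es.example.class",
#         dedicatedMaster=False,
#     )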
| 42.041667
| 185
| 0.715064
|
class InstanceSpec(object):
def __init__(self, vpcId, subnetId, instanceVersion, instanceName, azId, instanceClass, ipVersion=None, dedicatedMaster=None, coordinating=None, autoSnapshot=None, authConfig=None):
self.vpcId = vpcId
self.subnetId = subnetId
self.instanceVersion = instanceVersion
self.instanceName = instanceName
self.azId = azId
self.instanceClass = instanceClass
self.ipVersion = ipVersion
self.dedicatedMaster = dedicatedMaster
self.coordinating = coordinating
self.autoSnapshot = autoSnapshot
self.authConfig = authConfig
| true
| true
|
790ab8c88ba983389d8844cf3bf1ef391001f6d7
| 4,328
|
py
|
Python
|
pip/appdirs.py
|
chadrik/pip
|
178809d5cc6fc4054d34f3ac955f7f13cd8c5759
|
[
"MIT"
] | 1
|
2015-11-05T17:33:21.000Z
|
2015-11-05T17:33:21.000Z
|
pip/appdirs.py
|
chadrik/pip
|
178809d5cc6fc4054d34f3ac955f7f13cd8c5759
|
[
"MIT"
] | null | null | null |
pip/appdirs.py
|
chadrik/pip
|
178809d5cc6fc4054d34f3ac955f7f13cd8c5759
|
[
"MIT"
] | null | null | null |
"""
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
import os
import sys
from pip._vendor import six
def user_cache_dir(appname):
r"""
Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go
in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
non-roaming app data dir (the default returned by `user_data_dir`). Apps
typically put cache data somewhere *under* the given dir here. Some
examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
"""
if sys.platform == "win32":
# Get the base path
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
# Add our app name and Cache directory to it
path = os.path.join(path, appname, "Cache")
elif sys.platform == "darwin":
# Get the base path
path = os.path.expanduser("~/Library/Caches")
# Add our app name to it
path = os.path.join(path, appname)
else:
# Get the base path
path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
# Add our app name to it
path = os.path.join(path, appname)
return path
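
# Illustrative usage sketch (editor addition, not part of the vendored module).
# The application name "pip" and the paths shown are examples only; the actual
# result depends on the platform and environment variables:
#
#     >>> user_cache_dir("pip")
#     '/home/<user>/.cache/pip'                        # Linux (XDG default)
#     '/Users/<user>/Library/Caches/pip'               # macOS
#     'C:\\Users\\<user>\\AppData\\Local\\pip\\Cache'  # Windows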
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
"""
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
return directory
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
directory = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
directory = six.text_type(directory)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in directory:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
directory = win32api.GetShortPathName(directory)
except ImportError:
pass
except UnicodeError:
pass
return directory
def _get_win_folder_with_ctypes(csidl_name):
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if sys.platform == "win32":
try:
import win32com.shell # noqa
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
import ctypes
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
| 31.136691
| 78
| 0.647874
|
import os
import sys
from pip._vendor import six
def user_cache_dir(appname):
if sys.platform == "win32":
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
path = os.path.join(path, appname, "Cache")
elif sys.platform == "darwin":
path = os.path.expanduser("~/Library/Caches")
path = os.path.join(path, appname)
else:
path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
path = os.path.join(path, appname)
return path
def _get_win_folder_from_registry(csidl_name):
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
return directory
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
directory = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
try:
directory = six.text_type(directory)
has_high_char = False
for c in directory:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
directory = win32api.GetShortPathName(directory)
except ImportError:
pass
except UnicodeError:
pass
return directory
def _get_win_folder_with_ctypes(csidl_name):
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if sys.platform == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
import ctypes
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
| true
| true
|
790ab8f40190164a4a14f22dd2da155187441626
| 104,192
|
py
|
Python
|
apps/dash-web-trader/env/Lib/site-packages/plotly/graph_objs/scattergeo/__init__.py
|
alzo425/dash-sample-apps
|
d3e9f521a3bc2b8d39ed2922838ad35b9b17beb0
|
[
"MIT"
] | 2
|
2019-10-23T08:14:26.000Z
|
2019-10-23T08:14:27.000Z
|
apps/dash-web-trader/env/Lib/site-packages/plotly/graph_objs/scattergeo/__init__.py
|
alzo425/dash-sample-apps
|
d3e9f521a3bc2b8d39ed2922838ad35b9b17beb0
|
[
"MIT"
] | null | null | null |
apps/dash-web-trader/env/Lib/site-packages/plotly/graph_objs/scattergeo/__init__.py
|
alzo425/dash-sample-apps
|
d3e9f521a3bc2b8d39ed2922838ad35b9b17beb0
|
[
"MIT"
] | 1
|
2021-02-02T02:56:39.000Z
|
2021-02-02T02:56:39.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.unselected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of unselected points,
applied only when a selection exists.
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
size
Sets the marker size of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattergeo.unselected.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.unselected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattergeo.unselected.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
plotly.graph_objs.scattergeo.unselected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.unselected.Textfont
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Unselected
marker
plotly.graph_objs.scattergeo.unselected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.unselected.Textfont
instance or dict with compatible properties
Returns
-------
Unselected
"""
super(Unselected, self).__init__('unselected')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Unselected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Unselected"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (unselected as v_unselected)
# Initialize validators
# ---------------------
self._validators['marker'] = v_unselected.MarkerValidator()
self._validators['textfont'] = v_unselected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
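
# Illustrative usage sketch (editor addition, not generated code). The opacity,
# size and color values below are placeholders; the same settings may also be
# given as an equivalent plain dict on a scattergeo trace:
#
#     unselected = Unselected(
#         marker={"opacity": 0.35, "size": 4},
#         textfont={"color": "gray"},
#     )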
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Textfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__('textfont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Textfont
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Textfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (textfont as v_textfont)
# Initialize validators
# ---------------------
self._validators['color'] = v_textfont.ColorValidator()
self._validators['colorsrc'] = v_textfont.ColorsrcValidator()
self._validators['family'] = v_textfont.FamilyValidator()
self._validators['familysrc'] = v_textfont.FamilysrcValidator()
self._validators['size'] = v_textfont.SizeValidator()
self._validators['sizesrc'] = v_textfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
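
# Illustrative usage sketch (editor addition, not generated code). The font
# family, size and color below are placeholders:
#
#     textfont = Textfont(family="Arial, sans-serif", size=12, color="black")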
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self['maxpoints']
@maxpoints.setter
def maxpoints(self, val):
self['maxpoints'] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['token']
@token.setter
def token(self, val):
self['token'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__('stream')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Stream
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (stream as v_stream)
# Initialize validators
# ---------------------
self._validators['maxpoints'] = v_stream.MaxpointsValidator()
self._validators['token'] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('maxpoints', None)
self['maxpoints'] = maxpoints if maxpoints is not None else _v
_v = arg.pop('token', None)
self['token'] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
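
# Illustrative usage sketch (editor addition, not generated code). The token
# below is a placeholder for a stream id created under https://plot.ly/settings:
#
#     stream = Stream(token="your-stream-token", maxpoints=50)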
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.selected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
plotly.graph_objs.scattergeo.selected.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.selected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of selected points.
Returns
-------
plotly.graph_objs.scattergeo.selected.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
plotly.graph_objs.scattergeo.selected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.selected.Textfont instance
or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Selected
marker
plotly.graph_objs.scattergeo.selected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.selected.Textfont instance
or dict with compatible properties
Returns
-------
Selected
"""
super(Selected, self).__init__('selected')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Selected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Selected"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (selected as v_selected)
# Initialize validators
# ---------------------
self._validators['marker'] = v_selected.MarkerValidator()
self._validators['textfont'] = v_selected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
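
# Illustrative usage sketch (editor addition, not generated code). The colors
# and size below are placeholders:
#
#     selected = Selected(
#         marker={"color": "red", "size": 8},
#         textfont={"color": "red"},
#     )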
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
        `marker.colorscale`. Has an effect only if `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['autocolorscale']
@autocolorscale.setter
def autocolorscale(self, val):
self['autocolorscale'] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
        bounds set in `marker.cmin` and `marker.cmax`. Has an effect
        only if `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['cauto']
@cauto.setter
def cauto(self, val):
self['cauto'] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
        `marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmax']
@cmax.setter
def cmax(self, val):
self['cmax'] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
        effect only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmid']
@cmid.setter
def cmid(self, val):
self['cmid'] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
        `marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmin']
@cmin.setter
def cmin(self, val):
self['cmin'] = val
# color
# -----
@property
def color(self):
"""
        Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A number that will be interpreted as a color
according to scattergeo.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self['coloraxis']
@coloraxis.setter
def coloraxis(self, val):
self['coloraxis'] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                    or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-format/blob/master/README.md#locale_format
And for dates see:
https://github.com/d3/d3-time-format/blob/master/README.md#locale_format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, "2016-10-13 09:15:23.456" with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
plotly.graph_objs.scattergeo.marker.colorbar.Ti
ckformatstop instance or dict with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scattergeo.marker.colorbar.tickformatstopdefa
ults), sets the default property values to use
for elements of
scattergeo.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objs.scattergeo.marker.colorbar.Ti
tle instance or dict with compatible properties
titlefont
Deprecated: Please use
scattergeo.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scattergeo.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scattergeo.marker.ColorBar
"""
return self['colorbar']
@colorbar.setter
def colorbar(self, val):
self['colorbar'] = val
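    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of the colorbar tick settings documented above
    # (`tickmode`, `tick0`, `dtick`) applied to a scattergeo marker. The
    # helper name `_example_colorbar_ticks` and the sample data are
    # assumptions for illustration only; it requires plotly to be installed.
    def _example_colorbar_ticks():
        import plotly.graph_objs as go

        trace = go.Scattergeo(
            lon=[-73.57, 2.35, 139.69],
            lat=[45.50, 48.86, 35.69],
            marker=dict(
                color=[10, 55, 90],        # numerical array -> colorbar applies
                showscale=True,
                colorbar=dict(
                    tickmode="linear",     # ticks at tick0, tick0 + dtick, ...
                    tick0=0,
                    dtick=25,
                    len=0.8,               # 80% of the plot fraction
                ),
            ),
        )
        return trace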
# colorscale
# ----------
@property
def colorscale(self):
"""
        Sets the colorscale. Has an effect only if `marker.color` is
        set to a numerical array. The colorscale must be an array
        containing arrays mapping a normalized value to an rgb, rgba,
        hex, hsl, hsv, or named color string. At minimum, a mapping for
        the lowest (0) and highest (1) values is required. For
        example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
        control the bounds of the colorscale in color space, use
        `marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
        may be a palette name string from the following list: Greys,
        YlGnBu, Greens, YlOrRd, Bluered, RdBu, Reds, Blues, Picnic,
        Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric,
        Viridis, Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']
Returns
-------
str
"""
return self['colorscale']
@colorscale.setter
def colorscale(self, val):
self['colorscale'] = val
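    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of the two accepted `colorscale` forms described in the
    # docstring above: an explicit list of [normalized value, color] pairs, or
    # a named palette. The helper name and data are assumptions only.
    def _example_marker_colorscale():
        import plotly.graph_objs as go

        # Explicit mapping: 0 -> blue, 1 -> red (each entry is a 2-element list).
        custom = go.scattergeo.Marker(
            color=[0.1, 0.5, 0.9],
            colorscale=[[0, "rgb(0,0,255)"], [1, "rgb(255,0,0)"]],
        )
        # Named palette from the documented list.
        named = go.scattergeo.Marker(color=[0.1, 0.5, 0.9], colorscale="Viridis")
        return custom, named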
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# gradient
# --------
@property
def gradient(self):
"""
The 'gradient' property is an instance of Gradient
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.Gradient
- A dict of string/value properties that will be passed
to the Gradient constructor
Supported dict properties:
color
Sets the final color of the gradient fill: the
center color for radial, the right for
horizontal, or the bottom for vertical.
colorsrc
Sets the source reference on plot.ly for color
.
type
Sets the type of gradient used to fill the
markers
typesrc
Sets the source reference on plot.ly for type
.
Returns
-------
plotly.graph_objs.scattergeo.marker.Gradient
"""
return self['gradient']
@gradient.setter
def gradient(self, val):
self['gradient'] = val
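    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of the `gradient` sub-property: each marker is filled
    # with a gradient of the given `type` ending in `color`. The names and
    # values below are assumptions for illustration only.
    def _example_marker_gradient():
        import plotly.graph_objs as go

        return go.scattergeo.Marker(
            size=20,
            color="white",
            gradient=dict(type="radial", color="blue"),  # white -> blue radial fill
        )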
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color`is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color`is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color`is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if
`marker.line.color` is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values is required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use `marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string from the following
list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
RdBu, Reds, Blues, Picnic, Rainbow, Portland,
Jet, Hot, Blackbody, Earth, Electric, Viridis,
Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color`is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on plot.ly for width
.
Returns
-------
plotly.graph_objs.scattergeo.marker.Line
"""
return self['line']
@line.setter
def line(self, val):
self['line'] = val
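    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of `marker.line`: it outlines each marker point, and its
    # color can itself be driven by a numerical array with its own cmin/cmax and
    # colorscale, as documented above. Values are assumptions for illustration.
    def _example_marker_line():
        import plotly.graph_objs as go

        return go.scattergeo.Marker(
            size=12,
            color="lightblue",
            line=dict(width=2, color=[1, 2, 3], cmin=0, cmax=3, colorscale="Reds"),
        )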
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on plot.ly for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['opacitysrc']
@opacitysrc.setter
def opacitysrc(self, val):
self['opacitysrc'] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color`is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['reversescale']
@reversescale.setter
def reversescale(self, val):
self['reversescale'] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color`is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showscale']
@showscale.setter
def showscale(self, val):
self['showscale'] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['sizemin']
@sizemin.setter
def sizemin(self, val):
self['sizemin'] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self['sizemode']
@sizemode.setter
def sizemode(self, val):
self['sizemode'] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['sizeref']
@sizeref.setter
def sizeref(self, val):
self['sizeref'] = val
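    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of area-based bubble sizing with `sizemode`, `sizeref`
    # and `sizemin`. The scaling rule below (2 * max(values) / desired_px**2)
    # is a commonly used heuristic for `sizemode="area"`; treat it as an
    # assumption, not something guaranteed by this module.
    def _example_bubble_sizing(values=(10, 40, 90), desired_max_px=30):
        import plotly.graph_objs as go

        sizeref = 2.0 * max(values) / (desired_max_px ** 2)
        return go.scattergeo.Marker(
            size=list(values),       # numerical array -> sizemode/sizeref apply
            sizemode="area",         # interpret `size` as area, not diameter
            sizeref=sizeref,
            sizemin=4,               # never render a point smaller than 4 px
        )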
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, 'circle', 100, 'circle-open', 200, 'circle-dot', 300,
'circle-open-dot', 1, 'square', 101, 'square-open', 201,
'square-dot', 301, 'square-open-dot', 2, 'diamond', 102,
'diamond-open', 202, 'diamond-dot', 302,
'diamond-open-dot', 3, 'cross', 103, 'cross-open', 203,
'cross-dot', 303, 'cross-open-dot', 4, 'x', 104, 'x-open',
204, 'x-dot', 304, 'x-open-dot', 5, 'triangle-up', 105,
'triangle-up-open', 205, 'triangle-up-dot', 305,
'triangle-up-open-dot', 6, 'triangle-down', 106,
'triangle-down-open', 206, 'triangle-down-dot', 306,
'triangle-down-open-dot', 7, 'triangle-left', 107,
'triangle-left-open', 207, 'triangle-left-dot', 307,
'triangle-left-open-dot', 8, 'triangle-right', 108,
'triangle-right-open', 208, 'triangle-right-dot', 308,
'triangle-right-open-dot', 9, 'triangle-ne', 109,
'triangle-ne-open', 209, 'triangle-ne-dot', 309,
'triangle-ne-open-dot', 10, 'triangle-se', 110,
'triangle-se-open', 210, 'triangle-se-dot', 310,
'triangle-se-open-dot', 11, 'triangle-sw', 111,
'triangle-sw-open', 211, 'triangle-sw-dot', 311,
'triangle-sw-open-dot', 12, 'triangle-nw', 112,
'triangle-nw-open', 212, 'triangle-nw-dot', 312,
'triangle-nw-open-dot', 13, 'pentagon', 113,
'pentagon-open', 213, 'pentagon-dot', 313,
'pentagon-open-dot', 14, 'hexagon', 114, 'hexagon-open',
214, 'hexagon-dot', 314, 'hexagon-open-dot', 15,
'hexagon2', 115, 'hexagon2-open', 215, 'hexagon2-dot',
315, 'hexagon2-open-dot', 16, 'octagon', 116,
'octagon-open', 216, 'octagon-dot', 316,
'octagon-open-dot', 17, 'star', 117, 'star-open', 217,
'star-dot', 317, 'star-open-dot', 18, 'hexagram', 118,
'hexagram-open', 218, 'hexagram-dot', 318,
'hexagram-open-dot', 19, 'star-triangle-up', 119,
'star-triangle-up-open', 219, 'star-triangle-up-dot', 319,
'star-triangle-up-open-dot', 20, 'star-triangle-down',
120, 'star-triangle-down-open', 220,
'star-triangle-down-dot', 320,
'star-triangle-down-open-dot', 21, 'star-square', 121,
'star-square-open', 221, 'star-square-dot', 321,
'star-square-open-dot', 22, 'star-diamond', 122,
'star-diamond-open', 222, 'star-diamond-dot', 322,
'star-diamond-open-dot', 23, 'diamond-tall', 123,
'diamond-tall-open', 223, 'diamond-tall-dot', 323,
'diamond-tall-open-dot', 24, 'diamond-wide', 124,
'diamond-wide-open', 224, 'diamond-wide-dot', 324,
'diamond-wide-open-dot', 25, 'hourglass', 125,
'hourglass-open', 26, 'bowtie', 126, 'bowtie-open', 27,
'circle-cross', 127, 'circle-cross-open', 28, 'circle-x',
128, 'circle-x-open', 29, 'square-cross', 129,
'square-cross-open', 30, 'square-x', 130, 'square-x-open',
31, 'diamond-cross', 131, 'diamond-cross-open', 32,
'diamond-x', 132, 'diamond-x-open', 33, 'cross-thin', 133,
'cross-thin-open', 34, 'x-thin', 134, 'x-thin-open', 35,
'asterisk', 135, 'asterisk-open', 36, 'hash', 136,
'hash-open', 236, 'hash-dot', 336, 'hash-open-dot', 37,
'y-up', 137, 'y-up-open', 38, 'y-down', 138,
'y-down-open', 39, 'y-left', 139, 'y-left-open', 40,
'y-right', 140, 'y-right-open', 41, 'line-ew', 141,
'line-ew-open', 42, 'line-ns', 142, 'line-ns-open', 43,
'line-ne', 143, 'line-ne-open', 44, 'line-nw', 144,
'line-nw-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['symbol']
@symbol.setter
def symbol(self, val):
self['symbol'] = val
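    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of the symbol numbering rule documented above: adding
    # 100/200/300 to a base symbol code selects its "-open", "-dot" and
    # "-open-dot" variants. Values chosen purely for illustration.
    def _example_marker_symbols():
        import plotly.graph_objs as go

        # 0 == "circle"; 100 == "circle-open"; the string names work equally well.
        return go.scattergeo.Marker(
            size=14,
            symbol=[0, 100, "circle-dot", "circle-open-dot"],
        )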
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on plot.ly for symbol .
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['symbolsrc']
@symbolsrc.setter
def symbolsrc(self, val):
self['symbolsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objs.scattergeo.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
plotly.graph_objs.scattergeo.marker.Gradient instance
or dict with compatible properties
line
plotly.graph_objs.scattergeo.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
gradient=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objs.scattergeo.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
plotly.graph_objs.scattergeo.marker.Gradient instance
or dict with compatible properties
line
plotly.graph_objs.scattergeo.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
Returns
-------
Marker
"""
super(Marker, self).__init__('marker')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Marker
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (marker as v_marker)
# Initialize validators
# ---------------------
self._validators['autocolorscale'] = v_marker.AutocolorscaleValidator()
self._validators['cauto'] = v_marker.CautoValidator()
self._validators['cmax'] = v_marker.CmaxValidator()
self._validators['cmid'] = v_marker.CmidValidator()
self._validators['cmin'] = v_marker.CminValidator()
self._validators['color'] = v_marker.ColorValidator()
self._validators['coloraxis'] = v_marker.ColoraxisValidator()
self._validators['colorbar'] = v_marker.ColorBarValidator()
self._validators['colorscale'] = v_marker.ColorscaleValidator()
self._validators['colorsrc'] = v_marker.ColorsrcValidator()
self._validators['gradient'] = v_marker.GradientValidator()
self._validators['line'] = v_marker.LineValidator()
self._validators['opacity'] = v_marker.OpacityValidator()
self._validators['opacitysrc'] = v_marker.OpacitysrcValidator()
self._validators['reversescale'] = v_marker.ReversescaleValidator()
self._validators['showscale'] = v_marker.ShowscaleValidator()
self._validators['size'] = v_marker.SizeValidator()
self._validators['sizemin'] = v_marker.SizeminValidator()
self._validators['sizemode'] = v_marker.SizemodeValidator()
self._validators['sizeref'] = v_marker.SizerefValidator()
self._validators['sizesrc'] = v_marker.SizesrcValidator()
self._validators['symbol'] = v_marker.SymbolValidator()
self._validators['symbolsrc'] = v_marker.SymbolsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('autocolorscale', None)
self['autocolorscale'
] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop('cauto', None)
self['cauto'] = cauto if cauto is not None else _v
_v = arg.pop('cmax', None)
self['cmax'] = cmax if cmax is not None else _v
_v = arg.pop('cmid', None)
self['cmid'] = cmid if cmid is not None else _v
_v = arg.pop('cmin', None)
self['cmin'] = cmin if cmin is not None else _v
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('coloraxis', None)
self['coloraxis'] = coloraxis if coloraxis is not None else _v
_v = arg.pop('colorbar', None)
self['colorbar'] = colorbar if colorbar is not None else _v
_v = arg.pop('colorscale', None)
self['colorscale'] = colorscale if colorscale is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('gradient', None)
self['gradient'] = gradient if gradient is not None else _v
_v = arg.pop('line', None)
self['line'] = line if line is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('opacitysrc', None)
self['opacitysrc'] = opacitysrc if opacitysrc is not None else _v
_v = arg.pop('reversescale', None)
self['reversescale'] = reversescale if reversescale is not None else _v
_v = arg.pop('showscale', None)
self['showscale'] = showscale if showscale is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizemin', None)
self['sizemin'] = sizemin if sizemin is not None else _v
_v = arg.pop('sizemode', None)
self['sizemode'] = sizemode if sizemode is not None else _v
_v = arg.pop('sizeref', None)
self['sizeref'] = sizeref if sizeref is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
_v = arg.pop('symbol', None)
self['symbol'] = symbol if symbol is not None else _v
_v = arg.pop('symbolsrc', None)
self['symbolsrc'] = symbolsrc if symbolsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
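# --- Illustrative example (not part of the generated plotly module) ---
# A minimal sketch of the constructor contract enforced above: `arg` may be a
# plain dict, an existing Marker instance, or omitted, and explicit keyword
# arguments take precedence over values found in `arg`. Values are assumptions.
def _example_marker_construction():
    import plotly.graph_objs as go

    from_dict = go.scattergeo.Marker({"size": 8, "opacity": 0.5})
    # Keyword arguments override the corresponding keys popped from `arg`.
    overridden = go.scattergeo.Marker({"size": 8}, size=16)
    copied = go.scattergeo.Marker(from_dict)  # instances are re-serialized
    return from_dict, overridden, copied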
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# dash
# ----
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self['dash']
@dash.setter
def dash(self, val):
self['dash'] = val
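    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of the `dash` property: either a named style or an
    # explicit dash-length list in pixels, as documented above. The colors and
    # widths are assumptions for illustration only.
    def _example_line_dash():
        import plotly.graph_objs as go

        named = go.scattergeo.Line(color="royalblue", width=2, dash="dashdot")
        custom = go.scattergeo.Line(color="firebrick", width=2, dash="5px,10px,2px,2px")
        return named, custom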
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['width']
@width.setter
def width(self, val):
self['width'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Line
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__('line')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Line
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Line"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (line as v_line)
# Initialize validators
# ---------------------
self._validators['color'] = v_line.ColorValidator()
self._validators['dash'] = v_line.DashValidator()
self._validators['width'] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('dash', None)
self['dash'] = dash if dash is not None else _v
_v = arg.pop('width', None)
self['width'] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within the
hover label box. Has an effect only if the hover label text
spans two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['align']
@align.setter
def align(self, val):
self['align'] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['alignsrc']
@alignsrc.setter
def alignsrc(self, val):
self['alignsrc'] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bgcolorsrc']
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self['bgcolorsrc'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['bordercolorsrc']
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self['bordercolorsrc'] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.scattergeo.hoverlabel.Font
"""
return self['font']
@font.setter
def font(self, val):
self['font'] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self['namelength']
@namelength.setter
def namelength(self, val):
self['namelength'] = val
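    # --- Illustrative example (not part of the generated plotly module) ---
    # A minimal sketch of `hoverlabel.namelength`: -1 keeps the full trace name,
    # 0-3 keep only that many characters, and larger values truncate long names
    # with an ellipsis, as documented above. Values are assumptions only.
    def _example_hoverlabel_namelength():
        import plotly.graph_objs as go

        full_name = go.scattergeo.Hoverlabel(namelength=-1)
        truncated = go.scattergeo.Hoverlabel(namelength=10, bgcolor="white")
        return full_name, truncated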
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['namelengthsrc']
@namelengthsrc.setter
def namelengthsrc(self, val):
self['namelengthsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Hoverlabel
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__('hoverlabel')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (hoverlabel as v_hoverlabel)
# Initialize validators
# ---------------------
self._validators['align'] = v_hoverlabel.AlignValidator()
self._validators['alignsrc'] = v_hoverlabel.AlignsrcValidator()
self._validators['bgcolor'] = v_hoverlabel.BgcolorValidator()
self._validators['bgcolorsrc'] = v_hoverlabel.BgcolorsrcValidator()
self._validators['bordercolor'] = v_hoverlabel.BordercolorValidator()
self._validators['bordercolorsrc'
] = v_hoverlabel.BordercolorsrcValidator()
self._validators['font'] = v_hoverlabel.FontValidator()
self._validators['namelength'] = v_hoverlabel.NamelengthValidator()
self._validators['namelengthsrc'
] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('align', None)
self['align'] = align if align is not None else _v
_v = arg.pop('alignsrc', None)
self['alignsrc'] = alignsrc if alignsrc is not None else _v
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bgcolorsrc', None)
self['bgcolorsrc'] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('bordercolorsrc', None)
self['bordercolorsrc'
] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop('font', None)
self['font'] = font if font is not None else _v
_v = arg.pop('namelength', None)
self['namelength'] = namelength if namelength is not None else _v
_v = arg.pop('namelengthsrc', None)
self['namelengthsrc'
] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.scattergeo import unselected
from plotly.graph_objs.scattergeo import selected
from plotly.graph_objs.scattergeo import marker
from plotly.graph_objs.scattergeo import hoverlabel
# avg_line_length: 37.224723 | max_line_length: 85 | alphanum_fraction: 0.552
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
@property
def marker(self):
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
@property
def textfont(self):
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
@property
def _parent_path_str(self):
return 'scattergeo'
@property
def _prop_descriptions(self):
return """\
marker
plotly.graph_objs.scattergeo.unselected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.unselected.Textfont
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
super(Unselected, self).__init__('unselected')
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Unselected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Unselected"""
)
self._skip_invalid = kwargs.pop('skip_invalid', False)
from plotly.validators.scattergeo import (unselected as v_unselected)
self._validators['marker'] = v_unselected.MarkerValidator()
self._validators['textfont'] = v_unselected.TextfontValidator()
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
@property
def color(self):
return self['color']
@color.setter
def color(self, val):
self['color'] = val
@property
def colorsrc(self):
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
@property
def family(self):
return self['family']
@family.setter
def family(self, val):
self['family'] = val
@property
def familysrc(self):
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
@property
def size(self):
return self['size']
@size.setter
def size(self, val):
self['size'] = val
@property
def sizesrc(self):
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
@property
def _parent_path_str(self):
return 'scattergeo'
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
super(Textfont, self).__init__('textfont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Textfont
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Textfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (textfont as v_textfont)
# Initialize validators
# ---------------------
self._validators['color'] = v_textfont.ColorValidator()
self._validators['colorsrc'] = v_textfont.ColorsrcValidator()
self._validators['family'] = v_textfont.FamilyValidator()
self._validators['familysrc'] = v_textfont.FamilysrcValidator()
self._validators['size'] = v_textfont.SizeValidator()
self._validators['sizesrc'] = v_textfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
return self['maxpoints']
@maxpoints.setter
def maxpoints(self, val):
self['maxpoints'] = val
# token
# -----
@property
def token(self):
return self['token']
@token.setter
def token(self, val):
self['token'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
super(Stream, self).__init__('stream')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Stream
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (stream as v_stream)
# Initialize validators
# ---------------------
self._validators['maxpoints'] = v_stream.MaxpointsValidator()
self._validators['token'] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('maxpoints', None)
self['maxpoints'] = maxpoints if maxpoints is not None else _v
_v = arg.pop('token', None)
self['token'] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
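# --- Illustrative example (not part of the generated plotly module) ---
# A minimal sketch of the Stream sub-object: `maxpoints` caps how many of the
# newest points stay on the plot and `token` ties the trace to a streaming
# token from the user's plot.ly settings. The token string below is a
# placeholder, not a real credential.
def _example_stream():
    import plotly.graph_objs as go

    return go.scattergeo.Stream(token="your-stream-token", maxpoints=50)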
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
# textfont
# --------
@property
def textfont(self):
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
plotly.graph_objs.scattergeo.selected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.selected.Textfont instance
or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
super(Selected, self).__init__('selected')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Selected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Selected"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (selected as v_selected)
# Initialize validators
# ---------------------
self._validators['marker'] = v_selected.MarkerValidator()
self._validators['textfont'] = v_selected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
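# Illustrative usage sketch (added for exposition; not part of the generated
# plotly module). Shows passing a "compatible dict" for the marker property as
# described above; the size and color values are placeholders.
def _example_scattergeo_selected():
    return Selected(marker=dict(size=12, color='crimson'))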
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
return self['autocolorscale']
@autocolorscale.setter
def autocolorscale(self, val):
self['autocolorscale'] = val
# cauto
# -----
@property
def cauto(self):
return self['cauto']
@cauto.setter
def cauto(self, val):
self['cauto'] = val
# cmax
# ----
@property
def cmax(self):
return self['cmax']
@cmax.setter
def cmax(self, val):
self['cmax'] = val
# cmid
# ----
@property
def cmid(self):
return self['cmid']
@cmid.setter
def cmid(self, val):
self['cmid'] = val
# cmin
# ----
@property
def cmin(self):
return self['cmin']
@cmin.setter
def cmin(self, val):
self['cmin'] = val
# color
# -----
@property
def color(self):
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# coloraxis
# ---------
@property
def coloraxis(self):
return self['coloraxis']
@coloraxis.setter
def coloraxis(self, val):
self['coloraxis'] = val
# colorbar
# --------
@property
def colorbar(self):
return self['colorbar']
@colorbar.setter
def colorbar(self, val):
self['colorbar'] = val
# colorscale
# ----------
@property
def colorscale(self):
return self['colorscale']
@colorscale.setter
def colorscale(self, val):
self['colorscale'] = val
# colorsrc
# --------
@property
def colorsrc(self):
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# gradient
# --------
@property
def gradient(self):
return self['gradient']
@gradient.setter
def gradient(self, val):
self['gradient'] = val
# line
# ----
@property
def line(self):
return self['line']
@line.setter
def line(self, val):
self['line'] = val
# opacity
# -------
@property
def opacity(self):
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
return self['opacitysrc']
@opacitysrc.setter
def opacitysrc(self, val):
self['opacitysrc'] = val
# reversescale
# ------------
@property
def reversescale(self):
return self['reversescale']
@reversescale.setter
def reversescale(self, val):
self['reversescale'] = val
# showscale
# ---------
@property
def showscale(self):
return self['showscale']
@showscale.setter
def showscale(self, val):
self['showscale'] = val
# size
# ----
@property
def size(self):
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizemin
# -------
@property
def sizemin(self):
return self['sizemin']
@sizemin.setter
def sizemin(self, val):
self['sizemin'] = val
# sizemode
# --------
@property
def sizemode(self):
return self['sizemode']
@sizemode.setter
def sizemode(self, val):
self['sizemode'] = val
# sizeref
# -------
@property
def sizeref(self):
return self['sizeref']
@sizeref.setter
def sizeref(self, val):
self['sizeref'] = val
# sizesrc
# -------
@property
def sizesrc(self):
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# symbol
# ------
@property
def symbol(self):
return self['symbol']
@symbol.setter
def symbol(self, val):
self['symbol'] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
return self['symbolsrc']
@symbolsrc.setter
def symbolsrc(self, val):
self['symbolsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if
            `marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`.
            Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objs.scattergeo.marker.ColorBar instance
or dict with compatible properties
colorscale
            Sets the colorscale. Has an effect only if
            `marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
plotly.graph_objs.scattergeo.marker.Gradient instance
or dict with compatible properties
line
plotly.graph_objs.scattergeo.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
            Reverses the color mapping if true. Has an effect only
            if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
gradient=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
super(Marker, self).__init__('marker')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Marker
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (marker as v_marker)
# Initialize validators
# ---------------------
self._validators['autocolorscale'] = v_marker.AutocolorscaleValidator()
self._validators['cauto'] = v_marker.CautoValidator()
self._validators['cmax'] = v_marker.CmaxValidator()
self._validators['cmid'] = v_marker.CmidValidator()
self._validators['cmin'] = v_marker.CminValidator()
self._validators['color'] = v_marker.ColorValidator()
self._validators['coloraxis'] = v_marker.ColoraxisValidator()
self._validators['colorbar'] = v_marker.ColorBarValidator()
self._validators['colorscale'] = v_marker.ColorscaleValidator()
self._validators['colorsrc'] = v_marker.ColorsrcValidator()
self._validators['gradient'] = v_marker.GradientValidator()
self._validators['line'] = v_marker.LineValidator()
self._validators['opacity'] = v_marker.OpacityValidator()
self._validators['opacitysrc'] = v_marker.OpacitysrcValidator()
self._validators['reversescale'] = v_marker.ReversescaleValidator()
self._validators['showscale'] = v_marker.ShowscaleValidator()
self._validators['size'] = v_marker.SizeValidator()
self._validators['sizemin'] = v_marker.SizeminValidator()
self._validators['sizemode'] = v_marker.SizemodeValidator()
self._validators['sizeref'] = v_marker.SizerefValidator()
self._validators['sizesrc'] = v_marker.SizesrcValidator()
self._validators['symbol'] = v_marker.SymbolValidator()
self._validators['symbolsrc'] = v_marker.SymbolsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('autocolorscale', None)
self['autocolorscale'
] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop('cauto', None)
self['cauto'] = cauto if cauto is not None else _v
_v = arg.pop('cmax', None)
self['cmax'] = cmax if cmax is not None else _v
_v = arg.pop('cmid', None)
self['cmid'] = cmid if cmid is not None else _v
_v = arg.pop('cmin', None)
self['cmin'] = cmin if cmin is not None else _v
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('coloraxis', None)
self['coloraxis'] = coloraxis if coloraxis is not None else _v
_v = arg.pop('colorbar', None)
self['colorbar'] = colorbar if colorbar is not None else _v
_v = arg.pop('colorscale', None)
self['colorscale'] = colorscale if colorscale is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('gradient', None)
self['gradient'] = gradient if gradient is not None else _v
_v = arg.pop('line', None)
self['line'] = line if line is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('opacitysrc', None)
self['opacitysrc'] = opacitysrc if opacitysrc is not None else _v
_v = arg.pop('reversescale', None)
self['reversescale'] = reversescale if reversescale is not None else _v
_v = arg.pop('showscale', None)
self['showscale'] = showscale if showscale is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizemin', None)
self['sizemin'] = sizemin if sizemin is not None else _v
_v = arg.pop('sizemode', None)
self['sizemode'] = sizemode if sizemode is not None else _v
_v = arg.pop('sizeref', None)
self['sizeref'] = sizeref if sizeref is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
_v = arg.pop('symbol', None)
self['symbol'] = symbol if symbol is not None else _v
_v = arg.pop('symbolsrc', None)
self['symbolsrc'] = symbolsrc if symbolsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
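# Illustrative usage sketch (added for exposition; not part of the generated
# plotly module). The two-stop colorscale mirrors the example in the property
# description above; all concrete values are placeholders.
def _example_scattergeo_marker(values):
    # Map a numeric array onto a blue-to-red colorscale with explicit bounds.
    return Marker(
        color=values,
        colorscale=[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']],
        cmin=0,
        cmax=1,
        showscale=True,
        size=8,
    )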
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# dash
# ----
@property
def dash(self):
return self['dash']
@dash.setter
def dash(self, val):
self['dash'] = val
# width
# -----
@property
def width(self):
return self['width']
@width.setter
def width(self, val):
self['width'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
super(Line, self).__init__('line')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Line
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Line"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (line as v_line)
# Initialize validators
# ---------------------
self._validators['color'] = v_line.ColorValidator()
self._validators['dash'] = v_line.DashValidator()
self._validators['width'] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('dash', None)
self['dash'] = dash if dash is not None else _v
_v = arg.pop('width', None)
self['width'] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
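# Illustrative usage sketch (added for exposition; not part of the generated
# plotly module). Shows the dash-string form described above; the values are
# arbitrary placeholders.
def _example_scattergeo_line():
    # A 2px-wide dashed line; 'dash' also accepts a px list such as "5px,2px".
    return Line(color='royalblue', width=2, dash='dash')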
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
return self['align']
@align.setter
def align(self, val):
self['align'] = val
# alignsrc
# --------
@property
def alignsrc(self):
return self['alignsrc']
@alignsrc.setter
def alignsrc(self, val):
self['alignsrc'] = val
# bgcolor
# -------
@property
def bgcolor(self):
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
return self['bgcolorsrc']
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self['bgcolorsrc'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
return self['bordercolorsrc']
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self['bordercolorsrc'] = val
# font
# ----
@property
def font(self):
return self['font']
@font.setter
def font(self, val):
self['font'] = val
# namelength
# ----------
@property
def namelength(self):
return self['namelength']
@namelength.setter
def namelength(self, val):
self['namelength'] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
return self['namelengthsrc']
@namelengthsrc.setter
def namelengthsrc(self, val):
self['namelengthsrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'scattergeo'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the hover
            label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
super(Hoverlabel, self).__init__('hoverlabel')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (hoverlabel as v_hoverlabel)
# Initialize validators
# ---------------------
self._validators['align'] = v_hoverlabel.AlignValidator()
self._validators['alignsrc'] = v_hoverlabel.AlignsrcValidator()
self._validators['bgcolor'] = v_hoverlabel.BgcolorValidator()
self._validators['bgcolorsrc'] = v_hoverlabel.BgcolorsrcValidator()
self._validators['bordercolor'] = v_hoverlabel.BordercolorValidator()
self._validators['bordercolorsrc'
] = v_hoverlabel.BordercolorsrcValidator()
self._validators['font'] = v_hoverlabel.FontValidator()
self._validators['namelength'] = v_hoverlabel.NamelengthValidator()
self._validators['namelengthsrc'
] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('align', None)
self['align'] = align if align is not None else _v
_v = arg.pop('alignsrc', None)
self['alignsrc'] = alignsrc if alignsrc is not None else _v
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bgcolorsrc', None)
self['bgcolorsrc'] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('bordercolorsrc', None)
self['bordercolorsrc'
] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop('font', None)
self['font'] = font if font is not None else _v
_v = arg.pop('namelength', None)
self['namelength'] = namelength if namelength is not None else _v
_v = arg.pop('namelengthsrc', None)
self['namelengthsrc'
] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
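# Illustrative usage sketch (added for exposition; not part of the generated
# plotly module). Values are placeholders; namelength=-1 shows the full trace
# name, as explained in the property description above.
def _example_scattergeo_hoverlabel():
    return Hoverlabel(bgcolor='white', bordercolor='gray', namelength=-1)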
from plotly.graph_objs.scattergeo import unselected
from plotly.graph_objs.scattergeo import selected
from plotly.graph_objs.scattergeo import marker
from plotly.graph_objs.scattergeo import hoverlabel
| true
| true
|
790ab9099d3a1dd7e34516750d342d20748bda90
| 3,284
|
py
|
Python
|
configure_new.py
|
dodo325/demo-allauth-bootstrap-ru
|
28d340a07fdd7a118664818bd8c4399190ca9aee
|
[
"MIT"
] | null | null | null |
configure_new.py
|
dodo325/demo-allauth-bootstrap-ru
|
28d340a07fdd7a118664818bd8c4399190ca9aee
|
[
"MIT"
] | null | null | null |
configure_new.py
|
dodo325/demo-allauth-bootstrap-ru
|
28d340a07fdd7a118664818bd8c4399190ca9aee
|
[
"MIT"
] | null | null | null |
#!/bin/env
"""
Help new users configure the database for use with social networks.
"""
import os
from datetime import datetime
# Fix Python 2.x.
try:
input = raw_input
except NameError:
pass
import django
from django.conf import settings
from django.core.management.utils import get_random_secret_key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
settings.configure(
DEBUG=True,
TEMPLATES=[dict(
# DEBUG = True,
BACKEND='django.template.backends.django.DjangoTemplates',
APP_DIRS=True,
DIRS=[
os.path.join(BASE_DIR, 'allauthdemo'),
],
)],
)
try:
django.setup() # for Django >= 1.7
except AttributeError:
pass # must be < Django 1.7
from django.template.loader import get_template
from django.template import engines
commands_template = engines['django'].from_string("""
Run these commands:
python manage.py makemigrations allauthdemo_auth
python manage.py migrate
python manage.py createsuperuser
{% if facebook %}# Facebook
python manage.py set_auth_provider facebook {{facebook.client_id}} {{facebook.secret}}{% endif %}
{% if google %}# Google
python manage.py set_auth_provider google {{google.client_id}} {{google.secret}}{% endif %}
{% if github %}# GitHub
python manage.py set_auth_provider github {{github.client_id}} {{github.secret}}{% endif %}
{% if vk %}# VK
python manage.py set_auth_provider vk {{vk.client_id}} {{vk.secret}}{% endif %}
If you have other providers you can add them in that way.
""")
settings_template = get_template("settings.template.py")
def heading(text):
text = text.strip()
line = '-' * len(text)
print("\n%s\n%s\n%s\n" % (line, text, line))
def print_list(ls):
max_len = max([len(i) for i in ls])
    num = len(str(len(ls)))  # TODO: support the full provider list
line = '-' * (2+num+3+max_len+2)
for i in range(len(ls)):
print(line)
print("| %d | %s "% (i+1, ls[i]))
def ask_text(need, default=None):
need = need.strip()
if default:
msg = "\n%s? Default: [%s] > " % (need, default)
else:
msg = "\n%s? > " % need
while True:
response = input(msg)
if response:
return response
elif default is not None:
return default
else:
pass # raw_input('Please enter a value.')
providers = ['facebook', 'google', 'github', 'vk']
if __name__ == "__main__":
context = {
'now': str(datetime.now()),
'secret_key': get_random_secret_key(),
}
print_list(providers)
print("Please list comma-separated providers. Example: 1,2,3,4")
    correct_providers = [int(i)-1 for i in input("Please enter: ").split(',')]
    for i in correct_providers:
p = providers[i]
heading(p)
        secret = ask_text("{} Secret".format(p))
        client_id = ask_text("{} Client ID".format(p))
context[p] = dict(secret=secret, client_id=client_id)
heading("Rendering settings...")
with open('allauthdemo/settings.py', 'w') as out:
out.write(settings_template.render(context, request=None))
print("OK")
heading("Next steps")
print(commands_template.render(context, request=None))
heading("Done")
| 26.699187
| 101
| 0.624848
|
import os
from datetime import datetime
try:
input = raw_input
except NameError:
pass
import django
from django.conf import settings
from django.core.management.utils import get_random_secret_key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
settings.configure(
DEBUG=True,
TEMPLATES=[dict(
BACKEND='django.template.backends.django.DjangoTemplates',
APP_DIRS=True,
DIRS=[
os.path.join(BASE_DIR, 'allauthdemo'),
],
)],
)
try:
django.setup()
except AttributeError:
pass
from django.template.loader import get_template
from django.template import engines
commands_template = engines['django'].from_string("""
Run these commands:
python manage.py makemigrations allauthdemo_auth
python manage.py migrate
python manage.py createsuperuser
{% if facebook %}# Facebook
python manage.py set_auth_provider facebook {{facebook.client_id}} {{facebook.secret}}{% endif %}
{% if google %}# Google
python manage.py set_auth_provider google {{google.client_id}} {{google.secret}}{% endif %}
{% if github %}# GitHub
python manage.py set_auth_provider github {{github.client_id}} {{github.secret}}{% endif %}
{% if vk %}# VK
python manage.py set_auth_provider vk {{vk.client_id}} {{vk.secret}}{% endif %}
If you have other providers you can add them in that way.
""")
settings_template = get_template("settings.template.py")
def heading(text):
text = text.strip()
line = '-' * len(text)
print("\n%s\n%s\n%s\n" % (line, text, line))
def print_list(ls):
max_len = max([len(i) for i in ls])
num = len(str(len(ls)))
line = '-' * (2+num+3+max_len+2)
for i in range(len(ls)):
print(line)
print("| %d | %s "% (i+1, ls[i]))
def ask_text(need, default=None):
need = need.strip()
if default:
msg = "\n%s? Default: [%s] > " % (need, default)
else:
msg = "\n%s? > " % need
while True:
response = input(msg)
if response:
return response
elif default is not None:
return default
else:
pass
providers = ['facebook', 'google', 'github', 'vk']
if __name__ == "__main__":
context = {
'now': str(datetime.now()),
'secret_key': get_random_secret_key(),
}
print_list(providers)
print("Please list comma-separated providers. Example: 1,2,3,4")
    correct_providers = [int(i)-1 for i in input("Please enter: ").split(',')]
    for i in correct_providers:
p = providers[i]
heading(p)
        secret = ask_text("{} Secret".format(p))
        client_id = ask_text("{} Client ID".format(p))
context[p] = dict(secret=secret, client_id=client_id)
heading("Rendering settings...")
with open('allauthdemo/settings.py', 'w') as out:
out.write(settings_template.render(context, request=None))
print("OK")
heading("Next steps")
print(commands_template.render(context, request=None))
heading("Done")
| true
| true
|
790ab98b6a5fd4c3f06fcad8822de70fcfb9f51b
| 209
|
py
|
Python
|
frappe/website/doctype/web_page_block/web_page_block.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | 3,755
|
2015-01-06T07:47:43.000Z
|
2022-03-31T20:54:23.000Z
|
frappe/website/doctype/web_page_block/web_page_block.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | 7,369
|
2015-01-01T19:59:41.000Z
|
2022-03-31T23:02:05.000Z
|
frappe/website/doctype/web_page_block/web_page_block.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | 2,685
|
2015-01-07T17:51:03.000Z
|
2022-03-31T23:16:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# License: MIT. See LICENSE
# import frappe
from frappe.model.document import Document
class WebPageBlock(Document):
pass
| 19
| 58
| 0.741627
|
from frappe.model.document import Document
class WebPageBlock(Document):
pass
| true
| true
|
790aba6956f1484ca4eca4a8f264f1b3c24bef28
| 9,534
|
py
|
Python
|
adminfilter/forms.py
|
COEXCZ/django-adminfilter
|
d66f6a3c5294156c01db1cf1927942f7cb31119a
|
[
"0BSD"
] | 3
|
2015-11-17T15:32:02.000Z
|
2021-08-06T16:16:04.000Z
|
adminfilter/forms.py
|
COEXCZ/django-adminfilter
|
d66f6a3c5294156c01db1cf1927942f7cb31119a
|
[
"0BSD"
] | null | null | null |
adminfilter/forms.py
|
COEXCZ/django-adminfilter
|
d66f6a3c5294156c01db1cf1927942f7cb31119a
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
from copy import deepcopy
from django import forms
from django.conf import settings
from django.core import urlresolvers
from django.db.models import Q, Model
from django.forms.forms import BoundField
from django.forms.util import ErrorDict
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
import choices
from changelist import FILTER_PREFIX, LIST_PER_PAGE_VAR, PAGE_VAR
from fields import *
from utils import FieldStackLazy
__all__ = (
'And',
'FilterForm',
)
class S(object):
""" Queryset contructor. Returns queryset according to custom form queryset
definition. """
AND = '&'
INVALID = (u'', None)
def __init__(self, operator, *children, **opts):
self.operator = operator
self.children = children
self.lazy_init = opts.get('lazy_init', False)
def __and__(self, other):
return S(self.AND, self, other)
def __or__(self, other):
return S(self.OR, self, other)
def __invert__(self):
return S(self.NOT, self)
def __get__(self, form, formtype=None):
""" Using descriptor protocol to access data
returns:
get_queryset -- function
"""
# Get field attribute names known as children and set all fields to optional
if self.lazy_init:
children = []
ignore_fields = (LIST_PER_PAGE_VAR, PAGE_VAR)
for field_name, field in form.fields.iteritems():
if hasattr(field, 'q') and field_name not in ignore_fields:
children.append(field_name)
if isinstance(field, forms.DateField):
field.widget.attrs = {'class': 'vDateField'} # Grappelli class
field.input_formats = choices.DATE_INPUT_FORMAT
field.required = False
self.children = children
def get_queryset(queryset):
""" Accepts queryset and returns filtered queryset by form data
"""
#order_by = []
for field_name, field in form.fields.iteritems():
field.value = form.cleaned_data.get(field_name)
if self.is_valid(field.value):
if isinstance(field.value, basestring):
field.value = field.value.strip()
if isinstance(field.value, Model):
field.value = field.value.pk
#if getattr(field, 'order_by', None):
#order_by.append(field.value)
q = self.get_combined_q(form)
queryset = queryset.filter(q)
fulltext_query = self.get_fulltext_query(form)
if fulltext_query:
queryset = queryset.fulltext(
fulltext_query, rank_alias='fulltext_rank'
).order_by('-fulltext_rank')
#queryset = queryset.order_by(*order_by)
return queryset
return get_queryset
def get_combined_q(self, form):
""" Returns final combinad Q object for filtering
"""
q = None
for child in self.children:
if not q:
q = self.get_q(form, child)
q = self.combine_q(form, child, q)
return q
def combine_q(self, form, child, q):
""" Combines Q objects together according to self.operator
"""
return {self.AND: q & self.get_q(form, child)}[self.operator]
def is_valid(self, value):
return value not in self.INVALID
def get_q(self, form, child):
""" Returns Q object from field in child instance
"""
if isinstance(child, S):
return child(form)
else:
child_field = form.fields[child]
if self.is_valid(child_field.value):
if isinstance(child_field, forms.BooleanField) and not child_field.value:
return Q()
if getattr(child_field, 'fulltext', False):
return Q()
if getattr(child_field, 'takes_cleaned_data', False):
return child_field.q(child_field.value, form.cleaned_data)
else:
return child_field.q(child_field.value)
# Last fallback
return Q()
def get_fulltext_query(self, form):
""" Returns fulltext's form field query if exists """
query = u''
for child in self.children:
if getattr(form.fields[child], 'fulltext', False):
query = form.fields[child].value
return query
def And(*children, **opts):
return S(S.AND, *children, **opts)
class FilterForm(forms.Form):
""" Main filter form used for inheritance and field definition.
"""
FIELDS_TEMPLATE = "tags/filter.html"
def __init__(self, *args, **kwargs):
kwargs['prefix'] = FILTER_PREFIX
super(FilterForm, self).__init__(*args, **kwargs)
# Paginator page listing
self.fields[PAGE_VAR] = forms.IntegerField(initial=1, widget=forms.HiddenInput, label=u'Page')
# Items per page
self.fields[LIST_PER_PAGE_VAR] = forms.IntegerField(
initial=30,
label=_(u'Per page'),
widget=forms.Select(choices=choices.ITEMS_PER_PAGE)
)
self.fields['ot'] = forms.CharField(required=False, widget=forms.HiddenInput)
self.fields['o'] = forms.IntegerField(required=False, widget=forms.HiddenInput)
# Initialize form queryset
if not hasattr(self.__class__, 'queryset'):
self.__class__.queryset = And(lazy_init=True)
def visible_filter_fields(self):
"""
        Iterator over the filter form fields. The per-page field and current-page field are iterated last.
"""
end_fields = []
for bf in self.visible_fields():
if bf.name in (LIST_PER_PAGE_VAR, PAGE_VAR):
end_fields.append(bf)
else:
yield bf
for bf in end_fields:
yield bf
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound:
return
self.cleaned_data = {}
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def is_valid(self):
""" ignores form errors
"""
self._get_errors()
return self.is_bound
def clean(self):
self.cleaned_data = super(FilterForm, self).clean()
if hasattr(self, 'ignore_if'):
for field, ignore_fields in self.ignore_if.iteritems():
if field in self.cleaned_data and self.cleaned_data[field]:
for ignore_field in ignore_fields:
try:
del(self.cleaned_data[ignore_field])
except KeyError:
pass
invalid_keys = []
for key, value in self.cleaned_data.iteritems():
if value in (None, u'', ''):
invalid_keys.append(key)
for key in invalid_keys:
del(self.cleaned_data[key])
return self.cleaned_data
@property
def cleaned_data_prefixed(self):
d = {}
for key, value in self.cleaned_data.iteritems():
if isinstance(value, Model):
value = value.pk
d[self.add_prefix(key)] = value
return d
@property
def hidden_form(self):
""" Returns SearchForm with hidden fields
"""
form = deepcopy(self)
for field in form.fields.itervalues():
field.widget = forms.HiddenInput()
form.all_fields = FieldStackLazy(form, *form.fields.keys())
return form
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError('Key %r not found in Form' % name)
return BoundField(self, field, name)
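# Illustrative sketch (added for exposition; not part of this module). A
# minimal subclass relying on the lazy And(...) default queryset built in
# FilterForm.__init__; the field name is a placeholder and a plain CharField is
# only a stand-in -- real filters would use the field classes exported from
# `fields`, which provide a `q` attribute.
class ExampleFilterForm(FilterForm):
    name = forms.CharField(required=False, label=_(u'Name'))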
class SearchData(object):
""" holder object for filter_params and filter_form """
pass
class LazyFilter(object):
def __get__(self, request, obj_type=None):
if not hasattr(request, '_cached_search'):
Form = None
request._cached_search = SearchData()
# TODO constant to configure
data = {
# 'cl-ll': choices.ITEMS_PER_PAGE_CONST
}
# Get Admin instance
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
admin = callback.func_closure[0].cell_contents
if admin:
Form = admin.filter_form
else:
Form = None
if Form:
data.update(dict(request.REQUEST))
form = Form(data)
form.is_valid()
request._cached_search.filter_params = urlencode(form.cleaned_data_prefixed)
request._cached_search.filter_form = form
return request._cached_search
| 29.700935
| 102
| 0.577407
|
from copy import deepcopy
from django import forms
from django.conf import settings
from django.core import urlresolvers
from django.db.models import Q, Model
from django.forms.forms import BoundField
from django.forms.util import ErrorDict
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
import choices
from changelist import FILTER_PREFIX, LIST_PER_PAGE_VAR, PAGE_VAR
from fields import *
from utils import FieldStackLazy
__all__ = (
'And',
'FilterForm',
)
class S(object):
AND = '&'
INVALID = (u'', None)
def __init__(self, operator, *children, **opts):
self.operator = operator
self.children = children
self.lazy_init = opts.get('lazy_init', False)
def __and__(self, other):
return S(self.AND, self, other)
def __or__(self, other):
return S(self.OR, self, other)
def __invert__(self):
return S(self.NOT, self)
def __get__(self, form, formtype=None):
if self.lazy_init:
children = []
ignore_fields = (LIST_PER_PAGE_VAR, PAGE_VAR)
for field_name, field in form.fields.iteritems():
if hasattr(field, 'q') and field_name not in ignore_fields:
children.append(field_name)
if isinstance(field, forms.DateField):
field.widget.attrs = {'class': 'vDateField'}
field.input_formats = choices.DATE_INPUT_FORMAT
field.required = False
self.children = children
def get_queryset(queryset):
for field_name, field in form.fields.iteritems():
field.value = form.cleaned_data.get(field_name)
if self.is_valid(field.value):
if isinstance(field.value, basestring):
field.value = field.value.strip()
if isinstance(field.value, Model):
field.value = field.value.pk
q = self.get_combined_q(form)
queryset = queryset.filter(q)
fulltext_query = self.get_fulltext_query(form)
if fulltext_query:
queryset = queryset.fulltext(
fulltext_query, rank_alias='fulltext_rank'
).order_by('-fulltext_rank')
return queryset
return get_queryset
def get_combined_q(self, form):
q = None
for child in self.children:
if not q:
q = self.get_q(form, child)
q = self.combine_q(form, child, q)
return q
def combine_q(self, form, child, q):
return {self.AND: q & self.get_q(form, child)}[self.operator]
def is_valid(self, value):
return value not in self.INVALID
def get_q(self, form, child):
if isinstance(child, S):
return child(form)
else:
child_field = form.fields[child]
if self.is_valid(child_field.value):
if isinstance(child_field, forms.BooleanField) and not child_field.value:
return Q()
if getattr(child_field, 'fulltext', False):
return Q()
if getattr(child_field, 'takes_cleaned_data', False):
return child_field.q(child_field.value, form.cleaned_data)
else:
return child_field.q(child_field.value)
return Q()
def get_fulltext_query(self, form):
query = u''
for child in self.children:
if getattr(form.fields[child], 'fulltext', False):
query = form.fields[child].value
return query
def And(*children, **opts):
return S(S.AND, *children, **opts)
class FilterForm(forms.Form):
FIELDS_TEMPLATE = "tags/filter.html"
def __init__(self, *args, **kwargs):
kwargs['prefix'] = FILTER_PREFIX
super(FilterForm, self).__init__(*args, **kwargs)
self.fields[PAGE_VAR] = forms.IntegerField(initial=1, widget=forms.HiddenInput, label=u'Page')
self.fields[LIST_PER_PAGE_VAR] = forms.IntegerField(
initial=30,
label=_(u'Per page'),
widget=forms.Select(choices=choices.ITEMS_PER_PAGE)
)
self.fields['ot'] = forms.CharField(required=False, widget=forms.HiddenInput)
self.fields['o'] = forms.IntegerField(required=False, widget=forms.HiddenInput)
if not hasattr(self.__class__, 'queryset'):
self.__class__.queryset = And(lazy_init=True)
def visible_filter_fields(self):
end_fields = []
for bf in self.visible_fields():
if bf.name in (LIST_PER_PAGE_VAR, PAGE_VAR):
end_fields.append(bf)
else:
yield bf
for bf in end_fields:
yield bf
def full_clean(self):
self._errors = ErrorDict()
if not self.is_bound:
return
self.cleaned_data = {}
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def is_valid(self):
self._get_errors()
return self.is_bound
def clean(self):
self.cleaned_data = super(FilterForm, self).clean()
if hasattr(self, 'ignore_if'):
for field, ignore_fields in self.ignore_if.iteritems():
if field in self.cleaned_data and self.cleaned_data[field]:
for ignore_field in ignore_fields:
try:
del(self.cleaned_data[ignore_field])
except KeyError:
pass
invalid_keys = []
for key, value in self.cleaned_data.iteritems():
if value in (None, u'', ''):
invalid_keys.append(key)
for key in invalid_keys:
del(self.cleaned_data[key])
return self.cleaned_data
@property
def cleaned_data_prefixed(self):
d = {}
for key, value in self.cleaned_data.iteritems():
if isinstance(value, Model):
value = value.pk
d[self.add_prefix(key)] = value
return d
@property
def hidden_form(self):
form = deepcopy(self)
for field in form.fields.itervalues():
field.widget = forms.HiddenInput()
form.all_fields = FieldStackLazy(form, *form.fields.keys())
return form
def __getitem__(self, name):
try:
field = self.fields[name]
except KeyError:
raise KeyError('Key %r not found in Form' % name)
return BoundField(self, field, name)
class SearchData(object):
pass
class LazyFilter(object):
def __get__(self, request, obj_type=None):
if not hasattr(request, '_cached_search'):
Form = None
request._cached_search = SearchData()
data = {
}
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
admin = callback.func_closure[0].cell_contents
if admin:
Form = admin.filter_form
else:
Form = None
if Form:
data.update(dict(request.REQUEST))
form = Form(data)
form.is_valid()
request._cached_search.filter_params = urlencode(form.cleaned_data_prefixed)
request._cached_search.filter_form = form
return request._cached_search
| true
| true
|
790abc34b87ceed8e9dbc2c1d5b05a111c067b4f
| 889
|
py
|
Python
|
ooobuild/dyn/configuration/backend/multi_layer_stratum.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/configuration/backend/multi_layer_stratum.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/configuration/backend/multi_layer_stratum.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is an auto-generated file produced by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.configuration.backend
from ....lo.configuration.backend.multi_layer_stratum import MultiLayerStratum as MultiLayerStratum
__all__ = ['MultiLayerStratum']
| 34.192308
| 99
| 0.767154
|
from ....lo.configuration.backend.multi_layer_stratum import MultiLayerStratum as MultiLayerStratum
__all__ = ['MultiLayerStratum']
| true
| true
|
790abcb02e5541a1eb9af556ddb6d7a8f9ac2ed7
| 3,155
|
py
|
Python
|
extensions/rich_text_components/Video/Video.py
|
VictoriaRoux/oppia
|
5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6
|
[
"Apache-2.0"
] | 3
|
2015-03-17T01:34:14.000Z
|
2015-04-11T10:35:53.000Z
|
extensions/rich_text_components/Video/Video.py
|
VictoriaRoux/oppia
|
5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6
|
[
"Apache-2.0"
] | null | null | null |
extensions/rich_text_components/Video/Video.py
|
VictoriaRoux/oppia
|
5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.rich_text_components import base
NONNEGATIVE_INT_SCHEMA = {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 0
}],
}
class Video(base.BaseRichTextComponent):
"""A rich-text component representing a YouTube video."""
name = 'Video'
category = 'Basic Input'
description = 'A YouTube video.'
frontend_name = 'video'
tooltip = 'Insert video'
_customization_arg_specs = [{
'name': 'video_id',
'description': (
'The YouTube id for this video. This is the 11-character string '
'after \'v=\' in the video URL.'),
'schema': {
'type': 'unicode',
},
'default_value': '',
}, {
'name': 'start',
'description': (
'Video start time in seconds: (leave at 0 to start at the '
'beginning.)'),
'schema': NONNEGATIVE_INT_SCHEMA,
'default_value': 0
}, {
'name': 'end',
'description': (
'Video end time in seconds: (leave at 0 to play until the end.)'),
'schema': NONNEGATIVE_INT_SCHEMA,
'default_value': 0
}, {
'name': 'autoplay',
'description': (
'Autoplay this video once the question has loaded?'),
'schema': {
'type': 'bool'
},
'default_value': False,
}]
icon_data_url = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA'
'ABGdBTUEAAK/INwWK6QAAABl0RVh0%0AU29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZ'
'TwAAAIfSURBVDjLpZNPaBNBGMXfbrubzBqbg4kL%0A0lJLgiVKE/AP6Kl6UUFQNAeDIAj'
'VS08aELx59GQPAREV/4BeiqcqROpRD4pUNCJSS21OgloISWME%0AZ/aPb6ARdNeTCz92m'
'O%2B9N9/w7RphGOJ/nsH%2Bolqtvg%2BCYJR8q9VquThxuVz%2BoJTKeZ63Uq/XC38E%0'
'A0Jj3ff8%2BOVupVGLbolkzQw5HOqAxQU4wXWWnZrykmYD0QsgAOJe9hpEUcPr8i0GaJ8'
'n2vs/sL2h8%0AR66TpVfWTdETHWE6GRGKjGiiKNLii5BSLpN7pBHpgMYhMkm8tPUWz3sL'
'2D1wFaY/jvnWcTTaE5Dy%0AjMfTT5J0XIAiTRYn3ASwZ1MKbTmN7z%2BKaHUOYqmb1fcP'
'iNa4kQBuyvWAHYfcHGzDgYcx9NKrwJYH%0ACAyF21JiPWBnXMAQOea6bmn%2B4ueYGZi8'
'gtymNVobF7BG5prNpjd%2BeW6X4BSUD0gOdCpzA8MpA/v2%0Av15kl4%2BpK0emwHSbjJ'
'GBlz%2BvYM1fQeDrYOBTdzOGvDf6EFNr%2BLYjHbBgsaCLxr%2BmoNQjU2vYhRXp%0AgI'
'UOmSWWnsJRfjlOZhrexgtYDZ/gWbetNRbNs6QT10GJglNk64HMaGgbAkoMo5fiFNy7CKD'
'QUGqE%0A5r38YktxAfSqW7Zt33l66WtkAkACjuNsaLVaDxlw5HdJ/86aYrG4WCgUZD6fX'
'%2Bjv/U0ymfxoWVZo%0AmuZyf%2B8XqfGP49CCrBUAAAAASUVORK5CYII%3D%0A'
)
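# Illustrative sketch (added for exposition; not part of this component). Shows
# the shape of one more customization arg spec following the schema conventions
# above; the name, description, and default are placeholder assumptions.
_EXAMPLE_ARG_SPEC = {
    'name': 'loop',
    'description': 'Restart the video automatically once it ends?',
    'schema': {
        'type': 'bool'
    },
    'default_value': False,
}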
| 36.686047
| 79
| 0.675436
|
from extensions.rich_text_components import base
NONNEGATIVE_INT_SCHEMA = {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 0
}],
}
class Video(base.BaseRichTextComponent):
name = 'Video'
category = 'Basic Input'
description = 'A YouTube video.'
frontend_name = 'video'
tooltip = 'Insert video'
_customization_arg_specs = [{
'name': 'video_id',
'description': (
'The YouTube id for this video. This is the 11-character string '
'after \'v=\' in the video URL.'),
'schema': {
'type': 'unicode',
},
'default_value': '',
}, {
'name': 'start',
'description': (
'Video start time in seconds: (leave at 0 to start at the '
'beginning.)'),
'schema': NONNEGATIVE_INT_SCHEMA,
'default_value': 0
}, {
'name': 'end',
'description': (
'Video end time in seconds: (leave at 0 to play until the end.)'),
'schema': NONNEGATIVE_INT_SCHEMA,
'default_value': 0
}, {
'name': 'autoplay',
'description': (
'Autoplay this video once the question has loaded?'),
'schema': {
'type': 'bool'
},
'default_value': False,
}]
icon_data_url = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA'
'ABGdBTUEAAK/INwWK6QAAABl0RVh0%0AU29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZ'
'TwAAAIfSURBVDjLpZNPaBNBGMXfbrubzBqbg4kL%0A0lJLgiVKE/AP6Kl6UUFQNAeDIAj'
'VS08aELx59GQPAREV/4BeiqcqROpRD4pUNCJSS21OgloISWME%0AZ/aPb6ARdNeTCz92m'
'O%2B9N9/w7RphGOJ/nsH%2Bolqtvg%2BCYJR8q9VquThxuVz%2BoJTKeZ63Uq/XC38E%0'
'A0Jj3ff8%2BOVupVGLbolkzQw5HOqAxQU4wXWWnZrykmYD0QsgAOJe9hpEUcPr8i0GaJ8'
'n2vs/sL2h8%0AR66TpVfWTdETHWE6GRGKjGiiKNLii5BSLpN7pBHpgMYhMkm8tPUWz3sL'
'2D1wFaY/jvnWcTTaE5Dy%0AjMfTT5J0XIAiTRYn3ASwZ1MKbTmN7z%2BKaHUOYqmb1fcP'
'iNa4kQBuyvWAHYfcHGzDgYcx9NKrwJYH%0ACAyF21JiPWBnXMAQOea6bmn%2B4ueYGZi8'
'gtymNVobF7BG5prNpjd%2BeW6X4BSUD0gOdCpzA8MpA/v2%0Av15kl4%2BpK0emwHSbjJ'
'GBlz%2BvYM1fQeDrYOBTdzOGvDf6EFNr%2BLYjHbBgsaCLxr%2BmoNQjU2vYhRXp%0AgI'
'UOmSWWnsJRfjlOZhrexgtYDZ/gWbetNRbNs6QT10GJglNk64HMaGgbAkoMo5fiFNy7CKD'
'QUGqE%0A5r38YktxAfSqW7Zt33l66WtkAkACjuNsaLVaDxlw5HdJ/86aYrG4WCgUZD6fX'
'%2Bjv/U0ymfxoWVZo%0AmuZyf%2B8XqfGP49CCrBUAAAAASUVORK5CYII%3D%0A'
)
| true
| true
|
790abcfda02d081c46add1dfe3f1280ee73eed22
| 3,078
|
py
|
Python
|
bokeh/models/__init__.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/models/__init__.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/models/__init__.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide Bokeh model "building block" classes.
One of the central design principles of Bokeh is that, regardless of
how the plot creation code is spelled in Python (or other languages),
the result is an object graph that encompasses all the visual and
data aspects of the scene. Furthermore, this *scene graph* is to be
serialized, and it is this serialized graph that the client library
BokehJS uses to render the plot. The low-level objects that comprise
a Bokeh scene graph are called :ref:`Models <bokeh.model>`.
'''
# This file is excluded from flake8 checking in setup.cfg
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.property.dataspec import expr, field, value # Legacy API
from ..model import Model
from .annotations import * # lgtm [py/polluting-import]
from .axes import *
from .callbacks import *
from .canvas import *
from .expressions import *
from .filters import *
from .formatters import *
from .glyphs import *
from .graphs import *
from .grids import *
from .labeling import *
from .layouts import *
from .map_plots import *
from .mappers import *
from .plots import *
from .ranges import *
from .renderers import *
from .scales import *
from .selections import *
from .sources import *
from .text import *
from .textures import *
from .tickers import *
from .tiles import *
from .tools import *
from .transforms import *
from .widgets import * # lgtm [py/polluting-import]
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# __all__ = include all explicit transitive imports above
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 36.642857
| 78
| 0.445094
|
from __future__ import annotations
import logging
log = logging.getLogger(__name__)
from ..core.property.dataspec import expr, field, value
from ..model import Model
from .annotations import *
from .axes import *
from .callbacks import *
from .canvas import *
from .expressions import *
from .filters import *
from .formatters import *
from .glyphs import *
from .graphs import *
from .grids import *
from .labeling import *
from .layouts import *
from .map_plots import *
from .mappers import *
from .plots import *
from .ranges import *
from .renderers import *
from .scales import *
from .selections import *
from .sources import *
from .text import *
from .textures import *
from .tickers import *
from .tiles import *
from .tools import *
from .transforms import *
from .widgets import *
| true
| true
|
790abd468c1c9305c2b9c40a99de30e2e4abee0f
| 4,916
|
py
|
Python
|
brainframe/cli/docker_compose.py
|
aotuai/brainframe_cli
|
dcc33494cf3be042ae697ff2c7377e8632ba4132
|
[
"BSD-3-Clause"
] | 3
|
2020-06-16T17:57:26.000Z
|
2020-12-02T00:19:22.000Z
|
brainframe/cli/docker_compose.py
|
aotuai/brainframe_cli
|
dcc33494cf3be042ae697ff2c7377e8632ba4132
|
[
"BSD-3-Clause"
] | 13
|
2020-07-02T17:06:36.000Z
|
2020-12-10T21:16:17.000Z
|
brainframe/cli/docker_compose.py
|
aotuai/brainframe_cli
|
dcc33494cf3be042ae697ff2c7377e8632ba4132
|
[
"BSD-3-Clause"
] | 1
|
2021-02-18T07:28:11.000Z
|
2021-02-18T07:28:11.000Z
|
import os
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import i18n
import requests
import yaml
from . import config, frozen_utils, os_utils, print_utils
# The URL to the docker-compose.yml
BRAINFRAME_DOCKER_COMPOSE_URL = "https://{subdomain}aotu.ai/releases/brainframe/{version}/docker-compose.yml"
# The URL to the latest tag, which is just a file containing the latest version
# as a string
BRAINFRAME_LATEST_TAG_URL = (
"https://{subdomain}aotu.ai/releases/brainframe/latest"
)
def assert_installed(install_path: Path) -> None:
compose_path = install_path / "docker-compose.yml"
if not compose_path.is_file():
print_utils.fail_translate(
"general.brainframe-must-be-installed",
install_env_var=config.install_path.name,
)
def run(install_path: Path, commands: List[str]) -> None:
_assert_has_docker_permissions()
compose_path = install_path / "docker-compose.yml"
if frozen_utils.is_frozen():
# Rely on the system's Docker Compose, since Compose can't be easily embedded
# into a PyInstaller executable
full_command = ["docker-compose"]
else:
# Use the included Docker Compose
full_command = [
sys.executable,
"-m",
"compose",
]
full_command += [
"--file",
str(compose_path),
]
# Provide the override file if it exists
compose_override_path = install_path / "docker-compose.override.yml"
if compose_override_path.is_file():
full_command += ["--file", str(compose_override_path)]
# Provide the .env file if it exists
env_path = install_path / ".env"
if env_path.is_file():
full_command += ["--env-file", str(env_path)]
os_utils.run(full_command + commands)
def download(target: Path, version: str = "latest") -> None:
_assert_has_write_permissions(target.parent)
if version == "latest":
version = get_latest_version()
credentials = config.staging_credentials()
url = BRAINFRAME_DOCKER_COMPOSE_URL.format(
subdomain="staging." if config.is_staging.value else "",
version=version,
)
response = requests.get(url, auth=credentials, stream=True)
if not response.ok:
print_utils.fail_translate(
"general.error-downloading-docker-compose",
status_code=response.status_code,
error_message=response.text,
)
target.write_text(response.text)
if os_utils.is_root():
# Fix the permissions of the docker-compose.yml so that the BrainFrame
# group can edit it
os_utils.give_brainframe_group_rw_access([target])
def get_latest_version() -> str:
"""
:return: The latest available version in the format "vX.Y.Z"
"""
# Add the flags to authenticate with staging if the user wants to download
# from there
subdomain = "staging." if config.is_staging.value else ""
credentials = config.staging_credentials()
# Check what the latest version is
url = BRAINFRAME_LATEST_TAG_URL.format(subdomain=subdomain)
response = requests.get(url, auth=credentials)
return response.text
def check_existing_version(install_path: Path) -> str:
compose_path = install_path / "docker-compose.yml"
compose = yaml.load(compose_path.read_text(), Loader=yaml.SafeLoader)
version = compose["services"]["core"]["image"].split(":")[-1]
version = "v" + version
return version
def _assert_has_docker_permissions() -> None:
"""Fails if the user does not have permissions to interact with Docker"""
if not (os_utils.is_root() or os_utils.currently_in_group("docker")):
error_message = (
i18n.t("general.docker-bad-permissions")
+ "\n"
+ _group_recommendation_message("docker")
)
print_utils.fail(error_message)
def _assert_has_write_permissions(path: Path) -> None:
"""Fails if the user does not have write access to the given path."""
if os.access(path, os.W_OK):
return
error_message = i18n.t("general.file-bad-write-permissions", path=path)
error_message += "\n"
if path.stat().st_gid == os_utils.BRAINFRAME_GROUP_ID:
error_message += " " + _group_recommendation_message("brainframe")
else:
error_message += " " + i18n.t(
"general.unexpected-group-for-file", path=path, group="brainframe"
)
print_utils.fail(error_message)
def _group_recommendation_message(group: str) -> str:
if os_utils.added_to_group("brainframe"):
# The user is in the group, they just need to restart
return i18n.t("general.restart-for-group-access", group=group)
else:
# The user is not in the group, so they need to either add
# themselves or use sudo
return i18n.t("general.retry-as-root-or-group", group=group)
avg_line_length: 31.716129 | max_line_length: 109 | alphanum_fraction: 0.672091
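A minimal standalone sketch of how run() above assembles the Compose command line; the install path and the printed example are hypothetical, not part of the CLI:

from pathlib import Path
from typing import List


def build_compose_command(install_path: Path, commands: List[str]) -> List[str]:
    # Mirror run(): base command, compose file, optional override file,
    # optional .env file, then the user's subcommand.
    command = ["docker-compose", "--file", str(install_path / "docker-compose.yml")]
    override = install_path / "docker-compose.override.yml"
    if override.is_file():
        command += ["--file", str(override)]
    env_file = install_path / ".env"
    if env_file.is_file():
        command += ["--env-file", str(env_file)]
    return command + commands


print(build_compose_command(Path("/usr/local/share/brainframe"), ["up", "-d"]))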
import os
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import i18n
import requests
import yaml
from . import config, frozen_utils, os_utils, print_utils
BRAINFRAME_DOCKER_COMPOSE_URL = "https://{subdomain}aotu.ai/releases/brainframe/{version}/docker-compose.yml"
BRAINFRAME_LATEST_TAG_URL = (
"https://{subdomain}aotu.ai/releases/brainframe/latest"
)
def assert_installed(install_path: Path) -> None:
compose_path = install_path / "docker-compose.yml"
if not compose_path.is_file():
print_utils.fail_translate(
"general.brainframe-must-be-installed",
install_env_var=config.install_path.name,
)
def run(install_path: Path, commands: List[str]) -> None:
_assert_has_docker_permissions()
compose_path = install_path / "docker-compose.yml"
if frozen_utils.is_frozen():
full_command = ["docker-compose"]
else:
full_command = [
sys.executable,
"-m",
"compose",
]
full_command += [
"--file",
str(compose_path),
]
compose_override_path = install_path / "docker-compose.override.yml"
if compose_override_path.is_file():
full_command += ["--file", str(compose_override_path)]
env_path = install_path / ".env"
if env_path.is_file():
full_command += ["--env-file", str(env_path)]
os_utils.run(full_command + commands)
def download(target: Path, version: str = "latest") -> None:
_assert_has_write_permissions(target.parent)
if version == "latest":
version = get_latest_version()
credentials = config.staging_credentials()
url = BRAINFRAME_DOCKER_COMPOSE_URL.format(
subdomain="staging." if config.is_staging.value else "",
version=version,
)
response = requests.get(url, auth=credentials, stream=True)
if not response.ok:
print_utils.fail_translate(
"general.error-downloading-docker-compose",
status_code=response.status_code,
error_message=response.text,
)
target.write_text(response.text)
if os_utils.is_root():
os_utils.give_brainframe_group_rw_access([target])
def get_latest_version() -> str:
subdomain = "staging." if config.is_staging.value else ""
credentials = config.staging_credentials()
url = BRAINFRAME_LATEST_TAG_URL.format(subdomain=subdomain)
response = requests.get(url, auth=credentials)
return response.text
def check_existing_version(install_path: Path) -> str:
compose_path = install_path / "docker-compose.yml"
compose = yaml.load(compose_path.read_text(), Loader=yaml.SafeLoader)
version = compose["services"]["core"]["image"].split(":")[-1]
version = "v" + version
return version
def _assert_has_docker_permissions() -> None:
if not (os_utils.is_root() or os_utils.currently_in_group("docker")):
error_message = (
i18n.t("general.docker-bad-permissions")
+ "\n"
+ _group_recommendation_message("docker")
)
print_utils.fail(error_message)
def _assert_has_write_permissions(path: Path) -> None:
if os.access(path, os.W_OK):
return
error_message = i18n.t("general.file-bad-write-permissions", path=path)
error_message += "\n"
if path.stat().st_gid == os_utils.BRAINFRAME_GROUP_ID:
error_message += " " + _group_recommendation_message("brainframe")
else:
error_message += " " + i18n.t(
"general.unexpected-group-for-file", path=path, group="brainframe"
)
print_utils.fail(error_message)
def _group_recommendation_message(group: str) -> str:
if os_utils.added_to_group("brainframe"):
return i18n.t("general.restart-for-group-access", group=group)
else:
return i18n.t("general.retry-as-root-or-group", group=group)
true | true
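A small self-contained sketch (made-up YAML, hypothetical image tag) of the tag parsing that check_existing_version() in the file above performs on docker-compose.yml:

import yaml

compose_text = """
services:
  core:
    image: aotuai/brainframe_core:0.27.1
"""
# Same extraction as check_existing_version(): take the core service's image
# tag after the colon and prefix it with "v".
compose = yaml.load(compose_text, Loader=yaml.SafeLoader)
version = "v" + compose["services"]["core"]["image"].split(":")[-1]
print(version)  # v0.27.1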
790abed0413cd2460b7033477a36bfbdec078f2d | 10,432 | py | Python
tests/linen/linen_linear_test.py | pschuh/flax | 9eee5149f345bd871555f3b53e3605f58588c883 | ["Apache-2.0"] | stars: null | null | null
tests/linen/linen_linear_test.py | pschuh/flax | 9eee5149f345bd871555f3b53e3605f58588c883 | ["Apache-2.0"] | issues: null | null | null
tests/linen/linen_linear_test.py | pschuh/flax | 9eee5149f345bd871555f3b53e3605f58588c883 | ["Apache-2.0"] | forks: null | null | null
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.nn.linear."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class LinearTest(parameterized.TestCase):
def test_dense(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dense_module = nn.Dense(
features=4,
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
self.assertEqual(y.shape, (1, 4))
np.testing.assert_allclose(y, np.full((1, 4), 4.))
def test_dense_extra_batch_dims(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 2, 3))
dense_module = nn.Dense(
features=4,
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 2, 4), 4.))
def test_dense_no_bias(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dense_module = nn.Dense(
features=4,
use_bias=False,
kernel_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 4), 3.))
def test_dense_is_dense_general(self):
x = jax.random.normal(random.PRNGKey(0), (5, 3))
dense_module = nn.Dense(
features=4,
use_bias=True,
bias_init=initializers.normal(),
)
y1, _ = dense_module.init_with_output(dict(params=random.PRNGKey(1)), x)
dg_module = nn.DenseGeneral(
features=4,
use_bias=True,
bias_init=initializers.normal(),
)
y2, _ = dg_module.init_with_output(dict(params=random.PRNGKey(1)), x)
np.testing.assert_allclose(y1, y2)
def test_dense_general_batch_dim_raises(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3, 2, 5))
with self.assertRaises(ValueError):
dg_module = nn.DenseGeneral(
features=4,
batch_dims=(0, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
dg_module.init_with_output(rng, x)
def test_dense_general_two_out(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dg_module = nn.DenseGeneral(
features=(2, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dg_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 2, 2), 4.))
def test_dense_general_two_in(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 2, 2))
dg_module = nn.DenseGeneral(
features=3,
axis=(-2, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dg_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 3), 5.))
def test_dense_general_batch_dim(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((2, 1, 3, 5))
state = {'counter': 0.}
def _counter_init(rng, shape, dtype, state):
del rng, dtype
state['counter'] += 1.
return jnp.full(shape, state['counter'])
counter_init = functools.partial(_counter_init, state=state)
dg_module = nn.DenseGeneral(
features=7,
axis=(3, -2),
batch_dims=0,
bias_init=initializers.ones,
kernel_init=counter_init,
)
y, _ = dg_module.init_with_output(rng, x)
target = np.concatenate(
[np.full((1, 1, 7), 16.), np.full((1, 1, 7), 31.)], axis=0)
np.testing.assert_allclose(y, target)
@parameterized.parameters([((-2, 3), (), 'bijk,jklm->bilm'),
((3, -2), (), 'bijk,jklm->bilm'),
((-2, 3), (0,), 'bijk,bjklm->bilm')])
def test_dense_general_vs_numpy(self, axis, batch_dims, einsum_expr):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((16, 8, 9, 10))
dg_module = nn.DenseGeneral(
features=(11, 12),
axis=axis,
batch_dims=batch_dims,
bias_init=initializers.ones,
kernel_init=initializers.normal(),
)
y, initial_params = dg_module.init_with_output(rng, x)
target = np.einsum(einsum_expr, x, initial_params['params']['kernel']) + 1.
np.testing.assert_allclose(y, target, atol=1e-6)
@parameterized.parameters([((3,),), (3,)])
def test_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 3))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
np.testing.assert_allclose(y, np.full((1, 6, 4), 10.))
@parameterized.parameters([((3,),), (3,)])
def test_single_input_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((8, 3))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
np.testing.assert_allclose(y, np.full((6, 4), 10.))
@parameterized.parameters([((3,),), (3,)])
def test_group_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 4))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
feature_group_count=2,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 2, 4))
np.testing.assert_allclose(y, np.full((1, 6, 4), 7.))
@parameterized.parameters([((3,),), (3,)])
def test_conv_transpose(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 3))
conv_transpose_module = nn.ConvTranspose(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_transpose_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
correct_ans = np.array([[[ 4., 4., 4., 4.],
[ 7., 7., 7., 7.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[ 7., 7., 7., 7.],
[ 4., 4., 4., 4.]]])
np.testing.assert_allclose(y, correct_ans)
@parameterized.parameters([((3,),), (3,)])
def test_single_input_conv_transpose(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((8, 3))
conv_transpose_module = nn.ConvTranspose(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_transpose_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
correct_ans = np.array([[ 4., 4., 4., 4.],
[ 7., 7., 7., 7.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[ 7., 7., 7., 7.],
[ 4., 4., 4., 4.]])
np.testing.assert_allclose(y, correct_ans)
def test_embed(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.arange(4)[None]
dummy_embedding = jnp.broadcast_to(
jnp.arange(4)[..., None], (4, 3)).astype(jnp.float32)
embed_module = nn.Embed(
num_embeddings=4,
features=3,
embedding_init=lambda rng, shape, dtype: dummy_embedding,
)
y, initial_params = embed_module.init_with_output(rng, x)
np.testing.assert_allclose(y, dummy_embedding[None])
z = embed_module.apply(initial_params, jnp.ones((3,)), method=embed_module.attend)
np.testing.assert_allclose(z, 3. * jnp.arange(4))
def test_non_final_axis(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
return nn.DenseGeneral(features=6, axis=1, name='dense')(x)
x = jnp.ones((2, 4, 8))
y, variables = Foo().init_with_output(random.PRNGKey(0), x)
self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
'dense': {'kernel': (4, 6), 'bias': (6,)}
})
self.assertEqual(y.shape, (2, 8, 6))
def test_non_final_axes(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
return nn.DenseGeneral(features=6, axis=(0, 1), name='dense')(x)
x = jnp.ones((2, 4, 8))
y, variables = Foo().init_with_output(random.PRNGKey(0), x)
self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
'dense': {'kernel': (2, 4, 6), 'bias': (6,)}
})
self.assertEqual(y.shape, (8, 6))
if __name__ == '__main__':
absltest.main()
avg_line_length: 34.315789 | max_line_length: 86 | alphanum_fraction: 0.592216
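A NumPy-only sanity check (no Flax required) of the arithmetic behind test_conv above: an all-ones kernel of size 3 over all-ones input sums 3 * 3 = 9 contributions per output feature, the all-ones bias adds 1 for a total of 10., and 'VALID' padding leaves 8 - 3 + 1 = 6 output positions:

import numpy as np

x = np.ones((1, 8, 3))       # (batch, length, in_features), as in the test
kernel = np.ones((3, 3, 4))  # (kernel_size, in_features, out_features)
bias = np.ones(4)

out_len = x.shape[1] - kernel.shape[0] + 1  # 'VALID' padding -> 6
y = np.stack(
    [np.einsum("bli,lio->bo", x[:, t:t + 3, :], kernel) for t in range(out_len)],
    axis=1,
) + bias
assert y.shape == (1, 6, 4)
assert np.allclose(y, 10.0)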
import functools
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
jax.config.parse_flags_with_absl()
class LinearTest(parameterized.TestCase):
def test_dense(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dense_module = nn.Dense(
features=4,
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
self.assertEqual(y.shape, (1, 4))
np.testing.assert_allclose(y, np.full((1, 4), 4.))
def test_dense_extra_batch_dims(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 2, 3))
dense_module = nn.Dense(
features=4,
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 2, 4), 4.))
def test_dense_no_bias(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dense_module = nn.Dense(
features=4,
use_bias=False,
kernel_init=initializers.ones,
)
y, _ = dense_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 4), 3.))
def test_dense_is_dense_general(self):
x = jax.random.normal(random.PRNGKey(0), (5, 3))
dense_module = nn.Dense(
features=4,
use_bias=True,
bias_init=initializers.normal(),
)
y1, _ = dense_module.init_with_output(dict(params=random.PRNGKey(1)), x)
dg_module = nn.DenseGeneral(
features=4,
use_bias=True,
bias_init=initializers.normal(),
)
y2, _ = dg_module.init_with_output(dict(params=random.PRNGKey(1)), x)
np.testing.assert_allclose(y1, y2)
def test_dense_general_batch_dim_raises(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3, 2, 5))
with self.assertRaises(ValueError):
dg_module = nn.DenseGeneral(
features=4,
batch_dims=(0, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
dg_module.init_with_output(rng, x)
def test_dense_general_two_out(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 3))
dg_module = nn.DenseGeneral(
features=(2, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dg_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 2, 2), 4.))
def test_dense_general_two_in(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 2, 2))
dg_module = nn.DenseGeneral(
features=3,
axis=(-2, 2),
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, _ = dg_module.init_with_output(rng, x)
np.testing.assert_allclose(y, np.full((1, 3), 5.))
def test_dense_general_batch_dim(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((2, 1, 3, 5))
state = {'counter': 0.}
def _counter_init(rng, shape, dtype, state):
del rng, dtype
state['counter'] += 1.
return jnp.full(shape, state['counter'])
counter_init = functools.partial(_counter_init, state=state)
dg_module = nn.DenseGeneral(
features=7,
axis=(3, -2),
batch_dims=0,
bias_init=initializers.ones,
kernel_init=counter_init,
)
y, _ = dg_module.init_with_output(rng, x)
target = np.concatenate(
[np.full((1, 1, 7), 16.), np.full((1, 1, 7), 31.)], axis=0)
np.testing.assert_allclose(y, target)
@parameterized.parameters([((-2, 3), (), 'bijk,jklm->bilm'),
((3, -2), (), 'bijk,jklm->bilm'),
((-2, 3), (0,), 'bijk,bjklm->bilm')])
def test_dense_general_vs_numpy(self, axis, batch_dims, einsum_expr):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((16, 8, 9, 10))
dg_module = nn.DenseGeneral(
features=(11, 12),
axis=axis,
batch_dims=batch_dims,
bias_init=initializers.ones,
kernel_init=initializers.normal(),
)
y, initial_params = dg_module.init_with_output(rng, x)
target = np.einsum(einsum_expr, x, initial_params['params']['kernel']) + 1.
np.testing.assert_allclose(y, target, atol=1e-6)
@parameterized.parameters([((3,),), (3,)])
def test_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 3))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
np.testing.assert_allclose(y, np.full((1, 6, 4), 10.))
@parameterized.parameters([((3,),), (3,)])
def test_single_input_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((8, 3))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
np.testing.assert_allclose(y, np.full((6, 4), 10.))
@parameterized.parameters([((3,),), (3,)])
def test_group_conv(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 4))
conv_module = nn.Conv(
features=4,
kernel_size=kernel_size,
feature_group_count=2,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 2, 4))
np.testing.assert_allclose(y, np.full((1, 6, 4), 7.))
@parameterized.parameters([((3,),), (3,)])
def test_conv_transpose(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((1, 8, 3))
conv_transpose_module = nn.ConvTranspose(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_transpose_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
correct_ans = np.array([[[ 4., 4., 4., 4.],
[ 7., 7., 7., 7.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[ 7., 7., 7., 7.],
[ 4., 4., 4., 4.]]])
np.testing.assert_allclose(y, correct_ans)
@parameterized.parameters([((3,),), (3,)])
def test_single_input_conv_transpose(self, kernel_size):
rng = dict(params=random.PRNGKey(0))
x = jnp.ones((8, 3))
conv_transpose_module = nn.ConvTranspose(
features=4,
kernel_size=kernel_size,
padding='VALID',
kernel_init=initializers.ones,
bias_init=initializers.ones,
)
y, initial_params = conv_transpose_module.init_with_output(rng, x)
self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
correct_ans = np.array([[ 4., 4., 4., 4.],
[ 7., 7., 7., 7.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[10., 10., 10., 10.],
[ 7., 7., 7., 7.],
[ 4., 4., 4., 4.]])
np.testing.assert_allclose(y, correct_ans)
def test_embed(self):
rng = dict(params=random.PRNGKey(0))
x = jnp.arange(4)[None]
dummy_embedding = jnp.broadcast_to(
jnp.arange(4)[..., None], (4, 3)).astype(jnp.float32)
embed_module = nn.Embed(
num_embeddings=4,
features=3,
embedding_init=lambda rng, shape, dtype: dummy_embedding,
)
y, initial_params = embed_module.init_with_output(rng, x)
np.testing.assert_allclose(y, dummy_embedding[None])
z = embed_module.apply(initial_params, jnp.ones((3,)), method=embed_module.attend)
np.testing.assert_allclose(z, 3. * jnp.arange(4))
def test_non_final_axis(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
return nn.DenseGeneral(features=6, axis=1, name='dense')(x)
x = jnp.ones((2, 4, 8))
y, variables = Foo().init_with_output(random.PRNGKey(0), x)
self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
'dense': {'kernel': (4, 6), 'bias': (6,)}
})
self.assertEqual(y.shape, (2, 8, 6))
def test_non_final_axes(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
return nn.DenseGeneral(features=6, axis=(0, 1), name='dense')(x)
x = jnp.ones((2, 4, 8))
y, variables = Foo().init_with_output(random.PRNGKey(0), x)
self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
'dense': {'kernel': (2, 4, 6), 'bias': (6,)}
})
self.assertEqual(y.shape, (8, 6))
if __name__ == '__main__':
absltest.main()
true | true
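A NumPy restatement (random values, no Flax required) of the relation that test_dense_general_vs_numpy above verifies for axis=(-2, 3) with no batch dims: DenseGeneral contracts the last two input axes against the kernel, i.e. einsum('bijk,jklm->bilm', x, kernel) + bias:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(16, 8, 9, 10))
kernel = rng.normal(size=(9, 10, 11, 12))  # contracted axes first, feature axes last
bias = np.ones((11, 12))

y = np.einsum("bijk,jklm->bilm", x, kernel) + bias
assert y.shape == (16, 8, 11, 12)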
790abef3c00251911fcf64fdfc777b33ae51dfda | 104,740 | py | Python
python/pyspark/pandas/generic.py | XpressAI/spark | 0a838dcd71c733289e60d9f74e8267027c7b2c4a | ["BSD-2-Clause", "Apache-2.0", "CC0-1.0", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | stars: 1 | 2018-10-29T08:46:46.000Z | 2018-10-29T08:46:46.000Z
python/pyspark/pandas/generic.py | XpressAI/spark | 0a838dcd71c733289e60d9f74e8267027c7b2c4a | ["BSD-2-Clause", "Apache-2.0", "CC0-1.0", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | issues: 2 | 2015-10-09T22:28:49.000Z | 2019-12-20T00:07:37.000Z
python/pyspark/pandas/generic.py | XpressAI/spark | 0a838dcd71c733289e60d9f74e8267027c7b2c4a | ["BSD-2-Clause", "Apache-2.0", "CC0-1.0", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | forks: null | null | null
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
        .. note:: the current implementation of cummin uses Spark's Window without
            specifying partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
        .. note:: the current implementation of cummax uses Spark's Window without
            specifying partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
        .. note:: the current implementation of cumsum uses Spark's Window without
            specifying partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
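    # What "Spark's Window without specifying partition specification" in the
    # notes above means in PySpark SQL terms; a hedged sketch (the column names
    # and the `sdf` DataFrame are hypothetical, not part of this module):
    #
    #   from pyspark.sql import Window, functions as F
    #   w = Window.orderBy("ord").rowsBetween(
    #       Window.unboundedPreceding, Window.currentRow)
    #   sdf.withColumn("cumsum", F.sum("value").over(w))
    #
    # Because the window has no partitionBy, Spark must gather every row into a
    # single partition to order it, which is the performance caveat above.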
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
        .. note:: the current implementation of cumprod uses Spark's Window without
            specifying partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
        .. note:: unlike pandas', pandas-on-Spark emulates the cumulative product by the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
        Exception : If the values are equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
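    # A minimal NumPy sketch (hypothetical values) of the exp(sum(log(...)))
    # trick described in the note above; for positive inputs it reproduces
    # numpy.cumprod:
    #
    #   import numpy as np
    #   x = np.array([2.0, 3.0, 4.0])
    #   np.exp(np.cumsum(np.log(x)))   # -> array([ 2.,  6., 24.])
    #   np.cumprod(x)                  # -> array([ 2.,  6., 24.])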
# TODO: Although this has removed pandas >= 1.0.0, but we're keeping this as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
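    # The Counter logic above, restated with plain pandas (hypothetical frame),
    # for readers unfamiliar with this deprecated API:
    #
    #   import pandas as pd
    #   from collections import Counter
    #   dtypes = pd.DataFrame({"s": ["a"], "i": [1], "j": [2]}).dtypes
    #   pd.Series(dict(Counter(d.name for d in dtypes)))
    #   # object    1
    #   # int64     2
    #   # dtype: int64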
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
        You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
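    # The (callable, data_keyword) branch above is ordinary keyword dispatch; a
    # tiny standalone sketch (hypothetical function) of what it unpacks to:
    #
    #   def f(a, data, b):
    #       return (a, data, b)
    #
    #   func, target = (f, "data")            # what pipe((f, "data"), ...) sees
    #   kwargs = {"a": 1, "b": 2}
    #   kwargs[target] = "the-frame-itself"   # kwargs[target] = self
    #   func(**kwargs)                        # -> (1, 'the-frame-itself', 2)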
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
        A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
                # pandas 0.23 does not support the 'columns' parameter in Series.to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
        .. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
            If 'orient' is 'records', write out line-delimited JSON format.
            Will throw a ValueError for an incorrect 'orient', since other formats
            are not list-like. It should always be True for now.
orient : str, default 'records'
            It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
            raise NotImplementedError("only orient='records' is supported for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
        Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
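    # The BooleanType-to-LongType cast above is why boolean columns average as
    # 0/1 integers, matching pandas; a one-line pandas illustration
    # (hypothetical data):
    #
    #   import pandas as pd
    #   pd.Series([True, False, True]).mean()   # -> 0.666...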
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark's emulates product by ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
        Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
        If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
        By default, the product of an empty or all-NA Series is ``1``:
>>> ps.Series([]).prod()
1.0
        This can be controlled with the ``min_count`` parameter:
>>> ps.Series([]).prod(min_count=1)
nan
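        As a plain-Python sketch of the ``exp(sum(log(...)))`` emulation described
        in the note above (illustrative only; positive inputs assumed):
        >>> import math
        >>> round(math.exp(sum(math.log(x) for x in [2.0, 3.0, 4.0])))
        24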
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
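        ``numeric_only=False`` can also be passed when the columns are homogeneous,
        as noted above; a sketch (skipped since it needs an active Spark session):
        >>> df.min(numeric_only=False)  # doctest: +SKIP
        a    1.0
        b    0.1
        dtype: float64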
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
        count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
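        A sketch with ``numeric_only=True``, which keeps only float, int and boolean
        columns (skipped since it needs an active Spark session):
        >>> df.count(numeric_only=True)  # doctest: +SKIP
        Age       4
        Single    5
        dtype: int64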
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
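        The ``accuracy`` parameter can be passed explicitly; the value below is a
        sketch (skipped since it needs an active Spark session):
        >>> df[('x', 'a')].median(accuracy=100)  # doctest: +SKIP
        25.0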
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
        scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
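        The standard error is the standard deviation divided by the square root of
        the number of elements; a plain-Python sketch of that relationship:
        >>> import statistics
        >>> round(statistics.stdev([1, 2, 3]) / 3 ** 0.5, 6)
        0.57735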
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
    # TODO: the `by` argument only supports grouping names and `as_index` for now. Documentation
    # should be updated when more is supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
        This must be a boolean scalar value, either True or False. Raise a ValueError if
        the object does not have exactly 1 element, or if that element is not boolean.
Returns
        -------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
        If non-boolean or multiple values exist, it raises an exception in all
        cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
            Unlike pandas, NA is also counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
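        Examples
        --------
        A minimal sketch (output assumes pandas-on-Spark semantics; skipped since it
        needs an active Spark session):
        >>> s = ps.Series([4, 3, 5, 2, 6])
        >>> s.rolling(2).sum()  # doctest: +SKIP
        0    NaN
        1    7.0
        2    8.0
        3    7.0
        4    8.0
        dtype: float64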
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
            Unlike pandas, NA is also counted as a period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
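        Examples
        --------
        A minimal sketch (skipped since it needs an active Spark session):
        >>> s = ps.Series([1, 2, 3])
        >>> s.expanding(2).sum()  # doctest: +SKIP
        0    NaN
        1    3.0
        2    6.0
        dtype: float64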
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
        A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
        A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
        # `to_markdown` was newly added in pandas 1.0.0, so it requires pandas >= 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
            without specifying partition specification. This leads to moving all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
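        A sketch with ``limit``, assuming pandas-like limit semantics (skipped since
        it needs an active Spark session):
        >>> psdf.bfill(limit=1)  # doctest: +SKIP
             A    B    C  D
        0  3.0  2.0  NaN  0
        1  3.0  4.0  NaN  1
        2  NaN  3.0  1.0  5
        3  NaN  3.0  1.0  4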
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
            without specifying partition specification. This leads to moving all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
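        A sketch with ``limit``, assuming pandas-like limit semantics (skipped since
        it needs an active Spark session):
        >>> psdf.ffill(limit=1)  # doctest: +SKIP
             A    B    C  D
        0  NaN  2.0  NaN  0
        1  3.0  4.0  NaN  1
        2  3.0  4.0  NaN  5
        3  NaN  3.0  1.0  4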
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 32.721025
| 100
| 0.527
|
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.groupby import GroupBy
from pyspark.pandas.series import Series
from pyspark.pandas.window import Rolling, Expanding
bool_type = bool
class Frame(object, metaclass=ABCMeta):
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# since we're using this for `DataFrame.info` internally.
def get_dtype_counts(self) -> pd.Series:
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
                # pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
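            # ddof=0 selects the population standard deviation, ddof=1 the sample one.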
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
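        # The median is approximated with Spark's percentile_approx at the requested accuracy.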
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
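        # Standard error of the mean: standard deviation divided by the square root of the count.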
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
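        # Keep only rows in which every data column is non-null, then take the first index value.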
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
# `to_markdown` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| true | true |
790abf3e3611e1305d1c69b0dae9c34af032cefe | 44,179 | py | Python
pysnmp-with-texts/ADIC-INTELLIGENT-STORAGE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z
pysnmp-with-texts/ADIC-INTELLIGENT-STORAGE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z
pysnmp-with-texts/ADIC-INTELLIGENT-STORAGE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module ADIC-INTELLIGENT-STORAGE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADIC-INTELLIGENT-STORAGE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:13:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
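#
# Note: pysmi-generated modules such as this one are not imported directly. They are
# executed by pysnmp's MibBuilder, which supplies the `mibBuilder` object referenced
# throughout the file. A minimal loading sketch (the MIB directory path below is an
# assumption, not part of this module):
#
#   from pysnmp.smi import builder
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.addMibSources(builder.DirMibSource('/path/to/pysnmp-with-texts'))
#   mibBuilder.loadModules('ADIC-INTELLIGENT-STORAGE-MIB')
#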
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, NotificationType, iso, Counter64, ObjectIdentity, Counter32, Integer32, Unsigned32, enterprises, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ModuleIdentity, MibIdentifier, Gauge32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "NotificationType", "iso", "Counter64", "ObjectIdentity", "Counter32", "Integer32", "Unsigned32", "enterprises", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Gauge32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
adic = MibIdentifier((1, 3, 6, 1, 4, 1, 3764))
storage = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1))
intelligent = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1))
productAgentInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10))
globalData = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20))
components = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30))
software = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 100))
hardware = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200))
powerAndCooling = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200))
sml = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 300))
network = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 400))
notification = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500))
class Boolean(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("true", 1), ("false", 2))
class AdicMibVersion(DisplayString):
pass
class AdicREDIdentifier(Counter32):
pass
class AdicEnable(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
class AdicAgentStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("other", 1), ("unknown", 2), ("ok", 3), ("non-critical", 4), ("critical", 5), ("non-recoverable", 6))
class AdicOnlineStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("online", 1), ("offline", 2), ("shutdown", 3))
class AdicGlobalId(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class AdicComponentType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
namedValues = NamedValues(("mcb", 1), ("cmb", 2), ("ioBlade", 3), ("rcu", 4), ("networkChasis", 5), ("controlModule", 6), ("expansionModule", 7), ("powerSupply", 8))
class AdicInterfaceType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("scsi", 1), ("fibreChannel", 2))
class AdicSensorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("nominal", 1), ("warningLow", 2), ("warningHigh", 3), ("alarmLow", 4), ("alarmHigh", 5), ("notInstalled", 6), ("noData", 7))
class AdicVoltageType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("dc", 1), ("ac", 2))
class AdicDateAndTime(OctetString):
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(11, 11), )
class AdicTrapSeverity(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("emergency", 1), ("alarm", 2), ("warning", 3), ("notice", 4), ("informational", 5))
class AdicDoorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("open", 1), ("closed", 2), ("closedAndLocked", 3), ("closedAndUnlocked", 4), ("contollerFailed", 5), ("notInstalled", 6), ("noData", 7))
class AdicDriveStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("idle", 1), ("loading", 2), ("ejecting", 3), ("inserted", 4), ("removed", 5), ("notInstalled", 6), ("noData", 7))
class RowStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("active", 1), ("notInService", 2), ("notReady", 3), ("createAndGo", 4), ("createAndWait", 5), ("destroy", 6))
productMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 1), AdicMibVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productMibVersion.setDescription('MIB version identifier.')
productSnmpAgentVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSnmpAgentVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productSnmpAgentVersion.setDescription('SNMP agent version identifier.')
productName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productName.setStatus('mandatory')
if mibBuilder.loadTexts: productName.setDescription('Name of ADIC branded product. Uniquely identifies the product, independent of OEM.')
productDisplayName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayName.setDescription('Name of this agent for display purposes. May be customized for OEM.')
productDescription = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDescription.setStatus('mandatory')
if mibBuilder.loadTexts: productDescription.setDescription('A short description of this SNMP agent.')
productVendor = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVendor.setStatus('mandatory')
if mibBuilder.loadTexts: productVendor.setDescription('Name of the product vendor or OEM.')
productVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productVersion.setDescription('String Format: MNNO.TVBBBPP Examples 1. 091a.TR054 Version 0.91, build 54 of the RCS test code for ADIC 2. 100A.GM052 Version 1.00, build 52 of the MCB GA candidate code for ADIC M Major version number NN Minor version number O OEM (Uppercase when release candidate, otherwise lowercase) A/a - ADIC Others - Reserved) T Target G - GA Candidate Release (labeled build that is a release candidate) T - Test build (labeled build used for formal testing) D - Dev build (labeled build used for unit testing) (lower case) - specifies developer of a local build V Variant S - System R - RCS M - MCB BBB Build number (3 digit sequential number specifying exact build) PP Patch Number (Optional alphanumeric characters denoting patch level of this build if necessary)')
productDisplayVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayVersion.setDescription('The version identifier according to the vendor or OEM.')
productLibraryClass = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 10))).clone(namedValues=NamedValues(("basic", 1), ("intelligent", 2), ("virtual", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: productLibraryClass.setStatus('mandatory')
if mibBuilder.loadTexts: productLibraryClass.setDescription('Basic library includes minimal connectivity hardware. Intelligent library includes SAN appliances and value-added features.')
productSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: productSerialNumber.setDescription('The serial number of the entire library.')
agentGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 1), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentGlobalStatus.setDescription('Current overall status of the agent.')
agentLastGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 2), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentLastGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentLastGlobalStatus.setDescription('The status before the current status which induced an initiative to issue a global status change trap.')
agentTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: agentTimeStamp.setDescription('The last time that the agent values have been updated. Universal time in seconds since UTC 1/1/70.')
agentGetTimeOut = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGetTimeOut.setStatus('mandatory')
if mibBuilder.loadTexts: agentGetTimeOut.setDescription('Suggested time out in milliseconds for how long an SNMP management application should wait while attempting to poll the SNMP agent.')
agentModifiers = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentModifiers.setStatus('mandatory')
if mibBuilder.loadTexts: agentModifiers.setDescription('Agent functional modifiers, when set the modifier is active. ----------------------------------------------------- Bit 3 => Agent in debug mode. ----------------------------------------------------- All other bits are product specific.')
agentRefreshRate = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentRefreshRate.setStatus('mandatory')
if mibBuilder.loadTexts: agentRefreshRate.setDescription('Rate in seconds at which the agent cached data is being updated.')
componentTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10), )
if mibBuilder.loadTexts: componentTable.setStatus('mandatory')
if mibBuilder.loadTexts: componentTable.setDescription("General information about the system's components, including the unique identifiers. The structure of this table is based on the Fibre Alliance MIB connUnitEntry.")
componentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"))
if mibBuilder.loadTexts: componentEntry.setStatus('mandatory')
if mibBuilder.loadTexts: componentEntry.setDescription('A component entry containing objects for a particular component.')
componentId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 1), AdicGlobalId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentId.setStatus('mandatory')
if mibBuilder.loadTexts: componentId.setDescription('The unique identification for this component among those within this proxy domain.')
componentType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 2), AdicComponentType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentType.setStatus('mandatory')
if mibBuilder.loadTexts: componentType.setDescription('The type of this component.')
componentDisplayName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: componentDisplayName.setDescription('Name of this component for display purposes. Different OEMs may have different display names for the same ADIC product.')
componentInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentInfo.setStatus('mandatory')
if mibBuilder.loadTexts: componentInfo.setDescription('A display string containing information about this component.')
componentLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentLocation.setStatus('mandatory')
if mibBuilder.loadTexts: componentLocation.setDescription('Location information for this component.')
componentVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentVendor.setStatus('mandatory')
if mibBuilder.loadTexts: componentVendor.setDescription('Name of the vendor of this component.')
componentSn = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentSn.setStatus('mandatory')
if mibBuilder.loadTexts: componentSn.setDescription('The serial number for this component.')
componentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("unused", 2), ("ok", 3), ("warning", 4), ("failed", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: componentStatus.setDescription('Overall status of the component.')
componentControl = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("resetColdStart", 1), ("resetWarmStart", 2), ("offline", 3), ("online", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentControl.setStatus('mandatory')
if mibBuilder.loadTexts: componentControl.setDescription("This object is used to control the addressed connUnit. NOTE: 'Cold Start' and 'Warm Start' are as defined in MIB II and are not meant to be a factory reset. resetColdStart: the addressed unit performs a 'Cold Start' reset. resetWarmStart: the addressed unit performs a 'Warm Start' reset. offline: the addressed unit puts itself into an implementation dependent 'offline' state. online: the addressed unit puts itself into an implementation dependent 'online' state.")
componentREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentREDId.setStatus('mandatory')
if mibBuilder.loadTexts: componentREDId.setDescription('Runtime Error Detection identifier for this component.')
componentFirmwareVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentFirmwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: componentFirmwareVersion.setDescription('Firmware version (or level) for this component.')
componentGeoAddrAisle = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrAisle.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrAisle.setDescription('The aisle number where this component is located. A negative value indicates that an aisle number is not applicable to this component.')
componentGeoAddrFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrFrame.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrFrame.setDescription('The frame number where this component is located. A negative value indicates that a frame number is not applicable to this component.')
componentGeoAddrRack = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrRack.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrRack.setDescription('The rack number where this component is located. A negative value indicates that a rack number is not applicable to this component.')
componentGeoAddrChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrChassis.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrChassis.setDescription('The chassis number where this component is located. A negative value indicates that a chassis number is not applicable to this component.')
componentGeoAddrBlade = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrBlade.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrBlade.setDescription('The blade number within the network chassis where this component is located. A negative value indicates that a blade number is not applicable to this component.')
componentIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 17), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: componentIpAddress.setDescription('IP address of this component. If the component has no IP address, this object returns 0.0.0.0. The address may refer to an internal network not accessible to an external management application.')
powerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10), )
if mibBuilder.loadTexts: powerSupplyTable.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyTable.setDescription('** This table is optional ** Table of the power supplies.')
powerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"))
if mibBuilder.loadTexts: powerSupplyEntry.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific power supply.')
powerSupplyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyIndex.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyIndex.setDescription('** This object is optional ** Index of this power supply within the component specified by componentId.')
powerSupplyName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyName.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyName.setDescription('** This object is optional ** Display name of this power supply.')
powerSupplyWattage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyWattage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyWattage.setDescription('** This object is optional ** The maximum power output of this power supply. Units are Watts.')
powerSupplyType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 4), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyType.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyType.setDescription('** This object is optional ** DC or AC power supply?')
powerSupplyREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 5), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyREDId.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this power supply.')
powerSupplyRatedVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setDescription('** This object is optional ** Rated output voltage in millivolts of this power supply.')
powerSupplyLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyLocation.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyLocation.setDescription('** This object is optional ** Physical location of this power supply.')
voltageSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20), )
if mibBuilder.loadTexts: voltageSensorTable.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorTable.setDescription('** This table is optional ** Table of the voltage sensors.')
voltageSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "voltageSensorIndex"))
if mibBuilder.loadTexts: voltageSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific voltage sensor.')
voltageSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorIndex.setDescription('** This object is optional ** Index of this voltage sensor within the component specified by componentId.')
voltageSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorName.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorName.setDescription('** This object is optional ** Display name of this voltage sensor.')
voltageSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorStatus.setDescription('** This object is optional ** What is the state of this voltage sensor? Is the voltage in the nominal, warning or alarm region?')
voltageSensorMillivolts = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorMillivolts.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorMillivolts.setDescription('** This object is optional ** What is the voltage in millivolts of this voltage sensor?')
voltageSensorType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 5), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorType.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorType.setDescription('** This object is optional ** DC or AC voltage sensor?')
voltageSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalLo.setDescription('** This object is optional ** Lower voltage limit of the nominal state for this voltage sensor. Units are millivolts.')
voltageSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalHi.setDescription('** This object is optional ** Upper voltage limit of the nominal state for this voltage sensor. Units are millivolts.')
voltageSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningLo.setDescription('** This object is optional ** Lower voltage limit of the warning state for this voltage sensor. Units are millivolts. If the voltage falls below this limit, the sensor enters the alarm state.')
voltageSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningHi.setDescription('** This object is optional ** Upper voltage limit of the warning state for this voltage sensor. Units are millivolts. If the voltage rises above this limit, the sensor enters the alarm state.')
voltageSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorLocation.setDescription('** This object is optional ** Physical location of the voltage sensor.')
voltageSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 11), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this voltage sensor.')
temperatureSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30), )
if mibBuilder.loadTexts: temperatureSensorTable.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorTable.setDescription('** This table is optional ** Table of the temperature sensors in each component.')
temperatureSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "temperatureSensorIndex"))
if mibBuilder.loadTexts: temperatureSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific sensor.')
temperatureSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorIndex.setDescription('** This object is optional ** Index of this temperatureSensor within the component specified by componentId.')
temperatureSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorName.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorName.setDescription('** This object is optional ** Display name of this temperatureSensor.')
temperatureSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorStatus.setDescription('** This object is optional ** What is the state of this temperatureSensor? Is the temperature in the nominal, warning or alarm region?')
temperatureSensorDegreesCelsius = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setDescription('** This object is optional ** The temperature in degrees Celsius for this temperature sensor.')
temperatureSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalLo.setDescription('** This object is optional ** Lower temperature limit of the nominal state for this temperature sensor. Units are degrees Celsius.')
temperatureSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalHi.setDescription('** This object is optional ** Upper temperature limit of the nominal state for this temperature sensor. Units are degrees Celsius.')
temperatureSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningLo.setDescription('** This object is optional ** Lower temperature limit of the warning state for this temperature sensor. Units are degrees Celsius. If the temperature falls below this limit, the sensor enters the alarm state.')
temperatureSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningHi.setDescription('** This object is optional ** Upper temperature limit of the warning state for this temperature sensor. Units are degrees Celsius. If the temperature rises above this limit, the sensor enters the alarm state.')
temperatureSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorLocation.setDescription('** This object is optional ** Physical location of this temperature sensor.')
temperatureSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this temperature sensor.')
coolingFanTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40), )
if mibBuilder.loadTexts: coolingFanTable.setStatus('optional')
if mibBuilder.loadTexts: coolingFanTable.setDescription('** This table is optional ** Table of cooling fans in the library.')
coolingFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "coolingFanIndex"))
if mibBuilder.loadTexts: coolingFanEntry.setStatus('optional')
if mibBuilder.loadTexts: coolingFanEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific cooling fan.')
coolingFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanIndex.setStatus('optional')
if mibBuilder.loadTexts: coolingFanIndex.setDescription('** This object is optional ** Index of this cooling fan within the component specified by componentId.')
coolingFanName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanName.setStatus('optional')
if mibBuilder.loadTexts: coolingFanName.setDescription('** This object is optional ** Display name of this coolingFan.')
coolingFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanStatus.setStatus('optional')
if mibBuilder.loadTexts: coolingFanStatus.setDescription('** This object is optional ** Is the fan speed in the nominal, warning or alarm region?')
coolingFanRPM = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanRPM.setStatus('optional')
if mibBuilder.loadTexts: coolingFanRPM.setDescription('** This object is optional ** The fan speed in revolutions per minute.')
coolingFanNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalLo.setDescription('** This object is optional ** Lower fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalHi.setDescription('** This object is optional ** Upper fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningLo.setDescription('** This object is optional ** Lower fan speed limit of the warning state for this fan. Units are RPM. If the speed falls below this limit, the fan enters the alarmLow state.')
coolingFanWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningHi.setDescription('** This object is optional ** Upper fan speed limit of the warning state for this fan. Units are RPM. If the speed rises above this limit, the fan enters the alarmHigh state.')
coolingFanLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanLocation.setStatus('optional')
if mibBuilder.loadTexts: coolingFanLocation.setDescription('** This object is optional ** Physical location of this fan.')
coolingFanREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanREDId.setStatus('optional')
if mibBuilder.loadTexts: coolingFanREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this fan.')
trapPayloadTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10), )
if mibBuilder.loadTexts: trapPayloadTable.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadTable.setDescription('Defines objects common to all trap payloads.')
trapPayloadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "trapSequenceNumber"))
if mibBuilder.loadTexts: trapPayloadEntry.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadEntry.setDescription('Each entry contains the information for a specific trap payload.')
trapSequenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSequenceNumber.setStatus('mandatory')
if mibBuilder.loadTexts: trapSequenceNumber.setDescription('')
trapSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: trapSeverity.setDescription('')
trapSummaryText = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSummaryText.setStatus('mandatory')
if mibBuilder.loadTexts: trapSummaryText.setDescription('')
trapIntendedUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("public", 1), ("triggerRefresh", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapIntendedUsage.setStatus('mandatory')
if mibBuilder.loadTexts: trapIntendedUsage.setDescription("The value of this qualifier aids the management application in determining how to respond to the trap. If the value is public(1), the information is intended to be propagated to external observers, such as sending email. If the value is triggerRefresh(2), the information is intended to update the management application's data model, but not necessarily propagated to external observers.")
startupSequenceComplete = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,500)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: startupSequenceComplete.setDescription('The component indicated by the value of componentId has successfully completed its startup sequence.')
shutdownSequenceInitiated = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,501)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: shutdownSequenceInitiated.setDescription('The component indicated by the value of componentId has initiated its shutdown sequence.')
componentAdded = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,502)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentAdded.setDescription('The component indicated by the value of componentId has been added to the library.')
componentRemoved = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,503)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentRemoved.setDescription('The component indicated by the value of componentId has been removed from the library.')
productLibraryClassChange = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,504)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"), ("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"))
if mibBuilder.loadTexts: productLibraryClassChange.setDescription('The product library class has changed. This occurs when connectivity hardware is added or removed. The payload contains the productLibraryClass before and after the change.')
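# exportSymbols registers every object defined above with the MIB builder so that other
# MIB modules and the SNMP engine can resolve them by name at runtime.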
mibBuilder.exportSymbols("ADIC-INTELLIGENT-STORAGE-MIB", powerSupplyTable=powerSupplyTable, powerSupplyEntry=powerSupplyEntry, sml=sml, powerSupplyREDId=powerSupplyREDId, temperatureSensorEntry=temperatureSensorEntry, componentLocation=componentLocation, voltageSensorNominalLo=voltageSensorNominalLo, temperatureSensorWarningHi=temperatureSensorWarningHi, intelligent=intelligent, RowStatus=RowStatus, AdicVoltageType=AdicVoltageType, software=software, agentModifiers=agentModifiers, shutdownSequenceInitiated=shutdownSequenceInitiated, coolingFanName=coolingFanName, voltageSensorTable=voltageSensorTable, trapSequenceNumber=trapSequenceNumber, trapIntendedUsage=trapIntendedUsage, componentIpAddress=componentIpAddress, globalData=globalData, temperatureSensorNominalHi=temperatureSensorNominalHi, productName=productName, powerSupplyRatedVoltage=powerSupplyRatedVoltage, AdicAgentStatus=AdicAgentStatus, voltageSensorWarningLo=voltageSensorWarningLo, agentGetTimeOut=agentGetTimeOut, coolingFanLocation=coolingFanLocation, AdicGlobalId=AdicGlobalId, voltageSensorStatus=voltageSensorStatus, AdicMibVersion=AdicMibVersion, powerSupplyLocation=powerSupplyLocation, productLibraryClassChange=productLibraryClassChange, AdicTrapSeverity=AdicTrapSeverity, storage=storage, componentEntry=componentEntry, coolingFanIndex=coolingFanIndex, temperatureSensorDegreesCelsius=temperatureSensorDegreesCelsius, voltageSensorLocation=voltageSensorLocation, agentRefreshRate=agentRefreshRate, coolingFanNominalHi=coolingFanNominalHi, AdicInterfaceType=AdicInterfaceType, componentId=componentId, temperatureSensorIndex=temperatureSensorIndex, coolingFanStatus=coolingFanStatus, AdicDriveStatus=AdicDriveStatus, coolingFanREDId=coolingFanREDId, trapPayloadEntry=trapPayloadEntry, agentTimeStamp=agentTimeStamp, componentREDId=componentREDId, powerAndCooling=powerAndCooling, voltageSensorEntry=voltageSensorEntry, coolingFanWarningHi=coolingFanWarningHi, AdicDateAndTime=AdicDateAndTime, componentGeoAddrBlade=componentGeoAddrBlade, notification=notification, productDisplayVersion=productDisplayVersion, componentControl=componentControl, AdicDoorStatus=AdicDoorStatus, componentGeoAddrChassis=componentGeoAddrChassis, productSnmpAgentVersion=productSnmpAgentVersion, components=components, agentLastGlobalStatus=agentLastGlobalStatus, temperatureSensorNominalLo=temperatureSensorNominalLo, voltageSensorType=voltageSensorType, componentGeoAddrAisle=componentGeoAddrAisle, network=network, componentDisplayName=componentDisplayName, temperatureSensorTable=temperatureSensorTable, powerSupplyType=powerSupplyType, temperatureSensorStatus=temperatureSensorStatus, AdicREDIdentifier=AdicREDIdentifier, voltageSensorIndex=voltageSensorIndex, componentTable=componentTable, componentStatus=componentStatus, powerSupplyIndex=powerSupplyIndex, AdicSensorStatus=AdicSensorStatus, agentGlobalStatus=agentGlobalStatus, componentVendor=componentVendor, AdicComponentType=AdicComponentType, componentFirmwareVersion=componentFirmwareVersion, coolingFanNominalLo=coolingFanNominalLo, coolingFanTable=coolingFanTable, temperatureSensorREDId=temperatureSensorREDId, coolingFanWarningLo=coolingFanWarningLo, powerSupplyName=powerSupplyName, hardware=hardware, voltageSensorName=voltageSensorName, productAgentInfo=productAgentInfo, Boolean=Boolean, voltageSensorNominalHi=voltageSensorNominalHi, temperatureSensorName=temperatureSensorName, componentSn=componentSn, powerSupplyWattage=powerSupplyWattage, voltageSensorMillivolts=voltageSensorMillivolts, 
voltageSensorWarningHi=voltageSensorWarningHi, startupSequenceComplete=startupSequenceComplete, productDisplayName=productDisplayName, productLibraryClass=productLibraryClass, componentGeoAddrRack=componentGeoAddrRack, productSerialNumber=productSerialNumber, adic=adic, coolingFanEntry=coolingFanEntry, AdicEnable=AdicEnable, temperatureSensorWarningLo=temperatureSensorWarningLo, componentType=componentType, componentAdded=componentAdded, productVendor=productVendor, componentRemoved=componentRemoved, productVersion=productVersion, voltageSensorREDId=voltageSensorREDId, productMibVersion=productMibVersion, componentGeoAddrFrame=componentGeoAddrFrame, temperatureSensorLocation=temperatureSensorLocation, trapPayloadTable=trapPayloadTable, trapSummaryText=trapSummaryText, AdicOnlineStatus=AdicOnlineStatus, trapSeverity=trapSeverity, componentInfo=componentInfo, coolingFanRPM=coolingFanRPM, productDescription=productDescription)
| 123.405028
| 4,464
| 0.774735
|
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, NotificationType, iso, Counter64, ObjectIdentity, Counter32, Integer32, Unsigned32, enterprises, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ModuleIdentity, MibIdentifier, Gauge32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "NotificationType", "iso", "Counter64", "ObjectIdentity", "Counter32", "Integer32", "Unsigned32", "enterprises", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Gauge32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
adic = MibIdentifier((1, 3, 6, 1, 4, 1, 3764))
storage = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1))
intelligent = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1))
productAgentInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10))
globalData = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20))
components = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30))
software = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 100))
hardware = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200))
powerAndCooling = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200))
sml = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 300))
network = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 400))
notification = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500))
class Boolean(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("true", 1), ("false", 2))
class AdicMibVersion(DisplayString):
pass
class AdicREDIdentifier(Counter32):
pass
class AdicEnable(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
class AdicAgentStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("other", 1), ("unknown", 2), ("ok", 3), ("non-critical", 4), ("critical", 5), ("non-recoverable", 6))
class AdicOnlineStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("online", 1), ("offline", 2), ("shutdown", 3))
class AdicGlobalId(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class AdicComponentType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
namedValues = NamedValues(("mcb", 1), ("cmb", 2), ("ioBlade", 3), ("rcu", 4), ("networkChasis", 5), ("controlModule", 6), ("expansionModule", 7), ("powerSupply", 8))
class AdicInterfaceType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("scsi", 1), ("fibreChannel", 2))
class AdicSensorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("nominal", 1), ("warningLow", 2), ("warningHigh", 3), ("alarmLow", 4), ("alarmHigh", 5), ("notInstalled", 6), ("noData", 7))
class AdicVoltageType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("dc", 1), ("ac", 2))
class AdicDateAndTime(OctetString):
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(11, 11), )
class AdicTrapSeverity(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("emergency", 1), ("alarm", 2), ("warning", 3), ("notice", 4), ("informational", 5))
class AdicDoorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("open", 1), ("closed", 2), ("closedAndLocked", 3), ("closedAndUnlocked", 4), ("contollerFailed", 5), ("notInstalled", 6), ("noData", 7))
class AdicDriveStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("idle", 1), ("loading", 2), ("ejecting", 3), ("inserted", 4), ("removed", 5), ("notInstalled", 6), ("noData", 7))
class RowStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("active", 1), ("notInService", 2), ("notReady", 3), ("createAndGo", 4), ("createAndWait", 5), ("destroy", 6))
productMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 1), AdicMibVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productMibVersion.setDescription('MIB version identifier.')
productSnmpAgentVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSnmpAgentVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productSnmpAgentVersion.setDescription('SNMP agent version identifier.')
productName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productName.setStatus('mandatory')
if mibBuilder.loadTexts: productName.setDescription('Name of ADIC branded product. Uniquely identifies the product, independent of OEM.')
productDisplayName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayName.setDescription('Name of this agent for display purposes. May be customized for OEM.')
productDescription = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDescription.setStatus('mandatory')
if mibBuilder.loadTexts: productDescription.setDescription('A short description of this SNMP agent.')
productVendor = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVendor.setStatus('mandatory')
if mibBuilder.loadTexts: productVendor.setDescription('Name of the product vendor or OEM.')
productVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productVersion.setDescription('String Format: MNNO.TVBBBPP Examples 1. 091a.TR054 Version 0.91, build 54 of the RCS test code for ADIC 2. 100A.GM052 Version 1.00, build 52 of the MCB GA candidate code for ADIC M Major version number NN Minor version number O OEM (Uppercase when release candidate, otherwise lowercase) A/a - ADIC Others - Reserved) T Target G - GA Candidate Release (labeled build that is a release candidate) T - Test build (labeled build used for formal testing) D - Dev build (labeled build used for unit testing) (lower case) - specifies developer of a local build V Variant S - System R - RCS M - MCB BBB Build number (3 digit sequential number specifying exact build) PP Patch Number (Optional alphanumeric characters denoting patch level of this build if necessary)')
productDisplayVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayVersion.setDescription('The version identifier according to the vendor or OEM.')
productLibraryClass = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 10))).clone(namedValues=NamedValues(("basic", 1), ("intelligent", 2), ("virtual", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: productLibraryClass.setStatus('mandatory')
if mibBuilder.loadTexts: productLibraryClass.setDescription('Basic library includes minimal connectivity hardware. Intelligent library includes SAN appliances and value-added features.')
productSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: productSerialNumber.setDescription('The serial number of the entire library.')
agentGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 1), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentGlobalStatus.setDescription('Current overall status of the agent.')
agentLastGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 2), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentLastGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentLastGlobalStatus.setDescription('The status before the current status which induced an initiative to issue a global status change trap.')
agentTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: agentTimeStamp.setDescription('The last time that the agent values have been updated. Universal time in seconds since UTC 1/1/70.')
agentGetTimeOut = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGetTimeOut.setStatus('mandatory')
if mibBuilder.loadTexts: agentGetTimeOut.setDescription('Suggested time out in milliseconds for how long an SNMP management application should wait while attempting to poll the SNMP agent.')
agentModifiers = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentModifiers.setStatus('mandatory')
if mibBuilder.loadTexts: agentModifiers.setDescription('Agent functional modifiers; when a bit is set, the corresponding modifier is active. ----------------------------------------------------- Bit 3 => Agent in debug mode. ----------------------------------------------------- All other bits are product specific.')
agentRefreshRate = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentRefreshRate.setStatus('mandatory')
if mibBuilder.loadTexts: agentRefreshRate.setDescription('Rate in seconds at which the agent cached data is being updated.')
componentTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10), )
if mibBuilder.loadTexts: componentTable.setStatus('mandatory')
if mibBuilder.loadTexts: componentTable.setDescription("General information about the system's components, including the unique identifiers. The structure of this table is based on the Fibre Alliance MIB connUnitEntry.")
componentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"))
if mibBuilder.loadTexts: componentEntry.setStatus('mandatory')
if mibBuilder.loadTexts: componentEntry.setDescription('A component entry containing objects for a particular component.')
componentId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 1), AdicGlobalId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentId.setStatus('mandatory')
if mibBuilder.loadTexts: componentId.setDescription('The unique identification for this component among those within this proxy domain.')
componentType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 2), AdicComponentType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentType.setStatus('mandatory')
if mibBuilder.loadTexts: componentType.setDescription('The type of this component.')
componentDisplayName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: componentDisplayName.setDescription('Name of this component for display purposes. Different OEMs may have different display names for the same ADIC product.')
componentInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentInfo.setStatus('mandatory')
if mibBuilder.loadTexts: componentInfo.setDescription('A display string containing information about this component.')
componentLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentLocation.setStatus('mandatory')
if mibBuilder.loadTexts: componentLocation.setDescription('Location information for this component.')
componentVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentVendor.setStatus('mandatory')
if mibBuilder.loadTexts: componentVendor.setDescription('Name of the vendor of this component.')
componentSn = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentSn.setStatus('mandatory')
if mibBuilder.loadTexts: componentSn.setDescription('The serial number for this component.')
componentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("unused", 2), ("ok", 3), ("warning", 4), ("failed", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: componentStatus.setDescription('Overall status of the component.')
componentControl = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("resetColdStart", 1), ("resetWarmStart", 2), ("offline", 3), ("online", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentControl.setStatus('mandatory')
if mibBuilder.loadTexts: componentControl.setDescription("This object is used to control the addressed connUnit. NOTE: 'Cold Start' and 'Warm Start' are as defined in MIB II and are not meant to be a factory reset. resetColdStart: the addressed unit performs a 'Cold Start' reset. resetWarmStart: the addressed unit performs a 'Warm Start' reset. offline: the addressed unit puts itself into an implementation dependent 'offline' state. online: the addressed unit puts itself into an implementation dependent 'online' state.")
componentREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentREDId.setStatus('mandatory')
if mibBuilder.loadTexts: componentREDId.setDescription('Runtime Error Detection identifier for this component.')
componentFirmwareVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentFirmwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: componentFirmwareVersion.setDescription('Firmware version (or level) for this component.')
componentGeoAddrAisle = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrAisle.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrAisle.setDescription('The aisle number where this component is located. A negative value indicates that an aisle number is not applicable to this component.')
componentGeoAddrFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrFrame.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrFrame.setDescription('The frame number where this component is located. A negative value indicates that a frame number is not applicable to this component.')
componentGeoAddrRack = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrRack.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrRack.setDescription('The rack number where this component is located. A negative value indicates that a rack number is not applicable to this component.')
componentGeoAddrChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrChassis.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrChassis.setDescription('The chassis number where this component is located. A negative value indicates that a chassis number is not applicable to this component.')
componentGeoAddrBlade = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrBlade.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrBlade.setDescription('The blade number within the network chassis where this component is located. A negative value indicates that a blade number is not applicable to this component.')
componentIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 17), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: componentIpAddress.setDescription('IP address of this component. If the component has no IP address, this object returns 0.0.0.0. The address may refer to an internal network not accessible to an external management application.')
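# --- Added illustration (not produced by the MIB compiler) -----------------
# Hedged sketch of walking the component table defined above with pysnmp's
# high-level API.  The host name and community string are placeholders, and
# pysnmp must be able to locate this MIB for the symbolic lookup to resolve.
# The helper is never called by the module itself.
def _demoWalkComponentTable(host='library.example.com', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    query = ObjectType(ObjectIdentity('ADIC-INTELLIGENT-STORAGE-MIB',
                                      'componentDisplayName'))
    for errorIndication, errorStatus, errorIndex, varBinds in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(), query,
            lexicographicMode=False):
        if errorIndication:
            print(errorIndication)
            break
        for varBind in varBinds:
            print(' = '.join(x.prettyPrint() for x in varBind))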
powerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10), )
if mibBuilder.loadTexts: powerSupplyTable.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyTable.setDescription('** This table is optional ** Table of the power supplies.')
powerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"))
if mibBuilder.loadTexts: powerSupplyEntry.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific power supply.')
powerSupplyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyIndex.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyIndex.setDescription('** This object is optional ** Index of this power supply within the component specified by componentId.')
powerSupplyName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyName.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyName.setDescription('** This object is optional ** Display name of this power supply.')
powerSupplyWattage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyWattage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyWattage.setDescription('** This object is optional ** Maximum power output of this power supply. Units are Watts.')
powerSupplyType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 4), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyType.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyType.setDescription('** This object is optional ** DC or AC power supply?')
powerSupplyREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 5), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyREDId.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this power supply.')
powerSupplyRatedVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setDescription('** This object is optional ** Rated output voltage in millivolts of this power supply.')
powerSupplyLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyLocation.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyLocation.setDescription('** This object is optional ** Physical location of this power supply.')
voltageSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20), )
if mibBuilder.loadTexts: voltageSensorTable.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorTable.setDescription('** This table is optional ** Table of the voltage sensors.')
voltageSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "voltageSensorIndex"))
if mibBuilder.loadTexts: voltageSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific voltage sensor.')
voltageSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorIndex.setDescription('** This object is optional ** Index of this voltage sensor within the component specified by componentId.')
voltageSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorName.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorName.setDescription('** This object is optional ** Display name of this voltage sensor.')
voltageSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorStatus.setDescription('** This object is optional ** What is the state of this voltage sensor? Is the voltage in the nominal, warning or alarm region?')
voltageSensorMillivolts = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorMillivolts.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorMillivolts.setDescription('** This object is optional ** What is the voltage in millivolts of this voltage sensor?')
voltageSensorType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 5), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorType.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorType.setDescription('** This object is optional ** DC or AC voltage sensor?')
voltageSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalLo.setDescription('** This object is optional ** Lower voltage limit of the nominal state for this voltage sensor. Units are millivolts.')
voltageSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalHi.setDescription('** This object is optional ** Upper voltage limit of the nominal state for this voltage sensor. Units are millivolts.')
voltageSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningLo.setDescription('** This object is optional ** Lower voltage limit of the warning state for this voltage sensor. Units are millivolts. If the voltage falls below this limit, the sensor enters the alarm state.')
voltageSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningHi.setDescription('** This object is optional ** Upper voltage limit of the warning state for this voltage sensor. Units are millivolts. If the voltage rises above this limit, the sensor enters the alarm state.')
voltageSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorLocation.setDescription('** This object is optional ** Physical location of the voltage sensor.')
voltageSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 11), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this voltage sensor.')
temperatureSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30), )
if mibBuilder.loadTexts: temperatureSensorTable.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorTable.setDescription('** This table is optional ** Table of the temperature sensors in each component.')
temperatureSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "temperatureSensorIndex"))
if mibBuilder.loadTexts: temperatureSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific sensor.')
temperatureSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorIndex.setDescription('** This object is optional ** Index of this temperatureSensor within the component specified by componentId.')
temperatureSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorName.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorName.setDescription('** This object is optional ** Display name of this temperatureSensor.')
temperatureSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorStatus.setDescription('** This object is optional ** What is the state of this temperatureSensor? Is the temperature in the nominal, warning or alarm region?')
temperatureSensorDegreesCelsius = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setDescription('** This object is optional ** The temperature in degrees Celsius for this temperature sensor.')
temperatureSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalLo.setDescription('** This object is optional ** Lower temperature limit of the nominal state for this temperature sensor. Units are degrees Celsius.')
temperatureSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalHi.setDescription('** This object is optional ** Upper temperature limit of the nominal state for this temperature sensor. Units are degrees Celsius.')
temperatureSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningLo.setDescription('** This object is optional ** Lower temperature limit of the warning state for this temperature sensor. Units are degrees Celsius. If the temperature falls below this limit, the sensor enters the alarm state.')
temperatureSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningHi.setDescription('** This object is optional ** Upper temperature limit of the warning state for this temperature sensor. Units are degrees Celsius. If the temperature rises above this limit, the sensor enters the alarm state.')
temperatureSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorLocation.setDescription('** This object is optional ** Physical location of this temperature sensor.')
temperatureSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this temperature sensor.')
coolingFanTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40), )
if mibBuilder.loadTexts: coolingFanTable.setStatus('optional')
if mibBuilder.loadTexts: coolingFanTable.setDescription('** This table is optional ** Table of cooling fans in the library.')
coolingFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "coolingFanIndex"))
if mibBuilder.loadTexts: coolingFanEntry.setStatus('optional')
if mibBuilder.loadTexts: coolingFanEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific cooling fan.')
coolingFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanIndex.setStatus('optional')
if mibBuilder.loadTexts: coolingFanIndex.setDescription('** This object is optional ** Index of this cooling fan within the component specified by componentId.')
coolingFanName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanName.setStatus('optional')
if mibBuilder.loadTexts: coolingFanName.setDescription('** This object is optional ** Display name of this coolingFan.')
coolingFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanStatus.setStatus('optional')
if mibBuilder.loadTexts: coolingFanStatus.setDescription('** This object is optional ** Is the fan speed in the nominal, warning or alarm region?')
coolingFanRPM = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanRPM.setStatus('optional')
if mibBuilder.loadTexts: coolingFanRPM.setDescription('** This object is optional ** The fan speed in revolutions per minute.')
coolingFanNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalLo.setDescription('** This object is optional ** Lower fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalHi.setDescription('** This object is optional ** Upper fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningLo.setDescription('** This object is optional ** Lower fan speed limit of the warning state for this fan. Units are RPM. If the speed falls below this limit, the fan enters the alarmLow state.')
coolingFanWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningHi.setDescription('** This object is optional ** Upper fan speed limit of the warning state for this fan. Units are RPM. If the speed rises above this limit, the fan enters the alarmHigh state.')
coolingFanLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanLocation.setStatus('optional')
if mibBuilder.loadTexts: coolingFanLocation.setDescription('** This object is optional ** Physical location of this fan.')
coolingFanREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanREDId.setStatus('optional')
if mibBuilder.loadTexts: coolingFanREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this fan.')
trapPayloadTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10), )
if mibBuilder.loadTexts: trapPayloadTable.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadTable.setDescription('Defines objects common to all trap payloads.')
trapPayloadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "trapSequenceNumber"))
if mibBuilder.loadTexts: trapPayloadEntry.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadEntry.setDescription('Each entry contains the common payload information for a specific trap.')
trapSequenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSequenceNumber.setStatus('mandatory')
if mibBuilder.loadTexts: trapSequenceNumber.setDescription('')
trapSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: trapSeverity.setDescription('')
trapSummaryText = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSummaryText.setStatus('mandatory')
if mibBuilder.loadTexts: trapSummaryText.setDescription('')
trapIntendedUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("public", 1), ("triggerRefresh", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapIntendedUsage.setStatus('mandatory')
if mibBuilder.loadTexts: trapIntendedUsage.setDescription("The value of this qualifier aids the management application in determining how to respond to the trap. If the value is public(1), the information is intended to be propagated to external observers, such as sending email. If the value is triggerRefresh(2), the information is intended to update the management application's data model, but not necessarily propagated to external observers.")
startupSequenceComplete = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,500)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: startupSequenceComplete.setDescription('The component indicated by the value of componentId has successfully completed its startup sequence.')
shutdownSequenceInitiated = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,501)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: shutdownSequenceInitiated.setDescription('The component indicated by the value of componentId has initiated its shutdown sequence.')
componentAdded = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,502)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentAdded.setDescription('The component indicated by the value of componentId has been added to the library.')
componentRemoved = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,503)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentRemoved.setDescription('The component indicated by the value of componentId has been removed from the library.')
productLibraryClassChange = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,504)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"), ("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"))
if mibBuilder.loadTexts: productLibraryClassChange.setDescription('The product library class has changed. This occurs when connectivity hardware is added or removed. The payload contains the productLibraryClass before and after the change.')
mibBuilder.exportSymbols("ADIC-INTELLIGENT-STORAGE-MIB", powerSupplyTable=powerSupplyTable, powerSupplyEntry=powerSupplyEntry, sml=sml, powerSupplyREDId=powerSupplyREDId, temperatureSensorEntry=temperatureSensorEntry, componentLocation=componentLocation, voltageSensorNominalLo=voltageSensorNominalLo, temperatureSensorWarningHi=temperatureSensorWarningHi, intelligent=intelligent, RowStatus=RowStatus, AdicVoltageType=AdicVoltageType, software=software, agentModifiers=agentModifiers, shutdownSequenceInitiated=shutdownSequenceInitiated, coolingFanName=coolingFanName, voltageSensorTable=voltageSensorTable, trapSequenceNumber=trapSequenceNumber, trapIntendedUsage=trapIntendedUsage, componentIpAddress=componentIpAddress, globalData=globalData, temperatureSensorNominalHi=temperatureSensorNominalHi, productName=productName, powerSupplyRatedVoltage=powerSupplyRatedVoltage, AdicAgentStatus=AdicAgentStatus, voltageSensorWarningLo=voltageSensorWarningLo, agentGetTimeOut=agentGetTimeOut, coolingFanLocation=coolingFanLocation, AdicGlobalId=AdicGlobalId, voltageSensorStatus=voltageSensorStatus, AdicMibVersion=AdicMibVersion, powerSupplyLocation=powerSupplyLocation, productLibraryClassChange=productLibraryClassChange, AdicTrapSeverity=AdicTrapSeverity, storage=storage, componentEntry=componentEntry, coolingFanIndex=coolingFanIndex, temperatureSensorDegreesCelsius=temperatureSensorDegreesCelsius, voltageSensorLocation=voltageSensorLocation, agentRefreshRate=agentRefreshRate, coolingFanNominalHi=coolingFanNominalHi, AdicInterfaceType=AdicInterfaceType, componentId=componentId, temperatureSensorIndex=temperatureSensorIndex, coolingFanStatus=coolingFanStatus, AdicDriveStatus=AdicDriveStatus, coolingFanREDId=coolingFanREDId, trapPayloadEntry=trapPayloadEntry, agentTimeStamp=agentTimeStamp, componentREDId=componentREDId, powerAndCooling=powerAndCooling, voltageSensorEntry=voltageSensorEntry, coolingFanWarningHi=coolingFanWarningHi, AdicDateAndTime=AdicDateAndTime, componentGeoAddrBlade=componentGeoAddrBlade, notification=notification, productDisplayVersion=productDisplayVersion, componentControl=componentControl, AdicDoorStatus=AdicDoorStatus, componentGeoAddrChassis=componentGeoAddrChassis, productSnmpAgentVersion=productSnmpAgentVersion, components=components, agentLastGlobalStatus=agentLastGlobalStatus, temperatureSensorNominalLo=temperatureSensorNominalLo, voltageSensorType=voltageSensorType, componentGeoAddrAisle=componentGeoAddrAisle, network=network, componentDisplayName=componentDisplayName, temperatureSensorTable=temperatureSensorTable, powerSupplyType=powerSupplyType, temperatureSensorStatus=temperatureSensorStatus, AdicREDIdentifier=AdicREDIdentifier, voltageSensorIndex=voltageSensorIndex, componentTable=componentTable, componentStatus=componentStatus, powerSupplyIndex=powerSupplyIndex, AdicSensorStatus=AdicSensorStatus, agentGlobalStatus=agentGlobalStatus, componentVendor=componentVendor, AdicComponentType=AdicComponentType, componentFirmwareVersion=componentFirmwareVersion, coolingFanNominalLo=coolingFanNominalLo, coolingFanTable=coolingFanTable, temperatureSensorREDId=temperatureSensorREDId, coolingFanWarningLo=coolingFanWarningLo, powerSupplyName=powerSupplyName, hardware=hardware, voltageSensorName=voltageSensorName, productAgentInfo=productAgentInfo, Boolean=Boolean, voltageSensorNominalHi=voltageSensorNominalHi, temperatureSensorName=temperatureSensorName, componentSn=componentSn, powerSupplyWattage=powerSupplyWattage, voltageSensorMillivolts=voltageSensorMillivolts, 
voltageSensorWarningHi=voltageSensorWarningHi, startupSequenceComplete=startupSequenceComplete, productDisplayName=productDisplayName, productLibraryClass=productLibraryClass, componentGeoAddrRack=componentGeoAddrRack, productSerialNumber=productSerialNumber, adic=adic, coolingFanEntry=coolingFanEntry, AdicEnable=AdicEnable, temperatureSensorWarningLo=temperatureSensorWarningLo, componentType=componentType, componentAdded=componentAdded, productVendor=productVendor, componentRemoved=componentRemoved, productVersion=productVersion, voltageSensorREDId=voltageSensorREDId, productMibVersion=productMibVersion, componentGeoAddrFrame=componentGeoAddrFrame, temperatureSensorLocation=temperatureSensorLocation, trapPayloadTable=trapPayloadTable, trapSummaryText=trapSummaryText, AdicOnlineStatus=AdicOnlineStatus, trapSeverity=trapSeverity, componentInfo=componentInfo, coolingFanRPM=coolingFanRPM, productDescription=productDescription)
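A hedged sketch of how an SNMP application might consume the compiled module above: pysnmp's MibBuilder loads it from disk and a MibViewController resolves symbolic names to numeric OIDs. The directory path below is a placeholder, and none of this is part of the generated file itself.

    from pysnmp.smi import builder, view, rfc1902

    mibBuilder = builder.MibBuilder()
    # placeholder path; point this at wherever ADIC-INTELLIGENT-STORAGE-MIB.py is saved
    mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
    mibBuilder.loadModules('ADIC-INTELLIGENT-STORAGE-MIB')

    mibView = view.MibViewController(mibBuilder)
    identity = rfc1902.ObjectIdentity('ADIC-INTELLIGENT-STORAGE-MIB', 'agentGlobalStatus', 0)
    identity.resolveWithMib(mibView)
    print(identity.getOid())   # 1.3.6.1.4.1.3764.1.1.20.1.0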
| true
| true
|
790abf5c731926a9cb18395576cca3e79e472422
| 1,001
|
py
|
Python
|
06-sistemaLinear/sistemaLinear_v11/teste/testeCores/cores2.py
|
jonasht/python
|
68cf6bd7e27d71a104917d402bbd443afb82810d
|
[
"MIT"
] | null | null | null |
06-sistemaLinear/sistemaLinear_v11/teste/testeCores/cores2.py
|
jonasht/python
|
68cf6bd7e27d71a104917d402bbd443afb82810d
|
[
"MIT"
] | null | null | null |
06-sistemaLinear/sistemaLinear_v11/teste/testeCores/cores2.py
|
jonasht/python
|
68cf6bd7e27d71a104917d402bbd443afb82810d
|
[
"MIT"
] | null | null | null |
import imp
from tkinter import *
from sys import exit
from teste.testeCores.corFunc import formatar
conta2x2 = 'x2y=5\n3x-5y=4'
root = Tk()
text = Text(root, width=20, height=10)
text.config(font='arial 20 bold')
text.insert(END, conta2x2)
text.pack()
def q_evento(event):
exit()
root.bind('q', q_evento)
cs = conta2x2.split('\n')
print('cs', cs)
posicao = cs[0].find('y')
print('posicao:', posicao)
p1 = p2 = '1.'
p1 += str(posicao)
p2 += str(posicao+1)
print('p1:', p1, 'p2:', p2)
conta = conta2x2.split('\n')
formatado = list()
text.config(background='black', foreground='white')
for i, c in enumerate(conta):
formatado.append(formatar(i, c))
fs = formatado[0][0]
print(fs)
print(fs['p1'])
for f1 in formatado:
for f in f1:
text.tag_add(f['nome'], f['p1'], f['p2'])
text.tag_config(f['nome'], foreground=f['fg'])
# text.tag_add("y1", p1, p2)
# text.tag_config("y1", background="black", foreground="green")
text.tag_config('1', foreground="green")
root.mainloop()
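The script above relies on formatar() from the project-local teste.testeCores.corFunc module, which is not included in this record. Judging from how the loop consumes its result, it returns, per input line, a list of dicts with 'nome' (tag name), 'p1'/'p2' (Tk Text "line.column" start and end indices) and 'fg' (color). The stand-in below is only a guess at that shape, shown to make the tagging loop easier to follow; it is not the real helper.

    def formatar_stub(linha, texto):
        """Hypothetical stand-in for corFunc.formatar: tag every x/y variable."""
        tags = []
        for coluna, ch in enumerate(texto):
            if ch in 'xy':
                tags.append({
                    'nome': '%d_%d' % (linha, coluna),
                    'p1': '%d.%d' % (linha + 1, coluna),       # Tk Text lines are 1-based
                    'p2': '%d.%d' % (linha + 1, coluna + 1),   # end index is exclusive
                    'fg': 'green' if ch == 'y' else 'red',
                })
        return tags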
| 21.76087
| 63
| 0.656344
|
import imp
from tkinter import *
from sys import exit
from teste.testeCores.corFunc import formatar
conta2x2 = 'x2y=5\n3x-5y=4'
root = Tk()
text = Text(root, width=20, height=10)
text.config(font='arial 20 bold')
text.insert(END, conta2x2)
text.pack()
def q_evento(event):
exit()
root.bind('q', q_evento)
cs = conta2x2.split('\n')
print('cs', cs)
posicao = cs[0].find('y')
print('posicao:', posicao)
p1 = p2 = '1.'
p1 += str(posicao)
p2 += str(posicao+1)
print('p1:', p1, 'p2:', p2)
conta = conta2x2.split('\n')
formatado = list()
text.config(background='black', foreground='white')
for i, c in enumerate(conta):
formatado.append(formatar(i, c))
fs = formatado[0][0]
print(fs)
print(fs['p1'])
for f1 in formatado:
for f in f1:
text.tag_add(f['nome'], f['p1'], f['p2'])
text.tag_config(f['nome'], foreground=f['fg'])
text.tag_config('1', foreground="green")
root.mainloop()
| true
| true
|
790abf682a8b8ee8b95980765c04ff7227a1992d
| 7,173
|
py
|
Python
|
test/expected/python.asyncio/service_extension_same_file/f_Pinger.py
|
trevorackerman-wk/frugal
|
960f87b581d21a69dc889bcfb526ea3929acac22
|
[
"Apache-2.0"
] | null | null | null |
test/expected/python.asyncio/service_extension_same_file/f_Pinger.py
|
trevorackerman-wk/frugal
|
960f87b581d21a69dc889bcfb526ea3929acac22
|
[
"Apache-2.0"
] | null | null | null |
test/expected/python.asyncio/service_extension_same_file/f_Pinger.py
|
trevorackerman-wk/frugal
|
960f87b581d21a69dc889bcfb526ea3929acac22
|
[
"Apache-2.0"
] | null | null | null |
#
# Autogenerated by Frugal Compiler (3.4.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from . import f_BasePinger
from .ttypes import *
class Iface(f_BasePinger.Iface):
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
pass
class Client(f_BasePinger.Client, Iface):
def __init__(self, provider, middleware=None):
"""
Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Client, self).__init__(provider, middleware=middleware)
middleware += provider.get_middleware()
self._methods.update({
'ping': Method(self._ping, middleware),
})
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
return await self._methods['ping']([ctx])
async def _ping(self, ctx):
memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
oprot = self._protocol_factory.get_protocol(memory_buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.CALL, 0)
args = ping_args()
args.write(oprot)
oprot.writeMessageEnd()
response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
iprot = self._protocol_factory.get_protocol(response_transport)
iprot.read_response_headers(ctx)
_, mtype, _ = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
class Processor(f_BasePinger.Processor):
def __init__(self, handler, middleware=None):
"""
Create a new Processor.
Args:
handler: Iface
"""
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Processor, self).__init__(handler, middleware=middleware)
self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
class _ping(FProcessorFunction):
def __init__(self, handler, lock):
super(_ping, self).__init__(handler, lock)
async def process(self, ctx, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
ret = self._handler([ctx])
if inspect.iscoroutine(ret):
ret = await ret
except TApplicationException as ex:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", exception=ex)
return
except Exception as e:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
raise
async with self._lock:
try:
oprot.write_response_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.REPLY, 0)
result.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
except TTransportException as e:
# catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
raise _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
else:
raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
if exception is not None:
x = exception
else:
x = TApplicationException(type=ex_code, message=message)
oprot.write_response_headers(ctx)
oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
x.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
return x
class ping_args(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
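A hedged sketch of how this generated service might be wired up on the server side, using only the classes defined in this file: a handler implements Iface and is wrapped by Processor. Binding the processor to a frugal transport or server is deployment-specific and intentionally omitted.

    class PingHandler(Iface):
        async def ping(self, ctx):
            # ctx is the FContext supplied by the caller
            print('ping received')

    processor = Processor(PingHandler())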
| 31.599119
| 147
| 0.626516
|
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from . import f_BasePinger
from .ttypes import *
class Iface(f_BasePinger.Iface):
async def ping(self, ctx):
pass
class Client(f_BasePinger.Client, Iface):
def __init__(self, provider, middleware=None):
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Client, self).__init__(provider, middleware=middleware)
middleware += provider.get_middleware()
self._methods.update({
'ping': Method(self._ping, middleware),
})
async def ping(self, ctx):
return await self._methods['ping']([ctx])
async def _ping(self, ctx):
memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
oprot = self._protocol_factory.get_protocol(memory_buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.CALL, 0)
args = ping_args()
args.write(oprot)
oprot.writeMessageEnd()
response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
iprot = self._protocol_factory.get_protocol(response_transport)
iprot.read_response_headers(ctx)
_, mtype, _ = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
class Processor(f_BasePinger.Processor):
def __init__(self, handler, middleware=None):
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Processor, self).__init__(handler, middleware=middleware)
self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
class _ping(FProcessorFunction):
def __init__(self, handler, lock):
super(_ping, self).__init__(handler, lock)
async def process(self, ctx, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
ret = self._handler([ctx])
if inspect.iscoroutine(ret):
ret = await ret
except TApplicationException as ex:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", exception=ex)
return
except Exception as e:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
raise
async with self._lock:
try:
oprot.write_response_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.REPLY, 0)
result.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
except TTransportException as e:
if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
raise _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
else:
raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
if exception is not None:
x = exception
else:
x = TApplicationException(type=ex_code, message=message)
oprot.write_response_headers(ctx)
oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
x.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
return x
class ping_args(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| true
| true
|
790ac1baf35d3683f9b9d8baab2ab05ad107a599
| 542
|
py
|
Python
|
app.py
|
risatino/seadogz
|
182bee6944726477fdae195cf2446a6fedbb9c5b
|
[
"MIT"
] | null | null | null |
app.py
|
risatino/seadogz
|
182bee6944726477fdae195cf2446a6fedbb9c5b
|
[
"MIT"
] | 7
|
2018-07-03T00:33:42.000Z
|
2018-07-24T20:41:49.000Z
|
app.py
|
risatino/seadogz
|
182bee6944726477fdae195cf2446a6fedbb9c5b
|
[
"MIT"
] | null | null | null |
#####################
# IMPORT DEPENDENCIES
######################
# flask (server)
from flask import (
Flask,
render_template,
jsonify,
request,
redirect)
#######################
# FLASK SET-UP
#######################
app = Flask(__name__)
#######################
# FLASK ROUTES
#######################
@app.route("/")
def index():
return render_template("index.html")
# @app.route("/outcomes")
# def charts():
# return render_template("outcomes.html")
if __name__ == "__main__":
app.run(debug = True)
| 17.483871
| 45
| 0.479705
| true
| true
|
|
790ac599f474e066c512043357817061f888ef0b
| 2,411
|
py
|
Python
|
plugins/CropPermutations/__init__.py
|
spongezhang/maskgen
|
7284e300d1cb326a5349879de0bace9cfa8788a8
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/CropPermutations/__init__.py
|
spongezhang/maskgen
|
7284e300d1cb326a5349879de0bace9cfa8788a8
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/CropPermutations/__init__.py
|
spongezhang/maskgen
|
7284e300d1cb326a5349879de0bace9cfa8788a8
|
[
"BSD-3-Clause"
] | null | null | null |
from PIL import Image
import numpy
from random import randint
import cv2
"""
A plugin used to create a set of variable specifications for permutation groups.
"""
def transform(img,source,target,**kwargs):
cv_image = numpy.array(img)
shape = cv_image.shape
snapto8 = 'eightbit_boundary' in kwargs and kwargs['eightbit_boundary'] == 'yes'
percentageWidth = float(kwargs['percentage_width'])
percentageHeight = float(kwargs['percentage_height'])
divisionsWidth = float(kwargs['divisions_width'] if 'divisions_width' in kwargs else shape[1])
divisionsHeight = float(kwargs['divisions_height'] if 'divisions_height' in kwargs else shape[0])
pixelWidth = int(shape[1] * percentageWidth)
pixelHeight = int(shape[0] * percentageHeight)
if snapto8:
pixelWidth = pixelWidth - pixelWidth % 8
pixelHeight = pixelHeight - pixelHeight % 8
incrementsWidth = max(8,int(pixelWidth/divisionsWidth))
incrementsHeight = max(8,int(pixelHeight/divisionsHeight))
crop_x = { "type": "list", "values" : [i for i in xrange(incrementsWidth,pixelWidth,incrementsWidth)]}
crop_y = { "type": "list", "values" : [i for i in xrange(incrementsHeight, pixelHeight, incrementsHeight)]}
return {'crop_x':crop_x,'crop_y':crop_y, 'crop_width':pixelWidth,'crop_height':pixelHeight},None
def operation():
return {
'category': 'Select',
'name': 'SelectRegion',
'description':'Select a region to crop',
'software':'OpenCV',
'version':cv2.__version__,
'arguments':{'percentage_width':
{'type': "float[0:0.5]", 'description':'the percentage of pixels to remove horizontal'},
'percentage_height':
{'type': "float[0:0.5]", 'description':'the percentage of pixels to remove vertically'},
'divisions_width':
{'type': "int[0:100000]", 'description': 'the number samples in the x direction'},
'divisions_height':
{'type': "int[0:100000]", 'description': 'the number of samples in the y direction'},
'eightbit_boundary':
{'type': "yesno", 'defaultvalue':'no', 'description':'Snap to 8 bit boundary'}
},
'transitions': [
'image.image'
]
}
| 48.22
| 115
| 0.608461
|
from PIL import Image
import numpy
from random import randint
import cv2
def transform(img,source,target,**kwargs):
cv_image = numpy.array(img)
shape = cv_image.shape
snapto8 = 'eightbit_boundary' in kwargs and kwargs['eightbit_boundary'] == 'yes'
percentageWidth = float(kwargs['percentage_width'])
percentageHeight = float(kwargs['percentage_height'])
divisionsWidth = float(kwargs['divisions_width'] if 'divisions_width' in kwargs else shape[1])
divisionsHeight = float(kwargs['divisions_height'] if 'divisions_height' in kwargs else shape[0])
pixelWidth = int(shape[1] * percentageWidth)
pixelHeight = int(shape[0] * percentageHeight)
if snapto8:
pixelWidth = pixelWidth - pixelWidth % 8
pixelHeight = pixelHeight - pixelHeight % 8
incrementsWidth = max(8,int(pixelWidth/divisionsWidth))
incrementsHeight = max(8,int(pixelHeight/divisionsHeight))
crop_x = { "type": "list", "values" : [i for i in xrange(incrementsWidth,pixelWidth,incrementsWidth)]}
crop_y = { "type": "list", "values" : [i for i in xrange(incrementsHeight, pixelHeight, incrementsHeight)]}
return {'crop_x':crop_x,'crop_y':crop_y, 'crop_width':pixelWidth,'crop_height':pixelHeight},None
def operation():
return {
'category': 'Select',
'name': 'SelectRegion',
'description':'Select a region to crop',
'software':'OpenCV',
'version':cv2.__version__,
'arguments':{'percentage_width':
{'type': "float[0:0.5]", 'description':'the percentage of pixels to remove horizontal'},
'percentage_height':
{'type': "float[0:0.5]", 'description':'the percentage of pixels to remove vertically'},
'divisions_width':
{'type': "int[0:100000]", 'description': 'the number samples in the x direction'},
'divisions_height':
{'type': "int[0:100000]", 'description': 'the number of samples in the y direction'},
'eightbit_boundary':
{'type': "yesno", 'defaultvalue':'no', 'description':'Snap to 8 bit boundary'}
},
'transitions': [
'image.image'
]
}
| true
| true
|
790ac7e4c90ececf45e05b807d65b4c67a3baee7
| 29,340
|
py
|
Python
|
beta_nmf_minibatch.py
|
dasmy/minibatchNMF
|
f7f910e290103c26e2925426849f8bfbe75ba242
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
beta_nmf_minibatch.py
|
dasmy/minibatchNMF
|
f7f910e290103c26e2925426849f8bfbe75ba242
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
beta_nmf_minibatch.py
|
dasmy/minibatchNMF
|
f7f910e290103c26e2925426849f8bfbe75ba242
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
beta\_nmf_minibatch.py
~~~~~~~~~~~
.. topic:: Contents
The beta_nmf_minibatch module includes the betaNMF class,
fit function and theano functions to compute updates and cost.
Copyright 2014-2016 Romain Serizel
This software is distributed under the terms of the GNU Public License
version 3 (http://www.gnu.org/licenses/gpl.txt)"""
import time
import numpy as np
import theano
import base
import theano.tensor as T
import updates
import costs
class BetaNMF(object):
"""BetaNMF class
Performs nonnegative matrix factorization with mini-batch multiplicative
updates. GPGPU implementation based on Theano.
Parameters
----------
data_shape : tuple composed of integers
The shape of the data to approximate
n_components : positive integer
The number of latent components for the NMF model
beta: arbitrary float (default 2).
The beta-divergence to consider. Particular cases of interest are
* beta=2 : Euclidean distance
* beta=1 : Kullback Leibler
* beta=0 : Itakura-Saito
n_iter: positive integer
number of iterations
    fixed_factors: array of integers
Indexes of the factors that are kept fixed during the updates
* [0] : corresponds to fixed H
* [1] : corresponds to fixed W
cache1_size: integer
Size (in frames) of the first data cache.
The size is reduced to the closest multiple of the batch_size.
If set to zero the algorithm tries to fit all the data in cache
batch_size: integer
Size (in frames) of the batch for batch processing.
        The batch size has an impact on the parallelisation and the memory needed
to store partial gradients (see Schmidt et al.)
verbose: integer
        The number of iterations to wait between two computations and printing
of the cost
init_mode : string (default 'random')
        * random : initialise the factors randomly
        * custom : initialise the factors with custom value
    W : array (optional)
        Initial value for factor W when custom initialisation is used
    H : array (optional)
        Initial value for factor H when custom initialisation is used
solver : string (default 'mu_batch')
* mu_batch : mini-batch version of the MU updates.
(fully equivalent to standard NMF with MU).
        * asg_mu : Asymmetric stochastic gradient for MU [1]_
        * gsg_mu : Greedy stochastic gradient for MU [1]_
        * asag_mu : Asymmetric stochastic average gradient [2]_ for MU [1]_
        * gsag_mu : Greedy stochastic average gradient [2]_ for MU [1]_
    nb_batch_w : integer (default 1)
        number of batches on which the W update is computed
* 1 : greedy approaches [1]_
sag_memory : integer (default 0)
number of batches used to compute the average gradient
* 0 : SG approaches
* nb_batches : SAG approaches
Attributes
----------
nb_cache1 : integer
number of caches needed to fill the full data
forget_factor : float
forgetting factor for SAG
scores : array
reconstruction cost and iteration time for each iteration
factors\_ : list of arrays
The estimated factors
w : theano tensor
factor W
h_cache1 : theano tensor
part of the factor H in cache1
x_cache1 : theano tensor
data cache
References
----------
.. [#] R. Serizel, S. Essid, and G. Richard. “Mini-batch stochastic
approaches for accelerated multiplicative updates in nonnegative matrix
factorisation with beta-divergence”. Accepted for publication
In *Proc. of MLSP*, p. 5, 2016.
.. [#] Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
"""
# Constructor
def __init__(self, data_shape, n_components=50, beta=2, n_iter=50,
fixed_factors=None, cache1_size=0,
batch_size=100, verbose=0,
init_mode='random', W=None, H=None, solver='mu_batch',
nb_batch_w=1, sag_memory=0):
self.data_shape = data_shape
self.n_components = n_components
self.batch_size = batch_size
self.nb_batch = int(np.ceil(np.true_divide(data_shape[0],
self.batch_size)))
self.batch_ind = np.zeros((self.nb_batch, self.batch_size))
if cache1_size > 0:
cache1_size = min((cache1_size, data_shape[0]))
if cache1_size < self.batch_size:
raise ValueError('cache1_size should be at '
'least equal to batch_size')
self.cache1_size = int(np.ceil(cache1_size/self.batch_size * self.batch_size))
self.nb_cache1 = int(np.ceil(np.true_divide(self.data_shape[0],
self.cache1_size)))
else:
self.cache1_size = data_shape[0]
self.nb_cache1 = 1
self.n_components = np.asarray(n_components, dtype='int32')
self.beta = theano.shared(np.asarray(beta, theano.config.floatX),
name="beta")
self.eps = theano.shared(np.asarray(1e-10, theano.config.floatX),
name="eps")
self.sag_memory = sag_memory
self.forget_factor = 1./(self.sag_memory + 1)
self.verbose = verbose
self.n_iter = n_iter
self.solver = solver
self.scores = []
self.nb_batch_w = nb_batch_w
if fixed_factors is None:
fixed_factors = []
self.fixed_factors = fixed_factors
fact_ = [base.nnrandn((dim, self.n_components)) for dim in data_shape]
self.init_mode = init_mode
if self.init_mode == 'custom':
fact_[0] = H
fact_[1] = W
self.w = theano.shared(fact_[1].astype(theano.config.floatX),
name="W", borrow=True, allow_downcast=True)
self.h_cache1 = theano.shared(fact_[0][:self.cache1_size,
].astype(theano.config.floatX),
name="H cache1", borrow=True,
allow_downcast=True)
self.factors_ = fact_
self.x_cache1 = theano.shared(np.zeros((self.cache1_size,
data_shape[1])).astype(
theano.config.floatX),
name="X cache1")
self.init()
def check_shape(self):
"""Check that all the matrix have consistent shapes
"""
batch_shape = self.x_cache1.get_value().shape
dim = int(self.n_components)
if self.w.get_value().shape != (self.data_shape[1], dim):
print("Inconsistent data for W, expected {1}, found {0}".format(
self.w.get_value().shape,
(self.data_shape[1], dim)))
raise SystemExit
if self.factors_[0].shape != (self.data_shape[0], dim):
print("Inconsistent shape for H, expected {1}, found {0}".format(
self.factors_[0].shape,
(self.data_shape[0], dim)))
raise SystemExit
if self.h_cache1.get_value().shape != (batch_shape[0], dim):
print("Inconsistent shape for h_cache1, expected {1}, found {0}".format(
self.h_cache1.get_value().shape,
(batch_shape[0], dim)))
raise SystemExit
def fit(self, data, cyclic=False, warm_start=False):
"""Learns NMF model
Parameters
----------
data : ndarray with nonnegative entries
The input array
cyclic : Boolean (default False)
pick the sample cyclically
warm_start : Boolean (default False)
start from previous values
"""
self.data_shape = data.shape
if (not warm_start) & (self.init_mode != 'custom'):
print("cold start")
self.set_factors(data, fixed_factors=self.fixed_factors)
self.check_shape()
self.prepare_batch(False)
self.prepare_cache1(False)
div_func = self.get_div_function()
if self.verbose > 0:
scores = np.zeros((
int(np.floor(self.n_iter/self.verbose)) + 2, 2))
else:
scores = np.zeros((2, 2))
if self.solver == 'asag_mu' or self.solver == 'gsag_mu':
grad_func = self.get_gradient_mu_sag()
update_func = self.get_updates()
elif self.solver == 'asg_mu' or self.solver == 'gsg_mu':
grad_func = self.get_gradient_mu_sg()
update_func = self.get_updates()
elif self.solver == 'mu_batch':
grad_func = self.get_gradient_mu_batch()
update_func = self.get_updates()
tick = time.time()
score = 0
for cache_ind in range(self.nb_cache1):
current_cache_ind = np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0]])
current_cache_ind = current_cache_ind[current_cache_ind >= 0]
self.x_cache1.set_value(data[current_cache_ind, ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
current_cache_ind, ].astype(theano.config.floatX))
score += div_func['div_cache1']()
score_ind = 0
scores[0, ] = [score, time.time() - tick]
self.prepare_batch(not cyclic)
self.prepare_cache1(not cyclic)
        print('Initial score = %.2f' % score)
print('Fitting NMF model with %d iterations....' % self.n_iter)
if self.nb_cache1 == 1:
current_cache_ind = np.hstack(self.batch_ind[
self.cache1_ind[
0, self.cache1_ind[0] >= 0]])
current_cache_ind = current_cache_ind[current_cache_ind >= 0]
self.x_cache1.set_value(data[current_cache_ind, ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
current_cache_ind, ].astype(theano.config.floatX))
if self.solver == 'sag':
self.c1_grad_w.set_value(self.old_grad_w[self.cache1_ind[
0, self.cache1_ind[0] >= 0]].astype(
theano.config.floatX))
# main loop
for it in range(self.n_iter):
tick = time.time()
self.prepare_cache1(not cyclic)
score = 0
for cache_ind in range(self.nb_cache1):
if self.nb_cache1 > 1:
current_cache_ind = np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0]])
current_cache_ind = current_cache_ind[
current_cache_ind >= 0]
self.x_cache1.set_value(data[current_cache_ind, ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
current_cache_ind, ].astype(theano.config.floatX))
if self.solver == 'sag':
self.c1_grad_w.set_value(
self.old_grad_w[
self.cache1_ind[
cache_ind,
self.cache1_ind[cache_ind] >= 0]].astype(
theano.config.floatX))
for batch_i in range(self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0].shape[0]):
batch_ind = np.arange(batch_i * self.batch_size,
(batch_i + 1) * self.batch_size)
batch_ind = batch_ind[
batch_ind < current_cache_ind.shape[0]]
batch_ind = np.asarray([batch_ind[0],
batch_ind[-1] + 1]).astype(
theano.config.floatX)
if self.solver == 'mu_batch':
self.update_mu_batch_h(batch_ind,
update_func, grad_func)
if self.solver == 'asag_mu' or self.solver == 'asg_mu':
self.update_mu_sag(batch_ind,
update_func, grad_func)
if self.solver == 'gsag_mu' or self.solver == 'gsg_mu':
grad_func['grad_h'](batch_ind)
update_func['train_h'](batch_ind)
if batch_i == 0 and cache_ind == 0:
grad_func['grad_w'](batch_ind)
if self.nb_cache1 > 1:
self.factors_[0][current_cache_ind, ] =\
self.h_cache1.get_value()
else:
self.factors_[0] = self.h_cache1.get_value()
if self.solver == 'mu_batch':
self.update_mu_batch_w(update_func)
elif self.solver == 'gsag_mu' or self.solver == 'gsg_mu':
update_func['train_w']()
if self.nb_cache1 > 1:
for cache_ind in range(self.nb_cache1):
self.x_cache1.set_value(data[np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind,
self.cache1_ind[cache_ind] >= 0]]), ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0]]),
].astype(theano.config.floatX))
if (it+1) % self.verbose == 0:
score += div_func['div_cache1']()
else:
self.factors_[0] = self.h_cache1.get_value()
if (it+1) % self.verbose == 0:
score = div_func['div_cache1']()
if (it+1) % self.verbose == 0:
score_ind += 1
scores[score_ind, ] = [
score, time.time() - tick + scores[score_ind - 1, 1]]
print('Iteration %d / %d, duration=%.1fms, cost=%f'
% (it + 1,
self.n_iter,
scores[score_ind, 1] * 1000,
scores[score_ind, 0]))
tick = time.time()
score_ind += 1
scores[score_ind, ] = [
score, time.time() - tick + scores[score_ind - 1, 1]]
print('Iteration %d / %d, duration=%.1fms, cost=%f'
% (it + 1,
self.n_iter,
scores[-1, 1] * 1000,
scores[-1, 0]))
return scores
def get_div_function(self):
""" compile the theano-based divergence function"""
div_cache1 = theano.function(inputs=[],
outputs=costs.beta_div(self.x_cache1,
self.w.T,
self.h_cache1,
self.beta),
name="div c1",
allow_input_downcast=True, profile=False)
return dict(
div_cache1=div_cache1)
def get_gradient_mu_sag(self):
"""compile the theano based gradient functions for mu_sag algorithms"""
tbatch_ind = T.ivector('batch_ind')
tind = T.iscalar('ind')
grad_new = updates.gradient_w_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
up_grad_w = self.forget_factor * grad_new + (
1 - self.forget_factor) * self.grad_w
grad_w = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.grad_w, up_grad_w)},
name="grad w",
allow_input_downcast=True)
grad_new = updates.gradient_h_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.c1_grad_h, grad_new)},
name="grad h",
allow_input_downcast=True)
return dict(
grad_h=grad_h,
grad_w=grad_w)
def get_gradient_mu_sg(self):
"""compile the theano based gradient functions for mu_sg algorithms"""
tbatch_ind = T.ivector('batch_ind')
tind = T.iscalar('ind')
grad_new = updates.gradient_w_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_w = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.grad_w, grad_new)},
name="grad w",
allow_input_downcast=True)
grad_new = updates.gradient_h_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.c1_grad_h, grad_new)},
name="grad h",
allow_input_downcast=True)
return dict(
grad_h=grad_h,
grad_w=grad_w)
def get_gradient_mu_batch(self):
"""compile the theano based gradient functions for mu"""
tbatch_ind = T.ivector('batch_ind')
tind = T.iscalar('ind')
grad_new = updates.gradient_w_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_w = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.grad_w,
self.grad_w + grad_new)},
name="grad w",
allow_input_downcast=True,
on_unused_input='ignore')
grad_new = updates.gradient_h_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.c1_grad_h, grad_new)},
name="grad h",
allow_input_downcast=True)
return dict(
grad_h=grad_h,
grad_w=grad_w)
def get_updates(self):
"""compile the theano based update functions"""
tbatch_ind = T.ivector('batch_ind')
tneg = T.iscalar('neg')
tpos = T.iscalar('pos')
up_h = T.set_subtensor(self.h_cache1[tbatch_ind[0]:tbatch_ind[1], ],
updates.mu_update(self.h_cache1[
tbatch_ind[0]:tbatch_ind[1], ],
self.c1_grad_h[0, ],
self.c1_grad_h[1, ],
self.eps))
train_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.h_cache1, up_h)},
name="trainH",
allow_input_downcast=True,
on_unused_input='ignore')
update_w = updates.mu_update(self.w,
self.grad_w[0],
self.grad_w[1],
self.eps)
train_w = theano.function(inputs=[],
outputs=[],
updates={self.w: update_w},
name="trainW",
allow_input_downcast=True)
return dict(
train_h=train_h,
train_w=train_w)
def init(self):
"""Initialise theano variable to store the gradients"""
self.grad_w = theano.shared(
np.zeros((2,
self.data_shape[1],
self.n_components)).astype(theano.config.floatX),
name="gradW", borrow=True,
allow_downcast=True)
self.grad_h = np.zeros((2, self.data_shape[0], self.n_components))
self.c1_grad_h = theano.shared(
np.zeros((2,
self.batch_size,
self.n_components)).astype(theano.config.floatX),
name="c1_gradH", borrow=True,
allow_downcast=True)
def prepare_batch(self, randomize=True):
"""Arrange data for batches
Parameters
----------
randomize : boolean (default True)
Randomise the data (time-wise) before preparing batch indexes
"""
ind = - np.ones((self.nb_batch * self.batch_size, ))
ind[:self.data_shape[0], ] = np.arange(self.data_shape[0])
if randomize:
np.random.shuffle(ind[:self.data_shape[0], ])
self.batch_ind = np.reshape(ind, (self.nb_batch,
self.batch_size)).astype(int)
def prepare_cache1(self, randomize=True):
"""Arrange data for to fill cache1
Parameters
----------
randomize : boolean (default True)
            Randomise the data (time-wise) before preparing cache indexes
"""
ind = - np.ones((self.nb_cache1 *
int(np.ceil(np.true_divide(self.cache1_size,
self.batch_size)))))
ind[:self.nb_batch, ] = np.arange(self.nb_batch)
if randomize:
np.random.shuffle(ind[:self.nb_batch, ])
self.cache1_ind = np.reshape(ind, (self.nb_cache1,
int(np.ceil(np.true_divide(
self.cache1_size,
self.batch_size)))
)).astype(int)
def set_factors(self, data, W=None, H=None, fixed_factors=None):
"""Re-set theano based parameters according to the object attributes.
Parameters
----------
        W : array (optional)
            Value for factor W when custom initialisation is used
        H : array (optional)
Value for factor H when custom initialisation is used
fixed_factors : array (default Null)
list of factors that are not updated
e.g. fixed_factors = [0] -> H is not updated
fixed_factors = [1] -> W is not updated
"""
self.data_shape = data.shape
self.nb_batch = int(np.ceil(np.true_divide(self.data_shape[0],
self.batch_size)))
self.batch_ind = np.zeros((self.nb_batch, self.batch_size))
if self.cache1_size > 0 and self.cache1_size < self.data_shape[0]:
if self.cache1_size < self.batch_size:
raise ValueError('cache1_size should be at '
'least equal to batch_size')
self.cache1_size = self.cache1_size/self.batch_size * self.batch_size
self.nb_cache1 = int(np.ceil(np.true_divide(self.data_shape[0],
self.cache1_size)))
else:
self.cache1_size = self.data_shape[0]
self.nb_cache1 = 1
self.forget_factor = 1./(self.sag_memory + 1)
fact_ = [base.nnrandn((dim, self.n_components))
for dim in self.data_shape]
if H is not None:
fact_[0] = H
if W is not None:
fact_[1] = W
if fixed_factors is None:
fixed_factors = []
if 1 not in fixed_factors:
self.w = theano.shared(fact_[1].astype(theano.config.floatX),
name="W", borrow=True, allow_downcast=True)
if 0 not in fixed_factors:
self.h_cache1 = theano.shared(
fact_[0][
:self.cache1_size, ].astype(theano.config.floatX),
name="H cache1", borrow=True,
allow_downcast=True)
self.factors_[0] = fact_[0]
self.factors_ = fact_
self.x_cache1 = theano.shared(np.zeros((self.cache1_size,
self.data_shape[1])).astype(
theano.config.floatX),
name="X cache1")
self.init()
def transform(self, data, warm_start=False):
"""Project data X on the basis W
Parameters
----------
X : array
The input data
warm_start : Boolean (default False)
start from previous values
Returns
-------
H : array
Activations
"""
self.fixed_factors = [1]
if not warm_start:
print("cold start")
self.set_factors(data, fixed_factors=self.fixed_factors)
self.fit(data, warm_start=True)
return self.factors_[0]
def update_mu_sag(self, batch_ind, update_func, grad_func):
"""Update current batch with SAG based algorithms
Parameters
----------
batch_ind : array with 2 elements
:batch_ind[0]: batch start
:batch_ind[1]: batch end
update_func : Theano compiled function
Update function
grad_func : Theano compiled function
Gradient function
"""
if 0 not in self.fixed_factors:
grad_func['grad_h'](batch_ind)
update_func['train_h'](batch_ind)
if 1 not in self.fixed_factors:
grad_func['grad_w'](batch_ind)
update_func['train_w']()
def update_mu_batch_h(self, batch_ind, update_func, grad_func):
"""Update h for current batch with standard MU
Parameters
----------
batch_ind : array with 2 elements
:batch_ind[0]: batch start
:batch_ind[1]: batch end
update_func : Theano compiled function
Update function
grad_func : Theano compiled function
Gradient function
"""
if 0 not in self.fixed_factors:
grad_func['grad_h'](batch_ind)
update_func['train_h'](batch_ind)
grad_func['grad_w'](batch_ind)
    def update_mu_batch_w(self, update_func):
"""Update W with standard MU
Parameters
----------
update_func : Theano compiled function
Update function
"""
if 1 not in self.fixed_factors:
            update_func['train_w']()
self.grad_w.set_value(
np.zeros((
2,
self.data_shape[1],
self.n_components)).astype(
theano.config.floatX))
| 41.499293
| 90
| 0.496455
|
import time
import numpy as np
import theano
import base
import theano.tensor as T
import updates
import costs
class BetaNMF(object):
def __init__(self, data_shape, n_components=50, beta=2, n_iter=50,
fixed_factors=None, cache1_size=0,
batch_size=100, verbose=0,
init_mode='random', W=None, H=None, solver='mu_batch',
nb_batch_w=1, sag_memory=0):
self.data_shape = data_shape
self.n_components = n_components
self.batch_size = batch_size
self.nb_batch = int(np.ceil(np.true_divide(data_shape[0],
self.batch_size)))
self.batch_ind = np.zeros((self.nb_batch, self.batch_size))
if cache1_size > 0:
cache1_size = min((cache1_size, data_shape[0]))
if cache1_size < self.batch_size:
raise ValueError('cache1_size should be at '
'least equal to batch_size')
self.cache1_size = int(np.ceil(cache1_size/self.batch_size * self.batch_size))
self.nb_cache1 = int(np.ceil(np.true_divide(self.data_shape[0],
self.cache1_size)))
else:
self.cache1_size = data_shape[0]
self.nb_cache1 = 1
self.n_components = np.asarray(n_components, dtype='int32')
self.beta = theano.shared(np.asarray(beta, theano.config.floatX),
name="beta")
self.eps = theano.shared(np.asarray(1e-10, theano.config.floatX),
name="eps")
self.sag_memory = sag_memory
self.forget_factor = 1./(self.sag_memory + 1)
self.verbose = verbose
self.n_iter = n_iter
self.solver = solver
self.scores = []
self.nb_batch_w = nb_batch_w
if fixed_factors is None:
fixed_factors = []
self.fixed_factors = fixed_factors
fact_ = [base.nnrandn((dim, self.n_components)) for dim in data_shape]
self.init_mode = init_mode
if self.init_mode == 'custom':
fact_[0] = H
fact_[1] = W
self.w = theano.shared(fact_[1].astype(theano.config.floatX),
name="W", borrow=True, allow_downcast=True)
self.h_cache1 = theano.shared(fact_[0][:self.cache1_size,
].astype(theano.config.floatX),
name="H cache1", borrow=True,
allow_downcast=True)
self.factors_ = fact_
self.x_cache1 = theano.shared(np.zeros((self.cache1_size,
data_shape[1])).astype(
theano.config.floatX),
name="X cache1")
self.init()
def check_shape(self):
batch_shape = self.x_cache1.get_value().shape
dim = int(self.n_components)
if self.w.get_value().shape != (self.data_shape[1], dim):
print("Inconsistent data for W, expected {1}, found {0}".format(
self.w.get_value().shape,
(self.data_shape[1], dim)))
raise SystemExit
if self.factors_[0].shape != (self.data_shape[0], dim):
print("Inconsistent shape for H, expected {1}, found {0}".format(
self.factors_[0].shape,
(self.data_shape[0], dim)))
raise SystemExit
if self.h_cache1.get_value().shape != (batch_shape[0], dim):
print("Inconsistent shape for h_cache1, expected {1}, found {0}".format(
self.h_cache1.get_value().shape,
(batch_shape[0], dim)))
raise SystemExit
def fit(self, data, cyclic=False, warm_start=False):
self.data_shape = data.shape
if (not warm_start) & (self.init_mode != 'custom'):
print("cold start")
self.set_factors(data, fixed_factors=self.fixed_factors)
self.check_shape()
self.prepare_batch(False)
self.prepare_cache1(False)
div_func = self.get_div_function()
if self.verbose > 0:
scores = np.zeros((
int(np.floor(self.n_iter/self.verbose)) + 2, 2))
else:
scores = np.zeros((2, 2))
if self.solver == 'asag_mu' or self.solver == 'gsag_mu':
grad_func = self.get_gradient_mu_sag()
update_func = self.get_updates()
elif self.solver == 'asg_mu' or self.solver == 'gsg_mu':
grad_func = self.get_gradient_mu_sg()
update_func = self.get_updates()
elif self.solver == 'mu_batch':
grad_func = self.get_gradient_mu_batch()
update_func = self.get_updates()
tick = time.time()
score = 0
for cache_ind in range(self.nb_cache1):
current_cache_ind = np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0]])
current_cache_ind = current_cache_ind[current_cache_ind >= 0]
self.x_cache1.set_value(data[current_cache_ind, ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
current_cache_ind, ].astype(theano.config.floatX))
score += div_func['div_cache1']()
score_ind = 0
scores[0, ] = [score, time.time() - tick]
self.prepare_batch(not cyclic)
self.prepare_cache1(not cyclic)
        print('Initial score = %.2f' % score)
print('Fitting NMF model with %d iterations....' % self.n_iter)
if self.nb_cache1 == 1:
current_cache_ind = np.hstack(self.batch_ind[
self.cache1_ind[
0, self.cache1_ind[0] >= 0]])
current_cache_ind = current_cache_ind[current_cache_ind >= 0]
self.x_cache1.set_value(data[current_cache_ind, ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
current_cache_ind, ].astype(theano.config.floatX))
if self.solver == 'sag':
self.c1_grad_w.set_value(self.old_grad_w[self.cache1_ind[
0, self.cache1_ind[0] >= 0]].astype(
theano.config.floatX))
for it in range(self.n_iter):
tick = time.time()
self.prepare_cache1(not cyclic)
score = 0
for cache_ind in range(self.nb_cache1):
if self.nb_cache1 > 1:
current_cache_ind = np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0]])
current_cache_ind = current_cache_ind[
current_cache_ind >= 0]
self.x_cache1.set_value(data[current_cache_ind, ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
current_cache_ind, ].astype(theano.config.floatX))
if self.solver == 'sag':
self.c1_grad_w.set_value(
self.old_grad_w[
self.cache1_ind[
cache_ind,
self.cache1_ind[cache_ind] >= 0]].astype(
theano.config.floatX))
for batch_i in range(self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0].shape[0]):
batch_ind = np.arange(batch_i * self.batch_size,
(batch_i + 1) * self.batch_size)
batch_ind = batch_ind[
batch_ind < current_cache_ind.shape[0]]
batch_ind = np.asarray([batch_ind[0],
batch_ind[-1] + 1]).astype(
theano.config.floatX)
if self.solver == 'mu_batch':
self.update_mu_batch_h(batch_ind,
update_func, grad_func)
if self.solver == 'asag_mu' or self.solver == 'asg_mu':
self.update_mu_sag(batch_ind,
update_func, grad_func)
if self.solver == 'gsag_mu' or self.solver == 'gsg_mu':
grad_func['grad_h'](batch_ind)
update_func['train_h'](batch_ind)
if batch_i == 0 and cache_ind == 0:
grad_func['grad_w'](batch_ind)
if self.nb_cache1 > 1:
self.factors_[0][current_cache_ind, ] =\
self.h_cache1.get_value()
else:
self.factors_[0] = self.h_cache1.get_value()
if self.solver == 'mu_batch':
self.update_mu_batch_w(update_func)
elif self.solver == 'gsag_mu' or self.solver == 'gsg_mu':
update_func['train_w']()
if self.nb_cache1 > 1:
for cache_ind in range(self.nb_cache1):
self.x_cache1.set_value(data[np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind,
self.cache1_ind[cache_ind] >= 0]]), ].astype(
theano.config.floatX))
self.h_cache1.set_value(self.factors_[0][
np.hstack(self.batch_ind[
self.cache1_ind[
cache_ind, self.cache1_ind[cache_ind] >= 0]]),
].astype(theano.config.floatX))
if (it+1) % self.verbose == 0:
score += div_func['div_cache1']()
else:
self.factors_[0] = self.h_cache1.get_value()
if (it+1) % self.verbose == 0:
score = div_func['div_cache1']()
if (it+1) % self.verbose == 0:
score_ind += 1
scores[score_ind, ] = [
score, time.time() - tick + scores[score_ind - 1, 1]]
print('Iteration %d / %d, duration=%.1fms, cost=%f'
% (it + 1,
self.n_iter,
scores[score_ind, 1] * 1000,
scores[score_ind, 0]))
tick = time.time()
score_ind += 1
scores[score_ind, ] = [
score, time.time() - tick + scores[score_ind - 1, 1]]
print('Iteration %d / %d, duration=%.1fms, cost=%f'
% (it + 1,
self.n_iter,
scores[-1, 1] * 1000,
scores[-1, 0]))
return scores
def get_div_function(self):
div_cache1 = theano.function(inputs=[],
outputs=costs.beta_div(self.x_cache1,
self.w.T,
self.h_cache1,
self.beta),
name="div c1",
allow_input_downcast=True, profile=False)
return dict(
div_cache1=div_cache1)
def get_gradient_mu_sag(self):
tbatch_ind = T.ivector('batch_ind')
tind = T.iscalar('ind')
grad_new = updates.gradient_w_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
up_grad_w = self.forget_factor * grad_new + (
1 - self.forget_factor) * self.grad_w
grad_w = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.grad_w, up_grad_w)},
name="grad w",
allow_input_downcast=True)
grad_new = updates.gradient_h_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.c1_grad_h, grad_new)},
name="grad h",
allow_input_downcast=True)
return dict(
grad_h=grad_h,
grad_w=grad_w)
def get_gradient_mu_sg(self):
tbatch_ind = T.ivector('batch_ind')
tind = T.iscalar('ind')
grad_new = updates.gradient_w_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_w = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.grad_w, grad_new)},
name="grad w",
allow_input_downcast=True)
grad_new = updates.gradient_h_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.c1_grad_h, grad_new)},
name="grad h",
allow_input_downcast=True)
return dict(
grad_h=grad_h,
grad_w=grad_w)
def get_gradient_mu_batch(self):
tbatch_ind = T.ivector('batch_ind')
tind = T.iscalar('ind')
grad_new = updates.gradient_w_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_w = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.grad_w,
self.grad_w + grad_new)},
name="grad w",
allow_input_downcast=True,
on_unused_input='ignore')
grad_new = updates.gradient_h_mu(
self.x_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.w,
self.h_cache1[tbatch_ind[0]:tbatch_ind[1],
],
self.beta)
grad_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.c1_grad_h, grad_new)},
name="grad h",
allow_input_downcast=True)
return dict(
grad_h=grad_h,
grad_w=grad_w)
def get_updates(self):
tbatch_ind = T.ivector('batch_ind')
tneg = T.iscalar('neg')
tpos = T.iscalar('pos')
up_h = T.set_subtensor(self.h_cache1[tbatch_ind[0]:tbatch_ind[1], ],
updates.mu_update(self.h_cache1[
tbatch_ind[0]:tbatch_ind[1], ],
self.c1_grad_h[0, ],
self.c1_grad_h[1, ],
self.eps))
train_h = theano.function(inputs=[tbatch_ind],
outputs=[],
updates={(self.h_cache1, up_h)},
name="trainH",
allow_input_downcast=True,
on_unused_input='ignore')
update_w = updates.mu_update(self.w,
self.grad_w[0],
self.grad_w[1],
self.eps)
train_w = theano.function(inputs=[],
outputs=[],
updates={self.w: update_w},
name="trainW",
allow_input_downcast=True)
return dict(
train_h=train_h,
train_w=train_w)
def init(self):
self.grad_w = theano.shared(
np.zeros((2,
self.data_shape[1],
self.n_components)).astype(theano.config.floatX),
name="gradW", borrow=True,
allow_downcast=True)
self.grad_h = np.zeros((2, self.data_shape[0], self.n_components))
self.c1_grad_h = theano.shared(
np.zeros((2,
self.batch_size,
self.n_components)).astype(theano.config.floatX),
name="c1_gradH", borrow=True,
allow_downcast=True)
def prepare_batch(self, randomize=True):
ind = - np.ones((self.nb_batch * self.batch_size, ))
ind[:self.data_shape[0], ] = np.arange(self.data_shape[0])
if randomize:
np.random.shuffle(ind[:self.data_shape[0], ])
self.batch_ind = np.reshape(ind, (self.nb_batch,
self.batch_size)).astype(int)
def prepare_cache1(self, randomize=True):
ind = - np.ones((self.nb_cache1 *
int(np.ceil(np.true_divide(self.cache1_size,
self.batch_size)))))
ind[:self.nb_batch, ] = np.arange(self.nb_batch)
if randomize:
np.random.shuffle(ind[:self.nb_batch, ])
self.cache1_ind = np.reshape(ind, (self.nb_cache1,
int(np.ceil(np.true_divide(
self.cache1_size,
self.batch_size)))
)).astype(int)
def set_factors(self, data, W=None, H=None, fixed_factors=None):
self.data_shape = data.shape
self.nb_batch = int(np.ceil(np.true_divide(self.data_shape[0],
self.batch_size)))
self.batch_ind = np.zeros((self.nb_batch, self.batch_size))
if self.cache1_size > 0 and self.cache1_size < self.data_shape[0]:
if self.cache1_size < self.batch_size:
raise ValueError('cache1_size should be at '
'least equal to batch_size')
self.cache1_size = self.cache1_size/self.batch_size * self.batch_size
self.nb_cache1 = int(np.ceil(np.true_divide(self.data_shape[0],
self.cache1_size)))
else:
self.cache1_size = self.data_shape[0]
self.nb_cache1 = 1
self.forget_factor = 1./(self.sag_memory + 1)
fact_ = [base.nnrandn((dim, self.n_components))
for dim in self.data_shape]
if H is not None:
fact_[0] = H
if W is not None:
fact_[1] = W
if fixed_factors is None:
fixed_factors = []
if 1 not in fixed_factors:
self.w = theano.shared(fact_[1].astype(theano.config.floatX),
name="W", borrow=True, allow_downcast=True)
if 0 not in fixed_factors:
self.h_cache1 = theano.shared(
fact_[0][
:self.cache1_size, ].astype(theano.config.floatX),
name="H cache1", borrow=True,
allow_downcast=True)
self.factors_[0] = fact_[0]
self.factors_ = fact_
self.x_cache1 = theano.shared(np.zeros((self.cache1_size,
self.data_shape[1])).astype(
theano.config.floatX),
name="X cache1")
self.init()
def transform(self, data, warm_start=False):
self.fixed_factors = [1]
if not warm_start:
print("cold start")
self.set_factors(data, fixed_factors=self.fixed_factors)
self.fit(data, warm_start=True)
return self.factors_[0]
def update_mu_sag(self, batch_ind, update_func, grad_func):
if 0 not in self.fixed_factors:
grad_func['grad_h'](batch_ind)
update_func['train_h'](batch_ind)
if 1 not in self.fixed_factors:
grad_func['grad_w'](batch_ind)
update_func['train_w']()
def update_mu_batch_h(self, batch_ind, update_func, grad_func):
if 0 not in self.fixed_factors:
grad_func['grad_h'](batch_ind)
update_func['train_h'](batch_ind)
grad_func['grad_w'](batch_ind)
    def update_mu_batch_w(self, update_func):
if 1 not in self.fixed_factors:
            update_func['train_w']()
self.grad_w.set_value(
np.zeros((
2,
self.data_shape[1],
self.n_components)).astype(
theano.config.floatX))
| true
| true
|
790ac88d2bd437731fe4d384f1d233e0b0a71dc3
| 840
|
py
|
Python
|
recorder.py
|
udham2511/Python-Screen-Recorder
|
419be068398f15128f1279bd68a64c285b60493b
|
[
"Apache-2.0"
] | 2
|
2022-01-03T06:51:28.000Z
|
2022-01-11T19:52:27.000Z
|
recorder.py
|
udham2511/Python-Screen-Recorder
|
419be068398f15128f1279bd68a64c285b60493b
|
[
"Apache-2.0"
] | null | null | null |
recorder.py
|
udham2511/Python-Screen-Recorder
|
419be068398f15128f1279bd68a64c285b60493b
|
[
"Apache-2.0"
] | null | null | null |
from PIL import ImageGrab
import pyautogui
import numpy
import time
import cv2
import os
timeA = time.time()
fourcc = cv2.VideoWriter_fourcc(*"XVID")
name = f"Recording{len(os.listdir())-2}.mp4"
out = cv2.VideoWriter(name, fourcc, 14.0, (1920, 1080))
white = (255, 255, 255)
black = (0, 0, 0)
while True:
frame = ImageGrab.grab()
data = frame.load()
(x, y) = pyautogui.position()
mouseFrame = numpy.array(frame)
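    # 4 is the numeric value of cv2.COLOR_RGB2BGR: the PIL screenshot is RGB,
    # while OpenCV's imshow/VideoWriter expect BGR ordering.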
finalFrame = cv2.cvtColor(mouseFrame, 4)
cv2.circle(finalFrame, (x, y), 7, (0, 0, 0), -1)
cv2.circle(finalFrame, (x, y), 6, (255, 255, 255), -1)
cv2.imshow("Recoding", finalFrame)
out.write(finalFrame)
if (cv2.waitKey(1) & 0xFF == ord("q")):
break
out.release()
cv2.destroyAllWindows()
print("Time:", str(time.time() - timeA)[:4]+"s")
| 22.702703
| 59
| 0.608333
|
from PIL import ImageGrab
import pyautogui
import numpy
import time
import cv2
import os
timeA = time.time()
fourcc = cv2.VideoWriter_fourcc(*"XVID")
name = f"Recording{len(os.listdir())-2}.mp4"
out = cv2.VideoWriter(name, fourcc, 14.0, (1920, 1080))
white = (255, 255, 255)
black = (0, 0, 0)
while True:
frame = ImageGrab.grab()
data = frame.load()
(x, y) = pyautogui.position()
mouseFrame = numpy.array(frame)
finalFrame = cv2.cvtColor(mouseFrame, 4)
cv2.circle(finalFrame, (x, y), 7, (0, 0, 0), -1)
cv2.circle(finalFrame, (x, y), 6, (255, 255, 255), -1)
cv2.imshow("Recoding", finalFrame)
out.write(finalFrame)
if (cv2.waitKey(1) & 0xFF == ord("q")):
break
out.release()
cv2.destroyAllWindows()
print("Time:", str(time.time() - timeA)[:4]+"s")
| true
| true
|
790ac899051a5188dd166c227a75f5f75a40e178
| 3,137
|
py
|
Python
|
Details/models.py
|
iamcaro/SMA-Team-2
|
bc069ed1d11b2fa777d1a3459cc97d276af6ece7
|
[
"MIT"
] | null | null | null |
Details/models.py
|
iamcaro/SMA-Team-2
|
bc069ed1d11b2fa777d1a3459cc97d276af6ece7
|
[
"MIT"
] | 1
|
2021-07-27T22:28:54.000Z
|
2021-07-27T22:28:54.000Z
|
Details/models.py
|
iamcaro/SMA-Team-2
|
bc069ed1d11b2fa777d1a3459cc97d276af6ece7
|
[
"MIT"
] | 9
|
2021-07-12T22:34:19.000Z
|
2021-07-30T03:11:44.000Z
|
from django.db import models
from django.conf import settings
# Create your models here.
class Message(models.Model):
id = models.AutoField(primary_key=True)
sender_id = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="message_sender", on_delete=models.DO_NOTHING, null=True)
receiver_id = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="message_receiver", on_delete=models.DO_NOTHING, null=True)
content = models.TextField(null=True)
read_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-created_at']
class Follower(models.Model):
id = models.AutoField(primary_key=True)
user_id = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="following", on_delete=models.CASCADE, null=True)
follower_id = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="follower")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self) -> str:
return self.user_id.username
class Meta:
ordering = ['user_id']
class PostCategory(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "Post Categories"
ordering = ['name']
def __str__(self):
return self.name
class Post(models.Model):
id = models.AutoField(primary_key=True)
user_id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET(None))
title = models.CharField(max_length=50, null=True)
post_category_id = models.ForeignKey(PostCategory, null=True, on_delete=models.SET_NULL, blank=True)
content = models.TextField(null=True)
likes = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name="likes")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-created_at']
def __str__(self):
return str(self.title)
class PostComment(models.Model):
id = models.AutoField(primary_key=True)
post_id = models.ForeignKey(Post, on_delete=models.CASCADE)
commenter_id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
content = models.TextField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "Post Comments"
ordering = ['-created_at']
class PostLike(models.Model):
id = models.AutoField(primary_key=True)
liker_id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
post_id = models.ForeignKey(Post, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "Post Likes"
| 35.247191
| 134
| 0.739241
|
from django.db import models
from django.conf import settings
class Message(models.Model):
id = models.AutoField(primary_key=True)
sender_id = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="message_sender", on_delete=models.DO_NOTHING, null=True)
receiver_id = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="message_receiver", on_delete=models.DO_NOTHING, null=True)
content = models.TextField(null=True)
read_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-created_at']
class Follower(models.Model):
id = models.AutoField(primary_key=True)
user_id = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="following", on_delete=models.CASCADE, null=True)
follower_id = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="follower")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self) -> str:
return self.user_id.username
class Meta:
ordering = ['user_id']
class PostCategory(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "Post Categories"
ordering = ['name']
def __str__(self):
return self.name
class Post(models.Model):
id = models.AutoField(primary_key=True)
user_id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET(None))
title = models.CharField(max_length=50, null=True)
post_category_id = models.ForeignKey(PostCategory, null=True, on_delete=models.SET_NULL, blank=True)
content = models.TextField(null=True)
likes = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name="likes")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-created_at']
def __str__(self):
return str(self.title)
class PostComment(models.Model):
id = models.AutoField(primary_key=True)
post_id = models.ForeignKey(Post, on_delete=models.CASCADE)
commenter_id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
content = models.TextField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "Post Comments"
ordering = ['-created_at']
class PostLike(models.Model):
id = models.AutoField(primary_key=True)
liker_id = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
post_id = models.ForeignKey(Post, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "Post Likes"
| true
| true
|
790ac8ba6876ddb2295dafc87e0c68181c282302
| 3,657
|
py
|
Python
|
commerce/migrations/0001_initial.py
|
PragmaticMates/django-commerce
|
b992bf4c81ca6dfaad9ccd423d25fba9d255f159
|
[
"Apache-2.0"
] | 4
|
2017-09-08T19:22:19.000Z
|
2021-12-21T17:55:29.000Z
|
commerce/migrations/0001_initial.py
|
PragmaticMates/django-commerce
|
b992bf4c81ca6dfaad9ccd423d25fba9d255f159
|
[
"Apache-2.0"
] | null | null | null |
commerce/migrations/0001_initial.py
|
PragmaticMates/django-commerce
|
b992bf4c81ca6dfaad9ccd423d25fba9d255f159
|
[
"Apache-2.0"
] | 1
|
2021-10-31T06:31:18.000Z
|
2021-10-31T06:31:18.000Z
|
# Generated by Django 2.2.4 on 2020-06-25 17:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import internationalflavor.countries.models
import internationalflavor.vat_number.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('delivery_name', models.CharField(db_index=True, max_length=30, verbose_name='full name or company name')),
('delivery_street', models.CharField(max_length=200, verbose_name='street and number')),
('delivery_postcode', models.CharField(max_length=30, verbose_name='postcode')),
('delivery_city', models.CharField(max_length=50, verbose_name='city')),
('delivery_country', internationalflavor.countries.models.CountryField(db_index=True, verbose_name='country')),
('billing_name', models.CharField(max_length=100, verbose_name='full name or company name')),
('billing_street', models.CharField(max_length=200, verbose_name='street')),
('billing_postcode', models.CharField(max_length=30, verbose_name='postcode')),
('billing_city', models.CharField(max_length=50, verbose_name='city')),
('billing_country', internationalflavor.countries.models.CountryField(db_index=True, verbose_name='country')),
('reg_id', models.CharField(blank=True, max_length=30, verbose_name='Company Registration No.')),
('tax_id', models.CharField(blank=True, max_length=30, verbose_name='TAX ID')),
('vat_id', internationalflavor.vat_number.models.VATNumberField(blank=True, verbose_name='VAT ID')),
('email', models.EmailField(max_length=254, verbose_name='email')),
('phone', models.CharField(max_length=30, verbose_name='phone')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'shopping cart',
'verbose_name_plural': 'shopping carts',
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('quantity', models.PositiveSmallIntegerField(verbose_name='quantity')),
('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='commerce.Cart')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'item',
'verbose_name_plural': 'items',
},
),
]
| 56.261538
| 128
| 0.637134
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import internationalflavor.countries.models
import internationalflavor.vat_number.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('delivery_name', models.CharField(db_index=True, max_length=30, verbose_name='full name or company name')),
('delivery_street', models.CharField(max_length=200, verbose_name='street and number')),
('delivery_postcode', models.CharField(max_length=30, verbose_name='postcode')),
('delivery_city', models.CharField(max_length=50, verbose_name='city')),
('delivery_country', internationalflavor.countries.models.CountryField(db_index=True, verbose_name='country')),
('billing_name', models.CharField(max_length=100, verbose_name='full name or company name')),
('billing_street', models.CharField(max_length=200, verbose_name='street')),
('billing_postcode', models.CharField(max_length=30, verbose_name='postcode')),
('billing_city', models.CharField(max_length=50, verbose_name='city')),
('billing_country', internationalflavor.countries.models.CountryField(db_index=True, verbose_name='country')),
('reg_id', models.CharField(blank=True, max_length=30, verbose_name='Company Registration No.')),
('tax_id', models.CharField(blank=True, max_length=30, verbose_name='TAX ID')),
('vat_id', internationalflavor.vat_number.models.VATNumberField(blank=True, verbose_name='VAT ID')),
('email', models.EmailField(max_length=254, verbose_name='email')),
('phone', models.CharField(max_length=30, verbose_name='phone')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'shopping cart',
'verbose_name_plural': 'shopping carts',
},
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('quantity', models.PositiveSmallIntegerField(verbose_name='quantity')),
('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='commerce.Cart')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'item',
'verbose_name_plural': 'items',
},
),
]
| true
| true
|
790aca15ae84ba5cc89541b8f434209f0a6479f9
| 773
|
py
|
Python
|
hackernews/hackernews/urls.py
|
Saltiest-Hacker-News-Trolls-2/DS
|
aaef46dcb225d0be15f65fc34f97c1734c1c64e9
|
[
"MIT"
] | 1
|
2019-11-23T06:56:11.000Z
|
2019-11-23T06:56:11.000Z
|
hackernews/hackernews/urls.py
|
Saltiest-Hacker-News-Trolls-2/DS
|
aaef46dcb225d0be15f65fc34f97c1734c1c64e9
|
[
"MIT"
] | 10
|
2020-03-24T17:50:51.000Z
|
2022-02-09T23:33:10.000Z
|
hackernews/hackernews/urls.py
|
Saltiest-Hacker-News-Trolls-2/DS
|
aaef46dcb225d0be15f65fc34f97c1734c1c64e9
|
[
"MIT"
] | 1
|
2019-11-20T06:18:27.000Z
|
2019-11-20T06:18:27.000Z
|
"""hackernews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 35.136364
| 78
| 0.690815
|
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| true
| true
|
790aca83da71a12360818a5b285719c722b18eab
| 16,025
|
py
|
Python
|
ironic/dhcp/neutron.py
|
overcastcloud/ironic
|
c6608e97af33f8d7f3fe2f7deeb78f52196f2cc7
|
[
"Apache-2.0"
] | null | null | null |
ironic/dhcp/neutron.py
|
overcastcloud/ironic
|
c6608e97af33f8d7f3fe2f7deeb78f52196f2cc7
|
[
"Apache-2.0"
] | null | null | null |
ironic/dhcp/neutron.py
|
overcastcloud/ironic
|
c6608e97af33f8d7f3fe2f7deeb78f52196f2cc7
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs
If the value is None, will get the list of ports from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to workaround bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have setup sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
                msg = (_('Failed to create cleaning ports for node '
                         '%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to rollback cleaning port '
'changes for node %s') % task.node.uuid)
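The `update_dhcp_opts` docstring above spells out the expected shape of the DHCP options list. A minimal usage sketch, assuming this runs inside an Ironic driver where `task` is a TaskManager instance and the provider can be instantiated directly:

# Hedged sketch; the IP values are placeholders, not real deployment addresses.
dhcp_opts = [
    {'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
    {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.10'},
    {'opt_name': 'tftp-server', 'opt_value': '192.0.2.10'},
]
api = NeutronDHCPApi()
api.update_dhcp_opts(task, dhcp_opts)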
| 42.060367
| 97
| 0.582652
|
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
def _get_port_ip_address(self, task, port_uuid, client):
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
def get_ip_addresses(self, task):
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
                msg = (_('Failed to create cleaning ports for node '
                         '%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
for neutron_port in ports.get('ports', []):
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to rollback cleaning port '
'changes for node %s') % task.node.uuid)
| true
| true
|
790acb10e43773f3fb0e4f6ad569629a1d8d7ce4
| 173
|
py
|
Python
|
sarada/__init__.py
|
wikii122/sarada
|
6e156cce90d6468cce80312f83c3708ad9ca8f6b
|
[
"MIT"
] | null | null | null |
sarada/__init__.py
|
wikii122/sarada
|
6e156cce90d6468cce80312f83c3708ad9ca8f6b
|
[
"MIT"
] | 16
|
2021-12-03T14:29:39.000Z
|
2021-12-28T09:38:15.000Z
|
sarada/__init__.py
|
wikii122/sarada
|
6e156cce90d6468cce80312f83c3708ad9ca8f6b
|
[
"MIT"
] | null | null | null |
"""
A small utility aiming to create sound programmatically.
"""
from __future__ import annotations
from importlib import metadata
__version__ = metadata.version("sarada")
| 19.222222
| 55
| 0.791908
|
from __future__ import annotations
from importlib import metadata
__version__ = metadata.version("sarada")
| true
| true
|
790acb8bf4b67c5a3f492e5ec2b57345dc0e8614
| 18,147
|
py
|
Python
|
antlir/btrfs_diff/extents_to_chunks.py
|
SaurabhAgarwala/antlir
|
d9513d35d3eaa9d28717a40057a14d099c6ec775
|
[
"MIT"
] | null | null | null |
antlir/btrfs_diff/extents_to_chunks.py
|
SaurabhAgarwala/antlir
|
d9513d35d3eaa9d28717a40057a14d099c6ec775
|
[
"MIT"
] | null | null | null |
antlir/btrfs_diff/extents_to_chunks.py
|
SaurabhAgarwala/antlir
|
d9513d35d3eaa9d28717a40057a14d099c6ec775
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
One of the trickier parts of creating a mock btrfs filesystem is tracking
the structures of the write forks, respecting `truncate`, `write`, and
`clone` operations. We achieve this as follows:
- Sequentially apply `btrfs send` operations to create & update:
* `IncompleteInode`s and their `Extent`s,
* the path -> `IncompleteInode` mapping.
- Run `extents_to_chunks_with_clones()` to summarize which files clone
   which other files. A quick clarification of the notation:
* `Extent` is actually a tree of extents, which captures the history of
how the file's sequence of extents was created. Refer to `extent.py`.
* `Chunk` more directly corresponds to a filesystem extent. It's either
data or a hole of a given length. A file is just a contiguous sequence
of `Chunk`s. Beyond recording the kind, and the length, each `Chunk`
records precisely how other files clone from it.
So `extents_to_chunks_with_clones()` flattens the history-preserving,
clone-aware tree in `Extent` objects into a test-friendly list of
`Chunk`s.
For testing, it is important to produce a representation that is as
normalized as possible: our output should deterministically and uniquely
capture the information we wish to test, and omit everything else[1].
We do NOT want our output to depend on the order of the operations that
created the filesystem, but only on the final filesystem state.
Specifically:
* For any byte offset[2] in the file, we need to know whether it's a
`HOLE`, or it contains `DATA` (see `Extent.Kind`). An offset -> kind
map is too verbose to use in manual tests, so we merge adjacent
offsets with the same `Extent.Kind` into `Chunk`s.
* For any offset in the file, we need to know whether it is a clone of
any other file locations (i.e. copy-on-write sharing of underlying
storage). For this reason, each `Chunk` has a set of `ChunkClones`,
which form a normalized[3] description of the shared-storage links on
the filesystem.
To give an example -- let's say that columns are byte offsets, and we
have this 10-byte extent, parts of which were cloned to make files
`A`, `B`, and `C`:
0123456789 # offsets on disk
BBBBBAAA # some part of file `B` includes offsets 1-5; `A` -- 6-8
AAACCCCC # `A` ALSO includes 0-2, possibly separated from its 6-8
(Aside: `test_extents_to_chunks_with_clones` also uses such figures)
Reading this figure, we see that:
- A has a 6-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 1 into B at `offset` 0 with length 2, aka `B:0+2@1`
* From `offset` 3 into C at `offset` 3 with length 2, aka `C:3+2@3'
- B has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into A at `offset` 1 with length 2, aka `A:1+2@0`
* From `offset` 2 into C at `offset` 0 with length 3, aka `C:0+3@2'
- C has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into B at `offset` 2 with length 3, aka `B:2+3@0`
* From `offset` 3 into A at `offset` 3 with length 2, aka `A:3+2@3'
You can see that our representation of "a set of `ChunkClone`s for
every `Chunk`" is NOT parsimonious. If the same range of bytes is
cloned into N `Chunk`s, each of those `Chunk`s will refer to every
other `Chunk`, for a total of N*(N-1)/2 references. This is far less
efficient than a spanning tree with `N - 1` references.
E.g. in the above example, N = 4, and we stored 6 `ChunkClones`:
{'A': {'B:0+2@1', 'C:3+2@3'},
'B': {'A:1+2@0', 'C:0+3@2'},
'C': {'B:2+3@0', 'A:3+2@3'}}
The redundancy is obvious, e.g. each of these pairs are mirror images:
- 'A': 'B:0+2@1' versus 'B': 'A:1+2@0'
- 'A': 'C:3+2@3' versus 'C': 'A:3+2@3'
- 'B': 'C:0+3@2' versus 'C': 'B:2+3@0'
Picking one ChunkClone from each line would make a 3-edge spanning tree.
Using an inefficient presentation is an intentional design decision.
In most test filesystems, the copy number of any Chunk will be low, so
the cost of enumerating all references is minimal. The upside of this
quadratic representation is that it is unique and simple.
In contrast, presenting the clone structure via a spanning tree breaks
the symmetry, and then each test author has to understand the process
by which the N-1 spanning tree edges are selected. It's easy to make
such a process deterministic, but it still adds cognitive load.
[1] The current code tracks clones of HOLEs, because it makes no effort to
ignore them. I would guess that btrfs lacks this tracking, since such
clones would save no space. Once this is confirmed, it would be very
easy to either ignore, or leave unpopulated the `chunk_clones` field for
`Chunk` object with `kind == Extent.Kind.HOLE`.
[2] I refer to "bytes" throughout, but in actuality filesystems are
block-oriented. To deal with this, divide all lengths and offsets by
your block size to get the sense of "bytes" used here.
[3] The current code does NOT merge adjacent ChunkClones that were created
by separate `clone` operations. This is easy to fix once it comes up in
real applications. Tested in `test_cannot_merge_adjacent_clones()`.
"""
# Future: frozentypes instead of NamedTuples can permit some cleanups below.
import functools
from collections import defaultdict
from typing import Dict, Iterable, NamedTuple, Sequence, Tuple
from .extent import Extent
from .inode import Chunk, ChunkClone, Clone
from .inode_id import InodeID
class _CloneExtentRef(NamedTuple):
"""
Connects a part of a HOLE/DATA leaf Extent to a location in an Inode.
Although the Extent is shared between many inodes and/or disjoint
locations in the same inode, each _CloneExtentRef object is specific to
one occurrence of this Extent in the `gen_trimmed_leaves` of one inode.
We initially create a _CloneExtentRef for every piece of every inode,
    but later we only retain those that have some inter-inode overlap within
their `.extent`, thus identifying cloned chunks of inodes.
Aside: Unlike the simplified data model in `inode.py`, the Extent's
object identity captures the original reason that parts of some inodes
became identified via a clone relationship. We mostly use this for
assertions.
Future: With `frozentype`, __new__ could assert that `offset` and
`clone.length` are sane with respect to `extent`.
"""
clone: Clone # `clone.length` trims `extent`
extent: Extent
offset: int # Trims `extent`
# The position in `gen_trimmed_leaves` of the specific trimmed leaf that
# is being connected to another inode.
#
# It is possible for a Inode to have two instances of the same Extent
# with the same offset & length in its `gen_trimmed_leaves` stream, see
# e.g. `test_multi_extent`. In that case, we cannot correctly assign
# `ChunkClone`s to their trimmed leaves solely based on the content of
# the trimmed leaf: `(offset, length, extent)`.
#
# You might ask why the `ChunkClone` lists would differ between
# identical trimmed extents? Here is why: the first has to refer to the
# second, but not to itself, and conversely, the second must refer to
# the first, but not to itself.
#
# We could avoid this denormalization by keying `CloneChunk`s on
# `(inode_offset, offset, length, extent)`, which is unique. And
# `extents_to_chunks_with_clones` does already track `inode_offset`.
# However, the denormalized approach seemed cleaner.
leaf_idx: int
def __repr__(self): # pragma: no cover
return (
f"{self.clone.inode_id}:{self.clone.offset}"
f"+{self.clone.length}:{id(self.extent)}" # Extent is too noisy
)
# If these change, we have to update `_clone_op_compare_key`
assert Clone._fields.index("inode_id") == 0
assert _CloneExtentRef._fields.index("clone") == 0
# Our _CloneOp ordering obeys the following invariants:
# - sort by position first
# - sort by action second, putting POPs before PUSHes (see their def'ns)
# We do not need finer-grained ordering because:
# (1) we only do work on POPs,
# (2) the work done on all the POPs at one position does not depend on the
# order of the _CloneOps -- we symmetrically record the relationship in
# both directions:
# (just-popped op, each unpopped op)
# (each unpopped op, just-popped op)
#
# We could get the desired ordering implicitly by:
# - relying on the order of field declaration in `_CloneOp` (not bad)
# - making `Inode`s comparable (a bit ugly, comparing Extents is pricy,
# comparing InodeIDs would require some comparator boilerplate)
# Luckily, being explicit is not *that* painful.
def _clone_op_compare_key(c: "_CloneOp"):
return (
# The preceding asserts make these [1:] hacks tolerable.
c.pos,
c.action,
c.ref[1:],
c.ref.clone[1:],
c.ref.clone.inode_id.id,
)
def _clone_op_compare(fn):
@functools.wraps(fn)
def cmp(self: "_CloneOp", other: "_CloneOp"):
assert isinstance(other, _CloneOp)
# We only compare ops within one extent. The tests assume this to
# justify focusing on single-extent examples, so check it.
assert self.ref.extent is other.ref.extent
# All our items are distinct, since `clone.offset` is `inode_offset`,
# which is strictly increasing in each inode. We have no business
# comparing a _CloneOp with itself.
assert tuple.__ne__(self, other)
return fn(_clone_op_compare_key(self), _clone_op_compare_key(other))
return cmp
class _CloneOp(NamedTuple):
PUSH = "push"
POP = "pop"
assert POP < PUSH # We want to sort all POPs before any PUSHes
pos: int
action: str
ref: _CloneExtentRef
# NamedTuple confuses functools.total_ordering, so define all 6 comparators
__eq__ = _clone_op_compare(tuple.__eq__)
__ne__ = _clone_op_compare(tuple.__ne__)
__lt__ = _clone_op_compare(tuple.__lt__)
__le__ = _clone_op_compare(tuple.__le__)
__gt__ = _clone_op_compare(tuple.__gt__)
__ge__ = _clone_op_compare(tuple.__ge__)
def _leaf_extent_id_to_clone_ops(
ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
"""
To collect the parts of a Chunk that are cloned, we will run a variation
on the standard interval-overlap algorithm. We first sort the starts &
ends of each interval, and then do a sequential scan that uses starts to
add, and ends to remove, a tracking object from a "current intervals"
structure.
This function simply prepares the set of interval starts & ends for each
InodeID, the computation is in `_leaf_ref_to_chunk_clones_from_clone_ops`.
"""
leaf_extent_id_to_clone_ops = defaultdict(list)
for ino_id, extent in ids_and_extents:
file_offset = 0
for leaf_idx, (offset, length, leaf_extent) in enumerate(
extent.gen_trimmed_leaves()
):
ref = _CloneExtentRef(
clone=Clone(inode_id=ino_id, offset=file_offset, length=length),
extent=leaf_extent,
offset=offset,
leaf_idx=leaf_idx,
)
leaf_extent_id_to_clone_ops[id(leaf_extent)].extend(
[
_CloneOp(pos=offset, action=_CloneOp.PUSH, ref=ref),
_CloneOp(pos=offset + length, action=_CloneOp.POP, ref=ref),
]
)
file_offset += length
return leaf_extent_id_to_clone_ops
def _leaf_ref_to_chunk_clones_from_clone_ops(
extent_id: int, clone_ops: Iterable[_CloneOp]
):
"As per `_leaf_extent_id_to_clone_ops`, this computes interval overlaps"
active_ops: Dict[_CloneExtentRef, _CloneOp] = {} # Tracks open intervals
leaf_ref_to_chunk_clones = defaultdict(list)
for op in sorted(clone_ops):
# Whenever an interval (aka an Inode's Extent's "trimmed leaf")
# ends, we create `ChunkClone` objects **to** and **from** all the
# concurrently open intervals.
if op.action is _CloneOp.POP:
pushed_op = active_ops.pop(op.ref)
assert pushed_op.ref is op.ref
assert id(op.ref.extent) == extent_id
assert pushed_op.pos == op.ref.offset
assert pushed_op.pos + op.ref.clone.length == op.pos
for clone_op in active_ops.values():
assert op.ref.extent is clone_op.ref.extent
# The cloned portion's extent offset is the larger of the 2
bigger_offset = max(clone_op.ref.offset, op.ref.offset)
# Record that `clone_op` clones part of `op`'s inode.
leaf_ref_to_chunk_clones[op.ref].append(
ChunkClone(
offset=bigger_offset,
clone=Clone(
inode_id=clone_op.ref.clone.inode_id,
offset=clone_op.ref.clone.offset
+ (bigger_offset - clone_op.ref.offset),
length=op.pos - bigger_offset,
),
)
)
# Record that `op` clones part of `clone_op`'s inode.
leaf_ref_to_chunk_clones[clone_op.ref].append(
ChunkClone(
offset=bigger_offset,
clone=Clone(
inode_id=op.ref.clone.inode_id,
offset=op.ref.clone.offset
+ (bigger_offset - op.ref.offset),
length=op.pos - bigger_offset, # Same length
),
)
)
# Sorting guarantees all POPs for `pos` are handled before PUSHes
elif op.action == _CloneOp.PUSH:
assert op.ref not in active_ops
active_ops[op.ref] = op
else:
raise AssertionError(op) # pragma: no cover
return leaf_ref_to_chunk_clones
def _id_to_leaf_idx_to_chunk_clones(
ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
'Aggregates newly created ChunkClones per InodeID, and per "trimmed leaf"'
id_to_leaf_idx_to_chunk_clones = defaultdict(dict)
for extent_id, clone_ops in _leaf_extent_id_to_clone_ops(
ids_and_extents
).items():
leaf_ref_to_chunk_clones = _leaf_ref_to_chunk_clones_from_clone_ops(
extent_id, clone_ops
)
for leaf_ref, offsets_clones in leaf_ref_to_chunk_clones.items():
d = id_to_leaf_idx_to_chunk_clones[leaf_ref.clone.inode_id]
# A `leaf_idx` from a specific inode ID refers to one extent,
# and each extent is handled in one iteration, so it cannot be
# that two iterations contribute to the same `leaf_idx` key.
assert leaf_ref.leaf_idx not in d
# `leaf_idx` is the position in `gen_trimmed_leaves` of the
# chunk, whose clones we computed. That fully specifies where
# `extents_to_chunks_with_clones` should put the clones.
d[leaf_ref.leaf_idx] = offsets_clones
return id_to_leaf_idx_to_chunk_clones
def extents_to_chunks_with_clones(
ids_and_extents: Sequence[Tuple[InodeID, Extent]]
) -> Iterable[Tuple[InodeID, Sequence[Chunk]]]:
"""
Converts the nested, history-preserving `Extent` structures into flat
sequences of `Chunk`s, while being careful to annotate cloned parts as
described in this file's docblock. The `InodeID`s are needed to ensure
that the `Chunk`s' `Clone` objects refer to the appropriate files.
"""
id_to_leaf_idx_to_chunk_clones = _id_to_leaf_idx_to_chunk_clones(
ids_and_extents
)
for ino_id, extent in ids_and_extents:
leaf_to_chunk_clones = id_to_leaf_idx_to_chunk_clones.get(ino_id, {})
new_chunks = []
for leaf_idx, (offset, length, extent) in enumerate(
extent.gen_trimmed_leaves()
):
chunk_clones = leaf_to_chunk_clones.get(leaf_idx, [])
assert isinstance(extent.content, Extent.Kind)
# If the chunk kind matches, merge into the previous chunk.
if new_chunks and new_chunks[-1].kind == extent.content:
prev_length = new_chunks[-1].length
prev_clones = new_chunks[-1].chunk_clones
else: # Otherwise, make a new one.
prev_length = 0
prev_clones = set()
new_chunks.append(None)
new_chunks[-1] = Chunk(
kind=extent.content,
length=length + prev_length,
chunk_clones=prev_clones,
)
new_chunks[-1].chunk_clones.update(
# Future: when switching to frozentype, __new__ should
# validate that clone offset & length are sane relative
# to the trimmed extent.
ChunkClone(
clone=clone,
# Subtract `offset` because `ChunkClone.offset` is
# Extent-relative, but in the actual file layout, the
# leaf Extent is trimmed further.
offset=clone_offset + prev_length - offset,
)
for clone_offset, clone in chunk_clones
)
# Future: `deepfrozen` was made for this:
yield ino_id, tuple(
Chunk(
kind=c.kind,
length=c.length,
chunk_clones=frozenset(c.chunk_clones),
)
for c in new_chunks
)
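To make the docblock's A/B/C figure concrete: file `A` has a single 6-byte DATA chunk whose clones are `B:0+2@1` and `C:3+2@3`. Spelled out with this module's own types (a sketch; `id_b` and `id_c` stand in for real InodeID objects):

a_chunk = Chunk(
    kind=Extent.Kind.DATA,
    length=6,
    chunk_clones=frozenset({
        # B:0+2@1 -- offset 1 in A is cloned into B at offset 0, length 2
        ChunkClone(offset=1, clone=Clone(inode_id=id_b, offset=0, length=2)),
        # C:3+2@3 -- offset 3 in A is cloned into C at offset 3, length 2
        ChunkClone(offset=3, clone=Clone(inode_id=id_c, offset=3, length=2)),
    }),
)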
| 43.310263
| 80
| 0.648372
|
import functools
from collections import defaultdict
from typing import Dict, Iterable, NamedTuple, Sequence, Tuple
from .extent import Extent
from .inode import Chunk, ChunkClone, Clone
from .inode_id import InodeID
class _CloneExtentRef(NamedTuple):
clone: Clone
extent: Extent
offset: int
leaf_idx: int
def __repr__(self):
return (
f"{self.clone.inode_id}:{self.clone.offset}"
f"+{self.clone.length}:{id(self.extent)}"
)
assert Clone._fields.index("inode_id") == 0
assert _CloneExtentRef._fields.index("clone") == 0
# We do not need finer-grained ordering because:
# (1) we only do work on POPs,
# (2) the work done on all the POPs at one position does not depend on the
# order of the _CloneOps -- we symmetrically record the relationship in
# both directions:
# (just-popped op, each unpopped op)
# (each unpopped op, just-popped op)
#
# We could get the desired ordering implicitly by:
# - relying on the order of field declaration in `_CloneOp` (not bad)
# - making `Inode`s comparable (a bit ugly, comparing Extents is pricy,
# comparing InodeIDs would require some comparator boilerplate)
# Luckily, being explicit is not *that* painful.
def _clone_op_compare_key(c: "_CloneOp"):
return (
# The preceding asserts make these [1:] hacks tolerable.
c.pos,
c.action,
c.ref[1:],
c.ref.clone[1:],
c.ref.clone.inode_id.id,
)
def _clone_op_compare(fn):
@functools.wraps(fn)
def cmp(self: "_CloneOp", other: "_CloneOp"):
assert isinstance(other, _CloneOp)
# We only compare ops within one extent. The tests assume this to
# justify focusing on single-extent examples, so check it.
assert self.ref.extent is other.ref.extent
# All our items are distinct, since `clone.offset` is `inode_offset`,
# which is strictly increasing in each inode. We have no business
# comparing a _CloneOp with itself.
assert tuple.__ne__(self, other)
return fn(_clone_op_compare_key(self), _clone_op_compare_key(other))
return cmp
class _CloneOp(NamedTuple):
PUSH = "push"
POP = "pop"
assert POP < PUSH # We want to sort all POPs before any PUSHes
pos: int
action: str
ref: _CloneExtentRef
# NamedTuple confuses functools.total_ordering, so define all 6 comparators
__eq__ = _clone_op_compare(tuple.__eq__)
__ne__ = _clone_op_compare(tuple.__ne__)
__lt__ = _clone_op_compare(tuple.__lt__)
__le__ = _clone_op_compare(tuple.__le__)
__gt__ = _clone_op_compare(tuple.__gt__)
__ge__ = _clone_op_compare(tuple.__ge__)
def _leaf_extent_id_to_clone_ops(
ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
leaf_extent_id_to_clone_ops = defaultdict(list)
for ino_id, extent in ids_and_extents:
file_offset = 0
for leaf_idx, (offset, length, leaf_extent) in enumerate(
extent.gen_trimmed_leaves()
):
ref = _CloneExtentRef(
clone=Clone(inode_id=ino_id, offset=file_offset, length=length),
extent=leaf_extent,
offset=offset,
leaf_idx=leaf_idx,
)
leaf_extent_id_to_clone_ops[id(leaf_extent)].extend(
[
_CloneOp(pos=offset, action=_CloneOp.PUSH, ref=ref),
_CloneOp(pos=offset + length, action=_CloneOp.POP, ref=ref),
]
)
file_offset += length
return leaf_extent_id_to_clone_ops
def _leaf_ref_to_chunk_clones_from_clone_ops(
extent_id: int, clone_ops: Iterable[_CloneOp]
):
active_ops: Dict[_CloneExtentRef, _CloneOp] = {} # Tracks open intervals
leaf_ref_to_chunk_clones = defaultdict(list)
for op in sorted(clone_ops):
# Whenever an interval (aka an Inode's Extent's "trimmed leaf")
# ends, we create `ChunkClone` objects **to** and **from** all the
# concurrently open intervals.
if op.action is _CloneOp.POP:
pushed_op = active_ops.pop(op.ref)
assert pushed_op.ref is op.ref
assert id(op.ref.extent) == extent_id
assert pushed_op.pos == op.ref.offset
assert pushed_op.pos + op.ref.clone.length == op.pos
for clone_op in active_ops.values():
assert op.ref.extent is clone_op.ref.extent
# The cloned portion's extent offset is the larger of the 2
bigger_offset = max(clone_op.ref.offset, op.ref.offset)
leaf_ref_to_chunk_clones[op.ref].append(
ChunkClone(
offset=bigger_offset,
clone=Clone(
inode_id=clone_op.ref.clone.inode_id,
offset=clone_op.ref.clone.offset
+ (bigger_offset - clone_op.ref.offset),
length=op.pos - bigger_offset,
),
)
)
# Record that `op` clones part of `clone_op`'s inode.
leaf_ref_to_chunk_clones[clone_op.ref].append(
ChunkClone(
offset=bigger_offset,
clone=Clone(
inode_id=op.ref.clone.inode_id,
offset=op.ref.clone.offset
+ (bigger_offset - op.ref.offset),
length=op.pos - bigger_offset,
),
)
)
elif op.action == _CloneOp.PUSH:
assert op.ref not in active_ops
active_ops[op.ref] = op
else:
raise AssertionError(op)
return leaf_ref_to_chunk_clones
def _id_to_leaf_idx_to_chunk_clones(
ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
id_to_leaf_idx_to_chunk_clones = defaultdict(dict)
for extent_id, clone_ops in _leaf_extent_id_to_clone_ops(
ids_and_extents
).items():
leaf_ref_to_chunk_clones = _leaf_ref_to_chunk_clones_from_clone_ops(
extent_id, clone_ops
)
for leaf_ref, offsets_clones in leaf_ref_to_chunk_clones.items():
d = id_to_leaf_idx_to_chunk_clones[leaf_ref.clone.inode_id]
assert leaf_ref.leaf_idx not in d
d[leaf_ref.leaf_idx] = offsets_clones
return id_to_leaf_idx_to_chunk_clones
def extents_to_chunks_with_clones(
ids_and_extents: Sequence[Tuple[InodeID, Extent]]
) -> Iterable[Tuple[InodeID, Sequence[Chunk]]]:
id_to_leaf_idx_to_chunk_clones = _id_to_leaf_idx_to_chunk_clones(
ids_and_extents
)
for ino_id, extent in ids_and_extents:
leaf_to_chunk_clones = id_to_leaf_idx_to_chunk_clones.get(ino_id, {})
new_chunks = []
for leaf_idx, (offset, length, extent) in enumerate(
extent.gen_trimmed_leaves()
):
chunk_clones = leaf_to_chunk_clones.get(leaf_idx, [])
assert isinstance(extent.content, Extent.Kind)
if new_chunks and new_chunks[-1].kind == extent.content:
prev_length = new_chunks[-1].length
prev_clones = new_chunks[-1].chunk_clones
else:
prev_length = 0
prev_clones = set()
new_chunks.append(None)
new_chunks[-1] = Chunk(
kind=extent.content,
length=length + prev_length,
chunk_clones=prev_clones,
)
new_chunks[-1].chunk_clones.update(
ChunkClone(
clone=clone,
offset=clone_offset + prev_length - offset,
)
for clone_offset, clone in chunk_clones
)
yield ino_id, tuple(
Chunk(
kind=c.kind,
length=c.length,
chunk_clones=frozenset(c.chunk_clones),
)
for c in new_chunks
)
| true
| true
|
790acba3f445cec85703cf63215660fb9cb174a4
| 1,797
|
py
|
Python
|
examples/customisations/plot_rainbow_serif_bins.py
|
Jiaming1999/ChainConsumer
|
5606696525d91f11d8093085934fa352b98ce97c
|
[
"MIT"
] | 55
|
2016-08-31T01:02:41.000Z
|
2022-03-15T15:23:29.000Z
|
examples/customisations/plot_rainbow_serif_bins.py
|
Jiaming1999/ChainConsumer
|
5606696525d91f11d8093085934fa352b98ce97c
|
[
"MIT"
] | 86
|
2016-10-09T23:20:00.000Z
|
2022-03-23T09:55:57.000Z
|
examples/customisations/plot_rainbow_serif_bins.py
|
Jiaming1999/ChainConsumer
|
5606696525d91f11d8093085934fa352b98ce97c
|
[
"MIT"
] | 17
|
2016-08-31T08:35:37.000Z
|
2021-07-24T16:39:26.000Z
|
# -*- coding: utf-8 -*-
"""
=======================
Cmap and Custom Bins
=======================
Invoke the cmap colour scheme and choose how many bins to use with your data.
By default, the cmap colour scheme is used if you have many, many chains. You can
enable it before that point if you wish and pass in the cmap you want to use.
You can also pick how many bins you want to display your data with.
You can see that in this example, we pick too many bins and would not get good
summaries. If you simply want more (or fewer) bins than the default estimate,
you can input a float instead of an integer, and the number of bins will simply
scale by that amount. For example, if the estimate picks 20 bins and you set
``bins=1.5``, your plots and summaries would be calculated with 30 bins.
"""
import numpy as np
from numpy.random import normal, random, multivariate_normal
from chainconsumer import ChainConsumer
np.random.seed(0)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data2 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data3 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data4 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
c = ChainConsumer()
c.add_chain(data, name="A")
c.add_chain(data2, name="B")
c.add_chain(data3, name="C")
c.add_chain(data4, name="D")
c.configure(bins=50, cmap="plasma")
fig = c.plotter.plot(figsize=0.75) # Also making the figure 75% of its original size, for fun
fig.set_size_inches(3 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
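As a follow-up to the float-scaling behaviour described in the header (a float ``bins`` multiplies the auto-estimated bin count instead of fixing it), a hedged sketch reusing the chains defined above:

c2 = ChainConsumer()
c2.add_chain(data, name="A")
c2.configure(bins=1.5)  # 1.5x the estimated number of bins, rather than a fixed count
fig2 = c2.plotter.plot()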
| 39.933333
| 94
| 0.70729
|
import numpy as np
from numpy.random import normal, random, multivariate_normal
from chainconsumer import ChainConsumer
np.random.seed(0)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data2 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data3 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data4 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
c = ChainConsumer()
c.add_chain(data, name="A")
c.add_chain(data2, name="B")
c.add_chain(data3, name="C")
c.add_chain(data4, name="D")
c.configure(bins=50, cmap="plasma")
fig = c.plotter.plot(figsize=0.75)
fig.set_size_inches(3 + fig.get_size_inches())
| true
| true
|
790acc9953ae5e0ad3fb887e45f2807efc3c43e8
| 1,298
|
py
|
Python
|
UML2ER/contracts/unit/HContractUnitR03_ConnectedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
UML2ER/contracts/unit/HContractUnitR03_ConnectedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
UML2ER/contracts/unit/HContractUnitR03_ConnectedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HContractUnitR03_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HContractUnitR03_ConnectedLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HContractUnitR03_ConnectedLHS, self).__init__(name='HContractUnitR03_ConnectedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HContractUnitR03_ConnectedLHS')
self["equations"] = []
# Set the node attributes
# match class Class(Class) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__Class"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Class')
# Add the edges
self.add_edges([
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each match association.
def constraint(self, PreNode, graph):
return True
| 26.489796
| 114
| 0.716487
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HContractUnitR03_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
self.is_compiled = True
super(HContractUnitR03_ConnectedLHS, self).__init__(name='HContractUnitR03_ConnectedLHS', num_nodes=0, edges=[])
self.add_edges([])
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HContractUnitR03_ConnectedLHS')
self["equations"] = []
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__Class"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Class')
self.add_edges([
])
def eval_attr11(self, attr_value, this):
return True
def constraint(self, PreNode, graph):
return True
| true
| true
|
790ace19384d70584442abecc4540ceb6f9392b5
| 1,585
|
py
|
Python
|
tiveU/users/views.py
|
rds0751/newtiveu
|
779ac840f0787ea0e74701b43d4320b44429527e
|
[
"MIT"
] | null | null | null |
tiveU/users/views.py
|
rds0751/newtiveu
|
779ac840f0787ea0e74701b43d4320b44429527e
|
[
"MIT"
] | null | null | null |
tiveU/users/views.py
|
rds0751/newtiveu
|
779ac840f0787ea0e74701b43d4320b44429527e
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from tiveU.articles.models import Article
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
def get_context_data(self, **kwargs):
context = super(UserDetailView, self).get_context_data(**kwargs)
context['articles'] = Article.objects.all()
return context
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', 'email', 'picture', 'job_title', 'bio', 'phone', 'gender']
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = 'username'
slug_url_kwarg = 'username'
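Because the detail and update views above resolve users by the `username` slug, the URLconf has to capture that keyword. A hedged sketch of what `users/urls.py` might look like (not part of this record; assumes a Django version with `path()` available):

from django.urls import path

from . import views

app_name = 'users'
urlpatterns = [
    path('', views.UserListView.as_view(), name='list'),
    path('~redirect/', views.UserRedirectView.as_view(), name='redirect'),
    path('~update/', views.UserUpdateView.as_view(), name='update'),
    path('<slug:username>/', views.UserDetailView.as_view(), name='detail'),
]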
| 33.723404
| 80
| 0.705363
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from tiveU.articles.models import Article
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = 'username'
slug_url_kwarg = 'username'
def get_context_data(self, **kwargs):
context = super(UserDetailView, self).get_context_data(**kwargs)
context['articles'] = Article.objects.all()
return context
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', 'email', 'picture', 'job_title', 'bio', 'phone', 'gender']
model = User
def get_success_url(self):
return reverse('users:detail',
kwargs={'username': self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
slug_field = 'username'
slug_url_kwarg = 'username'
| true
| true
|
790ad287fb92682456101e4a4670286bc0608205
| 3,498
|
py
|
Python
|
BlogProject/settings.py
|
lwpdzq/BlogProject
|
bd46434722948ddb26e1dc9bd9c652e9531cf32b
|
[
"MIT"
] | null | null | null |
BlogProject/settings.py
|
lwpdzq/BlogProject
|
bd46434722948ddb26e1dc9bd9c652e9531cf32b
|
[
"MIT"
] | null | null | null |
BlogProject/settings.py
|
lwpdzq/BlogProject
|
bd46434722948ddb26e1dc9bd9c652e9531cf32b
|
[
"MIT"
] | null | null | null |
"""
Django settings for BlogProject project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add the apps directory to the module search path
sys.path.insert(0,os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l-r2=6&(#p$f1qn$xzk6vce99ojk1nit&x7l_hqi9%&u$f#am&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.base',
'apps.article',
'apps.comment'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BlogProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
                'base.context_processors.site_info'  # context processor for basic site information
],
},
},
]
WSGI_APPLICATION = 'BlogProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# (Development environment) media files: directory where uploaded images are stored
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
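With `MEDIA_ROOT`/`MEDIA_URL` configured as above, uploaded files are only served in development if the project URLconf adds the media route. A hedged sketch of what `BlogProject/urls.py` might add (the real urls.py is not shown in this record; Django 1.11-style `url()` is assumed, matching the settings header):

from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', admin.site.urls),
]

if settings.DEBUG:
    # Serve user-uploaded media from MEDIA_ROOT during development only.
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)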
| 25.911111
| 91
| 0.69697
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,os.path.join(BASE_DIR, 'apps'))
SECRET_KEY = 'l-r2=6&(#p$f1qn$xzk6vce99ojk1nit&x7l_hqi9%&u$f#am&'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.base',
'apps.article',
'apps.comment'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BlogProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
                'base.context_processors.site_info'  # context processor for basic site information
],
},
},
]
WSGI_APPLICATION = 'BlogProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# (Development environment) media files: folder where uploaded images are saved
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| true
| true
|
790ad2c7c75ae801e7758e65bed03fd84cb2f24e
| 685
|
py
|
Python
|
analyzer/migrations/0005_auto_20200329_1308.py
|
4elovek37/diseases_risk_analysing
|
3664eaed09f749851fced467bd43e911d5e04ee3
|
[
"MIT"
] | null | null | null |
analyzer/migrations/0005_auto_20200329_1308.py
|
4elovek37/diseases_risk_analysing
|
3664eaed09f749851fced467bd43e911d5e04ee3
|
[
"MIT"
] | 4
|
2021-03-19T02:16:42.000Z
|
2021-09-22T18:55:20.000Z
|
analyzer/migrations/0005_auto_20200329_1308.py
|
bogdanvso/diseases_risk_analysing
|
3664eaed09f749851fced467bd43e911d5e04ee3
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-03-29 13:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyzer', '0004_auto_20200328_1750'),
]
operations = [
migrations.AddField(
model_name='diseasestats',
name='country',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.PROTECT, to='analyzer.Country'),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='diseasestats',
unique_together={('disease_season', 'country', 'stats_date')},
),
]
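# A hedged reconstruction of the model state this migration produces. Only the 'country' field and the
# unique_together constraint come from the operations above; the remaining fields and their types are
# assumptions, since the actual DiseaseStats model is not part of this record.
#
#     class DiseaseStats(models.Model):
#         disease_season = models.ForeignKey('analyzer.DiseaseSeason', on_delete=models.PROTECT)  # assumed
#         country = models.ForeignKey('analyzer.Country', on_delete=models.PROTECT)
#         stats_date = models.DateField()  # assumed field type
#         class Meta:
#             unique_together = {('disease_season', 'country', 'stats_date')}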
| 27.4
| 115
| 0.626277
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyzer', '0004_auto_20200328_1750'),
]
operations = [
migrations.AddField(
model_name='diseasestats',
name='country',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.PROTECT, to='analyzer.Country'),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='diseasestats',
unique_together={('disease_season', 'country', 'stats_date')},
),
]
| true
| true
|
790ad3c47a8bf507fb7765cce0060763b00a775b
| 7,818
|
py
|
Python
|
grammar_induction/earley_parser/nltk/tag/stanford.py
|
tdonca/OpenBottle
|
f03d80e7b3645232fb97f91cf7fc2dc02f101ac2
|
[
"MIT"
] | 6
|
2017-01-22T03:15:01.000Z
|
2019-12-01T16:19:36.000Z
|
grammar_induction/earley_parser/nltk/tag/stanford.py
|
tdonca/OpenBottle
|
f03d80e7b3645232fb97f91cf7fc2dc02f101ac2
|
[
"MIT"
] | 3
|
2020-03-24T15:38:23.000Z
|
2021-02-02T21:44:18.000Z
|
grammar_induction/earley_parser/nltk/tag/stanford.py
|
tdonca/OpenBottle
|
f03d80e7b3645232fb97f91cf7fc2dc02f101ac2
|
[
"MIT"
] | 6
|
2017-01-19T21:49:55.000Z
|
2021-04-14T09:57:17.000Z
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Nitin Madnani <nmadnani@ets.org>
# Rami Al-Rfou' <ralrfou@cs.stonybrook.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A module for interfacing with the Stanford taggers.
Tagger models need to be downloaded from http://nlp.stanford.edu/software
and the STANFORD_MODELS environment variable set (a colon-separated
list of paths).
For more details see the documentation for StanfordPOSTagger and StanfordNERTagger.
"""
import os
import tempfile
from subprocess import PIPE
import warnings
from nltk.internals import find_file, find_jar, config_java, java, _java_options, find_jars_within_path
from nltk.tag.api import TaggerI
from nltk import compat
_stanford_url = 'http://nlp.stanford.edu/software'
class StanfordTagger(TaggerI):
"""
An interface to Stanford taggers. Subclasses must define:
- ``_cmd`` property: A property that returns the command that will be
executed.
- ``_SEPARATOR``: Class constant that represents that character that
is used to separate the tokens from their tags.
- ``_JAR`` file: Class constant that represents the jar file name.
"""
_SEPARATOR = ''
_JAR = ''
def __init__(self, model_filename, path_to_jar=None, encoding='utf8', verbose=False, java_options='-mx1000m'):
if not self._JAR:
warnings.warn('The StanfordTagger class is not meant to be '
'instantiated directly. Did you mean StanfordPOSTagger or StanfordNERTagger?')
self._stanford_jar = find_jar(
self._JAR, path_to_jar,
searchpath=(), url=_stanford_url,
verbose=verbose)
self._stanford_model = find_file(model_filename,
env_vars=('STANFORD_MODELS',), verbose=verbose)
# Adding logging jar files to classpath
stanford_dir = os.path.split(self._stanford_jar)[0]
self._stanford_jar = tuple(find_jars_within_path(stanford_dir))
self._encoding = encoding
self.java_options = java_options
@property
def _cmd(self):
raise NotImplementedError
def tag(self, tokens):
# This function should return list of tuple rather than list of list
return sum(self.tag_sents([tokens]), [])
def tag_sents(self, sentences):
encoding = self._encoding
default_options = ' '.join(_java_options)
config_java(options=self.java_options, verbose=False)
# Create a temporary input file
_input_fh, self._input_file_path = tempfile.mkstemp(text=True)
cmd = list(self._cmd)
cmd.extend(['-encoding', encoding])
# Write the actual sentences to the temporary input file
_input_fh = os.fdopen(_input_fh, 'wb')
_input = '\n'.join((' '.join(x) for x in sentences))
if isinstance(_input, compat.text_type) and encoding:
_input = _input.encode(encoding)
_input_fh.write(_input)
_input_fh.close()
# Run the tagger and get the output
stanpos_output, _stderr = java(cmd, classpath=self._stanford_jar,
stdout=PIPE, stderr=PIPE)
stanpos_output = stanpos_output.decode(encoding)
# Delete the temporary file
os.unlink(self._input_file_path)
# Return java configurations to their default values
config_java(options=default_options, verbose=False)
return self.parse_output(stanpos_output, sentences)
def parse_output(self, text, sentences = None):
# Output the tagged sentences
tagged_sentences = []
for tagged_sentence in text.strip().split("\n"):
sentence = []
for tagged_word in tagged_sentence.strip().split():
word_tags = tagged_word.strip().split(self._SEPARATOR)
sentence.append((''.join(word_tags[:-1]), word_tags[-1]))
tagged_sentences.append(sentence)
return tagged_sentences
class StanfordPOSTagger(StanfordTagger):
"""
A class for pos tagging with Stanford Tagger. The input is the paths to:
- a model trained on training data
- (optionally) the path to the stanford tagger jar file. If not specified here,
          then this jar file must be specified in the CLASSPATH environment variable.
- (optionally) the encoding of the training data (default: UTF-8)
Example:
>>> from nltk.tag import StanfordPOSTagger
>>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP
>>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
[('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
"""
_SEPARATOR = '_'
_JAR = 'stanford-postagger.jar'
def __init__(self, *args, **kwargs):
super(StanfordPOSTagger, self).__init__(*args, **kwargs)
@property
def _cmd(self):
return ['edu.stanford.nlp.tagger.maxent.MaxentTagger',
'-model', self._stanford_model, '-textFile',
self._input_file_path, '-tokenize', 'false','-outputFormatOptions', 'keepEmptySentences']
class StanfordNERTagger(StanfordTagger):
"""
A class for Named-Entity Tagging with Stanford Tagger. The input is the paths to:
- a model trained on training data
- (optionally) the path to the stanford tagger jar file. If not specified here,
          then this jar file must be specified in the CLASSPATH environment variable.
- (optionally) the encoding of the training data (default: UTF-8)
Example:
>>> from nltk.tag import StanfordNERTagger
>>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP
>>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP
[('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'),
('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'),
('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')]
"""
_SEPARATOR = '/'
_JAR = 'stanford-ner.jar'
_FORMAT = 'slashTags'
def __init__(self, *args, **kwargs):
super(StanfordNERTagger, self).__init__(*args, **kwargs)
@property
def _cmd(self):
# Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer
return ['edu.stanford.nlp.ie.crf.CRFClassifier',
'-loadClassifier', self._stanford_model, '-textFile',
self._input_file_path, '-outputFormat', self._FORMAT, '-tokenizerFactory', 'edu.stanford.nlp.process.WhitespaceTokenizer', '-tokenizerOptions','\"tokenizeNLs=false\"']
def parse_output(self, text, sentences):
if self._FORMAT == 'slashTags':
            # Join all tagged tokens together into one big list
tagged_sentences = []
for tagged_sentence in text.strip().split("\n"):
for tagged_word in tagged_sentence.strip().split():
word_tags = tagged_word.strip().split(self._SEPARATOR)
tagged_sentences.append((''.join(word_tags[:-1]), word_tags[-1]))
# Separate it according to the input
result = []
start = 0
for sent in sentences:
result.append(tagged_sentences[start:start + len(sent)])
                start += len(sent)
return result
raise NotImplementedError
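# The StanfordTagger docstring above lists what a subclass must provide: _SEPARATOR, _JAR and the _cmd
# property. A minimal sketch of such a subclass follows; the jar name and Java main class are invented
# purely for illustration and do not correspond to a real Stanford tool.
class MyTagger(StanfordTagger):
    _SEPARATOR = '_'
    _JAR = 'my-tagger.jar'  # hypothetical jar name
    @property
    def _cmd(self):
        # Arguments handed to the Java process by tag_sents().
        return ['edu.example.MyTaggerMain',
                '-model', self._stanford_model,
                '-textFile', self._input_file_path]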
| 39.685279
| 183
| 0.637375
|
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import os
import tempfile
from subprocess import PIPE
import warnings
from nltk.internals import find_file, find_jar, config_java, java, _java_options, find_jars_within_path
from nltk.tag.api import TaggerI
from nltk import compat
_stanford_url = 'http://nlp.stanford.edu/software'
class StanfordTagger(TaggerI):
_SEPARATOR = ''
_JAR = ''
def __init__(self, model_filename, path_to_jar=None, encoding='utf8', verbose=False, java_options='-mx1000m'):
if not self._JAR:
warnings.warn('The StanfordTagger class is not meant to be '
'instantiated directly. Did you mean StanfordPOSTagger or StanfordNERTagger?')
self._stanford_jar = find_jar(
self._JAR, path_to_jar,
searchpath=(), url=_stanford_url,
verbose=verbose)
self._stanford_model = find_file(model_filename,
env_vars=('STANFORD_MODELS',), verbose=verbose)
# Adding logging jar files to classpath
stanford_dir = os.path.split(self._stanford_jar)[0]
self._stanford_jar = tuple(find_jars_within_path(stanford_dir))
self._encoding = encoding
self.java_options = java_options
@property
def _cmd(self):
raise NotImplementedError
def tag(self, tokens):
# This function should return list of tuple rather than list of list
return sum(self.tag_sents([tokens]), [])
def tag_sents(self, sentences):
encoding = self._encoding
default_options = ' '.join(_java_options)
config_java(options=self.java_options, verbose=False)
# Create a temporary input file
_input_fh, self._input_file_path = tempfile.mkstemp(text=True)
cmd = list(self._cmd)
cmd.extend(['-encoding', encoding])
# Write the actual sentences to the temporary input file
_input_fh = os.fdopen(_input_fh, 'wb')
_input = '\n'.join((' '.join(x) for x in sentences))
if isinstance(_input, compat.text_type) and encoding:
_input = _input.encode(encoding)
_input_fh.write(_input)
_input_fh.close()
# Run the tagger and get the output
stanpos_output, _stderr = java(cmd, classpath=self._stanford_jar,
stdout=PIPE, stderr=PIPE)
stanpos_output = stanpos_output.decode(encoding)
# Delete the temporary file
os.unlink(self._input_file_path)
# Return java configurations to their default values
config_java(options=default_options, verbose=False)
return self.parse_output(stanpos_output, sentences)
def parse_output(self, text, sentences = None):
# Output the tagged sentences
tagged_sentences = []
for tagged_sentence in text.strip().split("\n"):
sentence = []
for tagged_word in tagged_sentence.strip().split():
word_tags = tagged_word.strip().split(self._SEPARATOR)
sentence.append((''.join(word_tags[:-1]), word_tags[-1]))
tagged_sentences.append(sentence)
return tagged_sentences
class StanfordPOSTagger(StanfordTagger):
_SEPARATOR = '_'
_JAR = 'stanford-postagger.jar'
def __init__(self, *args, **kwargs):
super(StanfordPOSTagger, self).__init__(*args, **kwargs)
@property
def _cmd(self):
return ['edu.stanford.nlp.tagger.maxent.MaxentTagger',
'-model', self._stanford_model, '-textFile',
self._input_file_path, '-tokenize', 'false','-outputFormatOptions', 'keepEmptySentences']
class StanfordNERTagger(StanfordTagger):
_SEPARATOR = '/'
_JAR = 'stanford-ner.jar'
_FORMAT = 'slashTags'
def __init__(self, *args, **kwargs):
super(StanfordNERTagger, self).__init__(*args, **kwargs)
@property
def _cmd(self):
# Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer
return ['edu.stanford.nlp.ie.crf.CRFClassifier',
'-loadClassifier', self._stanford_model, '-textFile',
self._input_file_path, '-outputFormat', self._FORMAT, '-tokenizerFactory', 'edu.stanford.nlp.process.WhitespaceTokenizer', '-tokenizerOptions','\"tokenizeNLs=false\"']
def parse_output(self, text, sentences):
if self._FORMAT == 'slashTags':
            # Join all tagged tokens together into one big list
tagged_sentences = []
for tagged_sentence in text.strip().split("\n"):
for tagged_word in tagged_sentence.strip().split():
word_tags = tagged_word.strip().split(self._SEPARATOR)
tagged_sentences.append((''.join(word_tags[:-1]), word_tags[-1]))
# Separate it according to the input
result = []
start = 0
for sent in sentences:
result.append(tagged_sentences[start:start + len(sent)])
                start += len(sent)
return result
raise NotImplementedError
| true
| true
|
790ad3db4c845d8c00ddade9f2c78fee16710c04
| 8,406
|
py
|
Python
|
docs/source/conf.py
|
shawnr/submittable-api-client
|
ff0ac5f4bfcfbaa332b8611e5404b088609c3a47
|
[
"MIT"
] | 2
|
2016-06-06T21:19:29.000Z
|
2019-05-19T00:19:43.000Z
|
docs/source/conf.py
|
shawnr/submittable-api-client
|
ff0ac5f4bfcfbaa332b8611e5404b088609c3a47
|
[
"MIT"
] | 1
|
2019-03-19T21:03:06.000Z
|
2019-03-20T13:17:54.000Z
|
docs/source/conf.py
|
shawnr/submittable-api-client
|
ff0ac5f4bfcfbaa332b8611e5404b088609c3a47
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Submittable API Client documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 15:21:21 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
dirname = os.path.dirname
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(dirname(dirname(dirname(os.path.abspath(__file__)))), 'submittable_api_client'))
print(sys.path)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Submittable API Client'
copyright = u'2014, Shawn Rider'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SubmittableAPIClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SubmittableAPIClient.tex', u'Submittable API Client Documentation',
u'Shawn Rider', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'submittableapiclient', u'Submittable API Client Documentation',
[u'Shawn Rider'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SubmittableAPIClient', u'Submittable API Client Documentation',
u'Shawn Rider', 'SubmittableAPIClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.961977
| 112
| 0.721865
|
import sys
import os
dirname = os.path.dirname
sys.path.insert(0, os.path.join(dirname(dirname(dirname(os.path.abspath(__file__)))), 'submittable_api_client'))
print(sys.path)
extensions = [
'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Submittable API Client'
copyright = u'2014, Shawn Rider'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SubmittableAPIClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SubmittableAPIClient.tex', u'Submittable API Client Documentation',
u'Shawn Rider', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'submittableapiclient', u'Submittable API Client Documentation',
[u'Shawn Rider'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SubmittableAPIClient', u'Submittable API Client Documentation',
u'Shawn Rider', 'SubmittableAPIClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| false
| true
|
790ad4582008ab1ba07b35f56d8a3633c3c83fc4
| 9,708
|
py
|
Python
|
tests/test_clustering.py
|
hakimakbarmaulana/dtaidistance
|
ddf4a8111732d4429686d96c9195a81151be1dd8
|
[
"Apache-2.0"
] | 711
|
2017-02-07T07:24:58.000Z
|
2022-03-31T07:46:47.000Z
|
tests/test_clustering.py
|
hakimakbarmaulana/dtaidistance
|
ddf4a8111732d4429686d96c9195a81151be1dd8
|
[
"Apache-2.0"
] | 142
|
2018-04-09T10:36:11.000Z
|
2022-03-31T11:30:26.000Z
|
tests/test_clustering.py
|
hakimakbarmaulana/dtaidistance
|
ddf4a8111732d4429686d96c9195a81151be1dd8
|
[
"Apache-2.0"
] | 155
|
2017-06-01T08:37:45.000Z
|
2022-03-23T08:50:13.000Z
|
import os
import sys
import tempfile
import pytest
import logging
from pathlib import Path
from dtaidistance import dtw, dtw_ndim, clustering, util_numpy
import dtaidistance.dtw_visualisation as dtwvis
from dtaidistance.exceptions import PyClusteringException
logger = logging.getLogger("be.kuleuven.dtai.distance")
directory = None
numpyonly = pytest.mark.skipif("util_numpy.test_without_numpy()")
scipyonly = pytest.mark.skipif("util_numpy.test_without_scipy()")
@numpyonly
def test_clustering():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (1, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, 2, merge_hook=test_hook,
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 3, 4}
assert cluster_idx[2] == {2, 5}
@numpyonly
def test_clustering_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@numpyonly
def test_clustering_tree_ndim():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[[0.,0.], [0,0], [1,0], [2,0], [1,0], [0,0], [1,0], [0,0], [0,0]],
[[0.,0.], [1,0], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [0,0]],
[[1.,0.], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [1,0], [1,0]]])
model = clustering.Hierarchical(dtw_ndim.distance_matrix_fast, {'ndim':2},
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 2}
@numpyonly
def test_clustering_tree_maxdist():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False, max_dist=0.1)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_linkage_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
cluster_idx = model.fit(s)
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
model.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(model.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_controlchart():
with util_numpy.test_uses_numpy() as np:
series = np.zeros((600, 60))
rsrc_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rsrc', 'synthetic_control.data')
with open(rsrc_fn, 'r') as ifile:
for idx, line in enumerate(ifile.readlines()):
series[idx, :] = line.split()
s = []
for idx in range(0, 600, 20):
s.append(series[idx, :])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {'parallel': True})
cluster_idx = model.fit(s)
if not dtwvis.test_without_visualization():
import matplotlib.pyplot as plt
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 10))
show_ts_label = lambda idx: "ts-" + str(idx)
# show_ts_label = list(range(len(s)))
def curcmap(idx):
if idx % 2 == 0:
return 'r'
return 'g'
model.plot(hierarchy_fn, axes=ax, show_ts_label=show_ts_label,
show_tr_label=True, ts_label_margin=-10,
ts_left_margin=10, ts_sample_length=1, ts_color=curcmap)
print("Figure saved to", hierarchy_fn)
@scipyonly
@numpyonly
def test_plotbug1():
with util_numpy.test_uses_numpy() as np:
s1 = np.array([0., 0, 1, 2, 1, 0, 1, 0, 0, 2, 1, 0, 0])
s2 = np.array([0., 1, 2, 3, 1, 0, 0, 0, 2, 1, 0, 0])
series = s1, s2
m = clustering.LinkageTree(dtw.distance_matrix, {})
m.fit(series)
if not dtwvis.test_without_visualization():
if directory:
hierarchy_fn = os.path.join(directory, "clustering.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_clustering.png"
m.plot(hierarchy_fn)
print("Figure save to", hierarchy_fn)
@numpyonly
def test_clustering_centroid():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
# def test_hook(from_idx, to_idx, distance):
# assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.KMedoids(dtw.distance_matrix_fast, {}, k=3,
show_progress=False)
try:
cluster_idx = model.fit(s)
except PyClusteringException:
return
# assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if not dtwvis.test_without_visualization():
if directory:
png_fn = os.path.join(directory, "centroid.png")
else:
file = tempfile.NamedTemporaryFile()
png_fn = file.name + "_centroid.png"
model.plot(png_fn)
print("Figure saved to", png_fn)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
# test_clustering_tree()
test_clustering_tree_ndim()
# test_clustering_tree_maxdist()
# test_linkage_tree()
# test_controlchart()
# test_plotbug1()
# test_clustering_centroid()
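# Outside the test harness, the same building blocks can be combined directly. A minimal usage sketch
# (series values are arbitrary; the API calls mirror the tests above):
#
#     import numpy as np
#     from dtaidistance import dtw, clustering
#     series = np.array([
#         [0., 0, 1, 2, 1, 0, 1, 0, 0],
#         [0., 1, 2, 0, 0, 0, 0, 0, 0],
#         [1., 2, 0, 0, 0, 0, 0, 1, 1]])
#     model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, show_progress=False)
#     cluster_idx = model.fit(series)  # maps a cluster root index to the set of member series indices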
| 37.053435
| 108
| 0.525546
|
import os
import sys
import tempfile
import pytest
import logging
from pathlib import Path
from dtaidistance import dtw, dtw_ndim, clustering, util_numpy
import dtaidistance.dtw_visualisation as dtwvis
from dtaidistance.exceptions import PyClusteringException
logger = logging.getLogger("be.kuleuven.dtai.distance")
directory = None
numpyonly = pytest.mark.skipif("util_numpy.test_without_numpy()")
scipyonly = pytest.mark.skipif("util_numpy.test_without_scipy()")
@numpyonly
def test_clustering():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (1, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, 2, merge_hook=test_hook,
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 3, 4}
assert cluster_idx[2] == {2, 5}
@numpyonly
def test_clustering_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@numpyonly
def test_clustering_tree_ndim():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[[0.,0.], [0,0], [1,0], [2,0], [1,0], [0,0], [1,0], [0,0], [0,0]],
[[0.,0.], [1,0], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [0,0]],
[[1.,0.], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [1,0], [1,0]]])
model = clustering.Hierarchical(dtw_ndim.distance_matrix_fast, {'ndim':2},
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 2}
@numpyonly
def test_clustering_tree_maxdist():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False, max_dist=0.1)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_linkage_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
cluster_idx = model.fit(s)
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
model.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(model.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_controlchart():
with util_numpy.test_uses_numpy() as np:
series = np.zeros((600, 60))
rsrc_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rsrc', 'synthetic_control.data')
with open(rsrc_fn, 'r') as ifile:
for idx, line in enumerate(ifile.readlines()):
series[idx, :] = line.split()
s = []
for idx in range(0, 600, 20):
s.append(series[idx, :])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {'parallel': True})
cluster_idx = model.fit(s)
if not dtwvis.test_without_visualization():
import matplotlib.pyplot as plt
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 10))
show_ts_label = lambda idx: "ts-" + str(idx)
def curcmap(idx):
if idx % 2 == 0:
return 'r'
return 'g'
model.plot(hierarchy_fn, axes=ax, show_ts_label=show_ts_label,
show_tr_label=True, ts_label_margin=-10,
ts_left_margin=10, ts_sample_length=1, ts_color=curcmap)
print("Figure saved to", hierarchy_fn)
@scipyonly
@numpyonly
def test_plotbug1():
with util_numpy.test_uses_numpy() as np:
s1 = np.array([0., 0, 1, 2, 1, 0, 1, 0, 0, 2, 1, 0, 0])
s2 = np.array([0., 1, 2, 3, 1, 0, 0, 0, 2, 1, 0, 0])
series = s1, s2
m = clustering.LinkageTree(dtw.distance_matrix, {})
m.fit(series)
if not dtwvis.test_without_visualization():
if directory:
hierarchy_fn = os.path.join(directory, "clustering.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_clustering.png"
m.plot(hierarchy_fn)
print("Figure save to", hierarchy_fn)
@numpyonly
def test_clustering_centroid():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
model = clustering.KMedoids(dtw.distance_matrix_fast, {}, k=3,
show_progress=False)
try:
cluster_idx = model.fit(s)
except PyClusteringException:
return
if not dtwvis.test_without_visualization():
if directory:
png_fn = os.path.join(directory, "centroid.png")
else:
file = tempfile.NamedTemporaryFile()
png_fn = file.name + "_centroid.png"
model.plot(png_fn)
print("Figure saved to", png_fn)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
test_clustering_tree_ndim()
| true
| true
|
790ad4e241a7003192a040b85576df147daa3b37
| 2,025
|
py
|
Python
|
portfolio/Python/scrapy/seapets/thepetexpress.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/seapets/thepetexpress.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/seapets/thepetexpress.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | 5
|
2016-03-22T07:40:46.000Z
|
2021-05-30T16:12:21.000Z
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class thepetexpress_spider(BaseSpider):
name = 'thepetexpress.co.uk'
allowed_domains = ['thepetexpress.co.uk', 'www.thepetexpress.co.uk']
start_urls = ('http://www.thepetexpress.co.uk/',)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
categories = hxs.select(u'//nav[@class="cat"]/ul/li/ul/li/a/@href').extract()
for url in categories:
url = urljoin_rfc(get_base_url(response), url + u'?sort=titledesc')
yield Request(url)
# pagination
next_page = hxs.select(u'//a[@class="nxt"]/@href').extract()
if next_page:
next_page = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(next_page)
# products
products = hxs.select(u'//div[@class="products"]//a/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_value('url', response.url)
loader.add_xpath('name', u'//div[@id="product"]/h1/text()')
loader.add_xpath('price', u'//p[@class="price"]/span[@class="our_price"]/text()')
if loader.get_output_value('price'):
yield loader.load_item()
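# As with any Scrapy project spider, this one would normally be launched from the surrounding
# product_spiders project by its name attribute, e.g. (assuming the project environment is configured):
#     scrapy crawl thepetexpress.co.uk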
| 35.526316
| 89
| 0.668148
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class thepetexpress_spider(BaseSpider):
name = 'thepetexpress.co.uk'
allowed_domains = ['thepetexpress.co.uk', 'www.thepetexpress.co.uk']
start_urls = ('http://www.thepetexpress.co.uk/',)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
categories = hxs.select(u'//nav[@class="cat"]/ul/li/ul/li/a/@href').extract()
for url in categories:
url = urljoin_rfc(get_base_url(response), url + u'?sort=titledesc')
yield Request(url)
next_page = hxs.select(u'//a[@class="nxt"]/@href').extract()
if next_page:
next_page = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(next_page)
products = hxs.select(u'//div[@class="products"]//a/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_value('url', response.url)
loader.add_xpath('name', u'//div[@id="product"]/h1/text()')
loader.add_xpath('price', u'//p[@class="price"]/span[@class="our_price"]/text()')
if loader.get_output_value('price'):
yield loader.load_item()
| true
| true
|
790ad5094a9424e40f43d661a64daa14ff9734db
| 490
|
py
|
Python
|
modulo 3/aulas/4.0 - Funcoes.py
|
GabrielBrotas/Python
|
9441b6b86ff3cb7fa5921b508c484075adac08b3
|
[
"MIT"
] | null | null | null |
modulo 3/aulas/4.0 - Funcoes.py
|
GabrielBrotas/Python
|
9441b6b86ff3cb7fa5921b508c484075adac08b3
|
[
"MIT"
] | null | null | null |
modulo 3/aulas/4.0 - Funcoes.py
|
GabrielBrotas/Python
|
9441b6b86ff3cb7fa5921b508c484075adac08b3
|
[
"MIT"
] | null | null | null |
# Functions help simplify the program when we have repetitive tasks
def lin():  # to define a function it must have parentheses at the end
print('=-'*30)
lin()
print('Bem Vindo')
lin()
nome = str(input('Qual seu nome? '))
lin()
print(f'Tenha um otimo dia {nome}!')
lin()
def mensagem(msg):
print('-'*30)
    print(msg)  # The message that appears here is whatever is passed in when the function is called
print('-'*30)
mensagem('SISTEMA DE ALUNOS')
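# Following the same idea, a function can also receive values and return a result; this small example
# is an illustrative addition and was not part of the original lesson.
def soma(a, b):
    return a + b
print(soma(2, 3))  # prints 5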
| 20.416667
| 98
| 0.677551
|
def lin():
print('=-'*30)
lin()
print('Bem Vindo')
lin()
nome = str(input('Qual seu nome? '))
lin()
print(f'Tenha um otimo dia {nome}!')
lin()
def mensagem(msg):
print('-'*30)
print(msg)
print('-'*30)
mensagem('SISTEMA DE ALUNOS')
| true
| true
|
790ad5e42ec47c633f039098b436e19e0c16f40a
| 956
|
py
|
Python
|
census_data_downloader/core/decorators.py
|
JoeGermuska/census-data-downloader
|
0098b9e522b78ad0e30301c9845ecbcc903c62e4
|
[
"MIT"
] | 170
|
2019-04-01T01:41:42.000Z
|
2022-03-25T21:22:06.000Z
|
census_data_downloader/core/decorators.py
|
JoeGermuska/census-data-downloader
|
0098b9e522b78ad0e30301c9845ecbcc903c62e4
|
[
"MIT"
] | 68
|
2019-03-31T22:52:43.000Z
|
2021-08-30T16:33:54.000Z
|
census_data_downloader/core/decorators.py
|
JoeGermuska/census-data-downloader
|
0098b9e522b78ad0e30301c9845ecbcc903c62e4
|
[
"MIT"
] | 34
|
2019-04-02T17:57:16.000Z
|
2022-03-28T17:22:35.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*
"""
Decorators to help manage our custom classes.
"""
TABLE_LIST = []
def register(cls):
"""
A decorator to register new table configuration classes.
"""
TABLE_LIST.append(cls)
return cls
def downloader(func):
"""
A decorator to download data inside a table configuration class.
"""
def inner(*args, **kwargs):
# Grab the TableConfig
table_config = args[0]
# Grab the geotype downloader class by running the metaprogramming function
downloader_klass = func(table_config)
# For each year authorized on the config
for year in table_config.years_to_download:
# Create the geotype downloader instance
downloader = downloader_klass(table_config, year)
# Download the raw data
downloader.download()
# Process the data
downloader.process()
return inner
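# A hedged sketch of how these two decorators are meant to be combined on a table configuration class;
# the class and downloader names below are invented for illustration and are not the library's real ones.
#
#     class FakeGeotypeDownloader(object):
#         # Stand-in for a real geotype downloader.
#         def __init__(self, table_config, year):
#             self.table_config, self.year = table_config, year
#         def download(self):
#             pass
#         def process(self):
#             pass
#
#     @register                    # adds the config class to TABLE_LIST
#     class ExampleTableConfig(object):
#         years_to_download = [2017]
#         @downloader               # wraps the method so that calling it downloads every configured year
#         def download_everything(self):
#             # The decorated method returns the downloader class to instantiate per year.
#             return FakeGeotypeDownloader
#
#     ExampleTableConfig().download_everything()  # downloads and processes each configured year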
| 27.314286
| 83
| 0.634937
|
TABLE_LIST = []
def register(cls):
TABLE_LIST.append(cls)
return cls
def downloader(func):
def inner(*args, **kwargs):
table_config = args[0]
downloader_klass = func(table_config)
for year in table_config.years_to_download:
downloader = downloader_klass(table_config, year)
downloader.download()
downloader.process()
return inner
| true
| true
|
790ad5e933260f2e5e4e6abcd110a9e775d9b371
| 10,790
|
py
|
Python
|
scripts/irods/database_upgrade.py
|
tempoz/irods
|
a64c5e9cfb86af725f8f20ae940591adef8e02f0
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/irods/database_upgrade.py
|
tempoz/irods
|
a64c5e9cfb86af725f8f20ae940591adef8e02f0
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/irods/database_upgrade.py
|
tempoz/irods
|
a64c5e9cfb86af725f8f20ae940591adef8e02f0
|
[
"BSD-3-Clause"
] | null | null | null |
from . import database_connect
from .exceptions import IrodsError, IrodsWarning
import logging
import re
def run_update(irods_config, cursor):
l = logging.getLogger(__name__)
new_schema_version = database_connect.get_schema_version_in_database(cursor) + 1
l.info('Updating to schema version %d...', new_schema_version)
if new_schema_version == 2:
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listQueryByAliasLike', 'SELECT alias, sqlStr FROM R_SPECIFIC_QUERY WHERE alias LIKE ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('findQueryByAlias', 'SELECT alias, sqlStr FROM R_SPECIFIC_QUERY WHERE alias = ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('ilsLACollections', 'SELECT c.parent_coll_name, c.coll_name, c.create_ts, c.modify_ts, c.coll_id, c.coll_owner_name, c.coll_owner_zone, c.coll_type, u.user_name, u.zone_name, a.access_type_id, u.user_id FROM R_COLL_MAIN c JOIN R_OBJT_ACCESS a ON c.coll_id = a.object_id JOIN R_USER_MAIN u ON a.user_id = u.user_id WHERE c.parent_coll_name = ? ORDER BY c.coll_name, u.user_name, a.access_type_id DESC LIMIT ? OFFSET ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('ilsLADataObjects', 'SELECT s.coll_name, s.data_name, s.create_ts, s.modify_ts, s.data_id, s.data_size, s.data_repl_num, s.data_owner_name, s.data_owner_zone, u.user_name, u.user_id, a.access_type_id, u.user_type_name, u.zone_name FROM ( SELECT c.coll_name, d.data_name, d.create_ts, d.modify_ts, d.data_id, d.data_repl_num, d.data_size, d.data_owner_name, d.data_owner_zone FROM R_COLL_MAIN c JOIN R_DATA_MAIN d ON c.coll_id = d.coll_id WHERE c.coll_name = ? ORDER BY d.data_name) s JOIN R_OBJT_ACCESS a ON s.data_id = a.object_id JOIN R_USER_MAIN u ON a.user_id = u.user_id ORDER BY s.coll_name, s.data_name, u.user_name, a.access_type_id DESC LIMIT ? OFFSET ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listSharedCollectionsOwnedByUser', 'SELECT DISTINCT R_COLL_MAIN.coll_id, R_COLL_MAIN.parent_coll_name, R_COLL_MAIN.coll_name, R_COLL_MAIN.coll_owner_name, R_COLL_MAIN.coll_owner_zone, R_META_MAIN.meta_attr_name, R_META_MAIN.meta_attr_value, R_META_MAIN.meta_attr_unit FROM R_COLL_MAIN JOIN R_OBJT_METAMAP ON R_COLL_MAIN.coll_id = R_OBJT_METAMAP.object_id JOIN R_META_MAIN ON R_OBJT_METAMAP.meta_id = R_META_MAIN.meta_id WHERE R_META_MAIN.meta_attr_unit = ''iRODSUserTagging:Share'' AND R_COLL_MAIN.coll_owner_name = ? AND R_COLL_MAIN.coll_owner_zone = ? ORDER BY R_COLL_MAIN.parent_coll_name ASC, R_COLL_MAIN.coll_name ASC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listSharedCollectionsSharedWithUser', 'SELECT DISTINCT R_COLL_MAIN.coll_id, R_COLL_MAIN.parent_coll_name, R_COLL_MAIN.coll_name, R_COLL_MAIN.coll_owner_name, R_COLL_MAIN.coll_owner_zone, R_META_MAIN.meta_attr_name, R_META_MAIN.meta_attr_value, R_META_MAIN.meta_attr_unit, R_USER_MAIN.user_name, R_USER_MAIN.zone_name, R_OBJT_ACCESS.access_type_id FROM R_COLL_MAIN JOIN R_OBJT_METAMAP ON R_COLL_MAIN.coll_id = R_OBJT_METAMAP.object_id JOIN R_META_MAIN ON R_OBJT_METAMAP.meta_id = R_META_MAIN.meta_id JOIN R_OBJT_ACCESS ON R_COLL_MAIN.coll_id = R_OBJT_ACCESS.object_id JOIN R_USER_MAIN ON R_OBJT_ACCESS.user_id = R_USER_MAIN.user_id WHERE R_META_MAIN.meta_attr_unit = ''iRODSUserTagging:Share'' AND R_USER_MAIN.user_name = ? AND R_USER_MAIN.zone_name = ? AND R_COLL_MAIN.coll_owner_name <> ? ORDER BY R_COLL_MAIN.parent_coll_name ASC, R_COLL_MAIN.coll_name ASC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listUserACLForDataObjViaGroup', 'SELECT R_USER_MAIN.user_name, R_USER_MAIN.user_id, R_OBJT_ACCESS.access_type_id, R_USER_MAIN.user_type_name, R_USER_MAIN.zone_name, R_COLL_MAIN.coll_name, R_DATA_MAIN.data_name, USER_GROUP_MAIN.user_name, R_DATA_MAIN.data_name, R_COLL_MAIN.coll_name FROM R_USER_MAIN AS USER_GROUP_MAIN JOIN R_USER_GROUP JOIN R_USER_MAIN ON R_USER_GROUP.user_id = R_USER_MAIN.user_id ON USER_GROUP_MAIN.user_id = R_USER_GROUP.group_user_id JOIN R_OBJT_ACCESS ON R_USER_GROUP.group_user_id = R_OBJT_ACCESS.user_id JOIN R_DATA_MAIN JOIN R_COLL_MAIN ON R_DATA_MAIN.coll_id = R_COLL_MAIN.coll_id ON R_OBJT_ACCESS.object_id = R_DATA_MAIN.data_id WHERE R_COLL_MAIN.coll_name = ? AND R_DATA_MAIN.data_name = ? AND R_USER_MAIN.user_name = ? ORDER BY R_COLL_MAIN.coll_name, R_DATA_MAIN.data_name, R_USER_MAIN.user_name, R_OBJT_ACCESS.access_type_id DESC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listUserACLForCollectionViaGroup', 'SELECT R_USER_MAIN.user_name, R_USER_MAIN.user_id, R_OBJT_ACCESS.access_type_id, R_USER_MAIN.user_type_name, R_USER_MAIN.zone_name, R_COLL_MAIN.coll_name, USER_GROUP_MAIN.user_name, R_COLL_MAIN.coll_name FROM R_USER_MAIN AS USER_GROUP_MAIN JOIN R_USER_GROUP JOIN R_USER_MAIN ON R_USER_GROUP.user_id = R_USER_MAIN.user_id ON USER_GROUP_MAIN.user_id = R_USER_GROUP.group_user_id JOIN R_OBJT_ACCESS ON R_USER_GROUP.group_user_id = R_OBJT_ACCESS.user_id JOIN R_COLL_MAIN ON R_OBJT_ACCESS.object_id = R_COLL_MAIN.coll_id WHERE R_COLL_MAIN.coll_name = ? AND R_USER_MAIN.user_name = ? ORDER BY R_COLL_MAIN.coll_name, R_USER_MAIN.user_name, R_OBJT_ACCESS.access_type_id DESC', '1388534400');")
elif new_schema_version == 3:
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('DataObjInCollReCur', 'WITH coll AS (SELECT coll_id, coll_name FROM r_coll_main WHERE R_COLL_MAIN.coll_name = ? OR R_COLL_MAIN.coll_name LIKE ?) SELECT DISTINCT d.data_id, (SELECT coll_name FROM coll WHERE coll.coll_id = d.coll_id) coll_name, d.data_name, d.data_repl_num, d.resc_name, d.data_path, d.resc_hier FROM R_DATA_MAIN d WHERE d.coll_id = ANY(ARRAY(SELECT coll_id FROM coll)) ORDER BY coll_name, d.data_name, d.data_repl_num', '1388534400');")
elif new_schema_version == 4:
database_connect.execute_sql_statement(cursor, "create index idx_quota_main1 on R_QUOTA_MAIN (user_id);")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'domainadmin';")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'rodscurators';")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'storageadmin';")
if irods_config.catalog_database_type == 'mysql':
database_connect.execute_sql_statement(cursor, "delete from R_SPECIFIC_QUERY where alias = 'DataObjInCollReCur';")
elif new_schema_version == 5:
if irods_config.catalog_database_type == 'oracle':
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_DATA_MAIN ADD resc_id integer;")
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_RESC_MAIN ADD resc_parent_context varchar2(4000);") # max oracle varchar2 for sql is 4000, 32767 pl/sql
else:
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_DATA_MAIN ADD resc_id bigint;")
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_RESC_MAIN ADD resc_parent_context varchar(4000);")
database_connect.execute_sql_statement(cursor, "UPDATE R_SPECIFIC_QUERY SET sqlstr='WITH coll AS (SELECT coll_id, coll_name FROM R_COLL_MAIN WHERE R_COLL_MAIN.coll_name = ? OR R_COLL_MAIN.coll_name LIKE ?) SELECT DISTINCT d.data_id, (SELECT coll_name FROM coll WHERE coll.coll_id = d.coll_id) coll_name, d.data_name, d.data_repl_num, d.resc_name, d.data_path, d.resc_id FROM R_DATA_MAIN d WHERE d.coll_id = ANY(ARRAY(SELECT coll_id FROM coll)) ORDER BY coll_name, d.data_name, d.data_repl_num' where alias='DataObjInCollReCur';")
rows = database_connect.execute_sql_statement(cursor, "select resc_id, resc_name from R_RESC_MAIN;").fetchall()
for row in rows:
resc_id = row[0]
resc_name = row[1]
database_connect.execute_sql_statement(cursor, "update R_DATA_MAIN set resc_id=? where resc_hier=? or resc_hier like ?", resc_id, resc_name, ''.join(['%;', resc_name]))
if irods_config.catalog_database_type == 'postgres':
database_connect.execute_sql_statement(cursor, "update r_resc_main as rdm set resc_parent = am.resc_id from ( select resc_name, resc_id from r_resc_main ) as am where am.resc_name = rdm.resc_parent;")
elif irods_config.catalog_database_type == 'cockroachdb':
rows = database_connect.execute_sql_statement(cursor, "select rdm.resc_id, am.resc_id from r_resc_main rdm, r_resc_main am where am.resc_name = rdm.resc_parent;").fetchall()
for row in rows:
resc_id = row[0]
resc_id2 = row[1]
database_connect.execute_sql_statement(cursor, "update r_resc_main set resc_parent = ? where resc_id = ?;", resc_id2, resc_id)
elif irods_config.catalog_database_type == 'mysql':
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN as rdm, ( select resc_name, resc_id from R_RESC_MAIN ) as am set rdm.resc_parent = am.resc_id where am.resc_name = rdm.resc_parent;")
else:
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN rdm set resc_parent = ( select resc_id from ( select resc_name, resc_id from R_RESC_MAIN ) am where am.resc_name = rdm.resc_parent );")
rows = database_connect.execute_sql_statement(cursor, "select resc_id, resc_children from R_RESC_MAIN where resc_children is not null;").fetchall()
context_expression = re.compile('^([^{}]*)\\{([^{}]*)\\}')
for row in rows:
resc_id = row[0]
child_contexts = [(m.group(1), m.group(2)) for m in [context_expression.match(s) for s in row[1].split(';')] if m]
for child_name, context in child_contexts:
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN set resc_parent_context=? where resc_name=?", context, child_name)
else:
raise IrodsError('Upgrade to schema version %d is unsupported.' % (new_schema_version))
database_connect.execute_sql_statement(cursor, "update R_GRID_CONFIGURATION set option_value = ? where namespace = 'database' and option_name = 'schema_version';", new_schema_version)
| 149.861111
| 1,001
| 0.779333
|
from . import database_connect
from .exceptions import IrodsError, IrodsWarning
import logging
import re
def run_update(irods_config, cursor):
l = logging.getLogger(__name__)
new_schema_version = database_connect.get_schema_version_in_database(cursor) + 1
l.info('Updating to schema version %d...', new_schema_version)
if new_schema_version == 2:
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listQueryByAliasLike', 'SELECT alias, sqlStr FROM R_SPECIFIC_QUERY WHERE alias LIKE ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('findQueryByAlias', 'SELECT alias, sqlStr FROM R_SPECIFIC_QUERY WHERE alias = ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('ilsLACollections', 'SELECT c.parent_coll_name, c.coll_name, c.create_ts, c.modify_ts, c.coll_id, c.coll_owner_name, c.coll_owner_zone, c.coll_type, u.user_name, u.zone_name, a.access_type_id, u.user_id FROM R_COLL_MAIN c JOIN R_OBJT_ACCESS a ON c.coll_id = a.object_id JOIN R_USER_MAIN u ON a.user_id = u.user_id WHERE c.parent_coll_name = ? ORDER BY c.coll_name, u.user_name, a.access_type_id DESC LIMIT ? OFFSET ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('ilsLADataObjects', 'SELECT s.coll_name, s.data_name, s.create_ts, s.modify_ts, s.data_id, s.data_size, s.data_repl_num, s.data_owner_name, s.data_owner_zone, u.user_name, u.user_id, a.access_type_id, u.user_type_name, u.zone_name FROM ( SELECT c.coll_name, d.data_name, d.create_ts, d.modify_ts, d.data_id, d.data_repl_num, d.data_size, d.data_owner_name, d.data_owner_zone FROM R_COLL_MAIN c JOIN R_DATA_MAIN d ON c.coll_id = d.coll_id WHERE c.coll_name = ? ORDER BY d.data_name) s JOIN R_OBJT_ACCESS a ON s.data_id = a.object_id JOIN R_USER_MAIN u ON a.user_id = u.user_id ORDER BY s.coll_name, s.data_name, u.user_name, a.access_type_id DESC LIMIT ? OFFSET ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listSharedCollectionsOwnedByUser', 'SELECT DISTINCT R_COLL_MAIN.coll_id, R_COLL_MAIN.parent_coll_name, R_COLL_MAIN.coll_name, R_COLL_MAIN.coll_owner_name, R_COLL_MAIN.coll_owner_zone, R_META_MAIN.meta_attr_name, R_META_MAIN.meta_attr_value, R_META_MAIN.meta_attr_unit FROM R_COLL_MAIN JOIN R_OBJT_METAMAP ON R_COLL_MAIN.coll_id = R_OBJT_METAMAP.object_id JOIN R_META_MAIN ON R_OBJT_METAMAP.meta_id = R_META_MAIN.meta_id WHERE R_META_MAIN.meta_attr_unit = ''iRODSUserTagging:Share'' AND R_COLL_MAIN.coll_owner_name = ? AND R_COLL_MAIN.coll_owner_zone = ? ORDER BY R_COLL_MAIN.parent_coll_name ASC, R_COLL_MAIN.coll_name ASC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listSharedCollectionsSharedWithUser', 'SELECT DISTINCT R_COLL_MAIN.coll_id, R_COLL_MAIN.parent_coll_name, R_COLL_MAIN.coll_name, R_COLL_MAIN.coll_owner_name, R_COLL_MAIN.coll_owner_zone, R_META_MAIN.meta_attr_name, R_META_MAIN.meta_attr_value, R_META_MAIN.meta_attr_unit, R_USER_MAIN.user_name, R_USER_MAIN.zone_name, R_OBJT_ACCESS.access_type_id FROM R_COLL_MAIN JOIN R_OBJT_METAMAP ON R_COLL_MAIN.coll_id = R_OBJT_METAMAP.object_id JOIN R_META_MAIN ON R_OBJT_METAMAP.meta_id = R_META_MAIN.meta_id JOIN R_OBJT_ACCESS ON R_COLL_MAIN.coll_id = R_OBJT_ACCESS.object_id JOIN R_USER_MAIN ON R_OBJT_ACCESS.user_id = R_USER_MAIN.user_id WHERE R_META_MAIN.meta_attr_unit = ''iRODSUserTagging:Share'' AND R_USER_MAIN.user_name = ? AND R_USER_MAIN.zone_name = ? AND R_COLL_MAIN.coll_owner_name <> ? ORDER BY R_COLL_MAIN.parent_coll_name ASC, R_COLL_MAIN.coll_name ASC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listUserACLForDataObjViaGroup', 'SELECT R_USER_MAIN.user_name, R_USER_MAIN.user_id, R_OBJT_ACCESS.access_type_id, R_USER_MAIN.user_type_name, R_USER_MAIN.zone_name, R_COLL_MAIN.coll_name, R_DATA_MAIN.data_name, USER_GROUP_MAIN.user_name, R_DATA_MAIN.data_name, R_COLL_MAIN.coll_name FROM R_USER_MAIN AS USER_GROUP_MAIN JOIN R_USER_GROUP JOIN R_USER_MAIN ON R_USER_GROUP.user_id = R_USER_MAIN.user_id ON USER_GROUP_MAIN.user_id = R_USER_GROUP.group_user_id JOIN R_OBJT_ACCESS ON R_USER_GROUP.group_user_id = R_OBJT_ACCESS.user_id JOIN R_DATA_MAIN JOIN R_COLL_MAIN ON R_DATA_MAIN.coll_id = R_COLL_MAIN.coll_id ON R_OBJT_ACCESS.object_id = R_DATA_MAIN.data_id WHERE R_COLL_MAIN.coll_name = ? AND R_DATA_MAIN.data_name = ? AND R_USER_MAIN.user_name = ? ORDER BY R_COLL_MAIN.coll_name, R_DATA_MAIN.data_name, R_USER_MAIN.user_name, R_OBJT_ACCESS.access_type_id DESC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listUserACLForCollectionViaGroup', 'SELECT R_USER_MAIN.user_name, R_USER_MAIN.user_id, R_OBJT_ACCESS.access_type_id, R_USER_MAIN.user_type_name, R_USER_MAIN.zone_name, R_COLL_MAIN.coll_name, USER_GROUP_MAIN.user_name, R_COLL_MAIN.coll_name FROM R_USER_MAIN AS USER_GROUP_MAIN JOIN R_USER_GROUP JOIN R_USER_MAIN ON R_USER_GROUP.user_id = R_USER_MAIN.user_id ON USER_GROUP_MAIN.user_id = R_USER_GROUP.group_user_id JOIN R_OBJT_ACCESS ON R_USER_GROUP.group_user_id = R_OBJT_ACCESS.user_id JOIN R_COLL_MAIN ON R_OBJT_ACCESS.object_id = R_COLL_MAIN.coll_id WHERE R_COLL_MAIN.coll_name = ? AND R_USER_MAIN.user_name = ? ORDER BY R_COLL_MAIN.coll_name, R_USER_MAIN.user_name, R_OBJT_ACCESS.access_type_id DESC', '1388534400');")
elif new_schema_version == 3:
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('DataObjInCollReCur', 'WITH coll AS (SELECT coll_id, coll_name FROM r_coll_main WHERE R_COLL_MAIN.coll_name = ? OR R_COLL_MAIN.coll_name LIKE ?) SELECT DISTINCT d.data_id, (SELECT coll_name FROM coll WHERE coll.coll_id = d.coll_id) coll_name, d.data_name, d.data_repl_num, d.resc_name, d.data_path, d.resc_hier FROM R_DATA_MAIN d WHERE d.coll_id = ANY(ARRAY(SELECT coll_id FROM coll)) ORDER BY coll_name, d.data_name, d.data_repl_num', '1388534400');")
elif new_schema_version == 4:
database_connect.execute_sql_statement(cursor, "create index idx_quota_main1 on R_QUOTA_MAIN (user_id);")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'domainadmin';")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'rodscurators';")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'storageadmin';")
if irods_config.catalog_database_type == 'mysql':
database_connect.execute_sql_statement(cursor, "delete from R_SPECIFIC_QUERY where alias = 'DataObjInCollReCur';")
elif new_schema_version == 5:
if irods_config.catalog_database_type == 'oracle':
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_DATA_MAIN ADD resc_id integer;")
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_RESC_MAIN ADD resc_parent_context varchar2(4000);")
else:
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_DATA_MAIN ADD resc_id bigint;")
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_RESC_MAIN ADD resc_parent_context varchar(4000);")
database_connect.execute_sql_statement(cursor, "UPDATE R_SPECIFIC_QUERY SET sqlstr='WITH coll AS (SELECT coll_id, coll_name FROM R_COLL_MAIN WHERE R_COLL_MAIN.coll_name = ? OR R_COLL_MAIN.coll_name LIKE ?) SELECT DISTINCT d.data_id, (SELECT coll_name FROM coll WHERE coll.coll_id = d.coll_id) coll_name, d.data_name, d.data_repl_num, d.resc_name, d.data_path, d.resc_id FROM R_DATA_MAIN d WHERE d.coll_id = ANY(ARRAY(SELECT coll_id FROM coll)) ORDER BY coll_name, d.data_name, d.data_repl_num' where alias='DataObjInCollReCur';")
rows = database_connect.execute_sql_statement(cursor, "select resc_id, resc_name from R_RESC_MAIN;").fetchall()
for row in rows:
resc_id = row[0]
resc_name = row[1]
database_connect.execute_sql_statement(cursor, "update R_DATA_MAIN set resc_id=? where resc_hier=? or resc_hier like ?", resc_id, resc_name, ''.join(['%;', resc_name]))
if irods_config.catalog_database_type == 'postgres':
database_connect.execute_sql_statement(cursor, "update r_resc_main as rdm set resc_parent = am.resc_id from ( select resc_name, resc_id from r_resc_main ) as am where am.resc_name = rdm.resc_parent;")
elif irods_config.catalog_database_type == 'cockroachdb':
rows = database_connect.execute_sql_statement(cursor, "select rdm.resc_id, am.resc_id from r_resc_main rdm, r_resc_main am where am.resc_name = rdm.resc_parent;").fetchall()
for row in rows:
resc_id = row[0]
resc_id2 = row[1]
database_connect.execute_sql_statement(cursor, "update r_resc_main set resc_parent = ? where resc_id = ?;", resc_id2, resc_id)
elif irods_config.catalog_database_type == 'mysql':
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN as rdm, ( select resc_name, resc_id from R_RESC_MAIN ) as am set rdm.resc_parent = am.resc_id where am.resc_name = rdm.resc_parent;")
else:
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN rdm set resc_parent = ( select resc_id from ( select resc_name, resc_id from R_RESC_MAIN ) am where am.resc_name = rdm.resc_parent );")
rows = database_connect.execute_sql_statement(cursor, "select resc_id, resc_children from R_RESC_MAIN where resc_children is not null;").fetchall()
context_expression = re.compile('^([^{}]*)\\{([^{}]*)\\}')
for row in rows:
resc_id = row[0]
child_contexts = [(m.group(1), m.group(2)) for m in [context_expression.match(s) for s in row[1].split(';')] if m]
for child_name, context in child_contexts:
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN set resc_parent_context=? where resc_name=?", context, child_name)
else:
raise IrodsError('Upgrade to schema version %d is unsupported.' % (new_schema_version))
database_connect.execute_sql_statement(cursor, "update R_GRID_CONFIGURATION set option_value = ? where namespace = 'database' and option_name = 'schema_version';", new_schema_version)
| true
| true
|
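A minimal driver sketch for the run_update() routine in the record above: each call advances the iRODS catalog by exactly one schema version, so a caller loops until the reported version reaches its target. This is not part of the dumped file; the import style assumes the sketch lives in the same scripts/irods package, and the target-version constant is an assumption for illustration.
from . import database_connect
from . import database_upgrade

TARGET_SCHEMA_VERSION = 5  # assumed target; iRODS defines the real value elsewhere

def upgrade_to_target(irods_config, cursor):
    # Each run_update() call bumps the schema by one version and raises
    # IrodsError if the next version is unsupported, so the loop either
    # reaches the target or stops with an explicit error.
    while database_connect.get_schema_version_in_database(cursor) < TARGET_SCHEMA_VERSION:
        database_upgrade.run_update(irods_config, cursor)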
790ad6bc5dfa928e760095a113c8e989519e42ee
| 9,163
|
py
|
Python
|
laserfiche_api/models/watermark.py
|
Layer8Err/laserfiche_api
|
8c9030c8f5cc245b61858bd096a1ad3c58cdbfd2
|
[
"BSD-2-Clause"
] | 1
|
2021-06-17T23:51:25.000Z
|
2021-06-17T23:51:25.000Z
|
laserfiche_api/models/watermark.py
|
Layer8Err/laserfiche_api
|
8c9030c8f5cc245b61858bd096a1ad3c58cdbfd2
|
[
"BSD-2-Clause"
] | null | null | null |
laserfiche_api/models/watermark.py
|
Layer8Err/laserfiche_api
|
8c9030c8f5cc245b61858bd096a1ad3c58cdbfd2
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
"""
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href=\"https://developer.laserfiche.com\">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Watermark(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'watermark_text': 'str',
'watermark_text_size': 'int',
'watermark_position': 'OneOfWatermarkWatermarkPosition',
'watermark_rotation_angle': 'int',
'is_watermark_mandatory': 'bool',
'watermark_intensity': 'int'
}
attribute_map = {
'watermark_text': 'watermarkText',
'watermark_text_size': 'watermarkTextSize',
'watermark_position': 'watermarkPosition',
'watermark_rotation_angle': 'watermarkRotationAngle',
'is_watermark_mandatory': 'isWatermarkMandatory',
'watermark_intensity': 'watermarkIntensity'
}
def __init__(self, watermark_text=None, watermark_text_size=None, watermark_position=None, watermark_rotation_angle=None, is_watermark_mandatory=None, watermark_intensity=None): # noqa: E501
"""Watermark - a model defined in Swagger""" # noqa: E501
self._watermark_text = None
self._watermark_text_size = None
self._watermark_position = None
self._watermark_rotation_angle = None
self._is_watermark_mandatory = None
self._watermark_intensity = None
self.discriminator = None
if watermark_text is not None:
self.watermark_text = watermark_text
if watermark_text_size is not None:
self.watermark_text_size = watermark_text_size
if watermark_position is not None:
self.watermark_position = watermark_position
if watermark_rotation_angle is not None:
self.watermark_rotation_angle = watermark_rotation_angle
if is_watermark_mandatory is not None:
self.is_watermark_mandatory = is_watermark_mandatory
if watermark_intensity is not None:
self.watermark_intensity = watermark_intensity
@property
def watermark_text(self):
"""Gets the watermark_text of this Watermark. # noqa: E501
        The watermark text associated with the tag definition. # noqa: E501
:return: The watermark_text of this Watermark. # noqa: E501
:rtype: str
"""
return self._watermark_text
@watermark_text.setter
def watermark_text(self, watermark_text):
"""Sets the watermark_text of this Watermark.
        The watermark text associated with the tag definition. # noqa: E501
:param watermark_text: The watermark_text of this Watermark. # noqa: E501
:type: str
"""
self._watermark_text = watermark_text
@property
def watermark_text_size(self):
"""Gets the watermark_text_size of this Watermark. # noqa: E501
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:return: The watermark_text_size of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_text_size
@watermark_text_size.setter
def watermark_text_size(self, watermark_text_size):
"""Sets the watermark_text_size of this Watermark.
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:param watermark_text_size: The watermark_text_size of this Watermark. # noqa: E501
:type: int
"""
self._watermark_text_size = watermark_text_size
@property
def watermark_position(self):
"""Gets the watermark_position of this Watermark. # noqa: E501
The position of the watermark on the page. # noqa: E501
:return: The watermark_position of this Watermark. # noqa: E501
:rtype: OneOfWatermarkWatermarkPosition
"""
return self._watermark_position
@watermark_position.setter
def watermark_position(self, watermark_position):
"""Sets the watermark_position of this Watermark.
The position of the watermark on the page. # noqa: E501
:param watermark_position: The watermark_position of this Watermark. # noqa: E501
:type: OneOfWatermarkWatermarkPosition
"""
self._watermark_position = watermark_position
@property
def watermark_rotation_angle(self):
"""Gets the watermark_rotation_angle of this Watermark. # noqa: E501
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:return: The watermark_rotation_angle of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_rotation_angle
@watermark_rotation_angle.setter
def watermark_rotation_angle(self, watermark_rotation_angle):
"""Sets the watermark_rotation_angle of this Watermark.
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:param watermark_rotation_angle: The watermark_rotation_angle of this Watermark. # noqa: E501
:type: int
"""
self._watermark_rotation_angle = watermark_rotation_angle
@property
def is_watermark_mandatory(self):
"""Gets the is_watermark_mandatory of this Watermark. # noqa: E501
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:return: The is_watermark_mandatory of this Watermark. # noqa: E501
:rtype: bool
"""
return self._is_watermark_mandatory
@is_watermark_mandatory.setter
def is_watermark_mandatory(self, is_watermark_mandatory):
"""Sets the is_watermark_mandatory of this Watermark.
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:param is_watermark_mandatory: The is_watermark_mandatory of this Watermark. # noqa: E501
:type: bool
"""
self._is_watermark_mandatory = is_watermark_mandatory
@property
def watermark_intensity(self):
"""Gets the watermark_intensity of this Watermark. # noqa: E501
        The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501
:return: The watermark_intensity of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_intensity
@watermark_intensity.setter
def watermark_intensity(self, watermark_intensity):
"""Sets the watermark_intensity of this Watermark.
        The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501
:param watermark_intensity: The watermark_intensity of this Watermark. # noqa: E501
:type: int
"""
self._watermark_intensity = watermark_intensity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Watermark, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Watermark):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 36.217391
| 314
| 0.653716
|
import pprint
import re
import six
class Watermark(object):
swagger_types = {
'watermark_text': 'str',
'watermark_text_size': 'int',
'watermark_position': 'OneOfWatermarkWatermarkPosition',
'watermark_rotation_angle': 'int',
'is_watermark_mandatory': 'bool',
'watermark_intensity': 'int'
}
attribute_map = {
'watermark_text': 'watermarkText',
'watermark_text_size': 'watermarkTextSize',
'watermark_position': 'watermarkPosition',
'watermark_rotation_angle': 'watermarkRotationAngle',
'is_watermark_mandatory': 'isWatermarkMandatory',
'watermark_intensity': 'watermarkIntensity'
}
def __init__(self, watermark_text=None, watermark_text_size=None, watermark_position=None, watermark_rotation_angle=None, is_watermark_mandatory=None, watermark_intensity=None):
self._watermark_text = None
self._watermark_text_size = None
self._watermark_position = None
self._watermark_rotation_angle = None
self._is_watermark_mandatory = None
self._watermark_intensity = None
self.discriminator = None
if watermark_text is not None:
self.watermark_text = watermark_text
if watermark_text_size is not None:
self.watermark_text_size = watermark_text_size
if watermark_position is not None:
self.watermark_position = watermark_position
if watermark_rotation_angle is not None:
self.watermark_rotation_angle = watermark_rotation_angle
if is_watermark_mandatory is not None:
self.is_watermark_mandatory = is_watermark_mandatory
if watermark_intensity is not None:
self.watermark_intensity = watermark_intensity
@property
def watermark_text(self):
return self._watermark_text
@watermark_text.setter
def watermark_text(self, watermark_text):
self._watermark_text = watermark_text
@property
def watermark_text_size(self):
return self._watermark_text_size
@watermark_text_size.setter
def watermark_text_size(self, watermark_text_size):
self._watermark_text_size = watermark_text_size
@property
def watermark_position(self):
return self._watermark_position
@watermark_position.setter
def watermark_position(self, watermark_position):
self._watermark_position = watermark_position
@property
def watermark_rotation_angle(self):
return self._watermark_rotation_angle
@watermark_rotation_angle.setter
def watermark_rotation_angle(self, watermark_rotation_angle):
self._watermark_rotation_angle = watermark_rotation_angle
@property
def is_watermark_mandatory(self):
return self._is_watermark_mandatory
@is_watermark_mandatory.setter
def is_watermark_mandatory(self, is_watermark_mandatory):
self._is_watermark_mandatory = is_watermark_mandatory
@property
def watermark_intensity(self):
return self._watermark_intensity
@watermark_intensity.setter
def watermark_intensity(self, watermark_intensity):
self._watermark_intensity = watermark_intensity
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Watermark, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Watermark):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
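A short, hypothetical usage sketch for the auto-generated Watermark model in the record above; the field values are placeholders, the import path is taken from the record's repo path, and only attributes declared in swagger_types are used.
from laserfiche_api.models.watermark import Watermark

wm = Watermark(
    watermark_text="CONFIDENTIAL",   # placeholder values for illustration
    watermark_text_size=24,
    watermark_rotation_angle=45,
    is_watermark_mandatory=True,
    watermark_intensity=50,
)

# to_dict() walks swagger_types and returns plain Python values,
# so the result is ready for JSON serialization.
print(wm.to_dict())

# __eq__ compares the full attribute dict, so two instances are equal
# only when every field matches.
print(wm == Watermark(watermark_text="CONFIDENTIAL"))  # False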
790ad83ca9fa2bfbd11a4af5e5a105ba53305769
| 375
|
py
|
Python
|
tests/test_base.py
|
khulaifi95/cool-search
|
b18b54994fa792de92a6d27a24c0f600af73c93f
|
[
"Unlicense"
] | null | null | null |
tests/test_base.py
|
khulaifi95/cool-search
|
b18b54994fa792de92a6d27a24c0f600af73c93f
|
[
"Unlicense"
] | null | null | null |
tests/test_base.py
|
khulaifi95/cool-search
|
b18b54994fa792de92a6d27a24c0f600af73c93f
|
[
"Unlicense"
] | null | null | null |
import pytest
from cool_search import BaseClass, base_function
given = pytest.mark.parametrize
@given("fn", [BaseClass(), base_function])
def test_parameterized(fn):
assert "hello from" in fn()
def test_base_function():
assert base_function() == "hello from base function"
def test_base_class():
assert BaseClass().base_method() == "hello from BaseClass"
| 19.736842
| 62
| 0.733333
|
import pytest
from cool_search import BaseClass, base_function
given = pytest.mark.parametrize
@given("fn", [BaseClass(), base_function])
def test_parameterized(fn):
assert "hello from" in fn()
def test_base_function():
assert base_function() == "hello from base function"
def test_base_class():
assert BaseClass().base_method() == "hello from BaseClass"
| true
| true
|
790ad8b8e290b7e193979ee04d3a148e6f33e9c9
| 12,058
|
py
|
Python
|
homeassistant/components/media_player/soundtouch.py
|
maihde/home-assistant
|
688d70644949a658e0607e52b3896a2c4c8a85e7
|
[
"Apache-2.0"
] | 2
|
2018-01-15T06:36:11.000Z
|
2019-06-06T14:08:01.000Z
|
homeassistant/components/media_player/soundtouch.py
|
maihde/home-assistant
|
688d70644949a658e0607e52b3896a2c4c8a85e7
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/media_player/soundtouch.py
|
maihde/home-assistant
|
688d70644949a658e0607e52b3896a2c4c8a85e7
|
[
"Apache-2.0"
] | 3
|
2018-08-27T10:08:30.000Z
|
2020-07-04T10:07:03.000Z
|
"""
Support for interface with a Bose Soundtouch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.soundtouch/
"""
import logging
import re
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_VOLUME_SET, SUPPORT_TURN_ON, SUPPORT_PLAY, MediaPlayerDevice,
PLATFORM_SCHEMA)
from homeassistant.const import (CONF_HOST, CONF_NAME, STATE_OFF, CONF_PORT,
STATE_PAUSED, STATE_PLAYING,
STATE_UNAVAILABLE)
REQUIREMENTS = ['libsoundtouch==0.7.2']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'media_player'
SERVICE_PLAY_EVERYWHERE = 'soundtouch_play_everywhere'
SERVICE_CREATE_ZONE = 'soundtouch_create_zone'
SERVICE_ADD_ZONE_SLAVE = 'soundtouch_add_zone_slave'
SERVICE_REMOVE_ZONE_SLAVE = 'soundtouch_remove_zone_slave'
MAP_STATUS = {
"PLAY_STATE": STATE_PLAYING,
"BUFFERING_STATE": STATE_PLAYING,
"PAUSE_STATE": STATE_PAUSED,
"STOP_STATE": STATE_OFF
}
DATA_SOUNDTOUCH = "soundtouch"
SOUNDTOUCH_PLAY_EVERYWHERE = vol.Schema({
vol.Required('master'): cv.entity_id
})
SOUNDTOUCH_CREATE_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_ADD_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_REMOVE_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
DEFAULT_NAME = 'Bose Soundtouch'
DEFAULT_PORT = 8090
SUPPORT_SOUNDTOUCH = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | \
SUPPORT_VOLUME_SET | SUPPORT_TURN_ON | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Bose Soundtouch platform."""
if DATA_SOUNDTOUCH not in hass.data:
hass.data[DATA_SOUNDTOUCH] = []
if discovery_info:
host = discovery_info['host']
port = int(discovery_info['port'])
# if device already exists by config
if host in [device.config['host'] for device in
hass.data[DATA_SOUNDTOUCH]]:
return
remote_config = {
'id': 'ha.component.soundtouch',
'host': host,
'port': port
}
soundtouch_device = SoundTouchDevice(None, remote_config)
hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
add_devices([soundtouch_device])
else:
name = config.get(CONF_NAME)
remote_config = {
'id': 'ha.component.soundtouch',
'port': config.get(CONF_PORT),
'host': config.get(CONF_HOST)
}
soundtouch_device = SoundTouchDevice(name, remote_config)
hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
add_devices([soundtouch_device])
def service_handle(service):
"""Handle the applying of a service."""
master_device_id = service.data.get('master')
slaves_ids = service.data.get('slaves')
slaves = []
if slaves_ids:
slaves = [device for device in hass.data[DATA_SOUNDTOUCH] if
device.entity_id in slaves_ids]
master = next([device for device in hass.data[DATA_SOUNDTOUCH] if
device.entity_id == master_device_id].__iter__(), None)
if master is None:
_LOGGER.warning("Unable to find master with entity_id: %s",
str(master_device_id))
return
if service.service == SERVICE_PLAY_EVERYWHERE:
slaves = [d for d in hass.data[DATA_SOUNDTOUCH] if
d.entity_id != master_device_id]
master.create_zone(slaves)
elif service.service == SERVICE_CREATE_ZONE:
master.create_zone(slaves)
elif service.service == SERVICE_REMOVE_ZONE_SLAVE:
master.remove_zone_slave(slaves)
elif service.service == SERVICE_ADD_ZONE_SLAVE:
master.add_zone_slave(slaves)
hass.services.register(DOMAIN, SERVICE_PLAY_EVERYWHERE,
service_handle,
schema=SOUNDTOUCH_PLAY_EVERYWHERE)
hass.services.register(DOMAIN, SERVICE_CREATE_ZONE,
service_handle,
schema=SOUNDTOUCH_CREATE_ZONE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_REMOVE_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_REMOVE_ZONE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ADD_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_ADD_ZONE_SCHEMA)
class SoundTouchDevice(MediaPlayerDevice):
"""Representation of a SoundTouch Bose device."""
def __init__(self, name, config):
"""Create Soundtouch Entity."""
from libsoundtouch import soundtouch_device
self._device = soundtouch_device(config['host'], config['port'])
if name is None:
self._name = self._device.config.name
else:
self._name = name
self._status = self._device.status()
self._volume = self._device.volume()
self._config = config
@property
def config(self):
"""Return specific soundtouch configuration."""
return self._config
@property
def device(self):
"""Return Soundtouch device."""
return self._device
def update(self):
"""Retrieve the latest data."""
self._status = self._device.status()
self._volume = self._device.volume()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume.actual / 100
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status.source == 'STANDBY':
return STATE_OFF
return MAP_STATUS.get(self._status.play_status, STATE_UNAVAILABLE)
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._volume.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SOUNDTOUCH
def turn_off(self):
"""Turn off media player."""
self._device.power_off()
self._status = self._device.status()
def turn_on(self):
"""Turn on media player."""
self._device.power_on()
self._status = self._device.status()
def volume_up(self):
"""Volume up the media player."""
self._device.volume_up()
self._volume = self._device.volume()
def volume_down(self):
"""Volume down media player."""
self._device.volume_down()
self._volume = self._device.volume()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._device.set_volume(int(volume * 100))
self._volume = self._device.volume()
def mute_volume(self, mute):
"""Send mute command."""
self._device.mute()
self._volume = self._device.volume()
def media_play_pause(self):
"""Simulate play pause media player."""
self._device.play_pause()
self._status = self._device.status()
def media_play(self):
"""Send play command."""
self._device.play()
self._status = self._device.status()
def media_pause(self):
"""Send media pause command to media player."""
self._device.pause()
self._status = self._device.status()
def media_next_track(self):
"""Send next track command."""
self._device.next_track()
self._status = self._device.status()
def media_previous_track(self):
"""Send the previous track command."""
self._device.previous_track()
self._status = self._device.status()
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._status.image
@property
def media_title(self):
"""Title of current playing media."""
if self._status.station_name is not None:
return self._status.station_name
elif self._status.artist is not None:
return self._status.artist + " - " + self._status.track
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._status.duration
@property
def media_artist(self):
"""Artist of current playing media."""
return self._status.artist
@property
def media_track(self):
"""Artist of current playing media."""
return self._status.track
@property
def media_album_name(self):
"""Album name of current playing media."""
return self._status.album
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
_LOGGER.debug("Starting media with media_id: " + str(media_id))
if re.match(r'http://', str(media_id)):
# URL
_LOGGER.debug("Playing URL %s", str(media_id))
self._device.play_url(str(media_id))
else:
# Preset
presets = self._device.presets()
preset = next([preset for preset in presets if
preset.preset_id == str(media_id)].__iter__(), None)
if preset is not None:
_LOGGER.debug("Playing preset: " + preset.name)
self._device.select_preset(preset)
else:
_LOGGER.warning(
"Unable to find preset with id " + str(media_id))
def create_zone(self, slaves):
"""
Create a zone (multi-room) and play on selected devices.
:param slaves: slaves on which to play
"""
if not slaves:
_LOGGER.warning("Unable to create zone without slaves")
else:
_LOGGER.info(
"Creating zone with master " + str(self.device.config.name))
self.device.create_zone([slave.device for slave in slaves])
def remove_zone_slave(self, slaves):
"""
        Remove slave(s) from an existing zone (multi-room).
        Zone must already exist and the slaves array can not be empty.
        Note: If removing the last slave, the zone will be deleted and you'll have
        to create a new one. You will not be able to add a new slave anymore.
        :param slaves: slaves to remove from the zone
"""
if not slaves:
_LOGGER.warning("Unable to find slaves to remove")
else:
_LOGGER.info("Removing slaves from zone with master " +
str(self.device.config.name))
self.device.remove_zone_slave([slave.device for slave in slaves])
def add_zone_slave(self, slaves):
"""
        Add slave(s) to an existing zone (multi-room).
        Zone must already exist and the slaves array can not be empty.
        :param slaves: slaves to add
"""
if not slaves:
_LOGGER.warning("Unable to find slaves to add")
else:
_LOGGER.info(
"Adding slaves to zone with master " + str(
self.device.config.name))
self.device.add_zone_slave([slave.device for slave in slaves])
| 33.035616
| 79
| 0.625643
|
import logging
import re
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_VOLUME_SET, SUPPORT_TURN_ON, SUPPORT_PLAY, MediaPlayerDevice,
PLATFORM_SCHEMA)
from homeassistant.const import (CONF_HOST, CONF_NAME, STATE_OFF, CONF_PORT,
STATE_PAUSED, STATE_PLAYING,
STATE_UNAVAILABLE)
REQUIREMENTS = ['libsoundtouch==0.7.2']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'media_player'
SERVICE_PLAY_EVERYWHERE = 'soundtouch_play_everywhere'
SERVICE_CREATE_ZONE = 'soundtouch_create_zone'
SERVICE_ADD_ZONE_SLAVE = 'soundtouch_add_zone_slave'
SERVICE_REMOVE_ZONE_SLAVE = 'soundtouch_remove_zone_slave'
MAP_STATUS = {
"PLAY_STATE": STATE_PLAYING,
"BUFFERING_STATE": STATE_PLAYING,
"PAUSE_STATE": STATE_PAUSED,
"STOP_STATE": STATE_OFF
}
DATA_SOUNDTOUCH = "soundtouch"
SOUNDTOUCH_PLAY_EVERYWHERE = vol.Schema({
vol.Required('master'): cv.entity_id
})
SOUNDTOUCH_CREATE_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_ADD_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_REMOVE_ZONE_SCHEMA = vol.Schema({
vol.Required('master'): cv.entity_id,
vol.Required('slaves'): cv.entity_ids
})
DEFAULT_NAME = 'Bose Soundtouch'
DEFAULT_PORT = 8090
SUPPORT_SOUNDTOUCH = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | \
SUPPORT_VOLUME_SET | SUPPORT_TURN_ON | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port
})
def setup_platform(hass, config, add_devices, discovery_info=None):
if DATA_SOUNDTOUCH not in hass.data:
hass.data[DATA_SOUNDTOUCH] = []
if discovery_info:
host = discovery_info['host']
port = int(discovery_info['port'])
if host in [device.config['host'] for device in
hass.data[DATA_SOUNDTOUCH]]:
return
remote_config = {
'id': 'ha.component.soundtouch',
'host': host,
'port': port
}
soundtouch_device = SoundTouchDevice(None, remote_config)
hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
add_devices([soundtouch_device])
else:
name = config.get(CONF_NAME)
remote_config = {
'id': 'ha.component.soundtouch',
'port': config.get(CONF_PORT),
'host': config.get(CONF_HOST)
}
soundtouch_device = SoundTouchDevice(name, remote_config)
hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
add_devices([soundtouch_device])
def service_handle(service):
master_device_id = service.data.get('master')
slaves_ids = service.data.get('slaves')
slaves = []
if slaves_ids:
slaves = [device for device in hass.data[DATA_SOUNDTOUCH] if
device.entity_id in slaves_ids]
master = next([device for device in hass.data[DATA_SOUNDTOUCH] if
device.entity_id == master_device_id].__iter__(), None)
if master is None:
_LOGGER.warning("Unable to find master with entity_id: %s",
str(master_device_id))
return
if service.service == SERVICE_PLAY_EVERYWHERE:
slaves = [d for d in hass.data[DATA_SOUNDTOUCH] if
d.entity_id != master_device_id]
master.create_zone(slaves)
elif service.service == SERVICE_CREATE_ZONE:
master.create_zone(slaves)
elif service.service == SERVICE_REMOVE_ZONE_SLAVE:
master.remove_zone_slave(slaves)
elif service.service == SERVICE_ADD_ZONE_SLAVE:
master.add_zone_slave(slaves)
hass.services.register(DOMAIN, SERVICE_PLAY_EVERYWHERE,
service_handle,
schema=SOUNDTOUCH_PLAY_EVERYWHERE)
hass.services.register(DOMAIN, SERVICE_CREATE_ZONE,
service_handle,
schema=SOUNDTOUCH_CREATE_ZONE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_REMOVE_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_REMOVE_ZONE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ADD_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_ADD_ZONE_SCHEMA)
class SoundTouchDevice(MediaPlayerDevice):
def __init__(self, name, config):
from libsoundtouch import soundtouch_device
self._device = soundtouch_device(config['host'], config['port'])
if name is None:
self._name = self._device.config.name
else:
self._name = name
self._status = self._device.status()
self._volume = self._device.volume()
self._config = config
@property
def config(self):
return self._config
@property
def device(self):
return self._device
def update(self):
self._status = self._device.status()
self._volume = self._device.volume()
@property
def volume_level(self):
return self._volume.actual / 100
@property
def name(self):
return self._name
@property
def state(self):
if self._status.source == 'STANDBY':
return STATE_OFF
return MAP_STATUS.get(self._status.play_status, STATE_UNAVAILABLE)
@property
def is_volume_muted(self):
return self._volume.muted
@property
def supported_features(self):
return SUPPORT_SOUNDTOUCH
def turn_off(self):
self._device.power_off()
self._status = self._device.status()
def turn_on(self):
self._device.power_on()
self._status = self._device.status()
def volume_up(self):
self._device.volume_up()
self._volume = self._device.volume()
def volume_down(self):
self._device.volume_down()
self._volume = self._device.volume()
def set_volume_level(self, volume):
self._device.set_volume(int(volume * 100))
self._volume = self._device.volume()
def mute_volume(self, mute):
self._device.mute()
self._volume = self._device.volume()
def media_play_pause(self):
self._device.play_pause()
self._status = self._device.status()
def media_play(self):
self._device.play()
self._status = self._device.status()
def media_pause(self):
self._device.pause()
self._status = self._device.status()
def media_next_track(self):
self._device.next_track()
self._status = self._device.status()
def media_previous_track(self):
self._device.previous_track()
self._status = self._device.status()
@property
def media_image_url(self):
return self._status.image
@property
def media_title(self):
if self._status.station_name is not None:
return self._status.station_name
elif self._status.artist is not None:
return self._status.artist + " - " + self._status.track
return None
@property
def media_duration(self):
return self._status.duration
@property
def media_artist(self):
return self._status.artist
@property
def media_track(self):
return self._status.track
@property
def media_album_name(self):
return self._status.album
def play_media(self, media_type, media_id, **kwargs):
_LOGGER.debug("Starting media with media_id: " + str(media_id))
if re.match(r'http://', str(media_id)):
_LOGGER.debug("Playing URL %s", str(media_id))
self._device.play_url(str(media_id))
else:
presets = self._device.presets()
preset = next([preset for preset in presets if
preset.preset_id == str(media_id)].__iter__(), None)
if preset is not None:
_LOGGER.debug("Playing preset: " + preset.name)
self._device.select_preset(preset)
else:
_LOGGER.warning(
"Unable to find preset with id " + str(media_id))
def create_zone(self, slaves):
if not slaves:
_LOGGER.warning("Unable to create zone without slaves")
else:
_LOGGER.info(
"Creating zone with master " + str(self.device.config.name))
self.device.create_zone([slave.device for slave in slaves])
def remove_zone_slave(self, slaves):
if not slaves:
_LOGGER.warning("Unable to find slaves to remove")
else:
_LOGGER.info("Removing slaves from zone with master " +
str(self.device.config.name))
self.device.remove_zone_slave([slave.device for slave in slaves])
def add_zone_slave(self, slaves):
if not slaves:
_LOGGER.warning("Unable to find slaves to add")
else:
_LOGGER.info(
"Adding slaves to zone with master " + str(
self.device.config.name))
self.device.add_zone_slave([slave.device for slave in slaves])
| true
| true
|
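A hedged sketch of calling one of the multi-room zone services registered in the record above from elsewhere in Home Assistant; it assumes a running hass instance, the service name and the master/slaves schema come from the platform code, and the entity ids are placeholders.
# Hypothetical service call; entity ids are made up for illustration.
hass.services.call(
    'media_player',
    'soundtouch_create_zone',
    {
        'master': 'media_player.living_room_soundtouch',
        'slaves': ['media_player.kitchen_soundtouch'],
    },
)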
790ad8fbe755827fb30faf59159513c915ce20bf
| 160
|
py
|
Python
|
bitcoin_tweeter/credentials.py
|
llamafarmer/bitcoin_tweeter
|
5ebc07e01ee3d5465ad9115438e9de6ee64b6011
|
[
"MIT"
] | 5
|
2017-10-25T12:07:42.000Z
|
2019-03-02T03:56:13.000Z
|
code/25-27-error-handling/credentials.py
|
llamafarmer/100_days_of_code
|
6af973157aa4c77cd6f88bf1f0fa5e60a375339c
|
[
"MIT"
] | null | null | null |
code/25-27-error-handling/credentials.py
|
llamafarmer/100_days_of_code
|
6af973157aa4c77cd6f88bf1f0fa5e60a375339c
|
[
"MIT"
] | 3
|
2017-10-25T04:40:57.000Z
|
2018-04-25T07:00:44.000Z
|
consumer_key = 'YOUR CONSUMER KEY'
consumer_secret = 'YOUR CONSUMER SECRET'
access_token = 'YOUR ACCESS TOKEN'
access_token_secret = 'YOUR ACCESS TOKEN SECRET'
| 32
| 48
| 0.79375
|
consumer_key = 'YOUR CONSUMER KEY'
consumer_secret = 'YOUR CONSUMER SECRET'
access_token = 'YOUR ACCESS TOKEN'
access_token_secret = 'YOUR ACCESS TOKEN SECRET'
| true
| true
|
790ad94490493a8a472f94a2bac069b00402d6b8
| 2,620
|
py
|
Python
|
setup.py
|
morpframework/morpfw
|
b867e5809d6c52e8839586670a29fcd179ce64c7
|
[
"Apache-2.0"
] | 8
|
2018-12-08T01:41:58.000Z
|
2020-12-21T15:30:12.000Z
|
setup.py
|
morpframework/morpfw
|
b867e5809d6c52e8839586670a29fcd179ce64c7
|
[
"Apache-2.0"
] | 17
|
2019-02-05T15:01:32.000Z
|
2020-04-28T16:17:42.000Z
|
setup.py
|
morpframework/morpfw
|
b867e5809d6c52e8839586670a29fcd179ce64c7
|
[
"Apache-2.0"
] | 2
|
2018-12-08T05:03:37.000Z
|
2019-03-20T07:15:21.000Z
|
import os
import sys
from setuptools import find_packages, setup
IS_RTD = os.environ.get("READTHEDOCS", None)
version = "0.4.0b14.dev0"
long_description = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()
install_requires = [
"morepath==0.19",
"alembic",
"rulez>=0.1.4,<0.2.0",
"inverter>=0.1.0<0.2.0",
"more.cors",
"celery",
"redis",
"jsl",
"pyyaml>=4.2b1",
"more.jsonschema",
"sqlalchemy",
"sqlalchemy_utils",
"more.signals",
"DateTime",
"transitions",
"jsonpath_ng",
"python-dateutil",
"more.jwtauth",
"more.itsdangerous",
"sqlsoup",
"celery",
"gunicorn",
"itsdangerous",
"pyyaml",
"passlib",
"jsonschema",
"more.transaction",
"zope.sqlalchemy",
"python-dateutil",
"more.cors",
"sqlalchemy_jsonfield",
"sqlsoup",
"celery",
"gunicorn",
"itsdangerous",
"pyyaml",
"passlib",
"jsonschema",
"more.transaction",
"zope.sqlalchemy",
"more.basicauth",
"cryptography",
"elasticsearch>7.0.0,<8.0.0",
"pamela",
"click",
"cookiecutter",
"eventlet",
"wsgigzip",
"psycopg2",
"colander",
"deform",
"more.chameleon",
"more.static",
"RestrictedPython",
"beaker",
"zstandard",
"oauthlib[signedtoken]",
"requests-oauthlib",
]
if IS_RTD is None:
install_requires.append("python-ldap")
setup(
name="morpfw",
version=version,
description="Web framework based on morepath",
long_description=long_description,
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords="",
author="Izhar Firdaus",
author_email="izhar@kagesenshi.org",
url="http://github.com/morpframework/morpfw",
license="Apache-2.0",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
"test": [
"nose",
"webtest",
"pytest",
"pytest-html",
"pytest_postgresql",
"pytest_rabbitmq",
"pytest-annotate",
"pytest-cov",
"pika",
"mirakuru",
],
"docs": ["sphinxcontrib-httpdomain", "sphinx-click"],
},
entry_points={
"morepath": ["scan=morpfw"],
"console_scripts": [
"morpfw=morpfw.cli.main:main",
"mfw-runmodule=morpfw.cli:run_module",
"mfw-profilemodule=morpfw.cli:run_module_profile",
],
},
)
| 22.782609
| 94
| 0.579771
|
import os
import sys
from setuptools import find_packages, setup
IS_RTD = os.environ.get("READTHEDOCS", None)
version = "0.4.0b14.dev0"
long_description = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()
install_requires = [
"morepath==0.19",
"alembic",
"rulez>=0.1.4,<0.2.0",
"inverter>=0.1.0<0.2.0",
"more.cors",
"celery",
"redis",
"jsl",
"pyyaml>=4.2b1",
"more.jsonschema",
"sqlalchemy",
"sqlalchemy_utils",
"more.signals",
"DateTime",
"transitions",
"jsonpath_ng",
"python-dateutil",
"more.jwtauth",
"more.itsdangerous",
"sqlsoup",
"celery",
"gunicorn",
"itsdangerous",
"pyyaml",
"passlib",
"jsonschema",
"more.transaction",
"zope.sqlalchemy",
"python-dateutil",
"more.cors",
"sqlalchemy_jsonfield",
"sqlsoup",
"celery",
"gunicorn",
"itsdangerous",
"pyyaml",
"passlib",
"jsonschema",
"more.transaction",
"zope.sqlalchemy",
"more.basicauth",
"cryptography",
"elasticsearch>7.0.0,<8.0.0",
"pamela",
"click",
"cookiecutter",
"eventlet",
"wsgigzip",
"psycopg2",
"colander",
"deform",
"more.chameleon",
"more.static",
"RestrictedPython",
"beaker",
"zstandard",
"oauthlib[signedtoken]",
"requests-oauthlib",
]
if IS_RTD is None:
install_requires.append("python-ldap")
setup(
name="morpfw",
version=version,
description="Web framework based on morepath",
long_description=long_description,
classifiers=[],
keywords="",
author="Izhar Firdaus",
author_email="izhar@kagesenshi.org",
url="http://github.com/morpframework/morpfw",
license="Apache-2.0",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
"test": [
"nose",
"webtest",
"pytest",
"pytest-html",
"pytest_postgresql",
"pytest_rabbitmq",
"pytest-annotate",
"pytest-cov",
"pika",
"mirakuru",
],
"docs": ["sphinxcontrib-httpdomain", "sphinx-click"],
},
entry_points={
"morepath": ["scan=morpfw"],
"console_scripts": [
"morpfw=morpfw.cli.main:main",
"mfw-runmodule=morpfw.cli:run_module",
"mfw-profilemodule=morpfw.cli:run_module_profile",
],
},
)
| true
| true
|
790ad996aeb7de9287fe537e06e6057d5c60f7cb
| 1,491
|
py
|
Python
|
tests/test_ds.py
|
kororo/rand
|
af0496a33e4eb4c08d1e19cf718d58393585c6cb
|
[
"MIT"
] | null | null | null |
tests/test_ds.py
|
kororo/rand
|
af0496a33e4eb4c08d1e19cf718d58393585c6cb
|
[
"MIT"
] | null | null | null |
tests/test_ds.py
|
kororo/rand
|
af0496a33e4eb4c08d1e19cf718d58393585c6cb
|
[
"MIT"
] | null | null | null |
from tests import create_rand
def prepare_database_with_table(name: str, rows: list):
from peewee import IntegerField, Proxy, CharField, Model
from playhouse.sqlite_ext import CSqliteExtDatabase
db = Proxy()
db.initialize(CSqliteExtDatabase(':memory:', bloomfilter=True))
NameModel = type(name, (Model,), {
'id_': IntegerField(primary_key=True, column_name='id'),
'name': CharField(column_name='name')
})
table: Model = NameModel()
table.bind(db)
db.create_tables([NameModel])
for row in rows:
table.insert(row).execute()
return db
def test_ds_list():
from rand.providers.ds import RandDatasetBaseProvider, ListDatasetTarget
db = {
'names': [{'name': 'test1'}, {'name': 'test1'}],
'cities': [{'name': 'test2'}, {'name': 'test2'}],
}
ds = RandDatasetBaseProvider(prefix='ds', target=ListDatasetTarget(db=db))
rand = create_rand()
rand.register_provider(ds)
assert rand.gen('(:ds_get:)', ['names']) == ['test1']
assert rand.gen('(:ds_get_names:)-(:ds_get_cities:)') == ['test1-test2']
def test_ds_db():
from rand.providers.ds import RandDatasetBaseProvider, DBDatasetTarget
rows = [{'name': 'test'}, {'name': 'test'}]
db = prepare_database_with_table('names', rows)
ds = RandDatasetBaseProvider(prefix='ds', target=DBDatasetTarget(db=db))
rand = create_rand()
rand.register_provider(ds)
assert rand.gen('(:ds_get:)', ['names']) == ['test']
| 33.133333
| 78
| 0.652582
|
from tests import create_rand
def prepare_database_with_table(name: str, rows: list):
from peewee import IntegerField, Proxy, CharField, Model
from playhouse.sqlite_ext import CSqliteExtDatabase
db = Proxy()
db.initialize(CSqliteExtDatabase(':memory:', bloomfilter=True))
NameModel = type(name, (Model,), {
'id_': IntegerField(primary_key=True, column_name='id'),
'name': CharField(column_name='name')
})
table: Model = NameModel()
table.bind(db)
db.create_tables([NameModel])
for row in rows:
table.insert(row).execute()
return db
def test_ds_list():
from rand.providers.ds import RandDatasetBaseProvider, ListDatasetTarget
db = {
'names': [{'name': 'test1'}, {'name': 'test1'}],
'cities': [{'name': 'test2'}, {'name': 'test2'}],
}
ds = RandDatasetBaseProvider(prefix='ds', target=ListDatasetTarget(db=db))
rand = create_rand()
rand.register_provider(ds)
assert rand.gen('(:ds_get:)', ['names']) == ['test1']
assert rand.gen('(:ds_get_names:)-(:ds_get_cities:)') == ['test1-test2']
def test_ds_db():
from rand.providers.ds import RandDatasetBaseProvider, DBDatasetTarget
rows = [{'name': 'test'}, {'name': 'test'}]
db = prepare_database_with_table('names', rows)
ds = RandDatasetBaseProvider(prefix='ds', target=DBDatasetTarget(db=db))
rand = create_rand()
rand.register_provider(ds)
assert rand.gen('(:ds_get:)', ['names']) == ['test']
| true
| true
|
790ad9c9a59e8e1075423ee5117501b1890483f8
| 177
|
py
|
Python
|
odoo/openerp/addons/test_new_api/tests/__init__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/openerp/addons/test_new_api/tests/__init__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/openerp/addons/test_new_api/tests/__init__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import test_related
from . import test_new_fields
from . import test_onchange
from . import test_field_conversions
from . import test_attributes
| 22.125
| 36
| 0.774011
|
from . import test_related
from . import test_new_fields
from . import test_onchange
from . import test_field_conversions
from . import test_attributes
| true
| true
|
790adbe9ccc1aac072309ed10ac00cdd52d18e47
| 324
|
py
|
Python
|
recipe/run_test.py
|
csdms-stack/permamodel-frostnumbergeo-csdms-recipe
|
007922309c891ec8b11ef050986887e9747593e5
|
[
"MIT"
] | null | null | null |
recipe/run_test.py
|
csdms-stack/permamodel-frostnumbergeo-csdms-recipe
|
007922309c891ec8b11ef050986887e9747593e5
|
[
"MIT"
] | null | null | null |
recipe/run_test.py
|
csdms-stack/permamodel-frostnumbergeo-csdms-recipe
|
007922309c891ec8b11ef050986887e9747593e5
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import os
os.mkdir('_testing')
os.chdir('_testing')
os.environ['MPLBACKEND'] = 'Agg'
from pymt.components import FrostNumberGeoModel as Model
model = Model()
for default in model.defaults:
print('{name}: {val} {units}'.format(
name=default[0], val=default[1][0], units=default[1][1]))
| 21.6
| 65
| 0.682099
|
import os
os.mkdir('_testing')
os.chdir('_testing')
os.environ['MPLBACKEND'] = 'Agg'
from pymt.components import FrostNumberGeoModel as Model
model = Model()
for default in model.defaults:
print('{name}: {val} {units}'.format(
name=default[0], val=default[1][0], units=default[1][1]))
| true
| true
|
790adc33e460e957ad88c928f4148caf97fe21f2
| 1,969
|
py
|
Python
|
TextRecognitionDataGenerator/idcard_file_parse.py
|
yuliangzhang/TextRecognitionDataGenerator
|
38c6a2906b17580d0618ebf92e016a5331c965e7
|
[
"MIT"
] | null | null | null |
TextRecognitionDataGenerator/idcard_file_parse.py
|
yuliangzhang/TextRecognitionDataGenerator
|
38c6a2906b17580d0618ebf92e016a5331c965e7
|
[
"MIT"
] | null | null | null |
TextRecognitionDataGenerator/idcard_file_parse.py
|
yuliangzhang/TextRecognitionDataGenerator
|
38c6a2906b17580d0618ebf92e016a5331c965e7
|
[
"MIT"
] | null | null | null |
# import pandas as pd
#
# csv_data = pd.read_csv('E:\\AI_Object_Detect\\Code\\TextRecognitionDataGenerator\\idcard_file.txt', sep=',', header=0, encoding='UTF-8')
# N = 5
# csv_batch_data = csv_data.tail(N)
# print(csv_batch_data.shape)
import csv
import os
idcard_file = 'E:\\AI_Object_Detect\\Code\\TextRecognitionDataGenerator\\idcard_file.txt'
idcard_data = []
with open(idcard_file, 'r', encoding='UTF-8') as csvfile:
    csv_reader = csv.reader(csvfile) # use csv.reader to read the contents of csvfile
    birth_header = next(csv_reader) # read the column headers from the first row
    for row in csv_reader: # save the data from the csv file into idcard_data
tmp_str = row[10]
if 'issueAuthority' in tmp_str:
front = row[10].split(':')[1] + row[11].split(':')[1]
idcard_data.append(front.replace('"', '').replace("}",''))
elif 'address' in tmp_str:
back = row[10].split(':')[1] + row[11].split(':')[1] + row[12].split(':')[1] + row[13].split(':')[1] + row[14].split(':')[1] + row[15].split(':')[1]
idcard_data.append(back.replace('"', '').replace("}",''))
# print(str + '\r\n')
lang = 'char_std_5991'
with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8", errors='ignore') as d:
lang_dict = d.readlines()
lang_dict = [ch.strip('\n') for ch in lang_dict]
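# Extend the character dictionary with any characters from the ID-card text that it does not already contain.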
for text in idcard_data:
for character in text:
try:
p = lang_dict.index(character)
except ValueError:
lang_dict.append(character)
print(character)
# file=open('texts/data.txt','w+', encoding='UTF-8')
# for strgin in idcard_data:
# file.write(strgin + '\n')
# file.close()
# for cnt in idcard_data:
# print(cnt)
# print('\n')
# idcard_data = [[float(x) for x in row] for row in idcard_data] # convert the data from string to float
# birth_data = np.array(birth_data) # convert the list into a numpy array for easier inspection of the data structure
# birth_header = np.array(birth_header)
# print(birth_data.shape) # use .shape to inspect the structure.
# print(birth_header.shape)
| 33.372881
| 160
| 0.630269
|
import csv
import os
idcard_file = 'E:\\AI_Object_Detect\\Code\\TextRecognitionDataGenerator\\idcard_file.txt'
idcard_data = []
with open(idcard_file, 'r', encoding='UTF-8') as csvfile:
csv_reader = csv.reader(csvfile)
birth_header = next(csv_reader)
for row in csv_reader:
tmp_str = row[10]
if 'issueAuthority' in tmp_str:
front = row[10].split(':')[1] + row[11].split(':')[1]
idcard_data.append(front.replace('"', '').replace("}",''))
elif 'address' in tmp_str:
back = row[10].split(':')[1] + row[11].split(':')[1] + row[12].split(':')[1] + row[13].split(':')[1] + row[14].split(':')[1] + row[15].split(':')[1]
idcard_data.append(back.replace('"', '').replace("}",''))
lang = 'char_std_5991'
with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8", errors='ignore') as d:
lang_dict = d.readlines()
lang_dict = [ch.strip('\n') for ch in lang_dict]
for text in idcard_data:
for character in text:
try:
p = lang_dict.index(character)
except ValueError:
lang_dict.append(character)
print(character)
| true
| true
|
790adcf0f09bd61554b66a4fd55c7518a3d277e9
| 4,742
|
py
|
Python
|
recipes/cmake/3.x.x/conanfile.py
|
nicraMarcin/conan-center-index
|
c31521c29eaf309f91d2efffb7f27c7795302420
|
[
"MIT"
] | null | null | null |
recipes/cmake/3.x.x/conanfile.py
|
nicraMarcin/conan-center-index
|
c31521c29eaf309f91d2efffb7f27c7795302420
|
[
"MIT"
] | 2
|
2021-07-12T11:40:14.000Z
|
2022-03-04T13:15:54.000Z
|
recipes/cmake/3.x.x/conanfile.py
|
ericLemanissier/conan-center-index
|
24743f30fe24da91c4eabeb42e7457821029a9b9
|
[
"MIT"
] | null | null | null |
import os
from conans import tools, ConanFile, CMake
from conans.errors import ConanInvalidConfiguration, ConanException
class CMakeConan(ConanFile):
name = "cmake"
description = "Conan installer for CMake"
topics = ("conan", "cmake", "build", "installer")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/Kitware/CMake"
license = "BSD-3-Clause"
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {
"with_openssl": [True, False, "auto"],
}
default_options = {
"with_openssl": "auto",
}
_source_subfolder = "source_subfolder"
_cmake = None
def _minor_version(self):
return ".".join(str(self.version).split(".")[:2])
@property
def _with_openssl(self):
if self.options.with_openssl == "auto":
return self.settings.os != "Windows"
return self.options.with_openssl
def configure(self):
if self.settings.os == "Macos" and self.settings.arch == "x86":
raise ConanInvalidConfiguration("CMake does not support x86 for macOS")
minimal_cpp_standard = "11"
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, minimal_cpp_standard)
minimal_version = {
"gcc": "5",
"clang": "3.3",
"apple-clang": "9",
"Visual Studio": "14",
}
compiler = str(self.settings.compiler)
if compiler not in minimal_version:
self.output.warn(
"{} recipe lacks information about the {} compiler standard version support".format(self.name, compiler))
self.output.warn(
"{} requires a compiler that supports at least C++{}".format(self.name, minimal_cpp_standard))
return
version = tools.Version(self.settings.compiler.version)
if version < minimal_version[compiler]:
raise ConanInvalidConfiguration(
"{} requires a compiler that supports at least C++{}".format(self.name, minimal_cpp_standard))
def requirements(self):
if self._with_openssl:
self.requires("openssl/1.1.1h")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if not self._cmake:
self._cmake = CMake(self)
if not self.settings.compiler.cppstd:
self._cmake.definitions["CMAKE_CXX_STANDARD"] = 11
self._cmake.definitions["CMAKE_BOOTSTRAP"] = False
if self.settings.os == "Linux":
self._cmake.definitions["CMAKE_USE_OPENSSL"] = self._with_openssl
if self._with_openssl:
self._cmake.definitions["OPENSSL_USE_STATIC_LIBS"] = not self.options["openssl"].shared
self._cmake.configure(source_folder=self._source_subfolder)
return self._cmake
def build(self):
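        # Patch CMake's own CMakeLists.txt so the build includes the Conan build info and
        # picks up Conan-provided dependencies (e.g. OpenSSL on Linux).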
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"project(CMake)",
"project(CMake)\ninclude(\"{}/conanbuildinfo.cmake\")\nconan_basic_setup(NO_OUTPUT_DIRS)".format(
self.install_folder.replace("\\", "/")))
if self.settings.os == "Linux":
tools.replace_in_file(os.path.join(self._source_subfolder, "Utilities", "cmcurl", "CMakeLists.txt"),
"list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})",
"list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES} ${CMAKE_DL_LIBS} pthread)")
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("Copyright.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "doc"))
def package_id(self):
self.info.options.with_openssl = self._with_openssl
del self.info.settings.compiler
def package_info(self):
minor = self._minor_version()
bindir = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bindir))
self.env_info.PATH.append(bindir)
self.env_info.CMAKE_ROOT = self.package_folder
mod_path = os.path.join(self.package_folder, "share", "cmake-%s" % minor, "Modules")
self.env_info.CMAKE_MODULE_PATH = mod_path
if not os.path.exists(mod_path):
raise ConanException("Module path not found: %s" % mod_path)
| 39.848739
| 127
| 0.615141
|
import os
from conans import tools, ConanFile, CMake
from conans.errors import ConanInvalidConfiguration, ConanException
class CMakeConan(ConanFile):
name = "cmake"
description = "Conan installer for CMake"
topics = ("conan", "cmake", "build", "installer")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/Kitware/CMake"
license = "BSD-3-Clause"
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {
"with_openssl": [True, False, "auto"],
}
default_options = {
"with_openssl": "auto",
}
_source_subfolder = "source_subfolder"
_cmake = None
def _minor_version(self):
return ".".join(str(self.version).split(".")[:2])
@property
def _with_openssl(self):
if self.options.with_openssl == "auto":
return self.settings.os != "Windows"
return self.options.with_openssl
def configure(self):
if self.settings.os == "Macos" and self.settings.arch == "x86":
raise ConanInvalidConfiguration("CMake does not support x86 for macOS")
minimal_cpp_standard = "11"
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, minimal_cpp_standard)
minimal_version = {
"gcc": "5",
"clang": "3.3",
"apple-clang": "9",
"Visual Studio": "14",
}
compiler = str(self.settings.compiler)
if compiler not in minimal_version:
self.output.warn(
"{} recipe lacks information about the {} compiler standard version support".format(self.name, compiler))
self.output.warn(
"{} requires a compiler that supports at least C++{}".format(self.name, minimal_cpp_standard))
return
version = tools.Version(self.settings.compiler.version)
if version < minimal_version[compiler]:
raise ConanInvalidConfiguration(
"{} requires a compiler that supports at least C++{}".format(self.name, minimal_cpp_standard))
def requirements(self):
if self._with_openssl:
self.requires("openssl/1.1.1h")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if not self._cmake:
self._cmake = CMake(self)
if not self.settings.compiler.cppstd:
self._cmake.definitions["CMAKE_CXX_STANDARD"] = 11
self._cmake.definitions["CMAKE_BOOTSTRAP"] = False
if self.settings.os == "Linux":
self._cmake.definitions["CMAKE_USE_OPENSSL"] = self._with_openssl
if self._with_openssl:
self._cmake.definitions["OPENSSL_USE_STATIC_LIBS"] = not self.options["openssl"].shared
self._cmake.configure(source_folder=self._source_subfolder)
return self._cmake
def build(self):
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"project(CMake)",
"project(CMake)\ninclude(\"{}/conanbuildinfo.cmake\")\nconan_basic_setup(NO_OUTPUT_DIRS)".format(
self.install_folder.replace("\\", "/")))
if self.settings.os == "Linux":
tools.replace_in_file(os.path.join(self._source_subfolder, "Utilities", "cmcurl", "CMakeLists.txt"),
"list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})",
"list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES} ${CMAKE_DL_LIBS} pthread)")
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("Copyright.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "doc"))
def package_id(self):
self.info.options.with_openssl = self._with_openssl
del self.info.settings.compiler
def package_info(self):
minor = self._minor_version()
bindir = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bindir))
self.env_info.PATH.append(bindir)
self.env_info.CMAKE_ROOT = self.package_folder
mod_path = os.path.join(self.package_folder, "share", "cmake-%s" % minor, "Modules")
self.env_info.CMAKE_MODULE_PATH = mod_path
if not os.path.exists(mod_path):
raise ConanException("Module path not found: %s" % mod_path)
| true
| true
|
790add3f01747408ad5054c9f590ac9059ccc3b0
| 961
|
py
|
Python
|
pygame/lib/macosx.py
|
CiubucAlexandra/Theremine-Projet-Micriprocesseurs
|
7670d9cb468b060135dc5f057b734db970da0f0c
|
[
"BSD-3-Clause"
] | 4
|
2018-09-07T15:35:24.000Z
|
2019-03-27T09:48:12.000Z
|
pygame/lib/macosx.py
|
CiubucAlexandra/Theremine-Projet-Micriprocesseurs
|
7670d9cb468b060135dc5f057b734db970da0f0c
|
[
"BSD-3-Clause"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
pygame/lib/macosx.py
|
CiubucAlexandra/Theremine-Projet-Micriprocesseurs
|
7670d9cb468b060135dc5f057b734db970da0f0c
|
[
"BSD-3-Clause"
] | 3
|
2019-06-18T19:57:17.000Z
|
2020-11-06T03:55:08.000Z
|
import os, sys
try:
import MacOS
except:
MacOS = None
from pygame.pkgdata import getResource
from pygame import sdlmain_osx
__all__ = ['Video_AutoInit']
def Video_AutoInit():
"""This is a function that's called from the c extension code
just before the display module is initialized"""
if MacOS and not MacOS.WMAvailable():
if not sdlmain_osx.WMEnable():
raise ImportError("Can not access the window manager. Use py2app or execute with the pythonw script.")
if not sdlmain_osx.RunningFromBundleWithNSApplication():
try:
default_icon_data = getResource('pygame_icon.tiff').read()
except IOError:
default_icon_data = None
except NotImplementedError:
default_icon_data = None
sdlmain_osx.InstallNSApplication(default_icon_data)
if (os.getcwd() == '/') and len(sys.argv) > 1:
os.chdir(os.path.dirname(sys.argv[0]))
return True
| 30.03125
| 115
| 0.676379
|
import os, sys
try:
import MacOS
except:
MacOS = None
from pygame.pkgdata import getResource
from pygame import sdlmain_osx
__all__ = ['Video_AutoInit']
def Video_AutoInit():
if MacOS and not MacOS.WMAvailable():
if not sdlmain_osx.WMEnable():
raise ImportError("Can not access the window manager. Use py2app or execute with the pythonw script.")
if not sdlmain_osx.RunningFromBundleWithNSApplication():
try:
default_icon_data = getResource('pygame_icon.tiff').read()
except IOError:
default_icon_data = None
except NotImplementedError:
default_icon_data = None
sdlmain_osx.InstallNSApplication(default_icon_data)
if (os.getcwd() == '/') and len(sys.argv) > 1:
os.chdir(os.path.dirname(sys.argv[0]))
return True
| true
| true
|
790add88bbf516becc94d87ef611e5073c481ff7
| 1,331
|
py
|
Python
|
colour/examples/appearance/examples_llab.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | 1
|
2022-02-12T06:28:15.000Z
|
2022-02-12T06:28:15.000Z
|
colour/examples/appearance/examples_llab.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
colour/examples/appearance/examples_llab.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Showcases *LLAB(l:c)* colour appearance model computations.
"""
import numpy as np
import colour
from colour.appearance.llab import CAM_ReferenceSpecification_LLAB
from colour.utilities import message_box
message_box('"LLAB(l:c)" Colour Appearance Model Computations')
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_0 = np.array([95.05, 100.00, 108.88])
Y_b = 20.0
L = 318.31
surround = colour.VIEWING_CONDITIONS_LLAB["ref_average_4_minus"]
message_box(
f'Converting to the "LLAB(l:c)" colour appearance model specification '
f"using given parameters:\n\n"
f"\tXYZ: {XYZ}\n"
f"\tXYZ_0: {XYZ_0}\n"
f"\tY_b: {Y_b}\n"
f"\tL: {L}\n"
f"\tsurround: {surround}"
)
specification = colour.XYZ_to_LLAB(XYZ, XYZ_0, Y_b, L, surround)
print(specification)
print("\n")
message_box(
'Broadcasting the current output "LLAB(l:c)" colour appearance '
"model specification to the reference specification.\n"
"The intent of this reference specification is to provide names "
'as closest as possible to the "Mark D. Fairchild" reference.\n'
"The current output specification is meant to be consistent with "
"the other colour appearance model specification by using same "
"argument names for consistency wherever possible."
)
print(CAM_ReferenceSpecification_LLAB(*specification.values))
| 30.953488
| 76
| 0.728024
|
import numpy as np
import colour
from colour.appearance.llab import CAM_ReferenceSpecification_LLAB
from colour.utilities import message_box
message_box('"LLAB(l:c)" Colour Appearance Model Computations')
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_0 = np.array([95.05, 100.00, 108.88])
Y_b = 20.0
L = 318.31
surround = colour.VIEWING_CONDITIONS_LLAB["ref_average_4_minus"]
message_box(
f'Converting to the "LLAB(l:c)" colour appearance model specification '
f"using given parameters:\n\n"
f"\tXYZ: {XYZ}\n"
f"\tXYZ_0: {XYZ_0}\n"
f"\tY_b: {Y_b}\n"
f"\tL: {L}\n"
f"\tsurround: {surround}"
)
specification = colour.XYZ_to_LLAB(XYZ, XYZ_0, Y_b, L, surround)
print(specification)
print("\n")
message_box(
'Broadcasting the current output "LLAB(l:c)" colour appearance '
"model specification to the reference specification.\n"
"The intent of this reference specification is to provide names "
'as closest as possible to the "Mark D. Fairchild" reference.\n'
"The current output specification is meant to be consistent with "
"the other colour appearance model specification by using same "
"argument names for consistency wherever possible."
)
print(CAM_ReferenceSpecification_LLAB(*specification.values))
| true
| true
|
790adf0135986079574bda3f854a77d0b09c75f3
| 286
|
py
|
Python
|
racerCalculator.py
|
Sam-Gram/PiWood-Derby
|
e2d18f595100e73923a55f8d7cb396ecb87fc727
|
[
"MIT"
] | 1
|
2017-12-06T19:35:20.000Z
|
2017-12-06T19:35:20.000Z
|
racerCalculator.py
|
Sam-Gram/PiWood-Derby
|
e2d18f595100e73923a55f8d7cb396ecb87fc727
|
[
"MIT"
] | null | null | null |
racerCalculator.py
|
Sam-Gram/PiWood-Derby
|
e2d18f595100e73923a55f8d7cb396ecb87fc727
|
[
"MIT"
] | null | null | null |
# This utility function comes up with which racers to use in the next race
def racerCalculator(raceNum, numCars):
print("RaceNum",raceNum)
print("numCars",numCars)
if raceNum is None:
raceNum = 1
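    # Pick three consecutive car indices, shifted by raceNum*3 and wrapped around the field with modulo.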
f = lambda x : (raceNum*3+x) % numCars
return f(0),f(1),f(2)
| 28.6
| 74
| 0.657343
|
def racerCalculator(raceNum, numCars):
print("RaceNum",raceNum)
print("numCars",numCars)
if raceNum is None:
raceNum = 1
f = lambda x : (raceNum*3+x) % numCars
return f(0),f(1),f(2)
| true
| true
|
790ae00d40eb0d792deaa0b0143442f1beb1f66b
| 12,429
|
py
|
Python
|
dcs_rest_client.py
|
5GEVE/5geve-wp4-dcs-signalling-topic-handler
|
f02a1b34abdeeb3927438eb57580e7bc27283b3b
|
[
"Apache-2.0"
] | null | null | null |
dcs_rest_client.py
|
5GEVE/5geve-wp4-dcs-signalling-topic-handler
|
f02a1b34abdeeb3927438eb57580e7bc27283b3b
|
[
"Apache-2.0"
] | null | null | null |
dcs_rest_client.py
|
5GEVE/5geve-wp4-dcs-signalling-topic-handler
|
f02a1b34abdeeb3927438eb57580e7bc27283b3b
|
[
"Apache-2.0"
] | null | null | null |
import requests
import argparse
import logging
import coloredlogs
import threading
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
import json
from kafka import KafkaConsumer
from threading import Thread
from threading import Timer
from datetime import timedelta
import psycopg2
import time
app = Flask(__name__)
logger = logging.getLogger("DCSRestClient")
signalling_metric_infrastructure = {'expId': 'internal', 'topic': 'signalling.metric.infrastructure'}
signalling_metric_application = {'expId': 'internal', 'topic': 'signalling.metric.application'}
signalling_kpi = {'expId': 'internal', 'topic': 'signalling.kpi'}
dcm_port = "8090"
dcm_subscribe_url = "/dcm/subscribe"
dcm_unsubscribe_url = "/dcm/unsubscribe"
dcs_dashboard_url = "http://127.0.0.1:8080/portal/dcs/dashboard"
signalling_start = False
@app.route('/', methods=['GET'])
def server_status():
"""
Get status.
---
describe: get status
responses:
200:
description: OK
"""
logger.info("GET /")
return '', 200
@app.route("/spec", methods=['GET'])
def spec():
"""
Get swagger specification.
---
describe: get swagger specification
responses:
swagger:
description: swagger specification
"""
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "DCS REST API"
return jsonify(swag)
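# Block until the first message is published on the given topic, then ask the portal to create the matching dashboard.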
def kafka_consumer_refresh_dashboard_handler(topic, value):
logger.info("Creating Kafka Consumer for %s topic", topic)
consumer = KafkaConsumer(
topic,
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
message_received = False
while not message_received:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", topic, message)
message_received = True
time.sleep(5)
logger.info("Creating dashboard for topic: %s", topic)
r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
    # This call does not seem to be needed, as the dashboard is generated when data is present.
#time.sleep(2)
#logger.info("Refreshing dashboard for %s topic", topic)
#subprocess.call(['/bin/bash', '/usr/bin/dcs/refresh_dashboard.sh', topic])
logger.info("Closing Kafka Consumer for %s topic", topic)
consumer.close()
def index_cleaner(topic, value):
logger.info("Time to delete the dashboard for topic %s", topic)
r = requests.delete(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
logger.info("Time to delete the Elasticsearch index for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'yes'])
def kafka_consumer_signalling_topic_handler(signalling_topic_data):
logger.info("Creating Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer = KafkaConsumer(
signalling_topic_data["topic"],
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
while signalling_start:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", signalling_topic_data["topic"], message)
for tp, messages in message.items():
for msg in messages:
logger.info("Value: %s", msg.value)
topic = json.loads(msg.value)["topic"]
if json.loads(msg.value)["action"] == "subscribe":
logger.info("Create Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/create_logstash_pipeline.sh', topic])
# Dashboard creation is commented because it will be created when data is published in the topic.
#r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(msg.value) }]})
#logger.info("Response: Code %s", r)
# Create Kafka consumer to wait for the first message received in the topic and, then, refresh the dashboard.
thread = threading.Thread(target = kafka_consumer_refresh_dashboard_handler, args = [topic, msg.value])
thread.start()
# Finally, save topic in DB
try:
connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Inserting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("INSERT INTO pipeline VALUES ( %s )", (topic,))
connection.commit()
logger.info("Topic %s inserted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
elif json.loads(msg.value)["action"] == "unsubscribe":
logger.info("Delete Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'no'])
# Schedule the removal of Kibana dashboard and Elasticsearch index (retention time of 14 days)
scheduled_thread = threading.Timer(timedelta(days=14).total_seconds(), index_cleaner, args = [topic, msg.value])
# This call is for testing purposes, to be commented when unused:
#scheduled_thread = threading.Timer(timedelta(seconds=30).total_seconds(), index_cleaner, args = [topic, msg.value])
scheduled_thread.start()
logger.info("Data removal for topic %s scheduled in 14 days", topic)
# Finally, delete topic in DB
try:
connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Deleting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("DELETE FROM pipeline WHERE topic = %s", (topic,))
connection.commit()
logger.info("Topic %s deleted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
else:
logger.error("Action not allowed")
logger.info("Closing Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer.close()
def start_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Starting %s topic", signalling_topic_data["topic"])
logger.info("Sending POST request to %s", url_subscribe)
# Send the request to the DCM.
r = requests.post(url_subscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
# Create Kafka consumer.
global signalling_start
signalling_start = True
thread = threading.Thread(target = kafka_consumer_signalling_topic_handler, args = [signalling_topic_data])
thread.start()
@app.route('/portal/dcs/start_signalling/', methods=['POST'])
def start_dcs():
"""
Start signalling topics.
---
describe: start signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - POST /portal/dcs/start_signalling/")
try:
start_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
start_consuming_signalling_topic(json.dumps(signalling_metric_application))
start_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def stop_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Stopping %s topic", signalling_topic_data["topic"])
logger.info("Sending DELETE request to %s", url_unsubscribe)
# Send the request to the DCM.
r = requests.delete(url_unsubscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
# Delete Kafka consumer.
global signalling_start
# Put signalling_start to False, and then threads will finish their execution.
signalling_start = False
@app.route('/portal/dcs/stop_signalling/', methods=['DELETE'])
def stop_dcs():
"""
Stop signalling topics.
---
describe: stop signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - DELETE /portal/dcs/stop_signalling/")
try:
stop_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
stop_consuming_signalling_topic(json.dumps(signalling_metric_application))
stop_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def checkValidPort(value):
ivalue = int(value)
# RFC 793
if ivalue < 0 or ivalue > 65535:
raise argparse.ArgumentTypeError("%s is not a valid port" % value)
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dcm_ip_address",
help='DCM IP address, default IP is localhost',
default='localhost')
parser.add_argument(
"--eve_db_password",
help='DB password for eve user')
parser.add_argument(
"--port",
type=checkValidPort,
help='The port you want to use as an endpoint, default port is 8091',
default="8091")
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.basicConfig(filename='/var/log/dcs_rest_client.log')
logging.getLogger("DCSRestClient").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
args = parser.parse_args()
logger.info("Serving DCSRestClient on port %s", str(args.port))
global dcm_ip_address
dcm_ip_address= str(args.dcm_ip_address)
global url_subscribe
url_subscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_subscribe_url
global url_unsubscribe
url_unsubscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_unsubscribe_url
global eve_db_password
eve_db_password= str(args.eve_db_password)
#TODO: advanced feature - connect to the database and make sure that Logstash pipelines are created for the topics saved in the DB.
serve(app, host='0.0.0.0', port=args.port)
| 42.132203
| 154
| 0.629898
|
import requests
import argparse
import logging
import coloredlogs
import threading
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
import json
from kafka import KafkaConsumer
from threading import Thread
from threading import Timer
from datetime import timedelta
import psycopg2
import time
app = Flask(__name__)
logger = logging.getLogger("DCSRestClient")
signalling_metric_infrastructure = {'expId': 'internal', 'topic': 'signalling.metric.infrastructure'}
signalling_metric_application = {'expId': 'internal', 'topic': 'signalling.metric.application'}
signalling_kpi = {'expId': 'internal', 'topic': 'signalling.kpi'}
dcm_port = "8090"
dcm_subscribe_url = "/dcm/subscribe"
dcm_unsubscribe_url = "/dcm/unsubscribe"
dcs_dashboard_url = "http://127.0.0.1:8080/portal/dcs/dashboard"
signalling_start = False
@app.route('/', methods=['GET'])
def server_status():
logger.info("GET /")
return '', 200
@app.route("/spec", methods=['GET'])
def spec():
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "DCS REST API"
return jsonify(swag)
def kafka_consumer_refresh_dashboard_handler(topic, value):
logger.info("Creating Kafka Consumer for %s topic", topic)
consumer = KafkaConsumer(
topic,
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
message_received = False
while not message_received:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", topic, message)
message_received = True
time.sleep(5)
logger.info("Creating dashboard for topic: %s", topic)
r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
logger.info("Closing Kafka Consumer for %s topic", topic)
consumer.close()
def index_cleaner(topic, value):
logger.info("Time to delete the dashboard for topic %s", topic)
r = requests.delete(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
logger.info("Time to delete the Elasticsearch index for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'yes'])
def kafka_consumer_signalling_topic_handler(signalling_topic_data):
logger.info("Creating Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer = KafkaConsumer(
signalling_topic_data["topic"],
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
while signalling_start:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", signalling_topic_data["topic"], message)
for tp, messages in message.items():
for msg in messages:
logger.info("Value: %s", msg.value)
topic = json.loads(msg.value)["topic"]
if json.loads(msg.value)["action"] == "subscribe":
logger.info("Create Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/create_logstash_pipeline.sh', topic])
thread = threading.Thread(target = kafka_consumer_refresh_dashboard_handler, args = [topic, msg.value])
thread.start()
try:
connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Inserting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("INSERT INTO pipeline VALUES ( %s )", (topic,))
connection.commit()
logger.info("Topic %s inserted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
elif json.loads(msg.value)["action"] == "unsubscribe":
logger.info("Delete Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'no'])
scheduled_thread = threading.Timer(timedelta(days=14).total_seconds(), index_cleaner, args = [topic, msg.value])
scheduled_thread.start()
logger.info("Data removal for topic %s scheduled in 14 days", topic)
try:
connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Deleting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("DELETE FROM pipeline WHERE topic = %s", (topic,))
connection.commit()
logger.info("Topic %s deleted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
else:
logger.error("Action not allowed")
logger.info("Closing Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer.close()
def start_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Starting %s topic", signalling_topic_data["topic"])
logger.info("Sending POST request to %s", url_subscribe)
r = requests.post(url_subscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
global signalling_start
signalling_start = True
thread = threading.Thread(target = kafka_consumer_signalling_topic_handler, args = [signalling_topic_data])
thread.start()
@app.route('/portal/dcs/start_signalling/', methods=['POST'])
def start_dcs():
logger.info("Request received - POST /portal/dcs/start_signalling/")
try:
start_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
start_consuming_signalling_topic(json.dumps(signalling_metric_application))
start_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def stop_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Stopping %s topic", signalling_topic_data["topic"])
logger.info("Sending DELETE request to %s", url_unsubscribe)
r = requests.delete(url_unsubscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
global signalling_start
signalling_start = False
@app.route('/portal/dcs/stop_signalling/', methods=['DELETE'])
def stop_dcs():
logger.info("Request received - DELETE /portal/dcs/stop_signalling/")
try:
stop_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
stop_consuming_signalling_topic(json.dumps(signalling_metric_application))
stop_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def checkValidPort(value):
ivalue = int(value)
if ivalue < 0 or ivalue > 65535:
raise argparse.ArgumentTypeError("%s is not a valid port" % value)
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dcm_ip_address",
help='DCM IP address, default IP is localhost',
default='localhost')
parser.add_argument(
"--eve_db_password",
help='DB password for eve user')
parser.add_argument(
"--port",
type=checkValidPort,
help='The port you want to use as an endpoint, default port is 8091',
default="8091")
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.basicConfig(filename='/var/log/dcs_rest_client.log')
logging.getLogger("DCSRestClient").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
args = parser.parse_args()
logger.info("Serving DCSRestClient on port %s", str(args.port))
global dcm_ip_address
dcm_ip_address= str(args.dcm_ip_address)
global url_subscribe
url_subscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_subscribe_url
global url_unsubscribe
url_unsubscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_unsubscribe_url
global eve_db_password
eve_db_password= str(args.eve_db_password)
serve(app, host='0.0.0.0', port=args.port)
| true
| true
|
790ae0aeb23331f047ea7f73a20796f80cd8cb55
| 840
|
py
|
Python
|
logdna/utils.py
|
nimbinatus/python
|
1941935597e2250579d2876b2234c648fd236f1b
|
[
"MIT"
] | null | null | null |
logdna/utils.py
|
nimbinatus/python
|
1941935597e2250579d2876b2234c648fd236f1b
|
[
"MIT"
] | null | null | null |
logdna/utils.py
|
nimbinatus/python
|
1941935597e2250579d2876b2234c648fd236f1b
|
[
"MIT"
] | null | null | null |
import json
import socket
def is_jsonable(obj):
try:
json.dumps(obj)
return True
except (TypeError, OverflowError, ValueError):
return False
def sanitize_meta(meta):
keys_to_sanitize = []
for key, value in meta.items():
if not is_jsonable(value):
keys_to_sanitize.append(key)
if keys_to_sanitize:
for key in keys_to_sanitize:
del meta[key]
meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(
keys_to_sanitize)
return meta
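# Best-effort lookup of the host's outbound IP address via a UDP socket; falls back to 127.0.0.1.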
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
ip = '127.0.0.1'
finally:
s.close()
return ip
| 22.702703
| 74
| 0.591667
|
import json
import socket
def is_jsonable(obj):
try:
json.dumps(obj)
return True
except (TypeError, OverflowError, ValueError):
return False
def sanitize_meta(meta):
keys_to_sanitize = []
for key, value in meta.items():
if not is_jsonable(value):
keys_to_sanitize.append(key)
if keys_to_sanitize:
for key in keys_to_sanitize:
del meta[key]
meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(
keys_to_sanitize)
return meta
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
ip = '127.0.0.1'
finally:
s.close()
return ip
| true
| true
|
790ae12bafd7a1452c9a94286da7c5c51a49a888
| 4,648
|
py
|
Python
|
appgate/openapi/attribmaker.py
|
appgate/sdp-operator
|
289927e07eca84003aa4bd4631b57dc9955eee23
|
[
"MIT"
] | 6
|
2020-09-22T13:21:05.000Z
|
2022-01-06T01:49:22.000Z
|
appgate/openapi/attribmaker.py
|
appgate/sdp-operator
|
289927e07eca84003aa4bd4631b57dc9955eee23
|
[
"MIT"
] | 36
|
2020-09-23T06:38:51.000Z
|
2022-02-09T13:53:32.000Z
|
appgate/openapi/attribmaker.py
|
appgate/sdp-operator
|
289927e07eca84003aa4bd4631b57dc9955eee23
|
[
"MIT"
] | 3
|
2021-07-27T18:16:52.000Z
|
2022-03-01T22:18:15.000Z
|
from uuid import uuid4
from typing import Optional, Dict, Any, List
from appgate.openapi.types import OpenApiDict, AttribType, AttributesDict, \
IGNORED_EQ_ATTRIBUTES, OpenApiParserException, InstanceMakerConfig, UUID_REFERENCE_FIELD, K8S_LOADERS_FIELD_NAME
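# OpenAPI string formats that carry secrets and are therefore treated as write-only.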
write_only_formats = {'PEM', 'password'}
class SimpleAttribMaker:
def __init__(self, name: str, tpe: type, base_tpe: type, default: Optional[AttribType],
factory: Optional[type], definition: OpenApiDict, repr: bool = True) -> None:
self.base_tpe = base_tpe
self.name = name
self.tpe = tpe
self.default = default
self.factory = factory
self.repr = repr
self.definition = definition
@property
def metadata(self) -> Dict[str, Any]:
return self.definition.get('metadata', {})
@property
def is_password(self) -> bool:
return False
@property
def has_default(self) -> bool:
"""
        Checks whether the attribute has a default value or factory
"""
return self.factory is not None or self.default is not None
def values(self, attributes: Dict[str, 'SimpleAttribMaker'], required_fields: List[str],
instance_maker_config: InstanceMakerConfig) -> AttributesDict:
required = self.name in required_fields
definition = self.definition
read_only = definition.get('readOnly', False)
format = definition.get('format')
if type(format) is not dict and format in write_only_formats:
write_only = True
else:
write_only = definition.get('writeOnly', False)
if instance_maker_config.level == 0 and self.name == 'id':
            # We don't want to save the id on k8s
read_only = True
attribs: AttributesDict = {}
attribs['metadata'] = {
'name': self.name,
'readOnly': read_only,
'writeOnly': write_only,
'format': format,
'base_type': self.base_tpe,
}
if 'description' in definition:
attribs['metadata']['description'] = definition['description']
if 'example' in definition:
if isinstance(definition['example'], List):
attribs['metadata']['example'] = frozenset(definition['example'])
else:
attribs['metadata']['example'] = definition['example']
if UUID_REFERENCE_FIELD in definition:
attribs['metadata'][UUID_REFERENCE_FIELD] = definition[UUID_REFERENCE_FIELD]
if self.name in IGNORED_EQ_ATTRIBUTES or write_only or read_only:
attribs['eq'] = False
# Set type
if not required or read_only or write_only:
attribs['type'] = Optional[self.tpe]
attribs['metadata']['type'] = str(Optional[self.tpe])
elif required and (read_only or write_only):
raise OpenApiParserException(f'readOnly/writeOnly attribute {self.name} '
'can not be required')
else:
attribs['type'] = self.tpe
attribs['metadata']['type'] = str(self.tpe)
if instance_maker_config.level == 0 and self.name == 'id':
attribs['factory'] = lambda: str(uuid4())
elif self.factory and not (read_only or write_only):
attribs['factory'] = self.factory
elif not required or read_only or write_only:
attribs['default'] = definition.get('default',
None if (read_only or write_only) else self.default)
attribs['repr'] = self.repr
return attribs
class DeprecatedAttribMaker(SimpleAttribMaker):
pass
class DefaultAttribMaker(SimpleAttribMaker):
def values(self, attributes: Dict[str, 'SimpleAttribMaker'], required_fields: List[str],
instance_maker_config: InstanceMakerConfig) -> AttributesDict:
vs = {
'type': Optional[self.tpe],
'eq': False,
'metadata': {
'base_type': self.tpe,
'name': self.name,
},
'repr': self.repr,
}
if self.default:
vs['default'] = self.default
vs['type'] = self.tpe
elif self.factory:
vs['factory'] = self.factory
vs['type'] = self.tpe
return vs
def create_default_attrib(name: str, attrib_value: Any) -> DefaultAttribMaker:
return DefaultAttribMaker(
tpe=type(attrib_value),
base_tpe=type(attrib_value),
name=name,
default=attrib_value,
factory=None,
definition={})
| 36.598425
| 116
| 0.596386
|
from uuid import uuid4
from typing import Optional, Dict, Any, List
from appgate.openapi.types import OpenApiDict, AttribType, AttributesDict, \
IGNORED_EQ_ATTRIBUTES, OpenApiParserException, InstanceMakerConfig, UUID_REFERENCE_FIELD, K8S_LOADERS_FIELD_NAME
write_only_formats = {'PEM', 'password'}
class SimpleAttribMaker:
def __init__(self, name: str, tpe: type, base_tpe: type, default: Optional[AttribType],
factory: Optional[type], definition: OpenApiDict, repr: bool = True) -> None:
self.base_tpe = base_tpe
self.name = name
self.tpe = tpe
self.default = default
self.factory = factory
self.repr = repr
self.definition = definition
@property
def metadata(self) -> Dict[str, Any]:
return self.definition.get('metadata', {})
@property
def is_password(self) -> bool:
return False
@property
def has_default(self) -> bool:
return self.factory is not None or self.default is not None
def values(self, attributes: Dict[str, 'SimpleAttribMaker'], required_fields: List[str],
instance_maker_config: InstanceMakerConfig) -> AttributesDict:
required = self.name in required_fields
definition = self.definition
read_only = definition.get('readOnly', False)
format = definition.get('format')
if type(format) is not dict and format in write_only_formats:
write_only = True
else:
write_only = definition.get('writeOnly', False)
if instance_maker_config.level == 0 and self.name == 'id':
read_only = True
attribs: AttributesDict = {}
attribs['metadata'] = {
'name': self.name,
'readOnly': read_only,
'writeOnly': write_only,
'format': format,
'base_type': self.base_tpe,
}
if 'description' in definition:
attribs['metadata']['description'] = definition['description']
if 'example' in definition:
if isinstance(definition['example'], List):
attribs['metadata']['example'] = frozenset(definition['example'])
else:
attribs['metadata']['example'] = definition['example']
if UUID_REFERENCE_FIELD in definition:
attribs['metadata'][UUID_REFERENCE_FIELD] = definition[UUID_REFERENCE_FIELD]
if self.name in IGNORED_EQ_ATTRIBUTES or write_only or read_only:
attribs['eq'] = False
if not required or read_only or write_only:
attribs['type'] = Optional[self.tpe]
attribs['metadata']['type'] = str(Optional[self.tpe])
elif required and (read_only or write_only):
raise OpenApiParserException(f'readOnly/writeOnly attribute {self.name} '
'can not be required')
else:
attribs['type'] = self.tpe
attribs['metadata']['type'] = str(self.tpe)
if instance_maker_config.level == 0 and self.name == 'id':
attribs['factory'] = lambda: str(uuid4())
elif self.factory and not (read_only or write_only):
attribs['factory'] = self.factory
elif not required or read_only or write_only:
attribs['default'] = definition.get('default',
None if (read_only or write_only) else self.default)
attribs['repr'] = self.repr
return attribs
class DeprecatedAttribMaker(SimpleAttribMaker):
pass
class DefaultAttribMaker(SimpleAttribMaker):
def values(self, attributes: Dict[str, 'SimpleAttribMaker'], required_fields: List[str],
instance_maker_config: InstanceMakerConfig) -> AttributesDict:
vs = {
'type': Optional[self.tpe],
'eq': False,
'metadata': {
'base_type': self.tpe,
'name': self.name,
},
'repr': self.repr,
}
if self.default:
vs['default'] = self.default
vs['type'] = self.tpe
elif self.factory:
vs['factory'] = self.factory
vs['type'] = self.tpe
return vs
def create_default_attrib(name: str, attrib_value: Any) -> DefaultAttribMaker:
return DefaultAttribMaker(
tpe=type(attrib_value),
base_tpe=type(attrib_value),
name=name,
default=attrib_value,
factory=None,
definition={})
| true
| true
|
790ae1cd5f1909b3ea05f4bc1e22db416ad39314
| 13,230
|
py
|
Python
|
deepext_with_lightning/metrics/object_detection.py
|
pei223/deepext_with_lightning
|
e40ac19844a05864f803431d8ef4a534286a0950
|
[
"MIT"
] | 1
|
2021-02-25T14:30:08.000Z
|
2021-02-25T14:30:08.000Z
|
deepext_with_lightning/metrics/object_detection.py
|
pei223/deepext_with_lightning
|
e40ac19844a05864f803431d8ef4a534286a0950
|
[
"MIT"
] | null | null | null |
deepext_with_lightning/metrics/object_detection.py
|
pei223/deepext_with_lightning
|
e40ac19844a05864f803431d8ef4a534286a0950
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple, Union
import numpy as np
import torch
import pytorch_lightning as pl
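# Area of an (x_min, y_min, x_max, y_max) bounding box.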
def calc_area(bbox: np.ndarray):
return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
def calc_bbox_overlap_union_iou(pred: np.ndarray or None, teacher: np.ndarray) -> Tuple[float, float, float]:
"""
:param pred: ndarray (4, )
:param teacher: ndarray (4, )
:return: overlap, union, iou
"""
teacher_area = (teacher[2] - teacher[0]) * (teacher[3] - teacher[1])
if pred is None:
return 0.0, teacher_area, 0.0
pred_area = (pred[2] - pred[0]) * (pred[3] - pred[1])
intersection_width = np.maximum(np.minimum(pred[2], teacher[2]) - np.maximum(pred[0], teacher[0]), 0)
intersection_height = np.maximum(np.minimum(pred[3], teacher[3]) - np.maximum(pred[1], teacher[1]), 0)
overlap = intersection_width * intersection_height
union = teacher_area + pred_area - overlap
iou = overlap / union
return overlap, union, iou
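# Per-class IoU metric: for each image, accumulate overlap and area of predicted vs. ground-truth
# boxes per class, then average the resulting IoU over the images in which that class appears.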
class DetectionIoU(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes: bool = False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
self._by_classes = by_classes
self.add_state("image_count_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
dist_reduce_fx="sum")
self.add_state("total_iou_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
dist_reduce_fx="sum")
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
"""
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
"""
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
        # Group predictions by class up front, since an exhaustive search over all boxes is slow
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]): # Explore every batch.
bbox_annotations = targets[i, :, :]
# Exclude invalid label annotation.
bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]
pred_by_class = preds_by_class[i]
"""
1画像でラベルごとに計算.
ラベルごとの面積合計/overlapを計算
1画像ごとにIoU算出、最終的に画像平均を算出
"""
total_area_by_classes = [0 for _ in range(self._n_classes)]
total_overlap_by_classes = [0 for _ in range(self._n_classes)]
is_label_appeared = [False for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
total_area_by_classes[label] += calc_area(bbox_annotation)
pred_bboxes = pred_by_class[label]
if pred_bboxes is None or len(pred_bboxes) == 0:
continue
# Calculate area and overlap by class.
for pred_bbox in pred_bboxes:
overlap, _, _ = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
total_overlap_by_classes[label] += overlap
if is_label_appeared[label]:
continue
total_area_by_classes[label] += calc_area(pred_bbox)
is_label_appeared[label] = True
for label in range(self._n_classes):
# Not exist label in this data.
if total_area_by_classes[label] <= 0:
continue
self.total_iou_by_classes[label] += total_overlap_by_classes[label] / (
total_area_by_classes[label] - total_overlap_by_classes[label])
self.image_count_by_classes[label] += 1
def compute(self):
epsilon = 1e-8
iou_by_classes = self.total_iou_by_classes / (self.image_count_by_classes + epsilon)
if self._by_classes:
return iou_by_classes
return torch.mean(iou_by_classes)
class RecallPrecision(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes: bool = False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
self._by_classes = by_classes
self.add_state("tp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
self.add_state("fp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
self.add_state("fn_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
"""
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
"""
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
        # Group predictions by class up front, since an exhaustive search over all boxes is slow
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
# Exclude invalid label annotation.
bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]
pred_by_class = preds_by_class[i]
applied_bbox_count_by_classes = [0 for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
pred_bboxes = pred_by_class[label]
if pred_bboxes is None or len(pred_bboxes) == 0:
self.fn_by_classes[label] += 1
continue
# Explore max iou of bbox_annotation
is_matched = False
for pred_bbox in pred_bboxes:
overlap, union, iou = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
if iou >= 0.5:
applied_bbox_count_by_classes[label] += 1
self.tp_by_classes[label] += 1
is_matched = True
break
if not is_matched:
self.fn_by_classes[label] += 1
for label in range(self._n_classes):
self.fp_by_classes[label] += len(pred_by_class[label]) - applied_bbox_count_by_classes[label]
def compute(self):
epsilon = 1e-8
recall = self.tp_by_classes / (self.tp_by_classes + self.fn_by_classes + epsilon)
precision = self.tp_by_classes / (self.tp_by_classes + self.fp_by_classes + epsilon)
f_score = 2. * recall * precision / (recall + precision + epsilon)
if self._by_classes:
return recall, precision, f_score
return torch.mean(recall), torch.mean(precision), torch.mean(f_score)
class MeanAveragePrecision(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes=False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
# TODO want to implement using add_state
self.fp_list_by_classes = [[] for _ in range(n_classes)]
self.tp_list_by_classes = [[] for _ in range(n_classes)]
self.score_list_by_classes = [[] for _ in range(n_classes)]
self.num_annotations_by_classes = [0 for _ in range(n_classes)]
# self.add_state("fp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
# self.add_state("tp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
# self.add_state("score_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
# self.add_state("num_annotations_by_classes", default=[0 for _ in range(n_classes)], dist_reduce_fx="cat")
self._by_classes = by_classes
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
"""
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
"""
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
for i in range(len(preds)):
pred_bboxes, target_bboxes = preds[i], targets[i]
# exclude invalid annotations.
target_bboxes = target_bboxes[target_bboxes[:, 4] >= 0]
self._update_num_annotations(target_bboxes)
self._update_tp_fp_score(pred_bboxes, target_bboxes)
def compute(self):
ap_by_classes = [0 for _ in range(self._n_classes)]
for label in range(self._n_classes):
num_annotations = self.num_annotations_by_classes[label]
tp_list, fp_list = np.array(self.tp_list_by_classes[label]), np.array(self.fp_list_by_classes[label])
scores = np.array(self.score_list_by_classes[label])
indices = np.argsort(-scores)
# sort by score
tp_list, fp_list = tp_list[indices], fp_list[indices]
# cumulative sum
tp_list, fp_list = np.cumsum(tp_list), np.cumsum(fp_list)
if num_annotations == 0:
ap_by_classes[label] = 0
continue
recall_curve = tp_list / num_annotations
precision_curve = tp_list / np.maximum(tp_list + fp_list, np.finfo(np.float64).eps)
ap_by_classes[label] = self._compute_average_precision(recall_curve, precision_curve)
return ap_by_classes if self._by_classes else sum(ap_by_classes) / len(ap_by_classes)
def _update_tp_fp_score(self, pred_bboxes: np.ndarray, target_bboxes: np.ndarray):
"""
:param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))
:param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
"""
detected_indices = []
for i in range(pred_bboxes.shape[0]):
pred_label, pred_score = int(pred_bboxes[i][4]), pred_bboxes[i][5]
matched = False
for j in filter(lambda k: int(target_bboxes[k][4]) == pred_label and k not in detected_indices,
range(target_bboxes.shape[0])):
overlap, union, iou = calc_bbox_overlap_union_iou(pred_bboxes[i], target_bboxes[j])
if iou >= 0.5:
detected_indices.append(j)
self.fp_list_by_classes[pred_label].append(0)
self.tp_list_by_classes[pred_label].append(1)
matched = True
break
if not matched:
self.fp_list_by_classes[pred_label].append(1)
self.tp_list_by_classes[pred_label].append(0)
self.score_list_by_classes[pred_label].append(pred_score)
def _update_num_annotations(self, target_bboxes: np.ndarray):
"""
:param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
"""
counts = list(map(lambda i: np.count_nonzero(target_bboxes[:, 4] == i), range(self._n_classes)))
self.num_annotations_by_classes = list(
map(lambda i: counts[i] + self.num_annotations_by_classes[i], range(self._n_classes)))
def _compute_average_precision(self, recall_curve: np.ndarray, precision_curve: np.ndarray):
        # Reference: https://github.com/toandaominh1997/EfficientDet.Pytorch/blob/master/eval.py
assert recall_curve.ndim == 1 and precision_curve.ndim == 1
# correct AP calculation
# first append sentinel values at the end
mean_recall = np.concatenate(([0.], recall_curve, [1.]))
mean_precision = np.concatenate(([0.], precision_curve, [0.]))
# compute the precision envelope
for i in range(mean_precision.size - 1, 0, -1):
mean_precision[i - 1] = np.maximum(mean_precision[i - 1], mean_precision[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mean_recall[1:] != mean_recall[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mean_recall[i + 1] - mean_recall[i]) * mean_precision[i + 1])
return ap
def reset(self):
self.fp_list_by_classes = [[] for _ in range(self._n_classes)]
self.tp_list_by_classes = [[] for _ in range(self._n_classes)]
self.score_list_by_classes = [[] for _ in range(self._n_classes)]
self.num_annotations_by_classes = [0 for _ in range(self._n_classes)]
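# --- Editorial usage sketch (added; not part of the original module). A hypothetical call
# showing the 6-column prediction layout (x_min, y_min, x_max, y_max, label, score) that
# _update_tp_fp_score expects; the box coordinates and score are made up, and the single
# matching box gives class 0 an AP of 1.0.
def _example_mean_average_precision_usage():
    metric = MeanAveragePrecision(n_classes=2)
    preds = [np.array([[10., 10., 50., 50., 0., 0.9]])]  # one confident class-0 prediction
    targets = np.array([[[12., 12., 48., 48., 0.]]])  # one class-0 ground-truth box
    metric.update(preds, targets)
    return metric.compute()  # mean AP over both classes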
| 47.589928
| 115
| 0.621391
|
from typing import List, Tuple, Union
import numpy as np
import torch
import pytorch_lightning as pl
def calc_area(bbox: np.ndarray):
return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
def calc_bbox_overlap_union_iou(pred: np.ndarray or None, teacher: np.ndarray) -> Tuple[float, float, float]:
teacher_area = (teacher[2] - teacher[0]) * (teacher[3] - teacher[1])
if pred is None:
return 0.0, teacher_area, 0.0
pred_area = (pred[2] - pred[0]) * (pred[3] - pred[1])
intersection_width = np.maximum(np.minimum(pred[2], teacher[2]) - np.maximum(pred[0], teacher[0]), 0)
intersection_height = np.maximum(np.minimum(pred[3], teacher[3]) - np.maximum(pred[1], teacher[1]), 0)
overlap = intersection_width * intersection_height
union = teacher_area + pred_area - overlap
iou = overlap / union
return overlap, union, iou
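# --- Editorial sketch (added; not part of the original module): a worked IoU example with
# made-up boxes. For pred (0, 0, 10, 10) and teacher (5, 5, 15, 15) each box has area 100,
# the intersection is a 5 x 5 patch (overlap 25), the union is 100 + 100 - 25 = 175, and
# the IoU is 25 / 175, roughly 0.143.
def _example_iou():
    return calc_bbox_overlap_union_iou(np.array([0., 0., 10., 10.]),
                                       np.array([5., 5., 15., 15.]))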
class DetectionIoU(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes: bool = False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
self._by_classes = by_classes
self.add_state("image_count_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
dist_reduce_fx="sum")
self.add_state("total_iou_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
dist_reduce_fx="sum")
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]
pred_by_class = preds_by_class[i]
total_area_by_classes = [0 for _ in range(self._n_classes)]
total_overlap_by_classes = [0 for _ in range(self._n_classes)]
is_label_appeared = [False for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
total_area_by_classes[label] += calc_area(bbox_annotation)
pred_bboxes = pred_by_class[label]
if pred_bboxes is None or len(pred_bboxes) == 0:
continue
for pred_bbox in pred_bboxes:
overlap, _, _ = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
total_overlap_by_classes[label] += overlap
if is_label_appeared[label]:
continue
total_area_by_classes[label] += calc_area(pred_bbox)
is_label_appeared[label] = True
for label in range(self._n_classes):
if total_area_by_classes[label] <= 0:
continue
self.total_iou_by_classes[label] += total_overlap_by_classes[label] / (
total_area_by_classes[label] - total_overlap_by_classes[label])
self.image_count_by_classes[label] += 1
def compute(self):
epsilon = 1e-8
iou_by_classes = self.total_iou_by_classes / (self.image_count_by_classes + epsilon)
if self._by_classes:
return iou_by_classes
return torch.mean(iou_by_classes)
class RecallPrecision(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes: bool = False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
self._by_classes = by_classes
self.add_state("tp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
self.add_state("fp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
self.add_state("fn_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]
pred_by_class = preds_by_class[i]
applied_bbox_count_by_classes = [0 for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
pred_bboxes = pred_by_class[label]
if pred_bboxes is None or len(pred_bboxes) == 0:
self.fn_by_classes[label] += 1
continue
is_matched = False
for pred_bbox in pred_bboxes:
overlap, union, iou = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
if iou >= 0.5:
applied_bbox_count_by_classes[label] += 1
self.tp_by_classes[label] += 1
is_matched = True
break
if not is_matched:
self.fn_by_classes[label] += 1
for label in range(self._n_classes):
self.fp_by_classes[label] += len(pred_by_class[label]) - applied_bbox_count_by_classes[label]
def compute(self):
epsilon = 1e-8
recall = self.tp_by_classes / (self.tp_by_classes + self.fn_by_classes + epsilon)
precision = self.tp_by_classes / (self.tp_by_classes + self.fp_by_classes + epsilon)
f_score = 2. * recall * precision / (recall + precision + epsilon)
if self._by_classes:
return recall, precision, f_score
return torch.mean(recall), torch.mean(precision), torch.mean(f_score)
class MeanAveragePrecision(pl.metrics.Metric):
def __init__(self, n_classes: int, by_classes=False):
super().__init__(compute_on_step=False)
self._n_classes = n_classes
self.fp_list_by_classes = [[] for _ in range(n_classes)]
self.tp_list_by_classes = [[] for _ in range(n_classes)]
self.score_list_by_classes = [[] for _ in range(n_classes)]
self.num_annotations_by_classes = [0 for _ in range(n_classes)]
self._by_classes = by_classes
def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
for i in range(len(preds)):
pred_bboxes, target_bboxes = preds[i], targets[i]
target_bboxes = target_bboxes[target_bboxes[:, 4] >= 0]
self._update_num_annotations(target_bboxes)
self._update_tp_fp_score(pred_bboxes, target_bboxes)
def compute(self):
ap_by_classes = [0 for _ in range(self._n_classes)]
for label in range(self._n_classes):
num_annotations = self.num_annotations_by_classes[label]
tp_list, fp_list = np.array(self.tp_list_by_classes[label]), np.array(self.fp_list_by_classes[label])
scores = np.array(self.score_list_by_classes[label])
indices = np.argsort(-scores)
tp_list, fp_list = tp_list[indices], fp_list[indices]
tp_list, fp_list = np.cumsum(tp_list), np.cumsum(fp_list)
if num_annotations == 0:
ap_by_classes[label] = 0
continue
recall_curve = tp_list / num_annotations
precision_curve = tp_list / np.maximum(tp_list + fp_list, np.finfo(np.float64).eps)
ap_by_classes[label] = self._compute_average_precision(recall_curve, precision_curve)
return ap_by_classes if self._by_classes else sum(ap_by_classes) / len(ap_by_classes)
def _update_tp_fp_score(self, pred_bboxes: np.ndarray, target_bboxes: np.ndarray):
detected_indices = []
for i in range(pred_bboxes.shape[0]):
pred_label, pred_score = int(pred_bboxes[i][4]), pred_bboxes[i][5]
matched = False
for j in filter(lambda k: int(target_bboxes[k][4]) == pred_label and k not in detected_indices,
range(target_bboxes.shape[0])):
overlap, union, iou = calc_bbox_overlap_union_iou(pred_bboxes[i], target_bboxes[j])
if iou >= 0.5:
detected_indices.append(j)
self.fp_list_by_classes[pred_label].append(0)
self.tp_list_by_classes[pred_label].append(1)
matched = True
break
if not matched:
self.fp_list_by_classes[pred_label].append(1)
self.tp_list_by_classes[pred_label].append(0)
self.score_list_by_classes[pred_label].append(pred_score)
def _update_num_annotations(self, target_bboxes: np.ndarray):
counts = list(map(lambda i: np.count_nonzero(target_bboxes[:, 4] == i), range(self._n_classes)))
self.num_annotations_by_classes = list(
map(lambda i: counts[i] + self.num_annotations_by_classes[i], range(self._n_classes)))
def _compute_average_precision(self, recall_curve: np.ndarray, precision_curve: np.ndarray):
assert recall_curve.ndim == 1 and precision_curve.ndim == 1
mean_recall = np.concatenate(([0.], recall_curve, [1.]))
mean_precision = np.concatenate(([0.], precision_curve, [0.]))
for i in range(mean_precision.size - 1, 0, -1):
mean_precision[i - 1] = np.maximum(mean_precision[i - 1], mean_precision[i])
i = np.where(mean_recall[1:] != mean_recall[:-1])[0]
ap = np.sum((mean_recall[i + 1] - mean_recall[i]) * mean_precision[i + 1])
return ap
def reset(self):
self.fp_list_by_classes = [[] for _ in range(self._n_classes)]
self.tp_list_by_classes = [[] for _ in range(self._n_classes)]
self.score_list_by_classes = [[] for _ in range(self._n_classes)]
self.num_annotations_by_classes = [0 for _ in range(self._n_classes)]
| true
| true
|
790ae308eb3a6aeeafb3c1213e67a5e87168931c
| 1,200
|
py
|
Python
|
examples/linear_regression/linear_regression_numba.py
|
uw-ipd/numba
|
26dde2b28cadda403a5549a84dc1698900b23f74
|
[
"BSD-2-Clause"
] | 140
|
2017-07-15T21:17:44.000Z
|
2022-03-19T00:56:05.000Z
|
examples/linear_regression/linear_regression_numba.py
|
uw-ipd/numba
|
26dde2b28cadda403a5549a84dc1698900b23f74
|
[
"BSD-2-Clause"
] | 24
|
2017-07-24T16:25:35.000Z
|
2021-12-08T17:54:38.000Z
|
examples/linear_regression/linear_regression_numba.py
|
uw-ipd/numba
|
26dde2b28cadda403a5549a84dc1698900b23f74
|
[
"BSD-2-Clause"
] | 50
|
2017-07-15T21:15:16.000Z
|
2021-12-12T15:27:05.000Z
|
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import numba
import numpy as np
import argparse
import time
@numba.njit()
def linear_regression(Y, X, w, iterations, alphaN):
for i in range(iterations):
w -= alphaN * np.dot(X.T, np.dot(X,w)-Y)
return w
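# Editorial note (added comment, not in the original example): each iteration above is one
# batch gradient-descent step on the least-squares objective 0.5 * ||X @ w - Y||**2, whose
# gradient with respect to w is X.T @ (X @ w - Y); alphaN is the step size already divided
# by the number of samples (see main() below).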
def main():
parser = argparse.ArgumentParser(description='Linear Regression.')
parser.add_argument('--samples', dest='samples', type=int, default=200000)
parser.add_argument('--features', dest='features', type=int, default=10)
parser.add_argument('--functions', dest='functions', type=int, default=4)
parser.add_argument('--iterations', dest='iterations', type=int, default=20)
args = parser.parse_args()
N = args.samples
D = args.features
p = args.functions
iterations = args.iterations
alphaN = 0.01/N
w = np.zeros((D,p))
np.random.seed(0)
points = np.random.random((N,D))
labels = np.random.random((N,p))
t1 = time.time()
w = linear_regression(labels, points, w, iterations, alphaN)
selftimed = time.time()-t1
print("SELFTIMED ", selftimed)
print("checksum: ", np.sum(w))
if __name__ == '__main__':
main()
| 29.268293
| 80
| 0.661667
|
import numba
import numpy as np
import argparse
import time
@numba.njit()
def linear_regression(Y, X, w, iterations, alphaN):
for i in range(iterations):
w -= alphaN * np.dot(X.T, np.dot(X,w)-Y)
return w
def main():
parser = argparse.ArgumentParser(description='Linear Regression.')
parser.add_argument('--samples', dest='samples', type=int, default=200000)
parser.add_argument('--features', dest='features', type=int, default=10)
parser.add_argument('--functions', dest='functions', type=int, default=4)
parser.add_argument('--iterations', dest='iterations', type=int, default=20)
args = parser.parse_args()
N = args.samples
D = args.features
p = args.functions
iterations = args.iterations
alphaN = 0.01/N
w = np.zeros((D,p))
np.random.seed(0)
points = np.random.random((N,D))
labels = np.random.random((N,p))
t1 = time.time()
w = linear_regression(labels, points, w, iterations, alphaN)
selftimed = time.time()-t1
print("SELFTIMED ", selftimed)
print("checksum: ", np.sum(w))
if __name__ == '__main__':
main()
| true
| true
|
790ae362b32658dd0747cc1017b53d5aa98dd30a
| 17,875
|
py
|
Python
|
nlpia/book/examples/ch09.py
|
brusic/nlpia
|
e239074eaa1fd51eb1c9a35d53a69e3b15343f57
|
[
"MIT"
] | 1
|
2021-04-15T04:26:00.000Z
|
2021-04-15T04:26:00.000Z
|
nlpia/book/examples/ch09.py
|
brusic/nlpia
|
e239074eaa1fd51eb1c9a35d53a69e3b15343f57
|
[
"MIT"
] | null | null | null |
nlpia/book/examples/ch09.py
|
brusic/nlpia
|
e239074eaa1fd51eb1c9a35d53a69e3b15343f57
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[ ]:
import os
import re
import tarfile
import requests
from pugnlp.futil import path_status, find_files
# In[ ]:
# From the nlpia package for downloading data too big for the repo
BIG_URLS = {
'w2v': (
'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',
1647046227,
),
'slang': (
'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',
117633024,
),
'tweets': (
'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',
311725313,
),
'lsa_tweets': (
'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',
3112841563, # 3112841312,
),
'imdb': (
'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',
3112841563, # 3112841312,
),
}
# In[ ]:
# These functions are part of the nlpia package which can be pip installed and run from there.
def dropbox_basename(url):
filename = os.path.basename(url)
match = re.findall(r'\?dl=[0-9]$', filename)
if match:
return filename[:-len(match[0])]
return filename
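# Editorial note (added comment, not in the original script): dropbox_basename() strips a
# trailing Dropbox download flag, e.g. it maps
# 'https://www.dropbox.com/s/.../aclImdb_v1.tar.gz?dl=1' to 'aclImdb_v1.tar.gz'.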
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
"""Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https"""
if filename is None:
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = url[:-1] + '1' # noninteractive download
if verbose:
tqdm_prog = tqdm
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm
r = requests.get(url, stream=True, allow_redirects=True)
size = r.headers.get('Content-Length', None) if size is None else size
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if stat['type'] == 'file' and stat['size'] == size: # TODO: check md5 or get the right size of remote file
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive chunks
f.write(chunk)
r.close()
return file_path
def untar(fname):
if fname.endswith("tar.gz"):
with tarfile.open(fname) as tf:
tf.extractall()
else:
print("Not a tar.gz file: {}".format(fname))
# In[ ]:
# UNCOMMENT these 2 lines if you haven't already download the word2vec model and the imdb dataset
# download_file(BIG_URLS['w2v'][0])
# untar(download_file(BIG_URLS['imdb'][0]))
# In[ ]:
maxlen = 400
batch_size = 32
embedding_dims = 300
epochs = 2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
import glob
import os
from random import shuffle
def pre_process_data(filepath):
"""
This is dependent on your training data source but we will try to generalize it as best as possible.
"""
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset
# In[ ]:
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
def tokenize_and_vectorize(dataset):
tokenizer = TreebankWordTokenizer()
vectorized_data = []
expected = []
for sample in dataset:
tokens = tokenizer.tokenize(sample[1])
sample_vecs = []
for token in tokens:
try:
sample_vecs.append(word_vectors[token])
except KeyError:
pass # No matching token in the Google w2v vocab
vectorized_data.append(sample_vecs)
return vectorized_data
# In[ ]:
def collect_expected(dataset):
""" Peel of the target values from the dataset """
expected = []
for sample in dataset:
expected.append(sample[0])
return expected
# In[ ]:
def pad_trunc(data, maxlen):
""" For a given dataset pad with zero vectors or truncate to maxlen """
new_data = []
# Create a vector of 0's the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = sample
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
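# Editorial sketch (added comment, not in the original notebook): with 300-d word vectors and
# maxlen=400, pad_trunc makes every sample exactly 400 vectors long, e.g. a 12-token review
# gains 388 all-zero vectors and a 950-token review keeps only its first 400, which is what
# lets np.reshape(..., (len(data), maxlen, embedding_dims)) below succeed.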
# In[ ]:
import numpy as np
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
maxlen = 400
batch_size = 32        # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model1.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights1.h5")
print('Model saved.')
# In[ ]:
from keras.models import model_from_json
with open("lstm_model1.json", "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights('lstm_weights1.h5')
# In[ ]:
sample_1 = "I'm hate that the dismal weather that had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend."
# We pass a dummy value in the first element of the tuple just because our helper expects it from the way we processed the initial data. That value never reaches the network, so it can be whatever.
vec_list = tokenize_and_vectorize([(1, sample_1)])
# Tokenize returns a list of the data (length 1 here)
test_vec_list = pad_trunc(vec_list, maxlen)
test_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims))
print("Sample's sentiment, 1 - pos, 2 - neg : {}".format(model.predict_classes(test_vec)))
print("Raw output of sigmoid function: {}".format(model.predict(test_vec)))
# In[ ]:
def test_len(data, maxlen):
total_len = truncated = exact = padded = 0
for sample in data:
total_len += len(sample)
if len(sample) > maxlen:
truncated += 1
elif len(sample) < maxlen:
padded += 1
else:
exact +=1
print('Padded: {}'.format(padded))
print('Equal: {}'.format(exact))
print('Truncated: {}'.format(truncated))
print('Avg length: {}'.format(total_len/len(data)))
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
test_len(vectorized_data, 400)
# In[ ]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
maxlen = 200
batch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model7.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights7.h5")
print('Model saved.')
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
# In[ ]:
def avg_len(data):
total_len = 0
for sample in data:
total_len += len(sample[1])
print(total_len/len(data))
print(avg_len(dataset))
# In[ ]:
def clean_data(data):
""" Shift to lower case, replace unknowns with UNK, and listify """
new_data = []
VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower(): # Just grab the string, not the label
if char in VALID:
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data
listified_data = clean_data(dataset)
# In[ ]:
def char_pad_trunc(data, maxlen):
""" We truncate to maxlen or add in PAD tokens """
new_dataset = []
for sample in data:
if len(sample) > maxlen:
new_data = sample[:maxlen]
elif len(sample) < maxlen:
pads = maxlen - len(sample)
new_data = sample + ['PAD'] * pads
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset
maxlen = 1500
# In[ ]:
def create_dicts(data):
""" Modified from Keras LSTM example"""
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char
# In[ ]:
import numpy as np
def onehot_encode(dataset, char_indices, maxlen):
"""
One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
"""
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for i, sentence in enumerate(dataset):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
return X
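# Editorial sketch (added comment, not in the original notebook): for the cleaned character
# data below, char_indices has one slot per distinct token ('a'-'z', digits, punctuation,
# 'UNK', 'PAD'), so onehot_encode returns an array of shape (num_samples, 1500, vocab_size)
# with exactly one 1 per character position.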
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
listified_data = clean_data(dataset)
maxlen = 1500
common_length_data = char_pad_trunc(listified_data, maxlen)
char_indices, indices_char = create_dicts(common_length_data)
encoded_data = onehot_encode(common_length_data, char_indices, maxlen)
# In[ ]:
split_point = int(len(encoded_data)*.8)
x_train = encoded_data[:split_point]
y_train = expected[:split_point]
x_test = encoded_data[split_point:]
y_test = expected[split_point:]
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, LSTM
num_neurons = 40
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, len(char_indices.keys()))))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
# In[ ]:
batch_size = 32
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("char_lstm_model3.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("char_lstm_weights3.h5")
print('Model saved.')
# In[ ]:
from nltk.corpus import gutenberg
print(gutenberg.fileids())
# In[ ]:
text = ''
for txt in gutenberg.fileids():
if 'shakespeare' in txt:
text += gutenberg.raw(txt).lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# In[ ]:
print(text[:500])
# In[ ]:
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
# In[ ]:
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
# In[ ]:
epochs = 6
batch_size = 128
model_structure = model.to_json()
with open("shakes_lstm_model.json", "w") as json_file:
json_file.write(model_structure)
for i in range(5):
model.fit(X, y,
batch_size=batch_size,
epochs=epochs)
model.save_weights("shakes_lstm_weights_{}.h5".format(i+1))
print('Model saved.')
# In[ ]:
### NOT IN CHAPTER, Just to reproduce output
from keras.models import model_from_json
with open('shakes_lstm_model.json', 'r') as f:
model_json = f.read()
model = model_from_json(model_json)
model.load_weights('shakes_lstm_weights_4.h5')
# In[ ]:
import random
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
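# Editorial sketch (added comment, not in the original notebook): temperature rescales the
# predicted distribution before sampling. For preds = [0.7, 0.2, 0.1], a temperature of 0.2
# sharpens it towards the argmax (about 0.998 on the first character), 1.0 leaves it
# unchanged, and larger values flatten it; that is why the loop below tries several
# "diversity" settings.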
# In[ ]:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
# In[ ]:
from keras.models import Sequential
from keras.layers import GRU
model = Sequential()
model.add(GRU(num_neurons, return_sequences=True, input_shape=X[0].shape))
# In[ ]:
from keras.models import Sequential
from keras.layers import LSTM
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))
model.add(LSTM(num_neurons_2, return_sequences=True))
| 23.73838
| 215
| 0.669762
|
import os
import re
import tarfile
import requests
from pugnlp.futil import path_status, find_files
BIG_URLS = {
'w2v': (
'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',
1647046227,
),
'slang': (
'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',
117633024,
),
'tweets': (
'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',
311725313,
),
'lsa_tweets': (
'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',
3112841563,
),
'imdb': (
'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',
3112841563,
),
}
def dropbox_basename(url):
filename = os.path.basename(url)
match = re.findall(r'\?dl=[0-9]$', filename)
if match:
return filename[:-len(match[0])]
return filename
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
if filename is None:
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = url[:-1] + '1'
if verbose:
tqdm_prog = tqdm
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm
r = requests.get(url, stream=True, allow_redirects=True)
size = r.headers.get('Content-Length', None) if size is None else size
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if stat['type'] == 'file' and stat['size'] == size:
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
r.close()
return file_path
def untar(fname):
if fname.endswith("tar.gz"):
with tarfile.open(fname) as tf:
tf.extractall()
else:
print("Not a tar.gz file: {}".format(fname))
# download_file(BIG_URLS['w2v'][0])
# untar(download_file(BIG_URLS['imdb'][0]))
# In[ ]:
maxlen = 400
batch_size = 32
embedding_dims = 300
epochs = 2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
import glob
import os
from random import shuffle
def pre_process_data(filepath):
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset
# In[ ]:
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
def tokenize_and_vectorize(dataset):
tokenizer = TreebankWordTokenizer()
vectorized_data = []
expected = []
for sample in dataset:
tokens = tokenizer.tokenize(sample[1])
sample_vecs = []
for token in tokens:
try:
sample_vecs.append(word_vectors[token])
except KeyError:
pass # No matching token in the Google w2v vocab
vectorized_data.append(sample_vecs)
return vectorized_data
# In[ ]:
def collect_expected(dataset):
expected = []
for sample in dataset:
expected.append(sample[0])
return expected
# In[ ]:
def pad_trunc(data, maxlen):
new_data = []
# Create a vector of 0's the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = sample
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
import numpy as np
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
maxlen = 400
batch_size = 32
embedding_dims = 300
epochs = 2
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model1.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights1.h5")
print('Model saved.')
from keras.models import model_from_json
with open("lstm_model1.json", "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights('lstm_weights1.h5')
sample_1 = "I'm hate that the dismal weather that had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend."
vec_list = tokenize_and_vectorize([(1, sample_1)])
# Tokenize returns a list of the data (length 1 here)
test_vec_list = pad_trunc(vec_list, maxlen)
test_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims))
print("Sample's sentiment, 1 - pos, 2 - neg : {}".format(model.predict_classes(test_vec)))
print("Raw output of sigmoid function: {}".format(model.predict(test_vec)))
def test_len(data, maxlen):
total_len = truncated = exact = padded = 0
for sample in data:
total_len += len(sample)
if len(sample) > maxlen:
truncated += 1
elif len(sample) < maxlen:
padded += 1
else:
exact +=1
print('Padded: {}'.format(padded))
print('Equal: {}'.format(exact))
print('Truncated: {}'.format(truncated))
print('Avg length: {}'.format(total_len/len(data)))
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
test_len(vectorized_data, 400)
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
maxlen = 200
batch_size = 32
embedding_dims = 300
epochs = 2
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model7.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights7.h5")
print('Model saved.')
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
def avg_len(data):
total_len = 0
for sample in data:
total_len += len(sample[1])
print(total_len/len(data))
print(avg_len(dataset))
def clean_data(data):
new_data = []
VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower(): # Just grab the string, not the label
if char in VALID:
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data
listified_data = clean_data(dataset)
# In[ ]:
def char_pad_trunc(data, maxlen):
new_dataset = []
for sample in data:
if len(sample) > maxlen:
new_data = sample[:maxlen]
elif len(sample) < maxlen:
pads = maxlen - len(sample)
new_data = sample + ['PAD'] * pads
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset
maxlen = 1500
# In[ ]:
def create_dicts(data):
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char
# In[ ]:
import numpy as np
def onehot_encode(dataset, char_indices, maxlen):
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for i, sentence in enumerate(dataset):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
return X
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
listified_data = clean_data(dataset)
maxlen = 1500
common_length_data = char_pad_trunc(listified_data, maxlen)
char_indices, indices_char = create_dicts(common_length_data)
encoded_data = onehot_encode(common_length_data, char_indices, maxlen)
# In[ ]:
split_point = int(len(encoded_data)*.8)
x_train = encoded_data[:split_point]
y_train = expected[:split_point]
x_test = encoded_data[split_point:]
y_test = expected[split_point:]
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, LSTM
num_neurons = 40
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, len(char_indices.keys()))))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
# In[ ]:
batch_size = 32
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("char_lstm_model3.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("char_lstm_weights3.h5")
print('Model saved.')
# In[ ]:
from nltk.corpus import gutenberg
print(gutenberg.fileids())
# In[ ]:
text = ''
for txt in gutenberg.fileids():
if 'shakespeare' in txt:
text += gutenberg.raw(txt).lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# In[ ]:
print(text[:500])
# In[ ]:
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
# In[ ]:
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
# In[ ]:
epochs = 6
batch_size = 128
model_structure = model.to_json()
with open("shakes_lstm_model.json", "w") as json_file:
json_file.write(model_structure)
for i in range(5):
model.fit(X, y,
batch_size=batch_size,
epochs=epochs)
model.save_weights("shakes_lstm_weights_{}.h5".format(i+1))
print('Model saved.')
# In[ ]:
### NOT IN CHAPTER, Just to reproduce output
from keras.models import model_from_json
with open('shakes_lstm_model.json', 'r') as f:
model_json = f.read()
model = model_from_json(model_json)
model.load_weights('shakes_lstm_weights_4.h5')
# In[ ]:
import random
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
# In[ ]:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
# In[ ]:
from keras.models import Sequential
from keras.layers import GRU
model = Sequential()
model.add(GRU(num_neurons, return_sequences=True, input_shape=X[0].shape))
# In[ ]:
from keras.models import Sequential
from keras.layers import LSTM
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))
model.add(LSTM(num_neurons_2, return_sequences=True))
| true
| true
|
790ae4372b41981ec1d96d170bff059bcff2649f
| 3,101
|
py
|
Python
|
TFG/checkEpochAndUpdateJSON.py
|
lmagellanic-cloud/phishers-monitor
|
a60c9376aa539a843fa365be7fc87724c7da43a3
|
[
"Apache-2.0"
] | null | null | null |
TFG/checkEpochAndUpdateJSON.py
|
lmagellanic-cloud/phishers-monitor
|
a60c9376aa539a843fa365be7fc87724c7da43a3
|
[
"Apache-2.0"
] | 12
|
2020-09-07T03:40:30.000Z
|
2022-02-26T17:33:06.000Z
|
TFG/checkEpochAndUpdateJSON.py
|
lmagellanic-cloud/phishers-monitor
|
a60c9376aa539a843fa365be7fc87724c7da43a3
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import time
import datetime
import manageMonitoredUsersDB
pathToJSON = os.getcwd() + '/generatedJSON'
def get_local_json_timestamp_epoch(username, filename):
monitoredJSON = None
try:
monitoredJSON = json.load(open(pathToJSON + os.sep + filename, "r+"))
except:
with open(os.getcwd() + '/logs/fail_to_get_local_epoch', "a") as fileText:
fileText.write("The JSON fail to read is " + pathToJSON + os.sep + filename + " at " + str(datetime.datetime.now()) + "\n")
fileText.close()
if monitoredJSON == None:
return None
user_info = monitoredJSON["user_info"]
json_timestamp_epoch = user_info["json_timestamp_epoch"]
    json_timestamp_epoch = float(json_timestamp_epoch) # LOCAL epoch
return json_timestamp_epoch
def get_remote_json_timestamp_epoch(username):
user_infoRemote = None
monitoredUserSelected = manageMonitoredUsersDB.get_monitoredUserByName(username)
temp = monitoredUserSelected[2]
temp = temp.replace("'", "\"")
temp = temp.replace("True", "true")
temp = temp.replace("False", "false")
temp = json.loads(temp)
for key in temp.keys():
if key == "user_info":
user_infoRemote = temp[key]
if user_infoRemote != None:
json_timestamp_epochRemote = user_infoRemote["json_timestamp_epoch"]
        return float(json_timestamp_epochRemote) # REMOTE epoch, the one stored in monitoredUser.db
else:
print("\n" + "\033[91m" + "ERROR: No se ha podido obtener user_info en remoto, monitoredUser.db" + "\033[0m" + "\n")
with open(os.getcwd() + '/logs/fail_to_get_remote_epoch', "a") as fileText:
fileText.write("The username fail to read is " + username + " at " + str(datetime.datetime.now()) + "\n")
fileText.close()
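# Editorial note (added comment, not in the original script): the replace() calls above turn
# the Python repr stored in monitoredUser.db into parseable JSON, e.g. the hypothetical value
# "{'user_info': {'json_timestamp_epoch': '1.0', 'active': True}}" becomes
# '{"user_info": {"json_timestamp_epoch": "1.0", "active": true}}'. This breaks if any stored
# string itself contains a single quote or the words True/False.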
def checkArrivedJSON():
for filename in sorted(os.listdir(pathToJSON)):
if filename.endswith(".json"):
            username = filename[:-len(".json")]  # str.strip(".json") would also eat matching characters from the username itself
            # Get the epoch of the local JSON
json_timestamp_epoch = get_local_json_timestamp_epoch(username, filename)
if json_timestamp_epoch == None:
continue
            # Get the epoch of the remote JSON, stored in monitoredUser.db
json_timestamp_epochRemote = get_remote_json_timestamp_epoch(username)
            # Check the elapsed time between the local and remote JSONs
#print("\033[92m" + "json_timestamp_epoch: " + str(json_timestamp_epoch) + "\033[0m" + "\n")
#print("\033[92m" + "json_timestamp_epochRemote: " + str(json_timestamp_epochRemote) + "\033[0m" + "\n")
if json_timestamp_epoch > json_timestamp_epochRemote:
monitoredJSON = json.load(open(pathToJSON + os.sep + filename, "r+"))
monitoredJSON = str(monitoredJSON)
manageMonitoredUsersDB.update_monitoredUserByName(username, monitoredJSON)
#MAIN
veces = 0
while True:
checkArrivedJSON()
time.sleep(1)
if veces >= 10:
print("Checking new user activities...\n")
veces = 0
veces += 1
| 43.676056
| 135
| 0.659787
|
import os
import json
import time
import datetime
import manageMonitoredUsersDB
pathToJSON = os.getcwd() + '/generatedJSON'
def get_local_json_timestamp_epoch(username, filename):
monitoredJSON = None
try:
monitoredJSON = json.load(open(pathToJSON + os.sep + filename, "r+"))
except:
with open(os.getcwd() + '/logs/fail_to_get_local_epoch', "a") as fileText:
fileText.write("The JSON fail to read is " + pathToJSON + os.sep + filename + " at " + str(datetime.datetime.now()) + "\n")
fileText.close()
if monitoredJSON == None:
return None
user_info = monitoredJSON["user_info"]
json_timestamp_epoch = user_info["json_timestamp_epoch"]
json_timestamp_epoch = float(json_timestamp_epoch)
return json_timestamp_epoch
def get_remote_json_timestamp_epoch(username):
user_infoRemote = None
monitoredUserSelected = manageMonitoredUsersDB.get_monitoredUserByName(username)
temp = monitoredUserSelected[2]
temp = temp.replace("'", "\"")
temp = temp.replace("True", "true")
temp = temp.replace("False", "false")
temp = json.loads(temp)
for key in temp.keys():
if key == "user_info":
user_infoRemote = temp[key]
if user_infoRemote != None:
json_timestamp_epochRemote = user_infoRemote["json_timestamp_epoch"]
        return float(json_timestamp_epochRemote) # REMOTE epoch, the one stored in monitoredUser.db
else:
print("\n" + "\033[91m" + "ERROR: No se ha podido obtener user_info en remoto, monitoredUser.db" + "\033[0m" + "\n")
with open(os.getcwd() + '/logs/fail_to_get_remote_epoch', "a") as fileText:
fileText.write("The username fail to read is " + username + " at " + str(datetime.datetime.now()) + "\n")
fileText.close()
def checkArrivedJSON():
for filename in sorted(os.listdir(pathToJSON)):
if filename.endswith(".json"):
            username = filename[:-len(".json")]
            # Get the epoch of the local JSON
json_timestamp_epoch = get_local_json_timestamp_epoch(username, filename)
if json_timestamp_epoch == None:
continue
            # Get the epoch of the remote JSON, stored in monitoredUser.db
json_timestamp_epochRemote = get_remote_json_timestamp_epoch(username)
            # Check the elapsed time between the local and remote JSONs
#print("\033[92m" + "json_timestamp_epoch: " + str(json_timestamp_epoch) + "\033[0m" + "\n")
#print("\033[92m" + "json_timestamp_epochRemote: " + str(json_timestamp_epochRemote) + "\033[0m" + "\n")
if json_timestamp_epoch > json_timestamp_epochRemote:
monitoredJSON = json.load(open(pathToJSON + os.sep + filename, "r+"))
monitoredJSON = str(monitoredJSON)
manageMonitoredUsersDB.update_monitoredUserByName(username, monitoredJSON)
#MAIN
veces = 0
while True:
checkArrivedJSON()
time.sleep(1)
if veces >= 10:
print("Checking new user activities...\n")
veces = 0
veces += 1
| true
| true
|
790ae4a1ee277fb088ab97674f3d90ed99520315
| 1,374
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewAutoSizeColumnMode.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewAutoSizeColumnMode.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewAutoSizeColumnMode.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
class DataGridViewAutoSizeColumnMode(Enum,IComparable,IFormattable,IConvertible):
"""
Defines values for specifying how the width of a column is adjusted.
enum DataGridViewAutoSizeColumnMode,values: AllCells (6),AllCellsExceptHeader (4),ColumnHeader (2),DisplayedCells (10),DisplayedCellsExceptHeader (8),Fill (16),None (1),NotSet (0)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return DataGridViewAutoSizeColumnMode()
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
AllCells=None
AllCellsExceptHeader=None
ColumnHeader=None
DisplayedCells=None
DisplayedCellsExceptHeader=None
Fill=None
None_ =None
NotSet=None
value__=None
| 31.227273
| 215
| 0.703057
|
class DataGridViewAutoSizeColumnMode(Enum,IComparable,IFormattable,IConvertible):
return DataGridViewAutoSizeColumnMode()
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
""" __format__(formattable: IFormattable,format: str) -> str """
pass
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
AllCells=None
AllCellsExceptHeader=None
ColumnHeader=None
DisplayedCells=None
DisplayedCellsExceptHeader=None
Fill=None
None_ =None
NotSet=None
value__=None
| true
| true
|
790ae50045a6dc57e1c2571136914d090f64c847
| 9,114
|
py
|
Python
|
mentionbot/servermodules/truthgame.py
|
simshadows/Discord-mentionbot
|
fe3b6c3bdd2ff71f3a16666b3facce6ec97279c6
|
[
"MIT"
] | null | null | null |
mentionbot/servermodules/truthgame.py
|
simshadows/Discord-mentionbot
|
fe3b6c3bdd2ff71f3a16666b3facce6ec97279c6
|
[
"MIT"
] | null | null | null |
mentionbot/servermodules/truthgame.py
|
simshadows/Discord-mentionbot
|
fe3b6c3bdd2ff71f3a16666b3facce6ec97279c6
|
[
"MIT"
] | null | null | null |
import asyncio
import random
import re
import textwrap
import discord
from .. import utils, errors, cmd
from ..servermodule import ServerModule, registered
from ..enums import PrivilegeLevel
@registered
class TruthGame(ServerModule):
MODULE_NAME = "Truth Game"
MODULE_SHORT_DESCRIPTION = "Tools to play *Truth*."
RECOMMENDED_CMD_NAMES = ["truth", "troof", "trufe"]
_SECRET_TOKEN = utils.SecretToken()
_cmdd = {}
_HELP_SUMMARY = """
`{modhelp}` - Truth game.
"""
DEFAULT_SETTINGS = {
"enabled channels": []
}
_PARTICIPANT_DELIMITER = " --> "
_RULES_STRING = textwrap.dedent("""
**Rules for a game of _Truth_**:
idk, ask the people playing it.
""").strip()
async def _initialize(self, resources):
self._client = resources.client
self._res = resources
self._enabled_channels = None
self._load_settings()
self._res.suppress_autokill(True)
return
def _load_settings(self):
settings = self._res.get_settings(default=self.DEFAULT_SETTINGS)
self._enabled_channels = []
try:
self._enabled_channels = settings["enabled channels"]
if self._enabled_channels is None:
print("DEBUGGING: truthgame.py TruthGame._load_settings() enabled channels is None!")
self._enabled_channels = []
except KeyError:
self._enabled_channels = settings["enabled channels"] = []
self._res.save_settings(settings)
return
def _save_settings(self):
settings = self._res.get_settings()
settings["enabled channels"] = self._enabled_channels
self._res.save_settings(settings)
return
@cmd.add(_cmdd, "rules")
async def _cmdf_enable(self, substr, msg, privilege_level):
"""`{cmd}` - View game rules."""
await self._client.send_msg(msg, self._RULES_STRING)
return
@cmd.add(_cmdd, "newgame", top=True)
@cmd.minimum_privilege(PrivilegeLevel.TRUSTED)
async def _cmdf_newgame(self, substr, msg, privilege_level):
"""`{cmd}` - New game."""
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
await self._new_game(channel)
await self._client.send_msg(channel, "Truth game cleared.")
return
@cmd.add(_cmdd, "in", top=True)
async def _cmdf_in(self, substr, msg, privilege_level):
"""
`{cmd}` - Adds you to the game.
This command also allows moderators to add other users and arbitrary strings as participants.
**Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.
"""
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
new_participant = None
if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
new_participant = "<@" + msg.author.id + ">"
else:
new_participant = substr
if self._PARTICIPANT_DELIMITER in new_participant:
await self._client.send_msg(channel, "Error: Not allowed to use the delimiter characters.")
raise errors.OperationAborted
if new_participant in self._get_participants(channel):
await self._client.send_msg(channel, "Error: {} is already a participant.".format(new_participant))
else:
await self._add_participant(channel, new_participant)
await self._client.send_msg(channel, "Added {} to the game.".format(new_participant))
return
@cmd.add(_cmdd, "out", top=True)
async def _cmdf_out(self, substr, msg, privilege_level):
"""
`{cmd}` - Removes you from the game.
This command also allows moderators to remove other users and arbitrary strings.
**Example:** `{cmd} an elephant` - Removes "an elephant" as a participant.
"""
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
participant = None
if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
participant = "<@" + msg.author.id + ">"
else:
participant = substr
if participant in self._get_participants(channel):
await self._remove_participant(channel, participant)
await self._client.send_msg(channel, "Removed {} from the game.".format(participant))
else:
await self._client.send_msg(channel, "Error: {} is not already a participant.".format(participant))
return
@cmd.add(_cmdd, "enablechannel")
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_enable(self, substr, msg, privilege_level):
"""`{cmd}` - Enable Truth in this channel."""
channel = msg.channel
if channel.id in self._enabled_channels:
await self._client.send_msg(channel, "This channel is already a Truth game channel.")
else:
self._enabled_channels.append(channel.id)
self._save_settings()
await self._client.send_msg(channel, "This channel is now a Truth game channel.")
return
@cmd.add(_cmdd, "disablechannel")
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_disable(self, substr, msg, privilege_level):
"""`{cmd}` - Disable Truth in this channel."""
channel = msg.channel
if channel.id in self._enabled_channels:
self._enabled_channels.remove(channel.id)
self._save_settings()
await self._client.send_msg(channel, "This channel is no longer a Truth game channel.")
else:
await self._client.send_msg(channel, "This channel is not a Truth game channel.")
return
@cmd.add(_cmdd, "viewenabled")
async def _cmdf_viewenabled(self, substr, msg, privilege_level):
"""`{cmd}` - View all channels that are enabled as Truth channels."""
buf = None
if len(self._enabled_channels) == 0:
buf = "No channels have Truth game enabled."
else:
buf = "**Truth game enabled channels:**"
for channel_id in self._enabled_channels:
buf += "\n<#{0}> (ID: {0})".format(channel_id)
await self._client.send_msg(msg, buf)
return
# TODO: Edit this to use the topic string abstraction methods.
   #       Currently, it only considers user mentions to be participants!
@cmd.add(_cmdd, "choose", "random", "rand")
async def _cmdf_choosetruth(self, substr, msg, privilege_level):
"""`{cmd}` - Pick a random participant other than yourself."""
topic = msg.channel.topic
if topic is None:
await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
raise errors.OperationAborted
mentions = utils.get_all_mentions(topic)
if len(mentions) == 0:
await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
raise errors.OperationAborted
try:
mentions.remove(msg.author.id)
if len(mentions) == 0:
await self._client.send_msg(msg, "<@{}>".format(msg.author.id))
raise errors.OperationAborted
except ValueError:
pass
choice = random.choice(mentions)
buf = "<@{}>\n".format(choice)
buf += "My choices were: "
for mention in mentions:
user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server)
if user is None:
buf += "<@{}>, ".format(mention)
else:
buf += "{}, ".format(user.name)
buf = buf[:-2]
await self._client.send_msg(msg, buf)
return
################################
### TOPIC STRING ABSTRACTION ###
################################
def _get_participants(self, channel):
topic = channel.topic
if topic is None:
return []
return topic.split(self._PARTICIPANT_DELIMITER)
# PRECONDITION: participant_str contains printable characters.
# PRECONDITION: participant_str does not contain the delimiter.
async def _add_participant(self, channel, participant_str):
topic = channel.topic
new_topic = None
if topic == "":
new_topic = participant_str
else:
new_topic = topic + self._PARTICIPANT_DELIMITER + participant_str
await self._client.edit_channel(channel, topic=new_topic)
return
# PRECONDITION: participant_str in self._get_participants(channel)
async def _remove_participant(self, channel, participant_str):
participants_list = self._get_participants(channel)
participants_list.remove(participant_str)
new_topic = self._PARTICIPANT_DELIMITER.join(participants_list)
await self._client.edit_channel(channel, topic=new_topic)
return
async def _new_game(self, channel):
await self._client.edit_channel(channel, topic="")
return
########################
### GENERAL SERVICES ###
########################
async def _abort_if_not_truth_channel(self, channel):
      if channel.id not in self._enabled_channels:
await self._client.send_msg(channel, "Error: Truth isn't enabled on this channel.")
raise errors.OperationAborted
return
| 36.166667
| 113
| 0.650099
|
import asyncio
import random
import re
import textwrap
import discord
from .. import utils, errors, cmd
from ..servermodule import ServerModule, registered
from ..enums import PrivilegeLevel
@registered
class TruthGame(ServerModule):
MODULE_NAME = "Truth Game"
MODULE_SHORT_DESCRIPTION = "Tools to play *Truth*."
RECOMMENDED_CMD_NAMES = ["truth", "troof", "trufe"]
_SECRET_TOKEN = utils.SecretToken()
_cmdd = {}
_HELP_SUMMARY = """
`{modhelp}` - Truth game.
"""
DEFAULT_SETTINGS = {
"enabled channels": []
}
_PARTICIPANT_DELIMITER = " --> "
_RULES_STRING = textwrap.dedent("""
**Rules for a game of _Truth_**:
idk, ask the people playing it.
""").strip()
async def _initialize(self, resources):
self._client = resources.client
self._res = resources
self._enabled_channels = None
self._load_settings()
self._res.suppress_autokill(True)
return
def _load_settings(self):
settings = self._res.get_settings(default=self.DEFAULT_SETTINGS)
self._enabled_channels = []
try:
self._enabled_channels = settings["enabled channels"]
if self._enabled_channels is None:
print("DEBUGGING: truthgame.py TruthGame._load_settings() enabled channels is None!")
self._enabled_channels = []
except KeyError:
self._enabled_channels = settings["enabled channels"] = []
self._res.save_settings(settings)
return
def _save_settings(self):
settings = self._res.get_settings()
settings["enabled channels"] = self._enabled_channels
self._res.save_settings(settings)
return
@cmd.add(_cmdd, "rules")
async def _cmdf_enable(self, substr, msg, privilege_level):
await self._client.send_msg(msg, self._RULES_STRING)
return
@cmd.add(_cmdd, "newgame", top=True)
@cmd.minimum_privilege(PrivilegeLevel.TRUSTED)
async def _cmdf_newgame(self, substr, msg, privilege_level):
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
await self._new_game(channel)
await self._client.send_msg(channel, "Truth game cleared.")
return
@cmd.add(_cmdd, "in", top=True)
async def _cmdf_in(self, substr, msg, privilege_level):
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
new_participant = None
if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
new_participant = "<@" + msg.author.id + ">"
else:
new_participant = substr
if self._PARTICIPANT_DELIMITER in new_participant:
await self._client.send_msg(channel, "Error: Not allowed to use the delimiter characters.")
raise errors.OperationAborted
if new_participant in self._get_participants(channel):
await self._client.send_msg(channel, "Error: {} is already a participant.".format(new_participant))
else:
await self._add_participant(channel, new_participant)
await self._client.send_msg(channel, "Added {} to the game.".format(new_participant))
return
@cmd.add(_cmdd, "out", top=True)
async def _cmdf_out(self, substr, msg, privilege_level):
channel = msg.channel
await self._abort_if_not_truth_channel(channel)
participant = None
if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
participant = "<@" + msg.author.id + ">"
else:
participant = substr
if participant in self._get_participants(channel):
await self._remove_participant(channel, participant)
await self._client.send_msg(channel, "Removed {} from the game.".format(participant))
else:
await self._client.send_msg(channel, "Error: {} is not already a participant.".format(participant))
return
@cmd.add(_cmdd, "enablechannel")
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_enable(self, substr, msg, privilege_level):
channel = msg.channel
if channel.id in self._enabled_channels:
await self._client.send_msg(channel, "This channel is already a Truth game channel.")
else:
self._enabled_channels.append(channel.id)
self._save_settings()
await self._client.send_msg(channel, "This channel is now a Truth game channel.")
return
@cmd.add(_cmdd, "disablechannel")
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_disable(self, substr, msg, privilege_level):
channel = msg.channel
if channel.id in self._enabled_channels:
self._enabled_channels.remove(channel.id)
self._save_settings()
await self._client.send_msg(channel, "This channel is no longer a Truth game channel.")
else:
await self._client.send_msg(channel, "This channel is not a Truth game channel.")
return
@cmd.add(_cmdd, "viewenabled")
async def _cmdf_viewenabled(self, substr, msg, privilege_level):
buf = None
if len(self._enabled_channels) == 0:
buf = "No channels have Truth game enabled."
else:
buf = "**Truth game enabled channels:**"
for channel_id in self._enabled_channels:
buf += "\n<#{0}> (ID: {0})".format(channel_id)
await self._client.send_msg(msg, buf)
return
@cmd.add(_cmdd, "choose", "random", "rand")
async def _cmdf_choosetruth(self, substr, msg, privilege_level):
topic = msg.channel.topic
if topic is None:
await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
raise errors.OperationAborted
mentions = utils.get_all_mentions(topic)
if len(mentions) == 0:
await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
raise errors.OperationAborted
try:
mentions.remove(msg.author.id)
if len(mentions) == 0:
await self._client.send_msg(msg, "<@{}>".format(msg.author.id))
raise errors.OperationAborted
except ValueError:
pass
choice = random.choice(mentions)
buf = "<@{}>\n".format(choice)
buf += "My choices were: "
for mention in mentions:
user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server)
if user is None:
buf += "<@{}>, ".format(mention)
else:
buf += "{}, ".format(user.name)
buf = buf[:-2]
await self._client.send_msg(msg, buf)
      return
   def _get_participants(self, channel):
      topic = channel.topic
      if topic is None:
         return []
      return topic.split(self._PARTICIPANT_DELIMITER)
   async def _add_participant(self, channel, participant_str):
      topic = channel.topic
      new_topic = None
      if topic == "":
         new_topic = participant_str
      else:
         new_topic = topic + self._PARTICIPANT_DELIMITER + participant_str
      await self._client.edit_channel(channel, topic=new_topic)
      return
   async def _remove_participant(self, channel, participant_str):
      participants_list = self._get_participants(channel)
      participants_list.remove(participant_str)
      new_topic = self._PARTICIPANT_DELIMITER.join(participants_list)
      await self._client.edit_channel(channel, topic=new_topic)
      return
   async def _new_game(self, channel):
      await self._client.edit_channel(channel, topic="")
      return
   async def _abort_if_not_truth_channel(self, channel):
      if channel.id not in self._enabled_channels:
         await self._client.send_msg(channel, "Error: Truth isn't enabled on this channel.")
         raise errors.OperationAborted
      return
| true
| true
|
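The TruthGame module above stores its participant list directly in the Discord channel topic, joined by `_PARTICIPANT_DELIMITER`. A minimal standalone sketch of that topic-string encoding, with a plain dict standing in for the channel object (an assumption made only for illustration):

DELIMITER = " --> "

def get_participants(channel):
    # Mirrors TruthGame._get_participants(): an empty or missing topic means no players.
    topic = channel.get("topic") or ""
    return topic.split(DELIMITER) if topic else []

def add_participant(channel, participant):
    # Mirrors TruthGame._add_participant(): append and re-join into the topic string.
    participants = get_participants(channel)
    participants.append(participant)
    channel["topic"] = DELIMITER.join(participants)

channel = {"topic": ""}
add_participant(channel, "<@1111>")
add_participant(channel, "an elephant")
print(channel["topic"])           # <@1111> --> an elephant
print(get_participants(channel))  # ['<@1111>', 'an elephant']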
790ae62026b2c367eb828924be3b4fff54833bc9
| 310
|
py
|
Python
|
main.py
|
maroemon58/twitter-bot
|
cf16eae5d08874b46c5215e1b43f9d15990ae995
|
[
"MIT"
] | 2
|
2020-03-31T13:01:52.000Z
|
2020-09-08T14:03:39.000Z
|
main.py
|
maroemon58/twitter-bot
|
cf16eae5d08874b46c5215e1b43f9d15990ae995
|
[
"MIT"
] | null | null | null |
main.py
|
maroemon58/twitter-bot
|
cf16eae5d08874b46c5215e1b43f9d15990ae995
|
[
"MIT"
] | 1
|
2020-06-10T03:27:12.000Z
|
2020-06-10T03:27:12.000Z
|
import config
from twitter import *
def main():
t = Twitter(
auth=OAuth(config.TW_TOKEN, config.TW_TOKEN_SECRET, config.TW_CONSUMER_KEY, config.TW_CONSUMER_SECRET))
# Post a message
msg = 'テスト投稿ですm(_ _)m'
t.statuses.update(status=msg)
if __name__ == '__main__':
main()
| 19.375
| 111
| 0.66129
|
import config
from twitter import *
def main():
t = Twitter(
auth=OAuth(config.TW_TOKEN, config.TW_TOKEN_SECRET, config.TW_CONSUMER_KEY, config.TW_CONSUMER_SECRET))
msg = 'テスト投稿ですm(_ _)m'
t.statuses.update(status=msg)
if __name__ == '__main__':
main()
| true
| true
|
790ae6782d93d7c2fa0528fe37b2cb4019262d51
| 1,452
|
py
|
Python
|
src/ClusterTrellis/utils.py
|
lbg251/ClusterTrellis
|
06e9c8cd3f04f606e185b95f4d68703a34cc86ef
|
[
"MIT"
] | 6
|
2021-01-15T19:34:22.000Z
|
2022-02-26T16:28:23.000Z
|
src/ClusterTrellis/utils.py
|
lbg251/ClusterTrellis
|
06e9c8cd3f04f606e185b95f4d68703a34cc86ef
|
[
"MIT"
] | 2
|
2022-02-12T17:49:45.000Z
|
2022-02-12T20:01:07.000Z
|
src/ClusterTrellis/utils.py
|
lbg251/ClusterTrellis
|
06e9c8cd3f04f606e185b95f4d68703a34cc86ef
|
[
"MIT"
] | 2
|
2021-01-21T18:23:53.000Z
|
2021-06-10T15:43:17.000Z
|
import os
import pickle
import string
import time
import logging
import numpy as np
def get_logger(name=__file__, level=logging.INFO):
logger = logging.getLogger(name)
if getattr(logger, "_init_done__", None):
logger.setLevel(level)
return logger
logger._init_done__ = True
logger.propagate = False
logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s:%(levelname)s::%(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(0)
del logger.handlers[:]
logger.addHandler(handler)
return logger
## Utils
def load_jets():
root_dir = "data/"
filename = os.path.join(root_dir, "TruthBS_10")
with open(filename + ".pkl", "rb") as fd:
Truth10, BS10 = pickle.load(fd, encoding='latin-1')
return Truth10, BS10
def sumLogLH(jetList):
for jet in jetList:
jet["totLogLH"] = np.sum(jet["logLH"])
def getConstituents(jet, node_id, outers_list):
"""
Recursive function to get a list of the tree leaves
"""
if jet["tree"][node_id, 0] == -1:
outers_list.append(jet["content"][node_id])
else:
getConstituents(
jet,
jet["tree"][node_id, 0],
outers_list,)
getConstituents(
jet,
jet["tree"][node_id, 1],
outers_list,)
return outers_list
def get_leaves(jet):
return getConstituents(jet, jet["root_id"], [])
| 22
| 75
| 0.63843
|
import os
import pickle
import string
import time
import logging
import numpy as np
def get_logger(name=__file__, level=logging.INFO):
logger = logging.getLogger(name)
if getattr(logger, "_init_done__", None):
logger.setLevel(level)
return logger
logger._init_done__ = True
logger.propagate = False
logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s:%(levelname)s::%(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(0)
del logger.handlers[:]
logger.addHandler(handler)
return logger
def load_jets():
root_dir = "data/"
filename = os.path.join(root_dir, "TruthBS_10")
with open(filename + ".pkl", "rb") as fd:
Truth10, BS10 = pickle.load(fd, encoding='latin-1')
return Truth10, BS10
def sumLogLH(jetList):
for jet in jetList:
jet["totLogLH"] = np.sum(jet["logLH"])
def getConstituents(jet, node_id, outers_list):
if jet["tree"][node_id, 0] == -1:
outers_list.append(jet["content"][node_id])
else:
getConstituents(
jet,
jet["tree"][node_id, 0],
outers_list,)
getConstituents(
jet,
jet["tree"][node_id, 1],
outers_list,)
return outers_list
def get_leaves(jet):
return getConstituents(jet, jet["root_id"], [])
| true
| true
|
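In `getConstituents` above, `jet["tree"]` is indexed as `tree[node_id, 0]`/`tree[node_id, 1]` with `-1` marking a leaf, and leaf entries of `jet["content"]` are collected. A small sketch on a hand-built three-leaf jet; the exact array layout and the scalar content values are assumptions inferred from the code:

import numpy as np

# Hypothetical 5-node jet: node 0 is the root with children 1 and 2,
# node 2 splits into 3 and 4; a child id of -1 marks a leaf.
jet = {
    "root_id": 0,
    "tree": np.array([[1, 2], [-1, -1], [3, 4], [-1, -1], [-1, -1]]),
    "content": np.array([100.0, 40.0, 60.0, 25.0, 35.0]),  # stand-in per-node values
}

def get_constituents(jet, node_id, outers_list):
    # Same recursion as getConstituents() above.
    if jet["tree"][node_id, 0] == -1:
        outers_list.append(jet["content"][node_id])
    else:
        get_constituents(jet, jet["tree"][node_id, 0], outers_list)
        get_constituents(jet, jet["tree"][node_id, 1], outers_list)
    return outers_list

print(get_constituents(jet, jet["root_id"], []))  # [40.0, 25.0, 35.0]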
790ae70ce14134a9e8e35f625ba395c6200c440c
| 1,102
|
py
|
Python
|
ttv_api/channel.py
|
Atheridis/aptbot
|
ee4372b6f0878fe3678e6a67d44494cc92437856
|
[
"MIT"
] | null | null | null |
ttv_api/channel.py
|
Atheridis/aptbot
|
ee4372b6f0878fe3678e6a67d44494cc92437856
|
[
"MIT"
] | null | null | null |
ttv_api/channel.py
|
Atheridis/aptbot
|
ee4372b6f0878fe3678e6a67d44494cc92437856
|
[
"MIT"
] | null | null | null |
from ttv_api import *
@dataclass
class Channel:
broadcaster_id: str
broadcaster_login: str
broadcaster_name: str
game_name: str
game_id: str
broadcaster_language: str
title: str
delay: int
def get_channels(*channel_ids: str) -> Optional[list[Channel]]:
params = "?"
for channel_id in channel_ids:
params += f"broadcaster_id={channel_id}&"
http = urllib3.PoolManager()
r = http.request(
"GET",
URL.channels.value + params,
headers=HEADER,
)
if r.status != 200:
return None
data = json.loads(r.data.decode("utf-8"))["data"]
channels: list[Channel] = []
for channel in data:
channels.append(
Channel(
channel["broadcaster_id"],
channel["broadcaster_login"],
channel["broadcaster_name"],
channel["game_name"],
channel["game_id"],
channel["broadcaster_language"],
channel["title"],
channel["delay"],
)
)
return channels
| 23.446809
| 63
| 0.554446
|
from ttv_api import *
@dataclass
class Channel:
broadcaster_id: str
broadcaster_login: str
broadcaster_name: str
game_name: str
game_id: str
broadcaster_language: str
title: str
delay: int
def get_channels(*channel_ids: str) -> Optional[list[Channel]]:
params = "?"
for channel_id in channel_ids:
params += f"broadcaster_id={channel_id}&"
http = urllib3.PoolManager()
r = http.request(
"GET",
URL.channels.value + params,
headers=HEADER,
)
if r.status != 200:
return None
data = json.loads(r.data.decode("utf-8"))["data"]
channels: list[Channel] = []
for channel in data:
channels.append(
Channel(
channel["broadcaster_id"],
channel["broadcaster_login"],
channel["broadcaster_name"],
channel["game_name"],
channel["game_id"],
channel["broadcaster_language"],
channel["title"],
channel["delay"],
)
)
return channels
| true
| true
|
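`get_channels` above accepts any number of broadcaster IDs, folds them into one `broadcaster_id=...&` query string, and returns `None` on any non-200 response. A hypothetical usage sketch; the IDs are invented and a valid client-id/OAuth `HEADER` is assumed to already be configured inside the `ttv_api` package:

from ttv_api.channel import get_channels  # assumes the package shown above is importable

channels = get_channels("141981764", "71092938")  # made-up broadcaster IDs
if channels is None:
    print("Request failed (non-200 status from the Helix channels endpoint)")
else:
    for ch in channels:
        print(ch.broadcaster_name, "-", ch.game_name, "-", ch.title)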
790ae73bbd1969a7e7aab60abcc81d372d5c8c5f
| 8,067
|
py
|
Python
|
dicomanonymizer/batch_anonymizer.py
|
ademyanchuk/dicom-anonymizer
|
6b9bff0564233819dee74dc12aad27cb6228c46e
|
[
"BSD-3-Clause"
] | 1
|
2021-08-03T11:37:29.000Z
|
2021-08-03T11:37:29.000Z
|
dicomanonymizer/batch_anonymizer.py
|
ademyanchuk/dicom-anonymizer
|
6b9bff0564233819dee74dc12aad27cb6228c46e
|
[
"BSD-3-Clause"
] | null | null | null |
dicomanonymizer/batch_anonymizer.py
|
ademyanchuk/dicom-anonymizer
|
6b9bff0564233819dee74dc12aad27cb6228c46e
|
[
"BSD-3-Clause"
] | null | null | null |
""" This module is intended to extend functionality of the code provided by original authors.
The process is as follows:
1. User has to provide source root path containing (possibly nested) folders with dicom files
2. The program will recreate the structure in the destination root path and anonymize all
dicom files.
"""
import argparse
import json
import logging
import logging.config
import random
from pathlib import Path
from typing import Optional
import pydicom
from dicomanonymizer.anonym_state import AnonState
from dicomanonymizer.dicom_utils import fix_exposure
from dicomanonymizer.simpledicomanonymizer import (
anonymize_dicom_file,
initialize_actions,
)
from dicomanonymizer.utils import (
LOGS_PATH,
PROJ_ROOT,
ActionsDict,
Path_Str,
get_dirs,
to_Path,
try_valid_dir,
)
# setup logging (create dirs, if it is first time)
LOGS_PATH.mkdir(parents=True, exist_ok=True)
logging.config.fileConfig(
PROJ_ROOT / "dicomanonymizer/config/logging.ini",
defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()},
disable_existing_loggers=False,
)
logger = logging.getLogger(__name__)
_STATE_PATH = Path.home() / ".dicomanonymizer/cache"
_STATE_PATH.mkdir(parents=True, exist_ok=True)
def get_extra_rules(
use_extra: bool,
extra_json_path: Path_Str,
) -> Optional[ActionsDict]:
"""Helper to provide custom (project level/user level) anonymization
rules as a mapping of tags -> action function.
Args:
use_extra (bool): If use extra rules.
extra_json_path (Path_Str): Path to extra rules json file.
It should be flat json with action as a key and list of tags as value.
Returns:
Optional[ActionsDict]: extra rules mapping (tags -> action function)
"""
# Define the actions dict for additional tags (customization)
extra_rules = None
if use_extra:
# default or user provided path to extra rules json file
with open(extra_json_path, "r") as fout:
extra_rules = json.load(fout)
for key in extra_rules:
tag_list = extra_rules[key]
tag_list = [tuple(elem) for elem in tag_list]
extra_rules[key] = tag_list
extra_rules = initialize_actions(extra_rules)
return extra_rules
def anonymize_dicom_folder(
in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs
):
"""Anonymize dicom files in `in_path`, if `in_path` doesn't
contain dicom files, will do nothing. Debug == True will do
sort of dry run to check if all good for the large data storages
Args:
in_path (Path_Str): path to the folder containing dicom files
out_path (Path_Str): path to the folder there anonymized copies
will be saved
debuf (bool): if true, will do a "dry" run
"""
# check and prepare
in_path = to_Path(in_path)
try_valid_dir(in_path)
out_path = to_Path(out_path)
out_path.mkdir(parents=True, exist_ok=True)
logger.info(f"Processing: {in_path}")
# work itself
in_files = [p for p in in_path.iterdir() if p.is_file()]
if not in_files:
logger.info(f"Folder {in_path} doesn't have dicom files, skip.")
return
if debug:
# anonymize just one file
f_in = random.choice(in_files)
f_out = out_path / f_in.name
try:
anonymize_dicom_file(f_in, f_out)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
else:
for f_in in in_files:
f_out = out_path / f_in.name
try:
anonymize_dicom_file(f_in, f_out, **kwargs)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
def anonymize_root_folder(
in_root: Path_Str,
out_root: Path_Str,
**kwargs,
):
"""The fuction will get all nested folders from `in_root`
and perform anonymization of all folders containg dicom-files
Will recreate the `in_root` folders structure in the `out_root`
Args:
in_root (Path_Str): source root folder (presumably has
some dicom-files inide, maybe nested)
out_root (Path_Str): destination root folder, will create
if not exists
"""
in_root = to_Path(in_root)
try_valid_dir(in_root)
out_root = to_Path(out_root)
out_root.mkdir(parents=True, exist_ok=True)
in_dirs = get_dirs(in_root)
state = AnonState(_STATE_PATH)
state.init_state()
state.load_state()
def get_tags_callback(dataset: pydicom.Dataset):
state.tag_counter.update(dataset.dir())
logger.info(
"Processed paths will be added to the cache, if cache exist and has some paths included, they will be skipped"
)
logger.info(
f"if, you need to process data again delete files {_STATE_PATH}, please"
)
# will try to process all folders, if exception will dump state before raising
try:
for in_d in in_dirs:
rel_path = in_d.relative_to(in_root)
if str(rel_path) in state.visited_folders:
logger.info(f"{in_d} path is in cache, skipping")
continue
else:
out_d = out_root / rel_path
anonymize_dicom_folder(
in_d, out_d, ds_callback=get_tags_callback, **kwargs
)
# update state
state.visited_folders[str(rel_path)] = True
except Exception as e:
raise e
finally:
# before saving updated state let's flag tags not seen previously
prev_state = AnonState(_STATE_PATH)
prev_state.init_state()
prev_state.load_state()
new_tags = set(state.tag_counter.keys()).difference(
prev_state.tag_counter.keys()
)
if new_tags:
logger.warning(
f"During the anonymization new tags: {new_tags} were present"
)
else:
logger.info("No new tags werer present")
# now we can save the current state
state.save_state()
# Add CLI args
parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI")
parser.add_argument(
"--type",
type=str,
choices=["batch", "folder"],
default="batch",
help="Process only one folder - folder or all nested folders - batch, default = batch",
)
parser.add_argument(
"--extra-rules",
default="",
help="Path to json file defining extra rules for additional tags. Defalult in project.",
)
parser.add_argument(
"--no-extra",
action="store_true",
help="Only use a rules from DICOM-standard basic de-id profile",
)
parser.add_argument(
"--debug", action="store_true", help="Will do a dry run (one file per folder)"
)
parser.add_argument(
"src",
type=str,
help="Absolute path to the folder containing dicom-files or nested folders with dicom-files",
)
parser.add_argument(
"dst",
type=str,
help="Absolute path to the folder where to save anonymized copy of src",
)
def main():
# parse args
args = parser.parse_args()
in_path = Path(args.src)
out_path = Path(args.dst)
debug = args.debug
path = args.extra_rules
if not path:
path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json"
extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path)
# fix known issue with dicom
fix_exposure()
msg = f"""
Start a job: {args.type}, debug set to {args.debug}
Will anonymize data at: {in_path} and save to {out_path}
"""
logger.info(msg)
# anonymize
if args.type == "batch":
anonymize_root_folder(
in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
)
elif args.type == "folder":
anonymize_dicom_folder(
in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
)
logger.info("Well done!")
if __name__ == "__main__":
main()
| 31.026923
| 118
| 0.655882
|
import argparse
import json
import logging
import logging.config
import random
from pathlib import Path
from typing import Optional
import pydicom
from dicomanonymizer.anonym_state import AnonState
from dicomanonymizer.dicom_utils import fix_exposure
from dicomanonymizer.simpledicomanonymizer import (
anonymize_dicom_file,
initialize_actions,
)
from dicomanonymizer.utils import (
LOGS_PATH,
PROJ_ROOT,
ActionsDict,
Path_Str,
get_dirs,
to_Path,
try_valid_dir,
)
LOGS_PATH.mkdir(parents=True, exist_ok=True)
logging.config.fileConfig(
PROJ_ROOT / "dicomanonymizer/config/logging.ini",
defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()},
disable_existing_loggers=False,
)
logger = logging.getLogger(__name__)
_STATE_PATH = Path.home() / ".dicomanonymizer/cache"
_STATE_PATH.mkdir(parents=True, exist_ok=True)
def get_extra_rules(
use_extra: bool,
extra_json_path: Path_Str,
) -> Optional[ActionsDict]:
extra_rules = None
if use_extra:
with open(extra_json_path, "r") as fout:
extra_rules = json.load(fout)
for key in extra_rules:
tag_list = extra_rules[key]
tag_list = [tuple(elem) for elem in tag_list]
extra_rules[key] = tag_list
extra_rules = initialize_actions(extra_rules)
return extra_rules
def anonymize_dicom_folder(
in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs
):
in_path = to_Path(in_path)
try_valid_dir(in_path)
out_path = to_Path(out_path)
out_path.mkdir(parents=True, exist_ok=True)
logger.info(f"Processing: {in_path}")
in_files = [p for p in in_path.iterdir() if p.is_file()]
if not in_files:
logger.info(f"Folder {in_path} doesn't have dicom files, skip.")
return
if debug:
# anonymize just one file
f_in = random.choice(in_files)
f_out = out_path / f_in.name
try:
anonymize_dicom_file(f_in, f_out)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
else:
for f_in in in_files:
f_out = out_path / f_in.name
try:
anonymize_dicom_file(f_in, f_out, **kwargs)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
def anonymize_root_folder(
in_root: Path_Str,
out_root: Path_Str,
**kwargs,
):
in_root = to_Path(in_root)
try_valid_dir(in_root)
out_root = to_Path(out_root)
out_root.mkdir(parents=True, exist_ok=True)
in_dirs = get_dirs(in_root)
state = AnonState(_STATE_PATH)
state.init_state()
state.load_state()
def get_tags_callback(dataset: pydicom.Dataset):
state.tag_counter.update(dataset.dir())
logger.info(
"Processed paths will be added to the cache, if cache exist and has some paths included, they will be skipped"
)
logger.info(
f"if, you need to process data again delete files {_STATE_PATH}, please"
)
# will try to process all folders, if exception will dump state before raising
try:
for in_d in in_dirs:
rel_path = in_d.relative_to(in_root)
if str(rel_path) in state.visited_folders:
logger.info(f"{in_d} path is in cache, skipping")
continue
else:
out_d = out_root / rel_path
anonymize_dicom_folder(
in_d, out_d, ds_callback=get_tags_callback, **kwargs
)
# update state
state.visited_folders[str(rel_path)] = True
except Exception as e:
raise e
finally:
# before saving updated state let's flag tags not seen previously
prev_state = AnonState(_STATE_PATH)
prev_state.init_state()
prev_state.load_state()
new_tags = set(state.tag_counter.keys()).difference(
prev_state.tag_counter.keys()
)
if new_tags:
logger.warning(
f"During the anonymization new tags: {new_tags} were present"
)
else:
logger.info("No new tags werer present")
state.save_state()
parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI")
parser.add_argument(
"--type",
type=str,
choices=["batch", "folder"],
default="batch",
help="Process only one folder - folder or all nested folders - batch, default = batch",
)
parser.add_argument(
"--extra-rules",
default="",
help="Path to json file defining extra rules for additional tags. Defalult in project.",
)
parser.add_argument(
"--no-extra",
action="store_true",
help="Only use a rules from DICOM-standard basic de-id profile",
)
parser.add_argument(
"--debug", action="store_true", help="Will do a dry run (one file per folder)"
)
parser.add_argument(
"src",
type=str,
help="Absolute path to the folder containing dicom-files or nested folders with dicom-files",
)
parser.add_argument(
"dst",
type=str,
help="Absolute path to the folder where to save anonymized copy of src",
)
def main():
args = parser.parse_args()
in_path = Path(args.src)
out_path = Path(args.dst)
debug = args.debug
path = args.extra_rules
if not path:
path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json"
extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path)
fix_exposure()
msg = f"""
Start a job: {args.type}, debug set to {args.debug}
Will anonymize data at: {in_path} and save to {out_path}
"""
logger.info(msg)
if args.type == "batch":
anonymize_root_folder(
in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
)
elif args.type == "folder":
anonymize_dicom_folder(
in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
)
logger.info("Well done!")
if __name__ == "__main__":
main()
| true
| true
|
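The `get_extra_rules` helper above expects a flat JSON file mapping an action name to a list of tags and converts each tag list into tuples before calling `initialize_actions`. A sketch of what such a file might contain and how the CLI could be invoked; the action names, tag numbers and paths are assumptions for illustration, not the project's shipped defaults:

import json

# Hypothetical extra_rules.json: action name -> list of [group, element] DICOM tags.
extra_rules_json = {
    "delete": [[0x0010, 0x1040], [0x0010, 0x2154]],  # e.g. patient address / phone number
    "empty": [[0x0008, 0x0080]],                     # e.g. institution name
}
with open("extra_rules.json", "w") as fd:
    json.dump(extra_rules_json, fd)

# get_extra_rules() would turn each inner list into a tuple, i.e.
# {"delete": [(16, 4160), (16, 8532)], "empty": [(8, 128)]}, before initialize_actions().
converted = {action: [tuple(tag) for tag in tags] for action, tags in extra_rules_json.items()}
print(converted)

# Possible command lines (paths are placeholders):
#   python batch_anonymizer.py --extra-rules extra_rules.json /data/dicom_src /data/dicom_anon
#   python batch_anonymizer.py --type folder --debug /data/one_study /tmp/out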
790ae747593fe6ec9ef221e8ec100e0e758c352c
| 2,599
|
py
|
Python
|
examples/xgboost_simple.py
|
matsuken92/optuna
|
79ce70c24c2150ffc8a4fd243e664976c6e1acef
|
[
"MIT"
] | 1
|
2019-09-28T10:31:01.000Z
|
2019-09-28T10:31:01.000Z
|
examples/xgboost_simple.py
|
matsuken92/optuna
|
79ce70c24c2150ffc8a4fd243e664976c6e1acef
|
[
"MIT"
] | null | null | null |
examples/xgboost_simple.py
|
matsuken92/optuna
|
79ce70c24c2150ffc8a4fd243e664976c6e1acef
|
[
"MIT"
] | null | null | null |
"""
Optuna example that optimizes a classifier configuration for cancer dataset
using XGBoost.
In this example, we optimize the validation accuracy of cancer detection
using XGBoost. We optimize both the choice of booster model and their hyper
parameters.
We have following two ways to execute this example:
(1) Execute this code directly.
$ python xgboost_simple.py
(2) Execute through CLI.
$ STUDY_NAME=`optuna create-study --storage sqlite:///example.db`
$ optuna study optimize xgboost_simple.py objective --n-trials=100 --study $STUDY_NAME \
--storage sqlite:///example.db
"""
from __future__ import division
import numpy as np
import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split
import xgboost as xgb
import optuna
# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
def objective(trial):
(data, target) = sklearn.datasets.load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x, label=test_y)
param = {
'silent': 1,
'objective': 'binary:logistic',
'booster': trial.suggest_categorical('booster', ['gbtree', 'gblinear', 'dart']),
'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)
}
if param['booster'] == 'gbtree' or param['booster'] == 'dart':
param['max_depth'] = trial.suggest_int('max_depth', 1, 9)
param['eta'] = trial.suggest_loguniform('eta', 1e-8, 1.0)
param['gamma'] = trial.suggest_loguniform('gamma', 1e-8, 1.0)
param['grow_policy'] = trial.suggest_categorical('grow_policy', ['depthwise', 'lossguide'])
if param['booster'] == 'dart':
param['sample_type'] = trial.suggest_categorical('sample_type', ['uniform', 'weighted'])
param['normalize_type'] = trial.suggest_categorical('normalize_type', ['tree', 'forest'])
param['rate_drop'] = trial.suggest_loguniform('rate_drop', 1e-8, 1.0)
param['skip_drop'] = trial.suggest_loguniform('skip_drop', 1e-8, 1.0)
bst = xgb.train(param, dtrain)
preds = bst.predict(dtest)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)
return 1.0 - accuracy
if __name__ == '__main__':
study = optuna.create_study()
study.optimize(objective, n_trials=100)
print(study.best_trial)
| 36.605634
| 99
| 0.69873
|
from __future__ import division
import numpy as np
import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split
import xgboost as xgb
import optuna
def objective(trial):
    (data, target) = sklearn.datasets.load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x, label=test_y)
param = {
'silent': 1,
'objective': 'binary:logistic',
'booster': trial.suggest_categorical('booster', ['gbtree', 'gblinear', 'dart']),
'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),
'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)
}
if param['booster'] == 'gbtree' or param['booster'] == 'dart':
param['max_depth'] = trial.suggest_int('max_depth', 1, 9)
param['eta'] = trial.suggest_loguniform('eta', 1e-8, 1.0)
param['gamma'] = trial.suggest_loguniform('gamma', 1e-8, 1.0)
param['grow_policy'] = trial.suggest_categorical('grow_policy', ['depthwise', 'lossguide'])
if param['booster'] == 'dart':
param['sample_type'] = trial.suggest_categorical('sample_type', ['uniform', 'weighted'])
param['normalize_type'] = trial.suggest_categorical('normalize_type', ['tree', 'forest'])
param['rate_drop'] = trial.suggest_loguniform('rate_drop', 1e-8, 1.0)
param['skip_drop'] = trial.suggest_loguniform('skip_drop', 1e-8, 1.0)
bst = xgb.train(param, dtrain)
preds = bst.predict(dtest)
pred_labels = np.rint(preds)
accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)
return 1.0 - accuracy
if __name__ == '__main__':
study = optuna.create_study()
study.optimize(objective, n_trials=100)
print(study.best_trial)
| true
| true
|
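Once `study.optimize` above finishes, the winning configuration can be read back from the study; a short continuation sketch (the refit step is an assumption, not part of the original example, so it is left commented out):

# Continues the example above: `study`, `xgb` and `dtrain` are assumed to exist already.
print("Best 1-accuracy:", study.best_value)
print("Best hyperparameters:", study.best_params)

best_param = {"silent": 1, "objective": "binary:logistic", **study.best_params}
# final_bst = xgb.train(best_param, dtrain)  # optional refit with the selected parameters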
790ae7c9f0357d9001dd157611c82be6f342fe97
| 1,213
|
py
|
Python
|
mail_app/mail_processors/password_processor.py
|
teamsaucisse/Data-Mailing
|
1815582008e27a3927a030deddc732a6a02add80
|
[
"Apache-2.0"
] | null | null | null |
mail_app/mail_processors/password_processor.py
|
teamsaucisse/Data-Mailing
|
1815582008e27a3927a030deddc732a6a02add80
|
[
"Apache-2.0"
] | null | null | null |
mail_app/mail_processors/password_processor.py
|
teamsaucisse/Data-Mailing
|
1815582008e27a3927a030deddc732a6a02add80
|
[
"Apache-2.0"
] | null | null | null |
import re
from mail_app.mail import Mail
from mail_app.mail_processors.abstract_processor import AbstractProcessor
from mail_app.processed_mail import ProcessedMail
class PasswordProcessor(AbstractProcessor):
general_keywords = ["password (reset|request|update|updated)", "(new|reset|change|updated|changed your) password",
"address verification", "(confirm|confirm your|activate your) (registration|account)"]
def __init__(self):
super().__init__()
self.category = "Password"
def process(self, mail):
if self.__general_conditions(mail):
return ProcessedMail(mail.user_id, mail.message_id, mail.from_, self.category, mail.body, mail.time, mail.attachments)
############################################ Conditions ############################################
def __general_conditions(self, mail: Mail):
return (any(re.search(keyword, mail.subject.lower()) for keyword in self.general_keywords) or
any(re.search(keyword, mail.body.lower()) for keyword in self.general_keywords) or
any(re.search(keyword, name.lower()) for name, _ in mail.attachments.items() for keyword in self.general_keywords))
| 48.52
| 130
| 0.666117
|
import re
from mail_app.mail import Mail
from mail_app.mail_processors.abstract_processor import AbstractProcessor
from mail_app.processed_mail import ProcessedMail
class PasswordProcessor(AbstractProcessor):
general_keywords = ["password (reset|request|update|updated)", "(new|reset|change|updated|changed your) password",
"address verification", "(confirm|confirm your|activate your) (registration|account)"]
def __init__(self):
super().__init__()
self.category = "Password"
def process(self, mail):
if self.__general_conditions(mail):
            return ProcessedMail(mail.user_id, mail.message_id, mail.from_, self.category, mail.body, mail.time, mail.attachments)
    def __general_conditions(self, mail: Mail):
        return (any(re.search(keyword, mail.subject.lower()) for keyword in self.general_keywords) or
                any(re.search(keyword, mail.body.lower()) for keyword in self.general_keywords) or
                any(re.search(keyword, name.lower()) for name, _ in mail.attachments.items() for keyword in self.general_keywords))
| true
| true
|
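The `general_keywords` entries above are regular expressions matched with `re.search` against the lower-cased subject, body, and attachment names. A standalone check of how a few invented subject lines fare against them:

import re

general_keywords = ["password (reset|request|update|updated)",
                    "(new|reset|change|updated|changed your) password",
                    "address verification",
                    "(confirm|confirm your|activate your) (registration|account)"]

samples = ["Password reset requested for your account",   # invented examples
           "Please confirm your registration",
           "Weekly newsletter"]
for subject in samples:
    matched = any(re.search(keyword, subject.lower()) for keyword in general_keywords)
    print(f"{subject!r}: {'Password category' if matched else 'no match'}")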
790ae91cb20ee13046f79ff0d5426c5f04495bc1
| 1,643
|
py
|
Python
|
comparison/pwgs/convert_inputs.py
|
J-Moravec/pairtree
|
91cbba628b78aea31034efb080976fdb47d83976
|
[
"MIT"
] | 15
|
2021-01-19T21:13:50.000Z
|
2022-02-02T00:01:33.000Z
|
comparison/pwgs/convert_inputs.py
|
J-Moravec/pairtree
|
91cbba628b78aea31034efb080976fdb47d83976
|
[
"MIT"
] | 17
|
2020-11-25T09:41:03.000Z
|
2022-03-28T04:52:14.000Z
|
comparison/pwgs/convert_inputs.py
|
J-Moravec/pairtree
|
91cbba628b78aea31034efb080976fdb47d83976
|
[
"MIT"
] | 6
|
2021-01-01T06:00:31.000Z
|
2021-06-29T15:03:11.000Z
|
import sys
import os
import argparse
import json
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import inputparser
import clustermaker
def write_ssms(variants, outfn):
_stringify = lambda A: ','.join([str(V) for V in A])
mu_r = 0.999
cols = ('id', 'gene', 'a', 'd', 'mu_r', 'mu_v')
with open(outfn, 'w') as outf:
print(*cols, sep='\t', file=outf)
for V in variants.values():
assert len(set(V['omega_v'])) == 1
variant = {
'id': 's%s' % int(V['id'][1:]),
'gene': V['name'],
'a': _stringify(V['ref_reads']),
'd': _stringify(V['total_reads']),
'mu_r': mu_r,
'mu_v': np.mean(1 - V['omega_v']),
}
print(*[variant[K] for K in cols], sep='\t', file=outf)
def write_params(sampnames, outfn):
with open(outfn, 'w') as outf:
json.dump({'samples': sampnames}, outf)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--use-supervars', dest='use_supervars', action='store_true')
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
parser.add_argument('pwgs_ssm_fn')
parser.add_argument('pwgs_params_fn')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
params = inputparser.load_params(args.params_fn)
if args.use_supervars:
variants = clustermaker.make_cluster_supervars(params['clusters'], variants)
write_ssms(variants, args.pwgs_ssm_fn)
write_params(params['samples'], args.pwgs_params_fn)
if __name__ == '__main__':
main()
| 28.824561
| 83
| 0.656726
|
import sys
import os
import argparse
import json
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import inputparser
import clustermaker
def write_ssms(variants, outfn):
_stringify = lambda A: ','.join([str(V) for V in A])
mu_r = 0.999
cols = ('id', 'gene', 'a', 'd', 'mu_r', 'mu_v')
with open(outfn, 'w') as outf:
print(*cols, sep='\t', file=outf)
for V in variants.values():
assert len(set(V['omega_v'])) == 1
variant = {
'id': 's%s' % int(V['id'][1:]),
'gene': V['name'],
'a': _stringify(V['ref_reads']),
'd': _stringify(V['total_reads']),
'mu_r': mu_r,
'mu_v': np.mean(1 - V['omega_v']),
}
print(*[variant[K] for K in cols], sep='\t', file=outf)
def write_params(sampnames, outfn):
with open(outfn, 'w') as outf:
json.dump({'samples': sampnames}, outf)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--use-supervars', dest='use_supervars', action='store_true')
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
parser.add_argument('pwgs_ssm_fn')
parser.add_argument('pwgs_params_fn')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
params = inputparser.load_params(args.params_fn)
if args.use_supervars:
variants = clustermaker.make_cluster_supervars(params['clusters'], variants)
write_ssms(variants, args.pwgs_ssm_fn)
write_params(params['samples'], args.pwgs_params_fn)
if __name__ == '__main__':
main()
| true
| true
|
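`write_ssms` above emits one tab-separated row per variant with comma-joined per-sample counts. A sketch of the input structure it expects and the row it would produce for a single invented two-sample variant (field names taken from the function, values made up):

import numpy as np

# Invented variant in the shape write_ssms() iterates over.
variants = {
    "V0": {
        "id": "s0",
        "name": "S_0",
        "ref_reads": np.array([45, 50]),     # per-sample reference read counts -> column "a"
        "total_reads": np.array([100, 98]),  # per-sample total read counts     -> column "d"
        "omega_v": np.array([0.5, 0.5]),     # must be constant across samples (asserted above)
    }
}
# write_ssms(variants, "example.ssm") would then write, after the header row:
#   s0    S_0    45,50    100,98    0.999    0.5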
790aea14d3c9f3472ebe4b7429ac86785b235ef6
| 3,492
|
py
|
Python
|
http_prompt/context/__init__.py
|
elenalape/http-prompt
|
09fd0cce1f3d58bd945ab96a948456cb151ac28c
|
[
"MIT"
] | 1
|
2020-10-29T14:17:46.000Z
|
2020-10-29T14:17:46.000Z
|
http_prompt/context/__init__.py
|
elenalape/http-prompt
|
09fd0cce1f3d58bd945ab96a948456cb151ac28c
|
[
"MIT"
] | null | null | null |
http_prompt/context/__init__.py
|
elenalape/http-prompt
|
09fd0cce1f3d58bd945ab96a948456cb151ac28c
|
[
"MIT"
] | null | null | null |
from http_prompt.tree import Node
class Context(object):
def __init__(self, url=None, spec=None):
self.url = url
self.headers = {}
self.querystring_params = {}
self.body_params = {}
self.body_json_params = {}
self.options = {}
self.should_exit = False
# Create a tree for supporting API spec and ls command
self.root = Node('root')
if spec:
if not self.url:
schemes = spec.get('schemes')
scheme = schemes[0] if schemes else 'https'
self.url = (scheme + '://' +
spec.get('host', 'http://localhost:8000') +
spec.get('basePath', ''))
base_path_tokens = list(filter(lambda s: s,
spec.get('basePath', '').split('/')))
paths = spec.get('paths')
if paths:
for path in paths:
path_tokens = (base_path_tokens +
list(filter(lambda s: s, path.split('/'))))
if path == '/': # Path is a trailing slash
path_tokens.insert(len(base_path_tokens), '/')
elif path[-1] == '/': # Path ends with a trailing slash
path_tokens[-1] = path_tokens[-1] + '/'
self.root.add_path(*path_tokens)
endpoint = paths[path]
for method, info in endpoint.items():
params = info.get('parameters')
if params:
for param in params:
if param.get("$ref"):
for section in param.get("$ref").split('/'):
param = param.get(section) if not section == "#" else spec
if param.get('in') != 'path':
full_path = path_tokens + [param['name']]
self.root.add_path(*full_path,
node_type='file')
elif not self.url:
self.url = 'http://localhost:8000'
def __eq__(self, other):
return (self.url == other.url and
self.headers == other.headers and
self.options == other.options and
self.querystring_params == other.querystring_params and
self.body_params == other.body_params and
self.body_json_params == other.body_json_params and
self.should_exit == other.should_exit)
def copy(self):
context = Context(self.url)
context.headers = self.headers.copy()
context.querystring_params = self.querystring_params.copy()
context.body_params = self.body_params.copy()
context.body_json_params = self.body_json_params.copy()
context.options = self.options.copy()
context.should_exit = self.should_exit
return context
def update(self, context):
if context.url:
self.url = context.url
self.headers.update(context.headers)
self.querystring_params.update(context.querystring_params)
self.body_params.update(context.body_params)
self.body_json_params.update(context.body_json_params)
self.options.update(context.options)
        self.should_exit = context.should_exit
| 42.585366
| 98
| 0.505155
|
from http_prompt.tree import Node
class Context(object):
def __init__(self, url=None, spec=None):
self.url = url
self.headers = {}
self.querystring_params = {}
self.body_params = {}
self.body_json_params = {}
self.options = {}
self.should_exit = False
self.root = Node('root')
if spec:
if not self.url:
schemes = spec.get('schemes')
scheme = schemes[0] if schemes else 'https'
self.url = (scheme + '://' +
spec.get('host', 'http://localhost:8000') +
spec.get('basePath', ''))
base_path_tokens = list(filter(lambda s: s,
spec.get('basePath', '').split('/')))
paths = spec.get('paths')
if paths:
for path in paths:
path_tokens = (base_path_tokens +
list(filter(lambda s: s, path.split('/'))))
if path == '/':
path_tokens.insert(len(base_path_tokens), '/')
elif path[-1] == '/':
path_tokens[-1] = path_tokens[-1] + '/'
self.root.add_path(*path_tokens)
endpoint = paths[path]
for method, info in endpoint.items():
params = info.get('parameters')
if params:
for param in params:
if param.get("$ref"):
for section in param.get("$ref").split('/'):
param = param.get(section) if not section == "#" else spec
if param.get('in') != 'path':
full_path = path_tokens + [param['name']]
self.root.add_path(*full_path,
node_type='file')
elif not self.url:
self.url = 'http://localhost:8000'
def __eq__(self, other):
return (self.url == other.url and
self.headers == other.headers and
self.options == other.options and
self.querystring_params == other.querystring_params and
self.body_params == other.body_params and
self.body_json_params == other.body_json_params and
self.should_exit == other.should_exit)
def copy(self):
context = Context(self.url)
context.headers = self.headers.copy()
context.querystring_params = self.querystring_params.copy()
context.body_params = self.body_params.copy()
context.body_json_params = self.body_json_params.copy()
context.options = self.options.copy()
context.should_exit = self.should_exit
return context
def update(self, context):
if context.url:
self.url = context.url
self.headers.update(context.headers)
self.querystring_params.update(context.querystring_params)
self.body_params.update(context.body_params)
self.body_json_params.update(context.body_json_params)
self.options.update(context.options)
        self.should_exit = context.should_exit
| true
| true
|
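The `Context` constructor above assembles its base URL from a Swagger-style spec (`schemes`, `host`, `basePath`) and splits each path into tokens for the `Node` tree. A tiny illustration of just that URL and token logic, mirrored outside the class with an invented spec:

# Mirrors the URL/token assembly in Context.__init__ above; the spec is invented.
spec = {
    "schemes": ["https"],
    "host": "api.example.com",
    "basePath": "/v2",
    "paths": {"/users": {}, "/users/{id}": {}},
}
schemes = spec.get("schemes")
scheme = schemes[0] if schemes else "https"
url = scheme + "://" + spec.get("host", "http://localhost:8000") + spec.get("basePath", "")
print(url)  # https://api.example.com/v2

base_path_tokens = list(filter(lambda s: s, spec.get("basePath", "").split("/")))
for path in spec["paths"]:
    path_tokens = base_path_tokens + list(filter(lambda s: s, path.split("/")))
    print(path_tokens)  # ['v2', 'users'] then ['v2', 'users', '{id}']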
790aec2fc05aa1e6cedffaa7d116763b0bcfb9e2
| 2,395
|
py
|
Python
|
streamlit_app.py
|
tbarford/bg_streamlit_demo
|
72e54d2c9cff278edf1852ae04893fdb80d8ed4d
|
[
"Apache-2.0"
] | null | null | null |
streamlit_app.py
|
tbarford/bg_streamlit_demo
|
72e54d2c9cff278edf1852ae04893fdb80d8ed4d
|
[
"Apache-2.0"
] | null | null | null |
streamlit_app.py
|
tbarford/bg_streamlit_demo
|
72e54d2c9cff278edf1852ae04893fdb80d8ed4d
|
[
"Apache-2.0"
] | null | null | null |
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
##python3 script created by tBarford on 20220205
##
##
##File Description: This is the streamlit webapp MVP for BG Golf EI Profile Database Demo
## run in term w/ : streamlit run streamlit_app.py
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
import streamlit as st
import firestoreservice as fs
from matplotlib import pyplot as plt
import PIL as img
def main():
firestore = fs.FirestoreService()
## Sidebar
with st.sidebar:
st.subheader('Shaft Selection Tools:')
shaftType = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type')
shaft = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType), key = 'shaft')
stiffness = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType, shaft), key = 'stiff')
compare = st.radio('Compare another shaft?', options = ['No', 'Yes'])
if compare == 'Yes':
shaftType_compare = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type2')
shaft_compare = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType_compare), key = 'shaft2')
stiffness_compare = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType_compare, shaft_compare), key = 'stiff2')
else:
shaftType_compare, shaft_compare, stiffness_compare = None, None, None
## Main Content
st.image(img.Image.open('./assets/bg_logo_horz.png'), use_column_width=True)
st.header('Shaft Profile Demo')
#manage shafts to plot
if stiffness is not None:
dataToPlot = {f'{shaft} {stiffness}':firestore.getEI(shaftType, shaft, stiffness)}
if stiffness_compare is not None:
dataToPlot[f'{shaft_compare} {stiffness_compare}'] = firestore.getEI(shaftType_compare, shaft_compare, stiffness_compare)
if st.button('Update Plot'):
fig, ax = plt.subplots()
for each in dataToPlot.keys():
ax.plot(dataToPlot[each][0], dataToPlot[each][1], label = each)
ax.set(xlabel='Length From Tip (in.)', ylabel='EI',
title='BG Measured EI Curve')
ax.grid()
ax.legend()
st.pyplot(fig)
if __name__ == '__main__':
main()
| 42.767857
| 152
| 0.612944
|
import streamlit as st
import firestoreservice as fs
from matplotlib import pyplot as plt
import PIL as img
def main():
    firestore = fs.FirestoreService()
    with st.sidebar:
        st.subheader('Shaft Selection Tools:')
        shaftType = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type')
shaft = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType), key = 'shaft')
stiffness = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType, shaft), key = 'stiff')
compare = st.radio('Compare another shaft?', options = ['No', 'Yes'])
if compare == 'Yes':
shaftType_compare = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type2')
shaft_compare = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType_compare), key = 'shaft2')
stiffness_compare = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType_compare, shaft_compare), key = 'stiff2')
else:
shaftType_compare, shaft_compare, stiffness_compare = None, None, None
    st.image(img.Image.open('./assets/bg_logo_horz.png'), use_column_width=True)
st.header('Shaft Profile Demo')
if stiffness is not None:
dataToPlot = {f'{shaft} {stiffness}':firestore.getEI(shaftType, shaft, stiffness)}
if stiffness_compare is not None:
dataToPlot[f'{shaft_compare} {stiffness_compare}'] = firestore.getEI(shaftType_compare, shaft_compare, stiffness_compare)
if st.button('Update Plot'):
fig, ax = plt.subplots()
for each in dataToPlot.keys():
ax.plot(dataToPlot[each][0], dataToPlot[each][1], label = each)
ax.set(xlabel='Length From Tip (in.)', ylabel='EI',
title='BG Measured EI Curve')
ax.grid()
ax.legend()
st.pyplot(fig)
if __name__ == '__main__':
main()
| true
| true
|
790aecdd40d247bbd2cab185e89d5cd83d8f374d
| 2,214
|
py
|
Python
|
app/views/combiners_views.py
|
lawrence-gandhar/data_security_project
|
46cffb38b3c4c47cb01bfb3585bf2365c77e407c
|
[
"MIT"
] | null | null | null |
app/views/combiners_views.py
|
lawrence-gandhar/data_security_project
|
46cffb38b3c4c47cb01bfb3585bf2365c77e407c
|
[
"MIT"
] | null | null | null |
app/views/combiners_views.py
|
lawrence-gandhar/data_security_project
|
46cffb38b3c4c47cb01bfb3585bf2365c77e407c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# IntegrityError Exception for checking duplicate entry,
# connection import to establish connection to database
from django.db import IntegrityError, connection
# Used for serializing object data to json string
from django.core.serializers.json import DjangoJSONEncoder
from django.core.serializers import serialize
# Django HTTP Request
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseRedirect, JsonResponse
# Generic views as Class
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.views import View
# system imports
import sys, os, csv, json, datetime, calendar, re
# Django utils
from django.utils import timezone, safestring
from django.utils.decorators import method_decorator
# Django authentication
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
# Django Messaging Framework
from django.contrib import messages
# Conditional operators and exception for models
from django.db.models import Q, Count, Sum, Prefetch
from django.core.exceptions import ObjectDoesNotExist
# Paginator class import
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
# Helpers
import app.user_helper as user_helper
import app.records_helper as records_helper
# Forms
from app.forms import *
#=========================================================================================
# GET SUB CATEGORY ON BASIS OF CATEGORY
#=========================================================================================
def get_sub_category(request):
sub_cat_list = request.GET.getlist("cat_id[]")
if len(sub_cat_list) > 0:
sub_cats = records_helper.SubCategoryList(sub_cat_list)
html = []
for sub in sub_cats:
            html.append('<option value="'+str(sub.id)+'">'+str(sub)+'</option>')
return HttpResponse(''.join(html))
return HttpResponse('')
| 33.545455
| 104
| 0.714995
|
from __future__ import unicode_literals
from django.db import IntegrityError, connection
from django.core.serializers.json import DjangoJSONEncoder
from django.core.serializers import serialize
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseRedirect, JsonResponse
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.views import View
import sys, os, csv, json, datetime, calendar, re
from django.utils import timezone, safestring
from django.utils.decorators import method_decorator
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
from django.contrib import messages
from django.db.models import Q, Count, Sum, Prefetch
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
import app.user_helper as user_helper
import app.records_helper as records_helper
from app.forms import *
def get_sub_category(request):
sub_cat_list = request.GET.getlist("cat_id[]")
if len(sub_cat_list) > 0:
sub_cats = records_helper.SubCategoryList(sub_cat_list)
html = []
for sub in sub_cats:
            html.append('<option value="'+str(sub.id)+'">'+str(sub)+'</option>')
return HttpResponse(''.join(html))
return HttpResponse('')
| true
| true
|
790aed47dcf309c1eef143d65af8d070cba89236
| 2,073
|
py
|
Python
|
docs/source/conf.py
|
jeanphilippemercier/uquake-useis
|
fb03197852e4180ad32c6308787543b2af95f798
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
jeanphilippemercier/uquake-useis
|
fb03197852e4180ad32c6308787543b2af95f798
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
jeanphilippemercier/uquake-useis
|
fb03197852e4180ad32c6308787543b2af95f798
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../useis'))
# -- Project information -----------------------------------------------------
project = 'useis'
copyright = '2021, Jean-Philippe Mercier'
author = 'Jean-Philippe Mercier'
# The full version, including alpha/beta/rc tags
release = '0.5.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'rinoh.frontend.sphinx',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.coverage'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.983607
| 79
| 0.662808
|
import os
import sys
sys.path.insert(0, os.path.abspath('../../useis'))
project = 'useis'
copyright = '2021, Jean-Philippe Mercier'
author = 'Jean-Philippe Mercier'
release = '0.5.0'
extensions = [
'rinoh.frontend.sphinx',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.coverage'
]
templates_path = ['_templates']
exclude_patterns = []
html_theme = 'alabaster'
html_static_path = ['_static']
| true
| true
|
790aed5ae55c522da0eaaaef4bf1503de4b0e9e0
| 2,703
|
py
|
Python
|
mdrsl/rule_generation/decision_tree_conversion/tree_edge.py
|
joschout/Multi-Directional-Rule-Set-Learning
|
ef0620b115f4e0fd7fba3e752d238a8020c1ca6b
|
[
"Apache-2.0"
] | 3
|
2020-08-03T19:25:44.000Z
|
2021-06-27T22:25:55.000Z
|
mdrsl/rule_generation/decision_tree_conversion/tree_edge.py
|
joschout/Multi-Directional-Rule-Set-Learning
|
ef0620b115f4e0fd7fba3e752d238a8020c1ca6b
|
[
"Apache-2.0"
] | null | null | null |
mdrsl/rule_generation/decision_tree_conversion/tree_edge.py
|
joschout/Multi-Directional-Rule-Set-Learning
|
ef0620b115f4e0fd7fba3e752d238a8020c1ca6b
|
[
"Apache-2.0"
] | 2
|
2020-08-07T22:54:28.000Z
|
2021-02-18T06:11:01.000Z
|
from typing import List
from mdrsl.data_structures.rules.generalized_rule_part import GeneralizedAntecedent
from mdrsl.data_structures.item import Literal, NEQLiteral, EQLiteral
from mdrsl.rule_generation.decision_tree_conversion.attribute_id_to_name_conversion import DecisionTreeFeatureIDConverter
class TreeEdge:
def __init__(self, feature_id: int, threshold: float, is_left: bool):
self.feature_id: int = feature_id
self.threshold: float = threshold
self.is_left: bool = is_left
def __str__(self):
output_str = 'f(' + str(self.feature_id) + ')'
if self.is_left:
output_str += '<='
else:
output_str += '>'
output_str += str(self.threshold)
if self.is_left:
output_str += ' (L)'
else:
output_str += ' (R)'
return output_str
def __repr__(self):
return self.__str__()
class AntecedentBuilder:
def __init__(self, one_hot_encoded_feature_names: List[str], ohe_prefix_separator: str):
self.ohe_prefix_separator: str = ohe_prefix_separator
self.decision_tree_feature_id_converter = DecisionTreeFeatureIDConverter(one_hot_encoded_feature_names)
def convert_edges(self, edges: List[TreeEdge]):
antecedent_literals: List[Literal] = []
for tree_edge in edges:
lit = self.convert(tree_edge)
antecedent_literals.append(lit)
antecedent = GeneralizedAntecedent(antecedent_literals)
return antecedent
def convert(self, tree_edge: TreeEdge):
if tree_edge.threshold != 0.5:
print("Unexpected tree edge threshold value: " + str(tree_edge.threshold))
# find the descriptive attr as used for input for the decision tree
dt_descriptive_attribute = self.decision_tree_feature_id_converter.convert(tree_edge.feature_id)
splitted_string = dt_descriptive_attribute.split(self.ohe_prefix_separator)
if len(splitted_string) == 1:
feature_name = dt_descriptive_attribute
if tree_edge.is_left:
feature_value = str(0)
else:
feature_value = str(1)
return EQLiteral(attribute=feature_name, value=feature_value)
elif len(splitted_string) == 2:
feature_name = splitted_string[0]
feature_value = splitted_string[1]
if tree_edge.is_left:
return NEQLiteral(attribute=feature_name, value=feature_value)
else:
return EQLiteral(attribute=feature_name, value=feature_value)
else:
raise Exception("Unexpected feature name:" + dt_descriptive_attribute)
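The convert method above encodes a simple rule for threshold-0.5 splits: on a plain binary column the left branch (<= 0.5) means value 0 and the right branch value 1, while on a one-hot column named attribute<separator>value the left branch means the attribute is not that value. A self-contained sketch of that rule, using plain tuples instead of the mdrsl EQLiteral/NEQLiteral classes (the function name and the '=' separator are illustrative only):

def edge_to_literal(feature_name, is_left, separator="="):
    # Mirrors AntecedentBuilder.convert for threshold == 0.5 splits.
    parts = feature_name.split(separator)
    if len(parts) == 1:
        # Plain 0/1 feature: the left branch (<= 0.5) means 0, the right branch 1.
        return (parts[0], "==", "0" if is_left else "1")
    if len(parts) == 2:
        attribute, value = parts
        # One-hot column "attribute=value": the left branch negates the value.
        return (attribute, "!=" if is_left else "==", value)
    raise ValueError("Unexpected feature name: " + feature_name)

print(edge_to_literal("color=red", is_left=True))   # ('color', '!=', 'red')
print(edge_to_literal("is_adult", is_left=False))   # ('is_adult', '==', '1')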
| 35.565789
| 121
| 0.669996
|
from typing import List
from mdrsl.data_structures.rules.generalized_rule_part import GeneralizedAntecedent
from mdrsl.data_structures.item import Literal, NEQLiteral, EQLiteral
from mdrsl.rule_generation.decision_tree_conversion.attribute_id_to_name_conversion import DecisionTreeFeatureIDConverter
class TreeEdge:
def __init__(self, feature_id: int, threshold: float, is_left: bool):
self.feature_id: int = feature_id
self.threshold: float = threshold
self.is_left: bool = is_left
def __str__(self):
output_str = 'f(' + str(self.feature_id) + ')'
if self.is_left:
output_str += '<='
else:
output_str += '>'
output_str += str(self.threshold)
if self.is_left:
output_str += ' (L)'
else:
output_str += ' (R)'
return output_str
def __repr__(self):
return self.__str__()
class AntecedentBuilder:
def __init__(self, one_hot_encoded_feature_names: List[str], ohe_prefix_separator: str):
self.ohe_prefix_separator: str = ohe_prefix_separator
self.decision_tree_feature_id_converter = DecisionTreeFeatureIDConverter(one_hot_encoded_feature_names)
def convert_edges(self, edges: List[TreeEdge]):
antecedent_literals: List[Literal] = []
for tree_edge in edges:
lit = self.convert(tree_edge)
antecedent_literals.append(lit)
antecedent = GeneralizedAntecedent(antecedent_literals)
return antecedent
def convert(self, tree_edge: TreeEdge):
if tree_edge.threshold != 0.5:
print("Unexpected tree edge threshold value: " + str(tree_edge.threshold))
dt_descriptive_attribute = self.decision_tree_feature_id_converter.convert(tree_edge.feature_id)
splitted_string = dt_descriptive_attribute.split(self.ohe_prefix_separator)
if len(splitted_string) == 1:
feature_name = dt_descriptive_attribute
if tree_edge.is_left:
feature_value = str(0)
else:
feature_value = str(1)
return EQLiteral(attribute=feature_name, value=feature_value)
elif len(splitted_string) == 2:
feature_name = splitted_string[0]
feature_value = splitted_string[1]
if tree_edge.is_left:
return NEQLiteral(attribute=feature_name, value=feature_value)
else:
return EQLiteral(attribute=feature_name, value=feature_value)
else:
raise Exception("Unexpected feature name:" + dt_descriptive_attribute)
| true
| true
|
790aee074f72c9c74a7b392eebbb6be67947cfba
| 19,204
|
py
|
Python
|
app/views.py
|
alonlevko/seminarmiddleeast
|
fc0822747015485da8670b3126418819f883083b
|
[
"Apache-2.0"
] | null | null | null |
app/views.py
|
alonlevko/seminarmiddleeast
|
fc0822747015485da8670b3126418819f883083b
|
[
"Apache-2.0"
] | null | null | null |
app/views.py
|
alonlevko/seminarmiddleeast
|
fc0822747015485da8670b3126418819f883083b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import json
import collections
import string
from django.http import JsonResponse, HttpResponseRedirect, HttpResponseNotFound
from django.http import Http404
from django.shortcuts import render
import threading
import json
from django.views.decorators.csrf import csrf_exempt
from .forms import NameForm, RegionForm, PlaceForm, GetTweetsForm
from .app_logic import handle_region_form, handle_place_form, get_user, \
init_tweet_accumulation_tweet_list, handle_search_form,\
generate_tweet_sendaway, generate_user_sendaway, word_trends_merge_jsons, replace_string_character, contains_whitespace,\
filter_strings, phrase_list_to_word_list, get_all_twitter_users_ids, slider_val_transform, convert_to_iso, \
get_tweet_list, user_ext_to_json, single_word_obj, generate_days_list, parse_parameters, generate_users_tweets
import jsonpickle
from .Analytics import QueriesManager
from .classes import get_from_db, UserExtension, twitter_users_database_name, TweetExtension
def index(request):
if request.method == 'POST':
form_user = NameForm(request.POST)
if form_user.is_valid():
name = form_user.cleaned_data['your_name']
name = name.replace(" ", "_")
return HttpResponseRedirect('/tweets/' + name)
# if a GET (or any other method) we'll create a blank form
else:
form_user = NameForm()
return render(request, 'index.html', {'form': form_user})
def dashboard(request, name):
user = get_user(name)
if request.method == 'POST':
form_region = RegionForm(request.POST or None)
regions = handle_region_form(form_region, user)
place_form = PlaceForm(request.POST or None)
print(place_form)
handle_place_form(place_form, user)
search_form = GetTweetsForm(request.POST or None)
region, place = handle_search_form(search_form)
if region != "":
user.remove_location(region, place)
user.save_me_to_db()
user = get_user(name)
regions = collections.OrderedDict(user.get_regions())
return render(request, 'dashboard.html',
{'name': name, 'regions': regions, 'region_form': RegionForm(), 'place_form': PlaceForm(), 'tweets_form': GetTweetsForm()})
else:
user = get_user(name)
regions = collections.OrderedDict(user.get_regions())
return render(request, 'dashboard.html',
{'name': name, 'regions': regions, 'region_form': RegionForm(), 'place_form': PlaceForm(),
'tweets_form': GetTweetsForm()})
def help_page(request, name):
return render(request, 'help.html', {'name': name})
@csrf_exempt
def get_regions_places_list(request):
if request.method == 'POST':
user_name = request.POST.get('user_name', None)
user = get_user(user_name)
region_place_dict = user.get_region_place_dict()
return JsonResponse(region_place_dict, safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def get_search_words(request):
if request.method == 'POST':
user_name = request.POST.get('user_name', None)
user = get_user(user_name)
word_to_add = request.POST.get('to_add', None)
if word_to_add != "":
word_to_add = word_to_add.replace('"', "")
user.add_search_word(word_to_add)
words_to_remove = jsonpickle.decode(request.POST.get('to_remove', None))
print(words_to_remove)
if words_to_remove != "":
words_to_remove = replace_string_character(words_to_remove)
print(words_to_remove)
user.remove_search_word(words_to_remove)
print(user_name)
print(word_to_add)
print(user.all_search_words())
return JsonResponse(user.all_search_words(), safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def accumulate_tweets(request):
if request.method == 'POST':
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
user = get_user(name)
for loc in locations:
init_tweet_accumulation_tweet_list(user, loc['region'], loc['place'], word_list)
empty = {}
return JsonResponse(empty)
@csrf_exempt
def get_query_links(request):
if request.method == 'POST':
name = request.POST.get('user_name', None)
locations = jsonpickle.decode(request.POST.get('locations_list', None))
print(locations)
user = get_user(name)
results = []
for loc in locations:
results.append(user.get_region(loc['region']).get_place_by_name(loc['place']).get_query_string())
print(results)
return JsonResponse(results, safe=False)
else:
empty = {}
return JsonResponse(empty)
def show_tweets_list(request, name):
user = get_user(name)
print("in show_tweets_list")
if request.method == 'POST':
search_form = GetTweetsForm(request.POST)
region, place = handle_search_form(search_form)
#quary = init_tweet_accumulation_tweet_list(user, region, place)
quary = "I am lish lash"
region = user.get_region(region)
place = region.get_place_by_name(place)
return render(request, 'tweets.html', { 'quary': quary, 'region': region, 'place': place, 'user': name})
elif request.method == 'GET':
quary = "I am lish lash"
region = "Lash"
place = "Lish"
return render(request, 'tweets.html', { 'quary': quary, 'region': region, 'place': place, 'user': name})
@csrf_exempt
def popular_users_get(request):
print("popular_users_get")
users_list = []
if request.method == 'POST':
twitter_users, tweets = generate_users_tweets(request, tasdocs=True, uasdocs=True)
if isinstance(twitter_users, Exception):
return JsonResponse(str(twitter_users), safe=False, status=500)
elif len(twitter_users) == 0:
return JsonResponse("Error: No Tweets in location / date / search phrase (if included)", safe=False, status=500)
slider_valus = slider_val_transform(jsonpickle.decode(request.POST.get('sliders_data', None)))
print(len(twitter_users))
print(len(tweets))
# ["followers_slider", "retweet_slider", "favorites_slider", "tweets_slider"]
# ["followers, statusses, favorites (likes), retweets]
queriesManager = QueriesManager()
params = ['Opinion_Leaders', [str(slider_valus[0][1])], [str(slider_valus[0][0])],
[str(slider_valus[3][1])], [str(slider_valus[3][0])], [str(slider_valus[2][1])],
[str(slider_valus[2][0])],
[str(slider_valus[1][1])], [str(slider_valus[1][0])]]
print(params)
print(twitter_users[0].keys())
df = queriesManager.call_querie(params, tweets, twitter_users)
print(df)
if df.empty:
return JsonResponse("Error: No opinion leaders found", safe=False)
idlist = df['id'].tolist()
print(len(idlist))
users_list = get_from_db(idlist, twitter_users_database_name, UserExtension)
print(len(users_list))
user_ext_list = user_ext_to_json(users_list)
return JsonResponse(user_ext_list, safe=False)
@csrf_exempt
def tweet_list_place(request):
print("tweet_list_place")
if request.method == 'POST':
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
print("word list is: " + str(word_list))
print("logic is: " + logic)
        if start_date != "" and end_date != "":
days_list = generate_days_list(start_date, end_date)
else:
days_list = None
print(days_list)
total_tweets = []
for loc in locations:
place = get_user(name).get_region(loc['region']).get_place_by_name(loc['place'])
mylist = place.get_tweets_directly(name, loc['region'], days_list, word_list, logic=logic, exact=exact)
if isinstance(mylist, Exception):
return JsonResponse(str(mylist), safe=False, status=500)
#print(mylist)
json_list = []
for l in mylist:
result = generate_tweet_sendaway(l.tweet)
""" this is the paid data from ibm watson
result.append(l.category)
result.append(l.concept)
result.append(l.entities)
result.append(l.entities_sentiment)
result.append(l.keywords)
result.append(l.keywords_sentiment)
"""
json_list.append(result)
total_tweets = total_tweets + json_list
#print(total_tweets)
if len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location / date / search phrase (if included)", safe=False, status=500)
return JsonResponse(total_tweets, safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def show_users_place(request):
print("show_users_place")
if request.method == 'POST':
twitter_users, _ = generate_users_tweets(request)
if isinstance(twitter_users, Exception):
return JsonResponse(str(twitter_users), safe=False, status=500)
elif len(twitter_users) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
return JsonResponse(user_ext_to_json(twitter_users), safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def word_trends_get(request):
print("word_trends_get")
total_result = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
days_list = generate_days_list(start_date, end_date)
total_tweets = get_tweet_list(locations, get_user(name), days_list, word_list=word_list, asdocs=True, exact=exact)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
word_list, pharses_list = filter_strings(word_list)
if not exact:
for word in word_list:
pharses_list.append(word)
word_list = []
params = ["Word_trend", word_list]
print(params)
print(len(total_tweets))
df = queriesManager.call_querie(params, total_tweets, [])
phrase_dfs = []
for phrase in pharses_list:
params = ['Phrase_trend', phrase]
print(params)
phrase_dfs.append({"df": queriesManager.call_querie(params, total_tweets, []), "phrase": phrase})
print(phrase_dfs)
for word in word_list:
total_result.append(single_word_obj(word, 'word', df, days_list))
for dic in phrase_dfs:
total_result.append(single_word_obj(dic['phrase'], 'phrase', dic["df"], days_list))
print(total_result)
return JsonResponse(total_result, safe=False)
@csrf_exempt
def top_words_per_date_get(request):
print("top_words_per_date_get")
words_list = []
counter_list = []
days_list = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, words_list, logic, exact = parse_parameters(request)
days_list = generate_days_list(start_date, end_date)
dates_counter = dict.fromkeys(days_list)
print(days_list)
total_tweets = get_tweet_list(locations, get_user(name), days_list, None, asdocs=True)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
for k, _ in dates_counter.items():
dates_counter[k] = {'word': "", 'count': 0}
params = ["Popular_word_per_date"]
print(params)
print(len(total_tweets))
df = queriesManager.call_querie(params, total_tweets, [])
print(df)
words_list = ["No Tweets"] * len(days_list)
counter_list = [0] * len(days_list)
for i, day in enumerate(days_list):
print(day)
col = df.loc[df['date'] == day]
for index, row in col.iterrows():
words_list[i] = row['popular_word']
counter_list[i] = row['counter']
print(days_list)
print(words_list)
print(counter_list)
return JsonResponse({'dates': days_list, 'words': words_list, 'counter': counter_list}, safe=False)
@csrf_exempt
def popularity_of_words_get(request):
print("popularity_of_words_get")
df = ""
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
word_list, pharase_list = filter_strings(word_list)
word_list = word_list + phrase_list_to_word_list(pharase_list)
days_list = generate_days_list(start_date, end_date)
total_tweets = get_tweet_list(locations, get_user(name), days_list, word_list=word_list, asdocs=True, exact=exact)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
params = ["Popularity_of_word_bank_per_place", word_list]
print(params)
print(len(total_tweets))
df = queriesManager.call_querie(params, total_tweets, [])
if df.empty:
return JsonResponse("Error: No appearances of search phrases in tweets", safe=False)
rows = df.shape[0]
df = df.to_json()
df = df[:-1]
df = df + ', "rows": ' + str(rows)
df = df + ', "word_list": ' + str(word_list) + '}'
print(df)
df = df.replace("'", '"')
print(df)
return JsonResponse(df, safe=False)
@csrf_exempt
def most_popular_word_get(request):
print("most_popular_word_get")
place_list = []
word_list = []
counter_list = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
days_list = generate_days_list(start_date, end_date)
total_tweets = get_tweet_list(locations, get_user(name), days_list, None, asdocs=True)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
params = ["Popular_word_per_place"]
df = queriesManager.call_querie(params, total_tweets, [])
print(df)
for i, row in df.iterrows():
place_list.append(row['place_name'])
word_list.append(row['popular_word'])
counter_list.append(row['counter'])
return JsonResponse({'places': place_list, 'words': word_list, 'counters': counter_list}, safe=False)
@csrf_exempt
def first_time_get(request):
print("first_time_get")
df_list = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
max_results = request.POST.get('max_results', None)
total_users, total_tweets = generate_users_tweets(request, use_words=True, tasdocs=True, uasdocs=True)
if isinstance(total_users, Exception):
return JsonResponse(str(total_users), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
print(len(total_tweets))
print(len(total_users))
for word in word_list:
if exact:
params = ["First_Time", [" " + word + " "], [max_results]]
else:
params = ["First_Time", [word], [max_results]]
print(params)
df = queriesManager.call_querie(params, total_tweets, total_users)
print(df)
if df.empty:
continue
row_data = []
# [id, text, user_id, screen_name, full_date, time_rnk]
for index, row in df.iterrows():
row_data.append([row["id"], row["text"], row["screen_name"], convert_to_iso(row["full_date"]),
row["time_rnk"]])
df_list.append({"word": word, "len": df.shape[0], "row_data": row_data})
if len(df_list) == 0:
return JsonResponse("Error: No Tweets in location and date contain search phrase", safe=False, status=500)
return JsonResponse(df_list, safe=False)
@csrf_exempt
def most_retweeted_get(request):
print("most_retweeted_get")
row_data = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
max_results = request.POST.get('max_results', None)
total_users, total_tweets = generate_users_tweets(request, use_words=True, tasdocs=True, uasdocs=True)
if isinstance(total_users, Exception):
return JsonResponse(str(total_users), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
print(len(total_tweets))
print(len(total_users))
for word in word_list:
if exact:
params = ["Most_Retweeted", [" " + word + " "], [max_results]]
else:
params = ["Most_Retweeted", [word], [max_results]]
print(params)
df = queriesManager.call_querie(params, total_tweets, total_users)
print(df)
if df.empty:
continue
# [phrase, id, text, user_id, screen_name, full_date, retweet_count, retweet_rnk
for index, row in df.iterrows():
row_data.append([row["id"], row["text"], row["screen_name"], convert_to_iso(row["full_date"]),
row["retweet_count"], row["retweet_rnk"], row["phrase"]])
print(row_data)
if len(row_data) == 0:
return JsonResponse("Error: No Tweets in location and date contain search phrase", safe=False, status=500)
return JsonResponse(row_data, safe=False)
def health(request):
state = {"status": "UP"}
return JsonResponse(state)
def handler404(request):
return render(request, '404.html', status=404)
def handler500(request):
return render(request, '500.html', status=500)
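Most of the views above are plain POST handlers returning JSON, so they can be smoke-tested with Django's test client. This is a hedged sketch only: the route string must match whatever the project's urls.py actually maps to get_regions_places_list (it is invented here), and 'alice' is a made-up user name:

from django.test import Client

client = Client()
response = client.post("/get_regions_places_list/", {"user_name": "alice"})
print(response.status_code)   # 200 when the view resolves and the user exists
print(response.json())        # the region -> places mapping built by the view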
| 42.580931
| 145
| 0.64221
|
from __future__ import unicode_literals
import json
import collections
import string
from django.http import JsonResponse, HttpResponseRedirect, HttpResponseNotFound
from django.http import Http404
from django.shortcuts import render
import threading
import json
from django.views.decorators.csrf import csrf_exempt
from .forms import NameForm, RegionForm, PlaceForm, GetTweetsForm
from .app_logic import handle_region_form, handle_place_form, get_user, \
init_tweet_accumulation_tweet_list, handle_search_form,\
generate_tweet_sendaway, generate_user_sendaway, word_trends_merge_jsons, replace_string_character, contains_whitespace,\
filter_strings, phrase_list_to_word_list, get_all_twitter_users_ids, slider_val_transform, convert_to_iso, \
get_tweet_list, user_ext_to_json, single_word_obj, generate_days_list, parse_parameters, generate_users_tweets
import jsonpickle
from .Analytics import QueriesManager
from .classes import get_from_db, UserExtension, twitter_users_database_name, TweetExtension
def index(request):
if request.method == 'POST':
form_user = NameForm(request.POST)
if form_user.is_valid():
name = form_user.cleaned_data['your_name']
name = name.replace(" ", "_")
return HttpResponseRedirect('/tweets/' + name)
else:
form_user = NameForm()
return render(request, 'index.html', {'form': form_user})
def dashboard(request, name):
user = get_user(name)
if request.method == 'POST':
form_region = RegionForm(request.POST or None)
regions = handle_region_form(form_region, user)
place_form = PlaceForm(request.POST or None)
print(place_form)
handle_place_form(place_form, user)
search_form = GetTweetsForm(request.POST or None)
region, place = handle_search_form(search_form)
if region != "":
user.remove_location(region, place)
user.save_me_to_db()
user = get_user(name)
regions = collections.OrderedDict(user.get_regions())
return render(request, 'dashboard.html',
{'name': name, 'regions': regions, 'region_form': RegionForm(), 'place_form': PlaceForm(), 'tweets_form': GetTweetsForm()})
else:
user = get_user(name)
regions = collections.OrderedDict(user.get_regions())
return render(request, 'dashboard.html',
{'name': name, 'regions': regions, 'region_form': RegionForm(), 'place_form': PlaceForm(),
'tweets_form': GetTweetsForm()})
def help_page(request, name):
return render(request, 'help.html', {'name': name})
@csrf_exempt
def get_regions_places_list(request):
if request.method == 'POST':
user_name = request.POST.get('user_name', None)
user = get_user(user_name)
region_place_dict = user.get_region_place_dict()
return JsonResponse(region_place_dict, safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def get_search_words(request):
if request.method == 'POST':
user_name = request.POST.get('user_name', None)
user = get_user(user_name)
word_to_add = request.POST.get('to_add', None)
if word_to_add != "":
word_to_add = word_to_add.replace('"', "")
user.add_search_word(word_to_add)
words_to_remove = jsonpickle.decode(request.POST.get('to_remove', None))
print(words_to_remove)
if words_to_remove != "":
words_to_remove = replace_string_character(words_to_remove)
print(words_to_remove)
user.remove_search_word(words_to_remove)
print(user_name)
print(word_to_add)
print(user.all_search_words())
return JsonResponse(user.all_search_words(), safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def accumulate_tweets(request):
if request.method == 'POST':
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
user = get_user(name)
for loc in locations:
init_tweet_accumulation_tweet_list(user, loc['region'], loc['place'], word_list)
empty = {}
return JsonResponse(empty)
@csrf_exempt
def get_query_links(request):
if request.method == 'POST':
name = request.POST.get('user_name', None)
locations = jsonpickle.decode(request.POST.get('locations_list', None))
print(locations)
user = get_user(name)
results = []
for loc in locations:
results.append(user.get_region(loc['region']).get_place_by_name(loc['place']).get_query_string())
print(results)
return JsonResponse(results, safe=False)
else:
empty = {}
return JsonResponse(empty)
def show_tweets_list(request, name):
user = get_user(name)
print("in show_tweets_list")
if request.method == 'POST':
search_form = GetTweetsForm(request.POST)
region, place = handle_search_form(search_form)
#quary = init_tweet_accumulation_tweet_list(user, region, place)
quary = "I am lish lash"
region = user.get_region(region)
place = region.get_place_by_name(place)
return render(request, 'tweets.html', { 'quary': quary, 'region': region, 'place': place, 'user': name})
elif request.method == 'GET':
quary = "I am lish lash"
region = "Lash"
place = "Lish"
return render(request, 'tweets.html', { 'quary': quary, 'region': region, 'place': place, 'user': name})
@csrf_exempt
def popular_users_get(request):
print("popular_users_get")
users_list = []
if request.method == 'POST':
twitter_users, tweets = generate_users_tweets(request, tasdocs=True, uasdocs=True)
if isinstance(twitter_users, Exception):
return JsonResponse(str(twitter_users), safe=False, status=500)
elif len(twitter_users) == 0:
return JsonResponse("Error: No Tweets in location / date / search phrase (if included)", safe=False, status=500)
slider_valus = slider_val_transform(jsonpickle.decode(request.POST.get('sliders_data', None)))
print(len(twitter_users))
print(len(tweets))
# ["followers_slider", "retweet_slider", "favorites_slider", "tweets_slider"]
# ["followers, statusses, favorites (likes), retweets]
queriesManager = QueriesManager()
params = ['Opinion_Leaders', [str(slider_valus[0][1])], [str(slider_valus[0][0])],
[str(slider_valus[3][1])], [str(slider_valus[3][0])], [str(slider_valus[2][1])],
[str(slider_valus[2][0])],
[str(slider_valus[1][1])], [str(slider_valus[1][0])]]
print(params)
print(twitter_users[0].keys())
df = queriesManager.call_querie(params, tweets, twitter_users)
print(df)
if df.empty:
return JsonResponse("Error: No opinion leaders found", safe=False)
idlist = df['id'].tolist()
print(len(idlist))
users_list = get_from_db(idlist, twitter_users_database_name, UserExtension)
print(len(users_list))
user_ext_list = user_ext_to_json(users_list)
return JsonResponse(user_ext_list, safe=False)
@csrf_exempt
def tweet_list_place(request):
print("tweet_list_place")
if request.method == 'POST':
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
print("word list is: " + str(word_list))
print("logic is: " + logic)
        if start_date != "" and end_date != "":
days_list = generate_days_list(start_date, end_date)
else:
days_list = None
print(days_list)
total_tweets = []
for loc in locations:
place = get_user(name).get_region(loc['region']).get_place_by_name(loc['place'])
mylist = place.get_tweets_directly(name, loc['region'], days_list, word_list, logic=logic, exact=exact)
if isinstance(mylist, Exception):
return JsonResponse(str(mylist), safe=False, status=500)
#print(mylist)
json_list = []
for l in mylist:
result = generate_tweet_sendaway(l.tweet)
json_list.append(result)
total_tweets = total_tweets + json_list
#print(total_tweets)
if len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location / date / search phrase (if included)", safe=False, status=500)
return JsonResponse(total_tweets, safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def show_users_place(request):
print("show_users_place")
if request.method == 'POST':
twitter_users, _ = generate_users_tweets(request)
if isinstance(twitter_users, Exception):
return JsonResponse(str(twitter_users), safe=False, status=500)
elif len(twitter_users) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
return JsonResponse(user_ext_to_json(twitter_users), safe=False)
else:
empty = {}
return JsonResponse(empty)
@csrf_exempt
def word_trends_get(request):
print("word_trends_get")
total_result = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
days_list = generate_days_list(start_date, end_date)
total_tweets = get_tweet_list(locations, get_user(name), days_list, word_list=word_list, asdocs=True, exact=exact)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
word_list, pharses_list = filter_strings(word_list)
if not exact:
for word in word_list:
pharses_list.append(word)
word_list = []
params = ["Word_trend", word_list]
print(params)
print(len(total_tweets))
df = queriesManager.call_querie(params, total_tweets, [])
phrase_dfs = []
for phrase in pharses_list:
params = ['Phrase_trend', phrase]
print(params)
phrase_dfs.append({"df": queriesManager.call_querie(params, total_tweets, []), "phrase": phrase})
print(phrase_dfs)
for word in word_list:
total_result.append(single_word_obj(word, 'word', df, days_list))
for dic in phrase_dfs:
total_result.append(single_word_obj(dic['phrase'], 'phrase', dic["df"], days_list))
print(total_result)
return JsonResponse(total_result, safe=False)
@csrf_exempt
def top_words_per_date_get(request):
print("top_words_per_date_get")
words_list = []
counter_list = []
days_list = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, words_list, logic, exact = parse_parameters(request)
days_list = generate_days_list(start_date, end_date)
dates_counter = dict.fromkeys(days_list)
print(days_list)
total_tweets = get_tweet_list(locations, get_user(name), days_list, None, asdocs=True)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
for k, _ in dates_counter.items():
dates_counter[k] = {'word': "", 'count': 0}
params = ["Popular_word_per_date"]
print(params)
print(len(total_tweets))
df = queriesManager.call_querie(params, total_tweets, [])
print(df)
words_list = ["No Tweets"] * len(days_list)
counter_list = [0] * len(days_list)
for i, day in enumerate(days_list):
print(day)
col = df.loc[df['date'] == day]
for index, row in col.iterrows():
words_list[i] = row['popular_word']
counter_list[i] = row['counter']
print(days_list)
print(words_list)
print(counter_list)
return JsonResponse({'dates': days_list, 'words': words_list, 'counter': counter_list}, safe=False)
@csrf_exempt
def popularity_of_words_get(request):
print("popularity_of_words_get")
df = ""
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
word_list, pharase_list = filter_strings(word_list)
word_list = word_list + phrase_list_to_word_list(pharase_list)
days_list = generate_days_list(start_date, end_date)
total_tweets = get_tweet_list(locations, get_user(name), days_list, word_list=word_list, asdocs=True, exact=exact)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
params = ["Popularity_of_word_bank_per_place", word_list]
print(params)
print(len(total_tweets))
df = queriesManager.call_querie(params, total_tweets, [])
if df.empty:
return JsonResponse("Error: No appearances of search phrases in tweets", safe=False)
rows = df.shape[0]
df = df.to_json()
df = df[:-1]
df = df + ', "rows": ' + str(rows)
df = df + ', "word_list": ' + str(word_list) + '}'
print(df)
df = df.replace("'", '"')
print(df)
return JsonResponse(df, safe=False)
@csrf_exempt
def most_popular_word_get(request):
print("most_popular_word_get")
place_list = []
word_list = []
counter_list = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
days_list = generate_days_list(start_date, end_date)
total_tweets = get_tweet_list(locations, get_user(name), days_list, None, asdocs=True)
if isinstance(total_tweets, Exception):
return JsonResponse(str(total_tweets), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
params = ["Popular_word_per_place"]
df = queriesManager.call_querie(params, total_tweets, [])
print(df)
for i, row in df.iterrows():
place_list.append(row['place_name'])
word_list.append(row['popular_word'])
counter_list.append(row['counter'])
return JsonResponse({'places': place_list, 'words': word_list, 'counters': counter_list}, safe=False)
@csrf_exempt
def first_time_get(request):
print("first_time_get")
df_list = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
max_results = request.POST.get('max_results', None)
total_users, total_tweets = generate_users_tweets(request, use_words=True, tasdocs=True, uasdocs=True)
if isinstance(total_users, Exception):
return JsonResponse(str(total_users), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
print(len(total_tweets))
print(len(total_users))
for word in word_list:
if exact:
params = ["First_Time", [" " + word + " "], [max_results]]
else:
params = ["First_Time", [word], [max_results]]
print(params)
df = queriesManager.call_querie(params, total_tweets, total_users)
print(df)
if df.empty:
continue
row_data = []
# [id, text, user_id, screen_name, full_date, time_rnk]
for index, row in df.iterrows():
row_data.append([row["id"], row["text"], row["screen_name"], convert_to_iso(row["full_date"]),
row["time_rnk"]])
df_list.append({"word": word, "len": df.shape[0], "row_data": row_data})
if len(df_list) == 0:
return JsonResponse("Error: No Tweets in location and date contain search phrase", safe=False, status=500)
return JsonResponse(df_list, safe=False)
@csrf_exempt
def most_retweeted_get(request):
print("most_retweeted_get")
row_data = []
if request.method == 'POST':
queriesManager = QueriesManager()
name, locations, start_date, end_date, word_list, logic, exact = parse_parameters(request)
max_results = request.POST.get('max_results', None)
total_users, total_tweets = generate_users_tweets(request, use_words=True, tasdocs=True, uasdocs=True)
if isinstance(total_users, Exception):
return JsonResponse(str(total_users), safe=False, status=500)
elif len(total_tweets) == 0:
return JsonResponse("Error: No Tweets in location and date", safe=False, status=500)
print(len(total_tweets))
print(len(total_users))
for word in word_list:
if exact:
params = ["Most_Retweeted", [" " + word + " "], [max_results]]
else:
params = ["Most_Retweeted", [word], [max_results]]
print(params)
df = queriesManager.call_querie(params, total_tweets, total_users)
print(df)
if df.empty:
continue
# [phrase, id, text, user_id, screen_name, full_date, retweet_count, retweet_rnk
for index, row in df.iterrows():
row_data.append([row["id"], row["text"], row["screen_name"], convert_to_iso(row["full_date"]),
row["retweet_count"], row["retweet_rnk"], row["phrase"]])
print(row_data)
if len(row_data) == 0:
return JsonResponse("Error: No Tweets in location and date contain search phrase", safe=False, status=500)
return JsonResponse(row_data, safe=False)
def health(request):
state = {"status": "UP"}
return JsonResponse(state)
def handler404(request):
return render(request, '404.html', status=404)
def handler500(request):
return render(request, '500.html', status=500)
| true
| true
|
790aeefe9f2a259615a35b09061f04aa296a58d9
| 5,885
|
py
|
Python
|
electrum/constants.py
|
dappcenter/electrum-dogecoin
|
ee29a07b127a4120e785c6941af9313176a3c4f3
|
[
"MIT"
] | 3
|
2021-10-03T06:45:36.000Z
|
2022-03-29T05:13:52.000Z
|
electrum/constants.py
|
0xAyanami/electrum
|
4c3c8b6a36f8b139625d7132d200ffbc742123b0
|
[
"MIT"
] | 1
|
2022-03-29T05:20:21.000Z
|
2022-03-29T21:31:06.000Z
|
electrum/constants.py
|
0xAyanami/electrum
|
4c3c8b6a36f8b139625d7132d200ffbc742123b0
|
[
"MIT"
] | 4
|
2021-11-08T20:44:38.000Z
|
2022-03-28T20:16:02.000Z
|
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Dogecoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
from .util import inv_dict, all_subclasses
from . import bitcoin
def read_json(filename, default):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = default
return r
GIT_REPO_URL = "https://github.com/spesmilo/electrum"
GIT_REPO_ISSUES_URL = "https://github.com/spesmilo/electrum/issues"
BIP39_WALLET_FORMATS = read_json('bip39_wallet_formats.json', [])
class AbstractNet:
NET_NAME: str
TESTNET: bool
WIF_PREFIX: int
ADDRTYPE_P2PKH: int
ADDRTYPE_P2SH: int
SEGWIT_HRP: str
# BOLT11_HRP: str
GENESIS: str
BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS: int = 0
BIP44_COIN_TYPE: int
LN_REALM_BYTE: int
@classmethod
def max_checkpoint(cls) -> int:
return max(0, len(cls.CHECKPOINTS) * 2016 - 1)
@classmethod
def rev_genesis_bytes(cls) -> bytes:
return bytes.fromhex(bitcoin.rev_hex(cls.GENESIS))
class BitcoinMainnet(AbstractNet):
TESTNET = False
WIF_PREFIX = 158
ADDRTYPE_P2PKH = 30
ADDRTYPE_P2SH = 22
SEGWIT_HRP = "doge"
# GENESIS = "000000000062b72c5e2ceb45fbc8587e807c155b0da735e6483dfba2f0a9c770"
GENESIS = "1a91e3dace36e2be3bf030a65679fe821aa1d6ef92e7c9902eb318182c355691"
DEFAULT_PORTS = {'t': '50011', 's': '50022'}
DEFAULT_SERVERS = read_json('servers.json', {})
CHECKPOINTS = read_json('', [])
BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS = 200
#BITCOIN_HEADER_PRIV = "02fac398"
#BITCOIN_HEADER_PUB = "02facafd"
XPRV_HEADERS = {
'standard': 0x02fac398, # xprv
# 'p2wpkh-p2sh': 0x02fac398, # yprv
# 'p2wsh-p2sh': 0x02fac398, # Yprv
# 'p2wpkh': 0x02fac398, # zprv
# 'p2wsh': 0x02fac398, # Zprv
}
XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
XPUB_HEADERS = {
'standard': 0x02facafd, # xpub
# 'p2wpkh-p2sh': 0x02facafd, # ypub
# 'p2wsh-p2sh': 0x02facafd, # Ypub
# 'p2wpkh': 0x02facafd, # zpub
# 'p2wsh': 0x02facafd, # Zpub
}
XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
# BIP44_COIN_TYPE = 1
#namecoin
# BIP44_COIN_TYPE = 7
# dogecoin
BIP44_COIN_TYPE = 1
LN_REALM_BYTE = 0
LN_DNS_SEEDS = [
'radioblockchain.info',
'radiopool.me',
]
AUXPOW_CHAIN_ID = 0x00620004
AUXPOW_START_HEIGHT = 371337
NAME_EXPIRATION = 60
class BitcoinTestnet(AbstractNet):
TESTNET = True
WIF_PREFIX = 239
ADDRTYPE_P2PKH = 111
ADDRTYPE_P2SH = 196
SEGWIT_HRP = "xdoge"
GENESIS = "00000a2ee9363d21e47bc10d5b1e39d4ae4bd950491790e522f90dad86d2d1eb"
# GENESIS = "00000007199508e34a9ff81e6ec0c477a4cccff2a4767a8eee39c11db367b008"
DEFAULT_PORTS = {'t': '51001', 's': '51002'}
DEFAULT_SERVERS = read_json('servers_testnet.json', {})
CHECKPOINTS = read_json('checkpoints_testnet.json', [])
XPRV_HEADERS = {
'standard': 0x04358394, # tprv
# 'p2wpkh-p2sh': 0x044a4e28, # uprv
# 'p2wsh-p2sh': 0x024285b5, # Uprv
# 'p2wpkh': 0x045f18bc, # vprv
# 'p2wsh': 0x02575048, # Vprv
}
XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
XPUB_HEADERS = {
'standard': 0x043587cf, # tpub
# 'p2wpkh-p2sh': 0x044a5262, # upub
# 'p2wsh-p2sh': 0x024289ef, # Upub
# 'p2wpkh': 0x045f1cf6, # vpub
# 'p2wsh': 0x02575483, # Vpub
}
XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
BIP44_COIN_TYPE = 3
LN_REALM_BYTE = 1
LN_DNS_SEEDS = []
AUXPOW_CHAIN_ID = 0x0062
AUXPOW_START_HEIGHT = 200
NAME_EXPIRATION = 36000
class BitcoinRegtest(BitcoinTestnet):
SEGWIT_HRP = "ncrt"
GENESIS = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
LN_DNS_SEEDS = []
NAME_EXPIRATION = 30
class BitcoinSimnet(BitcoinTestnet):
WIF_PREFIX = 0x64
ADDRTYPE_P2PKH = 0x3f
ADDRTYPE_P2SH = 0x7b
SEGWIT_HRP = "sb"
GENESIS = "683e86bd5c6d110d91b94b97137ba6bfe02dbbdb8e3dff722a669b5d69d77af6"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
LN_DNS_SEEDS = []
NETS_LIST = tuple(all_subclasses(AbstractNet))
# don't import net directly, import the module instead (so that net is singleton)
net = BitcoinMainnet
def set_signet():
global net
net = BitcoinSignet
def set_simnet():
global net
net = BitcoinSimnet
def set_mainnet():
global net
net = BitcoinMainnet
def set_testnet():
global net
net = BitcoinTestnet
def set_regtest():
global net
net = BitcoinRegtest
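The comment above ("don't import net directly, import the module instead") is the crux of how network switching works: set_testnet() and friends rebind the module-level name, so callers must read it through the module. A short usage sketch; the electrum.constants import path is inferred from the file path recorded for this entry:

from electrum import constants

constants.set_testnet()              # rebinds constants.net to BitcoinTestnet
print(constants.net.SEGWIT_HRP)      # "xdoge" in this Dogecoin fork
print(constants.net.max_checkpoint())

# By contrast, `from electrum.constants import net` would capture the original
# BitcoinMainnet object and never observe the rebinding above.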
| 27.890995
| 81
| 0.683772
|
import os
import json
from .util import inv_dict, all_subclasses
from . import bitcoin
def read_json(filename, default):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = default
return r
GIT_REPO_URL = "https://github.com/spesmilo/electrum"
GIT_REPO_ISSUES_URL = "https://github.com/spesmilo/electrum/issues"
BIP39_WALLET_FORMATS = read_json('bip39_wallet_formats.json', [])
class AbstractNet:
NET_NAME: str
TESTNET: bool
WIF_PREFIX: int
ADDRTYPE_P2PKH: int
ADDRTYPE_P2SH: int
SEGWIT_HRP: str
GENESIS: str
BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS: int = 0
BIP44_COIN_TYPE: int
LN_REALM_BYTE: int
@classmethod
def max_checkpoint(cls) -> int:
return max(0, len(cls.CHECKPOINTS) * 2016 - 1)
@classmethod
def rev_genesis_bytes(cls) -> bytes:
return bytes.fromhex(bitcoin.rev_hex(cls.GENESIS))
class BitcoinMainnet(AbstractNet):
TESTNET = False
WIF_PREFIX = 158
ADDRTYPE_P2PKH = 30
ADDRTYPE_P2SH = 22
SEGWIT_HRP = "doge"
GENESIS = "1a91e3dace36e2be3bf030a65679fe821aa1d6ef92e7c9902eb318182c355691"
DEFAULT_PORTS = {'t': '50011', 's': '50022'}
DEFAULT_SERVERS = read_json('servers.json', {})
CHECKPOINTS = read_json('', [])
BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS = 200
XPRV_HEADERS = {
'standard': 0x02fac398,
    }
    XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
XPUB_HEADERS = {
'standard': 0x02facafd,
    }
    XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
BIP44_COIN_TYPE = 1
LN_REALM_BYTE = 0
LN_DNS_SEEDS = [
'radioblockchain.info',
'radiopool.me',
]
AUXPOW_CHAIN_ID = 0x00620004
AUXPOW_START_HEIGHT = 371337
NAME_EXPIRATION = 60
class BitcoinTestnet(AbstractNet):
TESTNET = True
WIF_PREFIX = 239
ADDRTYPE_P2PKH = 111
ADDRTYPE_P2SH = 196
SEGWIT_HRP = "xdoge"
GENESIS = "00000a2ee9363d21e47bc10d5b1e39d4ae4bd950491790e522f90dad86d2d1eb"
DEFAULT_PORTS = {'t': '51001', 's': '51002'}
DEFAULT_SERVERS = read_json('servers_testnet.json', {})
CHECKPOINTS = read_json('checkpoints_testnet.json', [])
XPRV_HEADERS = {
'standard': 0x04358394,
    }
    XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
XPUB_HEADERS = {
'standard': 0x043587cf,
    }
    XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
BIP44_COIN_TYPE = 3
LN_REALM_BYTE = 1
LN_DNS_SEEDS = []
AUXPOW_CHAIN_ID = 0x0062
AUXPOW_START_HEIGHT = 200
NAME_EXPIRATION = 36000
class BitcoinRegtest(BitcoinTestnet):
SEGWIT_HRP = "ncrt"
GENESIS = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
LN_DNS_SEEDS = []
NAME_EXPIRATION = 30
class BitcoinSimnet(BitcoinTestnet):
WIF_PREFIX = 0x64
ADDRTYPE_P2PKH = 0x3f
ADDRTYPE_P2SH = 0x7b
SEGWIT_HRP = "sb"
GENESIS = "683e86bd5c6d110d91b94b97137ba6bfe02dbbdb8e3dff722a669b5d69d77af6"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
LN_DNS_SEEDS = []
NETS_LIST = tuple(all_subclasses(AbstractNet))
net = BitcoinMainnet
def set_signet():
global net
net = BitcoinSignet
def set_simnet():
global net
net = BitcoinSimnet
def set_mainnet():
global net
net = BitcoinMainnet
def set_testnet():
global net
net = BitcoinTestnet
def set_regtest():
global net
net = BitcoinRegtest
| true
| true
|
790af0bbaa36b748af8db96c98f4a5bb074638c4
| 1,256
|
py
|
Python
|
enums.py
|
chenghao/haoAdmin
|
b627f3ead498cce29b23038c92230a63b3a0548f
|
[
"Apache-2.0"
] | 1
|
2017-12-15T11:27:31.000Z
|
2017-12-15T11:27:31.000Z
|
enums.py
|
chenghao/haoAdmin
|
b627f3ead498cce29b23038c92230a63b3a0548f
|
[
"Apache-2.0"
] | null | null | null |
enums.py
|
chenghao/haoAdmin
|
b627f3ead498cce29b23038c92230a63b3a0548f
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
__author__ = "gaunt"
import enum
# Success flag returned for tables in the layui front-end framework
layui_table_code = 0
class BaseEnum(enum.Enum):
pass
class ResultEnum(BaseEnum):
success = {"code": 200, "msg": "操作成功"}
error = {"code": 500, "msg": "操作失败"}
error400 = {"code": 400, "msg": "400 - 请求参数错误"}
error401 = {"code": 401, "msg": "401 - 未授权"}
error404 = {"code": 404, "msg": "404 - 未找到资源"}
error405 = {"code": 405, "msg": "405 - 没有找到请求方法"}
error422 = {"code": 422, "msg": "422 - 请求参数不完整"}
login_error = {"code": 1000, "msg": "用户名或密码失败"}
def success_result(data=None, code=None):
value = ResultEnum.success.value
return {
"code": code if code is not None else value["code"],
"msg": value["msg"],
"data": data if data is not None else ""
}
def error_result(data=None, code=None, msg=None):
value = ResultEnum.error.value
return {
"code": code if code is not None else value["code"],
"msg": msg if msg is not None else value["msg"],
"data": data if data is not None else ""
}
class Logical(BaseEnum):
AND = "and"
OR = "or"
class UserStateEnum(BaseEnum):
    NORMAL = 1  # normal
    FREEZE = 0  # frozen
if __name__ == "__main__":
print(Logical.AND.value)
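A small usage sketch for the two result helpers defined above; the dictionaries in the comments are exactly what the functions return as written (the default success msg stays the Chinese '操作成功', i.e. "operation succeeded"):

print(success_result({"id": 1}))
# -> {'code': 200, 'msg': '操作成功', 'data': {'id': 1}}

print(error_result(msg="record not found", code=404))
# -> {'code': 404, 'msg': 'record not found', 'data': ''}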
| 22.428571
| 60
| 0.585987
|
__author__ = "gaunt"
import enum
layui_table_code = 0
class BaseEnum(enum.Enum):
pass
class ResultEnum(BaseEnum):
success = {"code": 200, "msg": "操作成功"}
error = {"code": 500, "msg": "操作失败"}
error400 = {"code": 400, "msg": "400 - 请求参数错误"}
error401 = {"code": 401, "msg": "401 - 未授权"}
error404 = {"code": 404, "msg": "404 - 未找到资源"}
error405 = {"code": 405, "msg": "405 - 没有找到请求方法"}
error422 = {"code": 422, "msg": "422 - 请求参数不完整"}
login_error = {"code": 1000, "msg": "用户名或密码失败"}
def success_result(data=None, code=None):
value = ResultEnum.success.value
return {
"code": code if code is not None else value["code"],
"msg": value["msg"],
"data": data if data is not None else ""
}
def error_result(data=None, code=None, msg=None):
value = ResultEnum.error.value
return {
"code": code if code is not None else value["code"],
"msg": msg if msg is not None else value["msg"],
"data": data if data is not None else ""
}
class Logical(BaseEnum):
AND = "and"
OR = "or"
class UserStateEnum(BaseEnum):
NORMAL = 1
FREEZE = 0
if __name__ == "__main__":
print(Logical.AND.value)
| true
| true
|
790af15b41bd54861264ce39f31388800d44dbd5
| 243
|
py
|
Python
|
transformers_interpret/__init__.py
|
MichalMalyska/transformers-interpret
|
878ec4b6928e2417a3ffe8499be52033938090f0
|
[
"Apache-2.0"
] | 1
|
2021-07-06T21:07:49.000Z
|
2021-07-06T21:07:49.000Z
|
transformers_interpret/__init__.py
|
MichalMalyska/transformers-interpret
|
878ec4b6928e2417a3ffe8499be52033938090f0
|
[
"Apache-2.0"
] | null | null | null |
transformers_interpret/__init__.py
|
MichalMalyska/transformers-interpret
|
878ec4b6928e2417a3ffe8499be52033938090f0
|
[
"Apache-2.0"
] | null | null | null |
from .attributions import Attributions, LIGAttributions
from .explainer import BaseExplainer
from .explainers.question_answering import QuestionAnsweringExplainer
from .explainers.sequence_classification import SequenceClassificationExplainer
| 48.6
| 79
| 0.901235
|
from .attributions import Attributions, LIGAttributions
from .explainer import BaseExplainer
from .explainers.question_answering import QuestionAnsweringExplainer
from .explainers.sequence_classification import SequenceClassificationExplainer
| true
| true
|
790af198729099b8a1759e463889e41f0a8ccf9d
| 3,343
|
py
|
Python
|
tests/integration/factories/master/test_master.py
|
cmcmarrow/pytest-salt-factories
|
12515411ea0fa11d7058a9deb61584a56c5f5108
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/factories/master/test_master.py
|
cmcmarrow/pytest-salt-factories
|
12515411ea0fa11d7058a9deb61584a56c5f5108
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/factories/master/test_master.py
|
cmcmarrow/pytest-salt-factories
|
12515411ea0fa11d7058a9deb61584a56c5f5108
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import tempfile
import pytest
@pytest.fixture(scope="module")
def master(request, salt_factories):
return salt_factories.spawn_master(request, "master-1")
@pytest.fixture(scope="module")
def minion(request, salt_factories, master):
return salt_factories.spawn_minion(request, "minion-1", master_id="master-1")
@pytest.fixture
def minion_3(request, salt_factories, master):
return salt_factories.spawn_minion(request, "minion-3", master_id="master-1")
@pytest.fixture
def salt_run(salt_factories, master):
return salt_factories.get_salt_run_cli(master.config["id"])
@pytest.fixture
def salt_cp(salt_factories, master):
return salt_factories.get_salt_cp_cli(master.config["id"])
@pytest.fixture
def salt_key(salt_factories, master):
return salt_factories.get_salt_key_cli(master.config["id"])
def test_master(master):
assert master.is_alive()
def test_salt_run(master, salt_run):
max_open_files_config_value = master.config["max_open_files"]
ret = salt_run.run("config.get", "max_open_files")
assert ret.exitcode == 0, ret
assert ret.json == max_open_files_config_value
def test_salt_cp(master, minion, salt_cp, tempfiles):
"""
Test copying a file from the master to the minion
"""
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run("minion-1", sls, dest)
assert ret.exitcode == 0, ret
assert ret.json == {"minion-1": {dest: True}}, ret
assert os.path.exists(dest)
with open(dest) as rfh:
assert rfh.read() == contents
finally:
if os.path.exists(dest):
os.unlink(dest)
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run(sls, dest, minion_tgt="minion-1")
assert ret.exitcode == 0, ret
assert ret.json == {dest: True}, ret
assert os.path.exists(dest)
with open(dest) as rfh:
assert rfh.read() == contents
finally:
if os.path.exists(dest):
os.unlink(dest)
def test_salt_cp_no_match(master, minion, salt_cp, tempfiles):
assert master.is_alive()
assert minion.is_alive()
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run(sls, dest, minion_tgt="minion-2")
assert ret.exitcode == 0, ret
assert not ret.json, ret
assert not os.path.exists(dest)
finally:
if os.path.exists(dest):
os.unlink(dest)
def test_salt_key(master, minion, minion_3, salt_key):
ret = salt_key.run("--list-all")
assert ret.exitcode == 0, ret
assert ret.json == {
"minions": ["minion-1", "minion-3"],
"minions_pre": [],
"minions_denied": [],
"minions_rejected": [],
}, ret
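These tests lean on pytest fixture scoping: master and minion are built once per module, while minion_3 and the CLI fixtures are rebuilt for every test. A self-contained sketch of that behaviour, with counters invented purely for the demo:

import pytest

created = {"module": 0, "function": 0}

@pytest.fixture(scope="module")
def per_module():
    # Constructed once for the whole test module.
    created["module"] += 1
    return created["module"]

@pytest.fixture
def per_function():
    # Constructed again for every test function.
    created["function"] += 1
    return created["function"]

def test_one(per_module, per_function):
    assert (per_module, per_function) == (1, 1)

def test_two(per_module, per_function):
    assert (per_module, per_function) == (1, 2)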
| 27.858333
| 81
| 0.647921
|
import os
import tempfile
import pytest
@pytest.fixture(scope="module")
def master(request, salt_factories):
return salt_factories.spawn_master(request, "master-1")
@pytest.fixture(scope="module")
def minion(request, salt_factories, master):
return salt_factories.spawn_minion(request, "minion-1", master_id="master-1")
@pytest.fixture
def minion_3(request, salt_factories, master):
return salt_factories.spawn_minion(request, "minion-3", master_id="master-1")
@pytest.fixture
def salt_run(salt_factories, master):
return salt_factories.get_salt_run_cli(master.config["id"])
@pytest.fixture
def salt_cp(salt_factories, master):
return salt_factories.get_salt_cp_cli(master.config["id"])
@pytest.fixture
def salt_key(salt_factories, master):
return salt_factories.get_salt_key_cli(master.config["id"])
def test_master(master):
assert master.is_alive()
def test_salt_run(master, salt_run):
max_open_files_config_value = master.config["max_open_files"]
ret = salt_run.run("config.get", "max_open_files")
assert ret.exitcode == 0, ret
assert ret.json == max_open_files_config_value
def test_salt_cp(master, minion, salt_cp, tempfiles):
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run("minion-1", sls, dest)
assert ret.exitcode == 0, ret
assert ret.json == {"minion-1": {dest: True}}, ret
assert os.path.exists(dest)
with open(dest) as rfh:
assert rfh.read() == contents
finally:
if os.path.exists(dest):
os.unlink(dest)
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run(sls, dest, minion_tgt="minion-1")
assert ret.exitcode == 0, ret
assert ret.json == {dest: True}, ret
assert os.path.exists(dest)
with open(dest) as rfh:
assert rfh.read() == contents
finally:
if os.path.exists(dest):
os.unlink(dest)
def test_salt_cp_no_match(master, minion, salt_cp, tempfiles):
assert master.is_alive()
assert minion.is_alive()
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run(sls, dest, minion_tgt="minion-2")
assert ret.exitcode == 0, ret
assert not ret.json, ret
assert not os.path.exists(dest)
finally:
if os.path.exists(dest):
os.unlink(dest)
def test_salt_key(master, minion, minion_3, salt_key):
ret = salt_key.run("--list-all")
assert ret.exitcode == 0, ret
assert ret.json == {
"minions": ["minion-1", "minion-3"],
"minions_pre": [],
"minions_denied": [],
"minions_rejected": [],
}, ret
| true
| true
|
hexsha: 790af1afaa878ca8158a89e7077dc38f1aa44dcf
size: 3529
ext: py
lang: Python
max_stars_repo_path: homeassistant/components/goodwe/number.py
max_stars_repo_name: kubawolanin/core
max_stars_repo_head_hexsha: ba1b09a3a52954c7cbf60df926e3c9d7a5779d99
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: homeassistant/components/goodwe/number.py
max_issues_repo_name: kubawolanin/core
max_issues_repo_head_hexsha: ba1b09a3a52954c7cbf60df926e3c9d7a5779d99
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 10
max_issues_repo_issues_event_min_datetime: 2022-01-26T06:25:49.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T07:18:45.000Z
max_forks_repo_path: homeassistant/components/goodwe/number.py
max_forks_repo_name: swimguy81084/home-assistant
max_forks_repo_head_hexsha: 224f9600507b8684b919164e7de4ad81a649b9cb
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
"""GoodWe PV inverter numeric settings entities."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
import logging
from goodwe import Inverter, InverterError
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.const import ENTITY_CATEGORY_CONFIG, PERCENTAGE, POWER_WATT
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, KEY_DEVICE_INFO, KEY_INVERTER
_LOGGER = logging.getLogger(__name__)
@dataclass
class GoodweNumberEntityDescriptionBase:
"""Required values when describing Goodwe number entities."""
getter: Callable[[Inverter], Awaitable[int]]
setter: Callable[[Inverter, int], Awaitable[None]]
@dataclass
class GoodweNumberEntityDescription(
NumberEntityDescription, GoodweNumberEntityDescriptionBase
):
"""Class describing Goodwe number entities."""
NUMBERS = (
GoodweNumberEntityDescription(
key="grid_export_limit",
name="Grid export limit",
icon="mdi:transmission-tower",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=POWER_WATT,
getter=lambda inv: inv.get_grid_export_limit(),
setter=lambda inv, val: inv.set_grid_export_limit(val),
step=100,
min_value=0,
max_value=10000,
),
GoodweNumberEntityDescription(
key="battery_discharge_depth",
name="Depth of discharge (on-grid)",
icon="mdi:battery-arrow-down",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=PERCENTAGE,
getter=lambda inv: inv.get_ongrid_battery_dod(),
setter=lambda inv, val: inv.set_ongrid_battery_dod(val),
step=1,
min_value=0,
max_value=99,
),
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the inverter select entities from a config entry."""
inverter = hass.data[DOMAIN][config_entry.entry_id][KEY_INVERTER]
device_info = hass.data[DOMAIN][config_entry.entry_id][KEY_DEVICE_INFO]
entities = []
for description in NUMBERS:
try:
current_value = await description.getter(inverter)
except InverterError:
# Inverter model does not support this setting
_LOGGER.debug("Could not read inverter setting %s", description.key)
continue
entities.append(
InverterNumberEntity(device_info, description, inverter, current_value),
)
async_add_entities(entities)
class InverterNumberEntity(NumberEntity):
"""Inverter numeric setting entity."""
_attr_should_poll = False
entity_description: GoodweNumberEntityDescription
def __init__(
self,
device_info: DeviceInfo,
description: GoodweNumberEntityDescription,
inverter: Inverter,
current_value: int,
) -> None:
"""Initialize the number inverter setting entity."""
self.entity_description = description
self._attr_unique_id = f"{DOMAIN}-{description.key}-{inverter.serial_number}"
self._attr_device_info = device_info
self._attr_value = float(current_value)
self._inverter: Inverter = inverter
async def async_set_value(self, value: float) -> None:
"""Set new value."""
if self.entity_description.setter:
await self.entity_description.setter(self._inverter, int(value))
self._attr_value = value
self.async_write_ha_state()
avg_line_length: 32.081818
max_line_length: 85
alphanum_fraction: 0.706999
content_no_comment:
from __future__ import annotations
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
import logging
from goodwe import Inverter, InverterError
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.const import ENTITY_CATEGORY_CONFIG, PERCENTAGE, POWER_WATT
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, KEY_DEVICE_INFO, KEY_INVERTER
_LOGGER = logging.getLogger(__name__)
@dataclass
class GoodweNumberEntityDescriptionBase:
getter: Callable[[Inverter], Awaitable[int]]
setter: Callable[[Inverter, int], Awaitable[None]]
@dataclass
class GoodweNumberEntityDescription(
NumberEntityDescription, GoodweNumberEntityDescriptionBase
):
NUMBERS = (
GoodweNumberEntityDescription(
key="grid_export_limit",
name="Grid export limit",
icon="mdi:transmission-tower",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=POWER_WATT,
getter=lambda inv: inv.get_grid_export_limit(),
setter=lambda inv, val: inv.set_grid_export_limit(val),
step=100,
min_value=0,
max_value=10000,
),
GoodweNumberEntityDescription(
key="battery_discharge_depth",
name="Depth of discharge (on-grid)",
icon="mdi:battery-arrow-down",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=PERCENTAGE,
getter=lambda inv: inv.get_ongrid_battery_dod(),
setter=lambda inv, val: inv.set_ongrid_battery_dod(val),
step=1,
min_value=0,
max_value=99,
),
)
async def async_setup_entry(hass, config_entry, async_add_entities):
inverter = hass.data[DOMAIN][config_entry.entry_id][KEY_INVERTER]
device_info = hass.data[DOMAIN][config_entry.entry_id][KEY_DEVICE_INFO]
entities = []
for description in NUMBERS:
try:
current_value = await description.getter(inverter)
except InverterError:
_LOGGER.debug("Could not read inverter setting %s", description.key)
continue
entities.append(
InverterNumberEntity(device_info, description, inverter, current_value),
)
async_add_entities(entities)
class InverterNumberEntity(NumberEntity):
_attr_should_poll = False
entity_description: GoodweNumberEntityDescription
def __init__(
self,
device_info: DeviceInfo,
description: GoodweNumberEntityDescription,
inverter: Inverter,
current_value: int,
) -> None:
self.entity_description = description
self._attr_unique_id = f"{DOMAIN}-{description.key}-{inverter.serial_number}"
self._attr_device_info = device_info
self._attr_value = float(current_value)
self._inverter: Inverter = inverter
async def async_set_value(self, value: float) -> None:
if self.entity_description.setter:
await self.entity_description.setter(self._inverter, int(value))
self._attr_value = value
self.async_write_ha_state()
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 790af2bda5365b4fa09f1a04fd33592582de11b3
size: 584
ext: py
lang: Python
max_stars_repo_path: tictactoe/tools/validators.py
max_stars_repo_name: motiejus/tictactoe
max_stars_repo_head_hexsha: 66bb11e9a892a0c682af7c65ab08f048b52ca24b
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-04-25T10:45:27.000Z
max_stars_repo_stars_event_max_datetime: 2021-04-25T10:45:27.000Z
max_issues_repo_path: tictactoe/tools/validators.py
max_issues_repo_name: motiejus/tictactoe
max_issues_repo_head_hexsha: 66bb11e9a892a0c682af7c65ab08f048b52ca24b
max_issues_repo_licenses: ["MIT"]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2021-06-10T19:14:37.000Z
max_issues_repo_issues_event_max_datetime: 2021-06-10T19:14:37.000Z
max_forks_repo_path: tictactoe/tools/validators.py
max_forks_repo_name: motiejus/tictactoe
max_forks_repo_head_hexsha: 66bb11e9a892a0c682af7c65ab08f048b52ca24b
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from django.core.validators import BaseValidator
from django.utils.deconstruct import deconstructible
from django.utils.translation import ungettext_lazy
@deconstructible
class ByteLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x.encode('utf8'))
message = ungettext_lazy(
('Ensure this value has at most %(limit_value)d byte '
'(it has %(show_value)d).'),
('Ensure this value has at most %(limit_value)d bytes '
'(it has %(show_value)d).'),
'limit_value')
code = 'max_length'
avg_line_length: 34.352941
max_line_length: 63
alphanum_fraction: 0.681507
content_no_comment:
from django.core.validators import BaseValidator
from django.utils.deconstruct import deconstructible
from django.utils.translation import ungettext_lazy
@deconstructible
class ByteLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x.encode('utf8'))
message = ungettext_lazy(
('Ensure this value has at most %(limit_value)d byte '
'(it has %(show_value)d).'),
('Ensure this value has at most %(limit_value)d bytes '
'(it has %(show_value)d).'),
'limit_value')
code = 'max_length'
is_comment_constant_removed: true
is_sharp_comment_removed: true